From c24c93043ee4fb63646286fa436b5970d8324945 Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead Date: Fri, 4 Mar 2022 18:41:25 +0000 Subject: llvm_tools: Clean up start_, end_version entries This removes any dangling end_version or start_version entry use in the patch_manager code. This forces all PATCHES.json to use the version_range schema. There was also a bug where end_version was being incorrectly set, which messed with nightly builders. BUG=None TEST=patch_manager_unittest.py TEST=cargo test Change-Id: If8fde535794b95a2cd80291f61a7b4d782b575ce Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3503392 Reviewed-by: Manoj Gupta Commit-Queue: Jordan Abrahams-Whitehead Tested-by: Jordan Abrahams-Whitehead --- llvm_tools/patch_manager.py | 48 ++++++++++--------- llvm_tools/patch_manager_unittest.py | 77 +++++++++++++++--------------- llvm_tools/patch_sync/src/main.rs | 2 +- llvm_tools/patch_sync/src/patch_parsing.rs | 31 +++--------- 4 files changed, 73 insertions(+), 85 deletions(-) diff --git a/llvm_tools/patch_manager.py b/llvm_tools/patch_manager.py index f2d6b322..eff1ba8d 100755 --- a/llvm_tools/patch_manager.py +++ b/llvm_tools/patch_manager.py @@ -211,17 +211,15 @@ def GetPatchMetadata(patch_dict): A tuple that contains the metadata values. """ - # Get the metadata values of a patch if possible. - # FIXME(b/221489531): Remove start_version & end_version if 'version_range' in patch_dict: - start_version = patch_dict['version_range'].get('from', 0) - end_version = patch_dict['version_range'].get('until', None) + from_version = patch_dict['version_range'].get('from', 0) + until_version = patch_dict['version_range'].get('until', None) else: - start_version = patch_dict.get('start_version', 0) - end_version = patch_dict.get('end_version', None) + from_version = 0 + until_version = None is_critical = patch_dict.get('is_critical', False) - return start_version, end_version, is_critical + return from_version, until_version, is_critical def ApplyPatch(src_path, patch_path): @@ -475,19 +473,20 @@ def HandlePatches(svn_version, # Get the patch's metadata. # # Index information of 'patch_metadata': - # [0]: start_version - # [1]: end_version + # [0]: from_version + # [1]: until_version # [2]: is_critical patch_metadata = GetPatchMetadata(cur_patch_dict) if not patch_metadata[1]: - # Patch does not have an 'end_version' value which implies 'end_version' - # == 'inf' ('svn_version' will always be less than 'end_version'), so - # the patch is applicable if 'svn_version' >= 'start_version'. + # Patch does not have an 'until' value which implies + # 'until' == 'inf' ('svn_version' will always be less + # than 'until'), so the patch is applicable if + # 'svn_version' >= 'from'. patch_applicable = svn_version >= patch_metadata[0] else: - # Patch is applicable if 'svn_version' >= 'start_version' && - # "svn_version" < "end_version". + # Patch is applicable if 'svn_version' >= 'from' && + # "svn_version" < "until". patch_applicable = (svn_version >= patch_metadata[0] and \ svn_version < patch_metadata[1]) @@ -524,12 +523,14 @@ def HandlePatches(svn_version, # Check the mode to determine what action to take on the failing # patch. if mode == FailureModes.DISABLE_PATCHES: - # Set the patch's 'end_version' to 'svn_version' so the patch - # would not be applicable anymore (i.e. the patch's 'end_version' + # Set the patch's 'until' to 'svn_version' so the patch + # would not be applicable anymore (i.e. 
the patch's 'until' # would not be greater than 'svn_version'). # Last element in 'applicable_patches' is the current patch. - applicable_patches[-1]['end_version'] = svn_version + new_version_range = applicable_patches[-1].get('version_range', {}) + new_version_range['until'] = svn_version + applicable_patches[-1]['version_range'] = new_version_range disabled_patches.append(os.path.basename(path_to_patch)) @@ -541,7 +542,7 @@ def HandlePatches(svn_version, modified_metadata = patch_metadata_file elif mode == FailureModes.BISECT_PATCHES: # Figure out where the patch's stops applying and set the patch's - # 'end_version' to that version. + # 'until' to that version. # Do not want to overwrite the changes to the current progress of # 'bisect_patches' on the source tree. @@ -566,14 +567,17 @@ def HandlePatches(svn_version, 'at %d\n' % (os.path.basename( cur_patch_dict['rel_patch_path']), bad_svn_version)) - # Overwrite the .JSON file with the new 'end_version' for the + # Overwrite the .JSON file with the new 'until' for the # current failed patch so that if there are other patches that - # fail to apply, then the 'end_version' for the current patch could + # fail to apply, then the 'until' for the current patch could # be applicable when `git bisect run` is performed on the next # failed patch because the same .JSON file is used for `git bisect # run`. + new_version_range = patch_file_contents[patch_dict_index].get( + 'version_range', {}) + new_version_range['until'] = bad_svn_version patch_file_contents[patch_dict_index][ - 'end_version'] = bad_svn_version + 'version_range'] = new_version_range UpdatePatchMetadataFile(patch_metadata_file, patch_file_contents) # Clear the changes made to the source tree by `git bisect run`. @@ -591,7 +595,7 @@ def HandlePatches(svn_version, RestoreSrcTreeState(src_path, bad_commit) if not modified_metadata: - # At least one patch's 'end_version' has been updated. + # At least one patch's 'until' has been updated. 
modified_metadata = patch_metadata_file elif mode == FailureModes.FAIL: diff --git a/llvm_tools/patch_manager_unittest.py b/llvm_tools/patch_manager_unittest.py index 69bb683e..25c68eee 100755 --- a/llvm_tools/patch_manager_unittest.py +++ b/llvm_tools/patch_manager_unittest.py @@ -34,9 +34,8 @@ class PatchManagerTest(unittest.TestCase): with self.assertRaises(ValueError) as err: patch_manager.is_directory(test_dir) - self.assertEqual( - str(err.exception), 'Path is not a directory: ' - '%s' % test_dir) + self.assertEqual(str(err.exception), 'Path is not a directory: ' + '%s' % test_dir) mock_isdir.assert_called_once() @@ -173,8 +172,8 @@ class PatchManagerTest(unittest.TestCase): 'rel_patch_path': 'cherry/fixes_stdout.patch' } - self.assertEqual( - patch_manager.GetPatchMetadata(test_patch), expected_patch_metadata) + self.assertEqual(patch_manager.GetPatchMetadata(test_patch), + expected_patch_metadata) def testSuccessfullyGetPatchMetdataForPatchWithSomeMetadata(self): expected_patch_metadata = 0, 1000, False @@ -187,8 +186,8 @@ class PatchManagerTest(unittest.TestCase): } } - self.assertEqual( - patch_manager.GetPatchMetadata(test_patch), expected_patch_metadata) + self.assertEqual(patch_manager.GetPatchMetadata(test_patch), + expected_patch_metadata) def testFailedToApplyPatchWhenInvalidSrcPathIsPassedIn(self): src_path = '/abs/path/to/src' @@ -200,8 +199,8 @@ class PatchManagerTest(unittest.TestCase): with self.assertRaises(ValueError) as err: patch_manager.ApplyPatch(src_path, abs_patch_path) - self.assertEqual( - str(err.exception), 'Invalid src path provided: %s' % src_path) + self.assertEqual(str(err.exception), + 'Invalid src path provided: %s' % src_path) # Simulate behavior of 'os.path.isdir()' when the absolute path to the # unpacked sources of the package is valid and exists. @@ -216,9 +215,8 @@ class PatchManagerTest(unittest.TestCase): with self.assertRaises(ValueError) as err: patch_manager.ApplyPatch(src_path, abs_patch_path) - self.assertEqual( - str(err.exception), 'Invalid patch file provided: ' - '%s' % abs_patch_path) + self.assertEqual(str(err.exception), 'Invalid patch file provided: ' + '%s' % abs_patch_path) mock_isdir.assert_called_once() @@ -260,7 +258,8 @@ class PatchManagerTest(unittest.TestCase): # Simulate behavior of 'os.path.isfile()' when the absolute path to the # patch exists and is a file. @mock.patch.object(patch_manager, 'check_output') - def testSuccessfullyAppliedPatch(self, mock_dry_run, mock_isfile, mock_isdir): + def testSuccessfullyAppliedPatch(self, mock_dry_run, mock_isfile, + mock_isdir): src_path = '/abs/path/to/src' abs_patch_path = '/abs/path/to/filesdir/cherry/fixes_stdout.patch' @@ -289,9 +288,8 @@ class PatchManagerTest(unittest.TestCase): with self.assertRaises(ValueError) as err: patch_manager.UpdatePatchMetadataFile(abs_patch_path, patch) - self.assertEqual( - str(err.exception), 'File does not end in ".json": ' - '%s' % abs_patch_path) + self.assertEqual(str(err.exception), 'File does not end in ".json": ' + '%s' % abs_patch_path) def testSuccessfullyUpdatedPatchMetadataFile(self): test_updated_patch_metadata = [{ @@ -334,8 +332,8 @@ class PatchManagerTest(unittest.TestCase): # Simulate behavior of 'GetPathToPatch()' when the absolute path to the # patch does not exist. 
def PathToPatchDoesNotExist(filesdir_path, rel_patch_path): - raise ValueError('The absolute path to %s does not exist' % os.path.join( - filesdir_path, rel_patch_path)) + raise ValueError('The absolute path to %s does not exist' % + os.path.join(filesdir_path, rel_patch_path)) # Use the test function to simulate the behavior of 'GetPathToPatch()'. mock_get_path_to_patch.side_effect = PathToPatchDoesNotExist @@ -363,9 +361,8 @@ class PatchManagerTest(unittest.TestCase): patch_manager.HandlePatches(revision, json_test_file, filesdir_path, src_path, FailureModes.FAIL) - self.assertEqual( - str(err.exception), - 'The absolute path to %s does not exist' % abs_patch_path) + self.assertEqual(str(err.exception), + 'The absolute path to %s does not exist' % abs_patch_path) mock_get_path_to_patch.assert_called_once_with(filesdir_path, rel_patch_path) @@ -411,8 +408,8 @@ class PatchManagerTest(unittest.TestCase): patch_manager.HandlePatches(revision, json_test_file, filesdir_path, src_path, FailureModes.FAIL) - self.assertEqual( - str(err.exception), 'Failed to apply patch: %s' % patch_name) + self.assertEqual(str(err.exception), + 'Failed to apply patch: %s' % patch_name) mock_get_path_to_patch.assert_called_once_with(filesdir_path, rel_patch_path) @@ -602,7 +599,8 @@ class PatchManagerTest(unittest.TestCase): self.assertEqual( path_to_patch, os.path.join(abs_path_to_filesdir, - test_patch_metadata[call_count + 1]['rel_patch_path'])) + test_patch_metadata[call_count + + 1]['rel_patch_path'])) # Simulate that the second patch applied successfully. return call_count == 1 @@ -653,10 +651,10 @@ class PatchManagerTest(unittest.TestCase): # 'test_patch_1' and 'test_patch_3' were not modified/disabled, so their # dictionary is the same, but 'test_patch_2' and 'test_patch_4' were - # disabled, so their 'end_version' would be set to 1200, which was the + # disabled, so their 'until' would be set to 1200, which was the # value passed into 'HandlePatches()' for the 'svn_version'. - test_patch_2['end_version'] = 1200 - test_patch_4['end_version'] = 1200 + test_patch_2['version_range']['until'] = 1200 + test_patch_4['version_range']['until'] = 1200 expected_json_file = [ test_patch_1, test_patch_2, test_patch_3, test_patch_4 @@ -675,10 +673,11 @@ class PatchManagerTest(unittest.TestCase): @mock.patch.object(patch_manager, 'GetPathToPatch') @mock.patch.object(patch_manager, 'ApplyPatch') - def testSomePatchesAreRemoved(self, mock_apply_patch, mock_get_path_to_patch): + def testSomePatchesAreRemoved(self, mock_apply_patch, + mock_get_path_to_patch): # For the 'remove_patches' mode, this patch is expected to be in the # 'non_applicable_patches' list and 'removed_patches' list because - # the 'svn_version' (1500) >= 'end_version' (1190). + # the 'svn_version' (1500) >= 'until' (1190). test_patch_1 = { 'comment': 'Redirects output to stdout', 'rel_patch_path': 'cherry/fixes_output.patch', @@ -691,7 +690,7 @@ class PatchManagerTest(unittest.TestCase): # For the 'remove_patches' mode, this patch is expected to be in the # 'applicable_patches' list (which is the list that the .json file will be # updated with) because the 'svn_version' < 'inf' (this patch does not have - # an 'end_version' value which implies 'end_version' == 'inf'). + # an 'until' value which implies 'until' == 'inf'). 
test_patch_2 = { 'comment': 'Fixes input', 'rel_patch_path': 'cherry/fixes_input.patch', @@ -702,7 +701,7 @@ class PatchManagerTest(unittest.TestCase): # For the 'remove_patches' mode, this patch is expected to be in the # 'non_applicable_patches' list and 'removed_patches' list because - # the 'svn_version' (1500) >= 'end_version' (1500). + # the 'svn_version' (1500) >= 'until' (1500). test_patch_3 = { 'comment': 'Adds a warning', 'rel_patch_path': 'add_warning.patch', @@ -714,7 +713,7 @@ class PatchManagerTest(unittest.TestCase): # For the 'remove_patches' mode, this patch is expected to be in the # 'non_applicable_patches' list and 'removed_patches' list because - # the 'svn_version' (1500) >= 'end_version' (1400). + # the 'svn_version' (1500) >= 'until' (1400). test_patch_4 = { 'comment': 'Adds a helper function', 'rel_patch_path': 'add_helper.patch', @@ -818,7 +817,7 @@ class PatchManagerTest(unittest.TestCase): # For the 'remove_patches' mode, this patch is expected to be in the # 'non_applicable_patches' list and 'removed_patches' list because - # the 'svn_version' (1200) >= 'end_version' (1190). + # the 'svn_version' (1200) >= 'until' (1190). test_patch_1 = { 'comment': 'Redirects output to stdout', 'rel_patch_path': 'cherry/fixes_output.patch', @@ -831,7 +830,7 @@ class PatchManagerTest(unittest.TestCase): # For the 'remove_patches' mode, this patch is expected to be in the # 'applicable_patches' list (which is the list that the .json file will be # updated with) because the 'svn_version' < 'inf' (this patch does not have - # an 'end_version' value which implies 'end_version' == 'inf'). + # an 'until' value which implies 'until' == 'inf'). test_patch_2 = { 'comment': 'Fixes input', 'rel_patch_path': 'cherry/fixes_input.patch', @@ -841,8 +840,8 @@ class PatchManagerTest(unittest.TestCase): } # For the 'remove_patches' mode, this patch is expected to be in the - # 'applicable_patches' list because 'svn_version' >= 'start_version' and - # 'svn_version' < 'end_version'. + # 'applicable_patches' list because 'svn_version' >= 'from' and + # 'svn_version' < 'until'. test_patch_3 = { 'comment': 'Adds a warning', 'rel_patch_path': 'add_warning.patch', @@ -854,7 +853,7 @@ class PatchManagerTest(unittest.TestCase): # For the 'remove_patches' mode, this patch is expected to be in the # 'applicable_patches' list because the patch is from the future (e.g. - # 'start_version' > 'svn_version' (1200), so it should NOT be removed. + # 'from' > 'svn_version' (1200), so it should NOT be removed. test_patch_4 = { 'comment': 'Adds a helper function', 'rel_patch_path': 'add_helper.patch', @@ -896,7 +895,9 @@ class PatchManagerTest(unittest.TestCase): # 'add_helper.patch' is still a 'non applicable' patch meaning it does not # apply in revision 1200 but it will NOT be removed because it is a future # patch. - expected_non_applicable_patches = ['fixes_output.patch', 'add_helper.patch'] + expected_non_applicable_patches = [ + 'fixes_output.patch', 'add_helper.patch' + ] expected_removed_patches = [ '/abs/path/to/filesdir/cherry/fixes_output.patch' ] diff --git a/llvm_tools/patch_sync/src/main.rs b/llvm_tools/patch_sync/src/main.rs index c244f1c0..65637a47 100644 --- a/llvm_tools/patch_sync/src/main.rs +++ b/llvm_tools/patch_sync/src/main.rs @@ -164,7 +164,7 @@ fn transpose_subcmd(args: TransposeOpt) -> Result<()> { })? 
}; let new_android_patches = new_android_patches.filter_patches(|p| { - match (p.get_start_version(), p.get_end_version()) { + match (p.get_from_version(), p.get_until_version()) { (Some(start), Some(end)) => start <= android_llvm_version && android_llvm_version < end, (Some(start), None) => start <= android_llvm_version, (None, Some(end)) => android_llvm_version < end, diff --git a/llvm_tools/patch_sync/src/patch_parsing.rs b/llvm_tools/patch_sync/src/patch_parsing.rs index 124f0d6f..f1ad52fb 100644 --- a/llvm_tools/patch_sync/src/patch_parsing.rs +++ b/llvm_tools/patch_sync/src/patch_parsing.rs @@ -8,20 +8,12 @@ use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; /// JSON serde struct. -// FIXME(b/221489531): Remove when we clear out start_version and -// end_version. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct PatchDictSchema { - /// [deprecated(since = "1.1", note = "Use version_range")] - #[serde(skip_serializing_if = "Option::is_none")] - pub end_version: Option, pub metadata: Option>, #[serde(default, skip_serializing_if = "BTreeSet::is_empty")] pub platforms: BTreeSet, pub rel_patch_path: String, - /// [deprecated(since = "1.1", note = "Use version_range")] - #[serde(skip_serializing_if = "Option::is_none")] - pub start_version: Option, pub version_range: Option, } @@ -31,19 +23,16 @@ pub struct VersionRange { pub until: Option, } -// FIXME(b/221489531): Remove when we clear out start_version and -// end_version. impl PatchDictSchema { - pub fn get_start_version(&self) -> Option { - self.version_range - .map(|x| x.from) - .unwrap_or(self.start_version) + /// Return the first version this patch applies to. + pub fn get_from_version(&self) -> Option { + self.version_range.and_then(|x| x.from) } - pub fn get_end_version(&self) -> Option { - self.version_range - .map(|x| x.until) - .unwrap_or(self.end_version) + /// Return the version after the last version this patch + /// applies to. + pub fn get_until_version(&self) -> Option { + self.version_range.and_then(|x| x.until) } } @@ -162,8 +151,6 @@ impl PatchCollection { // ii. combined_patches.push(PatchDictSchema { rel_patch_path: p.rel_patch_path.clone(), - start_version: p.start_version, - end_version: p.end_version, platforms: new_platforms, metadata: p.metadata.clone(), version_range: p.version_range, @@ -383,8 +370,6 @@ mod test { #[test] fn test_union() { let patch1 = PatchDictSchema { - start_version: Some(0), - end_version: Some(1), rel_patch_path: "a".into(), metadata: None, platforms: BTreeSet::from(["x".into()]), @@ -431,8 +416,6 @@ mod test { #[test] fn test_union_empties() { let patch1 = PatchDictSchema { - start_version: Some(0), - end_version: Some(1), rel_patch_path: "a".into(), metadata: None, platforms: Default::default(), -- cgit v1.2.3 From a7acc255400f3acb21922d5e6aff048ccba75df0 Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead Date: Wed, 2 Mar 2022 21:21:11 +0000 Subject: patch_sync: Use upstream branches, not mirrors At present, on the automation system that runs patch_sync, we check out the main branch for AOSP & CrOS as a mirror locally. However, this leads to issues where the local mirror will detach from remote, and causes rebase conflicts that shouldn't happen. This patch makes us check out the remote in a detached state, which will update with the repo sync. 
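For illustration, a minimal sketch (not the actual version_control.rs
helper, which this diff does not show) of the git behavior this change
relies on: checking out a remote-tracking ref such as "cros/main"
leaves the work tree in a detached-HEAD state that always reflects the
remote tip after `repo sync`, whereas a local branch named "main" can
diverge from it. The function name and repo path argument below are
hypothetical.

    use std::process::Command;

    // Check out `git_ref` in `repo_path`. Passing a remote-tracking
    // ref (e.g. "cros/main" or "aosp/master") detaches HEAD at the
    // remote's current tip; passing a local branch (e.g. "main") does
    // not, and that branch can drift from the remote between syncs.
    fn checkout(repo_path: &str, git_ref: &str) -> std::io::Result<()> {
        let status = Command::new("git")
            .args(["-C", repo_path, "checkout", git_ref])
            .status()?;
        if status.success() {
            Ok(())
        } else {
            Err(std::io::Error::new(
                std::io::ErrorKind::Other,
                format!("git checkout {} failed", git_ref),
            ))
        }
    }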
BUG=b:221572478 TEST=Testing on mobiletc-prebuild Change-Id: I9124901f8d657fa9c25bc7125b374c5b9006f325 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3501012 Tested-by: Jordan Abrahams-Whitehead Reviewed-by: George Burgess Commit-Queue: Jordan Abrahams-Whitehead --- llvm_tools/patch_sync/src/version_control.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/llvm_tools/patch_sync/src/version_control.rs b/llvm_tools/patch_sync/src/version_control.rs index e07d39d6..cfe88aec 100644 --- a/llvm_tools/patch_sync/src/version_control.rs +++ b/llvm_tools/patch_sync/src/version_control.rs @@ -8,8 +8,9 @@ use std::process::{Command, Output}; const CHROMIUMOS_OVERLAY_REL_PATH: &str = "src/third_party/chromiumos-overlay"; const ANDROID_LLVM_REL_PATH: &str = "toolchain/llvm_android"; -const CROS_MAIN_BRANCH: &str = "main"; -const ANDROID_MAIN_BRANCH: &str = "master"; // nocheck +// Need to checkout the upstream, rather than the local clone. +const CROS_MAIN_BRANCH: &str = "cros/main"; +const ANDROID_MAIN_BRANCH: &str = "aosp/master"; // nocheck const WORK_BRANCH_NAME: &str = "__patch_sync_tmp"; /// Context struct to keep track of both Chromium OS and Android checkouts. -- cgit v1.2.3 From d2b64f8ce4424f30a8cc268c1cfbd0f71d2631e3 Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead Date: Fri, 4 Mar 2022 20:26:01 +0000 Subject: llvm_tools: Reformat all python llvm_tools This CL runs `yapf` from inside the chroot for all python files in llvm_tools, except revert_checker.py, which comes from upstream LLVM. Also manually fixes some cros_lint backslash errors. BUG=None TEST=Presubmit checks Change-Id: I5f81fbdbbf954fe9ec591e641844f4ef3c17b73b Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3503393 Reviewed-by: Manoj Gupta Commit-Queue: Jordan Abrahams-Whitehead Tested-by: Jordan Abrahams-Whitehead --- llvm_tools/auto_llvm_bisection.py | 3 +- llvm_tools/auto_llvm_bisection_unittest.py | 33 ++++---- llvm_tools/bisect_clang_crashes.py | 18 +++-- llvm_tools/bisect_clang_crashes_unittest.py | 1 - llvm_tools/chroot_unittest.py | 10 +-- llvm_tools/fetch_cros_sdk_rolls.py | 12 ++- llvm_tools/get_llvm_hash.py | 19 ++--- llvm_tools/get_llvm_hash_unittest.py | 23 +++--- llvm_tools/git.py | 4 +- llvm_tools/git_llvm_rev.py | 8 +- llvm_tools/git_llvm_rev_test.py | 8 +- llvm_tools/git_unittest.py | 23 +++--- llvm_tools/llvm_bisection_unittest.py | 42 +++++----- llvm_tools/llvm_patch_management.py | 29 ++++--- llvm_tools/llvm_patch_management_unittest.py | 48 +++++------ llvm_tools/llvm_project.py | 24 +++--- llvm_tools/modify_a_tryjob_unittest.py | 46 ++++++----- llvm_tools/nightly_revert_checker_test.py | 46 +++++------ llvm_tools/patch_manager.py | 94 +++++++++++----------- llvm_tools/subprocess_helpers.py | 11 ++- llvm_tools/update_chromeos_llvm_hash_unittest.py | 55 ++++++------- .../update_packages_and_run_tests_unittest.py | 32 +++++--- llvm_tools/update_tryjob_status.py | 38 +++++---- llvm_tools/update_tryjob_status_unittest.py | 31 +++---- llvm_tools/upload_lexan_crashes_to_forcey.py | 19 ++--- llvm_tools/upload_lexan_crashes_to_forcey_test.py | 32 ++++---- 26 files changed, 367 insertions(+), 342 deletions(-) diff --git a/llvm_tools/auto_llvm_bisection.py b/llvm_tools/auto_llvm_bisection.py index 7e8fb1dd..dbd8f37e 100755 --- a/llvm_tools/auto_llvm_bisection.py +++ b/llvm_tools/auto_llvm_bisection.py @@ -52,7 +52,8 @@ class BuilderStatus(enum.Enum): builder_status_mapping = { 
BuilderStatus.PASS.value: update_tryjob_status.TryjobStatus.GOOD.value, BuilderStatus.FAIL.value: update_tryjob_status.TryjobStatus.BAD.value, - BuilderStatus.RUNNING.value: update_tryjob_status.TryjobStatus.PENDING.value + BuilderStatus.RUNNING.value: + update_tryjob_status.TryjobStatus.PENDING.value } diff --git a/llvm_tools/auto_llvm_bisection_unittest.py b/llvm_tools/auto_llvm_bisection_unittest.py index 07c0e715..3f7e821b 100755 --- a/llvm_tools/auto_llvm_bisection_unittest.py +++ b/llvm_tools/auto_llvm_bisection_unittest.py @@ -27,10 +27,9 @@ class AutoLLVMBisectionTest(unittest.TestCase): """Unittests for auto bisection of LLVM.""" @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True) - @mock.patch.object( - llvm_bisection, - 'GetCommandLineArgs', - return_value=test_helpers.ArgsOutputTest()) + @mock.patch.object(llvm_bisection, + 'GetCommandLineArgs', + return_value=test_helpers.ArgsOutputTest()) @mock.patch.object(time, 'sleep') @mock.patch.object(traceback, 'print_exc') @mock.patch.object(llvm_bisection, 'main') @@ -62,9 +61,9 @@ class AutoLLVMBisectionTest(unittest.TestCase): ] mock_json_load.return_value = { 'start': - 369410, + 369410, 'end': - 369420, + 369420, 'jobs': [{ 'buildbucket_id': 12345, 'rev': 369411, @@ -93,10 +92,9 @@ class AutoLLVMBisectionTest(unittest.TestCase): @mock.patch.object(traceback, 'print_exc') @mock.patch.object(llvm_bisection, 'main') @mock.patch.object(os.path, 'isfile') - @mock.patch.object( - llvm_bisection, - 'GetCommandLineArgs', - return_value=test_helpers.ArgsOutputTest()) + @mock.patch.object(llvm_bisection, + 'GetCommandLineArgs', + return_value=test_helpers.ArgsOutputTest()) def testFailedToStartBisection(self, mock_get_args, mock_isfile, mock_llvm_bisection, mock_traceback, mock_sleep, mock_outside_chroot): @@ -121,10 +119,9 @@ class AutoLLVMBisectionTest(unittest.TestCase): self.assertEqual(mock_sleep.call_count, 2) @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True) - @mock.patch.object( - llvm_bisection, - 'GetCommandLineArgs', - return_value=test_helpers.ArgsOutputTest()) + @mock.patch.object(llvm_bisection, + 'GetCommandLineArgs', + return_value=test_helpers.ArgsOutputTest()) @mock.patch.object(time, 'time') @mock.patch.object(time, 'sleep') @mock.patch.object(os.path, 'isfile') @@ -154,9 +151,9 @@ class AutoLLVMBisectionTest(unittest.TestCase): mock_isfile.return_value = True mock_json_load.return_value = { 'start': - 369410, + 369410, 'end': - 369420, + 369420, 'jobs': [{ 'buildbucket_id': 12345, 'rev': 369411, @@ -235,8 +232,8 @@ class AutoLLVMBisectionTest(unittest.TestCase): auto_llvm_bisection.GetBuildResult(chroot_path, buildbucket_id) self.assertEqual( - str(err.exception), - '"cros buildresult" return value is invalid: %s' % invalid_build_status) + str(err.exception), '"cros buildresult" return value is invalid: %s' % + invalid_build_status) mock_chroot_command.assert_called_once_with( [ diff --git a/llvm_tools/bisect_clang_crashes.py b/llvm_tools/bisect_clang_crashes.py index c53db179..d9b3d141 100755 --- a/llvm_tools/bisect_clang_crashes.py +++ b/llvm_tools/bisect_clang_crashes.py @@ -64,12 +64,13 @@ def main(argv): ) cur_dir = os.path.dirname(os.path.abspath(__file__)) parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument( - '--4c', dest='forcey', required=True, help='Path to a 4c client binary') - parser.add_argument( - '--state_file', - default=os.path.join(cur_dir, 'chromeos-state.json'), - help='The path to the state file.') + parser.add_argument('--4c', + 
dest='forcey', + required=True, + help='Path to a 4c client binary') + parser.add_argument('--state_file', + default=os.path.join(cur_dir, 'chromeos-state.json'), + help='The path to the state file.') parser.add_argument( '--nocleanup', action='store_false', @@ -81,8 +82,9 @@ def main(argv): os.makedirs(os.path.dirname(state_file), exist_ok=True) temporary_directory = '/tmp/bisect_clang_crashes' os.makedirs(temporary_directory, exist_ok=True) - urls = get_artifacts('gs://chromeos-toolchain-artifacts/clang-crash-diagnoses' - '/**/*clang_crash_diagnoses.tar.xz') + urls = get_artifacts( + 'gs://chromeos-toolchain-artifacts/clang-crash-diagnoses' + '/**/*clang_crash_diagnoses.tar.xz') logging.info('%d crash URLs found', len(urls)) visited = {} diff --git a/llvm_tools/bisect_clang_crashes_unittest.py b/llvm_tools/bisect_clang_crashes_unittest.py index a3dc0c6d..238b674d 100755 --- a/llvm_tools/bisect_clang_crashes_unittest.py +++ b/llvm_tools/bisect_clang_crashes_unittest.py @@ -63,7 +63,6 @@ class Test(unittest.TestCase): @mock.patch.object(glob, 'glob') def test_get_crash_reproducers_no_matching_script(self, mock_file_search, mock_file_check): - def silence_logging(): root = logging.getLogger() filt = self._SilencingFilter() diff --git a/llvm_tools/chroot_unittest.py b/llvm_tools/chroot_unittest.py index 5eec5675..9fb1d0c0 100755 --- a/llvm_tools/chroot_unittest.py +++ b/llvm_tools/chroot_unittest.py @@ -32,9 +32,8 @@ class HelperFunctionsTest(unittest.TestCase): chroot_path = '/test/chroot/path' package_list = ['new-test/package'] - self.assertEqual( - chroot.GetChrootEbuildPaths(chroot_path, package_list), - [package_chroot_path]) + self.assertEqual(chroot.GetChrootEbuildPaths(chroot_path, package_list), + [package_chroot_path]) mock_chroot_command.assert_called_once() @@ -58,8 +57,9 @@ class HelperFunctionsTest(unittest.TestCase): expected_abs_path = '/path/to/chroot/src/package.ebuild' self.assertEqual( - chroot.ConvertChrootPathsToAbsolutePaths( - chroot_path, chroot_file_paths), [expected_abs_path]) + chroot.ConvertChrootPathsToAbsolutePaths(chroot_path, + chroot_file_paths), + [expected_abs_path]) if __name__ == '__main__': diff --git a/llvm_tools/fetch_cros_sdk_rolls.py b/llvm_tools/fetch_cros_sdk_rolls.py index 83d7025a..b8fdf943 100755 --- a/llvm_tools/fetch_cros_sdk_rolls.py +++ b/llvm_tools/fetch_cros_sdk_rolls.py @@ -68,15 +68,19 @@ def load_manifest_versions(manifest: Path) -> Dict[str, str]: def main(): parser = argparse.ArgumentParser( - description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - '-d', '--debug', action='store_true', help='Emit debugging output') + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument('-d', + '--debug', + action='store_true', + help='Emit debugging output') parser.add_argument( '-n', '--number', type=int, default=20, - help='Number of recent manifests to fetch info about. 0 means unlimited.') + help='Number of recent manifests to fetch info about. 0 means unlimited.' 
+ ) args = parser.parse_args() is_debug = args.debug diff --git a/llvm_tools/get_llvm_hash.py b/llvm_tools/get_llvm_hash.py index 83b5ae76..f566f6f3 100755 --- a/llvm_tools/get_llvm_hash.py +++ b/llvm_tools/get_llvm_hash.py @@ -91,7 +91,8 @@ def ParseLLVMMajorVersion(cmakelist): Raises: ValueError: The major version cannot be parsed from cmakelist """ - match = re.search(r'\n\s+set\(LLVM_VERSION_MAJOR (?P\d+)\)', cmakelist) + match = re.search(r'\n\s+set\(LLVM_VERSION_MAJOR (?P\d+)\)', + cmakelist) if not match: raise ValueError('Failed to parse CMakeList for llvm major version') return match.group('major') @@ -158,8 +159,8 @@ def CreateTempLLVMRepo(temp_dir): finally: if os.path.isdir(temp_dir): check_output([ - 'git', '-C', abs_path_to_llvm_project_dir, 'worktree', 'remove', '-f', - temp_dir + 'git', '-C', abs_path_to_llvm_project_dir, 'worktree', 'remove', + '-f', temp_dir ]) @@ -189,11 +190,10 @@ def GetAndUpdateLLVMProjectInLLVMTools(): 'llvm-project-copy') if not os.path.isdir(abs_path_to_llvm_project_dir): - print( - (f'Checking out LLVM to {abs_path_to_llvm_project_dir}\n' - 'so that we can map between commit hashes and revision numbers.\n' - 'This may take a while, but only has to be done once.'), - file=sys.stderr) + print((f'Checking out LLVM to {abs_path_to_llvm_project_dir}\n' + 'so that we can map between commit hashes and revision numbers.\n' + 'This may take a while, but only has to be done once.'), + file=sys.stderr) os.mkdir(abs_path_to_llvm_project_dir) LLVMHash().CloneLLVMRepo(abs_path_to_llvm_project_dir) @@ -240,7 +240,8 @@ def GetGoogle3LLVMVersion(stable): git_hash = check_output(cmd) # Change type to an integer - return GetVersionFrom(GetAndUpdateLLVMProjectInLLVMTools(), git_hash.rstrip()) + return GetVersionFrom(GetAndUpdateLLVMProjectInLLVMTools(), + git_hash.rstrip()) def IsSvnOption(svn_option): diff --git a/llvm_tools/get_llvm_hash_unittest.py b/llvm_tools/get_llvm_hash_unittest.py index 49740f33..b7c9e972 100755 --- a/llvm_tools/get_llvm_hash_unittest.py +++ b/llvm_tools/get_llvm_hash_unittest.py @@ -20,7 +20,6 @@ from get_llvm_hash import LLVMHash def MakeMockPopen(return_code): - def MockPopen(*_args, **_kwargs): result = mock.MagicMock() result.returncode = return_code @@ -61,8 +60,8 @@ class TestGetLLVMHash(unittest.TestCase): def testGetGitHashWorks(self, mock_get_git_hash): mock_get_git_hash.return_value = 'a13testhash2' - self.assertEqual( - get_llvm_hash.GetGitHashFrom('/tmp/tmpTest', 100), 'a13testhash2') + self.assertEqual(get_llvm_hash.GetGitHashFrom('/tmp/tmpTest', 100), + 'a13testhash2') mock_get_git_hash.assert_called_once() @@ -92,14 +91,14 @@ class TestGetLLVMHash(unittest.TestCase): @mock.patch.object(subprocess, 'Popen') def testCheckoutBranch(self, mock_popen): - mock_popen.return_value = mock.MagicMock( - communicate=lambda: (None, None), returncode=0) + mock_popen.return_value = mock.MagicMock(communicate=lambda: (None, None), + returncode=0) get_llvm_hash.CheckoutBranch('fake/src_dir', 'fake_branch') self.assertEqual( mock_popen.call_args_list[0][0], - (['git', '-C', 'fake/src_dir', 'checkout', 'fake_branch'],)) + (['git', '-C', 'fake/src_dir', 'checkout', 'fake_branch'], )) self.assertEqual(mock_popen.call_args_list[1][0], - (['git', '-C', 'fake/src_dir', 'pull'],)) + (['git', '-C', 'fake/src_dir', 'pull'], )) def testParseLLVMMajorVersion(self): cmakelist_42 = ('set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)\n' @@ -117,10 +116,9 @@ class TestGetLLVMHash(unittest.TestCase): @mock.patch.object(get_llvm_hash, 'ParseLLVMMajorVersion') 
@mock.patch.object(get_llvm_hash, 'CheckCommand') @mock.patch.object(get_llvm_hash, 'CheckoutBranch') - @mock.patch( - 'get_llvm_hash.open', - mock.mock_open(read_data='mock contents'), - create=True) + @mock.patch('get_llvm_hash.open', + mock.mock_open(read_data='mock contents'), + create=True) def testGetLLVMMajorVersion(self, mock_checkout_branch, mock_git_checkout, mock_major_version, mock_llvm_project_path): mock_llvm_project_path.return_value = 'path/to/llvm-project' @@ -132,7 +130,8 @@ class TestGetLLVMHash(unittest.TestCase): mock_major_version.assert_called_with('mock contents') mock_git_checkout.assert_called_once_with( ['git', '-C', 'path/to/llvm-project', 'checkout', '314159265']) - mock_checkout_branch.assert_called_once_with('path/to/llvm-project', 'main') + mock_checkout_branch.assert_called_once_with('path/to/llvm-project', + 'main') if __name__ == '__main__': diff --git a/llvm_tools/git.py b/llvm_tools/git.py index 22c7002a..2fa99de8 100755 --- a/llvm_tools/git.py +++ b/llvm_tools/git.py @@ -131,5 +131,5 @@ def UploadChanges(repo, branch, commit_messages, reviewers=None, cc=None): if not found_url: raise ValueError('Failed to find change list URL.') - return CommitContents( - url=found_url.group(0), cl_number=int(found_url.group(1))) + return CommitContents(url=found_url.group(0), + cl_number=int(found_url.group(1))) diff --git a/llvm_tools/git_llvm_rev.py b/llvm_tools/git_llvm_rev.py index b62b26e2..8ca60dca 100755 --- a/llvm_tools/git_llvm_rev.py +++ b/llvm_tools/git_llvm_rev.py @@ -246,8 +246,8 @@ def translate_prebase_rev_to_sha(llvm_config: LLVMConfig, rev: Rev) -> str: looking_for = f'llvm-svn: {rev.number}' git_command = [ - 'git', 'log', '--grep', f'^{looking_for}$', f'--format=%H%n%B{separator}', - base_llvm_sha + 'git', 'log', '--grep', f'^{looking_for}$', + f'--format=%H%n%B{separator}', base_llvm_sha ] subp = subprocess.Popen( @@ -353,8 +353,8 @@ def main(argv: t.List[str]) -> None: default='origin', help="LLVM upstream's remote name. 
Defaults to %(default)s.") sha_or_rev = parser.add_mutually_exclusive_group(required=True) - sha_or_rev.add_argument( - '--sha', help='A git SHA (or ref) to convert to a rev') + sha_or_rev.add_argument('--sha', + help='A git SHA (or ref) to convert to a rev') sha_or_rev.add_argument('--rev', help='A rev to convert into a sha') opts = parser.parse_args(argv) diff --git a/llvm_tools/git_llvm_rev_test.py b/llvm_tools/git_llvm_rev_test.py index d05093a8..0a6719c1 100755 --- a/llvm_tools/git_llvm_rev_test.py +++ b/llvm_tools/git_llvm_rev_test.py @@ -14,8 +14,8 @@ from git_llvm_rev import MAIN_BRANCH def get_llvm_config() -> git_llvm_rev.LLVMConfig: - return git_llvm_rev.LLVMConfig( - dir=llvm_project.get_location(), remote='origin') + return git_llvm_rev.LLVMConfig(dir=llvm_project.get_location(), + remote='origin') class Test(unittest.TestCase): @@ -30,8 +30,8 @@ class Test(unittest.TestCase): def test_sha_to_rev_on_base_sha_works(self) -> None: sha = self.rev_to_sha_with_round_trip( - git_llvm_rev.Rev( - branch=MAIN_BRANCH, number=git_llvm_rev.base_llvm_revision)) + git_llvm_rev.Rev(branch=MAIN_BRANCH, + number=git_llvm_rev.base_llvm_revision)) self.assertEqual(sha, git_llvm_rev.base_llvm_sha) def test_sha_to_rev_prior_to_base_rev_works(self) -> None: diff --git a/llvm_tools/git_unittest.py b/llvm_tools/git_unittest.py index 47927716..7c654475 100755 --- a/llvm_tools/git_unittest.py +++ b/llvm_tools/git_unittest.py @@ -32,9 +32,8 @@ class HelperFunctionsTest(unittest.TestCase): with self.assertRaises(ValueError) as err: git.CreateBranch(path_to_repo, branch) - self.assertEqual( - str(err.exception), - 'Invalid directory path provided: %s' % path_to_repo) + self.assertEqual(str(err.exception), + 'Invalid directory path provided: %s' % path_to_repo) mock_isdir.assert_called_once() @@ -59,9 +58,8 @@ class HelperFunctionsTest(unittest.TestCase): with self.assertRaises(ValueError) as err: git.DeleteBranch(path_to_repo, branch) - self.assertEqual( - str(err.exception), - 'Invalid directory path provided: %s' % path_to_repo) + self.assertEqual(str(err.exception), + 'Invalid directory path provided: %s' % path_to_repo) mock_isdir.assert_called_once() @@ -87,8 +85,8 @@ class HelperFunctionsTest(unittest.TestCase): with self.assertRaises(ValueError) as err: git.UploadChanges(path_to_repo, branch, commit_messages) - self.assertEqual( - str(err.exception), 'Invalid path provided: %s' % path_to_repo) + self.assertEqual(str(err.exception), + 'Invalid path provided: %s' % path_to_repo) mock_isdir.assert_called_once() @@ -129,11 +127,10 @@ class HelperFunctionsTest(unittest.TestCase): ] self.assertEqual( mock_commands.call_args_list[1], - mock.call( - expected_cmd, - stderr=subprocess.STDOUT, - cwd=path_to_repo, - encoding='utf-8')) + mock.call(expected_cmd, + stderr=subprocess.STDOUT, + cwd=path_to_repo, + encoding='utf-8')) self.assertEqual( change_list.url, diff --git a/llvm_tools/llvm_bisection_unittest.py b/llvm_tools/llvm_bisection_unittest.py index cc22dfa4..207f4c24 100755 --- a/llvm_tools/llvm_bisection_unittest.py +++ b/llvm_tools/llvm_bisection_unittest.py @@ -128,7 +128,8 @@ class LLVMBisectionTest(unittest.TestCase): mock_get_git_hash.side_effect = revs git_hashes = [ - git_llvm_rev.base_llvm_revision + 3, git_llvm_rev.base_llvm_revision + 5 + git_llvm_rev.base_llvm_revision + 3, + git_llvm_rev.base_llvm_revision + 5 ] self.assertEqual( @@ -160,9 +161,8 @@ class LLVMBisectionTest(unittest.TestCase): last_tested = '/abs/path/to/file_that_does_not_exist.json' - self.assertEqual( - 
llvm_bisection.LoadStatusFile(last_tested, start, end), - expected_bisect_state) + self.assertEqual(llvm_bisection.LoadStatusFile(last_tested, start, end), + expected_bisect_state) @mock.patch.object(modify_a_tryjob, 'AddTryjob') def testBisectPassed(self, mock_add_tryjob): @@ -173,9 +173,9 @@ class LLVMBisectionTest(unittest.TestCase): # Simulate behavior of `AddTryjob()` when successfully launched a tryjob for # the updated packages. @test_helpers.CallCountsToMockFunctions - def MockAddTryjob(call_count, _packages, _git_hash, _revision, _chroot_path, - _patch_file, _extra_cls, _options, _builder, _verbose, - _svn_revision): + def MockAddTryjob(call_count, _packages, _git_hash, _revision, + _chroot_path, _patch_file, _extra_cls, _options, + _builder, _verbose, _svn_revision): if call_count < 2: return {'rev': revisions_list[call_count], 'status': 'pending'} @@ -208,17 +208,18 @@ class LLVMBisectionTest(unittest.TestCase): # Verify that the status file is updated when an exception happened when # attempting to launch a revision (i.e. progress is not lost). with self.assertRaises(ValueError) as err: - llvm_bisection.Bisect(revisions_list, git_hash_list, bisection_contents, - temp_json_file, packages, args_output.chroot_path, - patch_file, args_output.extra_change_lists, + llvm_bisection.Bisect(revisions_list, git_hash_list, + bisection_contents, temp_json_file, packages, + args_output.chroot_path, patch_file, + args_output.extra_change_lists, args_output.options, args_output.builders, args_output.verbose) expected_bisection_contents = { 'start': - start, + start, 'end': - end, + end, 'jobs': [{ 'rev': revisions_list[0], 'status': 'pending' @@ -240,8 +241,9 @@ class LLVMBisectionTest(unittest.TestCase): self.assertEqual(mock_add_tryjob.call_count, 3) @mock.patch.object(subprocess, 'check_output', return_value=None) - @mock.patch.object( - get_llvm_hash.LLVMHash, 'GetLLVMHash', return_value='a123testhash4') + @mock.patch.object(get_llvm_hash.LLVMHash, + 'GetLLVMHash', + return_value='a123testhash4') @mock.patch.object(llvm_bisection, 'GetCommitsBetween') @mock.patch.object(llvm_bisection, 'GetRemainingRange') @mock.patch.object(llvm_bisection, 'LoadStatusFile') @@ -331,9 +333,10 @@ class LLVMBisectionTest(unittest.TestCase): with self.assertRaises(ValueError) as err: llvm_bisection.main(args_output) - error_message = (f'The start {start} or the end {end} version provided is ' - f'different than "start" {bisect_state["start"]} or "end" ' - f'{bisect_state["end"]} in the .JSON file') + error_message = ( + f'The start {start} or the end {end} version provided is ' + f'different than "start" {bisect_state["start"]} or "end" ' + f'{bisect_state["end"]} in the .JSON file') self.assertEqual(str(err.exception), error_message) @@ -448,8 +451,9 @@ class LLVMBisectionTest(unittest.TestCase): mock_get_revision_and_hash_list.assert_called_once() @mock.patch.object(subprocess, 'check_output', return_value=None) - @mock.patch.object( - get_llvm_hash.LLVMHash, 'GetLLVMHash', return_value='a123testhash4') + @mock.patch.object(get_llvm_hash.LLVMHash, + 'GetLLVMHash', + return_value='a123testhash4') @mock.patch.object(llvm_bisection, 'GetCommitsBetween') @mock.patch.object(llvm_bisection, 'GetRemainingRange') @mock.patch.object(llvm_bisection, 'LoadStatusFile') diff --git a/llvm_tools/llvm_patch_management.py b/llvm_tools/llvm_patch_management.py index 90f9a5c0..53ffc3c2 100755 --- a/llvm_tools/llvm_patch_management.py +++ b/llvm_tools/llvm_patch_management.py @@ -36,7 +36,8 @@ def GetCommandLineArgs(): 
cros_root = os.path.join(cros_root, 'chromiumos') # Create parser and add optional command-line arguments. - parser = argparse.ArgumentParser(description='Patch management for packages.') + parser = argparse.ArgumentParser( + description='Patch management for packages.') # Add argument for a specific chroot path. parser.add_argument( @@ -54,11 +55,10 @@ def GetCommandLineArgs(): help='the packages to manage their patches (default: %(default)s)') # Add argument for whether to display command contents to `stdout`. - parser.add_argument( - '--verbose', - action='store_true', - help='display contents of a command to the terminal ' - '(default: %(default)s)') + parser.add_argument('--verbose', + action='store_true', + help='display contents of a command to the terminal ' + '(default: %(default)s)') # Add argument for the LLVM version to use for patch management. parser.add_argument( @@ -95,8 +95,8 @@ def GetCommandLineArgs(): # Duplicate packages were passed into the command line if len(unique_packages) != len(args_output.packages): - raise ValueError('Duplicate packages were passed in: %s' % ' '.join( - args_output.packages)) + raise ValueError('Duplicate packages were passed in: %s' % + ' '.join(args_output.packages)) args_output.packages = unique_packages @@ -178,7 +178,8 @@ def _MoveSrcTreeHEADToGitHash(src_path, git_hash): move_head_cmd = ['git', '-C', src_path, 'checkout', git_hash] - subprocess_helpers.ExecCommandAndCaptureOutput(move_head_cmd, verbose=verbose) + subprocess_helpers.ExecCommandAndCaptureOutput(move_head_cmd, + verbose=verbose) def UpdatePackagesPatchMetadataFile(chroot_path, svn_version, @@ -230,8 +231,10 @@ def UpdatePackagesPatchMetadataFile(chroot_path, svn_version, patch_manager.CleanSrcTree(src_path) # Get the patch results for the current package. - patches_info = patch_manager.HandlePatches( - svn_version, patch_metadata_path, filesdir_path, src_path, mode) + patches_info = patch_manager.HandlePatches(svn_version, + patch_metadata_path, + filesdir_path, src_path, + mode) package_info[cur_package] = patches_info._asdict() @@ -262,8 +265,8 @@ def main(): # Only 'disable_patches' and 'remove_patches' can potentially modify the patch # metadata file. 
- if args_output.failure_mode == FailureModes.DISABLE_PATCHES.value or \ - args_output.failure_mode == FailureModes.REMOVE_PATCHES.value: + if (args_output.failure_mode == FailureModes.DISABLE_PATCHES.value + or args_output.failure_mode == FailureModes.REMOVE_PATCHES.value): print('The patch file %s has been modified for the packages:' % args_output.patch_metadata_file) print('\n'.join(args_output.packages)) diff --git a/llvm_tools/llvm_patch_management_unittest.py b/llvm_tools/llvm_patch_management_unittest.py index 968a816a..92dc64a9 100755 --- a/llvm_tools/llvm_patch_management_unittest.py +++ b/llvm_tools/llvm_patch_management_unittest.py @@ -36,8 +36,8 @@ class LlvmPatchManagementTest(unittest.TestCase): with self.assertRaises(ValueError) as err: llvm_patch_management.GetPathToFilesDirectory(chroot_path, package) - self.assertEqual( - str(err.exception), 'Invalid chroot provided: %s' % chroot_path) + self.assertEqual(str(err.exception), + 'Invalid chroot provided: %s' % chroot_path) mock_isdir.assert_called_once() @@ -46,8 +46,9 @@ class LlvmPatchManagementTest(unittest.TestCase): @mock.patch.object(os.path, 'isdir', return_value=True) @mock.patch.object(subprocess_helpers, 'ChrootRunCommand') @mock.patch.object(llvm_patch_management, '_GetRelativePathOfChrootPath') - def testSuccessfullyGetPathToFilesDir( - self, mock_get_relative_path_of_chroot_path, mock_chroot_cmd, mock_isdir): + def testSuccessfullyGetPathToFilesDir(self, + mock_get_relative_path_of_chroot_path, + mock_chroot_cmd, mock_isdir): package_chroot_path = '/mnt/host/source/path/to/llvm/llvm.ebuild' @@ -95,8 +96,8 @@ class LlvmPatchManagementTest(unittest.TestCase): package_rel_path = 'path/to/llvm' self.assertEqual( - llvm_patch_management._GetRelativePathOfChrootPath(package_chroot_path), - package_rel_path) + llvm_patch_management._GetRelativePathOfChrootPath( + package_chroot_path), package_rel_path) # Simulate behavior of 'os.path.isfile()' when the patch metadata file does # not exist. @@ -109,9 +110,8 @@ class LlvmPatchManagementTest(unittest.TestCase): with self.assertRaises(ValueError) as err: llvm_patch_management._CheckPatchMetadataPath(abs_path_to_patch_file) - self.assertEqual( - str(err.exception), - 'Invalid file provided: %s' % abs_path_to_patch_file) + self.assertEqual(str(err.exception), + 'Invalid file provided: %s' % abs_path_to_patch_file) mock_isfile.assert_called_once() @@ -146,8 +146,9 @@ class LlvmPatchManagementTest(unittest.TestCase): # Simulate `GetGitHashFrom()` when successfully retrieved the git hash # of the version passed in. - @mock.patch.object( - get_llvm_hash, 'GetGitHashFrom', return_value='a123testhash1') + @mock.patch.object(get_llvm_hash, + 'GetGitHashFrom', + return_value='a123testhash1') # Simulate `CreateTempLLVMRepo()` when successfully created a work tree from # the LLVM repo copy in `llvm_tools` directory. @mock.patch.object(get_llvm_hash, 'CreateTempLLVMRepo') @@ -160,16 +161,16 @@ class LlvmPatchManagementTest(unittest.TestCase): self, mock_check_patch_metadata_path, mock_get_filesdir_path, mock_move_head_pointer, mock_create_temp_llvm_repo, mock_get_git_hash): - abs_path_to_patch_file = \ - '/some/path/to/chroot/some/path/to/filesdir/PATCHES' + abs_path_to_patch_file = ( + '/some/path/to/chroot/some/path/to/filesdir/PATCHES') # Simulate the behavior of '_CheckPatchMetadataPath()' when the patch # metadata file in $FILESDIR does not exist or does not end in '.json'. 
def InvalidPatchMetadataFile(patch_metadata_path): self.assertEqual(patch_metadata_path, abs_path_to_patch_file) - raise ValueError( - 'File does not end in ".json": %s' % abs_path_to_patch_file) + raise ValueError('File does not end in ".json": %s' % + abs_path_to_patch_file) # Use the test function to simulate behavior of '_CheckPatchMetadataPath()'. mock_check_patch_metadata_path.side_effect = InvalidPatchMetadataFile @@ -184,8 +185,8 @@ class LlvmPatchManagementTest(unittest.TestCase): # Simulate the behavior of returning the absolute path to a worktree via # `git worktree add`. - mock_create_temp_llvm_repo.return_value.__enter__.return_value.name = \ - temp_work_tree + mock_create_temp_llvm_repo.return_value.__enter__.return_value.name = ( + temp_work_tree) chroot_path = '/some/path/to/chroot' revision = 1000 @@ -219,8 +220,9 @@ class LlvmPatchManagementTest(unittest.TestCase): @mock.patch.object(patch_manager, 'CleanSrcTree') # Simulate `GetGitHashFrom()` when successfully retrieved the git hash # of the version passed in. - @mock.patch.object( - get_llvm_hash, 'GetGitHashFrom', return_value='a123testhash1') + @mock.patch.object(get_llvm_hash, + 'GetGitHashFrom', + return_value='a123testhash1') # Simulate `CreateTempLLVMRepo()` when successfully created a work tree from # the LLVM repo copy in `llvm_tools` directory. @mock.patch.object(get_llvm_hash, 'CreateTempLLVMRepo') @@ -237,8 +239,8 @@ class LlvmPatchManagementTest(unittest.TestCase): abs_path_to_filesdir = '/some/path/to/chroot/some/path/to/filesdir' - abs_path_to_patch_file = \ - '/some/path/to/chroot/some/path/to/filesdir/PATCHES.json' + abs_path_to_patch_file = ( + '/some/path/to/chroot/some/path/to/filesdir/PATCHES.json') # Simulate the behavior of 'GetPathToFilesDirectory()' when successfully # constructed the absolute path to $FILESDIR of a package. @@ -264,8 +266,8 @@ class LlvmPatchManagementTest(unittest.TestCase): # Simulate the behavior of returning the absolute path to a worktree via # `git worktree add`. - mock_create_temp_llvm_repo.return_value.__enter__.return_value.name = \ - temp_work_tree + mock_create_temp_llvm_repo.return_value.__enter__.return_value.name = ( + temp_work_tree) expected_patch_results = { 'applied_patches': ['fixes_something.patch'], diff --git a/llvm_tools/llvm_project.py b/llvm_tools/llvm_project.py index 7937729f..e059ae29 100644 --- a/llvm_tools/llvm_project.py +++ b/llvm_tools/llvm_project.py @@ -27,11 +27,11 @@ def ensure_up_to_date(): checkout = get_location() if not os.path.isdir(checkout): - print( - 'No llvm-project exists locally; syncing it. This takes a while.', - file=sys.stderr) + print('No llvm-project exists locally; syncing it. 
This takes a while.', + file=sys.stderr) actual_checkout = get_llvm_hash.GetAndUpdateLLVMProjectInLLVMTools() - assert checkout == actual_checkout, '%s != %s' % (actual_checkout, checkout) + assert checkout == actual_checkout, '%s != %s' % (actual_checkout, + checkout) commit_timestamp = subprocess.check_output( [ @@ -52,13 +52,13 @@ def ensure_up_to_date(): if time_since_last_commit <= datetime.timedelta(days=2): return - print( - '%d days have elapsed since the last commit to %s; auto-syncing' % - (time_since_last_commit.days, checkout), - file=sys.stderr) + print('%d days have elapsed since the last commit to %s; auto-syncing' % + (time_since_last_commit.days, checkout), + file=sys.stderr) - result = subprocess.run(['git', 'fetch', 'origin'], check=False, cwd=checkout) + result = subprocess.run(['git', 'fetch', 'origin'], + check=False, + cwd=checkout) if result.returncode: - print( - 'Sync failed somehow; hoping that things are fresh enough, then...', - file=sys.stderr) + print('Sync failed somehow; hoping that things are fresh enough, then...', + file=sys.stderr) diff --git a/llvm_tools/modify_a_tryjob_unittest.py b/llvm_tools/modify_a_tryjob_unittest.py index e3c62972..c03a1e18 100755 --- a/llvm_tools/modify_a_tryjob_unittest.py +++ b/llvm_tools/modify_a_tryjob_unittest.py @@ -42,14 +42,17 @@ class ModifyATryjobTest(unittest.TestCase): with self.assertRaises(SystemExit) as err: modify_a_tryjob.PerformTryjobModification( revision_to_modify, modify_a_tryjob.ModifyTryjob.REMOVE, - temp_json_file, args_output.extra_change_lists, args_output.options, - args_output.builders, args_output.chroot_path, args_output.verbose) + temp_json_file, args_output.extra_change_lists, + args_output.options, args_output.builders, args_output.chroot_path, + args_output.verbose) self.assertEqual(str(err.exception), 'No tryjobs in %s' % temp_json_file) # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob # was not found. - @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=None) + @mock.patch.object(update_tryjob_status, + 'FindTryjobIndex', + return_value=None) def testNoTryjobIndexFound(self, mock_find_tryjob_index): bisect_test_contents = { 'start': 369410, @@ -78,8 +81,9 @@ class ModifyATryjobTest(unittest.TestCase): with self.assertRaises(ValueError) as err: modify_a_tryjob.PerformTryjobModification( revision_to_modify, modify_a_tryjob.ModifyTryjob.REMOVE, - temp_json_file, args_output.extra_change_lists, args_output.options, - args_output.builders, args_output.chroot_path, args_output.verbose) + temp_json_file, args_output.extra_change_lists, + args_output.options, args_output.builders, args_output.chroot_path, + args_output.verbose) self.assertEqual( str(err.exception), 'Unable to find tryjob for %d in %s' % @@ -139,9 +143,9 @@ class ModifyATryjobTest(unittest.TestCase): bisect_test_contents = { 'start': - 369410, + 369410, 'end': - 369420, + 369420, 'jobs': [{ 'rev': 369411, 'status': 'bad', @@ -185,9 +189,9 @@ class ModifyATryjobTest(unittest.TestCase): expected_file_contents = { 'start': - 369410, + 369410, 'end': - 369420, + 369420, 'jobs': [{ 'rev': 369411, 'status': 'pending', @@ -249,7 +253,9 @@ class ModifyATryjobTest(unittest.TestCase): mock_find_tryjob_index.assert_called_once() # Simulate the behavior of `FindTryjobIndex()` when the tryjob was not found. 
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=None) + @mock.patch.object(update_tryjob_status, + 'FindTryjobIndex', + return_value=None) def testSuccessfullyDidNotAddTryjobOutsideOfBisectionBounds( self, mock_find_tryjob_index): @@ -282,8 +288,8 @@ class ModifyATryjobTest(unittest.TestCase): args_output.extra_change_lists, args_output.options, args_output.builders, args_output.chroot_path, args_output.verbose) - self.assertEqual( - str(err.exception), 'Failed to add tryjob to %s' % temp_json_file) + self.assertEqual(str(err.exception), + 'Failed to add tryjob to %s' % temp_json_file) mock_find_tryjob_index.assert_called_once() @@ -292,12 +298,13 @@ class ModifyATryjobTest(unittest.TestCase): @mock.patch.object(modify_a_tryjob, 'AddTryjob') # Simulate the behavior of `GetLLVMHashAndVersionFromSVNOption()` when # successfully retrieved the git hash of the revision to launch a tryjob for. - @mock.patch.object( - get_llvm_hash, - 'GetLLVMHashAndVersionFromSVNOption', - return_value=('a123testhash1', 369418)) + @mock.patch.object(get_llvm_hash, + 'GetLLVMHashAndVersionFromSVNOption', + return_value=('a123testhash1', 369418)) # Simulate the behavior of `FindTryjobIndex()` when the tryjob was not found. - @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=None) + @mock.patch.object(update_tryjob_status, + 'FindTryjobIndex', + return_value=None) def testSuccessfullyAddedTryjob(self, mock_find_tryjob_index, mock_get_llvm_hash, mock_add_tryjob): @@ -391,9 +398,8 @@ class ModifyATryjobTest(unittest.TestCase): args_output.extra_change_lists, args_output.options, args_output.builders, args_output.chroot_path, args_output.verbose) - self.assertEqual( - str(err.exception), - 'Invalid "modify_tryjob" option provided: remove_link') + self.assertEqual(str(err.exception), + 'Invalid "modify_tryjob" option provided: remove_link') mock_find_tryjob_index.assert_called_once() diff --git a/llvm_tools/nightly_revert_checker_test.py b/llvm_tools/nightly_revert_checker_test.py index a8ab4195..f68513af 100755 --- a/llvm_tools/nightly_revert_checker_test.py +++ b/llvm_tools/nightly_revert_checker_test.py @@ -24,7 +24,6 @@ class Test(unittest.TestCase): """Tests for nightly_revert_checker.""" def test_email_rendering_works_for_singular_revert(self): - def prettify_sha(sha: str) -> tiny_render.Piece: return 'pretty_' + sha @@ -38,8 +37,8 @@ class Test(unittest.TestCase): prettify_sha=prettify_sha, get_sha_description=get_sha_description, new_reverts=[ - revert_checker.Revert( - sha='${revert_sha}', reverted_sha='${reverted_sha}') + revert_checker.Revert(sha='${revert_sha}', + reverted_sha='${reverted_sha}') ]) expected_email = nightly_revert_checker._Email( @@ -65,7 +64,6 @@ class Test(unittest.TestCase): self.assertEqual(email, expected_email) def test_email_rendering_works_for_multiple_reverts(self): - def prettify_sha(sha: str) -> tiny_render.Piece: return 'pretty_' + sha @@ -79,13 +77,13 @@ class Test(unittest.TestCase): prettify_sha=prettify_sha, get_sha_description=get_sha_description, new_reverts=[ - revert_checker.Revert( - sha='${revert_sha1}', reverted_sha='${reverted_sha1}'), - revert_checker.Revert( - sha='${revert_sha2}', reverted_sha='${reverted_sha2}'), + revert_checker.Revert(sha='${revert_sha1}', + reverted_sha='${reverted_sha1}'), + revert_checker.Revert(sha='${revert_sha2}', + reverted_sha='${reverted_sha2}'), # Keep this out-of-order to check that we sort based on SHAs - revert_checker.Revert( - sha='${revert_sha0}', reverted_sha='${reverted_sha0}'), 
+ revert_checker.Revert(sha='${revert_sha0}', + reverted_sha='${reverted_sha0}'), ]) expected_email = nightly_revert_checker._Email( @@ -161,13 +159,13 @@ class Test(unittest.TestCase): find_reverts.return_value = [ revert_checker.Revert('12345abcdef', 'fedcba54321') ] - nightly_revert_checker.do_cherrypick( - chroot_path='/path/to/chroot', - llvm_dir='/path/to/llvm', - interesting_shas=[('12345abcdef', 'fedcba54321')], - state={}, - reviewers=['meow@chromium.org'], - cc=['purr@chromium.org']) + nightly_revert_checker.do_cherrypick(chroot_path='/path/to/chroot', + llvm_dir='/path/to/llvm', + interesting_shas=[('12345abcdef', + 'fedcba54321')], + state={}, + reviewers=['meow@chromium.org'], + cc=['purr@chromium.org']) do_cherrypick.assert_called_once() find_reverts.assert_called_once() @@ -181,13 +179,13 @@ class Test(unittest.TestCase): ] do_cherrypick.side_effect = get_upstream_patch.CherrypickError( 'Patch at 12345abcdef already exists in PATCHES.json') - nightly_revert_checker.do_cherrypick( - chroot_path='/path/to/chroot', - llvm_dir='/path/to/llvm', - interesting_shas=[('12345abcdef', 'fedcba54321')], - state={}, - reviewers=['meow@chromium.org'], - cc=['purr@chromium.org']) + nightly_revert_checker.do_cherrypick(chroot_path='/path/to/chroot', + llvm_dir='/path/to/llvm', + interesting_shas=[('12345abcdef', + 'fedcba54321')], + state={}, + reviewers=['meow@chromium.org'], + cc=['purr@chromium.org']) do_cherrypick.assert_called_once() find_reverts.assert_called_once() diff --git a/llvm_tools/patch_manager.py b/llvm_tools/patch_manager.py index eff1ba8d..303b0f39 100755 --- a/llvm_tools/patch_manager.py +++ b/llvm_tools/patch_manager.py @@ -34,12 +34,12 @@ def is_patch_metadata_file(patch_metadata_file): """Valides the argument into 'argparse' is a patch file.""" if not os.path.isfile(patch_metadata_file): - raise ValueError( - 'Invalid patch metadata file provided: %s' % patch_metadata_file) + raise ValueError('Invalid patch metadata file provided: %s' % + patch_metadata_file) if not patch_metadata_file.endswith('.json'): - raise ValueError( - 'Patch metadata file does not end in ".json": %s' % patch_metadata_file) + raise ValueError('Patch metadata file does not end in ".json": %s' % + patch_metadata_file) return patch_metadata_file @@ -61,8 +61,8 @@ def EnsureBisectModeAndSvnVersionAreSpecifiedTogether(failure_mode, if failure_mode != FailureModes.BISECT_PATCHES.value and good_svn_version: raise ValueError('"good_svn_version" is only available for bisection.') - elif failure_mode == FailureModes.BISECT_PATCHES.value and \ - not good_svn_version: + elif (failure_mode == FailureModes.BISECT_PATCHES.value + and not good_svn_version): raise ValueError('A good SVN version is required for bisection (used by' '"git bisect start".') @@ -75,15 +75,15 @@ def GetCommandLineArgs(): # Add argument for the last good SVN version which is required by # `git bisect start` (only valid for bisection mode). - parser.add_argument( - '--good_svn_version', - type=int, - help='INTERNAL USE ONLY... (used for bisection.)') + parser.add_argument('--good_svn_version', + type=int, + help='INTERNAL USE ONLY... (used for bisection.)') # Add argument for the number of patches it iterate. Only used when performing # `git bisect run`. - parser.add_argument( - '--num_patches_to_iterate', type=int, help=argparse.SUPPRESS) + parser.add_argument('--num_patches_to_iterate', + type=int, + help=argparse.SUPPRESS) # Add argument for whether bisection should continue. Only used for # 'bisect_patches.' 
@@ -127,11 +127,10 @@ def GetCommandLineArgs(): help='the absolute path to the ebuild "files/" directory') # Add argument for the absolute path to the unpacked sources. - parser.add_argument( - '--src_path', - required=True, - type=is_directory, - help='the absolute path to the unpacked LLVM sources') + parser.add_argument('--src_path', + required=True, + type=is_directory, + help='the absolute path to the unpacked LLVM sources') # Add argument for the mode of the patch manager when handling failing # applicable patches. @@ -139,8 +138,8 @@ def GetCommandLineArgs(): '--failure_mode', default=FailureModes.FAIL.value, type=is_valid_failure_mode, - help='the mode of the patch manager when handling failed patches ' \ - '(default: %(default)s)') + help='the mode of the patch manager when handling failed patches ' + '(default: %(default)s)') # Parse the command line. args_output = parser.parse_args() @@ -462,8 +461,8 @@ def HandlePatches(svn_version, for patch_dict_index, cur_patch_dict in enumerate(patch_file_contents): # Used by the internal bisection. All the patches in the interval [0, N] # have been iterated. - if num_patches_to_iterate and \ - (patch_dict_index + 1) > num_patches_to_iterate: + if (num_patches_to_iterate + and (patch_dict_index + 1) > num_patches_to_iterate): break # Get the absolute path to the patch in $FILESDIR. @@ -487,8 +486,8 @@ def HandlePatches(svn_version, else: # Patch is applicable if 'svn_version' >= 'from' && # "svn_version" < "until". - patch_applicable = (svn_version >= patch_metadata[0] and \ - svn_version < patch_metadata[1]) + patch_applicable = (svn_version >= patch_metadata[0] + and svn_version < patch_metadata[1]) if can_modify_patches: # Add to the list only if the mode can potentially modify a patch. @@ -498,8 +497,8 @@ def HandlePatches(svn_version, # file and all patches that are not applicable will be added to the # remove patches list which will not be included in the updated .json # file. - if patch_applicable or svn_version < patch_metadata[0] or \ - mode != FailureModes.REMOVE_PATCHES: + if (patch_applicable or svn_version < patch_metadata[0] + or mode != FailureModes.REMOVE_PATCHES): applicable_patches.append(cur_patch_dict) elif mode == FailureModes.REMOVE_PATCHES: removed_patches.append(path_to_patch) @@ -553,15 +552,17 @@ def HandlePatches(svn_version, CleanSrcTree(src_path) print('\nStarting to bisect patch %s for SVN version %d:\n' % - (os.path.basename(cur_patch_dict['rel_patch_path']), - svn_version)) + (os.path.basename( + cur_patch_dict['rel_patch_path']), svn_version)) # Performs the bisection: calls `git bisect start` and # `git bisect run`, where `git bisect run` is going to call this # script as many times as needed with specific arguments. - bad_svn_version = PerformBisection( - src_path, good_commit, bad_commit, svn_version, - patch_metadata_file, filesdir_path, patch_dict_index + 1) + bad_svn_version = PerformBisection(src_path, good_commit, + bad_commit, svn_version, + patch_metadata_file, + filesdir_path, + patch_dict_index + 1) print('\nSuccessfully bisected patch %s, starts to fail to apply ' 'at %d\n' % (os.path.basename( @@ -605,8 +606,8 @@ def HandlePatches(svn_version, print('\n'.join(applied_patches)) # Throw an exception on the first patch that failed to apply. 
- raise ValueError( - 'Failed to apply patch: %s' % os.path.basename(path_to_patch)) + raise ValueError('Failed to apply patch: %s' % + os.path.basename(path_to_patch)) elif mode == FailureModes.INTERNAL_BISECTION: # Determine the exit status for `git bisect run` because of the # failed patch in the interval [0, N]. @@ -662,13 +663,12 @@ def HandlePatches(svn_version, 'disabled_patches', 'removed_patches', 'modified_metadata' ]) - patch_info = PatchInfo( - applied_patches=applied_patches, - failed_patches=failed_patches, - non_applicable_patches=non_applicable_patches, - disabled_patches=disabled_patches, - removed_patches=removed_patches, - modified_metadata=modified_metadata) + patch_info = PatchInfo(applied_patches=applied_patches, + failed_patches=failed_patches, + non_applicable_patches=non_applicable_patches, + disabled_patches=disabled_patches, + removed_patches=removed_patches, + modified_metadata=modified_metadata) # Determine post actions after iterating through the patches. if mode == FailureModes.REMOVE_PATCHES: @@ -711,8 +711,8 @@ def PrintPatchResults(patch_info): print('\n'.join(patch_info.non_applicable_patches)) if patch_info.modified_metadata: - print('\nThe patch metadata file %s has been modified' % os.path.basename( - patch_info.modified_metadata)) + print('\nThe patch metadata file %s has been modified' % + os.path.basename(patch_info.modified_metadata)) if patch_info.disabled_patches: print('\nThe following patches were disabled:') @@ -746,11 +746,13 @@ def main(): args_output.svn_version = GetHEADSVNVersion(args_output.src_path) # Get the results of handling the patches of the package. - patch_info = HandlePatches( - args_output.svn_version, args_output.patch_metadata_file, - args_output.filesdir_path, args_output.src_path, - FailureModes(args_output.failure_mode), args_output.good_svn_version, - args_output.num_patches_to_iterate, args_output.continue_bisection) + patch_info = HandlePatches(args_output.svn_version, + args_output.patch_metadata_file, + args_output.filesdir_path, args_output.src_path, + FailureModes(args_output.failure_mode), + args_output.good_svn_version, + args_output.num_patches_to_iterate, + args_output.continue_bisection) PrintPatchResults(patch_info) diff --git a/llvm_tools/subprocess_helpers.py b/llvm_tools/subprocess_helpers.py index 8845112c..2e013780 100644 --- a/llvm_tools/subprocess_helpers.py +++ b/llvm_tools/subprocess_helpers.py @@ -13,8 +13,10 @@ import subprocess def CheckCommand(cmd): """Executes the command using Popen().""" - cmd_obj = subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='UTF-8') + cmd_obj = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding='UTF-8') stdout, _ = cmd_obj.communicate() @@ -43,8 +45,9 @@ def ChrootRunCommand(chroot_path, cmd, verbose=False): exec_chroot_cmd = ['cros_sdk', '--'] exec_chroot_cmd.extend(cmd) - return ExecCommandAndCaptureOutput( - exec_chroot_cmd, cwd=chroot_path, verbose=verbose) + return ExecCommandAndCaptureOutput(exec_chroot_cmd, + cwd=chroot_path, + verbose=verbose) def ExecCommandAndCaptureOutput(cmd, cwd=None, verbose=False): diff --git a/llvm_tools/update_chromeos_llvm_hash_unittest.py b/llvm_tools/update_chromeos_llvm_hash_unittest.py index adb20598..2e93eae9 100755 --- a/llvm_tools/update_chromeos_llvm_hash_unittest.py +++ b/llvm_tools/update_chromeos_llvm_hash_unittest.py @@ -11,7 +11,6 @@ from __future__ import print_function import collections import datetime import os -import re import subprocess import 
unittest import unittest.mock as mock @@ -58,8 +57,8 @@ class UpdateLLVMHashTest(unittest.TestCase): update_chromeos_llvm_hash.UpdateEbuildLLVMHash(ebuild_path, llvm_variant, git_hash, svn_version) - self.assertEqual( - str(err.exception), 'Invalid ebuild path provided: %s' % ebuild_path) + self.assertEqual(str(err.exception), + 'Invalid ebuild path provided: %s' % ebuild_path) mock_isfile.assert_called_once() @@ -195,8 +194,8 @@ class UpdateLLVMHashTest(unittest.TestCase): @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion') @mock.patch.object(os.path, 'islink', return_value=False) - def testFailedToUprevEbuildToVersionForInvalidSymlink(self, mock_islink, - mock_llvm_version): + def testFailedToUprevEbuildToVersionForInvalidSymlink( + self, mock_islink, mock_llvm_version): symlink_path = '/path/to/chroot/package/package.ebuild' svn_version = 1000 git_hash = 'badf00d' @@ -207,8 +206,8 @@ class UpdateLLVMHashTest(unittest.TestCase): update_chromeos_llvm_hash.UprevEbuildToVersion(symlink_path, svn_version, git_hash) - self.assertEqual( - str(err.exception), 'Invalid symlink provided: %s' % symlink_path) + self.assertEqual(str(err.exception), + 'Invalid symlink provided: %s' % symlink_path) mock_islink.assert_called_once() mock_llvm_version.assert_not_called() @@ -221,8 +220,8 @@ class UpdateLLVMHashTest(unittest.TestCase): with self.assertRaises(ValueError) as err: update_chromeos_llvm_hash.UprevEbuildSymlink(symlink_path) - self.assertEqual( - str(err.exception), 'Invalid symlink provided: %s' % symlink_path) + self.assertEqual(str(err.exception), + 'Invalid symlink provided: %s' % symlink_path) mock_islink.assert_called_once() @@ -418,8 +417,8 @@ class UpdateLLVMHashTest(unittest.TestCase): @mock.patch.object(update_chromeos_llvm_hash, 'GetEbuildPathsFromSymLinkPaths') def testSuccessfullyCreatedPathDictionaryFromPackages( - self, mock_ebuild_paths_from_symlink_paths, mock_chroot_paths_to_symlinks, - mock_get_chroot_paths): + self, mock_ebuild_paths_from_symlink_paths, + mock_chroot_paths_to_symlinks, mock_get_chroot_paths): package_chroot_path = '/mnt/host/source/src/path/to/package-r1.ebuild' @@ -662,12 +661,10 @@ class UpdateLLVMHashTest(unittest.TestCase): # Verify exception is raised when an exception is thrown within # the 'try' block by UprevEbuildSymlink function. 
with self.assertRaises(ValueError) as err: - update_chromeos_llvm_hash.UpdatePackages(packages_to_update, llvm_variant, - git_hash, svn_version, - chroot_path, patch_metadata_file, - failure_modes.FailureModes.FAIL, - git_hash_source, - extra_commit_msg) + update_chromeos_llvm_hash.UpdatePackages( + packages_to_update, llvm_variant, git_hash, svn_version, chroot_path, + patch_metadata_file, failure_modes.FailureModes.FAIL, + git_hash_source, extra_commit_msg) self.assertEqual(str(err.exception), 'Failed to uprev the ebuild.') @@ -698,12 +695,11 @@ class UpdateLLVMHashTest(unittest.TestCase): @mock.patch.object(llvm_patch_management, 'UpdatePackagesPatchMetadataFile') @mock.patch.object(update_chromeos_llvm_hash, 'StagePatchMetadataFileForCommit') - def testSuccessfullyUpdatedPackages(self, mock_stage_patch_file, - mock_update_package_metadata_file, - mock_delete_repo, mock_upload_changes, - mock_uprev_symlink, mock_update_llvm_next, - mock_create_repo, mock_create_path_dict, - mock_llvm_version, mock_mask_contains): + def testSuccessfullyUpdatedPackages( + self, mock_stage_patch_file, mock_update_package_metadata_file, + mock_delete_repo, mock_upload_changes, mock_uprev_symlink, + mock_update_llvm_next, mock_create_repo, mock_create_path_dict, + mock_llvm_version, mock_mask_contains): path_to_package_dir = '/some/path/to/chroot/src/path/to' abs_path_to_package = os.path.join(path_to_package_dir, 'package.ebuild') @@ -848,7 +844,8 @@ class UpdateLLVMHashTest(unittest.TestCase): 'update_chromeos_llvm_hash.open', mock.mock_open(read_data='\n=sys-devel/llvm-1234.0_pre*\n'), create=True) as mock_file: - update_chromeos_llvm_hash.EnsurePackageMaskContains(chroot_path, git_hash) + update_chromeos_llvm_hash.EnsurePackageMaskContains( + chroot_path, git_hash) handle = mock_file() handle.write.assert_not_called() mock_llvm_version.assert_called_once_with(git_hash) @@ -865,11 +862,11 @@ class UpdateLLVMHashTest(unittest.TestCase): chroot_path = 'absolute/path/to/chroot' git_hash = 'badf00d' mock_llvm_version.return_value = '1234' - with mock.patch( - 'update_chromeos_llvm_hash.open', - mock.mock_open(read_data='nothing relevant'), - create=True) as mock_file: - update_chromeos_llvm_hash.EnsurePackageMaskContains(chroot_path, git_hash) + with mock.patch('update_chromeos_llvm_hash.open', + mock.mock_open(read_data='nothing relevant'), + create=True) as mock_file: + update_chromeos_llvm_hash.EnsurePackageMaskContains( + chroot_path, git_hash) handle = mock_file() handle.write.assert_called_once_with('=sys-devel/llvm-1234.0_pre*\n') mock_llvm_version.assert_called_once_with(git_hash) diff --git a/llvm_tools/update_packages_and_run_tests_unittest.py b/llvm_tools/update_packages_and_run_tests_unittest.py index 11f2b7f8..b48f6338 100755 --- a/llvm_tools/update_packages_and_run_tests_unittest.py +++ b/llvm_tools/update_packages_and_run_tests_unittest.py @@ -46,7 +46,8 @@ class UpdatePackagesAndRunTryjobsTest(unittest.TestCase): def testMatchedLastTestedFile(self): with test_helpers.CreateTemporaryFile() as last_tested_file: arg_dict = { - 'svn_version': 1234, + 'svn_version': + 1234, 'ebuilds': [ '/path/to/package1-r2.ebuild', '/path/to/package2/package2-r3.ebuild' @@ -107,10 +108,9 @@ class UpdatePackagesAndRunTryjobsTest(unittest.TestCase): options, builder), expected_cmd) - @mock.patch.object( - update_packages_and_run_tests, - 'GetCurrentTimeInUTC', - return_value='2019-09-09') + @mock.patch.object(update_packages_and_run_tests, + 'GetCurrentTimeInUTC', + return_value='2019-09-09') 
@mock.patch.object(update_packages_and_run_tests, 'AddLinksToCL') @mock.patch.object(subprocess, 'check_output') def testSuccessfullySubmittedTryJob(self, mock_cmd, mock_add_links_to_cl, @@ -147,8 +147,9 @@ class UpdatePackagesAndRunTryjobsTest(unittest.TestCase): self.assertEqual(tests, expected_tests) - mock_cmd.assert_called_once_with( - expected_cmd, cwd=chroot_path, encoding='utf-8') + mock_cmd.assert_called_once_with(expected_cmd, + cwd=chroot_path, + encoding='utf-8') mock_add_links_to_cl.assert_called_once() @@ -166,7 +167,10 @@ class UpdatePackagesAndRunTryjobsTest(unittest.TestCase): bb_id = '1234' create_time = '2020-04-18T00:03:53.978767Z' - mock_cmd.return_value = json.dumps({'id': bb_id, 'createTime': create_time}) + mock_cmd.return_value = json.dumps({ + 'id': bb_id, + 'createTime': create_time + }) chroot_path = '/some/path/to/chroot' cl = 900 @@ -188,8 +192,9 @@ class UpdatePackagesAndRunTryjobsTest(unittest.TestCase): self.assertEqual(tests, expected_tests) - mock_cmd.assert_called_once_with( - expected_cmd, cwd=chroot_path, encoding='utf-8') + mock_cmd.assert_called_once_with(expected_cmd, + cwd=chroot_path, + encoding='utf-8') mock_add_links_to_cl.assert_called_once() @@ -304,8 +309,8 @@ class UpdatePackagesAndRunTestCQTest(unittest.TestCase): update_packages_and_run_tests.GetCQDependString(test_no_changelists)) self.assertEqual( - update_packages_and_run_tests.GetCQDependString(test_single_changelist), - '\nCq-Depend: chromium:1234') + update_packages_and_run_tests.GetCQDependString( + test_single_changelist), '\nCq-Depend: chromium:1234') self.assertEqual( update_packages_and_run_tests.GetCQDependString( @@ -318,7 +323,8 @@ class UpdatePackagesAndRunTestCQTest(unittest.TestCase): test_invalid_trybot = 'invalid-name' self.assertIsNone( - update_packages_and_run_tests.GetCQIncludeTrybotsString(test_no_trybot)) + update_packages_and_run_tests.GetCQIncludeTrybotsString( + test_no_trybot)) self.assertEqual( update_packages_and_run_tests.GetCQIncludeTrybotsString( diff --git a/llvm_tools/update_tryjob_status.py b/llvm_tools/update_tryjob_status.py index f25fadca..61aa9d1c 100755 --- a/llvm_tools/update_tryjob_status.py +++ b/llvm_tools/update_tryjob_status.py @@ -82,11 +82,10 @@ def GetCommandLineArgs(): # Add argument that determines which revision to search for in the list of # tryjobs. - parser.add_argument( - '--revision', - required=True, - type=int, - help='The revision to set its status.') + parser.add_argument('--revision', + required=True, + type=int, + help='The revision to set its status.') # Add argument for the custom script to execute for the 'custom_script' # option in '--set_status'. @@ -99,14 +98,13 @@ def GetCommandLineArgs(): args_output = parser.parse_args() - if not (os.path.isfile( - args_output.status_file and - not args_output.status_file.endswith('.json'))): + if not (os.path.isfile(args_output.status_file + and not args_output.status_file.endswith('.json'))): raise ValueError('File does not exist or does not ending in ".json" ' ': %s' % args_output.status_file) - if (args_output.set_status == TryjobStatus.CUSTOM_SCRIPT.value and - not args_output.custom_script): + if (args_output.set_status == TryjobStatus.CUSTOM_SCRIPT.value + and not args_output.custom_script): raise ValueError('Please provide the absolute path to the script to ' 'execute.') @@ -169,15 +167,16 @@ def GetCustomScriptResult(custom_script, status_file, tryjob_contents): exec_script_cmd = [custom_script, temp_json_file] # Execute the custom script to get the exit code. 
- exec_script_cmd_obj = subprocess.Popen( - exec_script_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + exec_script_cmd_obj = subprocess.Popen(exec_script_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) _, stderr = exec_script_cmd_obj.communicate() # Invalid exit code by the custom script. if exec_script_cmd_obj.returncode not in custom_script_exit_value_mapping: # Save the .JSON file to the directory of 'status_file'. - name_of_json_file = os.path.join( - os.path.dirname(status_file), os.path.basename(temp_json_file)) + name_of_json_file = os.path.join(os.path.dirname(status_file), + os.path.basename(temp_json_file)) os.rename(temp_json_file, name_of_json_file) @@ -236,7 +235,8 @@ def UpdateTryjobStatus(revision, set_status, status_file, custom_script): elif set_status == TryjobStatus.BAD: bisect_contents['jobs'][tryjob_index]['status'] = TryjobStatus.BAD.value elif set_status == TryjobStatus.PENDING: - bisect_contents['jobs'][tryjob_index]['status'] = TryjobStatus.PENDING.value + bisect_contents['jobs'][tryjob_index][ + 'status'] = TryjobStatus.PENDING.value elif set_status == TryjobStatus.SKIP: bisect_contents['jobs'][tryjob_index]['status'] = TryjobStatus.SKIP.value elif set_status == TryjobStatus.CUSTOM_SCRIPT: @@ -246,7 +246,10 @@ def UpdateTryjobStatus(revision, set_status, status_file, custom_script): raise ValueError('Invalid "set_status" option provided: %s' % set_status) with open(status_file, 'w') as update_tryjobs: - json.dump(bisect_contents, update_tryjobs, indent=4, separators=(',', ': ')) + json.dump(bisect_contents, + update_tryjobs, + indent=4, + separators=(',', ': ')) def main(): @@ -256,7 +259,8 @@ def main(): args_output = GetCommandLineArgs() - UpdateTryjobStatus(args_output.revision, TryjobStatus(args_output.set_status), + UpdateTryjobStatus(args_output.revision, + TryjobStatus(args_output.set_status), args_output.status_file, args_output.custom_script) diff --git a/llvm_tools/update_tryjob_status_unittest.py b/llvm_tools/update_tryjob_status_unittest.py index c42c6718..bf078f3b 100755 --- a/llvm_tools/update_tryjob_status_unittest.py +++ b/llvm_tools/update_tryjob_status_unittest.py @@ -100,8 +100,8 @@ class UpdateTryjobStatusTest(unittest.TestCase): custom_script_path = '/abs/path/to/script.py' status_file_path = '/abs/path/to/status_file.json' - name_json_file = os.path.join( - os.path.dirname(status_file_path), 'tmpFile.json') + name_json_file = os.path.join(os.path.dirname(status_file_path), + 'tmpFile.json') expected_error_message = ( 'Custom script %s exit code %d did not match ' @@ -142,7 +142,8 @@ class UpdateTryjobStatusTest(unittest.TestCase): # script. # # `Popen.communicate()` returns a tuple of `stdout` and `stderr`. - mock_exec_custom_script.return_value.communicate.return_value = (None, None) + mock_exec_custom_script.return_value.communicate.return_value = (None, + None) mock_exec_custom_script.return_value.returncode = ( CustomScriptStatus.GOOD.value) @@ -193,7 +194,9 @@ class UpdateTryjobStatusTest(unittest.TestCase): # Simulate the behavior of `FindTryjobIndex()` when the tryjob does not exist # in the status file. 
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=None) + @mock.patch.object(update_tryjob_status, + 'FindTryjobIndex', + return_value=None) def testNotFindTryjobIndexWhenUpdatingTryjobStatus(self, mock_find_tryjob_index): @@ -256,8 +259,8 @@ class UpdateTryjobStatusTest(unittest.TestCase): custom_script = None update_tryjob_status.UpdateTryjobStatus(revision_to_update, - TryjobStatus.GOOD, temp_json_file, - custom_script) + TryjobStatus.GOOD, + temp_json_file, custom_script) # Verify that the tryjob's 'status' has been updated in the status file. with open(temp_json_file) as status_file: @@ -387,10 +390,9 @@ class UpdateTryjobStatusTest(unittest.TestCase): mock_find_tryjob_index.assert_called_once() @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0) - @mock.patch.object( - update_tryjob_status, - 'GetCustomScriptResult', - return_value=TryjobStatus.SKIP.value) + @mock.patch.object(update_tryjob_status, + 'GetCustomScriptResult', + return_value=TryjobStatus.SKIP.value) def testUpdatedTryjobStatusToAutoPassedWithCustomScript( self, mock_get_custom_script_result, mock_find_tryjob_index): bisect_test_contents = { @@ -434,8 +436,8 @@ class UpdateTryjobStatusTest(unittest.TestCase): # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the # status file. @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0) - def testSetStatusDoesNotExistWhenUpdatingTryjobStatus(self, - mock_find_tryjob_index): + def testSetStatusDoesNotExistWhenUpdatingTryjobStatus( + self, mock_find_tryjob_index): bisect_test_contents = { 'start': 369410, @@ -466,9 +468,8 @@ class UpdateTryjobStatusTest(unittest.TestCase): nonexistent_update_status, temp_json_file, custom_script) - self.assertEqual( - str(err.exception), - 'Invalid "set_status" option provided: revert_status') + self.assertEqual(str(err.exception), + 'Invalid "set_status" option provided: revert_status') mock_find_tryjob_index.assert_called_once() diff --git a/llvm_tools/upload_lexan_crashes_to_forcey.py b/llvm_tools/upload_lexan_crashes_to_forcey.py index 61bf6b7d..5b038f53 100755 --- a/llvm_tools/upload_lexan_crashes_to_forcey.py +++ b/llvm_tools/upload_lexan_crashes_to_forcey.py @@ -50,7 +50,7 @@ def get_available_test_case_urls(year: int, month: int, day: int) -> List[str]: def test_cases_on_or_after(date: datetime.datetime - ) -> Generator[str, None, None]: + ) -> Generator[str, None, None]: """Yields all test-cases submitted on or after the given date.""" for year in get_available_year_numbers(): if year < date.year: @@ -118,8 +118,7 @@ def submit_test_case(gs_url: str, cr_tool: str) -> None: # chromium.clang-ToTiOS-12754-GTXToolKit-2bfcde.tgz) # we'll get `.crash` files. Unclear why, but let's filter them out anyway. repro_files = [ - os.path.join(tempdir, x) - for x in os.listdir(tempdir) + os.path.join(tempdir, x) for x in os.listdir(tempdir) if not x.endswith('.crash') ] assert len(repro_files) == 2, repro_files @@ -133,8 +132,8 @@ def submit_test_case(gs_url: str, cr_tool: str) -> None: # Peephole: lexan got a crash upload with a way old clang. Ignore it. 
with open(sh_file, encoding='utf-8') as f: if 'Crash reproducer for clang version 9.0.0' in f.read(): - logging.warning('Skipping upload for %s; seems to be with an old clang', - gs_url) + logging.warning( + 'Skipping upload for %s; seems to be with an old clang', gs_url) return subprocess.run( @@ -226,14 +225,16 @@ def main(argv: List[str]): my_dir = os.path.dirname(os.path.abspath(__file__)) parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument( - '--state_file', default=os.path.join(my_dir, 'lexan-state.json')) + parser.add_argument('--state_file', + default=os.path.join(my_dir, 'lexan-state.json')) parser.add_argument( '--last_date', help='The earliest date that we care about. All test cases from here ' 'on will be picked up. Format is YYYY-MM-DD.') - parser.add_argument( - '--4c', dest='forcey', required=True, help='Path to a 4c client binary') + parser.add_argument('--4c', + dest='forcey', + required=True, + help='Path to a 4c client binary') opts = parser.parse_args(argv) forcey = opts.forcey diff --git a/llvm_tools/upload_lexan_crashes_to_forcey_test.py b/llvm_tools/upload_lexan_crashes_to_forcey_test.py index 937cbf8e..36a35048 100755 --- a/llvm_tools/upload_lexan_crashes_to_forcey_test.py +++ b/llvm_tools/upload_lexan_crashes_to_forcey_test.py @@ -18,21 +18,19 @@ class Test(unittest.TestCase): """Tests for upload_lexan_crashes_to_forcey.""" def test_date_parsing_functions(self): - self.assertEqual( - datetime.date(2020, 2, 1), - upload_lexan_crashes_to_forcey.from_ymd('2020-02-01')) - - @unittest.mock.patch( - 'upload_lexan_crashes_to_forcey.test_cases_on_or_after', - return_value=( - ( - datetime.date(2020, 1, 1), - ('gs://test-case-1', 'gs://test-case-1.1'), - ), - (datetime.date(2020, 1, 2), ('gs://test-case-2',)), - (datetime.date(2020, 1, 1), ('gs://test-case-3',)), - (datetime.date(2020, 1, 4), ('gs://test-case-4',)), - )) + self.assertEqual(datetime.date(2020, 2, 1), + upload_lexan_crashes_to_forcey.from_ymd('2020-02-01')) + + @unittest.mock.patch('upload_lexan_crashes_to_forcey.test_cases_on_or_after', + return_value=( + ( + datetime.date(2020, 1, 1), + ('gs://test-case-1', 'gs://test-case-1.1'), + ), + (datetime.date(2020, 1, 2), ('gs://test-case-2', )), + (datetime.date(2020, 1, 1), ('gs://test-case-3', )), + (datetime.date(2020, 1, 4), ('gs://test-case-4', )), + )) @unittest.mock.patch('upload_lexan_crashes_to_forcey.submit_test_case') @unittest.mock.patch('upload_lexan_crashes_to_forcey.persist_state') def test_new_test_case_submission_functions(self, persist_state_mock, @@ -132,8 +130,8 @@ class Test(unittest.TestCase): # All we need is an empty file here. 
pass - with open( - os.path.join(tempdir, 'test_case.sh'), 'w', encoding='utf-8') as f: + with open(os.path.join(tempdir, 'test_case.sh'), 'w', + encoding='utf-8') as f: f.write('# Crash reproducer for clang version 9.0.0 (...)\n') f.write('clang something or other\n') -- cgit v1.2.3 From 1222bbbd4bc1d31bffb9f15bd9d0e5948f1f2b06 Mon Sep 17 00:00:00 2001 From: Denis Nikitin Date: Mon, 7 Mar 2022 17:45:25 -0800 Subject: update_kernel_afdo: Fix lint errors BUG=b:221882737 TEST=cros lint afdo_tools/update_kernel_afdo Change-Id: Ib4c5a19531400d8cad1060679cf167397ddb4540 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3508820 Reviewed-by: Manoj Gupta Tested-by: Denis Nikitin Commit-Queue: Denis Nikitin --- afdo_tools/update_kernel_afdo | 67 +++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/afdo_tools/update_kernel_afdo b/afdo_tools/update_kernel_afdo index ff0ab224..3944407c 100755 --- a/afdo_tools/update_kernel_afdo +++ b/afdo_tools/update_kernel_afdo @@ -9,7 +9,7 @@ # USAGE=" -Usage: $(basename $0) [main|beta|stable|all] [--help] +Usage: $(basename "$0") [main|beta|stable|all] [--help] Description: The script takes one optional argument which is the channel where we want @@ -36,7 +36,7 @@ metadata_dir="${tc_utils_dir}/afdo_metadata" outfile="$(realpath --relative-to="${tc_utils_dir}" \ "${metadata_dir}"/kernel_afdo.json)" # Convert toolchain_utils into the absolute path. -abs_tc_utils_dir="$(realpath ${tc_utils_dir})" +abs_tc_utils_dir="$(realpath "${tc_utils_dir}")" # Check profiles uploaded within the last week. expected_time=$(date +%s -d "week ago") @@ -47,10 +47,9 @@ canary_ref="refs/heads/main" # Read the last two release-Rxx from remote branches # and assign them to stable_ref and beta_ref. # sort -V is the version sort which puts R100 after R99. -last_branches=$(git -C "${tc_utils_dir}" ls-remote -h "${remote_repo}" \ - release-R\* | cut -f2 | sort -V | tail -n 2) # We need `echo` to convert newlines into spaces for read. -read stable_ref beta_ref <<< $(echo ${last_branches}) +read -r stable_ref beta_ref <<< "$(git -C "${tc_utils_dir}" ls-remote -h \ + "${remote_repo}" release-R\* | cut -f2 | sort -V | tail -n 2 | paste -s)" # Branch names which start from release-R. branch["beta"]=${beta_ref##*/} branch["stable"]=${stable_ref##*/} @@ -62,8 +61,8 @@ branch_number["stable"]=$(echo "${branch["stable"]}" | \ branch_number["beta"]=$(echo "${branch["beta"]}" | \ sed -n -e "s/^release-R\([0-9][0-9]*\).*$/\1/p") branch_number["canary"]="$((branch_number[beta] + 1))" -for skipped_branch in $SKIPPED_BRANCHES ; do - if [[ ${branch_number["canary"]} == $skipped_branch ]] ; then +for skipped_branch in ${SKIPPED_BRANCHES} ; do + if [[ ${branch_number["canary"]} == "${skipped_branch}" ]] ; then ((branch_number[canary]++)) fi done @@ -80,13 +79,13 @@ case "${channels}" in channels="canary beta stable" ;; --help | help | -h ) - echo "$USAGE" + echo "${USAGE}" exit 0 ;; * ) echo "Channel \"${channels}\" is not supported. Must be main (or canary), beta, stable or all." >&2 - echo "$USAGE" + echo "${USAGE}" exit 1 esac @@ -99,7 +98,7 @@ echo "-> Working in ${worktree_dir}" # This way we don't need to clean-up and sync toolchain_utils before the # change. Neither we should care about clean-up after the submit. 
git -C "${tc_utils_dir}" worktree add --detach "${worktree_dir}" -trap "git -C ${abs_tc_utils_dir} worktree remove ${worktree_dir}" EXIT +trap 'git -C "${abs_tc_utils_dir}" worktree remove "${worktree_dir}"' EXIT cd "${worktree_dir}" for channel in ${channels} @@ -113,39 +112,39 @@ do echo "branch_number=${curr_branch_number} branch=${curr_branch}" json="{" sep="" - for kver in $KVERS + for kver in ${KVERS} do # Sort the gs output by timestamp (default ordering is by name, so # R86-13310.3-1594633089.gcov.xz goes after R86-13310.18-1595237847.gcov.xz) - latest=$(gsutil.py ls -l "$GS_BASE/$kver/" | sort -k2 | \ + latest=$(gsutil.py ls -l "${GS_BASE}/${kver}/" | sort -k2 | \ grep "R${curr_branch_number}" | tail -1 || true) - if [[ -z "$latest" && "${channel}" != "stable" ]] + if [[ -z "${latest}" && "${channel}" != "stable" ]] then # if no profiles exist for the current branch, try the previous branch - latest=$(gsutil.py ls -l "$GS_BASE/$kver/" | sort -k2 | \ + latest=$(gsutil.py ls -l "${GS_BASE}/${kver}/" | sort -k2 | \ grep "R$((curr_branch_number - 1))" | tail -1) fi # Verify that the file has the expected date. - file_time=$(echo "$latest" | awk '{print $2}') - file_time_unix=$(date +%s -d "$file_time") - if [ $file_time_unix -lt $expected_time ] + file_time=$(echo "${latest}" | awk '{print $2}') + file_time_unix=$(date +%s -d "${file_time}") + if [ "${file_time_unix}" -lt "${expected_time}" ] then - expected=$(env TZ=UTC date +%Y-%m-%dT%H:%M:%SZ -d @$expected_time) - echo "Wrong date for $kver: $file_time is before $expected" >&2 - errs="$errs $kver" + expected=$(env TZ=UTC date +%Y-%m-%dT%H:%M:%SZ -d @"${expected_time}") + echo "Wrong date for ${kver}: ${file_time} is before ${expected}" >&2 + errs="${errs} ${kver}" continue fi # Generate JSON. - json_kver=$(echo "$kver" | tr . _) + json_kver=$(echo "${kver}" | tr . _) # b/147370213 (migrating profiles from gcov format) may result in the # pattern below no longer doing the right thing. - name=$(echo "$latest" | sed 's%.*/\(.*\)\.gcov.*%\1%') + name="$(basename "${latest%.gcov.*}")" json=$(cat <&2 failed_channels="${failed_channels} ${channel}" @@ -167,18 +166,18 @@ EOT # Write new JSON file. # Don't use `echo` since `json` might have esc characters in it. - printf "%s\n}\n" "$json" > "$outfile" + printf "%s\n}\n" "${json}" > "${outfile}" # If no changes were made, say so. - outdir=$(dirname "$outfile") - shortstat=$(cd "$outdir" && git status --short $(basename "$outfile")) - [ -z "$shortstat" ] && echo $(basename "$outfile")" is up to date." \ + outdir=$(dirname "${outfile}") + shortstat=$(cd "${outdir}" && git status --short "$(basename "${outfile}")") + [ -z "${shortstat}" ] && echo "$(basename "${outfile}") is up to date." \ && continue # If we had any errors, warn about them. - if [[ -n "$errs" ]] + if [[ -n "${errs}" ]] then - echo "warning: failed to update $errs in ${channel}" >&2 + echo "warning: failed to update ${errs} in ${channel}" >&2 failed_channels="${failed_channels} ${channel}" continue fi @@ -221,10 +220,10 @@ set +u if [[ ${#commit[@]} -gt 0 ]] then set -u - echo "The change is applied in ${!commit[@]}." + echo "The change is applied in ${!commit[*]}." 
echo "Run these commands to submit the change:" echo - for channel in ${!commit[@]} + for channel in "${!commit[@]}" do echo -e "\tgit -C ${tc_utils_dir} push ${remote_repo} \ ${commit[${channel}]}:refs/for/${branch[${channel}]}" -- cgit v1.2.3 From 128417b250ed7597a4adf548a6ae879c3f028736 Mon Sep 17 00:00:00 2001 From: Denis Nikitin Date: Mon, 7 Mar 2022 17:51:16 -0800 Subject: update_kernel_afdo: Add kernel 5.10 Add 5.10 and skip the kernel AFDO update if the channel does not support AFDO. Force worktree clean-up if the update fails. BUG=b:218702582 TEST=./afdo_tools/update_kernel_afdo main Change-Id: I9282e7cf600c27ba5a8b7e7329eae1512804af29 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3508821 Reviewed-by: Manoj Gupta Tested-by: Denis Nikitin Commit-Queue: Denis Nikitin --- afdo_tools/update_kernel_afdo | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/afdo_tools/update_kernel_afdo b/afdo_tools/update_kernel_afdo index 3944407c..7619b22c 100755 --- a/afdo_tools/update_kernel_afdo +++ b/afdo_tools/update_kernel_afdo @@ -25,7 +25,7 @@ set -eu set -o pipefail GS_BASE=gs://chromeos-prebuilt/afdo-job/vetted/kernel -KVERS="4.4 4.14 4.19 5.4" +KVERS="4.4 4.14 4.19 5.4 5.10" failed_channels="" # Add skipped chrome branches in ascending order here. SKIPPED_BRANCHES="95" @@ -98,7 +98,7 @@ echo "-> Working in ${worktree_dir}" # This way we don't need to clean-up and sync toolchain_utils before the # change. Neither we should care about clean-up after the submit. git -C "${tc_utils_dir}" worktree add --detach "${worktree_dir}" -trap 'git -C "${abs_tc_utils_dir}" worktree remove "${worktree_dir}"' EXIT +trap 'git -C "${abs_tc_utils_dir}" worktree remove -f "${worktree_dir}"' EXIT cd "${worktree_dir}" for channel in ${channels} @@ -141,6 +141,11 @@ do # b/147370213 (migrating profiles from gcov format) may result in the # pattern below no longer doing the right thing. name="$(basename "${latest%.gcov.*}")" + # Skip kernels with no AFDO support in the current channel. + if [[ "${name}" == "" ]] + then + continue + fi json=$(cat < Date: Thu, 10 Mar 2022 01:29:14 +0000 Subject: lock_machine: remove python2 from call to swarming swarming.py is being called with python2 which no longer exists in chrotomation. This was being done becuase swarming.py did not support python3 but this is no longer the case. BUG=b:221777277 TEST=manually verified that call executes the write script Change-Id: I2ec50a6f10b597ac7ca0a478e83eb9e2a5e3bb5e Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3514287 Commit-Queue: George Burgess Tested-by: George Burgess Reviewed-by: George Burgess --- lock_machine.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lock_machine.py b/lock_machine.py index 03c8c991..1a41290b 100755 --- a/lock_machine.py +++ b/lock_machine.py @@ -379,10 +379,7 @@ class LockManager(object): if os.path.exists(self.CROSFLEET_CREDENTIAL): credential = '--auth-service-account-json %s' % self.CROSFLEET_CREDENTIAL swarming = os.path.join(self.chromeos_root, self.SWARMING) - # TODO(zhizhouy): Swarming script doesn't support python3 so explicitly - # launch it with python2 until migrated. 
-    cmd = (('python2 %s '
-            'query --swarming https://chromeos-swarming.appspot.com '
+    cmd = (('%s query --swarming https://chromeos-swarming.appspot.com '
            "%s 'bots/list?is_dead=FALSE&dimensions=dut_name:%s'") %
           (swarming, credential, machine.rstrip('.cros')))
    exit_code, stdout, stderr = self.ce.RunCommandWOutput(cmd)
--
cgit v1.2.3


From 517d8e9cfb07243e5d1c66c8e75285236b848b91 Mon Sep 17 00:00:00 2001
From: Denis Nikitin 
Date: Fri, 18 Mar 2022 11:23:12 -0700
Subject: update_kernel_afdo: Update commit message and comments

BUG=None
TEST=./afdo_tools/update_kernel_afdo

Change-Id: Idcb3cfc4cd9c71f0e88b8d2f6a0f1ddf14ca3748
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3536832
Reviewed-by: George Burgess 
Tested-by: Denis Nikitin 
Commit-Queue: Denis Nikitin 
---
 afdo_tools/update_kernel_afdo | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/afdo_tools/update_kernel_afdo b/afdo_tools/update_kernel_afdo
index 7619b22c..c0417083 100755
--- a/afdo_tools/update_kernel_afdo
+++ b/afdo_tools/update_kernel_afdo
@@ -16,7 +16,7 @@ Description:
   to update the kernel afdo and creates a commit (or commits with \"all\"
   channels) in the corresponding branch.
   No arguments defaults to \"all\".
-  Follow the prompt to submit the changes.
+  Follow the prompt to upload the changes.
   NO CLEAN-UP NEEDED. The script ignores any local changes and keeps
   the current branch unchanged.
 "
@@ -190,12 +190,11 @@ EOT
   git add afdo_metadata/kernel_afdo.json
   case "${channel}" in
     canary )
-      commit_contents="afdo_metadata: Publish the new kernel profiles
-
-Update chromeos-kernel-4_4
-Update chromeos-kernel-4_14
-Update chromeos-kernel-4_19
-Update chromeos-kernel-5_4
+      commit_contents=$'afdo_metadata: Publish the new kernel profiles\n\n'
+      for kver in ${KVERS} ; do
+        commit_contents="${commit_contents}Update chromeos-kernel-${kver}"$'\n'
+      done
+      commit_contents="${commit_contents}
 BUG=None
 TEST=Verified in kernel-release-afdo-verify-orchestrator"
@@ -226,7 +225,7 @@ if [[ ${#commit[@]} -gt 0 ]]
 then
   set -u
   echo "The change is applied in ${!commit[*]}."
-  echo "Run these commands to submit the change:"
+  echo "Run these commands to upload the change:"
   echo
   for channel in "${!commit[@]}"
   do
--
cgit v1.2.3


From 0b0d2fdaddfa6626756d21f1d965179136d237ab Mon Sep 17 00:00:00 2001
From: Denis Nikitin 
Date: Fri, 18 Mar 2022 14:12:30 -0700
Subject: update_kernel_afdo: Skip 5.10 update in M-100

Add mechanism to skip kernel versions in individual branches.

BUG=None
TEST=./afdo_tools/update_kernel_afdo stable

Change-Id: Ic91ff99bd4dd2e57950376066ae2d4117c3845ed
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3536843
Reviewed-by: George Burgess 
Tested-by: Denis Nikitin 
Commit-Queue: Denis Nikitin 
---
 afdo_tools/update_kernel_afdo | 39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

diff --git a/afdo_tools/update_kernel_afdo b/afdo_tools/update_kernel_afdo
index c0417083..b3a9e6d1 100755
--- a/afdo_tools/update_kernel_afdo
+++ b/afdo_tools/update_kernel_afdo
@@ -30,6 +30,22 @@ failed_channels=""
 # Add skipped chrome branches in ascending order here.
 SKIPPED_BRANCHES="95"

+# NOTE: We enable/disable kernel AFDO starting from a particular branch.
+# For example if we want to enable kernel AFDO in 5.15, first, we do it
+# in main. In this case we want to disable it in beta and stable branches.
+# The second scenario is when we want to disable kernel AFDO (when all devices
+# move to kernelnext and there are no new profiles from the field). In this
+# case we disable AFDO in main but still keep it live in beta and stable.
+declare -A SKIPPED_KVERS_IN_BRANCHES
+# In SKIPPED_KVERS_IN_BRANCHES
+# - key is a branch number string;
+# - value is the list of kernels separated by space.
+# Example: SKIPPED_KVERS_IN_BRANCHES["105"]="4.4 4.14"
+
+# b/223115767. In M-100 there are no new profiles in 5.10. And AFDO is not
+# enabled on any 5.10 board in M-100 either.
+SKIPPED_KVERS_IN_BRANCHES["100"]="5.10"
+
 script_dir=$(dirname "$0")
 tc_utils_dir="${script_dir}/.."
 metadata_dir="${tc_utils_dir}/afdo_metadata"
@@ -114,6 +130,29 @@ do
   sep=""
   for kver in ${KVERS}
   do
+    # Skip kernels disabled in this branch.
+    skipped=false
+    for skipped_branch in "${!SKIPPED_KVERS_IN_BRANCHES[@]}"
+    do
+      if [[ ${curr_branch_number} == "${skipped_branch}" ]]
+      then
+        # Current branch is in the keys of SKIPPED_KVERS_IN_BRANCHES.
+        # Now lets check if $kver is in the list.
+        for skipped_kver in ${SKIPPED_KVERS_IN_BRANCHES[${skipped_branch}]}
+        do
+          if [[ ${kver} == "${skipped_kver}" ]]
+          then
+            skipped=true
+            break
+          fi
+        done
+      fi
+    done
+    if ${skipped}
+    then
+      echo "${kver} is skipped in branch ${curr_branch_number}. Skip it."
+      continue
+    fi
     # Sort the gs output by timestamp (default ordering is by name, so
     # R86-13310.3-1594633089.gcov.xz goes after R86-13310.18-1595237847.gcov.xz)
     latest=$(gsutil.py ls -l "${GS_BASE}/${kver}/" | sort -k2 | \
--
cgit v1.2.3


From af5d07813bf09c86b58fc3a04d2eded8eb28271f Mon Sep 17 00:00:00 2001
From: Denis Nikitin 
Date: Fri, 18 Mar 2022 11:06:21 -0700
Subject: afdo_metadata: Publish the new kernel profiles

Update chromeos-kernel-4.4
Update chromeos-kernel-4.14
Update chromeos-kernel-4.19
Update chromeos-kernel-5.4
Update chromeos-kernel-5.10

BUG=None
TEST=Verified in kernel-release-afdo-verify-orchestrator

Change-Id: Id3ddbafa957b5d9f7bcf437ed50498ab81097b3b
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3536829
Tested-by: Denis Nikitin 
Reviewed-by: Christopher Di Bella 
Commit-Queue: Denis Nikitin 
---
 afdo_metadata/kernel_afdo.json | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json
index 49e19277..bc969a89 100644
--- a/afdo_metadata/kernel_afdo.json
+++ b/afdo_metadata/kernel_afdo.json
@@ -1,14 +1,17 @@
 {
   "chromeos-kernel-4_4": {
-    "name": "R100-14516.0-1645439511"
+    "name": "R101-14553.0-1647250486"
   },
   "chromeos-kernel-4_14": {
-    "name": "R100-14516.0-1645439661"
+    "name": "R101-14543.0-1647250600"
   },
   "chromeos-kernel-4_19": {
-    "name": "R100-14516.0-1645439606"
+    "name": "R101-14553.0-1647250349"
   },
   "chromeos-kernel-5_4": {
-    "name": "R100-14516.0-1645439482"
+    "name": "R101-14556.0-1647250363"
+  },
+  "chromeos-kernel-5_10": {
+    "name": "R101-14553.0-1647251059"
   }
 }
--
cgit v1.2.3


From 08da726f714a8220bfdf1d609e392f3c102171b1 Mon Sep 17 00:00:00 2001
From: Ryan Beltran 
Date: Fri, 25 Mar 2022 20:03:56 +0000
Subject: lock_machine: specify python3 for swarming

swarming.py was being called with python2 which no longer exists in
chrotomation. This was later changed to make the invocation call
swarming as an executable; however, this led swarming.py to then invoke
the default `python` executable, which also doesn't exist in
chrotomation, and so a similar problem ensued.
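As a rough illustration (the query arguments below are stand-ins, not the
real invocation), the difference is whether the interpreter comes from
the script's shebang or is pinned by the caller:

    # Before: relies on swarming.py's shebang resolving `python`,
    # which is absent on the builder.
    swarming.py query ...
    # After: the interpreter is selected explicitly.
    python3 swarming.py query ...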
BUG=b:221777277
TEST=manually verified

Change-Id: Ie6537b00a25fbc12cdeb95ada92d8095a609b291
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3553430
Reviewed-by: Luis Lozano 
Commit-Queue: Luis Lozano 
Tested-by: Luis Lozano 
---
 lock_machine.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lock_machine.py b/lock_machine.py
index 1a41290b..c64d3164 100755
--- a/lock_machine.py
+++ b/lock_machine.py
@@ -379,8 +379,8 @@ class LockManager(object):
     if os.path.exists(self.CROSFLEET_CREDENTIAL):
       credential = '--auth-service-account-json %s' % self.CROSFLEET_CREDENTIAL
     swarming = os.path.join(self.chromeos_root, self.SWARMING)
-    cmd = (('%s query --swarming https://chromeos-swarming.appspot.com '
-            "%s 'bots/list?is_dead=FALSE&dimensions=dut_name:%s'") %
+    cmd = (('python3 %s query --swarming https://chromeos-swarming.appspot.com'
+            " %s 'bots/list?is_dead=FALSE&dimensions=dut_name:%s'") %
           (swarming, credential, machine.rstrip('.cros')))
    exit_code, stdout, stderr = self.ce.RunCommandWOutput(cmd)
    if exit_code:
--
cgit v1.2.3


From ff75f0210988450f19d95f99402cf9f848e96b71 Mon Sep 17 00:00:00 2001
From: Luis Lozano 
Date: Fri, 25 Mar 2022 14:26:07 -0700
Subject: afdo_metadata: Publish the new kernel profiles

Update chromeos-kernel-4.4
Update chromeos-kernel-4.14
Update chromeos-kernel-4.19
Update chromeos-kernel-5.4
Update chromeos-kernel-5.10

BUG=None
TEST=Verified in kernel-release-afdo-verify-orchestrator

Change-Id: Ia84aafd0138f0de9df17d17b8db62e8b4bb4057d
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3551233
Auto-Submit: Luis Lozano 
Reviewed-by: Denis Nikitin 
Commit-Queue: Denis Nikitin 
Tested-by: Denis Nikitin 
---
 afdo_metadata/kernel_afdo.json | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json
index bc969a89..097f281f 100644
--- a/afdo_metadata/kernel_afdo.json
+++ b/afdo_metadata/kernel_afdo.json
@@ -1,17 +1,17 @@
 {
   "chromeos-kernel-4_4": {
-    "name": "R101-14553.0-1647250486"
+    "name": "R102-14574.0-1647855795"
   },
   "chromeos-kernel-4_14": {
-    "name": "R101-14543.0-1647250600"
+    "name": "R102-14555.0-1647855537"
   },
   "chromeos-kernel-4_19": {
-    "name": "R101-14553.0-1647250349"
+    "name": "R102-14574.0-1647855173"
   },
   "chromeos-kernel-5_4": {
-    "name": "R101-14556.0-1647250363"
+    "name": "R102-14583.0-1647855216"
   },
   "chromeos-kernel-5_10": {
-    "name": "R101-14553.0-1647251059"
+    "name": "R102-14574.0-1647855945"
   }
 }
--
cgit v1.2.3


From 20d36f9fe54ede6a1d1e7b3f2747ac79235263c6 Mon Sep 17 00:00:00 2001
From: Ryan Beltran 
Date: Sat, 26 Mar 2022 00:13:03 +0000
Subject: lock_machine: use swarming binary instead of .py

This CL moves away from calling swarming.py in favor of directly
invoking the binary for swarming.
The justification for doing so is this:
1) swarming.py in luci-client is now 3 years out of date
2) uprevving luci-client in the manifest could have unforeseen impact
   on other consumers

BUG=b:221777277
TEST=Verify in chrotomation

Change-Id: I269ef5fb4a450f166bbff50c97a4d545680d028c
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3553787
Reviewed-by: Denis Nikitin 
Commit-Queue: Ryan Beltran 
Tested-by: Ryan Beltran 
---
 lock_machine.py | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/lock_machine.py b/lock_machine.py
index c64d3164..85a0cfa3 100755
--- a/lock_machine.py
+++ b/lock_machine.py
@@ -62,7 +62,7 @@ class LockManager(object):
   CROSFLEET_CREDENTIAL = ('/usr/local/google/home/mobiletc-prebuild'
                           '/sheriff_utils/credentials/skylab'
                           '/chromeos-swarming-credential.json')
-  SWARMING = 'chromite/third_party/swarming.client/swarming.py'
+  SWARMING = '~/cipd_binaries/swarming'
   SUCCESS = 0

   def __init__(self,
@@ -377,11 +377,11 @@ class LockManager(object):
     """
     credential = ''
     if os.path.exists(self.CROSFLEET_CREDENTIAL):
-      credential = '--auth-service-account-json %s' % self.CROSFLEET_CREDENTIAL
-    swarming = os.path.join(self.chromeos_root, self.SWARMING)
-    cmd = (('python3 %s query --swarming https://chromeos-swarming.appspot.com'
-            " %s 'bots/list?is_dead=FALSE&dimensions=dut_name:%s'") %
-           (swarming, credential, machine.rstrip('.cros')))
+      credential = '--service-account-json %s' % self.CROSFLEET_CREDENTIAL
+    server = '--server https://chromeos-swarming.appspot.com'
+    dimensions = '--dimension dut_name=%s' % machine.rstrip('.cros')
+
+    cmd = f'{self.SWARMING} bots {server} {credential} {dimensions}'
     exit_code, stdout, stderr = self.ce.RunCommandWOutput(cmd)
     if exit_code:
       raise ValueError('Querying bots failed (2); stdout: %r; stderr: %r' %
@@ -395,7 +395,7 @@ class LockManager(object):
     # }
     # Otherwise there will be a tuple starting with 'items', we simply detect
     # this keyword for result.
-    return 'items' in stdout
+    return stdout != '[]'

   def LeaseCrosfleetMachine(self, machine):
     """Run command to lease dut from crosfleet.
@@ -424,6 +424,7 @@ class LockManager(object):
     credential = ''
     if os.path.exists(self.CROSFLEET_CREDENTIAL):
       credential = '-service-account-json %s' % self.CROSFLEET_CREDENTIAL
+
    cmd = (('%s dut abandon %s %s') %
           (self.CROSFLEET_PATH, credential, machine.rstrip('.cros')))
    retval = self.ce.RunCommand(cmd)
--
cgit v1.2.3


From 8c45bc73aa01806b21cfd14f7b4637802054860b Mon Sep 17 00:00:00 2001
From: Jordan R Abrahams-Whitehead 
Date: Wed, 30 Mar 2022 00:55:02 +0000
Subject: llvm_tools: Add URL formatting for revert_checker

Copied from llvm-project/llvm/utils. This lets the revert_checker.py
get called with the `-u` option, which formats the revert SHAs into
handy URLs to the LLVM reviews.
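A sketch of the intended usage (the SHAs in the output line are
placeholders for illustration; the URL prefix matches the code below):

    $ llvm_tools/revert_checker.py -C ../../llvm-project -u HEAD
    https://reviews.llvm.org/rG<revert-sha> claims to revert https://reviews.llvm.org/rG<reverted-sha>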
BUG=None TEST=revert_checker.py -C ../../llvm-project -u HEAD Change-Id: I8dd90f20727a73a481a61519a9fb56364b20107c Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3558477 Reviewed-by: Manoj Gupta Reviewed-by: George Burgess Commit-Queue: Jordan Abrahams-Whitehead Tested-by: Jordan Abrahams-Whitehead --- llvm_tools/revert_checker.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/llvm_tools/revert_checker.py b/llvm_tools/revert_checker.py index acc8b5fa..2a0ab861 100755 --- a/llvm_tools/revert_checker.py +++ b/llvm_tools/revert_checker.py @@ -243,6 +243,9 @@ def _main() -> None: parser.add_argument( 'root', nargs='+', help='Root(s) to search for commits from.') parser.add_argument('--debug', action='store_true') + parser.add_argument( + '-u', '--review_url', action='store_true', + help='Format SHAs as llvm review URLs') opts = parser.parse_args() logging.basicConfig( @@ -263,7 +266,11 @@ def _main() -> None: all_reverts.append(revert) for revert in all_reverts: - print(f'{revert.sha} claims to revert {revert.reverted_sha}') + sha_fmt = (f'https://reviews.llvm.org/rG{revert.sha}' + if opts.review_url else revert.sha) + reverted_sha_fmt = (f'https://reviews.llvm.org/rG{revert.reverted_sha}' + if opts.review_url else revert.reverted_sha) + print(f'{sha_fmt} claims to revert {reverted_sha_fmt}') if __name__ == '__main__': -- cgit v1.2.3 From c75066609f65e63fe5f73da016b106379f5409bf Mon Sep 17 00:00:00 2001 From: Denis Nikitin Date: Thu, 31 Mar 2022 15:57:51 -0700 Subject: afdo_metadata: Publish the new kernel profiles Update chromeos-kernel-4.4 Update chromeos-kernel-4.14 Update chromeos-kernel-4.19 Update chromeos-kernel-5.4 Update chromeos-kernel-5.10 BUG=None TEST=Verified in kernel-release-afdo-verify-orchestrator Change-Id: I1036ff48f370bfa11daeb401f143900aea2fb551 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3564694 Tested-by: Denis Nikitin Auto-Submit: Denis Nikitin Reviewed-by: Manoj Gupta Commit-Queue: Manoj Gupta --- afdo_metadata/kernel_afdo.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json index 097f281f..4c16c74d 100644 --- a/afdo_metadata/kernel_afdo.json +++ b/afdo_metadata/kernel_afdo.json @@ -1,17 +1,17 @@ { "chromeos-kernel-4_4": { - "name": "R102-14574.0-1647855795" + "name": "R102-14588.11-1648459935" }, "chromeos-kernel-4_14": { - "name": "R102-14555.0-1647855537" + "name": "R102-14555.0-1648460042" }, "chromeos-kernel-4_19": { - "name": "R102-14574.0-1647855173" + "name": "R102-14588.11-1648459977" }, "chromeos-kernel-5_4": { - "name": "R102-14583.0-1647855216" + "name": "R102-14588.11-1648460110" }, "chromeos-kernel-5_10": { - "name": "R102-14574.0-1647855945" + "name": "R102-14588.13-1648460351" } } -- cgit v1.2.3 From 273a7232175a649c294ceb71de9b8bc6a091cb3c Mon Sep 17 00:00:00 2001 From: Denis Nikitin Date: Fri, 1 Apr 2022 11:21:51 -0700 Subject: crosperf: Remove veyron from remotes Veyron was removed from the nightly toolchain testing. Remove it from remotes monitoring. 
BUG=b:227714343
TEST=None

Change-Id: I23d0b6027b0a37e6ebcde77955fa00eb8941b2b0
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3565860
Tested-by: Denis Nikitin 
Auto-Submit: Denis Nikitin 
Reviewed-by: Ryan Beltran 
Commit-Queue: Ryan Beltran 
Feels: Ryan Beltran 
---
 crosperf/default_remotes | 1 -
 crosperf/experiment_factory_unittest.py | 10 ++++++++--
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/crosperf/default_remotes b/crosperf/default_remotes
index faecb833..4910a58b 100644
--- a/crosperf/default_remotes
+++ b/crosperf/default_remotes
@@ -5,4 +5,3 @@ elm : chromeos6-row14-rack15-host21.cros
 kefka : chromeos6-row6-rack22-host2.cros chromeos6-row6-rack22-host3.cros chromeos6-row11-rack22-host7.cros
 nautilus : chromeos6-row5-rack10-host1.cros chromeos6-row5-rack10-host3.cros
 snappy : chromeos6-row3-rack20-host1.cros chromeos6-row3-rack20-host3.cros
-veyron_tiger : chromeos6-row3-rack7-host1.cros
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index 9637c108..d52f2a55 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -79,6 +79,7 @@ EXPERIMENT_FILE_2 = """
 class ExperimentFactoryTest(unittest.TestCase):
   """Class for running experiment factory unittests."""
+
   def setUp(self):
     self.append_benchmark_call_args = []
@@ -402,8 +403,13 @@ class ExperimentFactoryTest(unittest.TestCase):
   def test_get_default_remotes(self):
     board_list = [
-        'bob', 'chell', 'coral', 'elm', 'kefka', 'nautilus', 'snappy',
-        'veyron_tiger'
+        'bob',
+        'chell',
+        'coral',
+        'elm',
+        'kefka',
+        'nautilus',
+        'snappy',
     ]

     ef = ExperimentFactory()
--
cgit v1.2.3


From bcfe50b5b79bd34b40b8ca5895767bb9540246d9 Mon Sep 17 00:00:00 2001
From: Jordan R Abrahams-Whitehead 
Date: Thu, 31 Mar 2022 16:03:32 +0000
Subject: llvm_tools: Add llvm_local_bisection.sh

This adds a template for a complete bash script for llvm local
bisection. This template is from a script that llozano gave to
ryanbeltran, but reworked to be easily extendable to other bisection
steps. Comes with "batteries included" build_llvm and "build_pkg"
functions.

BUG=None
TEST=Run on llvm-project as the `git bisect run` arg.

Change-Id: Ibc0115033d07b76d40ec2f4b7d7950cf9a518152
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3563907
Tested-by: Jordan Abrahams-Whitehead 
Reviewed-by: Manoj Gupta 
Commit-Queue: Jordan Abrahams-Whitehead 
---
 llvm_tools/llvm_local_bisection.sh | 108 +++++++++++++++++++++++++++++++++++++
 1 file changed, 108 insertions(+)
 create mode 100755 llvm_tools/llvm_local_bisection.sh

diff --git a/llvm_tools/llvm_local_bisection.sh b/llvm_tools/llvm_local_bisection.sh
new file mode 100755
index 00000000..f84c2410
--- /dev/null
+++ b/llvm_tools/llvm_local_bisection.sh
@@ -0,0 +1,108 @@
+#!/bin/bash -u
+# -*- coding: utf-8 -*-
+# Copyright 2022 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# llvm_local_bisection.sh
+#
+# This script is meant to be run inside a `git bisect` process, like so:
+#
+#   $ cd
+#   $ git bisect start
+#   $ git bisect bad
+#   $ git bisect good
+#   $ git bisect run ~/chromiumos/src/scripts/llvm_local_bisection.sh
+#
+# This template exists as a "batteries included" LLVM bisection script,
+# which will modify the LLVM_NEXT hash to help the mage track down issues
+# locally.
+#
+# Modify the fixme sections below to customize to your bisection use-case.
+
+# FIXME: Replace this with the location of your llvm clone within the chroot.
+# We need this for the git history.
+LLVM_CLONE_PATH="${HOME}/chromiumos/src/third_party/llvm-project"
+
+main () {
+  # Note this builds with USE="llvm-next debug -thinlto -llvm_use_pgo"
+  build_llvm || exit
+
+  # FIXME: Write your actual bisection command which uses
+  # LLVM_NEXT here.
+  #
+  # Example bisection command:
+  #
+  #   build_pkg efitools || exit 1
+  #
+  # You can use build_pkg if you want to emerge a package and print
+  # out diagnostics along the way
+  #
+  # Fail Example: build_pkg "${MY_PACKAGE}" || exit 1
+  # Skip Example: build_pkg "${MY_PACKAGE}" || exit 125
+  #
+}
+
+# ---------------------------------------------------------------------
+
+# Current LLVM_NEXT_HASH we're using. Does not need to be set.
+CURRENT='UNKNOWN'
+
+logdo () {
+  local cmd="${1}"
+  shift
+  printf '%s $ %s' "$(date '+%T')" "${cmd}"
+  for i in "$@"; do
+    printf " '%s'" "${i}"
+  done
+  printf "\n"
+  "${cmd}" "$@"
+}
+
+log () {
+  echo "$(date '+%T') | $*"
+}
+
+build_llvm () {
+  cd "${LLVM_CLONE_PATH}" || exit 2  # Exit with error
+  local llvm_ebuild_path
+  llvm_ebuild_path="$(readlink -f "$(equery which llvm)")"
+  CURRENT="$(git rev-parse --short HEAD)"
+  log "Current hash=${CURRENT}"
+  NEW_LINE="LLVM_NEXT_HASH=\"${CURRENT}\""
+  sed -i "s/^LLVM_NEXT_HASH=\".*\"/${NEW_LINE}/" "${llvm_ebuild_path}"
+
+  local logfile="/tmp/build-llvm.${CURRENT}.out"
+  log "Writing logs to ${logfile}"
+  log "sudo USE='llvm-next debug -thinlto -llvm_use_pgo' emerge sys-devel/llvm"
+  logdo sudo USE='llvm-next debug -thinlto -llvm_use_pgo' emerge \
+    sys-devel/llvm \
+    &> "${logfile}"
+  local emerge_exit_code="$?"
+  if [[ "${emerge_exit_code}" -ne 0 ]]; then
+    log "FAILED to build llvm with hash=${CURRENT}"
+    log 'Skipping this hash'
+    return 125  # 125 is the "skip" exit code.
+  fi
+  log "Successfully built LLVM with hash=${CURRENT}"
+  return 0  # Explicitly returning 0 for "good" even if a command errors out
+}
+
+build_pkg () {
+  local pkg="${1}"
+
+  local logfile="/tmp/build-${pkg}.${CURRENT}.out"
+  log "Writing logs to ${logfile}"
+  log "sudo emerge ${pkg}"
+  logdo sudo emerge "${pkg}" \
+    &> "${logfile}"
+  local emerge_exit_code="$?"
+ if [[ "${emerge_exit_code}" -ne 0 ]]; then + log "FAILED to build ${pkg} with hash=${CURRENT}" + return 1 # 1 here isn't for bisection, but for chaining with `||` + fi + log "Successfully built ${pkg} with hash=${CURRENT}" + return 0 # Explicitly returning 0 for "good" even if a command errors out +} + +main -- cgit v1.2.3 From 424b0f6a965d95139c6e19ec1d37a1d6ce0dc47d Mon Sep 17 00:00:00 2001 From: Manoj Gupta Date: Wed, 6 Apr 2022 11:19:05 -0700 Subject: afdo_metadata: Publish the new kernel profiles Update chromeos-kernel-4.4 Update chromeos-kernel-4.14 Update chromeos-kernel-4.19 Update chromeos-kernel-5.4 Update chromeos-kernel-5.10 BUG=None TEST=Verified in kernel-release-afdo-verify-orchestrator Change-Id: I55ee3a4799f0ecaab87ad1d8207d688c908fbea9 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3573011 Commit-Queue: Manoj Gupta Tested-by: Manoj Gupta Auto-Submit: Manoj Gupta Reviewed-by: Denis Nikitin Commit-Queue: Denis Nikitin --- afdo_metadata/kernel_afdo.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json index 4c16c74d..2d640804 100644 --- a/afdo_metadata/kernel_afdo.json +++ b/afdo_metadata/kernel_afdo.json @@ -1,17 +1,17 @@ { "chromeos-kernel-4_4": { - "name": "R102-14588.11-1648459935" + "name": "R102-14588.23-1649065028" }, "chromeos-kernel-4_14": { - "name": "R102-14555.0-1648460042" + "name": "R102-14574.0-1649065329" }, "chromeos-kernel-4_19": { - "name": "R102-14588.11-1648459977" + "name": "R102-14588.23-1649064755" }, "chromeos-kernel-5_4": { - "name": "R102-14588.11-1648460110" + "name": "R102-14588.23-1649064834" }, "chromeos-kernel-5_10": { - "name": "R102-14588.13-1648460351" + "name": "R102-14588.23-1649064775" } } -- cgit v1.2.3 From 2124be5caee6803d5bfe6f7cdc8e3367cb375807 Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Thu, 21 Apr 2022 10:27:37 -0700 Subject: toolchain_utils: s/Cr OS/CrOS/g Result of running `sed -ri 's/Chrom(ium|e) OS/Chrom\1OS/g' $(find -type f)`. 
BUG=None TEST=None Change-Id: I59be92537aa19bc989f52b585e307e76dbde401b Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3600147 Reviewed-by: Manoj Gupta Commit-Queue: George Burgess Tested-by: George Burgess --- LICENSE | 2 +- README.chromium | 6 +++--- README.md | 2 +- afdo_metadata/README.md | 6 +++--- afdo_redaction/redact_profile.py | 2 +- afdo_redaction/redact_profile_test.py | 2 +- afdo_redaction/remove_cold_functions.py | 4 ++-- afdo_redaction/remove_cold_functions_test.py | 2 +- afdo_redaction/remove_indirect_calls.py | 2 +- afdo_redaction/remove_indirect_calls_test.py | 2 +- afdo_tools/bisection/afdo_prof_analysis.py | 2 +- afdo_tools/bisection/afdo_prof_analysis_e2e_test.py | 2 +- afdo_tools/bisection/afdo_prof_analysis_test.py | 2 +- afdo_tools/bisection/state_assumption_external.sh | 2 +- afdo_tools/bisection/state_assumption_interrupt.sh | 2 +- afdo_tools/generate_afdo_from_tryjob.py | 2 +- afdo_tools/run_afdo_tryjob.py | 2 +- afdo_tools/update_kernel_afdo | 2 +- android_merge_from_upstream.sh | 2 +- auto_delete_nightly_test_data.py | 2 +- bestflags/example_algorithms.py | 2 +- bestflags/flags.py | 2 +- bestflags/flags_test.py | 2 +- bestflags/flags_util.py | 2 +- bestflags/generation.py | 2 +- bestflags/generation_test.py | 2 +- bestflags/genetic_algorithm.py | 2 +- bestflags/hill_climb_best_neighbor.py | 2 +- bestflags/iterative_elimination.py | 2 +- bestflags/mock_task.py | 2 +- bestflags/pipeline_process.py | 2 +- bestflags/pipeline_process_test.py | 2 +- bestflags/pipeline_worker.py | 2 +- bestflags/pipeline_worker_test.py | 2 +- bestflags/steering.py | 2 +- bestflags/steering_test.py | 2 +- bestflags/task.py | 2 +- bestflags/task_test.py | 2 +- bestflags/testing_batch.py | 2 +- binary_search_tool/MAINTENANCE | 2 +- binary_search_tool/__init__.py | 2 +- binary_search_tool/android/generate_cmd.sh | 2 +- binary_search_tool/binary_search_perforce.py | 2 +- binary_search_tool/binary_search_state.py | 2 +- binary_search_tool/bisect_driver.py | 2 +- binary_search_tool/common.py | 2 +- binary_search_tool/common/test_setup.sh | 2 +- binary_search_tool/compiler_wrapper.py | 2 +- binary_search_tool/cros_pkg/create_cleanup_script.py | 2 +- binary_search_tool/pass_mapping.py | 2 +- binary_search_tool/run_bisect.py | 2 +- binary_search_tool/run_bisect_tests.py | 2 +- binary_search_tool/sysroot_wrapper/setup.sh | 2 +- binary_search_tool/sysroot_wrapper/testing_test.py | 2 +- binary_search_tool/test/__init__.py | 2 +- binary_search_tool/test/binary_search_tool_test.py | 2 +- binary_search_tool/test/cmd_script.py | 2 +- binary_search_tool/test/cmd_script_no_support.py | 2 +- binary_search_tool/test/common.py | 2 +- binary_search_tool/test/gen_init_list.py | 2 +- binary_search_tool/test/gen_obj.py | 2 +- binary_search_tool/test/generate_cmd.py | 2 +- binary_search_tool/test/is_good.py | 2 +- binary_search_tool/test/is_good_noinc_prune.py | 2 +- binary_search_tool/test/switch_tmp.py | 2 +- binary_search_tool/test/switch_to_bad.py | 2 +- binary_search_tool/test/switch_to_bad_noinc_prune.py | 2 +- binary_search_tool/test/switch_to_bad_set_file.py | 2 +- binary_search_tool/test/switch_to_good.py | 2 +- binary_search_tool/test/switch_to_good_noinc_prune.py | 2 +- binary_search_tool/test/switch_to_good_set_file.py | 2 +- binary_search_tool/test/test_setup.py | 2 +- binary_search_tool/test/test_setup_bad.py | 2 +- build_chromeos.py | 2 +- build_tc.py | 2 +- buildbot_test_llvm.py | 2 +- buildbot_test_toolchains.py | 2 +- chromiumos_image_diff.py | 
2 +- compiler_wrapper/README.md | 4 ++-- compiler_wrapper/android_config_test.go | 2 +- compiler_wrapper/bisect_flag.go | 2 +- compiler_wrapper/bisect_flag_test.go | 2 +- compiler_wrapper/build.py | 4 ++-- compiler_wrapper/bundle.README | 2 +- compiler_wrapper/bundle.py | 2 +- compiler_wrapper/ccache_flag.go | 2 +- compiler_wrapper/ccache_flag_test.go | 2 +- compiler_wrapper/clang_flags.go | 2 +- compiler_wrapper/clang_flags_test.go | 2 +- compiler_wrapper/clang_syntax_flag.go | 2 +- compiler_wrapper/clang_syntax_flag_test.go | 2 +- compiler_wrapper/clang_tidy_flag.go | 2 +- compiler_wrapper/clang_tidy_flag_test.go | 2 +- compiler_wrapper/command.go | 2 +- compiler_wrapper/command_test.go | 2 +- compiler_wrapper/compile_with_fallback.go | 2 +- compiler_wrapper/compile_with_fallback_test.go | 2 +- compiler_wrapper/compiler_wrapper.go | 2 +- compiler_wrapper/compiler_wrapper_test.go | 2 +- compiler_wrapper/config.go | 2 +- compiler_wrapper/config_test.go | 2 +- compiler_wrapper/cros_hardened_config_test.go | 2 +- compiler_wrapper/cros_host_config_test.go | 2 +- compiler_wrapper/cros_llvm_next_flags.go | 4 ++-- compiler_wrapper/cros_nonhardened_config_test.go | 2 +- compiler_wrapper/disable_werror_flag.go | 2 +- compiler_wrapper/disable_werror_flag_test.go | 2 +- compiler_wrapper/env.go | 2 +- compiler_wrapper/env_test.go | 2 +- compiler_wrapper/errors.go | 2 +- compiler_wrapper/errors_test.go | 2 +- compiler_wrapper/gcc_flags.go | 2 +- compiler_wrapper/gcc_flags_test.go | 2 +- compiler_wrapper/go_exec.go | 2 +- compiler_wrapper/goldenutil_test.go | 2 +- compiler_wrapper/install_compiler_wrapper.sh | 2 +- compiler_wrapper/kernel_bug.go | 2 +- compiler_wrapper/kernel_bug_test.go | 2 +- compiler_wrapper/libc_exec.go | 2 +- compiler_wrapper/libgcc_flags.go | 2 +- compiler_wrapper/libgcc_flags_test.go | 2 +- compiler_wrapper/main.go | 2 +- compiler_wrapper/pie_flags.go | 2 +- compiler_wrapper/pie_flags_test.go | 2 +- compiler_wrapper/print_cmdline_flag.go | 2 +- compiler_wrapper/print_cmdline_flag_test.go | 2 +- compiler_wrapper/print_config_flag.go | 2 +- compiler_wrapper/print_config_flag_test.go | 2 +- compiler_wrapper/remote_build_flag_test.go | 2 +- compiler_wrapper/remote_build_flags.go | 2 +- compiler_wrapper/reset_compiler_wrapper.sh | 2 +- compiler_wrapper/rusage_flag.go | 2 +- compiler_wrapper/rusage_flag_test.go | 2 +- compiler_wrapper/sanitizer_flags.go | 2 +- compiler_wrapper/sanitizer_flags_test.go | 2 +- compiler_wrapper/stackprotector_flags.go | 2 +- compiler_wrapper/stackprotector_flags_test.go | 2 +- compiler_wrapper/sysroot_flag.go | 2 +- compiler_wrapper/sysroot_flag_test.go | 2 +- compiler_wrapper/testutil_test.go | 2 +- compiler_wrapper/thumb_flags.go | 2 +- compiler_wrapper/thumb_flags_test.go | 2 +- compiler_wrapper/unsupported_flags.go | 2 +- compiler_wrapper/unsupported_flags_test.go | 2 +- compiler_wrapper/x64_flags.go | 2 +- compiler_wrapper/x64_flags_test.go | 2 +- cros_utils/__init__.py | 2 +- cros_utils/bugs.py | 2 +- cros_utils/bugs_test.py | 2 +- cros_utils/buildbot_utils.py | 2 +- cros_utils/buildbot_utils_unittest.py | 2 +- cros_utils/command_executer.py | 2 +- cros_utils/command_executer_timeout_test.py | 2 +- cros_utils/command_executer_unittest.py | 2 +- cros_utils/constants.py | 2 +- cros_utils/device_setup_utils.py | 2 +- cros_utils/device_setup_utils_unittest.py | 2 +- cros_utils/email_sender.py | 2 +- cros_utils/email_sender_unittest.py | 2 +- cros_utils/file_utils.py | 2 +- cros_utils/html_tools.py | 2 +- cros_utils/locks.py | 2 +- cros_utils/logger.py | 
2 +- cros_utils/machines.py | 2 +- cros_utils/misc.py | 2 +- cros_utils/misc_test.py | 2 +- cros_utils/no_pseudo_terminal_test.py | 2 +- cros_utils/perf_diff.py | 2 +- cros_utils/tabulator.py | 2 +- cros_utils/tabulator_test.py | 2 +- cros_utils/timeline.py | 2 +- cros_utils/timeline_test.py | 2 +- cros_utils/tiny_render.py | 2 +- cros_utils/tiny_render_test.py | 2 +- cros_utils/toolchain_utils.sh | 2 +- crosperf/benchmark.py | 2 +- crosperf/benchmark_run.py | 2 +- crosperf/benchmark_run_unittest.py | 2 +- crosperf/benchmark_unittest.py | 2 +- crosperf/column_chart.py | 2 +- crosperf/compare_machines.py | 2 +- crosperf/config.py | 2 +- crosperf/config_unittest.py | 2 +- crosperf/crosperf | 2 +- crosperf/crosperf.py | 2 +- crosperf/crosperf_autolock.py | 2 +- crosperf/crosperf_unittest.py | 2 +- crosperf/download_images.py | 2 +- crosperf/download_images_buildid_test.py | 2 +- crosperf/download_images_unittest.py | 2 +- crosperf/experiment.py | 2 +- crosperf/experiment_factory.py | 2 +- crosperf/experiment_factory_unittest.py | 2 +- crosperf/experiment_file.py | 2 +- crosperf/experiment_file_unittest.py | 2 +- crosperf/experiment_files/telemetry_perf_perf | 4 ++-- crosperf/experiment_runner.py | 2 +- crosperf/experiment_runner_unittest.py | 2 +- crosperf/experiment_status.py | 2 +- crosperf/field.py | 2 +- crosperf/flag_test_unittest.py | 2 +- crosperf/generate_report.py | 2 +- crosperf/generate_report_unittest.py | 2 +- crosperf/help.py | 2 +- crosperf/image_checksummer.py | 2 +- crosperf/label.py | 2 +- crosperf/machine_image_manager.py | 2 +- crosperf/machine_image_manager_unittest.py | 2 +- crosperf/machine_manager.py | 2 +- crosperf/machine_manager_unittest.py | 2 +- crosperf/mock_instance.py | 2 +- crosperf/results_cache.py | 2 +- crosperf/results_cache_unittest.py | 2 +- crosperf/results_organizer.py | 2 +- crosperf/results_organizer_unittest.py | 2 +- crosperf/results_report.py | 2 +- crosperf/results_report_templates.py | 2 +- crosperf/results_report_unittest.py | 2 +- crosperf/schedv2.py | 2 +- crosperf/schedv2_unittest.py | 2 +- crosperf/settings.py | 2 +- crosperf/settings_factory.py | 2 +- crosperf/settings_factory_unittest.py | 2 +- crosperf/settings_unittest.py | 2 +- crosperf/suite_runner.py | 2 +- crosperf/suite_runner_unittest.py | 2 +- crosperf/test_flag.py | 2 +- crosperf/translate_xbuddy.py | 2 +- cwp/cr-os/fetch_gn_descs.py | 2 +- cwp/cr-os/fetch_gn_descs_test.py | 2 +- debug_info_test/allowlist.py | 2 +- debug_info_test/check_cus.py | 2 +- debug_info_test/check_exist.py | 2 +- debug_info_test/check_icf.py | 2 +- debug_info_test/check_ngcc.py | 2 +- debug_info_test/debug_info_test.py | 2 +- file_lock_machine.py | 2 +- file_lock_machine_test.py | 2 +- go/chromeos/setup_chromeos_testing.py | 2 +- heatmaps/heat_map.py | 2 +- heatmaps/heat_map_test.py | 2 +- heatmaps/heatmap_generator.py | 2 +- heatmaps/heatmap_generator_test.py | 2 +- heatmaps/perf-to-inst-page.sh | 2 +- image_chromeos.py | 2 +- llvm_extra/create_ebuild_file.py | 2 +- llvm_extra/create_llvm_extra.sh | 2 +- llvm_tools/auto_llvm_bisection.py | 2 +- llvm_tools/auto_llvm_bisection_unittest.py | 2 +- llvm_tools/bisect_clang_crashes.py | 4 ++-- llvm_tools/bisect_clang_crashes_unittest.py | 2 +- llvm_tools/chroot.py | 2 +- llvm_tools/chroot_unittest.py | 2 +- llvm_tools/copy_helpers_to_chromiumos_overlay.py | 2 +- llvm_tools/custom_script_example.py | 2 +- llvm_tools/failure_modes.py | 2 +- llvm_tools/fetch_cros_sdk_rolls.py | 2 +- llvm_tools/get_llvm_hash.py | 2 +- llvm_tools/get_llvm_hash_unittest.py | 2 +- 
llvm_tools/get_upstream_patch.py | 2 +- llvm_tools/git.py | 2 +- llvm_tools/git_llvm_rev.py | 2 +- llvm_tools/git_llvm_rev_test.py | 2 +- llvm_tools/git_unittest.py | 2 +- llvm_tools/llvm_bisection.py | 2 +- llvm_tools/llvm_bisection_unittest.py | 2 +- llvm_tools/llvm_local_bisection.sh | 2 +- llvm_tools/llvm_patch_management.py | 2 +- llvm_tools/llvm_patch_management_unittest.py | 2 +- llvm_tools/llvm_project.py | 2 +- llvm_tools/modify_a_tryjob.py | 2 +- llvm_tools/modify_a_tryjob_unittest.py | 2 +- llvm_tools/nightly_revert_checker.py | 2 +- llvm_tools/nightly_revert_checker_test.py | 2 +- llvm_tools/patch_manager.py | 2 +- llvm_tools/patch_manager_unittest.py | 2 +- llvm_tools/patch_sync/src/main.rs | 4 ++-- llvm_tools/patch_sync/src/version_control.rs | 6 +++--- llvm_tools/subprocess_helpers.py | 2 +- llvm_tools/test_helpers.py | 2 +- llvm_tools/update_chromeos_llvm_hash.py | 2 +- llvm_tools/update_chromeos_llvm_hash_unittest.py | 2 +- llvm_tools/update_packages_and_run_tests.py | 2 +- llvm_tools/update_packages_and_run_tests_unittest.py | 2 +- llvm_tools/update_tryjob_status.py | 2 +- llvm_tools/update_tryjob_status_unittest.py | 2 +- llvm_tools/upload_lexan_crashes_to_forcey.py | 2 +- llvm_tools/upload_lexan_crashes_to_forcey_test.py | 2 +- lock_machine.py | 2 +- make_root_writable.py | 2 +- orderfile/post_process_orderfile.py | 4 ++-- orderfile/post_process_orderfile_test.py | 2 +- pgo_tools/merge_profdata_and_upload.py | 2 +- pgo_tools/monitor_pgo_profiles.py | 2 +- pgo_tools/monitor_pgo_profiles_unittest.py | 2 +- remote_test.py | 2 +- run_tests_for.py | 2 +- rust_tools/rust_uprev.py | 4 ++-- rust_tools/rust_uprev_test.py | 2 +- rust_tools/rust_watch.py | 2 +- rust_tools/rust_watch_test.py | 2 +- seccomp_tools/mass_seccomp_editor/mass_seccomp_editor.py | 2 +- seccomp_tools/mass_seccomp_editor/test_mass_seccomp_editor.py | 2 +- tc_enter_chroot.py | 2 +- toolchain_utils_githooks/check-presubmit.py | 8 ++++---- update_telemetry_defaults.py | 2 +- upstream_workon/upstream_workon.bash | 2 +- 307 files changed, 325 insertions(+), 325 deletions(-) diff --git a/LICENSE b/LICENSE index 50bac5d3..2defaff9 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -// Copyright (c) 2011-2016 The Chromium OS Authors. All rights reserved. +// Copyright (c) 2011-2016 The ChromiumOS Authors. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are diff --git a/README.chromium b/README.chromium index acbbc5e6..57aa603c 100644 --- a/README.chromium +++ b/README.chromium @@ -7,11 +7,11 @@ Security Critical: no Description: This contains scripts used to help maintain the toolchain. These -include tools for downloading and building Chromium OS; building -custom versions of the toolchain inside Chromium OS; launching +include tools for downloading and building ChromiumOS; building +custom versions of the toolchain inside ChromiumOS; launching performance tests, analyzing the results and generating reports; running toolchain regression tests; and using binary search to isolate toolchain issues. NOTE: These tools are strictly for Chromium developers; none of them -ship on the final product (devices that run Chromium OS). +ship on the final product (devices that run ChromiumOS). diff --git a/README.md b/README.md index a318e38d..4a82ec1d 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # `toolchain-utils` -Various utilities used by the Chrome OS toolchain team. 
+Various utilities used by the ChromeOS toolchain team. ## Note diff --git a/afdo_metadata/README.md b/afdo_metadata/README.md index 2338404c..ad71c0b4 100644 --- a/afdo_metadata/README.md +++ b/afdo_metadata/README.md @@ -1,6 +1,6 @@ # Overview This directory contains JSON files describing metadata of AFDO profiles -used to compile packages (Chrome and kernel) in Chrome OS. +used to compile packages (Chrome and kernel) in ChromeOS. # Description of each JSON Files kernel_afdo.json contains the name of the latest AFDO profiles for each @@ -15,7 +15,7 @@ When a new profile (kernel or Chrome) is successfully uploaded to the production GS bucket, a bot submits to modify the corresponding JSON file to reflect the updates. -## Roll to Chrome OS +## Roll to ChromeOS There will be scheduler jobs listening to the changes made to these JSON files. When changes detected, buildbot will roll these changes into -corresponding Chrome OS packages. +corresponding ChromeOS packages. diff --git a/afdo_redaction/redact_profile.py b/afdo_redaction/redact_profile.py index 02bae928..285dbf53 100755 --- a/afdo_redaction/redact_profile.py +++ b/afdo_redaction/redact_profile.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2018 The Chromium OS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/afdo_redaction/redact_profile_test.py b/afdo_redaction/redact_profile_test.py index e2438972..26fda3fd 100755 --- a/afdo_redaction/redact_profile_test.py +++ b/afdo_redaction/redact_profile_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2018 The Chromium OS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/afdo_redaction/remove_cold_functions.py b/afdo_redaction/remove_cold_functions.py index 097085db..5a1b7439 100755 --- a/afdo_redaction/remove_cold_functions.py +++ b/afdo_redaction/remove_cold_functions.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. @@ -14,7 +14,7 @@ same sample count, we need to remove all of them in order to meet the target, so the result profile will always have less than or equal to the given number of functions. -The script is intended to be used on production Chrome OS profiles, after +The script is intended to be used on production ChromeOS profiles, after other redaction/trimming scripts. It can be used with given textual CWP and benchmark profiles, in order to analyze how many removed functions are from which profile (or both), which can be used an indicator of fairness diff --git a/afdo_redaction/remove_cold_functions_test.py b/afdo_redaction/remove_cold_functions_test.py index 14f946b0..839e5378 100755 --- a/afdo_redaction/remove_cold_functions_test.py +++ b/afdo_redaction/remove_cold_functions_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. 
# Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/afdo_redaction/remove_indirect_calls.py b/afdo_redaction/remove_indirect_calls.py index 0dc15077..a72e43b5 100755 --- a/afdo_redaction/remove_indirect_calls.py +++ b/afdo_redaction/remove_indirect_calls.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/afdo_redaction/remove_indirect_calls_test.py b/afdo_redaction/remove_indirect_calls_test.py index 164b284f..5f8d938c 100755 --- a/afdo_redaction/remove_indirect_calls_test.py +++ b/afdo_redaction/remove_indirect_calls_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/afdo_tools/bisection/afdo_prof_analysis.py b/afdo_tools/bisection/afdo_prof_analysis.py index ce8afd64..7f7c3cf2 100755 --- a/afdo_tools/bisection/afdo_prof_analysis.py +++ b/afdo_tools/bisection/afdo_prof_analysis.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py b/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py index b293b8aa..df334317 100755 --- a/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py +++ b/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/afdo_tools/bisection/afdo_prof_analysis_test.py b/afdo_tools/bisection/afdo_prof_analysis_test.py index 245edc33..3e6f41e0 100755 --- a/afdo_tools/bisection/afdo_prof_analysis_test.py +++ b/afdo_tools/bisection/afdo_prof_analysis_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/afdo_tools/bisection/state_assumption_external.sh b/afdo_tools/bisection/state_assumption_external.sh index 1ad78ee2..153aefa3 100755 --- a/afdo_tools/bisection/state_assumption_external.sh +++ b/afdo_tools/bisection/state_assumption_external.sh @@ -1,5 +1,5 @@ #!/bin/bash -eu -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/afdo_tools/bisection/state_assumption_interrupt.sh b/afdo_tools/bisection/state_assumption_interrupt.sh index eba3a4b4..7486137a 100755 --- a/afdo_tools/bisection/state_assumption_interrupt.sh +++ b/afdo_tools/bisection/state_assumption_interrupt.sh @@ -1,5 +1,5 @@ #!/bin/bash -eu -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/afdo_tools/generate_afdo_from_tryjob.py b/afdo_tools/generate_afdo_from_tryjob.py index 3ed578ea..11055146 100755 --- a/afdo_tools/generate_afdo_from_tryjob.py +++ b/afdo_tools/generate_afdo_from_tryjob.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/afdo_tools/run_afdo_tryjob.py b/afdo_tools/run_afdo_tryjob.py index e14cd918..3c5b0072 100755 --- a/afdo_tools/run_afdo_tryjob.py +++ b/afdo_tools/run_afdo_tryjob.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/afdo_tools/update_kernel_afdo b/afdo_tools/update_kernel_afdo index b3a9e6d1..60a15ea5 100755 --- a/afdo_tools/update_kernel_afdo +++ b/afdo_tools/update_kernel_afdo @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/android_merge_from_upstream.sh b/android_merge_from_upstream.sh index 7430b8dd..9a8c7dce 100755 --- a/android_merge_from_upstream.sh +++ b/android_merge_from_upstream.sh @@ -1,5 +1,5 @@ #!/bin/bash -eu -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # diff --git a/auto_delete_nightly_test_data.py b/auto_delete_nightly_test_data.py index 67841188..87bf661b 100755 --- a/auto_delete_nightly_test_data.py +++ b/auto_delete_nightly_test_data.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/bestflags/example_algorithms.py b/bestflags/example_algorithms.py index 9775d491..e16908a5 100644 --- a/bestflags/example_algorithms.py +++ b/bestflags/example_algorithms.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """An example main file running the algorithms. 
diff --git a/bestflags/flags.py b/bestflags/flags.py index b316421e..01a845ca 100644 --- a/bestflags/flags.py +++ b/bestflags/flags.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Manage bundles of flags used for the optimizing of ChromeOS. diff --git a/bestflags/flags_test.py b/bestflags/flags_test.py index dbbea77c..6e546621 100644 --- a/bestflags/flags_test.py +++ b/bestflags/flags_test.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for the classes in module 'flags'. diff --git a/bestflags/flags_util.py b/bestflags/flags_util.py index 20be57fb..436f9779 100644 --- a/bestflags/flags_util.py +++ b/bestflags/flags_util.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Utility functions to explore the neighbor flags. diff --git a/bestflags/generation.py b/bestflags/generation.py index 67c379f5..5c9cd649 100644 --- a/bestflags/generation.py +++ b/bestflags/generation.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A generation of a set of tasks. diff --git a/bestflags/generation_test.py b/bestflags/generation_test.py index 2e042d49..bc5a0b1b 100644 --- a/bestflags/generation_test.py +++ b/bestflags/generation_test.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Generation unittest. diff --git a/bestflags/genetic_algorithm.py b/bestflags/genetic_algorithm.py index deb83f12..2a1b68db 100644 --- a/bestflags/genetic_algorithm.py +++ b/bestflags/genetic_algorithm.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """The hill genetic algorithm. diff --git a/bestflags/hill_climb_best_neighbor.py b/bestflags/hill_climb_best_neighbor.py index 7bb5a7ff..dc8d15d1 100644 --- a/bestflags/hill_climb_best_neighbor.py +++ b/bestflags/hill_climb_best_neighbor.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A variation of the hill climbing algorithm. diff --git a/bestflags/iterative_elimination.py b/bestflags/iterative_elimination.py index 2f4c41d1..581a855c 100644 --- a/bestflags/iterative_elimination.py +++ b/bestflags/iterative_elimination.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. 
+# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Iterative flags elimination. diff --git a/bestflags/mock_task.py b/bestflags/mock_task.py index 6de2b35c..39ebf50c 100644 --- a/bestflags/mock_task.py +++ b/bestflags/mock_task.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """This module defines the common mock tasks used by various unit tests. diff --git a/bestflags/pipeline_process.py b/bestflags/pipeline_process.py index 31f5f21f..97230b9f 100644 --- a/bestflags/pipeline_process.py +++ b/bestflags/pipeline_process.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Pipeline process that encapsulates the actual content. diff --git a/bestflags/pipeline_process_test.py b/bestflags/pipeline_process_test.py index b9d84067..a6d784f5 100644 --- a/bestflags/pipeline_process_test.py +++ b/bestflags/pipeline_process_test.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Pipeline Process unittest. diff --git a/bestflags/pipeline_worker.py b/bestflags/pipeline_worker.py index e21ec2c8..1ac8ac03 100644 --- a/bestflags/pipeline_worker.py +++ b/bestflags/pipeline_worker.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """The pipeline_worker functions of the build and test stage of the framework. diff --git a/bestflags/pipeline_worker_test.py b/bestflags/pipeline_worker_test.py index e3de5e12..842fc542 100644 --- a/bestflags/pipeline_worker_test.py +++ b/bestflags/pipeline_worker_test.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unittest for the pipeline_worker functions in the build/test stage. diff --git a/bestflags/steering.py b/bestflags/steering.py index 320f7c37..41173e42 100644 --- a/bestflags/steering.py +++ b/bestflags/steering.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """The framework stage that produces the next generation of tasks to run. diff --git a/bestflags/steering_test.py b/bestflags/steering_test.py index c96e362f..8ad0b3cb 100644 --- a/bestflags/steering_test.py +++ b/bestflags/steering_test.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. 
# Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Steering stage unittest. diff --git a/bestflags/task.py b/bestflags/task.py index f055fc75..86a251f9 100644 --- a/bestflags/task.py +++ b/bestflags/task.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A reproducing entity. diff --git a/bestflags/task_test.py b/bestflags/task_test.py index 68a7bf78..fa43bc7d 100644 --- a/bestflags/task_test.py +++ b/bestflags/task_test.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Task unittest. diff --git a/bestflags/testing_batch.py b/bestflags/testing_batch.py index ffe19448..902500a3 100644 --- a/bestflags/testing_batch.py +++ b/bestflags/testing_batch.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Hill climbing unitest. diff --git a/binary_search_tool/MAINTENANCE b/binary_search_tool/MAINTENANCE index 8f96ff10..cc86b760 100644 --- a/binary_search_tool/MAINTENANCE +++ b/binary_search_tool/MAINTENANCE @@ -1,4 +1,4 @@ -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/__init__.py b/binary_search_tool/__init__.py index 76500def..af3cc751 100644 --- a/binary_search_tool/__init__.py +++ b/binary_search_tool/__init__.py @@ -1,4 +1,4 @@ # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/android/generate_cmd.sh b/binary_search_tool/android/generate_cmd.sh index 78a39b12..52c26b3c 100755 --- a/binary_search_tool/android/generate_cmd.sh +++ b/binary_search_tool/android/generate_cmd.sh @@ -1,6 +1,6 @@ #!/bin/bash -eu -# Copyright 2018 The Chromium OS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/binary_search_perforce.py b/binary_search_tool/binary_search_perforce.py index f2a3c8d5..e60c972a 100755 --- a/binary_search_tool/binary_search_perforce.py +++ b/binary_search_tool/binary_search_perforce.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/binary_search_tool/binary_search_state.py b/binary_search_tool/binary_search_state.py index 1ddd65ce..1dc2bb20 100755 --- a/binary_search_tool/binary_search_state.py +++ b/binary_search_tool/binary_search_state.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/bisect_driver.py b/binary_search_tool/bisect_driver.py index ac37ad9f..b94266c7 100644 --- a/binary_search_tool/bisect_driver.py +++ b/binary_search_tool/bisect_driver.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # diff --git a/binary_search_tool/common.py b/binary_search_tool/common.py index 85cd478b..a087ee93 100644 --- a/binary_search_tool/common.py +++ b/binary_search_tool/common.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/common/test_setup.sh b/binary_search_tool/common/test_setup.sh index 3ea73272..0abc64de 100755 --- a/binary_search_tool/common/test_setup.sh +++ b/binary_search_tool/common/test_setup.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2021 The Chromium OS Authors. All rights reserved. +# Copyright 2021 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # diff --git a/binary_search_tool/compiler_wrapper.py b/binary_search_tool/compiler_wrapper.py index 0fd92c67..a1dcb1b7 100755 --- a/binary_search_tool/compiler_wrapper.py +++ b/binary_search_tool/compiler_wrapper.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/cros_pkg/create_cleanup_script.py b/binary_search_tool/cros_pkg/create_cleanup_script.py index 62ee38f1..0c85a88c 100755 --- a/binary_search_tool/cros_pkg/create_cleanup_script.py +++ b/binary_search_tool/cros_pkg/create_cleanup_script.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/pass_mapping.py b/binary_search_tool/pass_mapping.py index 2678fd6d..618509b6 100644 --- a/binary_search_tool/pass_mapping.py +++ b/binary_search_tool/pass_mapping.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/binary_search_tool/run_bisect.py b/binary_search_tool/run_bisect.py index 249b9cf5..18669cc1 100755 --- a/binary_search_tool/run_bisect.py +++ b/binary_search_tool/run_bisect.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/run_bisect_tests.py b/binary_search_tool/run_bisect_tests.py index 9172d678..097c375b 100755 --- a/binary_search_tool/run_bisect_tests.py +++ b/binary_search_tool/run_bisect_tests.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/sysroot_wrapper/setup.sh b/binary_search_tool/sysroot_wrapper/setup.sh index 6b9b48f1..31cdf113 100755 --- a/binary_search_tool/sysroot_wrapper/setup.sh +++ b/binary_search_tool/sysroot_wrapper/setup.sh @@ -1,6 +1,6 @@ #!/bin/bash -u # -# Copyright 2021 The Chromium OS Authors. All rights reserved. +# Copyright 2021 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # diff --git a/binary_search_tool/sysroot_wrapper/testing_test.py b/binary_search_tool/sysroot_wrapper/testing_test.py index b5ceec1f..04b69b74 100755 --- a/binary_search_tool/sysroot_wrapper/testing_test.py +++ b/binary_search_tool/sysroot_wrapper/testing_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/__init__.py b/binary_search_tool/test/__init__.py index 76500def..af3cc751 100644 --- a/binary_search_tool/test/__init__.py +++ b/binary_search_tool/test/__init__.py @@ -1,4 +1,4 @@ # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/binary_search_tool_test.py b/binary_search_tool/test/binary_search_tool_test.py index 6f5b514e..493c2e35 100755 --- a/binary_search_tool/test/binary_search_tool_test.py +++ b/binary_search_tool/test/binary_search_tool_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/cmd_script.py b/binary_search_tool/test/cmd_script.py index bfd56052..2f026edd 100755 --- a/binary_search_tool/test/cmd_script.py +++ b/binary_search_tool/test/cmd_script.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. 
# Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/cmd_script_no_support.py b/binary_search_tool/test/cmd_script_no_support.py index badbedc8..0cc9fedc 100644 --- a/binary_search_tool/test/cmd_script_no_support.py +++ b/binary_search_tool/test/cmd_script_no_support.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/common.py b/binary_search_tool/test/common.py index cf5300f5..98f40096 100755 --- a/binary_search_tool/test/common.py +++ b/binary_search_tool/test/common.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/gen_init_list.py b/binary_search_tool/test/gen_init_list.py index bc5dd8fe..718ac877 100755 --- a/binary_search_tool/test/gen_init_list.py +++ b/binary_search_tool/test/gen_init_list.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/gen_obj.py b/binary_search_tool/test/gen_obj.py index 4f65c71b..7ea91788 100755 --- a/binary_search_tool/test/gen_obj.py +++ b/binary_search_tool/test/gen_obj.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/generate_cmd.py b/binary_search_tool/test/generate_cmd.py index 51b36b0a..08b8c646 100755 --- a/binary_search_tool/test/generate_cmd.py +++ b/binary_search_tool/test/generate_cmd.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/is_good.py b/binary_search_tool/test/is_good.py index 662921e8..8212aede 100755 --- a/binary_search_tool/test/is_good.py +++ b/binary_search_tool/test/is_good.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/is_good_noinc_prune.py b/binary_search_tool/test/is_good_noinc_prune.py index c0e42bb1..6329f493 100755 --- a/binary_search_tool/test/is_good_noinc_prune.py +++ b/binary_search_tool/test/is_good_noinc_prune.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. 
All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/switch_tmp.py b/binary_search_tool/test/switch_tmp.py index 0f3c4234..1d4ccc88 100755 --- a/binary_search_tool/test/switch_tmp.py +++ b/binary_search_tool/test/switch_tmp.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/switch_to_bad.py b/binary_search_tool/test/switch_to_bad.py index e3553eb6..3a1ec84f 100755 --- a/binary_search_tool/test/switch_to_bad.py +++ b/binary_search_tool/test/switch_to_bad.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/switch_to_bad_noinc_prune.py b/binary_search_tool/test/switch_to_bad_noinc_prune.py index 81b558e1..a390e9e2 100755 --- a/binary_search_tool/test/switch_to_bad_noinc_prune.py +++ b/binary_search_tool/test/switch_to_bad_noinc_prune.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/switch_to_bad_set_file.py b/binary_search_tool/test/switch_to_bad_set_file.py index 5b941c62..a0dbb67b 100755 --- a/binary_search_tool/test/switch_to_bad_set_file.py +++ b/binary_search_tool/test/switch_to_bad_set_file.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/switch_to_good.py b/binary_search_tool/test/switch_to_good.py index 97479329..50e0ddff 100755 --- a/binary_search_tool/test/switch_to_good.py +++ b/binary_search_tool/test/switch_to_good.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/switch_to_good_noinc_prune.py b/binary_search_tool/test/switch_to_good_noinc_prune.py index 0b91a0d8..5e00a634 100755 --- a/binary_search_tool/test/switch_to_good_noinc_prune.py +++ b/binary_search_tool/test/switch_to_good_noinc_prune.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/binary_search_tool/test/switch_to_good_set_file.py b/binary_search_tool/test/switch_to_good_set_file.py index 1cb05e0c..cc884ddc 100755 --- a/binary_search_tool/test/switch_to_good_set_file.py +++ b/binary_search_tool/test/switch_to_good_set_file.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/test_setup.py b/binary_search_tool/test/test_setup.py index ecc8eb97..fa4743a7 100755 --- a/binary_search_tool/test/test_setup.py +++ b/binary_search_tool/test/test_setup.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/binary_search_tool/test/test_setup_bad.py b/binary_search_tool/test/test_setup_bad.py index cbca3c21..1421009b 100755 --- a/binary_search_tool/test/test_setup_bad.py +++ b/binary_search_tool/test/test_setup_bad.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/build_chromeos.py b/build_chromeos.py index e275da1f..6f9c3682 100755 --- a/build_chromeos.py +++ b/build_chromeos.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/build_tc.py b/build_tc.py index 9b90f55c..00065f85 100755 --- a/build_tc.py +++ b/build_tc.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2010 The Chromium OS Authors. All rights reserved. +# Copyright 2010 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/buildbot_test_llvm.py b/buildbot_test_llvm.py index 1c7bb199..7698a07d 100755 --- a/buildbot_test_llvm.py +++ b/buildbot_test_llvm.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2017 The Chromium OS Authors. All rights reserved. +# Copyright 2017 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/buildbot_test_toolchains.py b/buildbot_test_toolchains.py index 6c3bfef4..3594fddb 100755 --- a/buildbot_test_toolchains.py +++ b/buildbot_test_toolchains.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2016 The Chromium OS Authors. All rights reserved. +# Copyright 2016 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/chromiumos_image_diff.py b/chromiumos_image_diff.py index 3d54100d..0f22ff35 100755 --- a/chromiumos_image_diff.py +++ b/chromiumos_image_diff.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/compiler_wrapper/README.md b/compiler_wrapper/README.md index 12ae3131..0228e27d 100644 --- a/compiler_wrapper/README.md +++ b/compiler_wrapper/README.md @@ -7,7 +7,7 @@ Build is split into 2 steps via separate commands: - build: builds the actual go binary, assuming it is executed from the folder created by `bundle.py`. -This allows to copy the sources to a Chrome OS / Android +This allows to copy the sources to a ChromeOS / Android package, including the build script, and then build from there without a dependency on toolchain-utils itself. @@ -24,7 +24,7 @@ Run `install_compiler_wrapper.sh` to install the new wrapper in the chroot: Then perform the tests, e.g. build with the new compiler. -## Updating the Wrapper for Chrome OS +## Updating the Wrapper for ChromeOS To update the wrapper for everyone, the new wrapper configuration must be copied into chromiumos-overlay, and new revisions of the gcc and llvm ebuilds must be diff --git a/compiler_wrapper/android_config_test.go b/compiler_wrapper/android_config_test.go index c61490f4..6e341dcc 100644 --- a/compiler_wrapper/android_config_test.go +++ b/compiler_wrapper/android_config_test.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Chromium OS Authors. All rights reserved. +// Copyright 2019 The ChromiumOS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. diff --git a/compiler_wrapper/bisect_flag.go b/compiler_wrapper/bisect_flag.go index adfa8b03..139862e3 100644 --- a/compiler_wrapper/bisect_flag.go +++ b/compiler_wrapper/bisect_flag.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Chromium OS Authors. All rights reserved. +// Copyright 2019 The ChromiumOS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. diff --git a/compiler_wrapper/bisect_flag_test.go b/compiler_wrapper/bisect_flag_test.go index cc203a0f..b7c2dc7e 100644 --- a/compiler_wrapper/bisect_flag_test.go +++ b/compiler_wrapper/bisect_flag_test.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Chromium OS Authors. All rights reserved. +// Copyright 2019 The ChromiumOS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. diff --git a/compiler_wrapper/build.py b/compiler_wrapper/build.py index f98b2549..44c398bf 100755 --- a/compiler_wrapper/build.py +++ b/compiler_wrapper/build.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. @@ -52,7 +52,7 @@ def calc_go_args(args, version, build_dir): 'main.Version=' + version, ] - # If the wrapper is intended for Chrome OS, we need to use libc's exec. + # If the wrapper is intended for ChromeOS, we need to use libc's exec. 
   extra_args = []
   if not args.static:
     extra_args += ['-tags', 'libc_exec']
diff --git a/compiler_wrapper/bundle.README b/compiler_wrapper/bundle.README
index 10a28ee0..d526c149 100644
--- a/compiler_wrapper/bundle.README
+++ b/compiler_wrapper/bundle.README
@@ -1,4 +1,4 @@
-Copyright 2019 The Chromium OS Authors. All rights reserved.
+Copyright 2019 The ChromiumOS Authors. All rights reserved.
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
diff --git a/compiler_wrapper/bundle.py b/compiler_wrapper/bundle.py
index 6df82146..4d5f5010 100755
--- a/compiler_wrapper/bundle.py
+++ b/compiler_wrapper/bundle.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/compiler_wrapper/ccache_flag.go b/compiler_wrapper/ccache_flag.go
index 02fb43ac..7d19da88 100644
--- a/compiler_wrapper/ccache_flag.go
+++ b/compiler_wrapper/ccache_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/ccache_flag_test.go b/compiler_wrapper/ccache_flag_test.go
index d6eeb926..0d634b61 100644
--- a/compiler_wrapper/ccache_flag_test.go
+++ b/compiler_wrapper/ccache_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/clang_flags.go b/compiler_wrapper/clang_flags.go
index e25ed74c..fea14249 100644
--- a/compiler_wrapper/clang_flags.go
+++ b/compiler_wrapper/clang_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/clang_flags_test.go b/compiler_wrapper/clang_flags_test.go
index 23aed7ef..1a9db073 100644
--- a/compiler_wrapper/clang_flags_test.go
+++ b/compiler_wrapper/clang_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/clang_syntax_flag.go b/compiler_wrapper/clang_syntax_flag.go
index 53240c7f..6874bd1f 100644
--- a/compiler_wrapper/clang_syntax_flag.go
+++ b/compiler_wrapper/clang_syntax_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/clang_syntax_flag_test.go b/compiler_wrapper/clang_syntax_flag_test.go
index 8ee9c223..d4077098 100644
--- a/compiler_wrapper/clang_syntax_flag_test.go
+++ b/compiler_wrapper/clang_syntax_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/clang_tidy_flag.go b/compiler_wrapper/clang_tidy_flag.go
index 01387fd6..8c3712d6 100644
--- a/compiler_wrapper/clang_tidy_flag.go
+++ b/compiler_wrapper/clang_tidy_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/clang_tidy_flag_test.go b/compiler_wrapper/clang_tidy_flag_test.go
index 4293bb21..2639fe96 100644
--- a/compiler_wrapper/clang_tidy_flag_test.go
+++ b/compiler_wrapper/clang_tidy_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/command.go b/compiler_wrapper/command.go
index eb040b25..253251ab 100644
--- a/compiler_wrapper/command.go
+++ b/compiler_wrapper/command.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/command_test.go b/compiler_wrapper/command_test.go
index 18d05a9c..622d070f 100644
--- a/compiler_wrapper/command_test.go
+++ b/compiler_wrapper/command_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/compile_with_fallback.go b/compiler_wrapper/compile_with_fallback.go
index 8b4b5b4d..1e5a95a4 100644
--- a/compiler_wrapper/compile_with_fallback.go
+++ b/compiler_wrapper/compile_with_fallback.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/compile_with_fallback_test.go b/compiler_wrapper/compile_with_fallback_test.go
index f9da441a..54c0c498 100644
--- a/compiler_wrapper/compile_with_fallback_test.go
+++ b/compiler_wrapper/compile_with_fallback_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/compiler_wrapper.go b/compiler_wrapper/compiler_wrapper.go
index 986eabab..2581cb0b 100644
--- a/compiler_wrapper/compiler_wrapper.go
+++ b/compiler_wrapper/compiler_wrapper.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/compiler_wrapper_test.go b/compiler_wrapper/compiler_wrapper_test.go
index 74fe3f58..b5a85c89 100644
--- a/compiler_wrapper/compiler_wrapper_test.go
+++ b/compiler_wrapper/compiler_wrapper_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/config.go b/compiler_wrapper/config.go
index 6c28287c..fdd17763 100644
--- a/compiler_wrapper/config.go
+++ b/compiler_wrapper/config.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/config_test.go b/compiler_wrapper/config_test.go
index 86a78928..1d2cafaf 100644
--- a/compiler_wrapper/config_test.go
+++ b/compiler_wrapper/config_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/cros_hardened_config_test.go b/compiler_wrapper/cros_hardened_config_test.go
index 337b27fe..c619e712 100644
--- a/compiler_wrapper/cros_hardened_config_test.go
+++ b/compiler_wrapper/cros_hardened_config_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/cros_host_config_test.go b/compiler_wrapper/cros_host_config_test.go
index 4f3b5cb2..0fd479ea 100644
--- a/compiler_wrapper/cros_host_config_test.go
+++ b/compiler_wrapper/cros_host_config_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/cros_llvm_next_flags.go b/compiler_wrapper/cros_llvm_next_flags.go
index 870e2885..e0b3179f 100644
--- a/compiler_wrapper/cros_llvm_next_flags.go
+++ b/compiler_wrapper/cros_llvm_next_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Chromium OS Authors. All rights reserved.
+// Copyright 2020 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
@@ -7,7 +7,7 @@ package main
-// This file defines extra flags for llvm-next testing for Chrome OS. Importantly, these flags don't
+// This file defines extra flags for llvm-next testing for ChromeOS. Importantly, these flags don't
 // apply to Android's llvm-next wrapper. Android's toolchain-utils copy has a
 // `android_llvm_next_flags.go` file downstream that defines its llvm-next arguments. As you can
 // probably infer, `android_llvm_next_flags.go` is only compiled if the `android_llvm_next_flags`
diff --git a/compiler_wrapper/cros_nonhardened_config_test.go b/compiler_wrapper/cros_nonhardened_config_test.go
index 3d413fb8..df14e81b 100644
--- a/compiler_wrapper/cros_nonhardened_config_test.go
+++ b/compiler_wrapper/cros_nonhardened_config_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/disable_werror_flag.go b/compiler_wrapper/disable_werror_flag.go
index cb770b7b..26248f4d 100644
--- a/compiler_wrapper/disable_werror_flag.go
+++ b/compiler_wrapper/disable_werror_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/disable_werror_flag_test.go b/compiler_wrapper/disable_werror_flag_test.go
index 592c35ba..b96c0d53 100644
--- a/compiler_wrapper/disable_werror_flag_test.go
+++ b/compiler_wrapper/disable_werror_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/env.go b/compiler_wrapper/env.go
index c8f6ceb3..1b4d9b31 100644
--- a/compiler_wrapper/env.go
+++ b/compiler_wrapper/env.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/env_test.go b/compiler_wrapper/env_test.go
index b5bf65a3..4864f4db 100644
--- a/compiler_wrapper/env_test.go
+++ b/compiler_wrapper/env_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/errors.go b/compiler_wrapper/errors.go
index 18e0facf..468fb6a9 100644
--- a/compiler_wrapper/errors.go
+++ b/compiler_wrapper/errors.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/errors_test.go b/compiler_wrapper/errors_test.go
index 957fae3a..71e35cdb 100644
--- a/compiler_wrapper/errors_test.go
+++ b/compiler_wrapper/errors_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/gcc_flags.go b/compiler_wrapper/gcc_flags.go
index 2c553e6b..9adc9c0d 100644
--- a/compiler_wrapper/gcc_flags.go
+++ b/compiler_wrapper/gcc_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/gcc_flags_test.go b/compiler_wrapper/gcc_flags_test.go
index adf72018..2dc8b306 100644
--- a/compiler_wrapper/gcc_flags_test.go
+++ b/compiler_wrapper/gcc_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/go_exec.go b/compiler_wrapper/go_exec.go
index 2f2e5ad9..74691484 100644
--- a/compiler_wrapper/go_exec.go
+++ b/compiler_wrapper/go_exec.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Chromium OS Authors. All rights reserved.
+// Copyright 2020 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/goldenutil_test.go b/compiler_wrapper/goldenutil_test.go
index 2b391d73..3a6cc7cb 100644
--- a/compiler_wrapper/goldenutil_test.go
+++ b/compiler_wrapper/goldenutil_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/install_compiler_wrapper.sh b/compiler_wrapper/install_compiler_wrapper.sh
index 3a5b7417..f05f2b4c 100755
--- a/compiler_wrapper/install_compiler_wrapper.sh
+++ b/compiler_wrapper/install_compiler_wrapper.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/compiler_wrapper/kernel_bug.go b/compiler_wrapper/kernel_bug.go
index 55817cb6..a1c85a71 100644
--- a/compiler_wrapper/kernel_bug.go
+++ b/compiler_wrapper/kernel_bug.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Chromium OS Authors. All rights reserved.
+// Copyright 2021 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 package main
diff --git a/compiler_wrapper/kernel_bug_test.go b/compiler_wrapper/kernel_bug_test.go
index 3c7bccf1..39f2dbf4 100644
--- a/compiler_wrapper/kernel_bug_test.go
+++ b/compiler_wrapper/kernel_bug_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Chromium OS Authors. All rights reserved.
+// Copyright 2021 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 package main
diff --git a/compiler_wrapper/libc_exec.go b/compiler_wrapper/libc_exec.go
index a7a561bc..5922c6e8 100644
--- a/compiler_wrapper/libc_exec.go
+++ b/compiler_wrapper/libc_exec.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/libgcc_flags.go b/compiler_wrapper/libgcc_flags.go
index 72fa8381..5e599504 100644
--- a/compiler_wrapper/libgcc_flags.go
+++ b/compiler_wrapper/libgcc_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Chromium OS Authors. All rights reserved.
+// Copyright 2021 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/libgcc_flags_test.go b/compiler_wrapper/libgcc_flags_test.go
index 717c0e52..ce6456fa 100644
--- a/compiler_wrapper/libgcc_flags_test.go
+++ b/compiler_wrapper/libgcc_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Chromium OS Authors. All rights reserved.
+// Copyright 2021 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/main.go b/compiler_wrapper/main.go
index 046cf5a5..a0981dbe 100644
--- a/compiler_wrapper/main.go
+++ b/compiler_wrapper/main.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/pie_flags.go b/compiler_wrapper/pie_flags.go
index 9675f6ee..e4110827 100644
--- a/compiler_wrapper/pie_flags.go
+++ b/compiler_wrapper/pie_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/pie_flags_test.go b/compiler_wrapper/pie_flags_test.go
index 77a0fc8f..d0be08fe 100644
--- a/compiler_wrapper/pie_flags_test.go
+++ b/compiler_wrapper/pie_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/print_cmdline_flag.go b/compiler_wrapper/print_cmdline_flag.go
index e2092edd..c1375358 100644
--- a/compiler_wrapper/print_cmdline_flag.go
+++ b/compiler_wrapper/print_cmdline_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/print_cmdline_flag_test.go b/compiler_wrapper/print_cmdline_flag_test.go
index 8f6fc226..8e79435d 100644
--- a/compiler_wrapper/print_cmdline_flag_test.go
+++ b/compiler_wrapper/print_cmdline_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/print_config_flag.go b/compiler_wrapper/print_config_flag.go
index 9ab9f6bc..bcb23943 100644
--- a/compiler_wrapper/print_config_flag.go
+++ b/compiler_wrapper/print_config_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/print_config_flag_test.go b/compiler_wrapper/print_config_flag_test.go
index 63451edb..d6868235 100644
--- a/compiler_wrapper/print_config_flag_test.go
+++ b/compiler_wrapper/print_config_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/remote_build_flag_test.go b/compiler_wrapper/remote_build_flag_test.go
index 4a894179..eecfb400 100644
--- a/compiler_wrapper/remote_build_flag_test.go
+++ b/compiler_wrapper/remote_build_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/remote_build_flags.go b/compiler_wrapper/remote_build_flags.go
index fc26c93f..7cbddfdc 100644
--- a/compiler_wrapper/remote_build_flags.go
+++ b/compiler_wrapper/remote_build_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/reset_compiler_wrapper.sh b/compiler_wrapper/reset_compiler_wrapper.sh
index 523e972d..cc1ec7db 100755
--- a/compiler_wrapper/reset_compiler_wrapper.sh
+++ b/compiler_wrapper/reset_compiler_wrapper.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -eux
 #
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/compiler_wrapper/rusage_flag.go b/compiler_wrapper/rusage_flag.go
index 63469602..b2c2a4aa 100644
--- a/compiler_wrapper/rusage_flag.go
+++ b/compiler_wrapper/rusage_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/rusage_flag_test.go b/compiler_wrapper/rusage_flag_test.go
index 439cfd15..6c264fd7 100644
--- a/compiler_wrapper/rusage_flag_test.go
+++ b/compiler_wrapper/rusage_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/sanitizer_flags.go b/compiler_wrapper/sanitizer_flags.go
index da0a64b3..5d517e49 100644
--- a/compiler_wrapper/sanitizer_flags.go
+++ b/compiler_wrapper/sanitizer_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/sanitizer_flags_test.go b/compiler_wrapper/sanitizer_flags_test.go
index a401d58e..8b22a05e 100644
--- a/compiler_wrapper/sanitizer_flags_test.go
+++ b/compiler_wrapper/sanitizer_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/stackprotector_flags.go b/compiler_wrapper/stackprotector_flags.go
index 24605720..a41bdaf5 100644
--- a/compiler_wrapper/stackprotector_flags.go
+++ b/compiler_wrapper/stackprotector_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/stackprotector_flags_test.go b/compiler_wrapper/stackprotector_flags_test.go
index a8757579..00d511c7 100644
--- a/compiler_wrapper/stackprotector_flags_test.go
+++ b/compiler_wrapper/stackprotector_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/sysroot_flag.go b/compiler_wrapper/sysroot_flag.go
index e0583b22..58d010da 100644
--- a/compiler_wrapper/sysroot_flag.go
+++ b/compiler_wrapper/sysroot_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/sysroot_flag_test.go b/compiler_wrapper/sysroot_flag_test.go
index b05a627e..579d76db 100644
--- a/compiler_wrapper/sysroot_flag_test.go
+++ b/compiler_wrapper/sysroot_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/testutil_test.go b/compiler_wrapper/testutil_test.go
index 035f2373..ff8c3d56 100644
--- a/compiler_wrapper/testutil_test.go
+++ b/compiler_wrapper/testutil_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/thumb_flags.go b/compiler_wrapper/thumb_flags.go
index 0edaf4ff..2afd15a1 100644
--- a/compiler_wrapper/thumb_flags.go
+++ b/compiler_wrapper/thumb_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/thumb_flags_test.go b/compiler_wrapper/thumb_flags_test.go
index 2e8f7e66..23487642 100644
--- a/compiler_wrapper/thumb_flags_test.go
+++ b/compiler_wrapper/thumb_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/unsupported_flags.go b/compiler_wrapper/unsupported_flags.go
index 48fee2f5..364ee07c 100644
--- a/compiler_wrapper/unsupported_flags.go
+++ b/compiler_wrapper/unsupported_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/unsupported_flags_test.go b/compiler_wrapper/unsupported_flags_test.go
index a32eb521..e2441141 100644
--- a/compiler_wrapper/unsupported_flags_test.go
+++ b/compiler_wrapper/unsupported_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/x64_flags.go b/compiler_wrapper/x64_flags.go
index 40505cf8..3d0db701 100644
--- a/compiler_wrapper/x64_flags.go
+++ b/compiler_wrapper/x64_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/x64_flags_test.go b/compiler_wrapper/x64_flags_test.go
index fd93728f..5261ee83 100644
--- a/compiler_wrapper/x64_flags_test.go
+++ b/compiler_wrapper/x64_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/cros_utils/__init__.py b/cros_utils/__init__.py
index 4c4e5554..dc696529 100644
--- a/cros_utils/__init__.py
+++ b/cros_utils/__init__.py
@@ -1,4 +1,4 @@
 # -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/bugs.py b/cros_utils/bugs.py
index 88fb7675..8c32d84e 100755
--- a/cros_utils/bugs.py
+++ b/cros_utils/bugs.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/bugs_test.py b/cros_utils/bugs_test.py
index 03dee64d..daab4af5 100755
--- a/cros_utils/bugs_test.py
+++ b/cros_utils/bugs_test.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/buildbot_utils.py b/cros_utils/buildbot_utils.py
index b600c6aa..2da5c5e4 100644
--- a/cros_utils/buildbot_utils.py
+++ b/cros_utils/buildbot_utils.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Copyright 2017 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/buildbot_utils_unittest.py b/cros_utils/buildbot_utils_unittest.py
index c615c95f..42b5a8e3 100755
--- a/cros_utils/buildbot_utils_unittest.py
+++ b/cros_utils/buildbot_utils_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/command_executer.py b/cros_utils/command_executer.py
index cc0f3372..fe21f625 100755
--- a/cros_utils/command_executer.py
+++ b/cros_utils/command_executer.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/command_executer_timeout_test.py b/cros_utils/command_executer_timeout_test.py
index 1c9c74cd..6efbee74 100755
--- a/cros_utils/command_executer_timeout_test.py
+++ b/cros_utils/command_executer_timeout_test.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/command_executer_unittest.py b/cros_utils/command_executer_unittest.py
index 22331ae0..7b023534 100755
--- a/cros_utils/command_executer_unittest.py
+++ b/cros_utils/command_executer_unittest.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/constants.py b/cros_utils/constants.py
index b12175bb..58c0688e 100644
--- a/cros_utils/constants.py
+++ b/cros_utils/constants.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/device_setup_utils.py b/cros_utils/device_setup_utils.py
index 61dbba27..1a2e6cb3 100644
--- a/cros_utils/device_setup_utils.py
+++ b/cros_utils/device_setup_utils.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/device_setup_utils_unittest.py b/cros_utils/device_setup_utils_unittest.py
index 12a70811..f546e881 100755
--- a/cros_utils/device_setup_utils_unittest.py
+++ b/cros_utils/device_setup_utils_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/email_sender.py b/cros_utils/email_sender.py
index df8afbc4..a4ddb2b5 100755
--- a/cros_utils/email_sender.py
+++ b/cros_utils/email_sender.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/email_sender_unittest.py b/cros_utils/email_sender_unittest.py
index ae41f143..92519845 100755
--- a/cros_utils/email_sender_unittest.py
+++ b/cros_utils/email_sender_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/file_utils.py b/cros_utils/file_utils.py
index f0e4064c..437deadb 100644
--- a/cros_utils/file_utils.py
+++ b/cros_utils/file_utils.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/html_tools.py b/cros_utils/html_tools.py
index 688955ff..c23995b4 100644
--- a/cros_utils/html_tools.py
+++ b/cros_utils/html_tools.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/locks.py b/cros_utils/locks.py
index 848e23fc..365fe044 100644
--- a/cros_utils/locks.py
+++ b/cros_utils/locks.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/logger.py b/cros_utils/logger.py
index e304fe12..16ba8971 100644
--- a/cros_utils/logger.py
+++ b/cros_utils/logger.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/machines.py b/cros_utils/machines.py
index 89b51b01..0eb6d378 100644
--- a/cros_utils/machines.py
+++ b/cros_utils/machines.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/misc.py b/cros_utils/misc.py
index a0d0de73..44935e9e 100644
--- a/cros_utils/misc.py
+++ b/cros_utils/misc.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/misc_test.py b/cros_utils/misc_test.py
index 21a545e9..b47644cf 100755
--- a/cros_utils/misc_test.py
+++ b/cros_utils/misc_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/no_pseudo_terminal_test.py b/cros_utils/no_pseudo_terminal_test.py
index 10fd9608..9d6a294c 100755
--- a/cros_utils/no_pseudo_terminal_test.py
+++ b/cros_utils/no_pseudo_terminal_test.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/perf_diff.py b/cros_utils/perf_diff.py
index b8ddb0c4..d2bb7221 100755
--- a/cros_utils/perf_diff.py
+++ b/cros_utils/perf_diff.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/tabulator.py b/cros_utils/tabulator.py
index 1a3fd4a7..1a46cca2 100644
--- a/cros_utils/tabulator.py
+++ b/cros_utils/tabulator.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/tabulator_test.py b/cros_utils/tabulator_test.py
index 9dd4828e..5a5d909e 100755
--- a/cros_utils/tabulator_test.py
+++ b/cros_utils/tabulator_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2012 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/timeline.py b/cros_utils/timeline.py
index cce0b05c..af844c0a 100644
--- a/cros_utils/timeline.py
+++ b/cros_utils/timeline.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/timeline_test.py b/cros_utils/timeline_test.py
index 8a10e549..337a6676 100755
--- a/cros_utils/timeline_test.py
+++ b/cros_utils/timeline_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/tiny_render.py b/cros_utils/tiny_render.py
index 629e7719..13463d10 100644
--- a/cros_utils/tiny_render.py
+++ b/cros_utils/tiny_render.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/tiny_render_test.py b/cros_utils/tiny_render_test.py
index 114a1796..93ad00e7 100755
--- a/cros_utils/tiny_render_test.py
+++ b/cros_utils/tiny_render_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/toolchain_utils.sh b/cros_utils/toolchain_utils.sh
index 5e9a2a32..78b6dd58 100644
--- a/cros_utils/toolchain_utils.sh
+++ b/cros_utils/toolchain_utils.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2012 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index 0413b593..8b918934 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index b5912c11..16bef78b 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 9d815b80..d35a53bc 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/benchmark_unittest.py b/crosperf/benchmark_unittest.py
index 70508b19..0f5d1980 100755
--- a/crosperf/benchmark_unittest.py
+++ b/crosperf/benchmark_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/column_chart.py b/crosperf/column_chart.py
index 400979ee..6063421d 100644
--- a/crosperf/column_chart.py
+++ b/crosperf/column_chart.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/compare_machines.py b/crosperf/compare_machines.py
index c73f8756..c25fd5ab 100644
--- a/crosperf/compare_machines.py
+++ b/crosperf/compare_machines.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/config.py b/crosperf/config.py
index 61ad9c1a..171f98af 100644
--- a/crosperf/config.py
+++ b/crosperf/config.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/config_unittest.py b/crosperf/config_unittest.py
index 208f44dc..05592d0b 100755
--- a/crosperf/config_unittest.py
+++ b/crosperf/config_unittest.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/crosperf b/crosperf/crosperf
index c98f2dd4..313a65ff 100755
--- a/crosperf/crosperf
+++ b/crosperf/crosperf
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/crosperf.py b/crosperf/crosperf.py
index f195b13a..eaeceae8 100755
--- a/crosperf/crosperf.py
+++ b/crosperf/crosperf.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/crosperf_autolock.py b/crosperf/crosperf_autolock.py
index b593fa9c..7fb86b2c 100755
--- a/crosperf/crosperf_autolock.py
+++ b/crosperf/crosperf_autolock.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
index 774159ff..26f6b6a9 100755
--- a/crosperf/crosperf_unittest.py
+++ b/crosperf/crosperf_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2014 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/download_images.py b/crosperf/download_images.py
index 8e1bad11..51c8325e 100644
--- a/crosperf/download_images.py
+++ b/crosperf/download_images.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2014-2015 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2014-2015 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/download_images_buildid_test.py b/crosperf/download_images_buildid_test.py
index fc37f2c1..036f1442 100755
--- a/crosperf/download_images_buildid_test.py
+++ b/crosperf/download_images_buildid_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/download_images_unittest.py b/crosperf/download_images_unittest.py
index 62b8d891..73ac8d67 100755
--- a/crosperf/download_images_unittest.py
+++ b/crosperf/download_images_unittest.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index e919f6ee..0cf01db7 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index a9594a20..882f652f 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index d52f2a55..139e69ab 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py
index d2831bda..18eced64 100644
--- a/crosperf/experiment_file.py
+++ b/crosperf/experiment_file.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2011 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py
index 0d4e1e67..71269ad6 100755
--- a/crosperf/experiment_file_unittest.py
+++ b/crosperf/experiment_file_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2011 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment_files/telemetry_perf_perf b/crosperf/experiment_files/telemetry_perf_perf
index acdf96d0..dc062710 100755
--- a/crosperf/experiment_files/telemetry_perf_perf
+++ b/crosperf/experiment_files/telemetry_perf_perf
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
@@ -12,7 +12,7 @@
 # Perf will run for the entire benchmark run, so results should be interpreted
 # in that context. i.e, if this shows a 3% overhead for a particular perf
 # command, that overhead would only be seen during the 2 seconds of measurement
-# during a Chrome OS Wide Profiling collection.
+# during a ChromeOS Wide Profiling collection.
 set -e
 board=xxx
 #
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index 6daef780..e0ba4a91 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2011 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py
index 31d02e71..a39f9f1f 100755
--- a/crosperf/experiment_runner_unittest.py
+++ b/crosperf/experiment_runner_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2014 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment_status.py b/crosperf/experiment_status.py
index 2ac47c74..3207d4a5 100644
--- a/crosperf/experiment_status.py
+++ b/crosperf/experiment_status.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/field.py b/crosperf/field.py
index f6300f9f..51dd8732 100644
--- a/crosperf/field.py
+++ b/crosperf/field.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/flag_test_unittest.py b/crosperf/flag_test_unittest.py
index 1e77c8a5..d4fec8a0 100755
--- a/crosperf/flag_test_unittest.py
+++ b/crosperf/flag_test_unittest.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/generate_report.py b/crosperf/generate_report.py
index bae365dc..3d4732c0 100755
--- a/crosperf/generate_report.py
+++ b/crosperf/generate_report.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/generate_report_unittest.py b/crosperf/generate_report_unittest.py
index 8c3510a9..a3e5ad91 100755
--- a/crosperf/generate_report_unittest.py
+++ b/crosperf/generate_report_unittest.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/help.py b/crosperf/help.py
index 4409b770..d9624d07 100644
--- a/crosperf/help.py
+++ b/crosperf/help.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/image_checksummer.py b/crosperf/image_checksummer.py
index 8ac5be25..de3fc15a 100644
--- a/crosperf/image_checksummer.py
+++ b/crosperf/image_checksummer.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/label.py b/crosperf/label.py
index 30bf5f8c..588fb67e 100644
--- a/crosperf/label.py
+++ b/crosperf/label.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/machine_image_manager.py b/crosperf/machine_image_manager.py
index ffdd6436..5d6e6bd7 100644
--- a/crosperf/machine_image_manager.py
+++ b/crosperf/machine_image_manager.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/machine_image_manager_unittest.py b/crosperf/machine_image_manager_unittest.py
index fbbca7b6..dd10a0d0 100755
--- a/crosperf/machine_image_manager_unittest.py
+++ b/crosperf/machine_image_manager_unittest.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index aaf09bf5..f342794b 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
index f47cc881..aff18480 100755
--- a/crosperf/machine_manager_unittest.py
+++ b/crosperf/machine_manager_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2012 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py
index f44ed87c..7ee74a81 100644
--- a/crosperf/mock_instance.py
+++ b/crosperf/mock_instance.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 5525858c..33a6946d 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index d6953eed..f4090be4 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2011 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index 674745fb..210776ab 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py
index f259879d..efdd215b 100755
--- a/crosperf/results_organizer_unittest.py
+++ b/crosperf/results_organizer_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index dc80b53b..571584bd 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/results_report_templates.py b/crosperf/results_report_templates.py index ea411e21..43b935b2 100644 --- a/crosperf/results_report_templates.py +++ b/crosperf/results_report_templates.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2016 The Chromium OS Authors. All rights reserved. +# Copyright 2016 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py index 1e96ef97..3b7bc35b 100755 --- a/crosperf/results_report_unittest.py +++ b/crosperf/results_report_unittest.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2016 The Chromium OS Authors. All rights reserved. +# Copyright 2016 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py index 49c6344d..b9714529 100644 --- a/crosperf/schedv2.py +++ b/crosperf/schedv2.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2015 The Chromium OS Authors. All rights reserved. +# Copyright 2015 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/crosperf/schedv2_unittest.py b/crosperf/schedv2_unittest.py index 7b56d723..435742f6 100755 --- a/crosperf/schedv2_unittest.py +++ b/crosperf/schedv2_unittest.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2015 The Chromium OS Authors. All rights reserved. +# Copyright 2015 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/crosperf/settings.py b/crosperf/settings.py index 75c8d9ec..9aa6879b 100644 --- a/crosperf/settings.py +++ b/crosperf/settings.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py index 78834c63..4831f64d 100644 --- a/crosperf/settings_factory.py +++ b/crosperf/settings_factory.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py index 8277e870..195c17ff 100755 --- a/crosperf/settings_factory_unittest.py +++ b/crosperf/settings_factory_unittest.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2017 The Chromium OS Authors. All rights reserved. +# Copyright 2017 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/crosperf/settings_unittest.py b/crosperf/settings_unittest.py index e127552f..fb9b85f3 100755 --- a/crosperf/settings_unittest.py +++ b/crosperf/settings_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. 
+# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py index 6bd4ff39..fe6eca4b 100644 --- a/crosperf/suite_runner.py +++ b/crosperf/suite_runner.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py index c1eacb32..b080c91e 100755 --- a/crosperf/suite_runner_unittest.py +++ b/crosperf/suite_runner_unittest.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright (c) 2014 The Chromium OS Authors. All rights reserved. +# Copyright (c) 2014 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/crosperf/test_flag.py b/crosperf/test_flag.py index 6fa3b589..0b061f95 100644 --- a/crosperf/test_flag.py +++ b/crosperf/test_flag.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2011 The Chromium OS Authors. All rights reserved. +# Copyright 2011 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/crosperf/translate_xbuddy.py b/crosperf/translate_xbuddy.py index 80187f9b..9af95e63 100755 --- a/crosperf/translate_xbuddy.py +++ b/crosperf/translate_xbuddy.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/cwp/cr-os/fetch_gn_descs.py b/cwp/cr-os/fetch_gn_descs.py index 8a0b2e4e..e1b50cdf 100755 --- a/cwp/cr-os/fetch_gn_descs.py +++ b/cwp/cr-os/fetch_gn_descs.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/cwp/cr-os/fetch_gn_descs_test.py b/cwp/cr-os/fetch_gn_descs_test.py index b6fc0eeb..02941792 100755 --- a/cwp/cr-os/fetch_gn_descs_test.py +++ b/cwp/cr-os/fetch_gn_descs_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/debug_info_test/allowlist.py b/debug_info_test/allowlist.py index 9cf42af0..9205b37b 100644 --- a/debug_info_test/allowlist.py +++ b/debug_info_test/allowlist.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2018 The Chromium OS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/debug_info_test/check_cus.py b/debug_info_test/check_cus.py index 41123259..a83a9bc4 100644 --- a/debug_info_test/check_cus.py +++ b/debug_info_test/check_cus.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2018 The Chromium OS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/debug_info_test/check_exist.py b/debug_info_test/check_exist.py index 898dae45..768d09bd 100644 --- a/debug_info_test/check_exist.py +++ b/debug_info_test/check_exist.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2018 The Chromium OS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/debug_info_test/check_icf.py b/debug_info_test/check_icf.py index a46968e7..a79d5e80 100644 --- a/debug_info_test/check_icf.py +++ b/debug_info_test/check_icf.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2018 The Chromium OS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/debug_info_test/check_ngcc.py b/debug_info_test/check_ngcc.py index c86c220a..4a8241a0 100644 --- a/debug_info_test/check_ngcc.py +++ b/debug_info_test/check_ngcc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2018 The Chromium OS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/debug_info_test/debug_info_test.py b/debug_info_test/debug_info_test.py index ae7e9f48..52875b74 100755 --- a/debug_info_test/debug_info_test.py +++ b/debug_info_test/debug_info_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2018 The Chromium OS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/file_lock_machine.py b/file_lock_machine.py index 5bba4430..41650ea1 100755 --- a/file_lock_machine.py +++ b/file_lock_machine.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/file_lock_machine_test.py b/file_lock_machine_test.py index bc20a88b..d14deaf4 100755 --- a/file_lock_machine_test.py +++ b/file_lock_machine_test.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/go/chromeos/setup_chromeos_testing.py b/go/chromeos/setup_chromeos_testing.py index 8b535538..cbb8bc29 100755 --- a/go/chromeos/setup_chromeos_testing.py +++ b/go/chromeos/setup_chromeos_testing.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. 
All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/heatmaps/heat_map.py b/heatmaps/heat_map.py index a989ab70..64067b61 100755 --- a/heatmaps/heat_map.py +++ b/heatmaps/heat_map.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2015 The Chromium OS Authors. All rights reserved. +# Copyright 2015 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/heatmaps/heat_map_test.py b/heatmaps/heat_map_test.py index ad62cd91..aabb3cac 100755 --- a/heatmaps/heat_map_test.py +++ b/heatmaps/heat_map_test.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/heatmaps/heatmap_generator.py b/heatmaps/heatmap_generator.py index 0dd6ad28..ad1385f1 100644 --- a/heatmaps/heatmap_generator.py +++ b/heatmaps/heatmap_generator.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2018 The Chromium OS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/heatmaps/heatmap_generator_test.py b/heatmaps/heatmap_generator_test.py index 5008c653..0838ffc4 100755 --- a/heatmaps/heatmap_generator_test.py +++ b/heatmaps/heatmap_generator_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2018 The Chromium OS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/heatmaps/perf-to-inst-page.sh b/heatmaps/perf-to-inst-page.sh index d6acd5ed..6aa03eaf 100755 --- a/heatmaps/perf-to-inst-page.sh +++ b/heatmaps/perf-to-inst-page.sh @@ -1,5 +1,5 @@ #! /bin/bash -u -# Copyright 2015 The Chromium OS Authors. All rights reserved. +# Copyright 2015 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/image_chromeos.py b/image_chromeos.py index d5c404dd..64a6a81e 100755 --- a/image_chromeos.py +++ b/image_chromeos.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_extra/create_ebuild_file.py b/llvm_extra/create_ebuild_file.py index 058a270b..ec39fde5 100755 --- a/llvm_extra/create_ebuild_file.py +++ b/llvm_extra/create_ebuild_file.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2018 The Chromium OS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/llvm_extra/create_llvm_extra.sh b/llvm_extra/create_llvm_extra.sh index b58e0508..cd138ccf 100755 --- a/llvm_extra/create_llvm_extra.sh +++ b/llvm_extra/create_llvm_extra.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 The Chromium OS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/auto_llvm_bisection.py b/llvm_tools/auto_llvm_bisection.py index dbd8f37e..02fb7b93 100755 --- a/llvm_tools/auto_llvm_bisection.py +++ b/llvm_tools/auto_llvm_bisection.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/auto_llvm_bisection_unittest.py b/llvm_tools/auto_llvm_bisection_unittest.py index 3f7e821b..b134aa50 100755 --- a/llvm_tools/auto_llvm_bisection_unittest.py +++ b/llvm_tools/auto_llvm_bisection_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/bisect_clang_crashes.py b/llvm_tools/bisect_clang_crashes.py index d9b3d141..9a50f0f5 100755 --- a/llvm_tools/bisect_clang_crashes.py +++ b/llvm_tools/bisect_clang_crashes.py @@ -1,10 +1,10 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. -"""Fetches and submits the artifacts from Chrome OS toolchain's crash bucket. +"""Fetches and submits the artifacts from ChromeOS toolchain's crash bucket. """ import argparse diff --git a/llvm_tools/bisect_clang_crashes_unittest.py b/llvm_tools/bisect_clang_crashes_unittest.py index 238b674d..81ee31cd 100755 --- a/llvm_tools/bisect_clang_crashes_unittest.py +++ b/llvm_tools/bisect_clang_crashes_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/chroot.py b/llvm_tools/chroot.py index b10ddbac..31e26e74 100755 --- a/llvm_tools/chroot.py +++ b/llvm_tools/chroot.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/chroot_unittest.py b/llvm_tools/chroot_unittest.py index 9fb1d0c0..5c665de9 100755 --- a/llvm_tools/chroot_unittest.py +++ b/llvm_tools/chroot_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. 
# Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/copy_helpers_to_chromiumos_overlay.py b/llvm_tools/copy_helpers_to_chromiumos_overlay.py index 98f7b966..ee396316 100755 --- a/llvm_tools/copy_helpers_to_chromiumos_overlay.py +++ b/llvm_tools/copy_helpers_to_chromiumos_overlay.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/custom_script_example.py b/llvm_tools/custom_script_example.py index 38dff007..6251b971 100755 --- a/llvm_tools/custom_script_example.py +++ b/llvm_tools/custom_script_example.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/failure_modes.py b/llvm_tools/failure_modes.py index 1e05dfcf..13f0a99b 100644 --- a/llvm_tools/failure_modes.py +++ b/llvm_tools/failure_modes.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/fetch_cros_sdk_rolls.py b/llvm_tools/fetch_cros_sdk_rolls.py index b8fdf943..cf49c3e1 100755 --- a/llvm_tools/fetch_cros_sdk_rolls.py +++ b/llvm_tools/fetch_cros_sdk_rolls.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/get_llvm_hash.py b/llvm_tools/get_llvm_hash.py index f566f6f3..d5088079 100755 --- a/llvm_tools/get_llvm_hash.py +++ b/llvm_tools/get_llvm_hash.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/get_llvm_hash_unittest.py b/llvm_tools/get_llvm_hash_unittest.py index b7c9e972..7f3ad17a 100755 --- a/llvm_tools/get_llvm_hash_unittest.py +++ b/llvm_tools/get_llvm_hash_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/get_upstream_patch.py b/llvm_tools/get_upstream_patch.py index 5669b023..d036e8c2 100755 --- a/llvm_tools/get_upstream_patch.py +++ b/llvm_tools/get_upstream_patch.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/llvm_tools/git.py b/llvm_tools/git.py index 2fa99de8..ef22c7d4 100755 --- a/llvm_tools/git.py +++ b/llvm_tools/git.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/git_llvm_rev.py b/llvm_tools/git_llvm_rev.py index 8ca60dca..3f752210 100755 --- a/llvm_tools/git_llvm_rev.py +++ b/llvm_tools/git_llvm_rev.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/git_llvm_rev_test.py b/llvm_tools/git_llvm_rev_test.py index 0a6719c1..31d45544 100755 --- a/llvm_tools/git_llvm_rev_test.py +++ b/llvm_tools/git_llvm_rev_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/git_unittest.py b/llvm_tools/git_unittest.py index 7c654475..18fb60e8 100755 --- a/llvm_tools/git_unittest.py +++ b/llvm_tools/git_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/llvm_bisection.py b/llvm_tools/llvm_bisection.py index 0148efd2..3f1dde73 100755 --- a/llvm_tools/llvm_bisection.py +++ b/llvm_tools/llvm_bisection.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/llvm_bisection_unittest.py b/llvm_tools/llvm_bisection_unittest.py index 207f4c24..06807ecb 100755 --- a/llvm_tools/llvm_bisection_unittest.py +++ b/llvm_tools/llvm_bisection_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/llvm_local_bisection.sh b/llvm_tools/llvm_local_bisection.sh index f84c2410..04b7b0b7 100755 --- a/llvm_tools/llvm_local_bisection.sh +++ b/llvm_tools/llvm_local_bisection.sh @@ -1,6 +1,6 @@ #!/bin/bash -u # -*- coding: utf-8 -*- -# Copyright 2022 The Chromium OS Authors. All rights reserved. +# Copyright 2022 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/llvm_tools/llvm_patch_management.py b/llvm_tools/llvm_patch_management.py index 53ffc3c2..b7ac1973 100755 --- a/llvm_tools/llvm_patch_management.py +++ b/llvm_tools/llvm_patch_management.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # diff --git a/llvm_tools/llvm_patch_management_unittest.py b/llvm_tools/llvm_patch_management_unittest.py index 92dc64a9..78d55259 100755 --- a/llvm_tools/llvm_patch_management_unittest.py +++ b/llvm_tools/llvm_patch_management_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/llvm_project.py b/llvm_tools/llvm_project.py index e059ae29..3dba9ffe 100644 --- a/llvm_tools/llvm_project.py +++ b/llvm_tools/llvm_project.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/modify_a_tryjob.py b/llvm_tools/modify_a_tryjob.py index 519fb51e..cea81069 100755 --- a/llvm_tools/modify_a_tryjob.py +++ b/llvm_tools/modify_a_tryjob.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/modify_a_tryjob_unittest.py b/llvm_tools/modify_a_tryjob_unittest.py index c03a1e18..e01506e8 100755 --- a/llvm_tools/modify_a_tryjob_unittest.py +++ b/llvm_tools/modify_a_tryjob_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/nightly_revert_checker.py b/llvm_tools/nightly_revert_checker.py index 89485088..842d9c92 100755 --- a/llvm_tools/nightly_revert_checker.py +++ b/llvm_tools/nightly_revert_checker.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/nightly_revert_checker_test.py b/llvm_tools/nightly_revert_checker_test.py index f68513af..5e077f93 100755 --- a/llvm_tools/nightly_revert_checker_test.py +++ b/llvm_tools/nightly_revert_checker_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/llvm_tools/patch_manager.py b/llvm_tools/patch_manager.py index 303b0f39..a4b42109 100755 --- a/llvm_tools/patch_manager.py +++ b/llvm_tools/patch_manager.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/patch_manager_unittest.py b/llvm_tools/patch_manager_unittest.py index 25c68eee..452aea39 100755 --- a/llvm_tools/patch_manager_unittest.py +++ b/llvm_tools/patch_manager_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/patch_sync/src/main.rs b/llvm_tools/patch_sync/src/main.rs index 65637a47..5c11b453 100644 --- a/llvm_tools/patch_sync/src/main.rs +++ b/llvm_tools/patch_sync/src/main.rs @@ -173,7 +173,7 @@ fn transpose_subcmd(args: TransposeOpt) -> Result<()> { }); if args.verbose { - display_patches("New patches from Chromium OS", &new_cros_patches); + display_patches("New patches from ChromiumOS", &new_cros_patches); display_patches("New patches from Android", &new_android_patches); } @@ -281,7 +281,7 @@ enum Opt { #[structopt(long = "cros-checkout", parse(from_os_str))] cros_checkout_path: PathBuf, - /// Emails to send review requests to during Chromium OS upload. + /// Emails to send review requests to during ChromiumOS upload. /// Comma separated. #[structopt(long = "cros-rev")] cros_reviewers: Option<String>, diff --git a/llvm_tools/patch_sync/src/version_control.rs b/llvm_tools/patch_sync/src/version_control.rs index cfe88aec..f8ddbcaa 100644 --- a/llvm_tools/patch_sync/src/version_control.rs +++ b/llvm_tools/patch_sync/src/version_control.rs @@ -13,7 +13,7 @@ const CROS_MAIN_BRANCH: &str = "cros/main"; const ANDROID_MAIN_BRANCH: &str = "aosp/master"; // nocheck const WORK_BRANCH_NAME: &str = "__patch_sync_tmp"; -/// Context struct to keep track of both Chromium OS and Android checkouts. +/// Context struct to keep track of both ChromiumOS and Android checkouts. #[derive(Debug)] pub struct RepoSetupContext { pub cros_checkout: PathBuf, @@ -136,14 +136,14 @@ impl RepoSetupContext { .join("patches/PATCHES.json") } - /// Get the Chromium OS path to the PATCHES.json file + /// Get the ChromiumOS path to the PATCHES.json file pub fn cros_patches_path(&self) -> PathBuf { self.cros_checkout .join(&CHROMIUMOS_OVERLAY_REL_PATH) .join("sys-devel/llvm/files/PATCHES.json") } - /// Return the contents of the old PATCHES.json from Chromium OS + /// Return the contents of the old PATCHES.json from ChromiumOS pub fn old_cros_patch_contents(&self, hash: &str) -> Result<String> { Self::old_file_contents( hash, diff --git a/llvm_tools/subprocess_helpers.py b/llvm_tools/subprocess_helpers.py index 2e013780..ac36ea66 100644 --- a/llvm_tools/subprocess_helpers.py +++ b/llvm_tools/subprocess_helpers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/llvm_tools/test_helpers.py b/llvm_tools/test_helpers.py index 99448181..f9748e2a 100644 --- a/llvm_tools/test_helpers.py +++ b/llvm_tools/test_helpers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/update_chromeos_llvm_hash.py b/llvm_tools/update_chromeos_llvm_hash.py index 4e9b9104..50d8ecfb 100755 --- a/llvm_tools/update_chromeos_llvm_hash.py +++ b/llvm_tools/update_chromeos_llvm_hash.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/update_chromeos_llvm_hash_unittest.py b/llvm_tools/update_chromeos_llvm_hash_unittest.py index 2e93eae9..c1efc910 100755 --- a/llvm_tools/update_chromeos_llvm_hash_unittest.py +++ b/llvm_tools/update_chromeos_llvm_hash_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/update_packages_and_run_tests.py b/llvm_tools/update_packages_and_run_tests.py index 2e4a9058..258a3950 100755 --- a/llvm_tools/update_packages_and_run_tests.py +++ b/llvm_tools/update_packages_and_run_tests.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/update_packages_and_run_tests_unittest.py b/llvm_tools/update_packages_and_run_tests_unittest.py index b48f6338..a4b4f29c 100755 --- a/llvm_tools/update_packages_and_run_tests_unittest.py +++ b/llvm_tools/update_packages_and_run_tests_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/update_tryjob_status.py b/llvm_tools/update_tryjob_status.py index 61aa9d1c..43901e8e 100755 --- a/llvm_tools/update_tryjob_status.py +++ b/llvm_tools/update_tryjob_status.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/update_tryjob_status_unittest.py b/llvm_tools/update_tryjob_status_unittest.py index bf078f3b..8487e6f6 100755 --- a/llvm_tools/update_tryjob_status_unittest.py +++ b/llvm_tools/update_tryjob_status_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. 
# Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/upload_lexan_crashes_to_forcey.py b/llvm_tools/upload_lexan_crashes_to_forcey.py index 5b038f53..050168a5 100755 --- a/llvm_tools/upload_lexan_crashes_to_forcey.py +++ b/llvm_tools/upload_lexan_crashes_to_forcey.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/upload_lexan_crashes_to_forcey_test.py b/llvm_tools/upload_lexan_crashes_to_forcey_test.py index 36a35048..ba6298f4 100755 --- a/llvm_tools/upload_lexan_crashes_to_forcey_test.py +++ b/llvm_tools/upload_lexan_crashes_to_forcey_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/lock_machine.py b/lock_machine.py index 85a0cfa3..8bc3ec22 100755 --- a/lock_machine.py +++ b/lock_machine.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/make_root_writable.py b/make_root_writable.py index 0163adf1..5353a750 100755 --- a/make_root_writable.py +++ b/make_root_writable.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2021 The Chromium OS Authors. All rights reserved. +# Copyright 2021 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/orderfile/post_process_orderfile.py b/orderfile/post_process_orderfile.py index 3db0b3b8..748e5cf2 100755 --- a/orderfile/post_process_orderfile.py +++ b/orderfile/post_process_orderfile.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. @@ -16,7 +16,7 @@ Builtin functions and put them after the input symbols. in begin and end of the file. The results of the file is intended to be uploaded and consumed when linking -Chrome in Chrome OS. +Chrome in ChromeOS. """ from __future__ import division, print_function diff --git a/orderfile/post_process_orderfile_test.py b/orderfile/post_process_orderfile_test.py index a5fb2c73..976de7fd 100755 --- a/orderfile/post_process_orderfile_test.py +++ b/orderfile/post_process_orderfile_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/pgo_tools/merge_profdata_and_upload.py b/pgo_tools/merge_profdata_and_upload.py index 15445c83..851edcc9 100755 --- a/pgo_tools/merge_profdata_and_upload.py +++ b/pgo_tools/merge_profdata_and_upload.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/pgo_tools/monitor_pgo_profiles.py b/pgo_tools/monitor_pgo_profiles.py index 5c17423b..e56db427 100755 --- a/pgo_tools/monitor_pgo_profiles.py +++ b/pgo_tools/monitor_pgo_profiles.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/pgo_tools/monitor_pgo_profiles_unittest.py b/pgo_tools/monitor_pgo_profiles_unittest.py index eef33887..dab529b8 100755 --- a/pgo_tools/monitor_pgo_profiles_unittest.py +++ b/pgo_tools/monitor_pgo_profiles_unittest.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/remote_test.py b/remote_test.py index 98ff62a5..197bae68 100755 --- a/remote_test.py +++ b/remote_test.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/run_tests_for.py b/run_tests_for.py index 807a218d..8d24d3bf 100755 --- a/run_tests_for.py +++ b/run_tests_for.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/rust_tools/rust_uprev.py b/rust_tools/rust_uprev.py index 011639df..9ab7e99d 100755 --- a/rust_tools/rust_uprev.py +++ b/rust_tools/rust_uprev.py @@ -1,13 +1,13 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Tool to automatically generate a new Rust uprev CL. This tool is intended to automatically generate a CL to uprev Rust to a -newer version in Chrome OS, including creating a new Rust version or +newer version in ChromeOS, including creating a new Rust version or removing an old version. It's based on src/third_party/chromiumos-overlay/dev-lang/rust/UPGRADE.md. When using the tool, the progress can be saved to a JSON file, so the user can resume diff --git a/rust_tools/rust_uprev_test.py b/rust_tools/rust_uprev_test.py index 00761391..743e6130 100755 --- a/rust_tools/rust_uprev_test.py +++ b/rust_tools/rust_uprev_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. 
+# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/rust_tools/rust_watch.py b/rust_tools/rust_watch.py index c347d2c6..59de0ca8 100755 --- a/rust_tools/rust_watch.py +++ b/rust_tools/rust_watch.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/rust_tools/rust_watch_test.py b/rust_tools/rust_watch_test.py index 583a9125..3e25a950 100755 --- a/rust_tools/rust_watch_test.py +++ b/rust_tools/rust_watch_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/seccomp_tools/mass_seccomp_editor/mass_seccomp_editor.py b/seccomp_tools/mass_seccomp_editor/mass_seccomp_editor.py index d8dd7626..957227b8 100755 --- a/seccomp_tools/mass_seccomp_editor/mass_seccomp_editor.py +++ b/seccomp_tools/mass_seccomp_editor/mass_seccomp_editor.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2021 The Chromium OS Authors. All rights reserved. +# Copyright 2021 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/seccomp_tools/mass_seccomp_editor/test_mass_seccomp_editor.py b/seccomp_tools/mass_seccomp_editor/test_mass_seccomp_editor.py index 3e7aa4cc..c128ad02 100755 --- a/seccomp_tools/mass_seccomp_editor/test_mass_seccomp_editor.py +++ b/seccomp_tools/mass_seccomp_editor/test_mass_seccomp_editor.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2021 The Chromium OS Authors. All rights reserved. +# Copyright 2021 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/tc_enter_chroot.py b/tc_enter_chroot.py index 3a7538ad..2f4fc8ed 100755 --- a/tc_enter_chroot.py +++ b/tc_enter_chroot.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2010 The Chromium OS Authors. All rights reserved. +# Copyright 2010 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/toolchain_utils_githooks/check-presubmit.py b/toolchain_utils_githooks/check-presubmit.py index 99500acd..fe9648a6 100755 --- a/toolchain_utils_githooks/check-presubmit.py +++ b/toolchain_utils_githooks/check-presubmit.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2019 The Chromium OS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. @@ -248,7 +248,7 @@ def check_cros_lint( toolchain_utils_root, env=fixed_env) - # This is returned specifically if cros couldn't find the Chrome OS tree + # This is returned specifically if cros couldn't find the ChromeOS tree # root. 
if exit_code == 127: return None @@ -310,9 +310,9 @@ def check_cros_lint( tasks.append(('golint', thread_pool.apply_async(run_golint))) complaint = '\n'.join(( - 'WARNING: No Chrome OS checkout detected, and no viable CrOS tree', + 'WARNING: No ChromeOS checkout detected, and no viable CrOS tree', 'found; falling back to linting only python and go. If you have a', - 'Chrome OS checkout, please either develop from inside of the source', + 'ChromeOS checkout, please either develop from inside of the source', 'tree, or set $CHROMEOS_ROOT_DIRECTORY to the root of it.', )) diff --git a/update_telemetry_defaults.py b/update_telemetry_defaults.py index c070aeb1..e9eb0427 100755 --- a/update_telemetry_defaults.py +++ b/update_telemetry_defaults.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/upstream_workon/upstream_workon.bash b/upstream_workon/upstream_workon.bash index f066bbe3..98dced05 100755 --- a/upstream_workon/upstream_workon.bash +++ b/upstream_workon/upstream_workon.bash @@ -1,6 +1,6 @@ #!/bin/bash -eu # -# Copyright 2021 The Chromium OS Authors. All rights reserved. +# Copyright 2021 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. -- cgit v1.2.3 From cfaabc189fc96a16bb3f0e4772e9ec82f1d4435d Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Mon, 21 Mar 2022 11:27:40 -0700 Subject: llvm_tools: add a nightly clang-tidy diagnostic checker This CL adds a tool that files bugs on us to evaluate new clang-tidy checks as they're landed upstream. The intent is for this to be run at some regular interval. Hopefully it can be extended to clang, too? BUG=None TEST=Ran a few times on my machine with the bug bits stubbed out Change-Id: I1736d6e18009bb4ea9ba0a9b4d068ed0331d3353 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3539313 Reviewed-by: Christopher Di Bella Commit-Queue: Christopher Di Bella Tested-by: George Burgess --- llvm_tools/check_clang_diags.py | 209 +++++++++++++++++++++++++++++++++++ llvm_tools/check_clang_diags_test.py | 102 +++++++++++++++++ 2 files changed, 311 insertions(+) create mode 100755 llvm_tools/check_clang_diags.py create mode 100755 llvm_tools/check_clang_diags_test.py diff --git a/llvm_tools/check_clang_diags.py b/llvm_tools/check_clang_diags.py new file mode 100755 index 00000000..69f91823 --- /dev/null +++ b/llvm_tools/check_clang_diags.py @@ -0,0 +1,209 @@ +#!/usr/bin/env python3 +# Copyright 2022 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""check_clang_diags monitors for new diagnostics in LLVM + +This looks at projects we care about (currently only clang-tidy, though +hopefully clang in the future, too?) and files bugs whenever a new check or +warning appears. These bugs are intended to keep us up-to-date with new +diagnostics, so we can enable them as they land. 
+""" + +import argparse +import json +import logging +import os +import shutil +import subprocess +import sys +from typing import Dict, List, Tuple + +from cros_utils import bugs + +_DEFAULT_ASSIGNEE = 'mage' +_DEFAULT_CCS = ['cjdb@google.com'] + + +# FIXME: clang would be cool to check, too? Doesn't seem to have a super stable +# way of listing all warnings, unfortunately. +def _build_llvm(llvm_dir: str, build_dir: str): + """Builds everything that _collect_available_diagnostics depends on.""" + targets = ['clang-tidy'] + # use `-C $llvm_dir` so the failure is easier to handle if llvm_dir DNE. + ninja_result = subprocess.run( + ['ninja', '-C', build_dir] + targets, + check=False, + ) + if not ninja_result.returncode: + return + + # Sometimes the directory doesn't exist, sometimes incremental cmake + # breaks, sometimes something random happens. Start fresh since that fixes + # the issue most of the time. + logging.warning('Initial build failed; trying to build from scratch.') + shutil.rmtree(build_dir, ignore_errors=True) + os.makedirs(build_dir) + subprocess.run( + [ + 'cmake', + '-G', + 'Ninja', + '-DCMAKE_BUILD_TYPE=MinSizeRel', + '-DLLVM_USE_LINKER=lld', + '-DLLVM_ENABLE_PROJECTS=clang;clang-tools-extra', + '-DLLVM_TARGETS_TO_BUILD=X86', + f'{os.path.abspath(llvm_dir)}/llvm', + ], + cwd=build_dir, + check=True, + ) + subprocess.run(['ninja'] + targets, check=True, cwd=build_dir) + + +def _collect_available_diagnostics(llvm_dir: str, + build_dir: str) -> Dict[str, List[str]]: + _build_llvm(llvm_dir, build_dir) + + clang_tidy = os.path.join(os.path.abspath(build_dir), 'bin', 'clang-tidy') + clang_tidy_checks = subprocess.run( + [clang_tidy, '-checks=*', '-list-checks'], + # Use cwd='/' to ensure no .clang-tidy files are picked up. It + # _shouldn't_ matter, but it's also ~free, so... + check=True, + cwd='/', + stdout=subprocess.PIPE, + encoding='utf-8', + ) + clang_tidy_checks_stdout = [ + x.strip() for x in clang_tidy_checks.stdout.strip().splitlines() + ] + + # The first line should always be this, then each line thereafter is a check + # name. + assert clang_tidy_checks_stdout[0] == 'Enabled checks:', ( + clang_tidy_checks_stdout) + clang_tidy_checks = clang_tidy_checks_stdout[1:] + assert not any(check.isspace() + for check in clang_tidy_checks), (clang_tidy_checks) + return {'clang-tidy': clang_tidy_checks} + + +def _process_new_diagnostics( + old: Dict[str, List[str]], new: Dict[str, List[str]] +) -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]: + """Determines the set of new diagnostics that we should file bugs for. + + old: The previous state that this function returned as `new_state_file`, or + `{}` + new: The diagnostics that we've most recently found. This is a dict in the + form {tool: [diag]} + + Returns a `new_state_file` to pass into this function as `old` in the + future, and a dict of diags to file bugs about. + """ + new_diagnostics = {} + new_state_file = {} + for tool, diags in new.items(): + if tool not in old: + logging.info('New tool with diagnostics: %s; pretending none are new', + tool) + new_state_file[tool] = diags + else: + old_diags = set(old[tool]) + newly_added_diags = [x for x in diags if x not in old_diags] + if newly_added_diags: + new_diagnostics[tool] = newly_added_diags + # This specifically tries to make diags sticky: if one is landed, then + # reverted, then relanded, we ignore the reland. This might not be + # desirable? I don't know. + new_state_file[tool] = old[tool] + newly_added_diags + + # Sort things so we have more predictable output. 
+ for v in new_diagnostics.values(): + v.sort() + + return new_state_file, new_diagnostics + + +def _file_bugs_for_new_diags(new_diags: Dict[str, List[str]]): + for tool, diags in sorted(new_diags.items()): + for diag in diags: + bugs.CreateNewBug( + component_id=bugs.WellKnownComponents.CrOSToolchainPublic, + title=f'Investigate {tool} check `{diag}`', + body='\n'.join(( + f'It seems that the `{diag}` check was recently added to {tool}.', + "It's probably good to TAL at whether this check would be good", + 'for us to enable in e.g., platform2, or across ChromeOS.', + )), + assignee=_DEFAULT_ASSIGNEE, + cc=_DEFAULT_CCS, + ) + + +def main(argv: List[str]): + logging.basicConfig( + format='>> %(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: ' + '%(message)s', + level=logging.INFO, + ) + + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument('--llvm_dir', + required=True, + help='LLVM directory to check. Required.') + parser.add_argument('--llvm_build_dir', + required=True, + help='Build directory for LLVM. Required & autocreated.') + parser.add_argument( + '--state_file', + required=True, + help='State file to use to suppress duplicate complaints. Required.') + parser.add_argument( + '--dry_run', + action='store_true', + help='Skip filing bugs & writing to the state file; just log ' + 'differences.') + opts = parser.parse_args(argv) + + build_dir = opts.llvm_build_dir + dry_run = opts.dry_run + llvm_dir = opts.llvm_dir + state_file = opts.state_file + + try: + with open(state_file, encoding='utf-8') as f: + prior_diagnostics = json.load(f) + except FileNotFoundError: + # If the state file didn't exist, just create it without complaining this + # time. + prior_diagnostics = {} + + available_diagnostics = _collect_available_diagnostics(llvm_dir, build_dir) + logging.info('Available diagnostics are %s', available_diagnostics) + if available_diagnostics == prior_diagnostics: + logging.info('Current diagnostics are identical to previous ones; quit') + return + + new_state_file, new_diagnostics = _process_new_diagnostics( + prior_diagnostics, available_diagnostics) + logging.info('New diagnostics in existing tool(s): %s', new_diagnostics) + + if dry_run: + logging.info('Skipping new state file writing and bug filing; dry-run ' + 'mode wins') + else: + _file_bugs_for_new_diags(new_diagnostics) + new_state_file_path = state_file + '.new' + with open(new_state_file_path, 'w', encoding='utf-8') as f: + json.dump(new_state_file, f) + os.rename(new_state_file_path, state_file) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/llvm_tools/check_clang_diags_test.py b/llvm_tools/check_clang_diags_test.py new file mode 100755 index 00000000..2c404d62 --- /dev/null +++ b/llvm_tools/check_clang_diags_test.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python3 +# Copyright 2022 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +"""Tests for check_clang_diags.""" + +import unittest +from unittest import mock + +from cros_utils import bugs + +import check_clang_diags + +# pylint: disable=protected-access + + +class Test(unittest.TestCase): + """Test class.""" + + def test_process_new_diagnostics_ignores_new_tools(self): + new_state, new_diags = check_clang_diags._process_new_diagnostics( + old={}, + new={'clang': ['-Wone', '-Wtwo']}, + ) + self.assertEqual(new_state, {'clang': ['-Wone', '-Wtwo']}) + self.assertEqual(new_diags, {}) + + def test_process_new_diagnostics_is_a_nop_when_no_changes(self): + new_state, new_diags = check_clang_diags._process_new_diagnostics( + old={'clang': ['-Wone', '-Wtwo']}, + new={'clang': ['-Wone', '-Wtwo']}, + ) + self.assertEqual(new_state, {'clang': ['-Wone', '-Wtwo']}) + self.assertEqual(new_diags, {}) + + def test_process_new_diagnostics_ignores_removals_and_readds(self): + new_state, new_diags = check_clang_diags._process_new_diagnostics( + old={'clang': ['-Wone', '-Wtwo']}, + new={'clang': ['-Wone']}, + ) + self.assertEqual(new_diags, {}) + new_state, new_diags = check_clang_diags._process_new_diagnostics( + old=new_state, + new={'clang': ['-Wone', '-Wtwo']}, + ) + self.assertEqual(new_state, {'clang': ['-Wone', '-Wtwo']}) + self.assertEqual(new_diags, {}) + + def test_process_new_diagnostics_complains_when_warnings_are_added(self): + new_state, new_diags = check_clang_diags._process_new_diagnostics( + old={'clang': ['-Wone']}, + new={'clang': ['-Wone', '-Wtwo']}, + ) + self.assertEqual(new_state, {'clang': ['-Wone', '-Wtwo']}) + self.assertEqual(new_diags, {'clang': ['-Wtwo']}) + + @mock.patch.object(bugs, 'CreateNewBug') + def test_bugs_are_created_as_expected(self, create_new_bug_mock): + check_clang_diags._file_bugs_for_new_diags({ + 'clang': ['-Wone'], + 'clang-tidy': ['bugprone-foo'], + }) + + expected_calls = [ + mock.call( + component_id=bugs.WellKnownComponents.CrOSToolchainPublic, + title='Investigate clang check `-Wone`', + body='\n'.join(( + 'It seems that the `-Wone` check was recently added to clang.', + "It's probably good to TAL at whether this check would be good", + 'for us to enable in e.g., platform2, or across ChromeOS.', + )), + assignee=check_clang_diags._DEFAULT_ASSIGNEE, + cc=check_clang_diags._DEFAULT_CCS, + ), + mock.call( + component_id=bugs.WellKnownComponents.CrOSToolchainPublic, + title='Investigate clang-tidy check `bugprone-foo`', + body='\n'.join(( + 'It seems that the `bugprone-foo` check was recently added to ' + 'clang-tidy.', + "It's probably good to TAL at whether this check would be good", + 'for us to enable in e.g., platform2, or across ChromeOS.', + )), + assignee=check_clang_diags._DEFAULT_ASSIGNEE, + cc=check_clang_diags._DEFAULT_CCS, + ), + ] + + # Don't assertEqual the lists, since the diff is really hard to read for + # that. 
+ for actual, expected in zip(create_new_bug_mock.call_args_list, + expected_calls): + self.assertEqual(actual, expected) + + self.assertEqual(len(create_new_bug_mock.call_args_list), + len(expected_calls)) + + +if __name__ == '__main__': + unittest.main() -- cgit v1.2.3 From f5b3ebc1f582a7ffaef0792fe01d86f7416b4bbc Mon Sep 17 00:00:00 2001 From: Denis Nikitin Date: Mon, 25 Apr 2022 14:27:02 -0700 Subject: afdo_metadata: Publish the new kernel profiles Update chromeos-kernel-4.4 Update chromeos-kernel-4.14 Update chromeos-kernel-4.19 Update chromeos-kernel-5.4 Update chromeos-kernel-5.10 BUG=None TEST=Verified in kernel-release-afdo-verify-orchestrator Change-Id: Ia33adce8ea17e7a542a450356840f3ded72789ba Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3606103 Tested-by: Denis Nikitin Auto-Submit: Denis Nikitin Reviewed-by: Manoj Gupta Commit-Queue: Manoj Gupta --- afdo_metadata/kernel_afdo.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json index 2d640804..bb31f898 100644 --- a/afdo_metadata/kernel_afdo.json +++ b/afdo_metadata/kernel_afdo.json @@ -1,17 +1,17 @@ { "chromeos-kernel-4_4": { - "name": "R102-14588.23-1649065028" + "name": "R103-14682.0-1650879122" }, "chromeos-kernel-4_14": { - "name": "R102-14574.0-1649065329" + "name": "R103-14695.11-1650879494" }, "chromeos-kernel-4_19": { - "name": "R102-14588.23-1649064755" + "name": "R103-14695.11-1650879114" }, "chromeos-kernel-5_4": { - "name": "R102-14588.23-1649064834" + "name": "R103-14695.11-1650879283" }, "chromeos-kernel-5_10": { - "name": "R102-14588.23-1649064775" + "name": "R103-14695.11-1650879392" } } -- cgit v1.2.3 From 5940e1f17e40fbf449ec76370772f52dd6a9c668 Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Mon, 25 Apr 2022 16:11:39 -0700 Subject: auto_delete_nightly_test_data: format yapf doesn't like the current style in this file. fix it up. 
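For reference, such a reformat is mechanical; a minimal sketch, assuming yapf is installed with its standard command-line interface (the exact yapf version and style settings used for this tree are not recorded here):

import subprocess

# Rewrite the file in place; check=True raises CalledProcessError if yapf
# exits nonzero.
subprocess.run(['yapf', '--in-place', 'auto_delete_nightly_test_data.py'],
               check=True)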
BUG=b:230201673 TEST=None Change-Id: I649c61d2165ffc089bdc05d768a54b7d339ed2b3 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3607072 Tested-by: George Burgess Reviewed-by: Jordan Abrahams-Whitehead Commit-Queue: George Burgess --- auto_delete_nightly_test_data.py | 60 +++++++++++++++++++--------------------- 1 file changed, 28 insertions(+), 32 deletions(-) diff --git a/auto_delete_nightly_test_data.py b/auto_delete_nightly_test_data.py index 87bf661b..ba7d108c 100755 --- a/auto_delete_nightly_test_data.py +++ b/auto_delete_nightly_test_data.py @@ -15,14 +15,12 @@ import argparse import datetime import os import re -import shutil import shlex +import shutil import sys import time -from cros_utils import command_executer -from cros_utils import constants -from cros_utils import misc +from cros_utils import command_executer, constants, misc DIR_BY_WEEKDAY = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun') NIGHTLY_TESTS_WORKSPACE = os.path.join(constants.CROSTC_WORKSPACE, @@ -32,8 +30,7 @@ NIGHTLY_TESTS_WORKSPACE = os.path.join(constants.CROSTC_WORKSPACE, def CleanNumberedDir(s, dry_run=False): """Deleted directories under each dated_dir.""" chromeos_dirs = [ - os.path.join(s, x) - for x in os.listdir(s) + os.path.join(s, x) for x in os.listdir(s) if misc.IsChromeOsTree(os.path.join(s, x)) ] ce = command_executer.GetCommandExecuter(log_level='none') @@ -74,8 +71,7 @@ def CleanNumberedDir(s, dry_run=False): def CleanDatedDir(dated_dir, dry_run=False): # List subdirs under dir subdirs = [ - os.path.join(dated_dir, x) - for x in os.listdir(dated_dir) + os.path.join(dated_dir, x) for x in os.listdir(dated_dir) if os.path.isdir(os.path.join(dated_dir, x)) ] all_succeeded = True @@ -90,20 +86,18 @@ def ProcessArguments(argv): parser = argparse.ArgumentParser( description='Automatically delete nightly test data directories.', usage='auto_delete_nightly_test_data.py options') - parser.add_argument( - '-d', - '--dry_run', - dest='dry_run', - default=False, - action='store_true', - help='Only print command line, do not execute anything.') - parser.add_argument( - '--days_to_preserve', - dest='days_to_preserve', - default=3, - help=('Specify the number of days (not including today),' - ' test data generated on these days will *NOT* be ' - 'deleted. Defaults to 3.')) + parser.add_argument('-d', + '--dry_run', + dest='dry_run', + default=False, + action='store_true', + help='Only print command line, do not execute anything.') + parser.add_argument('--days_to_preserve', + dest='days_to_preserve', + default=3, + help=('Specify the number of days (not including today),' + ' test data generated on these days will *NOT* be ' + 'deleted. Defaults to 3.')) options = parser.parse_args(argv) return options @@ -195,8 +189,9 @@ def CleanOldCLs(days_to_preserve='1', dry_run=False): # Find Old CLs. old_cls_cmd = ('gerrit --raw search "owner:me status:open age:%sd"' % days_to_preserve) - _, cls, _ = ce.ChrootRunCommandWOutput( - chromeos_root, old_cls_cmd, print_to_console=False) + _, cls, _ = ce.ChrootRunCommandWOutput(chromeos_root, + old_cls_cmd, + print_to_console=False) # Convert any whitespaces to spaces. 
cls = ' '.join(cls.split()) if not cls: @@ -207,8 +202,9 @@ def CleanOldCLs(days_to_preserve='1', dry_run=False): print('Going to execute: %s' % abandon_cls_cmd) return 0 - return ce.ChrootRunCommand( - chromeos_root, abandon_cls_cmd, print_to_console=False) + return ce.ChrootRunCommand(chromeos_root, + abandon_cls_cmd, + print_to_console=False) def CleanChromeTelemetryTmpFiles(dry_run): @@ -222,7 +218,8 @@ def CleanChromeTelemetryTmpFiles(dry_run): else: rv = ce.RunCommand(cmd, print_to_console=False) if rv == 0: - print(f'Successfully cleaned chrome tree tmp directory ' f'{tmp_dir!r} .') + print(f'Successfully cleaned chrome tree tmp directory ' + f'{tmp_dir!r} .') else: print(f'Some directories were not removed under chrome tree ' f'tmp directory {tmp_dir!r}.') @@ -247,13 +244,12 @@ def Main(argv): else: dated_dir = DIR_BY_WEEKDAY[i - 1] - rv += 0 if CleanDatedDir( - os.path.join(NIGHTLY_TESTS_WORKSPACE, dated_dir), - options.dry_run) else 1 + rv += 0 if CleanDatedDir(os.path.join(NIGHTLY_TESTS_WORKSPACE, dated_dir), + options.dry_run) else 1 ## Clean temporaries, images under crostc/chromeos - rv2 = CleanChromeOsTmpAndImages( - int(options.days_to_preserve), options.dry_run) + rv2 = CleanChromeOsTmpAndImages(int(options.days_to_preserve), + options.dry_run) # Clean CLs that are not updated in last 2 weeks. rv3 = CleanOldCLs('14', options.dry_run) -- cgit v1.2.3 From 64f87754560bc7e99fb19a74c4f693e586977687 Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Mon, 25 Apr 2022 16:19:04 -0700 Subject: auto_delete_nightly_test_data: s/.format()/f-strings Pylint complained about these, so fix them. BUG=b:230201673 TEST=None Change-Id: Ief092e828d3f153394c1d3dcc9da37380243fe39 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3607073 Tested-by: George Burgess Reviewed-by: Jordan Abrahams-Whitehead Commit-Queue: George Burgess --- auto_delete_nightly_test_data.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/auto_delete_nightly_test_data.py b/auto_delete_nightly_test_data.py index ba7d108c..cc17f7d4 100755 --- a/auto_delete_nightly_test_data.py +++ b/auto_delete_nightly_test_data.py @@ -37,10 +37,10 @@ def CleanNumberedDir(s, dry_run=False): all_succeeded = True for cd in chromeos_dirs: if misc.DeleteChromeOsTree(cd, dry_run=dry_run): - print('Successfully removed chromeos tree "{0}".'.format(cd)) + print(f'Successfully removed chromeos tree {cd!r}.') else: all_succeeded = False - print('Failed to remove chromeos tree "{0}", please check.'.format(cd)) + print(f'Failed to remove chromeos tree {cd!r}, please check.') if not all_succeeded: print('Failed to delete at least one chromeos tree, please check.') @@ -52,19 +52,19 @@ def CleanNumberedDir(s, dry_run=False): valid_dir_pattern = ('^' + NIGHTLY_TESTS_WORKSPACE + '/(' + '|'.join(DIR_BY_WEEKDAY) + ')') if not re.search(valid_dir_pattern, s): - print('Trying to delete an invalid dir "{0}" (must match "{1}"), ' - 'please check.'.format(s, valid_dir_pattern)) + print(f'Trying to delete an invalid dir {s!r} (must match ' + f'{valid_dir_pattern!r}), please check.') return False - cmd = 'rm -fr {0}'.format(s) + cmd = f'rm -fr {s}' if dry_run: print(cmd) else: if ce.RunCommand(cmd, print_to_console=False, terminated_timeout=480) == 0: - print('Successfully removed "{0}".'.format(s)) + print(f'Successfully removed {s!r}.') else: all_succeeded = False - print('Failed to remove "{0}", please check.'.format(s)) + print(f'Failed to remove {s!r}, please check.') 
return all_succeeded @@ -146,15 +146,15 @@ def CleanChromeOsImageFiles(chroot_tmp, subdir_suffix, days_to_preserve, subdir_path = os.path.join(tmp_dir, subdir) if now - os.path.getatime(subdir_path) > seconds_delta: if dry_run: - print('Will run:\nshutil.rmtree({})'.format(subdir_path)) + print(f'Will run:\nshutil.rmtree({subdir_path!r})') else: try: shutil.rmtree(subdir_path) print('Successfully cleaned chromeos image autotest directories ' - 'from "{}".'.format(subdir_path)) + f'from {subdir_path!r}.') except OSError: print('Some image autotest directories were not removed from ' - '"{}".'.format(subdir_path)) + f'"{subdir_path}".') errors += 1 return errors -- cgit v1.2.3 From 42ef7387e95de2e97cd109194dbe1b24f88d4ae5 Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Mon, 25 Apr 2022 16:07:20 -0700 Subject: auto_delete_nightly_test_data: refactor tmp file cleaning This function fails pretty often, and its output is very unhelpful (similar quality to "something somewhere failed"). This turns the function into Python code, and makes it error out more gracefully. BUG=b:230201673 TEST=Ran on chrotomation without issue. Change-Id: I57397c7a19a86aa62d5320ae35ca72615115ba35 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3607074 Reviewed-by: Jordan Abrahams-Whitehead Commit-Queue: George Burgess Auto-Submit: George Burgess Tested-by: George Burgess --- auto_delete_nightly_test_data.py | 79 ++++++++++++++++++++++++++++++---------- 1 file changed, 59 insertions(+), 20 deletions(-) diff --git a/auto_delete_nightly_test_data.py b/auto_delete_nightly_test_data.py index cc17f7d4..a79b6cf9 100755 --- a/auto_delete_nightly_test_data.py +++ b/auto_delete_nightly_test_data.py @@ -17,8 +17,11 @@ import os import re import shlex import shutil +import stat import sys import time +import traceback +from pathlib import Path from cros_utils import command_executer, constants, misc @@ -102,31 +105,67 @@ def ProcessArguments(argv): return options +def IsChromeOsTmpDeletionCandidate(file_name: str): + """Returns whether the given basename can be deleted from a chroot's /tmp.""" + name_prefixes = ( + 'test_that_', + 'cros-update', + 'CrAU_temp_data', + ) + if any(file_name.startswith(x) for x in name_prefixes): + return True + # Remove files that look like `tmpABCDEFGHI`. + return len(file_name) == 9 and file_name.startswith('tmp') + + def CleanChromeOsTmpFiles(chroot_tmp, days_to_preserve, dry_run): - rv = 0 - ce = command_executer.GetCommandExecuter() # Clean chroot/tmp/test_that_* and chroot/tmp/tmpxxxxxx, that were last # accessed more than specified time. - minutes = 1440 * days_to_preserve - cmd = (r'find {0} -maxdepth 1 -type d ' - r'\( -name "test_that_*" -amin +{1} -o ' - r' -name "cros-update*" -amin +{1} -o ' - r' -name "CrAU_temp_data*" -amin +{1} -o ' - r' -regex "{0}/tmp......" 
-amin +{1} \) '
- r'-exec bash -c "echo rm -fr {{}}" \; '
- r'-exec bash -c "rm -fr {{}}" \;').format(chroot_tmp, minutes)
- if dry_run:
- print('Going to execute:\n%s' % cmd)
- else:
- rv = ce.RunCommand(cmd, print_to_console=False)
- if rv == 0:
- print('Successfully cleaned chromeos tree tmp directory '
- '"{0}".'.format(chroot_tmp))
+ secs_to_preserve = 60 * 60 * 24 * days_to_preserve
+ now = time.time()
+ remove_older_than_time = now - secs_to_preserve
+
+ had_errors = False
+ for file in Path(chroot_tmp).iterdir():
+ if not IsChromeOsTmpDeletionCandidate(file.name):
+ continue
+
+ try:
+ # Take the stat here and use that later, so we only need to check for a
+ # nonexistent file once.
+ st = file.stat()
+ except FileNotFoundError:
+ # This was deleted while we were checking; ignore it.
+ continue
+
+ if not stat.S_ISDIR(st.st_mode):
+ continue
+
+ if st.st_atime >= remove_older_than_time:
+ continue
+
+ if dry_run:
+ print(f'Would remove {file}')
+ continue
+
+ this_iteration_had_errors = False
+
+ def OnError(_func, path_name, excinfo):
+ nonlocal this_iteration_had_errors
+ this_iteration_had_errors = True
+ print(f'Failed removing path at {path_name}; traceback:')
+ traceback.print_exception(*excinfo)
+
+ shutil.rmtree(file, onerror=OnError)
+
+ # Some errors can be other processes racing with us to delete things. Don't
+ # count those as an error which we complain loudly about.
+ if this_iteration_had_errors and file.exists():
+ had_errors = True
 else:
- print('Some directories were not removed under chromeos tree '
- 'tmp directory -"{0}".'.format(chroot_tmp))
+ print(f'Discarding removal errors for {file}; dir was still removed.')

- return rv
+ return 1 if had_errors else 0

 def CleanChromeOsImageFiles(chroot_tmp, subdir_suffix, days_to_preserve,
-- cgit v1.2.3
From dee8f8f030f91ea6187306739b8c8d2354b4d7ab Mon Sep 17 00:00:00 2001
From: George Burgess IV
Date: Tue, 26 Apr 2022 15:22:03 -0700
Subject: auto_delete_nightly_test_data: fix a bug

We should only print "discarding [...]" if we actually ran OnError.
This was an oversight in my original CL.

BUG=b:230201673
TEST=None

Change-Id: Id6a664faa49fef0a4dc9d7ef01bd60280c071c08
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3609228
Tested-by: George Burgess
Auto-Submit: George Burgess
Reviewed-by: Jordan Abrahams-Whitehead
Commit-Queue: Jordan Abrahams-Whitehead
---
 auto_delete_nightly_test_data.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/auto_delete_nightly_test_data.py b/auto_delete_nightly_test_data.py
index a79b6cf9..5243adcf 100755
--- a/auto_delete_nightly_test_data.py
+++ b/auto_delete_nightly_test_data.py
@@ -160,10 +160,11 @@ def CleanChromeOsTmpFiles(chroot_tmp, days_to_preserve, dry_run):
 # Some errors can be other processes racing with us to delete things. Don't
 # count those as an error which we complain loudly about.
- if this_iteration_had_errors and file.exists(): - had_errors = True - else: - print(f'Discarding removal errors for {file}; dir was still removed.') + if this_iteration_had_errors: + if file.exists(): + had_errors = True + else: + print(f'Discarding removal errors for {file}; dir was still removed.') return 1 if had_errors else 0 -- cgit v1.2.3 From 0719306ebd5d19eeb93567278bdea154bae148aa Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead Date: Wed, 27 Apr 2022 17:47:18 +0000 Subject: llvm_tools: Skip patch failures on local bisection This is now controlled by a USE flag: 'continue-on-patch-failure', which we can make use of in this script. BUG=b:226987500 TEST=./llvm_local_bisection.sh # with erroneous patches Change-Id: I893ee1ca9743d0bc5b0a9ca6e4f88d73ebfb6454 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3610724 Tested-by: Jordan Abrahams-Whitehead Commit-Queue: Jordan Abrahams-Whitehead Reviewed-by: Manoj Gupta --- llvm_tools/llvm_local_bisection.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/llvm_tools/llvm_local_bisection.sh b/llvm_tools/llvm_local_bisection.sh index 04b7b0b7..26d277d5 100755 --- a/llvm_tools/llvm_local_bisection.sh +++ b/llvm_tools/llvm_local_bisection.sh @@ -25,7 +25,7 @@ LLVM_CLONE_PATH="${HOME}/chromiumos/src/third_party/llvm-project" main () { - # Note this builds with USE="llvm-next debug -thinlto -llvm_pgo_use" + # Note this builds with USE="llvm-next debug -thinlto -llvm_pgo_use continue-on-patch-failure" build_llvm || exit # FIXME: Write your actual bisection command here which uses @@ -74,8 +74,9 @@ build_llvm () { local logfile="/tmp/build-llvm.${CURRENT}.out" log "Writing logs to ${logfile}" - log "sudo USE='llvm-next debug -thinlto -llvm_use_pgo' emerge sys-devel/llvm" - logdo sudo USE='llvm-next debug -thinlto -llvm_use_pgo' emerge \ + log "sudo USE='llvm-next debug -thinlto -llvm_use_pgo continue-on-patch-failure'" \ + " emerge sys-devel/llvm" + logdo sudo USE='llvm-next debug -thinlto -llvm_use_pgo continue-on-patch-failure' emerge \ sys-devel/llvm \ &> "${logfile}" local emerge_exit_code="$?" -- cgit v1.2.3 From 171dd532a689fc76f241c540de9e7b4598c57900 Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Thu, 28 Apr 2022 15:21:56 -0700 Subject: auto_delete_nightly_test_data: enhance telemetry file deletion The output of this function was unhelpful. Much like previous CLs changing this file, this CL seeks to make the output more helpful (and make the removal more resilient to transient errors). Functionally, this also means we'll wait a day before removing these subdirectories. This doesn't seem inherently bad. 
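In outline, the generalized helper walks a directory, keeps only entries whose names match a caller-supplied predicate, skips anything accessed more recently than the cutoff, and treats a racing deletion as success rather than an error. A minimal self-contained sketch of that shape (simplified from the diff below; the real helper reports per-path tracebacks via an onerror callback instead of using ignore_errors):

import shutil
import stat
import time
from pathlib import Path

def RemoveOldSubdirs(base_dir, days_to_preserve, is_name_removal_worthy):
  """Sketch of the predicate-based, age-gated subdirectory cleanup."""
  cutoff = time.time() - 60 * 60 * 24 * days_to_preserve
  had_errors = False
  for entry in Path(base_dir).iterdir():
    if not is_name_removal_worthy(entry.name):
      continue
    try:
      st = entry.stat()
    except FileNotFoundError:
      # A concurrent cleaner won the race; nothing left to do.
      continue
    if not stat.S_ISDIR(st.st_mode) or st.st_atime >= cutoff:
      continue
    shutil.rmtree(entry, ignore_errors=True)
    # Only count a failure if the directory actually survived the attempt.
    had_errors = had_errors or entry.exists()
  return 1 if had_errors else 0

Re-checking entry.exists() after the removal attempt is what lets concurrent deleters win the race without generating noise in the logs.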
BUG=b:230656849 TEST=Ran on Chrotomation Change-Id: I8e5f9bdd7725591abb5a694eebd1b47f8a3f6eb4 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3614355 Reviewed-by: Jordan Abrahams-Whitehead Tested-by: George Burgess Commit-Queue: George Burgess --- auto_delete_nightly_test_data.py | 76 +++++++++++++++++++++------------------- 1 file changed, 40 insertions(+), 36 deletions(-) diff --git a/auto_delete_nightly_test_data.py b/auto_delete_nightly_test_data.py index 5243adcf..5dae9e05 100755 --- a/auto_delete_nightly_test_data.py +++ b/auto_delete_nightly_test_data.py @@ -15,13 +15,13 @@ import argparse import datetime import os import re -import shlex import shutil import stat import sys import time import traceback from pathlib import Path +from typing import Callable from cros_utils import command_executer, constants, misc @@ -105,29 +105,17 @@ def ProcessArguments(argv): return options -def IsChromeOsTmpDeletionCandidate(file_name: str): - """Returns whether the given basename can be deleted from a chroot's /tmp.""" - name_prefixes = ( - 'test_that_', - 'cros-update', - 'CrAU_temp_data', - ) - if any(file_name.startswith(x) for x in name_prefixes): - return True - # Remove files that look like `tmpABCDEFGHI`. - return len(file_name) == 9 and file_name.startswith('tmp') - - -def CleanChromeOsTmpFiles(chroot_tmp, days_to_preserve, dry_run): - # Clean chroot/tmp/test_that_* and chroot/tmp/tmpxxxxxx, that were last - # accessed more than specified time. +def RemoveAllSubdirsMatchingPredicate( + base_dir: Path, days_to_preserve: int, dry_run: bool, + is_name_removal_worthy: Callable[[str], bool]) -> bool: + """Removes all subdirs of base_dir that match the given predicate.""" secs_to_preserve = 60 * 60 * 24 * days_to_preserve now = time.time() remove_older_than_time = now - secs_to_preserve had_errors = False - for file in Path(chroot_tmp).iterdir(): - if not IsChromeOsTmpDeletionCandidate(file.name): + for file in base_dir.iterdir(): + if not is_name_removal_worthy(file.name): continue try: @@ -169,6 +157,31 @@ def CleanChromeOsTmpFiles(chroot_tmp, days_to_preserve, dry_run): return 1 if had_errors else 0 +def IsChromeOsTmpDeletionCandidate(file_name: str): + """Returns whether the given basename can be deleted from a chroot's /tmp.""" + name_prefixes = ( + 'test_that_', + 'cros-update', + 'CrAU_temp_data', + ) + if any(file_name.startswith(x) for x in name_prefixes): + return True + # Remove files that look like `tmpABCDEFGHI`. + return len(file_name) == 9 and file_name.startswith('tmp') + + +def CleanChromeOsTmpFiles(chroot_tmp: str, days_to_preserve: int, + dry_run: bool) -> int: + # Clean chroot/tmp/test_that_* and chroot/tmp/tmpxxxxxx, that were last + # accessed more than specified time ago. + return RemoveAllSubdirsMatchingPredicate( + Path(chroot_tmp), + days_to_preserve, + dry_run, + IsChromeOsTmpDeletionCandidate, + ) + + def CleanChromeOsImageFiles(chroot_tmp, subdir_suffix, days_to_preserve, dry_run): # Clean files that were last accessed more than the specified time. 
@@ -247,23 +260,14 @@ def CleanOldCLs(days_to_preserve='1', dry_run=False): print_to_console=False) -def CleanChromeTelemetryTmpFiles(dry_run): - rv = 0 - ce = command_executer.GetCommandExecuter() - tmp_dir = os.path.join(constants.CROSTC_WORKSPACE, 'chromeos', '.cache', - 'distfiles', 'chrome-src-internal', 'src', 'tmp') - cmd = f'rm -fr {shlex.quote(tmp_dir)}/tmp*telemetry_Crosperf' - if dry_run: - print(f'Going to execute:\n{cmd}') - else: - rv = ce.RunCommand(cmd, print_to_console=False) - if rv == 0: - print(f'Successfully cleaned chrome tree tmp directory ' - f'{tmp_dir!r} .') - else: - print(f'Some directories were not removed under chrome tree ' - f'tmp directory {tmp_dir!r}.') - return rv +def CleanChromeTelemetryTmpFiles(dry_run: bool) -> int: + return RemoveAllSubdirsMatchingPredicate( + Path(constants.CROSTC_WORKSPACE), + days_to_preserve=1, + dry_run=dry_run, + is_name_removal_worthy=lambda x: x.startswith('tmp') and x.endswith( + 'telemetry_Crosperf'), + ) def Main(argv): -- cgit v1.2.3 From b20378020a1863ea6dd6b4becc31ab20ff0ec28c Mon Sep 17 00:00:00 2001 From: Michael Benfield Date: Fri, 29 Apr 2022 16:37:52 +0000 Subject: Remove "-Wno-unused-but-set-variable" from config.go. BUG=b:227655984 TEST=CQ Change-Id: Ib8928cab96f8e3b4dfe50e5d89645e8ec5e905d4 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3613766 Auto-Submit: Michael Benfield Reviewed-by: Manoj Gupta Tested-by: Manoj Gupta Commit-Queue: Manoj Gupta --- compiler_wrapper/config.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/compiler_wrapper/config.go b/compiler_wrapper/config.go index fdd17763..0d24c9a9 100644 --- a/compiler_wrapper/config.go +++ b/compiler_wrapper/config.go @@ -154,7 +154,6 @@ var crosHardenedConfig = &config{ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", }, newWarningsDir: "/tmp/fatal_clang_warnings", triciumNitsDir: "/tmp/linting_output/clang-tidy", @@ -196,7 +195,6 @@ var crosNonHardenedConfig = &config{ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", }, newWarningsDir: "/tmp/fatal_clang_warnings", triciumNitsDir: "/tmp/linting_output/clang-tidy", @@ -245,7 +243,6 @@ var crosHostConfig = &config{ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", }, newWarningsDir: "/tmp/fatal_clang_warnings", triciumNitsDir: "/tmp/linting_output/clang-tidy", -- cgit v1.2.3 From d7e5c879964eee1a3c9d04d592a75ccbc06d2875 Mon Sep 17 00:00:00 2001 From: Manoj Gupta Date: Mon, 2 May 2022 09:31:15 -0700 Subject: Sync unwindlib option with current llvm Use unwindlib=libunwind to match current production wrapper. 
BUG=None TEST=go test Change-Id: I6cdace48ed871ac87a093addf2b96b86ee5153c2 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3621381 Reviewed-by: George Burgess Tested-by: Manoj Gupta Commit-Queue: George Burgess Auto-Submit: Manoj Gupta --- compiler_wrapper/config.go | 2 +- .../testdata/cros_hardened_golden/bisect.json | 6 +++--- .../clang_ftrapv_maincc_target_specific.json | 18 ++++++++-------- .../clang_maincc_target_specific.json | 18 ++++++++-------- .../testdata/cros_hardened_golden/clang_path.json | 24 +++++++++++----------- .../cros_hardened_golden/clang_sanitizer_args.json | 16 +++++++-------- .../cros_hardened_golden/clang_specific_args.json | 8 ++++---- .../clang_sysroot_wrapper_common.json | 12 +++++------ .../testdata/cros_hardened_golden/clangtidy.json | 16 +++++++-------- .../cros_hardened_golden/force_disable_werror.json | 10 ++++----- .../cros_hardened_golden/gcc_clang_syntax.json | 8 ++++---- .../cros_hardened_llvmnext_golden/bisect.json | 6 +++--- .../cros_hardened_llvmnext_golden/clang_path.json | 24 +++++++++++----------- .../cros_hardened_llvmnext_golden/clangtidy.json | 16 +++++++-------- .../force_disable_werror.json | 10 ++++----- .../gcc_clang_syntax.json | 8 ++++---- .../cros_hardened_noccache_golden/bisect.json | 6 +++--- .../cros_hardened_noccache_golden/clang_path.json | 24 +++++++++++----------- .../cros_hardened_noccache_golden/clangtidy.json | 16 +++++++-------- .../force_disable_werror.json | 10 ++++----- .../gcc_clang_syntax.json | 8 ++++---- 21 files changed, 133 insertions(+), 133 deletions(-) diff --git a/compiler_wrapper/config.go b/compiler_wrapper/config.go index 0d24c9a9..301dda6d 100644 --- a/compiler_wrapper/config.go +++ b/compiler_wrapper/config.go @@ -140,7 +140,7 @@ var crosHardenedConfig = &config{ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json index 05aea31f..39d71b8a 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json @@ -34,7 +34,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -107,7 +107,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -183,7 +183,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json index 2b04d0c9..91953b22 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json @@ -25,7 +25,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - 
"--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -88,7 +88,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -151,7 +151,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -214,7 +214,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -276,7 +276,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -338,7 +338,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -400,7 +400,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -462,7 +462,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -524,7 +524,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json index ea834174..ffe91340 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json @@ -24,7 +24,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -85,7 +85,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -146,7 +146,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -207,7 +207,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -267,7 +267,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + 
"--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -327,7 +327,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -387,7 +387,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -447,7 +447,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -507,7 +507,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json index 52d4184a..a01dc081 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json @@ -24,7 +24,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -88,7 +88,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -152,7 +152,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -216,7 +216,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -287,7 +287,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -363,7 +363,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -434,7 +434,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -500,7 +500,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -561,7 +561,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", 
"-fexperimental-new-pass-manager", @@ -622,7 +622,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -683,7 +683,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -747,7 +747,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json index b71a8805..e0a20367 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json @@ -26,7 +26,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -89,7 +89,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -152,7 +152,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -215,7 +215,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -277,7 +277,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -340,7 +340,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -403,7 +403,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -465,7 +465,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json index d10e6cd0..7a437ae5 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json @@ -34,7 +34,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", 
"-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -104,7 +104,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -167,7 +167,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -230,7 +230,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json index 42209185..4bde083a 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json @@ -62,7 +62,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -126,7 +126,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -183,7 +183,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -243,7 +243,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -304,7 +304,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -363,7 +363,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json index ea1363e7..dd3bc358 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json @@ -39,7 +39,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -81,7 +81,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -153,7 +153,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", 
"-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -196,7 +196,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -270,7 +270,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -316,7 +316,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -391,7 +391,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -434,7 +434,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json index 4df81578..c5d2f9e3 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json @@ -27,7 +27,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -91,7 +91,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -141,7 +141,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -209,7 +209,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -259,7 +259,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json index e2037e26..0140c961 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json @@ -24,7 +24,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -112,7 +112,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + 
"--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -195,7 +195,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -259,7 +259,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json index 05aea31f..39d71b8a 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json @@ -34,7 +34,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -107,7 +107,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -183,7 +183,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json index 52d4184a..a01dc081 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json @@ -24,7 +24,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -88,7 +88,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -152,7 +152,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -216,7 +216,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -287,7 +287,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -363,7 +363,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -434,7 +434,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - 
"--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -500,7 +500,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -561,7 +561,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -622,7 +622,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -683,7 +683,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -747,7 +747,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json index ea1363e7..dd3bc358 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json @@ -39,7 +39,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -81,7 +81,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -153,7 +153,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -196,7 +196,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -270,7 +270,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -316,7 +316,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -391,7 +391,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -434,7 +434,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", 
"-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json index 4df81578..c5d2f9e3 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json @@ -27,7 +27,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -91,7 +91,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -141,7 +141,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -209,7 +209,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -259,7 +259,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json index e2037e26..0140c961 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json @@ -24,7 +24,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -112,7 +112,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -195,7 +195,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -259,7 +259,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json index a676dc6e..a58cafd1 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json @@ -33,7 +33,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", 
"-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -102,7 +102,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -174,7 +174,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json index cc6d1ada..2040a621 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json @@ -23,7 +23,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -81,7 +81,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -139,7 +139,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -197,7 +197,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -262,7 +262,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -333,7 +333,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -403,7 +403,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -463,7 +463,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -518,7 +518,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -573,7 +573,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -628,7 +628,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ 
-686,7 +686,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json index ea1363e7..dd3bc358 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json @@ -39,7 +39,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -81,7 +81,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -153,7 +153,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -196,7 +196,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -270,7 +270,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -316,7 +316,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -391,7 +391,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -434,7 +434,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json index 2c34edb4..1b6e1d98 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json @@ -26,7 +26,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -84,7 +84,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -128,7 +128,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", 
"-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -190,7 +190,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -234,7 +234,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json index 368eb855..3742f017 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json @@ -24,7 +24,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -107,7 +107,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -190,7 +190,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", @@ -254,7 +254,7 @@ "-Wno-unknown-warning-option", "-Wno-section", "-fuse-ld=lld", - "--unwindlib=libgcc", + "--unwindlib=libunwind", "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", "-fexperimental-new-pass-manager", -- cgit v1.2.3 From ffc970dd9a9f56a3ff1937698ed24a286de02ea7 Mon Sep 17 00:00:00 2001 From: Denis Nikitin Date: Thu, 5 May 2022 10:59:54 -0700 Subject: afdo_metadata: Publish the new kernel profiles Update chromeos-kernel-4.4 Update chromeos-kernel-4.14 Update chromeos-kernel-4.19 Update chromeos-kernel-5.4 Update chromeos-kernel-5.10 BUG=None TEST=Verified in kernel-release-afdo-verify-orchestrator Change-Id: I194f5ce08d98b80a6549c03a21b4b801b90ab96d Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3630341 Auto-Submit: Denis Nikitin Tested-by: Denis Nikitin Reviewed-by: Manoj Gupta Commit-Queue: Manoj Gupta --- afdo_metadata/kernel_afdo.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json index bb31f898..332f2852 100644 --- a/afdo_metadata/kernel_afdo.json +++ b/afdo_metadata/kernel_afdo.json @@ -1,17 +1,17 @@ { "chromeos-kernel-4_4": { - "name": "R103-14682.0-1650879122" + "name": "R103-14695.25-1651484165" }, "chromeos-kernel-4_14": { - "name": "R103-14695.11-1650879494" + "name": "R103-14695.25-1651484511" }, "chromeos-kernel-4_19": { - "name": "R103-14695.11-1650879114" + "name": "R103-14695.25-1651483920" }, "chromeos-kernel-5_4": { - "name": "R103-14695.11-1650879283" + "name": "R103-14695.25-1651483959" }, "chromeos-kernel-5_10": { - "name": "R103-14695.11-1650879392" + "name": "R103-14695.25-1651484279" } } -- cgit v1.2.3 From 9754d4deca45c4e747972d9866f88670f233badc Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Mon, 9 May 2022 11:15:32 -0700 Subject: auto_delete_nightly_test_data: 
fix a few bugs I forgot to copy the entire path here. Further, this caller of RemoveAllSubdirsMatchingPredicate _used_ to simply remove everything regardless of atime. Seems that crosperf accesses these things semi-often, so we can't have an `atime` check here. `mtime` also works, but just prefer the old behavior anyway. BUG=b:231976533 TEST=Ran on chrotomation. Change-Id: If833ea4bb07f80555890241ff9d2102079d0f8aa Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3635698 Reviewed-by: Manoj Gupta Commit-Queue: George Burgess Tested-by: George Burgess --- auto_delete_nightly_test_data.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/auto_delete_nightly_test_data.py b/auto_delete_nightly_test_data.py index 5dae9e05..ca721b24 100755 --- a/auto_delete_nightly_test_data.py +++ b/auto_delete_nightly_test_data.py @@ -14,16 +14,18 @@ __author__ = 'shenhan@google.com (Han Shen)' import argparse import datetime import os +from pathlib import Path import re import shutil import stat import sys import time import traceback -from pathlib import Path from typing import Callable -from cros_utils import command_executer, constants, misc +from cros_utils import command_executer +from cros_utils import constants +from cros_utils import misc DIR_BY_WEEKDAY = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun') NIGHTLY_TESTS_WORKSPACE = os.path.join(constants.CROSTC_WORKSPACE, @@ -129,7 +131,7 @@ def RemoveAllSubdirsMatchingPredicate( if not stat.S_ISDIR(st.st_mode): continue - if st.st_atime >= remove_older_than_time: + if secs_to_preserve and st.st_atime >= remove_older_than_time: continue if dry_run: @@ -261,9 +263,11 @@ def CleanOldCLs(days_to_preserve='1', dry_run=False): def CleanChromeTelemetryTmpFiles(dry_run: bool) -> int: + tmp_dir = (Path(constants.CROSTC_WORKSPACE) / 'chromeos' / '.cache' / + 'distfiles' / 'chrome-src-internal' / 'src' / 'tmp') return RemoveAllSubdirsMatchingPredicate( - Path(constants.CROSTC_WORKSPACE), - days_to_preserve=1, + tmp_dir, + days_to_preserve=0, dry_run=dry_run, is_name_removal_worthy=lambda x: x.startswith('tmp') and x.endswith( 'telemetry_Crosperf'), -- cgit v1.2.3 From 3f02e131985582cfe1069452132cd711f9ac1637 Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Mon, 9 May 2022 15:19:37 -0700 Subject: check-presubmit: use `isort` with `yapf` This requires a few things: - Running `isort` to begin with - Syncing with Chromite's `yapf` config, so `yapf` and `isort` don't disagree - ...Which itself requires upgrading `yapf`, since these disagree with `yapf`'s current configuration in toolchain-utils. The most recent chromite configuration requires v0.31.0, and depot_tools only has v0.27.0. This CL does all of these. 
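For illustration, the new isort check below boils down to invoking chromite's pinned isort in check-only mode. A minimal standalone sketch (the chromite paths and flags come from the diff that follows; the helper name and file list are placeholders):

    # Sketch: run chromite's isort in check-only mode, the way check_isort does.
    import subprocess
    from pathlib import Path

    CHROMITE = Path('/mnt/host/source/chromite')


    def isort_is_clean(python_files):
      """Returns True iff `python_files` pass chromite's isort configuration."""
      cmd = [
          str(CHROMITE / 'scripts' / 'isort'),
          '-c',  # Check only; isort exits non-zero on badly sorted imports.
          f"--settings-file={CHROMITE / '.isort.cfg'}",
      ] + list(python_files)
      return subprocess.run(cmd, check=False).returncode == 0

The real check also scrapes isort's output for the offending file names so it can suggest an autofix: the same invocation without `-c` rewrites the imports in place.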
BUG=b:231985625 TEST=Ran on a few files in toolchain-utils Change-Id: I43414abea7b75790b129e78708903ed90dae6ab0 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3636138 Reviewed-by: Jordan Abrahams-Whitehead Commit-Queue: George Burgess Tested-by: George Burgess --- .style.yapf | 2 + toolchain_utils_githooks/check-presubmit.py | 182 ++++++++++++++++++++-------- 2 files changed, 135 insertions(+), 49 deletions(-) diff --git a/.style.yapf b/.style.yapf index c4472bda..0baa978f 100644 --- a/.style.yapf +++ b/.style.yapf @@ -1,4 +1,6 @@ [style] based_on_style = pep8 +split_before_bitwise_operator = false blank_line_before_module_docstring = true +blank_lines_between_top_level_imports_and_variables = 2 indent_width = 2 diff --git a/toolchain_utils_githooks/check-presubmit.py b/toolchain_utils_githooks/check-presubmit.py index fe9648a6..5302cf80 100755 --- a/toolchain_utils_githooks/check-presubmit.py +++ b/toolchain_utils_githooks/check-presubmit.py @@ -12,6 +12,7 @@ import datetime import multiprocessing import multiprocessing.pool import os +from pathlib import Path import re import shlex import shutil @@ -111,10 +112,87 @@ def get_check_result_or_catch( ) -def check_yapf(toolchain_utils_root: str, +def check_isort(toolchain_utils_root: str, + python_files: t.Iterable[str]) -> CheckResult: + """Subchecker of check_py_format. Checks python file formats with isort""" + chromite = Path('/mnt/host/source/chromite') + isort = chromite / 'scripts' / 'isort' + config_file = chromite / '.isort.cfg' + + if not (isort.exists() and config_file.exists()): + return CheckResult( + ok=True, + output='isort not found; skipping', + autofix_commands=[], + ) + + config_file_flag = f'--settings-file={config_file}' + command = [isort, '-c', config_file_flag] + python_files + exit_code, stdout_and_stderr = run_command_unchecked( + command, cwd=toolchain_utils_root) + + # isort fails when files have broken formatting. + if not exit_code: + return CheckResult( + ok=True, + output='', + autofix_commands=[], + ) + + bad_files = [] + bad_file_re = re.compile( + r'^ERROR: (.*) Imports are incorrectly sorted and/or formatted\.$') + for line in stdout_and_stderr.splitlines(): + m = bad_file_re.match(line) + if m: + file_name, = m.groups() + bad_files.append(file_name.strip()) + + if not bad_files: + return CheckResult( + ok=False, + output='`%s` failed; stdout/stderr:\n%s' % + (escape_command(command), stdout_and_stderr), + autofix_commands=[], + ) + + autofix = [str(isort), config_file_flag] + bad_files + return CheckResult( + ok=False, + output='The following file(s) have formatting errors: %s' % bad_files, + autofix_commands=[autofix], + ) + + +def check_yapf(toolchain_utils_root: str, yapf: Path, python_files: t.Iterable[str]) -> CheckResult: """Subchecker of check_py_format. Checks python file formats with yapf""" - command = ['yapf', '-d'] + python_files + # Folks have been bitten by accidentally using multiple yapf versions in the + # past. This is an issue, since newer versions of yapf sometimes format + # things differently. Make the version obvious. + command = [yapf, '--version'] + exit_code, stdout_and_stderr = run_command_unchecked( + command, cwd=toolchain_utils_root) + if exit_code: + return CheckResult( + ok=False, + output=f'Failed getting yapf version; stdstreams: {stdout_and_stderr}', + autofix_commands=[], + ) + + yapf_version = stdout_and_stderr.strip() + # This is the depot_tools version. If folks have this, things will break for + # them. 
Ask them to upgrade. Peephole this rather than making some + # complicated version-parsing scheme, since it's likely that everyone with a + # too-old version is using specifically the depot_tools one. + if yapf_version == 'yapf 0.27.0': + return CheckResult( + ok=False, + output='YAPF is too old; please upgrade it: `pip install --user yapf`', + autofix_commands=[], + ) + + command = [yapf, '-d'] + python_files exit_code, stdout_and_stderr = run_command_unchecked( command, cwd=toolchain_utils_root) @@ -122,7 +200,7 @@ def check_yapf(toolchain_utils_root: str, if exit_code == 0: return CheckResult( ok=True, - output='', + output=f'Using {yapf_version}, no issues were found.', autofix_commands=[], ) @@ -142,15 +220,16 @@ def check_yapf(toolchain_utils_root: str, if not bad_files: return CheckResult( ok=False, - output='`%s` failed; stdout/stderr:\n%s' % (escape_command(command), - stdout_and_stderr), + output='`%s` failed; stdout/stderr:\n%s' % + (escape_command(command), stdout_and_stderr), autofix_commands=[], ) - autofix = ['yapf', '-i'] + bad_files + autofix = [str(yapf), '-i'] + bad_files return CheckResult( ok=False, - output='The following file(s) have formatting errors: %s' % bad_files, + output=f'Using {yapf_version}, these file(s) have formatting errors: ' + f'{bad_files}', autofix_commands=[autofix], ) @@ -175,13 +254,13 @@ def check_python_file_headers(python_files: t.Iterable[str]) -> CheckResult: autofix = [] output = [] if add_hashbang: - output.append( - 'The following files have no #!, but need one: %s' % add_hashbang) + output.append('The following files have no #!, but need one: %s' % + add_hashbang) autofix.append(['sed', '-i', '1i#!/usr/bin/env python3'] + add_hashbang) if remove_hashbang: - output.append( - "The following files have a #!, but shouldn't: %s" % remove_hashbang) + output.append("The following files have a #!, but shouldn't: %s" % + remove_hashbang) autofix.append(['sed', '-i', '1d'] + remove_hashbang) if not output: @@ -199,16 +278,20 @@ def check_python_file_headers(python_files: t.Iterable[str]) -> CheckResult: def check_py_format(toolchain_utils_root: str, thread_pool: multiprocessing.pool.ThreadPool, - files: t.Iterable[str]) -> CheckResult: + files: t.Iterable[str]) -> t.List[CheckResult]: """Runs yapf on files to check for style bugs. Also checks for #!s.""" - yapf = 'yapf' - if not has_executable_on_path(yapf): - return CheckResult( - ok=False, - output="yapf isn't available on your $PATH. Please either " - 'enter a chroot, or place depot_tools on your $PATH.', - autofix_commands=[], - ) + pip_yapf = Path('~/.local/bin/yapf').expanduser() + if pip_yapf.exists(): + yapf = pip_yapf + else: + yapf = 'yapf' + if not has_executable_on_path(yapf): + return CheckResult( + ok=False, + output="yapf isn't available on your $PATH. 
Please either " + 'enter a chroot, or place depot_tools on your $PATH.', + autofix_commands=[], + ) python_files = [f for f in remove_deleted_files(files) if f.endswith('.py')] if not python_files: @@ -221,9 +304,12 @@ def check_py_format(toolchain_utils_root: str, tasks = [ ('check_yapf', thread_pool.apply_async(check_yapf, + (toolchain_utils_root, yapf, python_files))), + ('check_isort', + thread_pool.apply_async(check_isort, (toolchain_utils_root, python_files))), ('check_file_headers', - thread_pool.apply_async(check_python_file_headers, (python_files,))), + thread_pool.apply_async(check_python_file_headers, (python_files, ))), ] return [(name, get_check_result_or_catch(task)) for name, task in tasks] @@ -243,10 +329,10 @@ def check_cros_lint( # lint` (if it's been made available to us), or we try a mix of # pylint+golint. def try_run_cros_lint(cros_binary: str) -> t.Optional[CheckResult]: - exit_code, output = run_command_unchecked( - [cros_binary, 'lint', '--'] + files, - toolchain_utils_root, - env=fixed_env) + exit_code, output = run_command_unchecked([cros_binary, 'lint', '--'] + + files, + toolchain_utils_root, + env=fixed_env) # This is returned specifically if cros couldn't find the ChromeOS tree # root. @@ -272,8 +358,9 @@ def check_cros_lint( tasks = [] def check_result_from_command(command: t.List[str]) -> CheckResult: - exit_code, output = run_command_unchecked( - command, toolchain_utils_root, env=fixed_env) + exit_code, output = run_command_unchecked(command, + toolchain_utils_root, + env=fixed_env) return CheckResult( ok=exit_code == 0, output=output, @@ -356,8 +443,8 @@ def check_go_format(toolchain_utils_root, _thread_pool, files): if exit_code: return CheckResult( ok=False, - output='%s failed; stdout/stderr:\n%s' % (escape_command(command), - output), + output='%s failed; stdout/stderr:\n%s' % + (escape_command(command), output), autofix_commands=[], ) @@ -467,7 +554,8 @@ def try_autofix(all_autofix_commands: t.List[t.List[str]], anything_succeeded = False for command in all_autofix_commands: - exit_code, output = run_command_unchecked(command, cwd=toolchain_utils_root) + exit_code, output = run_command_unchecked(command, + cwd=toolchain_utils_root) if exit_code: print('*** Autofix command `%s` exited with code %d; stdout/stderr:' % @@ -547,32 +635,28 @@ def maybe_reexec_inside_chroot(autofix: bool, files: t.List[str]) -> None: os.execvp(args[0], args) -# FIXME(crbug.com/980719): we probably want a better way of handling this. For -# now, as a workaround, ensure we have all dependencies installed as a part of -# presubmits. pip and scipy are fast enough to install (they take <1min -# combined on my machine), so hoooopefully users won't get too impatient. 
-def ensure_scipy_installed() -> None: +def ensure_pip_deps_installed() -> None: if not has_executable_on_path('pip'): print('Autoinstalling `pip`...') subprocess.check_call(['sudo', 'emerge', 'dev-python/pip']) - exit_code = subprocess.call( - ['python3', '-c', 'import scipy'], - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - ) - if exit_code != 0: - print('Autoinstalling `scipy`...') - subprocess.check_call(['pip', 'install', '--user', 'scipy']) + for package in ('scipy', 'yapf'): + exit_code = subprocess.call( + ['python3', '-c', f'import {package}'], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + if exit_code != 0: + print(f'Autoinstalling `{package}`...') + subprocess.check_call(['pip', 'install', '--user', package]) def main(argv: t.List[str]) -> int: parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument( - '--no_autofix', - dest='autofix', - action='store_false', - help="Don't run any autofix commands.") + parser.add_argument('--no_autofix', + dest='autofix', + action='store_false', + help="Don't run any autofix commands.") parser.add_argument( '--no_enter_chroot', dest='enter_chroot', @@ -591,7 +675,7 @@ def main(argv: t.List[str]) -> int: # If you ask for --no_enter_chroot, you're on your own for installing these # things. if is_in_chroot(): - ensure_scipy_installed() + ensure_pip_deps_installed() files = [os.path.abspath(f) for f in files] -- cgit v1.2.3 From 8ba3a041ada2078f7966d6cf3fb3e0904b1d606d Mon Sep 17 00:00:00 2001 From: Ryan Beltran Date: Mon, 9 May 2022 21:23:55 +0000 Subject: compiler_wrapper: remove -checks=* for tidy This CL removes `-checks=*` from the Clang Tidy flags when WITH_TIDY=tricium, which allows projects to provide their own configuration files for clang tidy. BUG=b:187790543 TEST=manually tested Change-Id: Ic6893a09146f071874f21f7bedb04a1f85e83284 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3635256 Reviewed-by: George Burgess Commit-Queue: Ryan Beltran Tested-by: Ryan Beltran --- compiler_wrapper/clang_tidy_flag.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/compiler_wrapper/clang_tidy_flag.go b/compiler_wrapper/clang_tidy_flag.go index 8c3712d6..2d2565e0 100644 --- a/compiler_wrapper/clang_tidy_flag.go +++ b/compiler_wrapper/clang_tidy_flag.go @@ -114,9 +114,7 @@ func runClangTidyForTricium(env env, clangCmd *command, cSrcFile, fixesDir strin fixesFilePath := f.Name() + ".yaml" fixesMetadataPath := f.Name() + ".json" - // FIXME(gbiv): Remove `-checks=*` when testing is complete; we should defer to .clang-tidy - // files, which are both more expressive and more approachable than `-checks=*`. - extraTidyFlags = append(extraTidyFlags, "-checks=*", "--export-fixes="+fixesFilePath) + extraTidyFlags = append(extraTidyFlags, "--export-fixes="+fixesFilePath) clangTidyCmd, err := calcClangTidyInvocation(env, clangCmd, cSrcFile, extraTidyFlags...) if err != nil { return fmt.Errorf("calculating tidy invocation: %v", err) -- cgit v1.2.3 From 58bd94f0cf7bba9b91e24d75e991f098723d81ee Mon Sep 17 00:00:00 2001 From: Manoj Gupta Date: Mon, 9 May 2022 20:21:03 -0700 Subject: compiler_wrapper: Disable warning implicit-function-declaration Wimplicit-function-declaration has been promoted to be a default error even with Wno-error. Disable the error by default since it is breaking a lot of packages.
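For context, the failure mode this avoids can be reproduced outside the wrapper; a hypothetical sketch (assumes a clang new enough to promote this diagnostic is on $PATH; not part of this CL):

    # Sketch: newer clangs reject calls to undeclared functions by default;
    # -Wno-error=implicit-function-declaration demotes the error back to a
    # warning so existing packages keep building.
    import os
    import subprocess
    import tempfile

    C_SRC = 'int main(void) { return foo(); }\n'  # foo() is never declared.


    def compiles(extra_flags):
      with tempfile.NamedTemporaryFile('w', suffix='.c', delete=False) as f:
        f.write(C_SRC)
      try:
        cmd = ['clang', '-c', f.name, '-o', os.devnull] + list(extra_flags)
        return subprocess.run(cmd, check=False).returncode == 0
      finally:
        os.unlink(f.name)


    # On such a clang: compiles([]) is False, while
    # compiles(['-Wno-error=implicit-function-declaration']) is True.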
BUG=b:230345382 TEST=cq Change-Id: Ib46b0b47a3e1dea3797739370a4d9cf92a4d25f9 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3637117 Tested-by: Manoj Gupta Auto-Submit: Manoj Gupta Reviewed-by: George Burgess Commit-Queue: Manoj Gupta --- compiler_wrapper/config.go | 6 +++ .../testdata/cros_clang_host_golden/bisect.json | 12 +++--- .../clang_ftrapv_maincc_target_specific.json | 36 ++++++++-------- .../cros_clang_host_golden/clang_host_wrapper.json | 4 +- .../clang_maincc_target_specific.json | 36 ++++++++-------- .../cros_clang_host_golden/clang_path.json | 48 +++++++++++----------- .../clang_sanitizer_args.json | 32 +++++++-------- .../clang_specific_args.json | 16 ++++---- .../testdata/cros_clang_host_golden/clangtidy.json | 32 +++++++-------- .../force_disable_werror.json | 16 ++++---- .../testdata/cros_hardened_golden/bisect.json | 6 +-- .../clang_ftrapv_maincc_target_specific.json | 18 ++++---- .../clang_maincc_target_specific.json | 18 ++++---- .../testdata/cros_hardened_golden/clang_path.json | 24 +++++------ .../cros_hardened_golden/clang_sanitizer_args.json | 16 ++++---- .../cros_hardened_golden/clang_specific_args.json | 8 ++-- .../clang_sysroot_wrapper_common.json | 12 +++--- .../testdata/cros_hardened_golden/clangtidy.json | 16 ++++---- .../cros_hardened_golden/force_disable_werror.json | 10 ++--- .../cros_hardened_golden/gcc_clang_syntax.json | 8 ++-- .../cros_hardened_llvmnext_golden/bisect.json | 6 +-- .../cros_hardened_llvmnext_golden/clang_path.json | 24 +++++------ .../cros_hardened_llvmnext_golden/clangtidy.json | 16 ++++---- .../force_disable_werror.json | 10 ++--- .../gcc_clang_syntax.json | 8 ++-- .../cros_hardened_noccache_golden/bisect.json | 6 +-- .../cros_hardened_noccache_golden/clang_path.json | 24 +++++------ .../cros_hardened_noccache_golden/clangtidy.json | 16 ++++---- .../force_disable_werror.json | 10 ++--- .../gcc_clang_syntax.json | 8 ++-- .../testdata/cros_nonhardened_golden/bisect.json | 6 +-- .../clang_ftrapv_maincc_target_specific.json | 18 ++++---- .../clang_maincc_target_specific.json | 18 ++++---- .../cros_nonhardened_golden/clang_path.json | 24 +++++------ .../clang_sanitizer_args.json | 16 ++++---- .../clang_specific_args.json | 8 ++-- .../clang_sysroot_wrapper_common.json | 12 +++--- .../cros_nonhardened_golden/clangtidy.json | 16 ++++---- .../force_disable_werror.json | 10 ++--- .../cros_nonhardened_golden/gcc_clang_syntax.json | 8 ++-- 40 files changed, 322 insertions(+), 316 deletions(-) diff --git a/compiler_wrapper/config.go b/compiler_wrapper/config.go index 301dda6d..e6708ebf 100644 --- a/compiler_wrapper/config.go +++ b/compiler_wrapper/config.go @@ -130,6 +130,7 @@ var crosHardenedConfig = &config{ // crbug.com/1103065: -grecord-gcc-switches pollutes the Goma cache; // removed that flag for now. // Temporarily disable Wdeprecated-declarations. b/193860318 + // b/230345382: Temporarily disable Wimplicit-function-declaration. clangFlags: []string{ "-Qunused-arguments", @@ -146,6 +147,7 @@ var crosHardenedConfig = &config{ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", }, // Temporarily disable Wdeprecated-copy. b/191479033 @@ -175,6 +177,7 @@ var crosNonHardenedConfig = &config{ // Temporarily add no-unknown-warning-option to deal with old clang versions. // Temporarily disable Wsection since kernel gets a bunch of these. chromium:778867 // Temporarily disable Wdeprecated-declarations. 
b/193860318 + // b/230345382: Temporarily disable Wimplicit-function-declaration. clangFlags: []string{ "-Qunused-arguments", "-fdebug-default-version=5", @@ -187,6 +190,7 @@ var crosNonHardenedConfig = &config{ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", }, // Temporarily disable Wdeprecated-copy. b/191479033 @@ -221,6 +225,7 @@ var crosHostConfig = &config{ // crbug.com/1103065: -grecord-gcc-switches pollutes the Goma cache; // removed that flag for now. // Temporarily disable Wdeprecated-declarations. b/193860318 + // b/230345382: Temporarily disable Wimplicit-function-declaration. clangFlags: []string{ "-Qunused-arguments", "-fno-addrsig", @@ -235,6 +240,7 @@ var crosHostConfig = &config{ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", }, // Temporarily disable Wdeprecated-copy. b/191479033 diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json b/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json index e237c7c7..b8abc564 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json @@ -37,14 +37,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ], "env_updates": [ "PYTHONPATH=/somepath/test_binary" @@ -92,14 +92,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ], "env_updates": [ "PYTHONPATH=/somepath/test_binary" @@ -150,14 +150,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ], "env_updates": [ "PYTHONPATH=/somepath/test_binary" diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json index 07fccc6d..5e903aa7 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json @@ -28,14 +28,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", 
"-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -70,14 +70,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -112,14 +112,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -154,14 +154,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -196,14 +196,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -238,14 +238,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -280,14 +280,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -322,14 +322,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -364,14 +364,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", 
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json index a221605e..c2df9112 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json @@ -27,14 +27,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json index 2130d528..77953e7b 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json @@ -27,14 +27,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -68,14 +68,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -109,14 +109,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -150,14 +150,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -191,14 +191,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", 
"-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -232,14 +232,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -273,14 +273,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -314,14 +314,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -355,14 +355,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json index 43ae728a..ac5f51f9 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json @@ -27,14 +27,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -71,14 +71,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] }, "stdout": "somemessage", @@ -115,14 +115,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", 
"-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -159,14 +159,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -210,6 +210,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-resource-dir=someResourceDir", @@ -218,8 +219,7 @@ "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -266,6 +266,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-resource-dir=someResourceDir", @@ -274,8 +275,7 @@ "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -322,6 +322,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-resource-dir=someResourceDir", @@ -330,8 +331,7 @@ "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] }, "stdout": "somemessage", @@ -368,14 +368,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -409,14 +409,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -450,14 +450,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -491,14 +491,14 @@ "-fexperimental-new-pass-manager", 
"-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -535,14 +535,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json index b8b28cd9..65200861 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json @@ -29,6 +29,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fsanitize=kernel-address", @@ -36,8 +37,7 @@ "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -73,6 +73,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fsanitize=kernel-address", @@ -80,8 +81,7 @@ "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -117,6 +117,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fsanitize=kernel-address", @@ -124,8 +125,7 @@ "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -161,6 +161,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fsanitize=kernel-address", @@ -168,8 +169,7 @@ "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -204,6 +204,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fsanitize=fuzzer", @@ -211,8 +212,7 @@ "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", 
- "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -248,6 +248,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fsanitize=address", @@ -256,8 +257,7 @@ "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -292,6 +292,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fsanitize=address", @@ -299,8 +300,7 @@ "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -335,6 +335,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fprofile-instr-generate", @@ -342,8 +343,7 @@ "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json index 7c4afd32..99ac75c8 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json @@ -37,6 +37,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-mno-movbe", @@ -51,8 +52,7 @@ "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -87,6 +87,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-Wno-#warnings", @@ -94,8 +95,7 @@ "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -130,6 +130,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-Wno-error=uninitialized", @@ -137,8 +138,7 @@ "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -173,6 +173,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", 
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-someflag", @@ -180,8 +181,7 @@ "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json index f678ba64..1371acb1 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json @@ -43,14 +43,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } }, @@ -71,14 +71,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -129,14 +129,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } }, @@ -157,14 +157,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -217,14 +217,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] }, "stdout": "somemessage", @@ -248,14 +248,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -309,14 +309,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", 
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } }, @@ -337,14 +337,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] }, "stdout": "somemessage", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json index f5a77714..821682ab 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json @@ -30,14 +30,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] } } @@ -74,14 +74,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] }, "stderr": "-Werror originalerror", @@ -104,6 +104,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -111,7 +112,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-Wno-error", "-Wno-error=poison-system-directories" ] @@ -152,14 +152,14 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", - "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable" + "-Wno-deprecated-copy" ] }, "stderr": "-Werror originalerror", @@ -182,6 +182,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -189,7 +190,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-Wno-error", "-Wno-error=poison-system-directories" ] diff --git a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json 
b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json index 39d71b8a..4fe52f3b 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json @@ -40,6 +40,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -55,7 +56,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -113,6 +113,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -128,7 +129,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -189,6 +189,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -204,7 +205,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json index 91953b22..d5b9a8de 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json @@ -31,6 +31,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -47,7 +48,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -94,6 +94,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -110,7 +111,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -157,6 +157,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -173,7 +174,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -220,6 +220,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", 
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -236,7 +237,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv7m-cros-linux-gnu" @@ -282,6 +282,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -298,7 +299,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv7m-cros-eabi" @@ -344,6 +344,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -360,7 +361,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv7m-cros-win-gnu" @@ -406,6 +406,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -422,7 +423,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv8m-cros-linux-gnu" @@ -468,6 +468,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -484,7 +485,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv8m-cros-eabi" @@ -530,6 +530,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -546,7 +547,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv8m-cros-win-gnu" diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json index ffe91340..69b68478 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json @@ -30,6 +30,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -45,7 +46,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -91,6 +91,7 @@ "-fexperimental-new-pass-manager", 
"-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -106,7 +107,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -152,6 +152,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -167,7 +168,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -213,6 +213,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -228,7 +229,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv7m-cros-linux-gnu" @@ -273,6 +273,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -288,7 +289,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv7m-cros-eabi" @@ -333,6 +333,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -348,7 +349,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv7m-cros-win-gnu" @@ -393,6 +393,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -408,7 +409,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv8m-cros-linux-gnu" @@ -453,6 +453,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -468,7 +469,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv8m-cros-eabi" @@ -513,6 +513,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -528,7 +529,6 @@ "-Wno-compound-token-split-by-space", 
"-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv8m-cros-win-gnu" diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json index a01dc081..622dec30 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json @@ -30,6 +30,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -45,7 +46,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -94,6 +94,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -109,7 +110,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -158,6 +158,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -173,7 +174,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -222,6 +222,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -237,7 +238,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -293,6 +293,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -310,7 +311,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -369,6 +369,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -386,7 +387,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -440,6 +440,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -457,7 +458,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", 
"-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -506,6 +506,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -521,7 +522,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -567,6 +567,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -582,7 +583,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -628,6 +628,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -643,7 +644,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -689,6 +689,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -704,7 +705,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../bin", "-target", @@ -753,6 +753,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -768,7 +769,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json index e0a20367..3b24f1b0 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json @@ -32,6 +32,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -47,7 +48,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -95,6 +95,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -110,7 +111,6 @@ "-Wno-compound-token-split-by-space", 
"-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -158,6 +158,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -173,7 +174,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -221,6 +221,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -236,7 +237,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -283,6 +283,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -298,7 +299,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -346,6 +346,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -362,7 +363,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -409,6 +409,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -424,7 +425,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -471,6 +471,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -487,7 +488,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json index 7a437ae5..998df08e 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json @@ -40,6 +40,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -63,7 +64,6 @@ 
"-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -110,6 +110,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -126,7 +127,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -173,6 +173,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -189,7 +190,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -236,6 +236,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -252,7 +253,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json index 4bde083a..40e814fa 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json @@ -68,6 +68,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -83,7 +84,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -132,6 +132,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -147,7 +148,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -189,6 +189,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -202,7 +203,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -249,6 +249,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", 
"-D_FORTIFY_SOURCE=2", @@ -263,7 +264,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -310,6 +310,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-D_FORTIFY_SOURCE=2", @@ -324,7 +325,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv7a-cros-linux-gnueabihf" @@ -369,6 +369,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -385,7 +386,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json index dd3bc358..ffcf9100 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json @@ -45,6 +45,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -60,7 +61,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -87,6 +87,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -102,7 +103,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -159,6 +159,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -174,7 +175,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -202,6 +202,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -217,7 +218,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -276,6 +276,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ 
-291,7 +292,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -322,6 +322,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -337,7 +338,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -397,6 +397,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -412,7 +413,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -440,6 +440,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -455,7 +456,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json index c5d2f9e3..5761f8d5 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json @@ -33,6 +33,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -48,7 +49,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -97,6 +97,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -112,7 +113,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -147,6 +147,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -162,7 +163,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -215,6 +215,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", 
@@ -230,7 +231,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -265,6 +265,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -280,7 +281,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json index 0140c961..667446b9 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json @@ -30,6 +30,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -45,7 +46,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -118,6 +118,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -133,7 +134,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -201,6 +201,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -216,7 +217,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -265,6 +265,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -280,7 +281,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json index 39d71b8a..4fe52f3b 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json @@ -40,6 +40,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -55,7 +56,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - 
"-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -113,6 +113,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -128,7 +129,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -189,6 +189,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -204,7 +205,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json index a01dc081..622dec30 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json @@ -30,6 +30,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -45,7 +46,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -94,6 +94,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -109,7 +110,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -158,6 +158,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -173,7 +174,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -222,6 +222,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -237,7 +238,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -293,6 +293,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -310,7 +311,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - 
"-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -369,6 +369,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -386,7 +387,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -440,6 +440,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -457,7 +458,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -506,6 +506,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -521,7 +522,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -567,6 +567,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -582,7 +583,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -628,6 +628,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -643,7 +644,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -689,6 +689,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -704,7 +705,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../bin", "-target", @@ -753,6 +753,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -768,7 +769,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json index dd3bc358..ffcf9100 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json 
+++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json @@ -45,6 +45,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -60,7 +61,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -87,6 +87,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -102,7 +103,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -159,6 +159,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -174,7 +175,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -202,6 +202,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -217,7 +218,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -276,6 +276,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -291,7 +292,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -322,6 +322,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -337,7 +338,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -397,6 +397,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -412,7 +413,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -440,6 +440,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", 
"-fstack-protector-strong", @@ -455,7 +456,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json index c5d2f9e3..5761f8d5 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json @@ -33,6 +33,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -48,7 +49,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -97,6 +97,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -112,7 +113,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -147,6 +147,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -162,7 +163,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -215,6 +215,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -230,7 +231,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -265,6 +265,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -280,7 +281,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json index 0140c961..667446b9 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json @@ -30,6 +30,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -45,7 +46,6 @@ 
"-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -118,6 +118,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -133,7 +134,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -201,6 +201,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -216,7 +217,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -265,6 +265,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -280,7 +281,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json index a58cafd1..7549ed68 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json @@ -39,6 +39,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -54,7 +55,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -108,6 +108,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -123,7 +124,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -180,6 +180,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -195,7 +196,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json index 2040a621..9ad3125c 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json +++ 
b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json @@ -29,6 +29,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -44,7 +45,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -87,6 +87,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -102,7 +103,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -145,6 +145,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -160,7 +161,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -203,6 +203,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -218,7 +219,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -268,6 +268,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -285,7 +286,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -339,6 +339,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -356,7 +357,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -409,6 +409,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -426,7 +427,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -469,6 +469,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", 
@@ -484,7 +485,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -524,6 +524,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -539,7 +540,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -579,6 +579,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -594,7 +595,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -634,6 +634,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -649,7 +650,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../bin", "-target", @@ -692,6 +692,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -707,7 +708,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json index dd3bc358..ffcf9100 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json @@ -45,6 +45,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -60,7 +61,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -87,6 +87,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -102,7 +103,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -159,6 +159,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ 
-174,7 +175,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -202,6 +202,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -217,7 +218,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -276,6 +276,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -291,7 +292,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -322,6 +322,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -337,7 +338,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -397,6 +397,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -412,7 +413,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -440,6 +440,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -455,7 +456,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json index 1b6e1d98..5c033bfc 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json @@ -32,6 +32,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -47,7 +48,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -90,6 +90,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", 
"-fcommon", "-fstack-protector-strong", @@ -105,7 +106,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -134,6 +134,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -149,7 +150,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -196,6 +196,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -211,7 +212,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -240,6 +240,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -255,7 +256,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json index 3742f017..362b9fc1 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json @@ -30,6 +30,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -45,7 +46,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -113,6 +113,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -128,7 +129,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -196,6 +196,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -211,7 +212,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -260,6 +260,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", 
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -275,7 +276,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json b/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json index 6c46fee8..343a9506 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json @@ -37,6 +37,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -46,7 +47,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -101,6 +101,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -110,7 +111,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -168,6 +168,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -177,7 +178,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json index 86960cc9..b80818c3 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json @@ -28,6 +28,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -38,7 +39,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -82,6 +82,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-eabi-", @@ -92,7 +93,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -136,6 +136,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + 
"-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-win-gnu-", @@ -146,7 +147,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -190,6 +190,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -201,7 +202,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv7m-cros-linux-gnu" @@ -244,6 +244,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/armv7m-cros-eabi-", @@ -254,7 +255,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv7m-cros-eabi" @@ -297,6 +297,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -308,7 +309,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv7m-cros-win-gnu" @@ -351,6 +351,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -362,7 +363,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv8m-cros-linux-gnu" @@ -405,6 +405,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/armv8m-cros-eabi-", @@ -415,7 +416,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv8m-cros-eabi" @@ -458,6 +458,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -469,7 +470,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv8m-cros-win-gnu" diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json index 69af166d..27580387 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json @@ -27,6 +27,7 
@@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -36,7 +37,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -79,6 +79,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-eabi-", @@ -88,7 +89,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -131,6 +131,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-win-gnu-", @@ -140,7 +141,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -183,6 +183,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -193,7 +194,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv7m-cros-linux-gnu" @@ -235,6 +235,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/armv7m-cros-eabi-", @@ -244,7 +245,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv7m-cros-eabi" @@ -286,6 +286,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -296,7 +297,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv7m-cros-win-gnu" @@ -338,6 +338,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -348,7 +349,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv8m-cros-linux-gnu" @@ -390,6 +390,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", 
"--prefix=../../bin/armv8m-cros-eabi-", @@ -399,7 +400,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv8m-cros-eabi" @@ -441,6 +441,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -451,7 +452,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv8m-cros-win-gnu" diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json index afc79511..0d5ce1fe 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json @@ -27,6 +27,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -36,7 +37,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -82,6 +82,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -91,7 +92,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -137,6 +137,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -146,7 +147,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -192,6 +192,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -201,7 +202,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -254,6 +254,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -265,7 +266,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -321,6 +321,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + 
"-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -332,7 +333,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -383,6 +383,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -394,7 +395,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -440,6 +440,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -449,7 +450,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -492,6 +492,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-", @@ -501,7 +502,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -544,6 +544,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-", @@ -553,7 +554,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -596,6 +596,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../bin/x86_64-cros-linux-gnu-", @@ -605,7 +606,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../bin", "-target", @@ -651,6 +651,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../bin/x86_64-cros-linux-gnu-", @@ -660,7 +661,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json index 3e140d59..36e1d385 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json +++ 
b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json @@ -29,6 +29,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -39,7 +40,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -84,6 +84,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -94,7 +95,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -139,6 +139,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -149,7 +150,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -194,6 +194,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -204,7 +205,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -248,6 +248,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -258,7 +259,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -303,6 +303,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -314,7 +315,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -358,6 +358,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -368,7 +369,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -412,6 +412,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", 
"-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -422,7 +423,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json index fbe038e7..b867b42b 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json @@ -37,6 +37,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -54,7 +55,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -98,6 +98,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -108,7 +109,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -152,6 +152,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -162,7 +163,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -206,6 +206,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -216,7 +217,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json index f2ccadb9..595634bb 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json @@ -60,6 +60,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -69,7 +70,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -115,6 
+115,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -124,7 +125,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -163,6 +163,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -172,7 +173,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -216,6 +216,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-fno-stack-protector", @@ -227,7 +228,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -271,6 +271,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -283,7 +284,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-B../../bin", "-target", "armv7a-cros-linux-gnueabihf" @@ -325,6 +325,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -335,7 +336,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json index dc641c96..8a9edab6 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json @@ -42,6 +42,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -51,7 +52,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -75,6 +75,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -84,7 +85,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", 
"-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -138,6 +138,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -147,7 +148,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -172,6 +172,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -181,7 +182,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -237,6 +237,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -246,7 +247,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -274,6 +274,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -283,7 +284,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -340,6 +340,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -349,7 +350,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -374,6 +374,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -383,7 +384,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json index 54b994cc..1a2bbd46 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json @@ -30,6 +30,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", 
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -39,7 +40,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -85,6 +85,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -94,7 +95,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -126,6 +126,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -135,7 +136,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -185,6 +185,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -194,7 +195,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -226,6 +226,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -235,7 +236,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json index 5234715d..bfd3e66c 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json @@ -27,6 +27,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -36,7 +37,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -101,6 +101,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -110,7 +111,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -170,6 
+170,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -179,7 +180,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", @@ -225,6 +225,7 @@ "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -234,7 +235,6 @@ "-Wno-compound-token-split-by-space", "-Wno-string-concatenation", "-Wno-deprecated-copy", - "-Wno-unused-but-set-variable", "-mno-movbe", "-B../../bin", "-target", -- cgit v1.2.3 From 24ac18e68a75cf876d7bb0e2880945085373668b Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Tue, 10 May 2022 13:02:14 -0700 Subject: compiler_wrapper: un-pointer-ify all configs We do nothing with these but immediately deref them. It's simpler (and should be infinitesimally faster) to just have values here. BUG=None TEST=go test Change-Id: I6df8eda8f36032e856f9abad3e090c62c9d6beb0 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3639684 Commit-Queue: George Burgess Reviewed-by: Jordan Abrahams-Whitehead Tested-by: George Burgess --- compiler_wrapper/config.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/compiler_wrapper/config.go b/compiler_wrapper/config.go index e6708ebf..4e9c4f10 100644 --- a/compiler_wrapper/config.go +++ b/compiler_wrapper/config.go @@ -83,13 +83,13 @@ func getConfig(configName string, useCCache bool, useLlvmNext bool, version stri cfg := config{} switch configName { case "cros.hardened": - cfg = *crosHardenedConfig + cfg = crosHardenedConfig case "cros.nonhardened": - cfg = *crosNonHardenedConfig + cfg = crosNonHardenedConfig case "cros.host": - cfg = *crosHostConfig + cfg = crosHostConfig case "android": - cfg = *androidConfig + cfg = androidConfig default: return nil, newErrorwithSourceLocf("unknown config name: %s", configName) } @@ -105,7 +105,7 @@ func getConfig(configName string, useCCache bool, useLlvmNext bool, version stri // Full hardening. // Temporarily disable function splitting because of chromium:434751. -var crosHardenedConfig = &config{ +var crosHardenedConfig = config{ clangRootRelPath: "../..", gccRootRelPath: "../../../../..", // Pass "-fcommon" till the packages are fixed to work with new clang/gcc @@ -163,7 +163,7 @@ var crosHardenedConfig = &config{ } // Flags to be added to non-hardened toolchain. -var crosNonHardenedConfig = &config{ +var crosNonHardenedConfig = config{ clangRootRelPath: "../..", gccRootRelPath: "../../../../..", commonFlags: []string{}, @@ -206,7 +206,7 @@ var crosNonHardenedConfig = &config{ } // Flags to be added to host toolchain. 
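
To make the change concrete, here is a standalone Go sketch of the pattern, using a reduced stand-in for the wrapper's config struct and illustrative variable names (hardenedPtr and hardenedVal are not the wrapper's identifiers). Every caller copied the struct anyway, so a plain package-level value behaves identically and simply drops the dereference:

    package main

    import "fmt"

    // Reduced stand-in for the config struct in compiler_wrapper/config.go,
    // which carries many more fields (flags, relative paths, and so on).
    type config struct {
        clangRootRelPath string
    }

    // Before: a package-level pointer, dereferenced at every use.
    var hardenedPtr = &config{clangRootRelPath: "../.."}

    // After: a plain value, copied directly.
    var hardenedVal = config{clangRootRelPath: "../.."}

    func main() {
        cfgOld := *hardenedPtr // dereference, then copy
        cfgNew := hardenedVal  // just copy
        // Both paths yield an identical, independent shallow copy.
        fmt.Println(cfgOld == cfgNew) // true
    }

Note that getConfig still returns &cfg: taking the address of its local copy works the same whether the package-level template is a pointer or a value.
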
-var crosHostConfig = &config{ +var crosHostConfig = config{ isHostWrapper: true, clangRootRelPath: "../..", gccRootRelPath: "../..", @@ -255,7 +255,7 @@ var crosHostConfig = &config{ crashArtifactsDir: "/tmp/clang_crash_diagnostics", } -var androidConfig = &config{ +var androidConfig = config{ isHostWrapper: false, isAndroidWrapper: true, gccRootRelPath: "./", -- cgit v1.2.3 From ea4a522b15b2e1b22ad7b97c8b91f0791c426edc Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Tue, 10 May 2022 11:44:23 -0700 Subject: compiler_wrapper: factor clangPostFlags into a function; sort Since order doesn't matter, make this sorted. Otherwise, these are all shared, so no point in repeating them. BUG=b:232114933 TEST=go test Change-Id: Ide3c620de9018fed3fe8d898b21b3d5d4f42700f Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3639281 Reviewed-by: Jordan Abrahams-Whitehead Commit-Queue: George Burgess Tested-by: George Burgess --- compiler_wrapper/config.go | 33 ++++------ .../testdata/cros_clang_host_golden/bisect.json | 18 +++--- .../clang_ftrapv_maincc_target_specific.json | 54 ++++++++-------- .../cros_clang_host_golden/clang_host_wrapper.json | 6 +- .../clang_maincc_target_specific.json | 54 ++++++++-------- .../cros_clang_host_golden/clang_path.json | 72 +++++++++++----------- .../clang_sanitizer_args.json | 48 +++++++-------- .../clang_specific_args.json | 24 ++++---- .../testdata/cros_clang_host_golden/clangtidy.json | 48 +++++++-------- .../force_disable_werror.json | 26 ++++---- .../testdata/cros_hardened_golden/bisect.json | 12 ++-- .../clang_ftrapv_maincc_target_specific.json | 36 +++++------ .../clang_maincc_target_specific.json | 36 +++++------ .../testdata/cros_hardened_golden/clang_path.json | 48 +++++++-------- .../cros_hardened_golden/clang_sanitizer_args.json | 32 +++++----- .../cros_hardened_golden/clang_specific_args.json | 16 ++--- .../clang_sysroot_wrapper_common.json | 24 ++++---- .../testdata/cros_hardened_golden/clangtidy.json | 32 +++++----- .../cros_hardened_golden/force_disable_werror.json | 20 +++--- .../cros_hardened_golden/gcc_clang_syntax.json | 16 ++--- .../cros_hardened_llvmnext_golden/bisect.json | 12 ++-- .../cros_hardened_llvmnext_golden/clang_path.json | 48 +++++++-------- .../cros_hardened_llvmnext_golden/clangtidy.json | 32 +++++----- .../force_disable_werror.json | 20 +++--- .../gcc_clang_syntax.json | 16 ++--- .../cros_hardened_noccache_golden/bisect.json | 12 ++-- .../cros_hardened_noccache_golden/clang_path.json | 48 +++++++-------- .../cros_hardened_noccache_golden/clangtidy.json | 32 +++++----- .../force_disable_werror.json | 20 +++--- .../gcc_clang_syntax.json | 16 ++--- .../testdata/cros_nonhardened_golden/bisect.json | 12 ++-- .../clang_ftrapv_maincc_target_specific.json | 36 +++++------ .../clang_maincc_target_specific.json | 36 +++++------ .../cros_nonhardened_golden/clang_path.json | 48 +++++++-------- .../clang_sanitizer_args.json | 32 +++++----- .../clang_specific_args.json | 16 ++--- .../clang_sysroot_wrapper_common.json | 24 ++++---- .../cros_nonhardened_golden/clangtidy.json | 32 +++++----- .../force_disable_werror.json | 20 +++--- .../cros_nonhardened_golden/gcc_clang_syntax.json | 16 ++--- 40 files changed, 588 insertions(+), 595 deletions(-) diff --git a/compiler_wrapper/config.go b/compiler_wrapper/config.go index 4e9c4f10..7e6e4896 100644 --- a/compiler_wrapper/config.go +++ b/compiler_wrapper/config.go @@ -103,6 +103,16 @@ func getConfig(configName string, useCCache bool, useLlvmNext bool, 
version stri return &cfg, nil } +func crosCommonClangPostFlags() []string { + // Temporarily disable Wdeprecated-copy. b/191479033 + return []string{ + "-Wno-compound-token-split-by-space", + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", + } +} + // Full hardening. // Temporarily disable function splitting because of chromium:434751. var crosHardenedConfig = config{ @@ -150,13 +160,7 @@ var crosHardenedConfig = config{ "-Wno-error=implicit-function-declaration", }, - // Temporarily disable Wdeprecated-copy. b/191479033 - clangPostFlags: []string{ - "-Wno-implicit-int-float-conversion", - "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy", - }, + clangPostFlags: crosCommonClangPostFlags(), newWarningsDir: "/tmp/fatal_clang_warnings", triciumNitsDir: "/tmp/linting_output/clang-tidy", crashArtifactsDir: "/tmp/clang_crash_diagnostics", @@ -193,13 +197,7 @@ var crosNonHardenedConfig = config{ "-Wno-error=implicit-function-declaration", }, - // Temporarily disable Wdeprecated-copy. b/191479033 - clangPostFlags: []string{ - "-Wno-implicit-int-float-conversion", - "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy", - }, + clangPostFlags: crosCommonClangPostFlags(), newWarningsDir: "/tmp/fatal_clang_warnings", triciumNitsDir: "/tmp/linting_output/clang-tidy", crashArtifactsDir: "/tmp/clang_crash_diagnostics", @@ -244,12 +242,7 @@ var crosHostConfig = config{ }, // Temporarily disable Wdeprecated-copy. b/191479033 - clangPostFlags: []string{ - "-Wno-implicit-int-float-conversion", - "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy", - }, + clangPostFlags: crosCommonClangPostFlags(), newWarningsDir: "/tmp/fatal_clang_warnings", triciumNitsDir: "/tmp/linting_output/clang-tidy", crashArtifactsDir: "/tmp/clang_crash_diagnostics", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json b/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json index b8abc564..7c93a125 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json @@ -41,10 +41,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ], "env_updates": [ "PYTHONPATH=/somepath/test_binary" @@ -96,10 +96,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ], "env_updates": [ "PYTHONPATH=/somepath/test_binary" @@ -154,10 +154,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ], "env_updates": [ "PYTHONPATH=/somepath/test_binary" diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json 
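
A side effect of the factoring, not called out in the commit message but worth spelling out: because each config now obtains the list from a function call rather than sharing one package-level slice, every call returns a freshly allocated slice, so no config can mutate another's flags through a shared backing array. A standalone sketch (the function body matches the patch; main is illustrative only):

    package main

    import (
        "fmt"
        "sort"
    )

    // One definition of the clang post-flags shared by the CrOS configs,
    // kept sorted because their order does not matter.
    func crosCommonClangPostFlags() []string {
        return []string{
            "-Wno-compound-token-split-by-space",
            "-Wno-deprecated-copy",
            "-Wno-implicit-int-float-conversion",
            "-Wno-string-concatenation",
        }
    }

    func main() {
        a := crosCommonClangPostFlags()
        b := crosCommonClangPostFlags()
        a[0] = "-Wmutated" // a and b have distinct backing arrays...
        fmt.Println(b[0])  // ...so b still prints the original first flag
        fmt.Println(sort.StringsAreSorted(b)) // true: the list stays sorted
    }

The golden-testdata churn below is purely the resulting reordering of those four flags in every recorded command line.
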
b/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json index 5e903aa7..d694f056 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json @@ -32,10 +32,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -74,10 +74,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -116,10 +116,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -158,10 +158,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -200,10 +200,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -242,10 +242,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -284,10 +284,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -326,10 +326,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -368,10 +368,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json 
b/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json index c2df9112..bbea704d 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json @@ -31,10 +31,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json index 77953e7b..30b310bb 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json @@ -31,10 +31,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -72,10 +72,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -113,10 +113,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -154,10 +154,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -195,10 +195,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -236,10 +236,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -277,10 +277,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -318,10 +318,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", 
"-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -359,10 +359,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json index ac5f51f9..f7737c4f 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json @@ -31,10 +31,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -75,10 +75,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] }, "stdout": "somemessage", @@ -119,10 +119,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -163,10 +163,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -216,10 +216,10 @@ "-resource-dir=someResourceDir", "--gcc-toolchain=/usr", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -272,10 +272,10 @@ "-resource-dir=someResourceDir", "--gcc-toolchain=/usr", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -328,10 +328,10 @@ "-resource-dir=someResourceDir", "--gcc-toolchain=/usr", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] }, "stdout": "somemessage", @@ -372,10 +372,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + 
"-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -413,10 +413,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -454,10 +454,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -495,10 +495,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -539,10 +539,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json index 65200861..15ce2829 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json @@ -34,10 +34,10 @@ "-fcommon", "-fsanitize=kernel-address", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -78,10 +78,10 @@ "-fcommon", "-fsanitize=kernel-address", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -122,10 +122,10 @@ "-fcommon", "-fsanitize=kernel-address", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -166,10 +166,10 @@ "-fcommon", "-fsanitize=kernel-address", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -209,10 +209,10 @@ "-fcommon", "-fsanitize=fuzzer", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -254,10 +254,10 @@ "-fsanitize=address", "-fprofile-instr-generate", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - 
"-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -297,10 +297,10 @@ "-fcommon", "-fsanitize=address", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -340,10 +340,10 @@ "-fcommon", "-fprofile-instr-generate", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json index 99ac75c8..bc7752f2 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json @@ -49,10 +49,10 @@ "-Woverride-init", "-Wunsafe-loop-optimizations", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -92,10 +92,10 @@ "-fcommon", "-Wno-#warnings", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -135,10 +135,10 @@ "-fcommon", "-Wno-error=uninitialized", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -178,10 +178,10 @@ "-fcommon", "-someflag", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json index 1371acb1..807a9d60 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json @@ -47,10 +47,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } }, @@ -75,10 +75,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -133,10 +133,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + 
"-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } }, @@ -161,10 +161,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -221,10 +221,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] }, "stdout": "somemessage", @@ -252,10 +252,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -313,10 +313,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } }, @@ -341,10 +341,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] }, "stdout": "somemessage", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json index 821682ab..31d16d6b 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json @@ -34,10 +34,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] } } @@ -78,10 +78,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - "-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] }, "stderr": "-Werror originalerror", @@ -108,10 +108,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-Wno-error", "-Wno-error=poison-system-directories" ] @@ -156,10 +156,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", - 
"-Wno-deprecated-copy" + "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation" ] }, "stderr": "-Werror originalerror", @@ -186,10 +186,10 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-Wno-error", "-Wno-error=poison-system-directories" ] diff --git a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json index 4fe52f3b..66048f3e 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json @@ -52,10 +52,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -125,10 +125,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -201,10 +201,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json index d5b9a8de..3b40bb54 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json @@ -44,10 +44,10 @@ "-ftrapv", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -107,10 +107,10 @@ "-ftrapv", "main.cc", "-L/usr/x86_64-cros-eabi/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -170,10 +170,10 @@ "-ftrapv", "main.cc", "-L/usr/x86_64-cros-win-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -233,10 +233,10 @@ "-ftrapv", "main.cc", "-L/usr/armv7m-cros-linux-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + 
"-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv7m-cros-linux-gnu" @@ -295,10 +295,10 @@ "-ftrapv", "main.cc", "-L/usr/armv7m-cros-eabi/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv7m-cros-eabi" @@ -357,10 +357,10 @@ "-ftrapv", "main.cc", "-L/usr/armv7m-cros-win-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv7m-cros-win-gnu" @@ -419,10 +419,10 @@ "-ftrapv", "main.cc", "-L/usr/armv8m-cros-linux-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv8m-cros-linux-gnu" @@ -481,10 +481,10 @@ "-ftrapv", "main.cc", "-L/usr/armv8m-cros-eabi/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv8m-cros-eabi" @@ -543,10 +543,10 @@ "-ftrapv", "main.cc", "-L/usr/armv8m-cros-win-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv8m-cros-win-gnu" diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json index 69b68478..2c8568f1 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json @@ -42,10 +42,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -103,10 +103,10 @@ "--prefix=../../bin/x86_64-cros-eabi-", "main.cc", "-L/usr/x86_64-cros-eabi/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -164,10 +164,10 @@ "--prefix=../../bin/x86_64-cros-win-gnu-", "main.cc", "-L/usr/x86_64-cros-win-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -225,10 +225,10 @@ "--prefix=../../bin/armv7m-cros-linux-gnu-", "main.cc", "-L/usr/armv7m-cros-linux-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + 
"-Wno-string-concatenation", "-B../../bin", "-target", "armv7m-cros-linux-gnu" @@ -285,10 +285,10 @@ "--prefix=../../bin/armv7m-cros-eabi-", "main.cc", "-L/usr/armv7m-cros-eabi/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv7m-cros-eabi" @@ -345,10 +345,10 @@ "--prefix=../../bin/armv7m-cros-win-gnu-", "main.cc", "-L/usr/armv7m-cros-win-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv7m-cros-win-gnu" @@ -405,10 +405,10 @@ "--prefix=../../bin/armv8m-cros-linux-gnu-", "main.cc", "-L/usr/armv8m-cros-linux-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv8m-cros-linux-gnu" @@ -465,10 +465,10 @@ "--prefix=../../bin/armv8m-cros-eabi-", "main.cc", "-L/usr/armv8m-cros-eabi/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv8m-cros-eabi" @@ -525,10 +525,10 @@ "--prefix=../../bin/armv8m-cros-win-gnu-", "main.cc", "-L/usr/armv8m-cros-win-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv8m-cros-win-gnu" diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json index 622dec30..33b280ea 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json @@ -42,10 +42,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -106,10 +106,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -170,10 +170,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -234,10 +234,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + 
"-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -307,10 +307,10 @@ "--gcc-toolchain=/usr", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -383,10 +383,10 @@ "--gcc-toolchain=/usr", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -454,10 +454,10 @@ "--gcc-toolchain=/usr", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -518,10 +518,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -579,10 +579,10 @@ "--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -640,10 +640,10 @@ "--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -701,10 +701,10 @@ "--prefix=../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../bin", "-target", @@ -765,10 +765,10 @@ "--prefix=../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json index 3b24f1b0..4817cc2a 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json @@ -44,10 +44,10 @@ "-fsanitize=kernel-address", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - 
"-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -107,10 +107,10 @@ "-fsanitize=kernel-address", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -170,10 +170,10 @@ "-fsanitize=kernel-address", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -233,10 +233,10 @@ "-fsanitize=kernel-address", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -295,10 +295,10 @@ "-fsanitize=fuzzer", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -359,10 +359,10 @@ "-fprofile-instr-generate", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -421,10 +421,10 @@ "-fsanitize=address", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -484,10 +484,10 @@ "-fprofile-instr-generate", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json index 998df08e..0b61ab7d 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json @@ -60,10 +60,10 @@ "-Wunsafe-loop-optimizations", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -123,10 +123,10 @@ "-Wno-#warnings", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + 
"-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -186,10 +186,10 @@ "-Wno-error=uninitialized", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -249,10 +249,10 @@ "-someflag", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json index 40e814fa..6cbb7e3a 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json @@ -80,10 +80,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -144,10 +144,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -199,10 +199,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -260,10 +260,10 @@ "-D__KERNEL__", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -321,10 +321,10 @@ "-D__KERNEL__", "main.cc", "-L/usr/armv7a-cros-linux-gnueabihf/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv7a-cros-linux-gnueabihf" @@ -382,10 +382,10 @@ "--sysroot=xyz", "main.cc", "-Lxyz/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json index ffcf9100..090f3c1a 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json @@ -57,10 +57,10 @@ 
"--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -99,10 +99,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -171,10 +171,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -214,10 +214,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -288,10 +288,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -334,10 +334,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -409,10 +409,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -452,10 +452,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json index 5761f8d5..a17b11eb 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json @@ -45,10 +45,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + 
"-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -109,10 +109,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -159,10 +159,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -227,10 +227,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -277,10 +277,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json index 667446b9..124cf478 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json @@ -42,10 +42,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -130,10 +130,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -213,10 +213,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -277,10 +277,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json index 4fe52f3b..66048f3e 100644 --- 
a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json @@ -52,10 +52,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -125,10 +125,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -201,10 +201,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json index 622dec30..33b280ea 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json @@ -42,10 +42,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -106,10 +106,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -170,10 +170,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -234,10 +234,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -307,10 +307,10 @@ "--gcc-toolchain=/usr", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -383,10 +383,10 @@ "--gcc-toolchain=/usr", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", 
- "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -454,10 +454,10 @@ "--gcc-toolchain=/usr", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -518,10 +518,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -579,10 +579,10 @@ "--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -640,10 +640,10 @@ "--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -701,10 +701,10 @@ "--prefix=../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../bin", "-target", @@ -765,10 +765,10 @@ "--prefix=../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json index ffcf9100..090f3c1a 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json @@ -57,10 +57,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -99,10 +99,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -171,10 +171,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", 
"-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -214,10 +214,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -288,10 +288,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -334,10 +334,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -409,10 +409,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -452,10 +452,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json index 5761f8d5..a17b11eb 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json @@ -45,10 +45,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -109,10 +109,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -159,10 +159,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", 
"-mno-movbe", "-B../../bin", "-target", @@ -227,10 +227,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -277,10 +277,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json index 667446b9..124cf478 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json @@ -42,10 +42,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -130,10 +130,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -213,10 +213,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -277,10 +277,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json index 7549ed68..3ea27130 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json @@ -51,10 +51,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -120,10 +120,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + 
"-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -192,10 +192,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json index 9ad3125c..146c07cb 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json @@ -41,10 +41,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -99,10 +99,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -157,10 +157,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -215,10 +215,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -282,10 +282,10 @@ "--gcc-toolchain=/usr", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -353,10 +353,10 @@ "--gcc-toolchain=/usr", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -423,10 +423,10 @@ "--gcc-toolchain=/usr", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -481,10 +481,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", 
+ "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -536,10 +536,10 @@ "--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -591,10 +591,10 @@ "--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -646,10 +646,10 @@ "--prefix=../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../bin", "-target", @@ -704,10 +704,10 @@ "--prefix=../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json index ffcf9100..090f3c1a 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json @@ -57,10 +57,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -99,10 +99,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -171,10 +171,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -214,10 +214,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -288,10 +288,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - 
"-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -334,10 +334,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -409,10 +409,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -452,10 +452,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json index 5c033bfc..421cff26 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json @@ -44,10 +44,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -102,10 +102,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -146,10 +146,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -208,10 +208,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -252,10 +252,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git 
a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json index 362b9fc1..951786fd 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json @@ -42,10 +42,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -125,10 +125,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -208,10 +208,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -272,10 +272,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json b/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json index 343a9506..86f73ed2 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json @@ -43,10 +43,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -107,10 +107,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -174,10 +174,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json index b80818c3..1799eafe 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json +++ 
b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json @@ -35,10 +35,10 @@ "-ftrapv", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -89,10 +89,10 @@ "-ftrapv", "main.cc", "-L/usr/x86_64-cros-eabi/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -143,10 +143,10 @@ "-ftrapv", "main.cc", "-L/usr/x86_64-cros-win-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -198,10 +198,10 @@ "-ftrapv", "main.cc", "-L/usr/armv7m-cros-linux-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv7m-cros-linux-gnu" @@ -251,10 +251,10 @@ "-ftrapv", "main.cc", "-L/usr/armv7m-cros-eabi/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv7m-cros-eabi" @@ -305,10 +305,10 @@ "-ftrapv", "main.cc", "-L/usr/armv7m-cros-win-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv7m-cros-win-gnu" @@ -359,10 +359,10 @@ "-ftrapv", "main.cc", "-L/usr/armv8m-cros-linux-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv8m-cros-linux-gnu" @@ -412,10 +412,10 @@ "-ftrapv", "main.cc", "-L/usr/armv8m-cros-eabi/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv8m-cros-eabi" @@ -466,10 +466,10 @@ "-ftrapv", "main.cc", "-L/usr/armv8m-cros-win-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv8m-cros-win-gnu" diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json index 27580387..dff14d9b 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json @@ -33,10 +33,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", 
"-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -85,10 +85,10 @@ "--prefix=../../bin/x86_64-cros-eabi-", "main.cc", "-L/usr/x86_64-cros-eabi/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -137,10 +137,10 @@ "--prefix=../../bin/x86_64-cros-win-gnu-", "main.cc", "-L/usr/x86_64-cros-win-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -190,10 +190,10 @@ "--prefix=../../bin/armv7m-cros-linux-gnu-", "main.cc", "-L/usr/armv7m-cros-linux-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv7m-cros-linux-gnu" @@ -241,10 +241,10 @@ "--prefix=../../bin/armv7m-cros-eabi-", "main.cc", "-L/usr/armv7m-cros-eabi/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv7m-cros-eabi" @@ -293,10 +293,10 @@ "--prefix=../../bin/armv7m-cros-win-gnu-", "main.cc", "-L/usr/armv7m-cros-win-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv7m-cros-win-gnu" @@ -345,10 +345,10 @@ "--prefix=../../bin/armv8m-cros-linux-gnu-", "main.cc", "-L/usr/armv8m-cros-linux-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv8m-cros-linux-gnu" @@ -396,10 +396,10 @@ "--prefix=../../bin/armv8m-cros-eabi-", "main.cc", "-L/usr/armv8m-cros-eabi/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv8m-cros-eabi" @@ -448,10 +448,10 @@ "--prefix=../../bin/armv8m-cros-win-gnu-", "main.cc", "-L/usr/armv8m-cros-win-gnu/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv8m-cros-win-gnu" diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json index 0d5ce1fe..2d956742 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json @@ -33,10 +33,10 @@ 
"--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -88,10 +88,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -143,10 +143,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -198,10 +198,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -262,10 +262,10 @@ "--gcc-toolchain=/usr", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -329,10 +329,10 @@ "--gcc-toolchain=/usr", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -391,10 +391,10 @@ "--gcc-toolchain=/usr", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -446,10 +446,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -498,10 +498,10 @@ "--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -550,10 +550,10 @@ "--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + 
"-Wno-string-concatenation", "-mno-movbe", "-Ba/b/c/d/e/bin", "-target", @@ -602,10 +602,10 @@ "--prefix=../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../bin", "-target", @@ -657,10 +657,10 @@ "--prefix=../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json index 36e1d385..7cb4ed5f 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json @@ -36,10 +36,10 @@ "-fsanitize=kernel-address", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -91,10 +91,10 @@ "-fsanitize=kernel-address", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -146,10 +146,10 @@ "-fsanitize=kernel-address", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -201,10 +201,10 @@ "-fsanitize=kernel-address", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -255,10 +255,10 @@ "-fsanitize=fuzzer", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -311,10 +311,10 @@ "-fprofile-instr-generate", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -365,10 +365,10 @@ "-fsanitize=address", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", 
"-B../../bin", "-target", @@ -419,10 +419,10 @@ "-fprofile-instr-generate", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json index b867b42b..32716a5e 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json @@ -51,10 +51,10 @@ "-Wunsafe-loop-optimizations", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -105,10 +105,10 @@ "-Wno-#warnings", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -159,10 +159,10 @@ "-Wno-error=uninitialized", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -213,10 +213,10 @@ "-someflag", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json index 595634bb..78eb5885 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json @@ -66,10 +66,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -121,10 +121,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -169,10 +169,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", 
"-B../../bin", "-target", @@ -224,10 +224,10 @@ "-D__KERNEL__", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -280,10 +280,10 @@ "-D__KERNEL__", "main.cc", "-L/usr/armv7a-cros-linux-gnueabihf/usr/lib", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-B../../bin", "-target", "armv7a-cros-linux-gnueabihf" @@ -332,10 +332,10 @@ "--sysroot=xyz", "main.cc", "-Lxyz/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json index 8a9edab6..742e6a3a 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json @@ -48,10 +48,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -81,10 +81,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -144,10 +144,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -178,10 +178,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -243,10 +243,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -280,10 +280,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -346,10 +346,10 @@ 
"--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -380,10 +380,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json index 1a2bbd46..f3878b27 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json @@ -36,10 +36,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -91,10 +91,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -132,10 +132,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -191,10 +191,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -232,10 +232,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json index bfd3e66c..a428983e 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json @@ -33,10 +33,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", 
"-mno-movbe", "-B../../bin", "-target", @@ -107,10 +107,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -176,10 +176,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", @@ -231,10 +231,10 @@ "--prefix=../../bin/x86_64-cros-linux-gnu-", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", - "-Wno-implicit-int-float-conversion", "-Wno-compound-token-split-by-space", - "-Wno-string-concatenation", "-Wno-deprecated-copy", + "-Wno-implicit-int-float-conversion", + "-Wno-string-concatenation", "-mno-movbe", "-B../../bin", "-target", -- cgit v1.2.3 From 5cab4b87ccc87292502d74a222f4c005bc300ad8 Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Tue, 10 May 2022 11:53:14 -0700 Subject: compiler_wrapper: sort clangFlags; partition for dedup This CL sorts clang flags, and puts ones shared across all CrOS configurations into their own textual blocks. This should make factoring these out into their own function not require golden updates, which makes verifying the CL that actually _does_ that simpler. BUG=b:232114933 TEST=go test Change-Id: I7dc6110d680505d4ad2af98709730e85c386ae5d Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3639682 Reviewed-by: Jordan Abrahams-Whitehead Tested-by: George Burgess Commit-Queue: George Burgess --- compiler_wrapper/config.go | 53 ++--- .../testdata/cros_clang_host_golden/bisect.json | 54 +++--- .../clang_ftrapv_maincc_target_specific.json | 162 ++++++++-------- .../cros_clang_host_golden/clang_host_wrapper.json | 18 +- .../clang_maincc_target_specific.json | 162 ++++++++-------- .../cros_clang_host_golden/clang_path.json | 216 ++++++++++----------- .../clang_sanitizer_args.json | 144 +++++++------- .../clang_specific_args.json | 72 +++---- .../testdata/cros_clang_host_golden/clangtidy.json | 144 +++++++------- .../force_disable_werror.json | 90 ++++----- .../testdata/cros_hardened_golden/bisect.json | 54 +++--- .../clang_ftrapv_maincc_target_specific.json | 162 ++++++++-------- .../clang_maincc_target_specific.json | 162 ++++++++-------- .../testdata/cros_hardened_golden/clang_path.json | 216 ++++++++++----------- .../cros_hardened_golden/clang_sanitizer_args.json | 144 +++++++------- .../cros_hardened_golden/clang_specific_args.json | 72 +++---- .../clang_sysroot_wrapper_common.json | 108 +++++------ .../testdata/cros_hardened_golden/clangtidy.json | 144 +++++++------- .../cros_hardened_golden/force_disable_werror.json | 90 ++++----- .../cros_hardened_golden/gcc_clang_syntax.json | 72 +++---- .../cros_hardened_llvmnext_golden/bisect.json | 54 +++--- .../cros_hardened_llvmnext_golden/clang_path.json | 216 ++++++++++----------- .../cros_hardened_llvmnext_golden/clangtidy.json | 144 +++++++------- .../force_disable_werror.json | 90 ++++----- .../gcc_clang_syntax.json | 72 +++---- .../cros_hardened_noccache_golden/bisect.json | 54 +++--- .../cros_hardened_noccache_golden/clang_path.json | 216 ++++++++++----------- 
 .../cros_hardened_noccache_golden/clangtidy.json | 144 +++++++-------
 .../force_disable_werror.json | 90 ++++-----
 .../gcc_clang_syntax.json | 72 +++----
 .../testdata/cros_nonhardened_golden/bisect.json | 42 ++--
 .../clang_ftrapv_maincc_target_specific.json | 126 ++++++------
 .../clang_maincc_target_specific.json | 126 ++++++------
 .../cros_nonhardened_golden/clang_path.json | 168 ++++++++--------
 .../clang_sanitizer_args.json | 112 +++++------
 .../clang_specific_args.json | 56 +++---
 .../clang_sysroot_wrapper_common.json | 84 ++++----
 .../cros_nonhardened_golden/clangtidy.json | 112 +++++------
 .../force_disable_werror.json | 70 +++----
 .../cros_nonhardened_golden/gcc_clang_syntax.json | 56 +++---
 40 files changed, 2223 insertions(+), 2220 deletions(-)

diff --git a/compiler_wrapper/config.go b/compiler_wrapper/config.go
index 7e6e4896..13df2b9f 100644
--- a/compiler_wrapper/config.go
+++ b/compiler_wrapper/config.go
@@ -144,20 +144,21 @@ var crosHardenedConfig = config{
 
 	clangFlags: []string{
 		"-Qunused-arguments",
-		"-fno-addrsig",
-		"-fdebug-default-version=5",
+		"-Werror=poison-system-directories",
+		"-Wno-compound-token-split-by-macro",
+		"-Wno-deprecated-declarations",
+		"-Wno-error=implicit-function-declaration",
+		"-Wno-final-dtor-non-final-class",
 		"-Wno-tautological-constant-compare",
 		"-Wno-tautological-unsigned-enum-zero-compare",
 		"-Wno-unknown-warning-option",
+		"-fdebug-default-version=5",
+		"-fexperimental-new-pass-manager",
+
+		"--unwindlib=libunwind",
 		"-Wno-section",
+		"-fno-addrsig",
 		"-fuse-ld=lld",
-		"--unwindlib=libunwind",
-		"-Wno-final-dtor-non-final-class",
-		"-Werror=poison-system-directories",
-		"-fexperimental-new-pass-manager",
-		"-Wno-compound-token-split-by-macro",
-		"-Wno-deprecated-declarations",
-		"-Wno-error=implicit-function-declaration",
 	},
 	clangPostFlags: crosCommonClangPostFlags(),
 
@@ -184,17 +185,18 @@ var crosNonHardenedConfig = config{
 	// b/230345382: Temporarily disable Wimplicit-function-declaration.
 	clangFlags: []string{
 		"-Qunused-arguments",
-		"-fdebug-default-version=5",
-		"-Wno-tautological-constant-compare",
-		"-Wno-tautological-unsigned-enum-zero-compare",
-		"-Wno-unknown-warning-option",
-		"-Wno-section",
-		"-Wno-final-dtor-non-final-class",
 		"-Werror=poison-system-directories",
-		"-fexperimental-new-pass-manager",
 		"-Wno-compound-token-split-by-macro",
 		"-Wno-deprecated-declarations",
 		"-Wno-error=implicit-function-declaration",
+		"-Wno-final-dtor-non-final-class",
+		"-Wno-tautological-constant-compare",
+		"-Wno-tautological-unsigned-enum-zero-compare",
+		"-Wno-unknown-warning-option",
+		"-fdebug-default-version=5",
+		"-fexperimental-new-pass-manager",
+
+		"-Wno-section",
 	},
 	clangPostFlags: crosCommonClangPostFlags(),
 
@@ -226,19 +228,20 @@ var crosHostConfig = config{
 	// b/230345382: Temporarily disable Wimplicit-function-declaration.
 	clangFlags: []string{
 		"-Qunused-arguments",
-		"-fno-addrsig",
-		"-fuse-ld=lld",
-		"-fdebug-default-version=5",
-		"-Wno-unused-local-typedefs",
-		"-Wno-tautological-constant-compare",
-		"-Wno-tautological-unsigned-enum-zero-compare",
-		"-Wno-final-dtor-non-final-class",
 		"-Werror=poison-system-directories",
-		"-Wno-unknown-warning-option",
-		"-fexperimental-new-pass-manager",
 		"-Wno-compound-token-split-by-macro",
 		"-Wno-deprecated-declarations",
 		"-Wno-error=implicit-function-declaration",
+		"-Wno-final-dtor-non-final-class",
+		"-Wno-tautological-constant-compare",
+		"-Wno-tautological-unsigned-enum-zero-compare",
+		"-Wno-unknown-warning-option",
+		"-fdebug-default-version=5",
+		"-fexperimental-new-pass-manager",
+
+		"-Wno-unused-local-typedefs",
+		"-fno-addrsig",
+		"-fuse-ld=lld",
 	},
 
 	// Temporarily disable Wdeprecated-copy. b/191479033
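A note on the shape of the config.go change above: after sorting, every
clangFlags list begins with the same alphabetized block of flags shared by
the hardened, non-hardened, and host configs, separated by a blank line
from the config-specific flags that follow. That layout is what lets a
later CL hoist the shared block into one helper with no golden-file churn.
A minimal sketch of what such a helper in config.go could look like; the
name crosCommonClangFlags is a hypothetical assumption for illustration,
not something this CL adds:

// crosCommonClangFlags returns the clang flags shared by the hardened,
// non-hardened, and host configs. Hypothetical follow-up refactor sketch;
// not part of this CL.
func crosCommonClangFlags() []string {
	return []string{
		"-Qunused-arguments",
		"-Werror=poison-system-directories",
		"-Wno-compound-token-split-by-macro",
		"-Wno-deprecated-declarations",
		"-Wno-error=implicit-function-declaration",
		"-Wno-final-dtor-non-final-class",
		"-Wno-tautological-constant-compare",
		"-Wno-tautological-unsigned-enum-zero-compare",
		"-Wno-unknown-warning-option",
		"-fdebug-default-version=5",
		"-fexperimental-new-pass-manager",
	}
}

Each config would then append only its trailing block, e.g. the host
config's list becomes append(crosCommonClangFlags(),
"-Wno-unused-local-typedefs", "-fno-addrsig", "-fuse-ld=lld"), leaving the
emitted flag order, and hence every golden file, byte-for-byte unchanged.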
"-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json index d694f056..06fc7311 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json @@ -16,19 +16,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -58,19 +58,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -100,19 +100,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -142,19 +142,19 @@ "path": "/tmp/stable/clang", "args": [ 
"-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -184,19 +184,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -226,19 +226,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -268,19 +268,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", 
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -310,19 +310,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -352,19 +352,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json index bbea704d..d5fe7409 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json @@ -15,19 +15,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json index 30b310bb..f66a82c5 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json +++ 
b/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json @@ -15,19 +15,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -56,19 +56,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -97,19 +97,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -138,19 +138,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + 
"-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -179,19 +179,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -220,19 +220,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -261,19 +261,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -302,19 +302,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + 
"-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -343,19 +343,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json index f7737c4f..9cf9326c 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json @@ -15,19 +15,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -59,19 +59,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -103,19 +103,19 @@ "path": "/tmp/stable/clang++", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - 
"-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -147,19 +147,19 @@ "path": "somepath/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -198,19 +198,19 @@ "path": "/somedir/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-resource-dir=someResourceDir", @@ -254,19 +254,19 @@ "path": "/somedir/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-resource-dir=someResourceDir", @@ -310,19 +310,19 @@ "path": "/somedir/clang", "args": 
[ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-resource-dir=someResourceDir", @@ -356,19 +356,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -397,19 +397,19 @@ "path": "/tmp/stable/a/b/c/d/e/f/g/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -438,19 +438,19 @@ "path": "/tmp/stable/a/b/c/d/e/f/g/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", 
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -479,19 +479,19 @@ "path": "/tmp/stable/somedir/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -523,19 +523,19 @@ "path": "/tmp/stable/pathenv/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json index 15ce2829..45b5fe34 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json @@ -17,19 +17,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fsanitize=kernel-address", @@ -61,19 +61,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", 
"-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fsanitize=kernel-address", @@ -105,19 +105,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fsanitize=kernel-address", @@ -149,19 +149,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fsanitize=kernel-address", @@ -192,19 +192,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fsanitize=fuzzer", @@ -236,19 +236,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - 
"-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fsanitize=address", @@ -280,19 +280,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fsanitize=address", @@ -323,19 +323,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fprofile-instr-generate", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json index bc7752f2..141206f9 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json @@ -25,19 +25,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + 
"-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-mno-movbe", @@ -75,19 +75,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-Wno-#warnings", @@ -118,19 +118,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-Wno-error=uninitialized", @@ -161,19 +161,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-someflag", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json index 807a9d60..04640bc4 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json @@ -31,19 +31,19 @@ "--", "-resource-dir=someResourceDir", "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - 
"-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -59,19 +59,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -117,19 +117,19 @@ "--", "-resource-dir=someResourceDir", "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -145,19 +145,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -205,19 +205,19 @@ "--", 
"-resource-dir=someResourceDir", "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -236,19 +236,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -297,19 +297,19 @@ "--", "-resource-dir=someResourceDir", "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -325,19 +325,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", 
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json index 31d16d6b..8425fa7a 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json @@ -18,19 +18,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -62,19 +62,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -92,19 +92,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -140,19 +140,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", 
- "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", @@ -170,19 +170,19 @@ "path": "/tmp/stable/clang", "args": [ "-Qunused-arguments", - "-fno-addrsig", - "-fuse-ld=lld", - "-fdebug-default-version=5", - "-Wno-unused-local-typedefs", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-Wno-unknown-warning-option", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-unused-local-typedefs", + "-fno-addrsig", + "-fuse-ld=lld", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "main.cc", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json index 66048f3e..60958ea2 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json @@ -27,20 +27,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -100,20 +100,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - 
"-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -176,20 +176,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json index 3b40bb54..25d09d7e 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json @@ -18,20 +18,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -81,20 +81,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-eabi", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -144,20 +144,20 @@ "../../usr/bin/clang", 
"--sysroot=/usr/x86_64-cros-win-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -207,20 +207,20 @@ "../../usr/bin/clang", "--sysroot=/usr/armv7m-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -269,20 +269,20 @@ "../../usr/bin/clang", "--sysroot=/usr/armv7m-cros-eabi", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -331,20 +331,20 @@ "../../usr/bin/clang", "--sysroot=/usr/armv7m-cros-win-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", 
- "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -393,20 +393,20 @@ "../../usr/bin/clang", "--sysroot=/usr/armv8m-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -455,20 +455,20 @@ "../../usr/bin/clang", "--sysroot=/usr/armv8m-cros-eabi", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -517,20 +517,20 @@ "../../usr/bin/clang", "--sysroot=/usr/armv8m-cros-win-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json index 2c8568f1..8f963a4a 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json +++ 
b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json @@ -17,20 +17,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -78,20 +78,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-eabi", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -139,20 +139,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-win-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -200,20 +200,20 @@ "../../usr/bin/clang", "--sysroot=/usr/armv7m-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + 
"--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -260,20 +260,20 @@ "../../usr/bin/clang", "--sysroot=/usr/armv7m-cros-eabi", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -320,20 +320,20 @@ "../../usr/bin/clang", "--sysroot=/usr/armv7m-cros-win-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -380,20 +380,20 @@ "../../usr/bin/clang", "--sysroot=/usr/armv8m-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -440,20 +440,20 @@ "../../usr/bin/clang", "--sysroot=/usr/armv8m-cros-eabi", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + 
"-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -500,20 +500,20 @@ "../../usr/bin/clang", "--sysroot=/usr/armv8m-cros-win-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json index 33b280ea..262a5e88 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json @@ -17,20 +17,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -81,20 +81,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + 
"-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -145,20 +145,20 @@ "../../usr/bin/clang++", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -209,20 +209,20 @@ "somepath/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -280,20 +280,20 @@ "/somedir/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -356,20 +356,20 @@ "/somedir/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + 
"-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -427,20 +427,20 @@ "/somedir/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -493,20 +493,20 @@ "/usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -554,20 +554,20 @@ "a/b/c/d/e/usr/bin/clang", "--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - 
"-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -615,20 +615,20 @@ "a/b/c/d/e/usr/bin/clang", "--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -676,20 +676,20 @@ "../usr/bin/clang", "--sysroot=/tmp/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -740,20 +740,20 @@ "/tmp/usr/bin/clang", "--sysroot=/tmp/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json index 4817cc2a..d21f41db 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json @@ -19,20 +19,20 @@ 
"../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -82,20 +82,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -145,20 +145,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -208,20 +208,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - 
"-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -270,20 +270,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -333,20 +333,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -396,20 +396,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -458,20 +458,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + 
"-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json index 0b61ab7d..03e1aecd 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json @@ -27,20 +27,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -97,20 +97,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -160,20 +160,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + 
"--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -223,20 +223,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json index 6cbb7e3a..a822dede 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json @@ -55,20 +55,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -119,20 +119,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - 
"-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -176,20 +176,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -236,20 +236,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-D_FORTIFY_SOURCE=2", @@ -297,20 +297,20 @@ "../../usr/bin/clang", "--sysroot=/usr/armv7a-cros-linux-gnueabihf", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-D_FORTIFY_SOURCE=2", @@ -356,20 +356,20 @@ "args": [ "../../usr/bin/clang", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", 
"-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json index 090f3c1a..a9b62d93 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json @@ -32,20 +32,20 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -74,20 +74,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -146,20 +146,20 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - 
"-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -189,20 +189,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -263,20 +263,20 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -309,20 +309,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -384,20 +384,20 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + 
"-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -427,20 +427,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json index a17b11eb..2ee2cd01 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json @@ -20,20 +20,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -84,20 +84,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + 
"--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -134,20 +134,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -202,20 +202,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -252,20 +252,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json index 124cf478..d799a0a0 100644 
--- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json @@ -17,20 +17,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -105,20 +105,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -188,20 +188,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -252,20 +252,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + 
"-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json index 66048f3e..60958ea2 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json @@ -27,20 +27,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -100,20 +100,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -176,20 +176,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - 
"-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json index 33b280ea..262a5e88 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json @@ -17,20 +17,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -81,20 +81,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -145,20 +145,20 @@ "../../usr/bin/clang++", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -209,20 +209,20 @@ "somepath/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", 
"-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -280,20 +280,20 @@ "/somedir/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -356,20 +356,20 @@ "/somedir/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -427,20 +427,20 @@ "/somedir/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - 
"-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -493,20 +493,20 @@ "/usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -554,20 +554,20 @@ "a/b/c/d/e/usr/bin/clang", "--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -615,20 +615,20 @@ "a/b/c/d/e/usr/bin/clang", "--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -676,20 +676,20 @@ "../usr/bin/clang", "--sysroot=/tmp/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + 
"-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -740,20 +740,20 @@ "/tmp/usr/bin/clang", "--sysroot=/tmp/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json index 090f3c1a..a9b62d93 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json @@ -32,20 +32,20 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -74,20 +74,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", 
"-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -146,20 +146,20 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -189,20 +189,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -263,20 +263,20 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -309,20 +309,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + 
"-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -384,20 +384,20 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -427,20 +427,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json index a17b11eb..2ee2cd01 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json @@ -20,20 +20,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", 
"-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -84,20 +84,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -134,20 +134,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -202,20 +202,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -252,20 +252,20 @@ 
"../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json index 124cf478..d799a0a0 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json @@ -17,20 +17,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -105,20 +105,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -188,20 +188,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + 
"-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -252,20 +252,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json index 3ea27130..298ac342 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json @@ -26,20 +26,20 @@ "/usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -95,20 +95,20 @@ "/usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + 
"-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -167,20 +167,20 @@ "/usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json index 146c07cb..324f66c6 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json @@ -16,20 +16,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -74,20 +74,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", 
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -132,20 +132,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -190,20 +190,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -255,20 +255,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -326,20 +326,20 @@ "/somedir/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", 
"-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -396,20 +396,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -456,20 +456,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -511,20 +511,20 @@ "args": [ "--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -566,20 +566,20 @@ "args": [ "--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + 
"-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -621,20 +621,20 @@ "args": [ "--sysroot=/tmp/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -679,20 +679,20 @@ "args": [ "--sysroot=/tmp/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json index 090f3c1a..a9b62d93 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json @@ -32,20 +32,20 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + 
"-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -74,20 +74,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -146,20 +146,20 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -189,20 +189,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -263,20 +263,20 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - 
"-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -309,20 +309,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -384,20 +384,20 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -427,20 +427,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - 
"-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json index 421cff26..5036980d 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json @@ -19,20 +19,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -77,20 +77,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -121,20 +121,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -183,20 +183,20 @@ 
"args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -227,20 +227,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json index 951786fd..ecab5901 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json @@ -17,20 +17,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -100,20 +100,20 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", 
+ "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -183,20 +183,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", @@ -247,20 +247,20 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fno-addrsig", - "-fdebug-default-version=5", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "--unwindlib=libunwind", "-Wno-section", + "-fno-addrsig", "-fuse-ld=lld", - "--unwindlib=libunwind", - "-Wno-final-dtor-non-final-class", - "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json b/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json index 86f73ed2..6f9363ac 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json @@ -27,17 +27,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + 
"-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -91,17 +91,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -158,17 +158,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json index 1799eafe..7b19f239 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json @@ -18,17 +18,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -72,17 +72,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-eabi", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - 
"-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-eabi-", @@ -126,17 +126,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-win-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-win-gnu-", @@ -180,17 +180,17 @@ "../../usr/bin/clang", "--sysroot=/usr/armv7m-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -234,17 +234,17 @@ "../../usr/bin/clang", "--sysroot=/usr/armv7m-cros-eabi", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/armv7m-cros-eabi-", @@ -287,17 +287,17 @@ "../../usr/bin/clang", "--sysroot=/usr/armv7m-cros-win-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - 
"-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -341,17 +341,17 @@ "../../usr/bin/clang", "--sysroot=/usr/armv8m-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -395,17 +395,17 @@ "../../usr/bin/clang", "--sysroot=/usr/armv8m-cros-eabi", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/armv8m-cros-eabi-", @@ -448,17 +448,17 @@ "../../usr/bin/clang", "--sysroot=/usr/armv8m-cros-win-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json index dff14d9b..2bbdd453 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json @@ -17,17 +17,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - 
"-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -69,17 +69,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-eabi", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-eabi-", @@ -121,17 +121,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-win-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-win-gnu-", @@ -173,17 +173,17 @@ "../../usr/bin/clang", "--sysroot=/usr/armv7m-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -225,17 +225,17 @@ "../../usr/bin/clang", "--sysroot=/usr/armv7m-cros-eabi", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - 
"-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/armv7m-cros-eabi-", @@ -276,17 +276,17 @@ "../../usr/bin/clang", "--sysroot=/usr/armv7m-cros-win-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -328,17 +328,17 @@ "../../usr/bin/clang", "--sysroot=/usr/armv8m-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -380,17 +380,17 @@ "../../usr/bin/clang", "--sysroot=/usr/armv8m-cros-eabi", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/armv8m-cros-eabi-", @@ -431,17 +431,17 @@ "../../usr/bin/clang", "--sysroot=/usr/armv8m-cros-win-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", 
"-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json index 2d956742..2b56b48a 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json @@ -17,17 +17,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -72,17 +72,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -127,17 +127,17 @@ "../../usr/bin/clang++", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -182,17 +182,17 @@ "somepath/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", 
"-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -244,17 +244,17 @@ "/somedir/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -311,17 +311,17 @@ "/somedir/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -373,17 +373,17 @@ "/somedir/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -430,17 +430,17 @@ "/usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - 
"-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -482,17 +482,17 @@ "a/b/c/d/e/usr/bin/clang", "--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-", @@ -534,17 +534,17 @@ "a/b/c/d/e/usr/bin/clang", "--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-", @@ -586,17 +586,17 @@ "../usr/bin/clang", "--sysroot=/tmp/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../bin/x86_64-cros-linux-gnu-", @@ -641,17 +641,17 @@ "/tmp/usr/bin/clang", "--sysroot=/tmp/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - 
"-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../bin/x86_64-cros-linux-gnu-", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json index 7cb4ed5f..a17cd381 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json @@ -19,17 +19,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -74,17 +74,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -129,17 +129,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + 
"-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -184,17 +184,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -238,17 +238,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -293,17 +293,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -348,17 +348,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + 
"-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -402,17 +402,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json index 32716a5e..196914ee 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json @@ -27,17 +27,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -88,17 +88,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -142,17 +142,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", 
"-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -196,17 +196,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json index 78eb5885..3e36da96 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json @@ -50,17 +50,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -105,17 +105,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -153,17 +153,17 @@ "../../usr/bin/clang", 
"--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -206,17 +206,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-fno-stack-protector", @@ -261,17 +261,17 @@ "../../usr/bin/clang", "--sysroot=/usr/armv7a-cros-linux-gnueabihf", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "-mthumb", @@ -315,17 +315,17 @@ "args": [ "../../usr/bin/clang", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json index 742e6a3a..3bb4a8aa 
100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json @@ -32,17 +32,17 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -65,17 +65,17 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -128,17 +128,17 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -162,17 +162,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", 
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -227,17 +227,17 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -264,17 +264,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -330,17 +330,17 @@ "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -364,17 +364,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", 
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json index f3878b27..cf0d0f04 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json @@ -20,17 +20,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -75,17 +75,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -116,17 +116,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -175,17 +175,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", 
"-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -216,17 +216,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json index a428983e..15ba7ae6 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json @@ -17,17 +17,17 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -91,17 +91,17 @@ "../../usr/bin/clang", "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -160,17 +160,17 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - 
"-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -215,17 +215,17 @@ "args": [ "--sysroot=/usr/x86_64-cros-linux-gnu", "-Qunused-arguments", - "-fdebug-default-version=5", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-Wno-section", - "-Wno-final-dtor-non-final-class", "-Werror=poison-system-directories", - "-fexperimental-new-pass-manager", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + "-Wno-section", "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", -- cgit v1.2.3 From 8e528cae0c5067ef1ec26764dabc6ce7c23be9ad Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Tue, 10 May 2022 11:58:26 -0700 Subject: compiler_wrapper: move common clangFlags to a function BUG=b:232114933 TEST=go test Change-Id: Iffc686c47c21017a31b9fb69e1f2764390fe29f3 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3639683 Reviewed-by: Jordan Abrahams-Whitehead Commit-Queue: George Burgess Tested-by: George Burgess --- compiler_wrapper/config.go | 87 ++++++++++++++++------------------------------ 1 file changed, 29 insertions(+), 58 deletions(-) diff --git a/compiler_wrapper/config.go b/compiler_wrapper/config.go index 13df2b9f..5cbb9748 100644 --- a/compiler_wrapper/config.go +++ b/compiler_wrapper/config.go @@ -103,6 +103,26 @@ func getConfig(configName string, useCCache bool, useLlvmNext bool, version stri return &cfg, nil } +func crosCommonClangFlags() []string { + // Temporarily disable tautological-*-compare chromium:778316. + // Temporarily add no-unknown-warning-option to deal with old clang versions. + // Temporarily disable Wdeprecated-declarations. b/193860318 + // b/230345382: Temporarily disable Wimplicit-function-declaration. + return []string{ + "-Qunused-arguments", + "-Werror=poison-system-directories", + "-Wno-compound-token-split-by-macro", + "-Wno-deprecated-declarations", + "-Wno-error=implicit-function-declaration", + "-Wno-final-dtor-non-final-class", + "-Wno-tautological-constant-compare", + "-Wno-tautological-unsigned-enum-zero-compare", + "-Wno-unknown-warning-option", + "-fdebug-default-version=5", + "-fexperimental-new-pass-manager", + } +} + func crosCommonClangPostFlags() []string { // Temporarily disable Wdeprecated-copy. 
b/191479033 return []string{ @@ -133,34 +153,17 @@ var crosHardenedConfig = config{ "-Wno-unused-local-typedefs", "-Wno-maybe-uninitialized", }, - // Temporarily disable tautological-*-compare chromium:778316. - // Temporarily add no-unknown-warning-option to deal with old clang versions. // Temporarily disable Wsection since kernel gets a bunch of these. chromium:778867 // Disable "-faddrsig" since it produces object files that strip doesn't understand, chromium:915742. // crbug.com/1103065: -grecord-gcc-switches pollutes the Goma cache; // removed that flag for now. - // Temporarily disable Wdeprecated-declarations. b/193860318 - // b/230345382: Temporarily disable Wimplicit-function-declaration. - - clangFlags: []string{ - "-Qunused-arguments", - "-Werror=poison-system-directories", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", - "-Wno-final-dtor-non-final-class", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-fdebug-default-version=5", - "-fexperimental-new-pass-manager", - + clangFlags: append( + crosCommonClangFlags(), "--unwindlib=libunwind", "-Wno-section", "-fno-addrsig", "-fuse-ld=lld", - }, - + ), clangPostFlags: crosCommonClangPostFlags(), newWarningsDir: "/tmp/fatal_clang_warnings", triciumNitsDir: "/tmp/linting_output/clang-tidy", @@ -178,27 +181,11 @@ var crosNonHardenedConfig = config{ "-Wno-deprecated-declarations", "-Wtrampolines", }, - // Temporarily disable tautological-*-compare chromium:778316. - // Temporarily add no-unknown-warning-option to deal with old clang versions. // Temporarily disable Wsection since kernel gets a bunch of these. chromium:778867 - // Temporarily disable Wdeprecated-declarations. b/193860318 - // b/230345382: Temporarily disable Wimplicit-function-declaration. - clangFlags: []string{ - "-Qunused-arguments", - "-Werror=poison-system-directories", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", - "-Wno-final-dtor-non-final-class", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-fdebug-default-version=5", - "-fexperimental-new-pass-manager", - + clangFlags: append( + crosCommonClangFlags(), "-Wno-section", - }, - + ), clangPostFlags: crosCommonClangPostFlags(), newWarningsDir: "/tmp/fatal_clang_warnings", triciumNitsDir: "/tmp/linting_output/clang-tidy", @@ -220,30 +207,14 @@ var crosHostConfig = config{ "-Wno-unused-local-typedefs", "-Wno-deprecated-declarations", }, - // Temporarily disable tautological-*-compare chromium:778316. - // Temporarily add no-unknown-warning-option to deal with old clang versions. // crbug.com/1103065: -grecord-gcc-switches pollutes the Goma cache; // removed that flag for now. - // Temporarily disable Wdeprecated-declarations. b/193860318 - // b/230345382: Temporarily disable Wimplicit-function-declaration. 
- clangFlags: []string{ - "-Qunused-arguments", - "-Werror=poison-system-directories", - "-Wno-compound-token-split-by-macro", - "-Wno-deprecated-declarations", - "-Wno-error=implicit-function-declaration", - "-Wno-final-dtor-non-final-class", - "-Wno-tautological-constant-compare", - "-Wno-tautological-unsigned-enum-zero-compare", - "-Wno-unknown-warning-option", - "-fdebug-default-version=5", - "-fexperimental-new-pass-manager", - + clangFlags: append( + crosCommonClangFlags(), "-Wno-unused-local-typedefs", "-fno-addrsig", "-fuse-ld=lld", - }, - + ), // Temporarily disable Wdeprecated-copy. b/191479033 clangPostFlags: crosCommonClangPostFlags(), newWarningsDir: "/tmp/fatal_clang_warnings",
-- cgit v1.2.3
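The refactor above reduces to one pattern: each config builds its clangFlags by appending its own deltas to the shared list returned by crosCommonClangFlags(). A minimal runnable Go sketch of that pattern follows (simplified flag lists and names; the real config struct and full flag sets live in config.go as the diff shows):

package main

import "fmt"

// crosCommonClangFlags returns a fresh slice on every call, so each
// config's append works on its own backing array and one config's
// deltas can never leak into another's flag list.
func crosCommonClangFlags() []string {
	return []string{
		"-Qunused-arguments",
		"-Werror=poison-system-directories",
	}
}

func main() {
	// Per-config deltas layered onto the common list, mirroring the
	// clangFlags: append(crosCommonClangFlags(), ...) fields above.
	hardened := append(crosCommonClangFlags(),
		"--unwindlib=libunwind", "-Wno-section", "-fno-addrsig", "-fuse-ld=lld")
	nonHardened := append(crosCommonClangFlags(), "-Wno-section")
	fmt.Println(hardened)
	fmt.Println(nonHardened)
}

With this shape, a flag added once inside crosCommonClangFlags() reaches every config, which is why the next commit's two-line change to config.go fans out to the same one-line addition across 40 golden test files.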
From 76b677b89b385083ab2b243308e745abb6b4261c Mon Sep 17 00:00:00 2001 From: Manoj Gupta Date: Wed, 11 May 2022 11:59:37 -0700 Subject: compiler_wrapper: Disable warning implicit-int Wimplicit-int has been promoted to be a default error even with Wno-error. Disable the error by default since it is breaking a lot of packages. BUG=b:231987783 TEST=cq Change-Id: If88877cb63cdd5c392fcf05c2940751547466213 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3642338 Tested-by: Manoj Gupta Reviewed-by: George Burgess Commit-Queue: George Burgess Auto-Submit: Manoj Gupta --- compiler_wrapper/config.go | 2 ++ compiler_wrapper/testdata/cros_clang_host_golden/bisect.json | 3 +++ .../clang_ftrapv_maincc_target_specific.json | 9 +++++++++ .../testdata/cros_clang_host_golden/clang_host_wrapper.json | 1 + .../cros_clang_host_golden/clang_maincc_target_specific.json | 9 +++++++++ .../testdata/cros_clang_host_golden/clang_path.json | 12 ++++++++++++ .../cros_clang_host_golden/clang_sanitizer_args.json | 8 ++++++++ .../testdata/cros_clang_host_golden/clang_specific_args.json | 4 ++++ .../testdata/cros_clang_host_golden/clangtidy.json | 8 ++++++++ .../cros_clang_host_golden/force_disable_werror.json | 5 +++++ compiler_wrapper/testdata/cros_hardened_golden/bisect.json | 3 +++ .../clang_ftrapv_maincc_target_specific.json | 9 +++++++++ .../cros_hardened_golden/clang_maincc_target_specific.json | 9 +++++++++ .../testdata/cros_hardened_golden/clang_path.json | 12 ++++++++++++ .../testdata/cros_hardened_golden/clang_sanitizer_args.json | 8 ++++++++ .../testdata/cros_hardened_golden/clang_specific_args.json | 4 ++++ .../cros_hardened_golden/clang_sysroot_wrapper_common.json | 6 ++++++ .../testdata/cros_hardened_golden/clangtidy.json | 8 ++++++++ .../testdata/cros_hardened_golden/force_disable_werror.json | 5 +++++ .../testdata/cros_hardened_golden/gcc_clang_syntax.json | 4 ++++ .../testdata/cros_hardened_llvmnext_golden/bisect.json | 3 +++ .../testdata/cros_hardened_llvmnext_golden/clang_path.json | 12 ++++++++++++ .../testdata/cros_hardened_llvmnext_golden/clangtidy.json | 8 ++++++++ .../cros_hardened_llvmnext_golden/force_disable_werror.json | 5 +++++ .../cros_hardened_llvmnext_golden/gcc_clang_syntax.json | 4 ++++ .../testdata/cros_hardened_noccache_golden/bisect.json | 3 +++ .../testdata/cros_hardened_noccache_golden/clang_path.json | 12 ++++++++++++ .../testdata/cros_hardened_noccache_golden/clangtidy.json | 8 ++++++++ .../cros_hardened_noccache_golden/force_disable_werror.json | 5 +++++ .../cros_hardened_noccache_golden/gcc_clang_syntax.json | 4 ++++ .../testdata/cros_nonhardened_golden/bisect.json | 3 +++ .../clang_ftrapv_maincc_target_specific.json | 9 +++++++++ .../clang_maincc_target_specific.json | 9 +++++++++ .../testdata/cros_nonhardened_golden/clang_path.json | 12 ++++++++++++ .../cros_nonhardened_golden/clang_sanitizer_args.json | 8 ++++++++ .../cros_nonhardened_golden/clang_specific_args.json | 4 ++++ .../clang_sysroot_wrapper_common.json | 6 ++++++ .../testdata/cros_nonhardened_golden/clangtidy.json | 8 ++++++++ .../cros_nonhardened_golden/force_disable_werror.json | 5 +++++ .../testdata/cros_nonhardened_golden/gcc_clang_syntax.json | 4 ++++ 40 files changed, 261 insertions(+) diff --git a/compiler_wrapper/config.go b/compiler_wrapper/config.go index 5cbb9748..c275ae9f 100644 --- a/compiler_wrapper/config.go +++ b/compiler_wrapper/config.go @@ -108,12 +108,14 @@ func crosCommonClangFlags() []string { // Temporarily add no-unknown-warning-option to deal with old clang versions. // Temporarily disable Wdeprecated-declarations. b/193860318 // b/230345382: Temporarily disable Wimplicit-function-declaration. + // b/231987783: Temporarily disable Wimplicit-int. return []string{ "-Qunused-arguments", "-Werror=poison-system-directories", "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json b/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json index 3e60ef45..2762eb64 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json @@ -29,6 +29,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -84,6 +85,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -142,6 +144,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json index 06fc7311..ab4a2fb1 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json @@ -20,6 +20,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -62,6 +63,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -104,6 +106,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations",
"-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -146,6 +149,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -188,6 +192,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -230,6 +235,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -272,6 +278,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -314,6 +321,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -356,6 +364,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json index d5fe7409..de144078 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json @@ -19,6 +19,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json index f66a82c5..288a9a81 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json @@ -19,6 +19,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -60,6 +61,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -101,6 +103,7 @@ "-Wno-compound-token-split-by-macro", 
"-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -142,6 +145,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -183,6 +187,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -224,6 +229,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -265,6 +271,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -306,6 +313,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -347,6 +355,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json index 9cf9326c..dd4bc1e4 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json @@ -19,6 +19,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -63,6 +64,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -107,6 +109,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -151,6 +154,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -202,6 +206,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", 
"-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -258,6 +263,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -314,6 +320,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -360,6 +367,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -401,6 +409,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -442,6 +451,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -483,6 +493,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -527,6 +538,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json index 45b5fe34..4836dda4 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json @@ -21,6 +21,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -65,6 +66,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -109,6 +111,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -153,6 +156,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", 
"-Wno-tautological-unsigned-enum-zero-compare", @@ -196,6 +200,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -240,6 +245,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -284,6 +290,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -327,6 +334,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json index 141206f9..4bd6a4d1 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json @@ -29,6 +29,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -79,6 +80,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -122,6 +124,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -165,6 +168,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json index 04640bc4..4191fc6c 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json @@ -35,6 +35,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -63,6 +64,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ 
-121,6 +123,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -149,6 +152,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -209,6 +213,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -240,6 +245,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -301,6 +307,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -329,6 +336,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json index 8425fa7a..ddcaa2e7 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json @@ -22,6 +22,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -66,6 +67,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -96,6 +98,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -144,6 +147,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -174,6 +178,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json 
b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json index 60958ea2..52301eb5 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json @@ -31,6 +31,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -104,6 +105,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -180,6 +182,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json index 25d09d7e..a2513f22 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json @@ -22,6 +22,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -85,6 +86,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -148,6 +150,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -211,6 +214,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -273,6 +277,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -335,6 +340,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -397,6 +403,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -459,6 +466,7 @@ "-Wno-compound-token-split-by-macro", 
"-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -521,6 +529,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json index 8f963a4a..93f5c5af 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json @@ -21,6 +21,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -82,6 +83,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -143,6 +145,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -204,6 +207,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -264,6 +268,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -324,6 +329,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -384,6 +390,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -444,6 +451,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -504,6 +512,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json 
b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json index 262a5e88..f147f5a1 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json @@ -21,6 +21,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -85,6 +86,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -149,6 +151,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -213,6 +216,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -284,6 +288,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -360,6 +365,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -431,6 +437,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -497,6 +504,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -558,6 +566,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -619,6 +628,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -680,6 +690,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -744,6 +755,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", 
"-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json index d21f41db..69f6f0af 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json @@ -23,6 +23,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -86,6 +87,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -149,6 +151,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -212,6 +215,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -274,6 +278,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -337,6 +342,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -400,6 +406,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -462,6 +469,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json index 03e1aecd..0d7e87d1 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json @@ -31,6 +31,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -101,6 +102,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", 
"-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -164,6 +166,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -227,6 +230,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json index a822dede..0fe640fa 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json @@ -59,6 +59,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -123,6 +124,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -180,6 +182,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -240,6 +243,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -301,6 +305,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -360,6 +365,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json index a9b62d93..dcb42440 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json @@ -36,6 +36,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -78,6 +79,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", 
"-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -150,6 +152,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -193,6 +196,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -267,6 +271,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -313,6 +318,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -388,6 +394,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -431,6 +438,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json index 2ee2cd01..2d1752a7 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json @@ -24,6 +24,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -88,6 +89,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -138,6 +140,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -206,6 +209,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -256,6 +260,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", 
"-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json index d799a0a0..fabd3347 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json @@ -21,6 +21,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -109,6 +110,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -192,6 +194,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -256,6 +259,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json index 60958ea2..52301eb5 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json @@ -31,6 +31,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -104,6 +105,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -180,6 +182,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json index 262a5e88..f147f5a1 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json @@ -21,6 +21,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -85,6 +86,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", 
"-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -149,6 +151,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -213,6 +216,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -284,6 +288,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -360,6 +365,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -431,6 +437,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -497,6 +504,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -558,6 +566,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -619,6 +628,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -680,6 +690,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -744,6 +755,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json index a9b62d93..dcb42440 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json @@ -36,6 +36,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ 
-78,6 +79,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -150,6 +152,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -193,6 +196,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -267,6 +271,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -313,6 +318,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -388,6 +394,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -431,6 +438,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json index 2ee2cd01..2d1752a7 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json @@ -24,6 +24,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -88,6 +89,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -138,6 +140,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -206,6 +209,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -256,6 +260,7 @@ 
"-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json index d799a0a0..fabd3347 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json @@ -21,6 +21,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -109,6 +110,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -192,6 +194,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -256,6 +259,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json index 298ac342..fbb684b2 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json @@ -30,6 +30,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -99,6 +100,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -171,6 +173,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json index 324f66c6..6ff25b0e 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json @@ -20,6 +20,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", 
"-Wno-tautological-unsigned-enum-zero-compare", @@ -78,6 +79,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -136,6 +138,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -194,6 +197,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -259,6 +263,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -330,6 +335,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -400,6 +406,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -460,6 +467,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -515,6 +523,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -570,6 +579,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -625,6 +635,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -683,6 +694,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json index a9b62d93..dcb42440 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json @@ -36,6 +36,7 @@ 
"-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -78,6 +79,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -150,6 +152,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -193,6 +196,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -267,6 +271,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -313,6 +318,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -388,6 +394,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -431,6 +438,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json index 5036980d..e1c0ec31 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json @@ -23,6 +23,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -81,6 +82,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -125,6 +127,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -187,6 +190,7 @@ "-Wno-compound-token-split-by-macro", 
"-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -231,6 +235,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json index ecab5901..c8c9bed8 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json @@ -21,6 +21,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -104,6 +105,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -187,6 +189,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -251,6 +254,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json b/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json index 6f9363ac..4c025df8 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json @@ -31,6 +31,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -95,6 +96,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -162,6 +164,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json index 7b19f239..4c99f2bb 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json +++ 
b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json @@ -22,6 +22,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -76,6 +77,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -130,6 +132,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -184,6 +187,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -238,6 +242,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -291,6 +296,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -345,6 +351,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -399,6 +406,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -452,6 +460,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json index 2bbdd453..dbed527c 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json @@ -21,6 +21,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -73,6 +74,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", 
"-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -125,6 +127,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -177,6 +180,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -229,6 +233,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -280,6 +285,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -332,6 +338,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -384,6 +391,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -435,6 +443,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json index 2b56b48a..6fb3b088 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json @@ -21,6 +21,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -76,6 +77,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -131,6 +133,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -186,6 +189,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -248,6 +252,7 @@ 
"-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -315,6 +320,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -377,6 +383,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -434,6 +441,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -486,6 +494,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -538,6 +547,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -590,6 +600,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -645,6 +656,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json index a17cd381..9ec3cd06 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json @@ -23,6 +23,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -78,6 +79,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -133,6 +135,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -188,6 +191,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", 
"-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -242,6 +246,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -297,6 +302,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -352,6 +358,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -406,6 +413,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json index 196914ee..537df6ff 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json @@ -31,6 +31,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -92,6 +93,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -146,6 +148,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -200,6 +203,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json index 3e36da96..d983bd46 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json @@ -54,6 +54,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -109,6 +110,7 @@ 
"-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -157,6 +159,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -210,6 +213,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -265,6 +269,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -319,6 +324,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json index 3bb4a8aa..30f9466d 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json @@ -36,6 +36,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -69,6 +70,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -132,6 +134,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -166,6 +169,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -231,6 +235,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -268,6 +273,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -334,6 +340,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + 
"-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -368,6 +375,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json index cf0d0f04..df6e8c7f 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json @@ -24,6 +24,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -79,6 +80,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -120,6 +122,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -179,6 +182,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -220,6 +224,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json index 15ba7ae6..77225038 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json @@ -21,6 +21,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -95,6 +96,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -164,6 +166,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + "-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", @@ -219,6 +222,7 @@ "-Wno-compound-token-split-by-macro", "-Wno-deprecated-declarations", "-Wno-error=implicit-function-declaration", + 
"-Wno-error=implicit-int", "-Wno-final-dtor-non-final-class", "-Wno-tautological-constant-compare", "-Wno-tautological-unsigned-enum-zero-compare", -- cgit v1.2.3 From 9d0d3541b73b639b291a0d483a3b1725a134d710 Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Tue, 10 May 2022 14:11:13 -0700 Subject: compiler_wrapper: `go fmt` Pre-upload checks in chromiumos-overlay complained about these files not being `go fmt`'ed; do that. This is apparently an artifact of a build tag migration upstream: https://go.googlesource.com/proposal/+/master/design/draft-gobuild.md ...so leave both in here for a while. BUG=b:232114933 TEST=go test Change-Id: I8f8e0896d3e3ce37d155035d93412f8c1c89b1b7 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3639686 Tested-by: George Burgess Commit-Queue: Jordan Abrahams-Whitehead Reviewed-by: Jordan Abrahams-Whitehead Auto-Submit: George Burgess --- compiler_wrapper/go_exec.go | 1 + compiler_wrapper/libc_exec.go | 1 + 2 files changed, 2 insertions(+) diff --git a/compiler_wrapper/go_exec.go b/compiler_wrapper/go_exec.go index 74691484..8c2b8ce7 100644 --- a/compiler_wrapper/go_exec.go +++ b/compiler_wrapper/go_exec.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +//go:build !libc_exec // +build !libc_exec package main diff --git a/compiler_wrapper/libc_exec.go b/compiler_wrapper/libc_exec.go index 5922c6e8..44326d61 100644 --- a/compiler_wrapper/libc_exec.go +++ b/compiler_wrapper/libc_exec.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +//go:build libc_exec // +build libc_exec package main -- cgit v1.2.3 From 5ba140d61255ba5e8a81cfacb0b4d3e1aaad13b0 Mon Sep 17 00:00:00 2001 From: Denis Nikitin Date: Wed, 11 May 2022 08:39:05 -0700 Subject: crosperf: Update remote lab machines BUG=b:231402615 TEST=ping Change-Id: I40b3c47f9c5209501cb2a15457a4c640887e8733 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3642336 Tested-by: Denis Nikitin Commit-Queue: Denis Nikitin Reviewed-by: Ryan Beltran --- crosperf/default_remotes | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crosperf/default_remotes b/crosperf/default_remotes index 4910a58b..c6525c6b 100644 --- a/crosperf/default_remotes +++ b/crosperf/default_remotes @@ -1,7 +1,7 @@ -bob : chromeos6-row4-rack13-host6.cros +bob : chromeos8-row12-rack16-host2.cros chell : chromeos2-row1-rack10-host2.cros chromeos2-row1-rack10-host4.cros coral : chromeos6-row5-rack6-host1.cros chromeos6-row5-rack6-host3.cros chromeos6-row5-rack6-host5.cros elm : chromeos6-row14-rack15-host21.cros kefka : chromeos6-row6-rack22-host2.cros chromeos6-row6-rack22-host3.cros chromeos6-row11-rack22-host7.cros nautilus : chromeos6-row5-rack10-host1.cros chromeos6-row5-rack10-host3.cros -snappy : chromeos6-row3-rack20-host1.cros chromeos6-row3-rack20-host3.cros +snappy : chromeos8-row12-rack17-host2 -- cgit v1.2.3 From 74a3c504c7d287301b1a6a4eb3a4f20f06a4365c Mon Sep 17 00:00:00 2001 From: Adrian Ratiu Date: Wed, 11 May 2022 17:05:11 +0300 Subject: compiler_wrapper: handle split "-Wl,-z,defs" During the libxcrypt build, both "-Wl,-z,defs" and its split equivalent "-Wl,-z -Wl,defs" are used which create problems because only the unified version is supported. This adds support for filtering the flags seprately and two tests to verify the functionality. 
BUG=b:187795307
TEST=Local builds with cross-*/libxcrypt; CQ.

Change-Id: If48499f5c8e552e28c7cefd2d959e40f4757a88f
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3641843
Tested-by: Adrian Ratiu
Reviewed-by: Manoj Gupta
Reviewed-by: George Burgess
Commit-Queue: Manoj Gupta
Tested-by: Manoj Gupta
---
 compiler_wrapper/command.go              | 15 +++++++++++++++
 compiler_wrapper/sanitizer_flags.go      |  4 ++++
 compiler_wrapper/sanitizer_flags_test.go | 18 ++++++++++++++++++
 3 files changed, 37 insertions(+)

diff --git a/compiler_wrapper/command.go b/compiler_wrapper/command.go
index 253251ab..e186623a 100644
--- a/compiler_wrapper/command.go
+++ b/compiler_wrapper/command.go
@@ -265,6 +265,21 @@ func (builder *commandBuilder) transformArgs(transform func(arg builderArg) stri
 	builder.args = newArgs
 }

+// filterArgPairs allows filtering of adjacent argument pairs; useful e.g. when
+// adjacent unsupported args like "-Wl,-z -Wl,defs" must be dropped together.
+func (builder *commandBuilder) filterArgPairs(keepPair func(arg1, arg2 builderArg) bool) {
+	newArgs := builder.args[:0]
+	for i := 0; i < len(builder.args); i++ {
+		if i == len(builder.args)-1 || keepPair(builder.args[i], builder.args[i+1]) {
+			newArgs = append(newArgs, builder.args[i])
+		} else {
+			// Skip builder.args[i] as well as the next item.
+			i++
+		}
+	}
+	builder.args = newArgs
+}
+
 func (builder *commandBuilder) updateEnv(updates ...string) {
 	builder.envUpdates = append(builder.envUpdates, updates...)
 }
diff --git a/compiler_wrapper/sanitizer_flags.go b/compiler_wrapper/sanitizer_flags.go
index 5d517e49..b9eb0558 100644
--- a/compiler_wrapper/sanitizer_flags.go
+++ b/compiler_wrapper/sanitizer_flags.go
@@ -37,5 +37,9 @@ func processSanitizerFlags(builder *commandBuilder) {
 			}
 			return arg.value
 		})
+
+		builder.filterArgPairs(func(arg1, arg2 builderArg) bool {
+			return !(arg1.value == "-Wl,-z" && arg2.value == "-Wl,defs")
+		})
 	}
 }
diff --git a/compiler_wrapper/sanitizer_flags_test.go b/compiler_wrapper/sanitizer_flags_test.go
index 8b22a05e..17c41438 100644
--- a/compiler_wrapper/sanitizer_flags_test.go
+++ b/compiler_wrapper/sanitizer_flags_test.go
@@ -22,6 +22,15 @@ func TestFilterUnsupportedSanitizerFlagsIfSanitizeGiven(t *testing.T) {
 			t.Error(err)
 		}

+		cmd = ctx.must(callCompiler(ctx, ctx.cfg,
+			ctx.newCommand(gccX86_64, "-fsanitize=kernel-address", "-Wl,-z -Wl,defs", mainCc)))
+		if err := verifyArgCount(cmd, 0, "-Wl,-z"); err != nil {
+			t.Error(err)
+		}
+		if err := verifyArgCount(cmd, 0, "-Wl,defs"); err != nil {
+			t.Error(err)
+		}
+
 		cmd = ctx.must(callCompiler(ctx, ctx.cfg,
 			ctx.newCommand(gccX86_64, "-fsanitize=kernel-address", "-D_FORTIFY_SOURCE=1", mainCc)))
 		if err := verifyArgCount(cmd, 0, "-D_FORTIFY_SOURCE=1"); err != nil {
@@ -74,6 +83,15 @@ func TestKeepSanitizerFlagsIfNoSanitizeGiven(t *testing.T) {
 			t.Error(err)
 		}

+		cmd = ctx.must(callCompiler(ctx, ctx.cfg,
+			ctx.newCommand(gccX86_64, "-Wl,-z -Wl,defs", mainCc)))
+		if err := verifyArgCount(cmd, 1, "-Wl,-z"); err != nil {
+			t.Error(err)
+		}
+		if err := verifyArgCount(cmd, 1, "-Wl,defs"); err != nil {
+			t.Error(err)
+		}
+
 		cmd = ctx.must(callCompiler(ctx, ctx.cfg,
 			ctx.newCommand(gccX86_64, "-D_FORTIFY_SOURCE=1", mainCc)))
 		if err := verifyArgCount(cmd, 1, "-D_FORTIFY_SOURCE=1"); err != nil {
--
cgit v1.2.3

From 3c0605a8395541b18ce5fb1f847b071eb36439ff Mon Sep 17 00:00:00 2001
From: George Burgess IV
Date: Mon, 16 May 2022 13:16:45 -0700
Subject: compiler_wrapper: fix test

`-Wl,-z -Wl,defs` should be passed as separate flags.
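(Why this matters, as a brief aside: the wrapper's pair filter compares
whole argv entries, so "-Wl,-z -Wl,defs" written as one string is a
single argument with an embedded space, which the filter can never
match; a shell always delivers the two flags as separate entries. A
small Python sketch of the distinction, with hypothetical argv values:)

    split = ["gcc", "-Wl,-z", "-Wl,defs", "main.c"]  # what a shell passes
    fused = ["gcc", "-Wl,-z -Wl,defs", "main.c"]     # one entry, embedded space

    def has_zdefs_pair(argv):
        # Look for the split spelling as two adjacent argv entries.
        return any(a == "-Wl,-z" and b == "-Wl,defs"
                   for a, b in zip(argv, argv[1:]))

    assert has_zdefs_pair(split)
    assert not has_zdefs_pair(fused)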
BUG=b:231357370 TEST=go test Change-Id: Iacd0be0c74df0cc7bd8607473a859c36c5ef4f06 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3651187 Commit-Queue: Manoj Gupta Auto-Submit: George Burgess Reviewed-by: Manoj Gupta Tested-by: George Burgess --- compiler_wrapper/sanitizer_flags_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/compiler_wrapper/sanitizer_flags_test.go b/compiler_wrapper/sanitizer_flags_test.go index 17c41438..551691f4 100644 --- a/compiler_wrapper/sanitizer_flags_test.go +++ b/compiler_wrapper/sanitizer_flags_test.go @@ -84,7 +84,7 @@ func TestKeepSanitizerFlagsIfNoSanitizeGiven(t *testing.T) { } cmd = ctx.must(callCompiler(ctx, ctx.cfg, - ctx.newCommand(gccX86_64, "-Wl,-z -Wl,defs", mainCc))) + ctx.newCommand(gccX86_64, "-Wl,-z", "-Wl,defs", mainCc))) if err := verifyArgCount(cmd, 1, "-Wl,-z"); err != nil { t.Error(err) } -- cgit v1.2.3 From 2cd85bd3afa88b6368d1b2c3052dc079b755a63b Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Mon, 16 May 2022 13:10:41 -0700 Subject: compiler_wrapper: keep FORTIFY if sanitizer is trivial For sanitizers like `return`, `builtin`, etc., we have no reason to also drop FORTIFY checks. BUG=b:231357370 TEST=`go test` Change-Id: I1e349a4f6743e549b7bc0899a307b10683bb42e9 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3651188 Reviewed-by: Manoj Gupta Commit-Queue: George Burgess Tested-by: George Burgess --- compiler_wrapper/sanitizer_flags.go | 95 +++++++++++++++++++++++--------- compiler_wrapper/sanitizer_flags_test.go | 22 ++++++++ 2 files changed, 90 insertions(+), 27 deletions(-) diff --git a/compiler_wrapper/sanitizer_flags.go b/compiler_wrapper/sanitizer_flags.go index b9eb0558..163b0f4f 100644 --- a/compiler_wrapper/sanitizer_flags.go +++ b/compiler_wrapper/sanitizer_flags.go @@ -8,38 +8,79 @@ import ( "strings" ) +// Returns whether the flag turns on 'invasive' sanitizers. These are sanitizers incompatible with +// things like FORTIFY, since they require meaningful runtime support, intercept libc calls, etc. +func isInvasiveSanitizerFlag(flag string) bool { + // There are a few valid spellings here: + // -fsanitize=${sanitizer_list}, which enables the given sanitizers + // -fsanitize-trap=${sanitizer_list}, which specifies sanitizer behavior _if_ these + // sanitizers are already enabled. + // -fsanitize-recover=${sanitizer_list}, which also specifies sanitizer behavior _if_ + // these sanitizers are already enabled. + // -fsanitize-ignorelist=/path/to/file, which designates a config file for sanitizers. + // + // All we care about is the first one, since that's what actually enables sanitizers. Clang + // does not accept a `-fsanitize ${sanitizer_list}` spelling of this flag. + fsanitize := "-fsanitize=" + if !strings.HasPrefix(flag, fsanitize) { + return false + } + + sanitizers := flag[len(fsanitize):] + if sanitizers == "" { + return false + } + + for _, sanitizer := range strings.Split(sanitizers, ",") { + // Keep an allowlist of sanitizers known to not cause issues. + switch sanitizer { + case "alignment", "array-bounds", "bool", "bounds", "builtin", "enum", + "float-cast-overflow", "integer-divide-by-zero", "local-bounds", + "nullability", "nullability-arg", "nullability-assign", + "nullability-return", "null", "return", "returns-nonnull-attribute", + "shift-base", "shift-exponent", "shift", "unreachable", "vla-bound": + // These sanitizers are lightweight. Ignore them. 
+ default: + return true + } + } + return false +} + func processSanitizerFlags(builder *commandBuilder) { hasSanitizeFlags := false + // TODO: This doesn't take -fno-sanitize flags into account. This doesn't seem to be an + // issue in practice. for _, arg := range builder.args { - // TODO: This should probably be -fsanitize= to not match on - // e.g. -fsanitize-blocklist - if arg.fromUser { - if strings.HasPrefix(arg.value, "-fsanitize") { - hasSanitizeFlags = true - } + if arg.fromUser && isInvasiveSanitizerFlag(arg.value) { + hasSanitizeFlags = true + break } } - if hasSanitizeFlags { - // Flags not supported by sanitizers (ASan etc.) - unsupportedSanitizerFlags := map[string]bool{ - "-D_FORTIFY_SOURCE=1": true, - "-D_FORTIFY_SOURCE=2": true, - "-Wl,--no-undefined": true, - "-Wl,-z,defs": true, - } - builder.transformArgs(func(arg builderArg) string { - // TODO: This is a bug in the old wrapper to not filter - // non user args for gcc. Fix this once we don't compare to the old wrapper anymore. - if (builder.target.compilerType != gccType || arg.fromUser) && - unsupportedSanitizerFlags[arg.value] { - return "" - } - return arg.value - }) - - builder.filterArgPairs(func(arg1, arg2 builderArg) bool { - return !(arg1.value == "-Wl,-z" && arg2.value == "-Wl,defs") - }) + if !hasSanitizeFlags { + return + } + + // Flags not supported by sanitizers (ASan etc.) + unsupportedSanitizerFlags := map[string]bool{ + "-D_FORTIFY_SOURCE=1": true, + "-D_FORTIFY_SOURCE=2": true, + "-Wl,--no-undefined": true, + "-Wl,-z,defs": true, } + + builder.transformArgs(func(arg builderArg) string { + // TODO: This is a bug in the old wrapper to not filter + // non user args for gcc. Fix this once we don't compare to the old wrapper anymore. + if (builder.target.compilerType != gccType || arg.fromUser) && + unsupportedSanitizerFlags[arg.value] { + return "" + } + return arg.value + }) + + builder.filterArgPairs(func(arg1, arg2 builderArg) bool { + return !(arg1.value == "-Wl,-z" && arg2.value == "-Wl,defs") + }) } diff --git a/compiler_wrapper/sanitizer_flags_test.go b/compiler_wrapper/sanitizer_flags_test.go index 551691f4..796961eb 100644 --- a/compiler_wrapper/sanitizer_flags_test.go +++ b/compiler_wrapper/sanitizer_flags_test.go @@ -8,6 +8,28 @@ import ( "testing" ) +func TestFortifyIsKeptIfSanitizerIsTrivial(t *testing.T) { + withTestContext(t, func(ctx *testContext) { + cmd := ctx.must(callCompiler(ctx, ctx.cfg, + ctx.newCommand(gccX86_64, "-fsanitize=return", "-D_FORTIFY_SOURCE=1", mainCc))) + if err := verifyArgCount(cmd, 1, "-D_FORTIFY_SOURCE=1"); err != nil { + t.Error(err) + } + + cmd = ctx.must(callCompiler(ctx, ctx.cfg, + ctx.newCommand(gccX86_64, "-fsanitize=return,address", "-D_FORTIFY_SOURCE=1", mainCc))) + if err := verifyArgCount(cmd, 0, "-D_FORTIFY_SOURCE=1"); err != nil { + t.Error(err) + } + + cmd = ctx.must(callCompiler(ctx, ctx.cfg, + ctx.newCommand(gccX86_64, "-fsanitize=address,return", "-D_FORTIFY_SOURCE=1", mainCc))) + if err := verifyArgCount(cmd, 0, "-D_FORTIFY_SOURCE=1"); err != nil { + t.Error(err) + } + }) +} + func TestFilterUnsupportedSanitizerFlagsIfSanitizeGiven(t *testing.T) { withTestContext(t, func(ctx *testContext) { cmd := ctx.must(callCompiler(ctx, ctx.cfg, -- cgit v1.2.3 From 73c422a852cacbee86cb565e8c1812c2b97add3b Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Tue, 17 May 2022 00:46:39 -0700 Subject: auto_delete_nightly_test_data: gracefully handle dirs not existing To avoid cases like the attached bug, simply log when a dir doesn't exist. 
Doesn't seem bad to consider this a success. BUG=b:232843376 TEST=None Change-Id: I291d1b1ca4007d2402a6707c83ce483470099c84 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3651935 Auto-Submit: George Burgess Reviewed-by: Jordan Abrahams-Whitehead Commit-Queue: Jordan Abrahams-Whitehead Tested-by: George Burgess --- auto_delete_nightly_test_data.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/auto_delete_nightly_test_data.py b/auto_delete_nightly_test_data.py index ca721b24..b625783f 100755 --- a/auto_delete_nightly_test_data.py +++ b/auto_delete_nightly_test_data.py @@ -7,8 +7,6 @@ """A crontab script to delete night test data.""" -from __future__ import print_function - __author__ = 'shenhan@google.com (Han Shen)' import argparse @@ -27,6 +25,7 @@ from cros_utils import command_executer from cros_utils import constants from cros_utils import misc + DIR_BY_WEEKDAY = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun') NIGHTLY_TESTS_WORKSPACE = os.path.join(constants.CROSTC_WORKSPACE, 'nightly-tests') @@ -109,14 +108,24 @@ def ProcessArguments(argv): def RemoveAllSubdirsMatchingPredicate( base_dir: Path, days_to_preserve: int, dry_run: bool, - is_name_removal_worthy: Callable[[str], bool]) -> bool: + is_name_removal_worthy: Callable[[str], bool]) -> int: """Removes all subdirs of base_dir that match the given predicate.""" secs_to_preserve = 60 * 60 * 24 * days_to_preserve now = time.time() remove_older_than_time = now - secs_to_preserve + try: + dir_entries = list(base_dir.iterdir()) + except FileNotFoundError as e: + # We get this if the directory itself doesn't exist. Since we're cleaning + # tempdirs, that's as good as a success. Further, the prior approach here + # was using the `find` binary, which exits successfully if nothing is + # found. 
+ print(f"Error enumerating {base_dir}'s contents; skipping removal: {e}") + return 0 + had_errors = False - for file in base_dir.iterdir(): + for file in dir_entries: if not is_name_removal_worthy(file.name): continue -- cgit v1.2.3 From c1b431116d1d78a293076c5e264706c24ae09d96 Mon Sep 17 00:00:00 2001 From: Denis Nikitin Date: Thu, 12 May 2022 22:58:14 -0700 Subject: afdo_metadata: Publish the new kernel profiles Update chromeos-kernel-4.4 Update chromeos-kernel-4.14 Update chromeos-kernel-4.19 Update chromeos-kernel-5.4 Update chromeos-kernel-5.10 BUG=None TEST=Verified in kernel-release-afdo-verify-orchestrator Change-Id: If3f7a828c656a6534f7cf93aa7beed5666e1ed71 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3647287 Commit-Queue: Jordan Abrahams-Whitehead Auto-Submit: Denis Nikitin Reviewed-by: Jordan Abrahams-Whitehead Tested-by: Denis Nikitin --- afdo_metadata/kernel_afdo.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json index 332f2852..0558e971 100644 --- a/afdo_metadata/kernel_afdo.json +++ b/afdo_metadata/kernel_afdo.json @@ -1,17 +1,17 @@ { "chromeos-kernel-4_4": { - "name": "R103-14695.25-1651484165" + "name": "R103-14753.0-1652088742" }, "chromeos-kernel-4_14": { - "name": "R103-14695.25-1651484511" + "name": "R103-14767.0-1652089052" }, "chromeos-kernel-4_19": { - "name": "R103-14695.25-1651483920" + "name": "R103-14767.0-1652088729" }, "chromeos-kernel-5_4": { - "name": "R103-14695.25-1651483959" + "name": "R103-14767.0-1652088875" }, "chromeos-kernel-5_10": { - "name": "R103-14695.25-1651484279" + "name": "R103-14767.0-1652088804" } } -- cgit v1.2.3 From 444382a8e6164925565146e0102e282d3421a3bd Mon Sep 17 00:00:00 2001 From: Denis Nikitin Date: Mon, 16 May 2022 12:30:45 -0700 Subject: crosperf: Remove .cros dependency and add snappy to remotes Clean up code which handles ".cros" suffix. Crosfleet, ssh and cros shell don't require .cros suffix in remotes. Snappy device is back. Add it to lab machines checks. 
BUG=b:231402615 TEST=tested locally Change-Id: I6e9a308428de05b8e84891933bdc19c55e18d08e Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3651281 Commit-Queue: Denis Nikitin Reviewed-by: Manoj Gupta Tested-by: Denis Nikitin --- crosperf/default_remotes | 2 +- lock_machine.py | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/crosperf/default_remotes b/crosperf/default_remotes index c6525c6b..b7e25fca 100644 --- a/crosperf/default_remotes +++ b/crosperf/default_remotes @@ -4,4 +4,4 @@ coral : chromeos6-row5-rack6-host1.cros chromeos6-row5-rack6-host3.c elm : chromeos6-row14-rack15-host21.cros kefka : chromeos6-row6-rack22-host2.cros chromeos6-row6-rack22-host3.cros chromeos6-row11-rack22-host7.cros nautilus : chromeos6-row5-rack10-host1.cros chromeos6-row5-rack10-host3.cros -snappy : chromeos8-row12-rack17-host2 +snappy : chromeos8-row12-rack17-host1 chromeos8-row12-rack17-host2 diff --git a/lock_machine.py b/lock_machine.py index 8bc3ec22..b95678e8 100755 --- a/lock_machine.py +++ b/lock_machine.py @@ -15,11 +15,10 @@ import getpass import os import sys -import file_lock_machine - from cros_utils import command_executer from cros_utils import logger from cros_utils import machines +import file_lock_machine class LockException(Exception): @@ -407,8 +406,8 @@ class LockManager(object): if os.path.exists(self.CROSFLEET_CREDENTIAL): credential = '-service-account-json %s' % self.CROSFLEET_CREDENTIAL cmd = (('%s dut lease -minutes %s %s %s %s') % - (self.CROSFLEET_PATH, self.LEASE_MINS, credential, '-host' - if '.cros' in machine else '-board', machine.rstrip('.cros'))) + (self.CROSFLEET_PATH, self.LEASE_MINS, credential, '-host', + machine.rstrip('.cros'))) # Wait 8 minutes for server to start the lease task, if not started, # we will treat it as unavailable. check_interval_time = 480 -- cgit v1.2.3 From 36743fe30ca8c95872d8b188b423044974868058 Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead Date: Wed, 18 May 2022 20:28:02 +0000 Subject: llvm_tools: Add check for invalid until values Currently in get_upstream_patch.py, it's possible for the "until" value to be earlier than "from", which means the patch can never apply. This CL reports this as an error now. Applies some pre-upload autofixes as well, and adds minor documentation for the --differential flag to describe some non-obvious behavior. 
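(The invariant behind the new check, restated as a minimal Python
sketch; the real implementation is in the diff below. A patch applies
to an LLVM revision v exactly when from <= v < until, so an entry whose
until is at or before its from describes an empty range and can never
apply:)

    def is_applicable(v, from_version, until_version):
        # until_version of None means "applies to everything >= from_version".
        return from_version <= v and (until_version is None or v < until_version)

    # until <= from: no revision can ever satisfy the range.
    assert not any(is_applicable(v, 500000, 500000)
                   for v in range(499990, 500011))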
BUG=None
TEST=get_upstream_patch.py with invalid patch versions

Change-Id: Ie8338f8b9b27f0c41f0e350706a3131e5daab1b2
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3653987
Reviewed-by: Manoj Gupta
Commit-Queue: Jordan Abrahams-Whitehead
Tested-by: Jordan Abrahams-Whitehead
Reviewed-by: George Burgess
---
 llvm_tools/get_upstream_patch.py | 36 +++++++++++++++++++++++-----------
 1 file changed, 25 insertions(+), 11 deletions(-)

diff --git a/llvm_tools/get_upstream_patch.py b/llvm_tools/get_upstream_patch.py
index d036e8c2..a2327c4d 100755
--- a/llvm_tools/get_upstream_patch.py
+++ b/llvm_tools/get_upstream_patch.py
@@ -7,6 +7,8 @@
 """Get an upstream patch to LLVM's PATCHES.json."""

 import argparse
+import dataclasses
+from datetime import datetime
 import json
 import logging
 import os
@@ -14,9 +16,6 @@ import shlex
 import subprocess
 import sys
 import typing as t
-from datetime import datetime
-
-import dataclasses

 import chroot
 import get_llvm_hash
@@ -24,6 +23,7 @@ import git
 import git_llvm_rev
 import update_chromeos_llvm_hash

+
 __DOC_EPILOGUE = """
 Example Usage:
  get_upstream_patch --chroot_path ~/chromiumos --platform chromiumos \
@@ -35,6 +35,10 @@ class CherrypickError(ValueError):
   """A ValueError that highlights the cherry-pick has been seen before"""


+class CherrypickVersionError(ValueError):
+  """A ValueError that highlights the cherry-pick is before the start_sha"""
+
+
 def add_patch(patches_json_path: str, patches_dir: str,
               relative_patches_dir: str, start_version: git_llvm_rev.Rev,
               llvm_dir: str, rev: t.Union[git_llvm_rev.Rev, str], sha: str,
@@ -58,11 +62,10 @@
   Raises:
     CherrypickError: A ValueError that highlights the cherry-pick has been
     seen before.
+    CherrypickVersionError: A ValueError that's raised when the given patch
+    is from before the start_sha.
   """

-  with open(patches_json_path, encoding='utf-8') as f:
-    patches_json = json.load(f)
-
   is_cherrypick = isinstance(rev, git_llvm_rev.Rev)
   if is_cherrypick:
     file_name = f'{sha}.patch'
@@ -70,6 +73,17 @@
     file_name = f'{rev}.patch'
   rel_patch_path = os.path.join(relative_patches_dir, file_name)

+  # Check that we haven't grabbed a patch range that's nonsensical.
+ end_vers = rev.number if isinstance(rev, git_llvm_rev.Rev) else None + if end_vers is not None and end_vers <= start_version.number: + raise CherrypickVersionError( + f'`until` version {end_vers} is earlier or equal to' + f' `from` version {start_version.number} for patch' + f' {rel_patch_path}') + + with open(patches_json_path, encoding='utf-8') as f: + patches_json = json.load(f) + for p in patches_json: rel_path = p['rel_patch_path'] if rel_path == rel_patch_path: @@ -95,8 +109,6 @@ def add_patch(patches_json_path: str, patches_dir: str, ['git', 'log', '-n1', '--format=%s', sha], cwd=llvm_dir, encoding='utf-8') - - end_vers = rev.number if isinstance(rev, git_llvm_rev.Rev) else None patch_props = { 'rel_patch_path': rel_patch_path, 'metadata': { @@ -349,8 +361,8 @@ def _convert_patch(llvm_config: git_llvm_rev.LLVMConfig, is_differential=is_differential) -def _get_duplicate_shas(patches: t.List[ParsedPatch] - ) -> t.List[t.Tuple[ParsedPatch, ParsedPatch]]: +def _get_duplicate_shas( + patches: t.List[ParsedPatch]) -> t.List[t.Tuple[ParsedPatch, ParsedPatch]]: """Return a list of Patches which have duplicate SHA's""" return [(left, right) for i, left in enumerate(patches) for right in patches[i + 1:] if left.sha == right.sha] @@ -426,7 +438,9 @@ def main(): '--differential', action='append', default=[], - help='The LLVM differential revision to apply. Example: D1234') + help='The LLVM differential revision to apply. Example: D1234.' + ' Cannot be used for changes already merged upstream; use --sha' + ' instead for those.') parser.add_argument( '--platform', action='append', -- cgit v1.2.3 From 4231baf897a178682b70f4f4f9f66dee735f537f Mon Sep 17 00:00:00 2001 From: Denis Nikitin Date: Fri, 20 May 2022 09:40:11 -0700 Subject: afdo_metadata: Publish the new kernel profiles Update chromeos-kernel-4.4 Update chromeos-kernel-4.14 Update chromeos-kernel-4.19 Update chromeos-kernel-5.4 Update chromeos-kernel-5.10 BUG=None TEST=Verified in kernel-release-afdo-verify-orchestrator Change-Id: I882dfa16066e0048c1ed3640dcbb29c697786e23 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3656068 Commit-Queue: Manoj Gupta Auto-Submit: Denis Nikitin Tested-by: Denis Nikitin Reviewed-by: Manoj Gupta --- afdo_metadata/kernel_afdo.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json index 0558e971..36fc61d1 100644 --- a/afdo_metadata/kernel_afdo.json +++ b/afdo_metadata/kernel_afdo.json @@ -1,17 +1,17 @@ { "chromeos-kernel-4_4": { - "name": "R103-14753.0-1652088742" + "name": "R104-14794.0-1652693676" }, "chromeos-kernel-4_14": { - "name": "R103-14767.0-1652089052" + "name": "R104-14767.0-1652693858" }, "chromeos-kernel-4_19": { - "name": "R103-14767.0-1652088729" + "name": "R104-14794.0-1652693604" }, "chromeos-kernel-5_4": { - "name": "R103-14767.0-1652088875" + "name": "R104-14794.0-1652693573" }, "chromeos-kernel-5_10": { - "name": "R103-14767.0-1652088804" + "name": "R104-14794.0-1652693553" } } -- cgit v1.2.3 From 49d95342584d81f73b33cd25a178f0c8854cd826 Mon Sep 17 00:00:00 2001 From: Bob Haarman Date: Mon, 23 May 2022 16:39:45 -0700 Subject: crosperf: remove ".cros" suffix from hostnames We don't need the ".cros" suffix anymore, and its presence causes the ping check for at least one host to fail. 
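A side note while hostname suffixes are in play: the `machine.rstrip('.cros')` call in the earlier lock_machine.py change removes a trailing character set, not a literal suffix, and is safe here only because these hostnames end in digits. A small illustration of the difference (the `elm-cros` name is invented; `str.removesuffix` needs Python 3.9+):

    host = 'chromeos8-row12-rack17-host2.cros'
    assert host.rstrip('.cros') == 'chromeos8-row12-rack17-host2'  # '2' stops the strip

    # rstrip keeps consuming any of {'.', 'c', 'r', 'o', 's'}:
    assert 'elm-cros.cros'.rstrip('.cros') == 'elm-'                # over-strips
    assert 'elm-cros.cros'.removesuffix('.cros') == 'elm-cros'      # literal suffix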
BUG=b:231402615 TEST=Checking lab machines should pass after this Change-Id: I620db48c6f92837c12f86886eb9e87168dfc6ed7 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3661356 Reviewed-by: Denis Nikitin Tested-by: Bob Haarman Commit-Queue: Bob Haarman --- crosperf/default_remotes | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crosperf/default_remotes b/crosperf/default_remotes index b7e25fca..31e99bbf 100644 --- a/crosperf/default_remotes +++ b/crosperf/default_remotes @@ -1,7 +1,7 @@ -bob : chromeos8-row12-rack16-host2.cros -chell : chromeos2-row1-rack10-host2.cros chromeos2-row1-rack10-host4.cros -coral : chromeos6-row5-rack6-host1.cros chromeos6-row5-rack6-host3.cros chromeos6-row5-rack6-host5.cros -elm : chromeos6-row14-rack15-host21.cros -kefka : chromeos6-row6-rack22-host2.cros chromeos6-row6-rack22-host3.cros chromeos6-row11-rack22-host7.cros -nautilus : chromeos6-row5-rack10-host1.cros chromeos6-row5-rack10-host3.cros +bob : chromeos8-row12-rack16-host2 +chell : chromeos2-row1-rack10-host2 chromeos2-row1-rack10-host4 +coral : chromeos6-row5-rack6-host1 chromeos6-row5-rack6-host3 chromeos6-row5-rack6-host5 +elm : chromeos6-row14-rack15-host21 +kefka : chromeos6-row6-rack22-host2 chromeos6-row6-rack22-host3 chromeos6-row11-rack22-host7 +nautilus : chromeos6-row5-rack10-host1 chromeos6-row5-rack10-host3 snappy : chromeos8-row12-rack17-host1 chromeos8-row12-rack17-host2 -- cgit v1.2.3 From 69f5ec0984e02c8a7f0748522a333fca57f357aa Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead Date: Sat, 21 May 2022 02:21:53 +0000 Subject: llvm_tools: Add patch_utils.py and unittests This introduces the patch_utils.py library, which contains various PATCHES.json and patch_manager utilities that will be useful for future patch manager restructuring. In particular, patch_manager.py doesn't explain why its patches fail, or give any information as to what is wrong with its patch applications. patch_utils.py provides the PatchEntry class, which is a self contained object which can provide this diagnostic information. This module will later be incorporated into patch_manager.py and get_upstream_patches.py BUG=b:188465085, b:227216280 TEST=./patch_utils_unittest.py Change-Id: I6f6e24e6449ea68f6751fbcad14fca76c1bbaec8 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3648887 Tested-by: Jordan Abrahams-Whitehead Reviewed-by: George Burgess Commit-Queue: Jordan Abrahams-Whitehead --- llvm_tools/patch_utils.py | 214 +++++++++++++++++++++++++++++++++++++ llvm_tools/patch_utils_unittest.py | 178 ++++++++++++++++++++++++++++++ 2 files changed, 392 insertions(+) create mode 100644 llvm_tools/patch_utils.py create mode 100755 llvm_tools/patch_utils_unittest.py diff --git a/llvm_tools/patch_utils.py b/llvm_tools/patch_utils.py new file mode 100644 index 00000000..2f282990 --- /dev/null +++ b/llvm_tools/patch_utils.py @@ -0,0 +1,214 @@ +# Copyright 2022 The ChromiumOS Authors. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +"""Provides patch utilities for PATCHES.json file handling.""" + +import collections +import dataclasses +import io +from pathlib import Path +import re +import subprocess +import sys +from typing import Any, Dict, List, Optional + + +CHECKED_FILE_RE = re.compile(r'^checking file\s+(.*)$') +HUNK_FAILED_RE = re.compile(r'^Hunk #(\d+) FAILED at.*') +HUNK_HEADER_RE = re.compile(r'^@@\s+-(\d+),(\d+)\s+\+(\d+),(\d+)\s+@@') +HUNK_END_RE = re.compile(r'^--\s*$') +PATCH_SUBFILE_HEADER_RE = re.compile(r'^\+\+\+ [ab]/(.*)$') + + +@dataclasses.dataclass +class Hunk: + """Represents a patch Hunk.""" + hunk_id: int + """Hunk ID for the current file.""" + orig_start: int + orig_hunk_len: int + patch_start: int + patch_hunk_len: int + patch_hunk_lineno_begin: int + patch_hunk_lineno_end: Optional[int] + + +def parse_patch_stream(patch_stream: io.TextIOBase) -> Dict[str, List[Hunk]]: + """Parse a patch file-like into Hunks. + + Args: + patch_stream: A IO stream formatted like a git patch file. + + Returns: + A dictionary mapping filenames to lists of Hunks present + in the patch stream. + """ + + current_filepath = None + current_hunk_id = 0 + current_hunk = None + out = collections.defaultdict(list) + for lineno, line in enumerate(patch_stream.readlines()): + subfile_header = PATCH_SUBFILE_HEADER_RE.match(line) + if subfile_header: + current_filepath = subfile_header.group(1) + if not current_filepath: + raise RuntimeError('Could not get file header in patch stream') + # Need to reset the hunk id, as it's per-file. + current_hunk_id = 0 + continue + hunk_header = HUNK_HEADER_RE.match(line) + if hunk_header: + if not current_filepath: + raise RuntimeError('Parsed hunk before file header in patch stream') + if current_hunk: + # Already parsing a hunk + current_hunk.patch_hunk_lineno_end = lineno + current_hunk_id += 1 + current_hunk = Hunk(hunk_id=current_hunk_id, + orig_start=int(hunk_header.group(1)), + orig_hunk_len=int(hunk_header.group(2)), + patch_start=int(hunk_header.group(3)), + patch_hunk_len=int(hunk_header.group(4)), + patch_hunk_lineno_begin=lineno + 1, + patch_hunk_lineno_end=None) + out[current_filepath].append(current_hunk) + continue + if current_hunk and HUNK_END_RE.match(line): + current_hunk.patch_hunk_lineno_end = lineno + return out + + +def parse_failed_patch_output(text: str) -> Dict[str, List[int]]: + current_file = None + failed_hunks = collections.defaultdict(list) + for eline in text.split('\n'): + checked_file_match = CHECKED_FILE_RE.match(eline) + if checked_file_match: + current_file = checked_file_match.group(1) + continue + failed_match = HUNK_FAILED_RE.match(eline) + if failed_match: + if not current_file: + raise ValueError('Input stream was not parsable') + hunk_id = int(failed_match.group(1)) + failed_hunks[current_file].append(hunk_id) + return failed_hunks + + +@dataclasses.dataclass(frozen=True) +class PatchResult: + """Result of a patch application.""" + succeeded: bool + failed_hunks: Dict[str, List[Hunk]] = dataclasses.field(default_factory=dict) + + def __bool__(self): + return self.succeeded + + +@dataclasses.dataclass +class PatchEntry: + """Object mapping of an entry of PATCHES.json.""" + workdir: Path + metadata: Dict[str, Any] + platforms: List[str] + rel_patch_path: str + version_range: Dict[str, int] + _parsed_hunks = None + + def __post_init__(self): + if not self.workdir.is_dir(): + raise ValueError(f'workdir {self.workdir} is not a directory') + + @classmethod + def from_dict(cls, workdir: Path, data: Dict[str, Any]): + """Instatiate from a 
dictionary. + + Dictionary must have at least the following keys: + + { + 'metadata': { + 'title': '' + }, + 'platforms': ['<platform>'], + 'rel_patch_path': '<relative patch path to workdir>', + 'version_range': { + 'from': <int>, + 'until': <int>, + }, + } + + Returns: + A new PatchEntry. + """ + return cls(workdir, data['metadata'], data['platforms'], + data['rel_patch_path'], data['version_range']) + + def to_dict(self) -> Dict[str, Any]: + return { + 'metadata': self.metadata, + 'platforms': self.platforms, + 'rel_patch_path': self.rel_patch_path, + 'version_range': self.version_range, + } + + def parsed_hunks(self) -> Dict[str, List[Hunk]]: + # Minor caching here because IO is slow. + if not self._parsed_hunks: + with self.patch_path().open(encoding='utf-8') as f: + self._parsed_hunks = parse_patch_stream(f) + return self._parsed_hunks + + def patch_path(self) -> Path: + return self.workdir / self.rel_patch_path + + def can_patch_version(self, svn_version: int) -> bool: + """Is this patch meant to apply to `svn_version`?""" + # Sometimes the key is there, but it's set to None. + from_v = self.version_range.get('from') or 0 + until_v = self.version_range.get('until') + if until_v is None: + until_v = sys.maxsize + return from_v <= svn_version < until_v + + def is_old(self, svn_version: int) -> bool: + """Is this patch old compared to `svn_version`?""" + until_v = self.version_range.get('until') + # Sometimes the key is there, but it's set to None. + if until_v is None: + until_v = sys.maxsize + return svn_version >= until_v + + def apply(self, + root_dir: Path, + extra_args: Optional[List[str]] = None) -> PatchResult: + """Apply a patch to a given directory.""" + if not extra_args: + extra_args = [] + # Cmd to apply a patch in the src unpack path. + cmd = [ + 'patch', '-d', + root_dir.absolute(), '-f', '-p1', '--no-backup-if-mismatch', '-i', + self.patch_path().absolute() + ] + extra_args + try: + subprocess.run(cmd, encoding='utf-8', check=True, stdout=subprocess.PIPE) + except subprocess.CalledProcessError as e: + parsed_hunks = self.parsed_hunks() + failed_hunks_id_dict = parse_failed_patch_output(e.stdout) + failed_hunks = {} + for path, failed_hunk_ids in failed_hunks_id_dict.items(): + hunks_for_file = parsed_hunks[path] + failed_hunks[path] = [ + hunk for hunk in hunks_for_file if hunk.hunk_id in failed_hunk_ids + ] + return PatchResult(succeeded=False, failed_hunks=failed_hunks) + return PatchResult(succeeded=True) + + def test_apply(self, root_dir: Path) -> PatchResult: + """Dry run applying a patch to a given directory.""" + return self.apply(root_dir, ['--dry-run']) + + def title(self) -> str: + return self.metadata['title'] diff --git a/llvm_tools/patch_utils_unittest.py b/llvm_tools/patch_utils_unittest.py new file mode 100755 index 00000000..3dfe52b2 --- /dev/null +++ b/llvm_tools/patch_utils_unittest.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +# Copyright 2022 The ChromiumOS Authors. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +"""Unit tests for the patch_utils.py file.""" + +from pathlib import Path +import unittest +import unittest.mock as mock + +import patch_utils as pu + + +class TestPatchUtils(unittest.TestCase): + """Test the patch_utils.""" + + def test_from_to_dict(self): + """Test to and from dict conversion.""" + d = TestPatchUtils._default_json_dict() + d['metadata'] = { + 'title': 'hello world', + 'info': [], + 'other_extra_info': { + 'extra_flags': [], + } + } + e = pu.PatchEntry.from_dict(TestPatchUtils._mock_dir(), d) + self.assertEqual(d, e.to_dict()) + + def test_can_patch_version(self): + """Test that patch application based on version is correct.""" + base_dict = TestPatchUtils._default_json_dict() + workdir = TestPatchUtils._mock_dir() + e1 = pu.PatchEntry.from_dict(workdir, base_dict) + self.assertFalse(e1.can_patch_version(3)) + self.assertTrue(e1.can_patch_version(4)) + self.assertTrue(e1.can_patch_version(5)) + self.assertFalse(e1.can_patch_version(9)) + base_dict['version_range'] = {'until': 9} + e2 = pu.PatchEntry.from_dict(workdir, base_dict) + self.assertTrue(e2.can_patch_version(0)) + self.assertTrue(e2.can_patch_version(5)) + self.assertFalse(e2.can_patch_version(9)) + base_dict['version_range'] = {'from': 4} + e3 = pu.PatchEntry.from_dict(workdir, base_dict) + self.assertFalse(e3.can_patch_version(3)) + self.assertTrue(e3.can_patch_version(5)) + self.assertTrue(e3.can_patch_version(1 << 31)) + base_dict['version_range'] = {'from': 4, 'until': None} + e4 = pu.PatchEntry.from_dict(workdir, base_dict) + self.assertFalse(e4.can_patch_version(3)) + self.assertTrue(e4.can_patch_version(5)) + self.assertTrue(e4.can_patch_version(1 << 31)) + base_dict['version_range'] = {'from': None, 'until': 9} + e5 = pu.PatchEntry.from_dict(workdir, base_dict) + self.assertTrue(e5.can_patch_version(0)) + self.assertTrue(e5.can_patch_version(5)) + self.assertFalse(e5.can_patch_version(9)) + + def test_parsed_hunks(self): + """Test that we can parse patch file hunks.""" + m = mock.mock_open(read_data=_EXAMPLE_PATCH) + + def mocked_open(self, *args, **kwargs): + return m(self, *args, **kwargs) + + with mock.patch.object(Path, 'open', mocked_open): + e = pu.PatchEntry.from_dict(TestPatchUtils._mock_dir(), + TestPatchUtils._default_json_dict()) + hunk_dict = e.parsed_hunks() + + m.assert_called() + filename1 = 'clang/lib/Driver/ToolChains/Clang.cpp' + filename2 = 'llvm/lib/Passes/PassBuilder.cpp' + self.assertEqual(set(hunk_dict.keys()), {filename1, filename2}) + hunk_list1 = hunk_dict[filename1] + hunk_list2 = hunk_dict[filename2] + self.assertEqual(len(hunk_list1), 1) + self.assertEqual(len(hunk_list2), 2) + + def test_apply_success(self): + """Test that we can call apply.""" + src_dir = TestPatchUtils._mock_dir('somewhere/llvm-project') + patch_dir = TestPatchUtils._mock_dir() + e = pu.PatchEntry.from_dict(patch_dir, TestPatchUtils._default_json_dict()) + with mock.patch('subprocess.run', mock.MagicMock()): + result = e.apply(src_dir) + self.assertTrue(result.succeeded) + + def test_parse_failed_patch_output(self): + """Test that we can call parse `patch` output.""" + fixture = """ +checking file a/b/c.cpp +Hunk #1 SUCCEEDED at 96 with fuzz 1. +Hunk #12 FAILED at 77. +Hunk #42 FAILED at 1979. +checking file x/y/z.h +Hunk #4 FAILED at 30. +checking file works.cpp +Hunk #1 SUCCEEDED at 96 with fuzz 1. 
+""" + result = pu.parse_failed_patch_output(fixture) + self.assertEqual(result['a/b/c.cpp'], [12, 42]) + self.assertEqual(result['x/y/z.h'], [4]) + self.assertNotIn('works.cpp', result) + + @staticmethod + def _default_json_dict(): + return { + 'metadata': { + 'title': 'hello world', + }, + 'platforms': [], + 'rel_patch_path': 'x/y/z', + 'version_range': { + 'from': 4, + 'until': 9, + } + } + + @staticmethod + def _mock_dir(path: str = 'a/b/c'): + workdir = Path(path) + workdir = mock.MagicMock(workdir) + workdir.is_dir = lambda: True + workdir.joinpath = lambda x: Path(path).joinpath(x) + workdir.__truediv__ = lambda self, x: self.joinpath(x) + return workdir + + +_EXAMPLE_PATCH = """ +diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp +index 5620a543438..099eb769ca5 100644 +--- a/clang/lib/Driver/ToolChains/Clang.cpp ++++ b/clang/lib/Driver/ToolChains/Clang.cpp +@@ -3995,8 +3995,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, + Args.hasArg(options::OPT_dA)) + CmdArgs.push_back("-masm-verbose"); + +- if (!TC.useIntegratedAs()) ++ if (!TC.useIntegratedAs()) { + CmdArgs.push_back("-no-integrated-as"); ++ CmdArgs.push_back("-mllvm"); ++ CmdArgs.push_back("-enable-call-graph-profile-sort=false"); ++ } + + if (Args.hasArg(options::OPT_fdebug_pass_structure)) { + CmdArgs.push_back("-mdebug-pass"); +diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp +index c5fd68299eb..4c6e15eeeb9 100644 +--- a/llvm/lib/Passes/PassBuilder.cpp ++++ b/llvm/lib/Passes/PassBuilder.cpp +@@ -212,6 +212,10 @@ static cl::opt<bool> + EnableCHR("enable-chr-npm", cl::init(true), cl::Hidden, + cl::desc("Enable control height reduction optimization (CHR)")); + ++static cl::opt<bool> EnableCallGraphProfileSort( ++ "enable-call-graph-profile-sort", cl::init(true), cl::Hidden, ++ cl::desc("Enable call graph profile pass for the new PM (default = on)")); ++ + extern cl::opt<bool> EnableHotColdSplit; + extern cl::opt<bool> EnableOrderFileInstrumentation; + +@@ -939,7 +943,8 @@ ModulePassManager PassBuilder::buildModuleOptimizationPipeline( + // Add the core optimizing pipeline. + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(OptimizePM))); + +- MPM.addPass(CGProfilePass()); ++ if (EnableCallGraphProfileSort) ++ MPM.addPass(CGProfilePass()); + + // Now we need to do some global optimization transforms. + // FIXME: It would seem like these should come first in the optimization +""" + +if __name__ == '__main__': + unittest.main() -- cgit v1.2.3 From 9d20a38272abfe50c1775d7c3abc876f28db5304 Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead <ajordanr@google.com> Date: Tue, 24 May 2022 02:27:55 +0000 Subject: llvm_tools: Clean up of patch_manager.py This is just some general clean up of patch_manager.py before any structural changes. Mostly sorting imports, re-doing some typing. 
BUG=None TEST=./patch_manager_unittest.py Change-Id: I96d6db4efb20ed4b934a39defe9171de5e19d450 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3673493 Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com> Reviewed-by: George Burgess <gbiv@chromium.org> Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com> --- llvm_tools/patch_manager.py | 41 +++++++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/llvm_tools/patch_manager.py b/llvm_tools/patch_manager.py index a4b42109..c755d88d 100755 --- a/llvm_tools/patch_manager.py +++ b/llvm_tools/patch_manager.py @@ -1,25 +1,41 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- # Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A manager for patches.""" -from __future__ import print_function - import argparse +import dataclasses import json import os import subprocess import sys -from collections import namedtuple +from typing import Any, Dict, IO, List, Optional, Tuple -import get_llvm_hash from failure_modes import FailureModes +import get_llvm_hash from subprocess_helpers import check_call from subprocess_helpers import check_output +@dataclasses.dataclass +class PatchInfo: + """Holds info for a round of patch applications.""" + # str types are legacy. Patch lists should + # probably be PatchEntries, + applied_patches: List[str] + failed_patches: List[str] + # Can be deleted once legacy code is removed. + non_applicable_patches: List[str] + # Can be deleted once legacy code is removed. + disabled_patches: List[str] + # Can be deleted once legacy code is removed. + removed_patches: List[str] + # Can be deleted once legacy code is removed. + modified_metadata: Optional[str] + + def _asdict(self): + return dataclasses.asdict(self) def is_directory(dir_path): """Validates that the argument passed into 'argparse' is a directory.""" @@ -136,8 +152,8 @@ def GetCommandLineArgs(): # applicable patches. parser.add_argument( '--failure_mode', - default=FailureModes.FAIL.value, - type=is_valid_failure_mode, + default=FailureModes.FAIL, + type=FailureModes, help='the mode of the patch manager when handling failed patches ' '(default: %(default)s)') @@ -657,12 +673,6 @@ def HandlePatches(svn_version, # NOTE: Exit code 0 is similar to `git bisect good`. sys.exit(0) - # Create a namedtuple of the patch results. - PatchInfo = namedtuple('PatchInfo', [ - 'applied_patches', 'failed_patches', 'non_applicable_patches', - 'disabled_patches', 'removed_patches', 'modified_metadata' - ]) - patch_info = PatchInfo(applied_patches=applied_patches, failed_patches=failed_patches, non_applicable_patches=non_applicable_patches, @@ -695,7 +705,7 @@ def PrintPatchResults(patch_info): """Prints the results of handling the patches of a package. Args: - patch_info: A namedtuple that has information on the patches. + patch_info: A dataclass that has information on the patches. 
""" if patch_info.applied_patches: @@ -728,8 +738,7 @@ def main(): """Applies patches to the source tree and takes action on a failed patch.""" args_output = GetCommandLineArgs() - - if args_output.failure_mode != FailureModes.INTERNAL_BISECTION.value: + if args_output.failure_mode != FailureModes.INTERNAL_BISECTION: # If the SVN version of HEAD is not the same as 'svn_version', then some # patches that fail to apply could successfully apply if HEAD's SVN version # was the same as 'svn_version'. In other words, HEAD's git hash should be -- cgit v1.2.3 From e1a6d6f9827d243fdb1200d4e9ac8b97c69bdec7 Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead <ajordanr@google.com> Date: Wed, 1 Jun 2022 22:27:47 +0000 Subject: llvm_tools: Add atomic_write to patch_utils.py This allows a utility function which can write to an arbitrary file without risking an incomplete write error, causing issues and creating an invalid edit. This function ensures that the file is only swapped if the file write was successful, otherwise the file is deleted. BUG=None TEST=./patch_utils_unittest.py Change-Id: Iedc3297b0e59d216f027e6ff125f92bc4d088c7d Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3685569 Reviewed-by: George Burgess <gbiv@chromium.org> Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com> Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com> --- llvm_tools/patch_utils.py | 35 ++++++++++++++++++++++++++++++++++- llvm_tools/patch_utils_unittest.py | 26 ++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 1 deletion(-) diff --git a/llvm_tools/patch_utils.py b/llvm_tools/patch_utils.py index 2f282990..9117ba72 100644 --- a/llvm_tools/patch_utils.py +++ b/llvm_tools/patch_utils.py @@ -5,13 +5,14 @@ """Provides patch utilities for PATCHES.json file handling.""" import collections +import contextlib import dataclasses import io from pathlib import Path import re import subprocess import sys -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Union CHECKED_FILE_RE = re.compile(r'^checking file\s+(.*)$') @@ -21,6 +22,38 @@ HUNK_END_RE = re.compile(r'^--\s*$') PATCH_SUBFILE_HEADER_RE = re.compile(r'^\+\+\+ [ab]/(.*)$') +@contextlib.contextmanager +def atomic_write(fp: Union[Path, str], mode='w', *args, **kwargs): + """Write to a filepath atomically. + + This works by a temp file swap, created with a .tmp suffix in + the same directory briefly until being renamed to the desired + filepath. + + Args: + fp: Filepath to open. + mode: File mode; can be 'w', 'wb'. Default 'w'. + *args: Passed to Path.open as nargs. + **kwargs: Passed to Path.open as kwargs. + + Raises: + ValueError when the mode is invalid. 
+ """ + if isinstance(fp, str): + fp = Path(fp) + if mode not in ('w', 'wb'): + raise ValueError(f'mode {mode} not accepted') + temp_fp = fp.with_suffix(fp.suffix + '.tmp') + try: + with temp_fp.open(mode, *args, **kwargs) as f: + yield f + except: + if temp_fp.is_file(): + temp_fp.unlink() + raise + temp_fp.rename(fp) + + @dataclasses.dataclass class Hunk: """Represents a patch Hunk.""" diff --git a/llvm_tools/patch_utils_unittest.py b/llvm_tools/patch_utils_unittest.py index 3dfe52b2..bef5ae5f 100755 --- a/llvm_tools/patch_utils_unittest.py +++ b/llvm_tools/patch_utils_unittest.py @@ -6,6 +6,7 @@ """Unit tests for the patch_utils.py file.""" from pathlib import Path +import tempfile import unittest import unittest.mock as mock @@ -15,6 +16,31 @@ import patch_utils as pu class TestPatchUtils(unittest.TestCase): """Test the patch_utils.""" + def test_atomic_write(self): + """Test that atomic write safely writes.""" + prior_contents = 'This is a test written by patch_utils_unittest.py\n' + new_contents = 'I am a test written by patch_utils_unittest.py\n' + with tempfile.TemporaryDirectory(prefix='patch_utils_unittest') as dirname: + dirpath = Path(dirname) + filepath = dirpath / 'test_atomic_write.txt' + with filepath.open('w', encoding='utf-8') as f: + f.write(prior_contents) + + def _t(): + with pu.atomic_write(filepath, encoding='utf-8') as f: + f.write(new_contents) + raise Exception('Expected failure') + + self.assertRaises(Exception, _t) + with filepath.open(encoding='utf-8') as f: + lines = f.readlines() + self.assertEqual(lines[0], prior_contents) + with pu.atomic_write(filepath, encoding='utf-8') as f: + f.write(new_contents) + with filepath.open(encoding='utf-8') as f: + lines = f.readlines() + self.assertEqual(lines[0], new_contents) + def test_from_to_dict(self): """Test to and from dict conversion.""" d = TestPatchUtils._default_json_dict() -- cgit v1.2.3 From aa99b9ab4093f11c12b0c1ebdae57144f3dbb632 Mon Sep 17 00:00:00 2001 From: Ryan Beltran <ryanbeltran@chromium.org> Date: Tue, 7 Jun 2022 22:42:01 +0000 Subject: compiler_wrapper: include header lints for tidy This CL adds a flag to clang tidy invocations to prevent supressed lints in header files. BUG=b:187790543 TEST=None Change-Id: I9977818894899fc6c28f0c3d4121326ca854e1b3 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3694553 Commit-Queue: Ryan Beltran <ryanbeltran@chromium.org> Tested-by: Ryan Beltran <ryanbeltran@chromium.org> Reviewed-by: George Burgess <gbiv@chromium.org> --- compiler_wrapper/clang_tidy_flag.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/compiler_wrapper/clang_tidy_flag.go b/compiler_wrapper/clang_tidy_flag.go index 2d2565e0..bd1f9846 100644 --- a/compiler_wrapper/clang_tidy_flag.go +++ b/compiler_wrapper/clang_tidy_flag.go @@ -114,7 +114,7 @@ func runClangTidyForTricium(env env, clangCmd *command, cSrcFile, fixesDir strin fixesFilePath := f.Name() + ".yaml" fixesMetadataPath := f.Name() + ".json" - extraTidyFlags = append(extraTidyFlags, "--export-fixes="+fixesFilePath) + extraTidyFlags = append(extraTidyFlags, "--export-fixes="+fixesFilePath, "--header-filter=.*") clangTidyCmd, err := calcClangTidyInvocation(env, clangCmd, cSrcFile, extraTidyFlags...) 
if err != nil { return fmt.Errorf("calculating tidy invocation: %v", err) -- cgit v1.2.3 From 41e93526f0117ee883353c800f063239fff92ad4 Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead <ajordanr@google.com> Date: Thu, 9 Jun 2022 23:08:19 +0000 Subject: llvm_tools: Add more utils to patch_utils.py This adds the json_to_patch_entries function which abstracts away a common pattern to convert the PATCHES.json file contents into patch_entries eagerly. Fixes some typing issues too that have presented themselves in later CLs. BUG=None TEST=./patch_utils_unittest.py Change-Id: I5ecb1aedbbfb4f04176c021ff976da417319c17e Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3699193 Reviewed-by: George Burgess <gbiv@chromium.org> Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com> --- llvm_tools/patch_utils.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/llvm_tools/patch_utils.py b/llvm_tools/patch_utils.py index 9117ba72..6fd75c3a 100644 --- a/llvm_tools/patch_utils.py +++ b/llvm_tools/patch_utils.py @@ -7,12 +7,12 @@ import collections import contextlib import dataclasses -import io +import json from pathlib import Path import re import subprocess import sys -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, IO, List, Optional, Union CHECKED_FILE_RE = re.compile(r'^checking file\s+(.*)$') @@ -67,7 +67,7 @@ class Hunk: patch_hunk_lineno_end: Optional[int] -def parse_patch_stream(patch_stream: io.TextIOBase) -> Dict[str, List[Hunk]]: +def parse_patch_stream(patch_stream: IO[str]) -> Dict[str, List[Hunk]]: """Parse a patch file-like into Hunks. Args: @@ -144,10 +144,11 @@ class PatchResult: class PatchEntry: """Object mapping of an entry of PATCHES.json.""" workdir: Path + """Storage location for the patches.""" metadata: Dict[str, Any] platforms: List[str] rel_patch_path: str - version_range: Dict[str, int] + version_range: Dict[str, Optional[int]] _parsed_hunks = None def __post_init__(self): @@ -245,3 +246,13 @@ class PatchEntry: def title(self) -> str: return self.metadata['title'] + + +def json_to_patch_entries(workdir: Path, json_fd: IO[str]) -> List[PatchEntry]: + """Convert a json IO object to List[PatchEntry]. + + Examples: + >>> f = open('PATCHES.json') + >>> patch_entries = json_to_patch_entries(Path(), f) + """ + return [PatchEntry.from_dict(workdir, d) for d in json.load(json_fd)] -- cgit v1.2.3 From d9d151871f0500e8cdad2432e40698ca15fbf8b4 Mon Sep 17 00:00:00 2001 From: George Burgess IV <gbiv@google.com> Date: Thu, 9 Jun 2022 10:33:57 -0700 Subject: compiler_wrapper: add a --version_suffix flag to build.py This allows us to provide more information about the compiler being wrapped to the wrapper. The intended use of this is to embed hashes of the compiler being wrapped into the wrapper itself, so as the compiler changes, the wrapper's SHA changes with it. While I'm in the area, fix up a comment, and apply this flag to our manual wrapper installation script. 
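The wrinkle below is that a computed version may now contain spaces, so the `-X` value has to be quoted inside the ldflags string, and versions containing single quotes are rejected outright. A hedged reconstruction of how the pieces fit together (the final command assembly is an assumption, not shown in the diff):

    version = read_version(build_dir) + args.version_suffix  # may contain spaces
    if "'" in version:
        raise ValueError('`version` should not contain single quotes')
    ld_flags = ['-X', "'main.Version=" + version + "'"]  # inner quotes guard spaces
    # Presumably joined into a single -ldflags argument for `go build`:
    cmd = ['go', 'build', '-ldflags', ' '.join(ld_flags), '-o', args.output_file]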
BUG=b:222321317 TEST=`emerge llvm` Change-Id: I20380722e1e539d51fe7ea708c63dad696d84c87 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3696434 Reviewed-by: Denis Nikitin <denik@chromium.org> Tested-by: George Burgess <gbiv@chromium.org> Commit-Queue: George Burgess <gbiv@chromium.org> --- compiler_wrapper/build.py | 21 +++++++++++++++---- compiler_wrapper/config.go | 2 +- compiler_wrapper/install_compiler_wrapper.sh | 31 ++++++++++++++++++++++++---- 3 files changed, 45 insertions(+), 9 deletions(-) diff --git a/compiler_wrapper/build.py b/compiler_wrapper/build.py index 44c398bf..d004b553 100755 --- a/compiler_wrapper/build.py +++ b/compiler_wrapper/build.py @@ -23,7 +23,12 @@ def parse_args(): choices=['cros.hardened', 'cros.nonhardened', 'cros.host', 'android']) parser.add_argument('--use_ccache', required=True, choices=['true', 'false']) parser.add_argument( - '--use_llvm_next', required=True, choices=['true', 'false']) + '--version_suffix', + help='A string appended to the computed version of the wrapper. This ' + 'is appeneded directly without any delimiter.') + parser.add_argument('--use_llvm_next', + required=True, + choices=['true', 'false']) parser.add_argument('--output_file', required=True, type=str) parser.add_argument( '--static', @@ -41,6 +46,11 @@ def parse_args(): def calc_go_args(args, version, build_dir): + # These seem unnecessary, and might lead to breakages with Go's ldflag + # parsing. Don't allow them. + if "'" in version: + raise ValueError('`version` should not contain single quotes') + ldFlags = [ '-X', 'main.ConfigName=' + args.config, @@ -49,7 +59,8 @@ def calc_go_args(args, version, build_dir): '-X', 'main.UseLlvmNext=' + args.use_llvm_next, '-X', - 'main.Version=' + version, + # Quote this, as `version` may have spaces in it. + "'main.Version=" + version + "'", ] # If the wrapper is intended for ChromeOS, we need to use libc's exec. @@ -60,8 +71,8 @@ def calc_go_args(args, version, build_dir): if args.config == 'android': # If android_llvm_next_flags.go DNE, we'll get an obscure "no # llvmNextFlags" build error; complaining here is clearer. - if not os.path.exists( - os.path.join(build_dir, 'android_llvm_next_flags.go')): + if not os.path.exists(os.path.join(build_dir, + 'android_llvm_next_flags.go')): sys.exit('In order to build the Android wrapper, you must have a local ' 'android_llvm_next_flags.go file; please see ' 'cros_llvm_next_flags.go.') @@ -92,6 +103,8 @@ def main(): args = parse_args() build_dir = os.path.dirname(__file__) version = read_version(build_dir) + if args.version_suffix: + version += args.version_suffix # Note: Go does not support using absolute package names. # So we run go inside the directory of the the build file. sys.exit( diff --git a/compiler_wrapper/config.go b/compiler_wrapper/config.go index c275ae9f..25df476f 100644 --- a/compiler_wrapper/config.go +++ b/compiler_wrapper/config.go @@ -34,7 +34,7 @@ type config struct { triciumNitsDir string // Directory to store crash artifacts in. crashArtifactsDir string - // Version. Only used for printing via -print-cmd. + // Version. Only exposed via -print-config. version string } diff --git a/compiler_wrapper/install_compiler_wrapper.sh b/compiler_wrapper/install_compiler_wrapper.sh index f05f2b4c..a503895f 100755 --- a/compiler_wrapper/install_compiler_wrapper.sh +++ b/compiler_wrapper/install_compiler_wrapper.sh @@ -11,18 +11,41 @@ if [[ ! 
-e /etc/cros_chroot_version ]]; then exit 1 fi set -e + +# Use a unique value here, since folks doing wrapper dev _likely_ want builds +# to always be redone. +version_suffix="manually_installed_wrapper_at_unix_$(date +%s.%6N)" +echo "Using toolchain hash: ${version_suffix}" cd "$(dirname "$(readlink -m "$0")")" + +build_py() { + ./build.py --version_suffix="${version_suffix}" "$@" +} + echo "Updated files:" # Update the host wrapper -./build.py --config=cros.host --use_ccache=false --use_llvm_next=false --output_file=./clang_host_wrapper +build_py \ + --config=cros.host \ + --use_ccache=false \ + --use_llvm_next=false \ + --output_file=./clang_host_wrapper sudo mv ./clang_host_wrapper /usr/bin/clang_host_wrapper echo "/usr/bin/clang_host_wrapper" sudo cp ../binary_search_tool/bisect_driver.py /usr/bin echo "/usr/bin/clang_host_wrapper/bisect_driver.py" # Update the target wrappers -./build.py --config=cros.hardened --use_ccache=false --use_llvm_next=false --output_file=./sysroot_wrapper.hardened.noccache -./build.py --config=cros.hardened --use_ccache=true --use_llvm_next=false --output_file=./sysroot_wrapper.hardened.ccache +build_py \ + --config=cros.hardened \ + --use_ccache=false \ + --use_llvm_next=false \ + --output_file=./sysroot_wrapper.hardened.noccache +build_py \ + --config=cros.hardened \ + --use_ccache=true \ + --use_llvm_next=false \ + --output_file=./sysroot_wrapper.hardened.ccache + # Update clang target wrappers. sudo cp ./sysroot_wrapper.hardened.noccache ./sysroot_wrapper.hardened.ccache /usr/bin echo "Updated clang wrapper /usr/bin/sysroot_wrapper.hardened.noccache" @@ -30,7 +53,7 @@ echo "Updated clang wrapper /usr/bin/sysroot_wrapper.hardened.ccache" # Update GCC target wrappers. for GCC in cross-x86_64-cros-linux-gnu/gcc cross-armv7a-cros-linux-gnueabihf/gcc cross-aarch64-cros-linux-gnu/gcc; do - if ! FILES="$(equery f ${GCC})"; then + if ! FILES="$(equery f "${GCC}")"; then if [[ $(equery l "${GCC}" 2>&1 | wc -c) -eq 0 ]]; then echo "no ${GCC} package found; skipping" >&2 continue -- cgit v1.2.3 From 671cc1d5e9a9259cec9452144380f44b1f161032 Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead <ajordanr@google.com> Date: Sat, 28 May 2022 04:06:03 +0000 Subject: llvm_tools: Restructure most patch_manager modes This is an attempt to gradually replace some of the behavior of patch_manager.py with a more extendable structure. Instead of having the same code handle every patch_manager.py mode, instead we can have a dispatch for supported modes, and the fallback to legacy when we can't separate the existing behavior. This does not change the external API of patch_manager.py at all. All unittests still pass, and we're still applying patches correctly as expected. 
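Structurally, the change is a dispatch table: each supported failure mode maps to a small handler function, and anything not in the table falls through to the legacy HandlePatches() path. In miniature (handler names as in the diff; bodies elided):

    dispatch_table = {
        FailureModes.FAIL: _apply_all,
        FailureModes.CONTINUE: _apply_all,
        FailureModes.REMOVE_PATCHES: _remove,
        FailureModes.DISABLE_PATCHES: _disable,
    }

    if args_output.failure_mode in dispatch_table:
        dispatch_table[args_output.failure_mode](args_output)
    else:
        # Legacy fallback, e.g. the bisection modes not yet migrated.
        ...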
BUG=b:188465085, b:227216280 TEST=./patch_manager_unittest.py TEST=cp patch_manager.py patch_utils.py ${llvm_files}/patch_manager/ \ && sudo emerge llvm Change-Id: I43d26d4e903140ce2e490624aaac15d0bae898cd Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3661358 Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Reviewed-by: George Burgess <gbiv@chromium.org> Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com> --- llvm_tools/patch_manager.py | 245 ++++++++++++++++++++++++++++++++--- llvm_tools/patch_manager_unittest.py | 85 +++++++++++- 2 files changed, 307 insertions(+), 23 deletions(-) diff --git a/llvm_tools/patch_manager.py b/llvm_tools/patch_manager.py index c755d88d..51a7476b 100755 --- a/llvm_tools/patch_manager.py +++ b/llvm_tools/patch_manager.py @@ -9,22 +9,25 @@ import argparse import dataclasses import json import os +from pathlib import Path import subprocess import sys from typing import Any, Dict, IO, List, Optional, Tuple from failure_modes import FailureModes import get_llvm_hash +import patch_utils from subprocess_helpers import check_call from subprocess_helpers import check_output -@dataclasses.dataclass + +@dataclasses.dataclass(frozen=True) class PatchInfo: """Holds info for a round of patch applications.""" # str types are legacy. Patch lists should # probably be PatchEntries, - applied_patches: List[str] - failed_patches: List[str] + applied_patches: List[patch_utils.PatchEntry] + failed_patches: List[patch_utils.PatchEntry] # Can be deleted once legacy code is removed. non_applicable_patches: List[str] # Can be deleted once legacy code is removed. @@ -37,6 +40,7 @@ class PatchInfo: def _asdict(self): return dataclasses.asdict(self) + def is_directory(dir_path): """Validates that the argument passed into 'argparse' is a directory.""" @@ -298,7 +302,14 @@ def UpdatePatchMetadataFile(patch_metadata_file, patches): raise ValueError('File does not end in ".json": %s' % patch_metadata_file) with open(patch_metadata_file, 'w') as patch_file: - json.dump(patches, patch_file, indent=4, separators=(',', ': ')) + _WriteJsonChanges(patches, patch_file) + + +def _WriteJsonChanges(patches: List[Dict[str, Any]], file_io: IO[str]): + """Write JSON changes to file, does not acquire new file lock.""" + json.dump(patches, file_io, indent=4, separators=(',', ': ')) + # Need to add a newline as json.dump omits it. + file_io.write('\n') def GetCommitHashesForBisection(src_path, good_svn_version, bad_svn_version): @@ -382,6 +393,172 @@ def RestoreSrcTreeState(src_path, bad_commit_hash): check_output(get_changes_cmd) +def ApplyAllFromJson(svn_version: int, + llvm_src_dir: Path, + patches_json_fp: Path, + continue_on_failure: bool = False) -> PatchInfo: + """Attempt to apply some patches to a given LLVM source tree. + + This relies on a PATCHES.json file to be the primary way + the patches are applied. + + Args: + svn_version: LLVM Subversion revision to patch. + llvm_src_dir: llvm-project root-level source directory to patch. + patches_json_fp: Filepath to the PATCHES.json file. + continue_on_failure: Skip any patches which failed to apply, + rather than throw an Exception. 
+ """ + with patches_json_fp.open(encoding='utf-8') as f: + patches = patch_utils.json_to_patch_entries(patches_json_fp.parent, f) + skipped_patches = [] + failed_patches = [] + applied_patches = [] + for pe in patches: + applied, failed_hunks = ApplySinglePatchEntry(svn_version, llvm_src_dir, + pe) + if applied: + applied_patches.append(pe) + continue + if failed_hunks is not None: + if continue_on_failure: + failed_patches.append(pe) + continue + else: + _PrintFailedPatch(pe, failed_hunks) + raise RuntimeError('failed to apply patch ' + f'{pe.patch_path()}: {pe.title()}') + # Didn't apply, didn't fail, it was skipped. + skipped_patches.append(pe) + return PatchInfo( + non_applicable_patches=skipped_patches, + applied_patches=applied_patches, + failed_patches=failed_patches, + disabled_patches=[], + removed_patches=[], + modified_metadata=None, + ) + + +def ApplySinglePatchEntry( + svn_version: int, llvm_src_dir: Path, pe: patch_utils.PatchEntry +) -> Tuple[bool, Optional[Dict[str, List[patch_utils.Hunk]]]]: + """Try to apply a single PatchEntry object. + + Returns: + Tuple where the first element indicates whether the patch applied, + and the second element is a faild hunk mapping from file name to lists of + hunks (if the patch didn't apply). + """ + # Don't apply patches outside of the version range. + if not pe.can_patch_version(svn_version): + return False, None + # Test first to avoid making changes. + test_application = pe.test_apply(llvm_src_dir) + if not test_application: + return False, test_application.failed_hunks + # Now actually make changes. + application_result = pe.apply(llvm_src_dir) + if not application_result: + # This should be very rare/impossible. + return False, application_result.failed_hunks + return True, None + + +def RemoveOldPatches(svn_version: int, llvm_src_dir: Path, + patches_json_fp: Path): + """Remove patches that don't and will never apply for the future. + + Patches are determined to be "old" via the "is_old" method for + each patch entry. + + Args: + svn_version: LLVM SVN version. + llvm_src_dir: LLVM source directory. + patches_json_fp: Location to edit patches on. + """ + with patches_json_fp.open(encoding='utf-8') as f: + patches_list = json.load(f) + patch_entries = (patch_utils.PatchEntry.from_dict(llvm_src_dir, elem) + for elem in patches_list) + filtered_entries = [ + entry.to_dict() for entry in patch_entries + if not entry.is_old(svn_version) + ] + with patch_utils.atomic_write(patches_json_fp, encoding='utf-8') as f: + _WriteJsonChanges(filtered_entries, f) + + +def UpdateVersionRanges(svn_version: int, llvm_src_dir: Path, + patches_json_fp: Path): + """Reduce the version ranges of failing patches. + + Patches which fail to apply will have their 'version_range.until' + field reduced to the passed in svn_version. + + Modifies the contents of patches_json_fp. + + Ars: + svn_version: LLVM revision number. + llvm_src_dir: llvm-project directory path. + patches_json_fp: Filepath to the PATCHES.json file. 
+ """ + if IsGitDirty(llvm_src_dir): + raise RuntimeError('Cannot test patch applications, llvm_src_dir is dirty') + with patches_json_fp.open(encoding='utf-8') as f: + patch_entries = patch_utils.json_to_patch_entries(patches_json_fp.parent, + f) + modified_entries: List[patch_utils.PatchEntry] = [] + for pe in patch_entries: + test_result = pe.test_apply(llvm_src_dir) + if not test_result: + pe.version_range['until'] = svn_version + modified_entries.append(pe) + else: + # We have to actually apply the patch so that future patches + # will stack properly. + if not pe.apply(llvm_src_dir).succeeded: + CleanSrcTree(llvm_src_dir) + raise RuntimeError('Could not apply patch that dry ran successfully') + with patch_utils.atomic_write(patches_json_fp, encoding='utf-8') as f: + _WriteJsonChanges([p.to_dict() for p in patch_entries], f) + for entry in modified_entries: + print(f'Stopped applying {entry.rel_patch_path} ({entry.title()}) ' + f'for r{svn_version}') + CleanSrcTree(llvm_src_dir) + + +def IsGitDirty(git_root_dir: Path) -> bool: + """Return whether the given git directory has uncommitted changes.""" + if not git_root_dir.is_dir(): + raise ValueError(f'git_root_dir {git_root_dir} is not a directory') + cmd = ['git', 'ls-files', '-m', '--other', '--exclude-standard'] + return (subprocess.run(cmd, + stdout=subprocess.PIPE, + check=True, + cwd=git_root_dir, + encoding='utf-8').stdout != "") + + +def _PrintFailedPatch(pe: patch_utils.PatchEntry, + failed_hunks: Dict[str, List[patch_utils.Hunk]]): + """Print information about a single failing PatchEntry. + + Args: + pe: A PatchEntry that failed. + failed_hunks: Hunks for pe which failed as dict: + filepath: [Hunk...] + """ + print(f'Could not apply {pe.rel_patch_path}: {pe.title()}', file=sys.stderr) + for fp, hunks in failed_hunks.items(): + print(f'{fp}:', file=sys.stderr) + for h in hunks: + print( + f'- {pe.rel_patch_path} ' + f'l:{h.patch_hunk_lineno_begin}...{h.patch_hunk_lineno_end}', + file=sys.stderr) + + def HandlePatches(svn_version, patch_metadata_file, filesdir_path, @@ -701,24 +878,27 @@ def HandlePatches(svn_version, return patch_info -def PrintPatchResults(patch_info): +def PrintPatchResults(patch_info: PatchInfo): """Prints the results of handling the patches of a package. Args: patch_info: A dataclass that has information on the patches. """ + def _fmt(patches): + return (str(pe.patch_path()) for pe in patches) + if patch_info.applied_patches: print('\nThe following patches applied successfully:') - print('\n'.join(patch_info.applied_patches)) + print('\n'.join(_fmt(patch_info.applied_patches))) if patch_info.failed_patches: print('\nThe following patches failed to apply:') - print('\n'.join(patch_info.failed_patches)) + print('\n'.join(_fmt(patch_info.failed_patches))) if patch_info.non_applicable_patches: print('\nThe following patches were not applicable:') - print('\n'.join(patch_info.non_applicable_patches)) + print('\n'.join(_fmt(patch_info.non_applicable_patches))) if patch_info.modified_metadata: print('\nThe patch metadata file %s has been modified' % @@ -726,7 +906,7 @@ def PrintPatchResults(patch_info): if patch_info.disabled_patches: print('\nThe following patches were disabled:') - print('\n'.join(patch_info.disabled_patches)) + print('\n'.join(_fmt(patch_info.disabled_patches))) if patch_info.removed_patches: print('\nThe following patches were removed from the patch metadata file:') @@ -754,16 +934,43 @@ def main(): # SVN version is not used in determining whether a patch is applicable. 
args_output.svn_version = GetHEADSVNVersion(args_output.src_path) - # Get the results of handling the patches of the package. - patch_info = HandlePatches(args_output.svn_version, - args_output.patch_metadata_file, - args_output.filesdir_path, args_output.src_path, - FailureModes(args_output.failure_mode), - args_output.good_svn_version, - args_output.num_patches_to_iterate, - args_output.continue_bisection) - - PrintPatchResults(patch_info) + def _apply_all(args): + result = ApplyAllFromJson( + svn_version=args.svn_version, + llvm_src_dir=Path(args.src_path), + patches_json_fp=Path(args.patch_metadata_file), + continue_on_failure=args.failure_mode == FailureModes.CONTINUE) + PrintPatchResults(result) + + def _remove(args): + RemoveOldPatches(args.svn_version, Path(args.src_path), + Path(args.patch_metadata_file)) + + def _disable(args): + UpdateVersionRanges(args.svn_version, Path(args.src_path), + Path(args.patch_metadata_file)) + + dispatch_table = { + FailureModes.FAIL: _apply_all, + FailureModes.CONTINUE: _apply_all, + FailureModes.REMOVE_PATCHES: _remove, + FailureModes.DISABLE_PATCHES: _disable + } + + if args_output.failure_mode in dispatch_table: + dispatch_table[args_output.failure_mode](args_output) + else: + # TODO(ajordanr): Legacy mode, remove when dispatch_table + # supports bisection. + # Get the results of handling the patches of the package. + patch_info = HandlePatches(args_output.svn_version, + args_output.patch_metadata_file, + args_output.filesdir_path, args_output.src_path, + FailureModes(args_output.failure_mode), + args_output.good_svn_version, + args_output.num_patches_to_iterate, + args_output.continue_bisection) + PrintPatchResults(patch_info) if __name__ == '__main__': diff --git a/llvm_tools/patch_manager_unittest.py b/llvm_tools/patch_manager_unittest.py index 452aea39..63d70a5b 100755 --- a/llvm_tools/patch_manager_unittest.py +++ b/llvm_tools/patch_manager_unittest.py @@ -1,21 +1,21 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- # Copyright 2019 The ChromiumOS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
"""Unit tests when handling patches.""" -from __future__ import print_function - import json import os +from pathlib import Path import subprocess +import tempfile +from typing import Callable import unittest import unittest.mock as mock -import patch_manager from failure_modes import FailureModes +import patch_manager from test_helpers import CallCountsToMockFunctions from test_helpers import CreateTemporaryJsonFile from test_helpers import WritePrettyJsonFile @@ -189,6 +189,83 @@ class PatchManagerTest(unittest.TestCase): self.assertEqual(patch_manager.GetPatchMetadata(test_patch), expected_patch_metadata) + def testRemoveOldPatches(self): + """Can remove old patches from PATCHES.json.""" + one_patch_dict = { + 'metadata': { + 'title': '[some label] hello world', + }, + 'platforms': [ + 'chromiumos', + ], + 'rel_patch_path': 'x/y/z', + 'version_range': { + 'from': 4, + 'until': 5, + } + } + patches = [ + one_patch_dict, + { + **one_patch_dict, 'version_range': { + 'until': None + } + }, + { + **one_patch_dict, 'version_range': { + 'from': 100 + } + }, + { + **one_patch_dict, 'version_range': { + 'until': 8 + } + }, + ] + cases = [ + (0, lambda x: self.assertEqual(len(x), 4)), + (6, lambda x: self.assertEqual(len(x), 3)), + (8, lambda x: self.assertEqual(len(x), 2)), + (1000, lambda x: self.assertEqual(len(x), 2)), + ] + + def _t(dirname: str, svn_version: int, assertion_f: Callable): + json_filepath = Path(dirname) / 'PATCHES.json' + with json_filepath.open('w', encoding='utf-8') as f: + json.dump(patches, f) + patch_manager.RemoveOldPatches(svn_version, Path(), json_filepath) + with json_filepath.open('r', encoding='utf-8') as f: + result = json.load(f) + assertion_f(result) + + with tempfile.TemporaryDirectory( + prefix='patch_manager_unittest') as dirname: + for r, a in cases: + _t(dirname, r, a) + + def testIsGitDirty(self): + """Test if a git directory has uncommitted changes.""" + with tempfile.TemporaryDirectory( + prefix='patch_manager_unittest') as dirname: + dirpath = Path(dirname) + + def _run_h(cmd): + subprocess.run(cmd, cwd=dirpath, stdout=subprocess.DEVNULL, check=True) + + _run_h(['git', 'init']) + self.assertFalse(patch_manager.IsGitDirty(dirpath)) + test_file = dirpath / 'test_file' + test_file.touch() + self.assertTrue(patch_manager.IsGitDirty(dirpath)) + _run_h(['git', 'add', '.']) + _run_h(['git', 'commit', '-m', 'test']) + self.assertFalse(patch_manager.IsGitDirty(dirpath)) + test_file.touch() + self.assertFalse(patch_manager.IsGitDirty(dirpath)) + with test_file.open('w', encoding='utf-8'): + test_file.write_text('abc') + self.assertTrue(patch_manager.IsGitDirty(dirpath)) + def testFailedToApplyPatchWhenInvalidSrcPathIsPassedIn(self): src_path = '/abs/path/to/src' -- cgit v1.2.3 From 472996cf6cd2b9281a1cc3cfb96f1505b20472f0 Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead <ajordanr@google.com> Date: Tue, 14 Jun 2022 02:34:47 +0000 Subject: llvm_tools: Unify patch json representation At present, I made a mistake where patch_utils expected certain keys to exist. However, the original patch_manager.py didn't. This was further broken by the fact that patch_sync.py would avoid serializing the 'platforms' field if empty, causing a back and forth between patch_utils.py and patch_sync. Update the unittests to match and verify we're doing the correct thing. 
BUG=None TEST=./patch_utils_unittest.py TEST=./patch_manager_unittest.py Change-Id: Ib02c9d552848831f395b006de9a28ea4292b82f7 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3704542 Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Reviewed-by: George Burgess <gbiv@chromium.org> Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com> --- llvm_tools/patch_manager.py | 2 ++ llvm_tools/patch_utils.py | 36 +++++++++++++++++++----------------- llvm_tools/patch_utils_unittest.py | 30 +++++++++++++++++++++++++++++- 3 files changed, 50 insertions(+), 18 deletions(-) diff --git a/llvm_tools/patch_manager.py b/llvm_tools/patch_manager.py index 51a7476b..b499d52f 100755 --- a/llvm_tools/patch_manager.py +++ b/llvm_tools/patch_manager.py @@ -512,6 +512,8 @@ def UpdateVersionRanges(svn_version: int, llvm_src_dir: Path, for pe in patch_entries: test_result = pe.test_apply(llvm_src_dir) if not test_result: + if pe.version_range is None: + pe.version_range = {} pe.version_range['until'] = svn_version modified_entries.append(pe) else: diff --git a/llvm_tools/patch_utils.py b/llvm_tools/patch_utils.py index 6fd75c3a..cdf9f215 100644 --- a/llvm_tools/patch_utils.py +++ b/llvm_tools/patch_utils.py @@ -145,10 +145,10 @@ class PatchEntry: """Object mapping of an entry of PATCHES.json.""" workdir: Path """Storage location for the patches.""" - metadata: Dict[str, Any] - platforms: List[str] + metadata: Optional[Dict[str, Any]] + platforms: Optional[List[str]] rel_patch_path: str - version_range: Dict[str, Optional[int]] + version_range: Optional[Dict[str, Optional[int]]] _parsed_hunks = None def __post_init__(self): @@ -159,33 +159,29 @@ class PatchEntry: def from_dict(cls, workdir: Path, data: Dict[str, Any]): """Instatiate from a dictionary. - Dictionary must have at least the following keys: + Dictionary must have at least the following key: { - 'metadata': { - 'title': '<title>' - }, - 'platforms': ['<platform>'], 'rel_patch_path': '<relative patch path to workdir>', - 'version_range': { - 'from': <int>, - 'until': <int>, - }, } Returns: A new PatchEntry. """ - return cls(workdir, data['metadata'], data['platforms'], - data['rel_patch_path'], data['version_range']) + return cls(workdir, data.get('metadata'), data.get('platforms'), + data['rel_patch_path'], data.get('version_range')) def to_dict(self) -> Dict[str, Any]: - return { + out = { 'metadata': self.metadata, - 'platforms': self.platforms, 'rel_patch_path': self.rel_patch_path, 'version_range': self.version_range, } + if self.platforms: + # To match patch_sync, only serialized when + # non-empty and non-null. + out['platforms'] = sorted(self.platforms) + return out def parsed_hunks(self) -> Dict[str, List[Hunk]]: # Minor caching here because IO is slow. @@ -200,6 +196,8 @@ class PatchEntry: def can_patch_version(self, svn_version: int) -> bool: """Is this patch meant to apply to `svn_version`?""" # Sometimes the key is there, but it's set to None. + if not self.version_range: + return True from_v = self.version_range.get('from') or 0 until_v = self.version_range.get('until') if until_v is None: @@ -208,6 +206,8 @@ class PatchEntry: def is_old(self, svn_version: int) -> bool: """Is this patch old compared to `svn_version`?""" + if not self.version_range: + return False until_v = self.version_range.get('until') # Sometimes the key is there, but it's set to None. 
if until_v is None: @@ -245,7 +245,9 @@ class PatchEntry: return self.apply(root_dir, ['--dry-run']) def title(self) -> str: - return self.metadata['title'] + if not self.metadata: + return '' + return self.metadata.get('title', '') def json_to_patch_entries(workdir: Path, json_fd: IO[str]) -> List[PatchEntry]: diff --git a/llvm_tools/patch_utils_unittest.py b/llvm_tools/patch_utils_unittest.py index bef5ae5f..3a6409b9 100755 --- a/llvm_tools/patch_utils_unittest.py +++ b/llvm_tools/patch_utils_unittest.py @@ -5,6 +5,7 @@ """Unit tests for the patch_utils.py file.""" +import io from pathlib import Path import tempfile import unittest @@ -84,6 +85,33 @@ class TestPatchUtils(unittest.TestCase): self.assertTrue(e5.can_patch_version(5)) self.assertFalse(e5.can_patch_version(9)) + def test_can_parse_from_json(self): + """Test that patches be loaded from json.""" + json = """ +[ + { + "metadata": {}, + "platforms": [], + "rel_patch_path": "cherry/nowhere.patch", + "version_range": {} + }, + { + "metadata": {}, + "rel_patch_path": "cherry/somewhere.patch", + "version_range": {} + }, + { + "rel_patch_path": "where.patch", + "version_range": null + }, + { + "rel_patch_path": "cherry/anywhere.patch" + } +] + """ + result = pu.json_to_patch_entries(Path(), io.StringIO(json)) + self.assertEqual(len(result), 4) + def test_parsed_hunks(self): """Test that we can parse patch file hunks.""" m = mock.mock_open(read_data=_EXAMPLE_PATCH) @@ -137,7 +165,7 @@ Hunk #1 SUCCEEDED at 96 with fuzz 1. 'metadata': { 'title': 'hello world', }, - 'platforms': [], + 'platforms': ['a'], 'rel_patch_path': 'x/y/z', 'version_range': { 'from': 4, -- cgit v1.2.3 From c74d39dcf1bcd1b6b4fa25418eb978687c011010 Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead <ajordanr@google.com> Date: Tue, 14 Jun 2022 23:36:55 +0000 Subject: llvm_tools: Print removed in patch_manager.py We should print out what we actually did, instead of just exiting silently when everything goes well. This is informative to the user running patch_manager.py code. 
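With the change below, a removal run now reports what it dropped; the output looks roughly like this (paths reused from the unittest fixtures above, titles invented for illustration):

    Removed 2 old patches:
    - cherry/nowhere.patch: [Clang] Some expired cherry-pick
    - cherry/somewhere.patch: [LLVM] Another stale backport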
BUG=b:188465085 TEST=./patch_manager_unittest.py Change-Id: Idcb203d0c3e28d1b00e7b9503334863b7844d033 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3706059 Reviewed-by: George Burgess <gbiv@chromium.org> Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com> --- llvm_tools/patch_manager.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/llvm_tools/patch_manager.py b/llvm_tools/patch_manager.py index b499d52f..eba3a530 100755 --- a/llvm_tools/patch_manager.py +++ b/llvm_tools/patch_manager.py @@ -481,12 +481,15 @@ def RemoveOldPatches(svn_version: int, llvm_src_dir: Path, patches_list = json.load(f) patch_entries = (patch_utils.PatchEntry.from_dict(llvm_src_dir, elem) for elem in patches_list) - filtered_entries = [ - entry.to_dict() for entry in patch_entries - if not entry.is_old(svn_version) - ] + oldness = [(entry, entry.is_old(svn_version)) for entry in patch_entries] + filtered_entries = [entry.to_dict() for entry, old in oldness if not old] with patch_utils.atomic_write(patches_json_fp, encoding='utf-8') as f: _WriteJsonChanges(filtered_entries, f) + removed_entries = [entry for entry, old in oldness if old] + plural_patches = 'patch' if len(removed_entries) == 1 else 'patches' + print(f'Removed {len(removed_entries)} old {plural_patches}:') + for r in removed_entries: + print(f'- {r.rel_patch_path}: {r.title()}') def UpdateVersionRanges(svn_version: int, llvm_src_dir: Path, -- cgit v1.2.3 From 279b045247f3512579fbcdd0f146def546652779 Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead <ajordanr@google.com> Date: Mon, 13 Jun 2022 22:48:18 +0000 Subject: llvm_tools: Migrate patch_manager.py bisection Previous restructuring did not handle the bisection case, as it was considerably more complicated than other failure modes for patch_manager.py. This commit changes the operation of bisection in patch_manager.py quite drastically, as it now assumes that the git bisection process can occur outside of patch_manager.py, rather than internally. BUG=b:188465085 TEST=./patch_manager_unittest.py TEST=./patch_manager.py --failure_mode bisect <...> \ # With a patch that applied far too late Change-Id: I156373dc13bcbd3d297f8537fb6fb77fe9f0e9a5 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3704539 Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com> Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Reviewed-by: George Burgess <gbiv@chromium.org> --- llvm_tools/patch_manager.py | 211 ++++++++++++++++++++++++++--------- llvm_tools/patch_manager_unittest.py | 123 +++++++++++++++++++- 2 files changed, 283 insertions(+), 51 deletions(-) diff --git a/llvm_tools/patch_manager.py b/llvm_tools/patch_manager.py index eba3a530..82ba65d1 100755 --- a/llvm_tools/patch_manager.py +++ b/llvm_tools/patch_manager.py @@ -6,13 +6,15 @@ """A manager for patches.""" import argparse +import contextlib import dataclasses +import enum import json import os from pathlib import Path import subprocess import sys -from typing import Any, Dict, IO, List, Optional, Tuple +from typing import Any, Dict, IO, Iterable, List, Optional, Tuple from failure_modes import FailureModes import get_llvm_hash @@ -41,6 +43,20 @@ class PatchInfo: return dataclasses.asdict(self) +class GitBisectionCode(enum.IntEnum): + """Git bisection exit codes.
+ + Used when patch_manager.py is in bisection mode, + as we need to report back to git bisect how it + should handle certain patch failures. + """ + GOOD = 0 + """All patches applied successfully.""" + BAD = 1 + """The tested patch failed to apply.""" + SKIP = 125 + + def is_directory(dir_path): """Validates that the argument passed into 'argparse' is a directory.""" @@ -160,6 +176,11 @@ def GetCommandLineArgs(): type=FailureModes, help='the mode of the patch manager when handling failed patches ' '(default: %(default)s)') + parser.add_argument( + '--test_patch', + default='', + help='The rel_patch_path of the patch we want to bisect the ' + 'application of. Not used in other modes.') # Parse the command line. args_output = parser.parse_args() @@ -441,7 +462,10 @@ def ApplyAllFromJson(svn_version: int, def ApplySinglePatchEntry( - svn_version: int, llvm_src_dir: Path, pe: patch_utils.PatchEntry + svn_version: int, + llvm_src_dir: Path, + pe: patch_utils.PatchEntry, + ignore_version_range: bool = False ) -> Tuple[bool, Optional[Dict[str, List[patch_utils.Hunk]]]]: """Try to apply a single PatchEntry object. @@ -451,7 +475,7 @@ def ApplySinglePatchEntry( hunks (if the patch didn't apply). """ # Don't apply patches outside of the version range. - if not pe.can_patch_version(svn_version): + if not ignore_version_range and not pe.can_patch_version(svn_version): return False, None # Test first to avoid making changes. test_application = pe.test_apply(llvm_src_dir) @@ -506,31 +530,30 @@ def UpdateVersionRanges(svn_version: int, llvm_src_dir: Path, llvm_src_dir: llvm-project directory path. patches_json_fp: Filepath to the PATCHES.json file. """ - if IsGitDirty(llvm_src_dir): - raise RuntimeError('Cannot test patch applications, llvm_src_dir is dirty') with patches_json_fp.open(encoding='utf-8') as f: - patch_entries = patch_utils.json_to_patch_entries(patches_json_fp.parent, - f) + patch_entries = patch_utils.json_to_patch_entries( + patches_json_fp.parent, + f, + ) modified_entries: List[patch_utils.PatchEntry] = [] - for pe in patch_entries: - test_result = pe.test_apply(llvm_src_dir) - if not test_result: - if pe.version_range is None: - pe.version_range = {} - pe.version_range['until'] = svn_version - modified_entries.append(pe) - else: - # We have to actually apply the patch so that future patches - # will stack properly. - if not pe.apply(llvm_src_dir).succeeded: - CleanSrcTree(llvm_src_dir) - raise RuntimeError('Could not apply patch that dry ran successfully') + with _GitCleanContext(llvm_src_dir): + for pe in patch_entries: + test_result = pe.test_apply(llvm_src_dir) + if not test_result: + if pe.version_range is None: + pe.version_range = {} + pe.version_range['until'] = svn_version + modified_entries.append(pe) + else: + # We have to actually apply the patch so that future patches + # will stack properly.
+ if not pe.apply(llvm_src_dir).succeeded: + raise RuntimeError('Could not apply patch that dry ran successfully') with patch_utils.atomic_write(patches_json_fp, encoding='utf-8') as f: _WriteJsonChanges([p.to_dict() for p in patch_entries], f) for entry in modified_entries: print(f'Stopped applying {entry.rel_patch_path} ({entry.title()}) ' f'for r{svn_version}') - CleanSrcTree(llvm_src_dir) def IsGitDirty(git_root_dir: Path) -> bool: @@ -542,7 +565,97 @@ def IsGitDirty(git_root_dir: Path) -> bool: stdout=subprocess.PIPE, check=True, cwd=git_root_dir, - encoding='utf-8').stdout != "") + encoding='utf-8').stdout != '') + + +def CheckPatchApplies(svn_version: int, llvm_src_dir: Path, + patches_json_fp: Path, + rel_patch_path: str) -> GitBisectionCode: + """Check that a given patch with the rel_patch_path applies in the stack. + + This is used in the bisection mode of the patch manager. It's similar + to ApplyAllFromJson, but differs in that the patch with rel_patch_path + will attempt to apply regardless of its version range, as we're trying + to identify the SVN version at which it stops applying. + + Args: + svn_version: SVN version to test at. + llvm_src_dir: llvm-project source code directory (with a .git). + patches_json_fp: PATCHES.json filepath. + rel_patch_path: Relative patch path of the patch we want to check. If + patches before this patch fail to apply, then the revision is skipped. + """ + with patches_json_fp.open(encoding='utf-8') as f: + patch_entries = patch_utils.json_to_patch_entries( + patches_json_fp.parent, + f, + ) + with _GitCleanContext(llvm_src_dir): + success, _, failed_patches = ApplyPatchAndPrior( + svn_version, + llvm_src_dir, + patch_entries, + rel_patch_path, + ) + if success: + # Everything is good, patch applied successfully. + print(f'SUCCEEDED applying {rel_patch_path} @ r{svn_version}') + return GitBisectionCode.GOOD + if failed_patches and failed_patches[-1].rel_patch_path == rel_patch_path: + # We attempted to apply this patch, but it failed. + print(f'FAILED to apply {rel_patch_path} @ r{svn_version}') + return GitBisectionCode.BAD + # Didn't attempt to apply the patch, but failed regardless. + # Skip this revision. + print(f'SKIPPED {rel_patch_path} @ r{svn_version} due to prior failures') + return GitBisectionCode.SKIP + + +def ApplyPatchAndPrior( + svn_version: int, src_dir: Path, + patch_entries: Iterable[patch_utils.PatchEntry], rel_patch_path: str +) -> Tuple[bool, List[patch_utils.PatchEntry], List[patch_utils.PatchEntry]]: + """Apply a patch, and all patches that apply before it in the patch stack. + + Patches that we did not attempt to apply (because their version range didn't + match and they weren't the patch of interest) do not appear in the output. + + Probably shouldn't be called from outside of CheckPatchApplies, as it modifies + the source dir contents. + + Returns: + A tuple where: + [0]: Did the patch of interest succeed in applying? + [1]: List of applied patches, potentially containing the patch of interest. + [2]: List of failing patches, potentially containing the patch of interest. + """ + failed_patches = [] + applied_patches = [] + # We have to apply every patch up to the one we care about, + # as patches can stack.
+ for pe in patch_entries: + is_patch_of_interest = pe.rel_patch_path == rel_patch_path + applied, failed_hunks = ApplySinglePatchEntry( + svn_version, src_dir, pe, ignore_version_range=is_patch_of_interest) + meant_to_apply = bool(failed_hunks) or is_patch_of_interest + if is_patch_of_interest: + if applied: + # We applied the patch we wanted to, we can stop. + applied_patches.append(pe) + return True, applied_patches, failed_patches + else: + # We failed the patch we cared about, we can stop. + failed_patches.append(pe) + return False, applied_patches, failed_patches + else: + if applied: + applied_patches.append(pe) + elif meant_to_apply: + # Broke before we reached the patch we cared about. Stop. + failed_patches.append(pe) + return False, applied_patches, failed_patches + raise ValueError(f'Did not find patch {rel_patch_path}. ' + 'Does it exist?') def _PrintFailedPatch(pe: patch_utils.PatchEntry, @@ -564,6 +677,17 @@ def _PrintFailedPatch(pe: patch_utils.PatchEntry, file=sys.stderr) +@contextlib.contextmanager +def _GitCleanContext(git_root_dir: Path): + """Cleans up a git directory when the context exits.""" + if IsGitDirty(git_root_dir): + raise RuntimeError('Cannot setup clean context; git_root_dir is dirty') + try: + yield + finally: + CleanSrcTree(git_root_dir) + + def HandlePatches(svn_version, patch_metadata_file, filesdir_path, @@ -923,21 +1047,6 @@ def main(): """Applies patches to the source tree and takes action on a failed patch.""" args_output = GetCommandLineArgs() - if args_output.failure_mode != FailureModes.INTERNAL_BISECTION: - # If the SVN version of HEAD is not the same as 'svn_version', then some - # patches that fail to apply could successfully apply if HEAD's SVN version - # was the same as 'svn_version'. In other words, HEAD's git hash should be - # what is being updated to (e.g. LLVM_NEXT_HASH). - if not args_output.use_src_head: - VerifyHEADIsTheSameAsSVNVersion(args_output.src_path, - args_output.svn_version) - else: - # `git bisect run` called this script. - # - # `git bisect run` moves HEAD each time it invokes this script, so set the - # 'svn_version' to be current HEAD's SVN version so that the previous - # SVN version is not used in determining whether a patch is applicable. - args_output.svn_version = GetHEADSVNVersion(args_output.src_path) def _apply_all(args): result = ApplyAllFromJson( @@ -955,27 +1064,29 @@ def main(): UpdateVersionRanges(args.svn_version, Path(args.src_path), Path(args.patch_metadata_file)) + def _test_single(args): + if not args.test_patch: + raise ValueError('Running with bisect_patches requires the ' + '--test_patch flag.') + llvm_src_dir = Path(args.src_path) + svn_version = GetHEADSVNVersion(llvm_src_dir) + error_code = CheckPatchApplies(svn_version, llvm_src_dir, + Path(args.patch_metadata_file), + args.test_patch) + # Since this is for bisection, we want to exit with the + # GitBisectionCode enum. + sys.exit(int(error_code)) + dispatch_table = { FailureModes.FAIL: _apply_all, FailureModes.CONTINUE: _apply_all, FailureModes.REMOVE_PATCHES: _remove, - FailureModes.DISABLE_PATCHES: _disable + FailureModes.DISABLE_PATCHES: _disable, + FailureModes.BISECT_PATCHES: _test_single, } if args_output.failure_mode in dispatch_table: dispatch_table[args_output.failure_mode](args_output) - else: - # TODO(ajordanr): Legacy mode, remove when dispatch_table - # supports bisection. - # Get the results of handling the patches of the package. 
- patch_info = HandlePatches(args_output.svn_version, - args_output.patch_metadata_file, - args_output.filesdir_path, args_output.src_path, - FailureModes(args_output.failure_mode), - args_output.good_svn_version, - args_output.num_patches_to_iterate, - args_output.continue_bisection) - PrintPatchResults(patch_info) if __name__ == '__main__': diff --git a/llvm_tools/patch_manager_unittest.py b/llvm_tools/patch_manager_unittest.py index 63d70a5b..b77c3022 100755 --- a/llvm_tools/patch_manager_unittest.py +++ b/llvm_tools/patch_manager_unittest.py @@ -16,6 +16,7 @@ import unittest.mock as mock from failure_modes import FailureModes import patch_manager +import patch_utils from test_helpers import CallCountsToMockFunctions from test_helpers import CreateTemporaryJsonFile from test_helpers import WritePrettyJsonFile @@ -189,7 +190,8 @@ class PatchManagerTest(unittest.TestCase): self.assertEqual(patch_manager.GetPatchMetadata(test_patch), expected_patch_metadata) - def testRemoveOldPatches(self): + @mock.patch('builtins.print') + def testRemoveOldPatches(self, _): """Can remove old patches from PATCHES.json.""" one_patch_dict = { 'metadata': { @@ -266,6 +268,125 @@ class PatchManagerTest(unittest.TestCase): test_file.write_text('abc') self.assertTrue(patch_manager.IsGitDirty(dirpath)) + @mock.patch('builtins.print') + @mock.patch.object(patch_manager, '_GitCleanContext') + def testCheckPatchApplies(self, _, mock_git_clean_context): + """Tests whether we can apply a single patch for a given svn_version.""" + mock_git_clean_context.return_value = mock.MagicMock() + with tempfile.TemporaryDirectory( + prefix='patch_manager_unittest') as dirname: + dirpath = Path(dirname) + patch_entries = [ + patch_utils.PatchEntry(dirpath, + metadata=None, + platforms=[], + rel_patch_path='another.patch', + version_range={ + 'from': 9, + 'until': 20, + }), + patch_utils.PatchEntry(dirpath, + metadata=None, + platforms=['chromiumos'], + rel_patch_path='example.patch', + version_range={ + 'from': 1, + 'until': 10, + }), + patch_utils.PatchEntry(dirpath, + metadata=None, + platforms=['chromiumos'], + rel_patch_path='patch_after.patch', + version_range={ + 'from': 1, + 'until': 5, + }) + ] + patches_path = dirpath / 'PATCHES.json' + with patch_utils.atomic_write(patches_path, encoding='utf-8') as f: + json.dump([pe.to_dict() for pe in patch_entries], f) + + def _harness1(version: int, return_value: patch_utils.PatchResult, + expected: patch_manager.GitBisectionCode): + with mock.patch.object( + patch_utils.PatchEntry, + 'apply', + return_value=return_value, + ) as m: + result = patch_manager.CheckPatchApplies( + version, + dirpath, + patches_path, + 'example.patch', + ) + self.assertEqual(result, expected) + m.assert_called() + + _harness1(1, patch_utils.PatchResult(True, {}), + patch_manager.GitBisectionCode.GOOD) + _harness1(2, patch_utils.PatchResult(True, {}), + patch_manager.GitBisectionCode.GOOD) + _harness1(2, patch_utils.PatchResult(False, {}), + patch_manager.GitBisectionCode.BAD) + _harness1(11, patch_utils.PatchResult(False, {}), + patch_manager.GitBisectionCode.BAD) + + def _harness2(version: int, application_func: Callable, + expected: patch_manager.GitBisectionCode): + with mock.patch.object( + patch_manager, + 'ApplySinglePatchEntry', + application_func, + ): + result = patch_manager.CheckPatchApplies( + version, + dirpath, + patches_path, + 'example.patch', + ) + self.assertEqual(result, expected) + + # Check patch can apply and fail with good return codes. 
+ def _apply_patch_entry_mock1(v, _, patch_entry, **__): + return patch_entry.can_patch_version(v), None + + _harness2( + 1, + _apply_patch_entry_mock1, + patch_manager.GitBisectionCode.GOOD, + ) + _harness2( + 11, + _apply_patch_entry_mock1, + patch_manager.GitBisectionCode.BAD, + ) + + # Early exit check, shouldn't apply later failing patch. + def _apply_patch_entry_mock2(v, _, patch_entry, **__): + if (patch_entry.can_patch_version(v) + and patch_entry.rel_patch_path == 'patch_after.patch'): + return False, {'filename': mock.Mock()} + return True, None + + _harness2( + 1, + _apply_patch_entry_mock2, + patch_manager.GitBisectionCode.GOOD, + ) + + # Skip check, should exit early on the first patch. + def _apply_patch_entry_mock3(v, _, patch_entry, **__): + if (patch_entry.can_patch_version(v) + and patch_entry.rel_patch_path == 'another.patch'): + return False, {'filename': mock.Mock()} + return True, None + + _harness2( + 9, + _apply_patch_entry_mock3, + patch_manager.GitBisectionCode.SKIP, + ) + def testFailedToApplyPatchWhenInvalidSrcPathIsPassedIn(self): src_path = '/abs/path/to/src' -- cgit v1.2.3 From 5d0936b8a422df2cbf010908229ac0346905e2e8 Mon Sep 17 00:00:00 2001 From: Michael Benfield <mbenfield@google.com> Date: Thu, 16 Jun 2022 23:56:55 +0000 Subject: afdo_metadata: Publish the new kernel profiles Update chromeos-kernel-4.4 Update chromeos-kernel-4.14 Update chromeos-kernel-4.19 Update chromeos-kernel-5.4 Update chromeos-kernel-5.10 BUG=None TEST=Verified in kernel-release-afdo-verify-orchestrator Change-Id: Icde18d30e128cfb4fbc26414ec747403392541a0 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3708110 Tested-by: Denis Nikitin <denik@chromium.org> Commit-Queue: Michael Benfield <mbenfield@google.com> Reviewed-by: Denis Nikitin <denik@chromium.org> --- afdo_metadata/kernel_afdo.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json index 36fc61d1..19007ac5 100644 --- a/afdo_metadata/kernel_afdo.json +++ b/afdo_metadata/kernel_afdo.json @@ -1,17 +1,17 @@ { "chromeos-kernel-4_4": { - "name": "R104-14794.0-1652693676" + "name": "R105-14816.49-1655113527" }, "chromeos-kernel-4_14": { - "name": "R104-14767.0-1652693858" + "name": "R105-14816.49-1655113751" }, "chromeos-kernel-4_19": { - "name": "R104-14794.0-1652693604" + "name": "R105-14816.49-1655113639" }, "chromeos-kernel-5_4": { - "name": "R104-14794.0-1652693573" + "name": "R105-14816.49-1655113428" }, "chromeos-kernel-5_10": { - "name": "R104-14794.0-1652693553" + "name": "R105-14816.51-1655113658" } } -- cgit v1.2.3 From 341ee8c2cf75bd3b8f3297f5025416bdfcc97b0c Mon Sep 17 00:00:00 2001 From: Manoj Gupta <manojgupta@google.com> Date: Thu, 23 Jun 2022 18:16:25 +0000 Subject: compiler_wrapper: Stop managing pie flags With CL:3710850, PIE will be the default for clang and GCC (cross-compiles). No need to manage them separately.
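As a quick sanity check (illustrative only: the target triple is an example, and the driver output is abridged), a cross-clang built with CL:3710850 applied should already pass -pie to the linker without the wrapper injecting it:

  $ x86_64-cros-linux-gnu-clang -### main.cc -o main 2>&1 | grep -o '"-pie"'
  "-pie"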
BUG=b:190047257 TEST=go test Change-Id: Icf8e74d6a31c5de678ec9a6a5c321a17a0154d37 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3722436 Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Reviewed-by: George Burgess <gbiv@chromium.org> Auto-Submit: Manoj Gupta <manojgupta@chromium.org> Tested-by: Manoj Gupta <manojgupta@chromium.org> Commit-Queue: George Burgess <gbiv@chromium.org> --- compiler_wrapper/compiler_wrapper.go | 1 - compiler_wrapper/config.go | 2 - compiler_wrapper/config_test.go | 2 +- compiler_wrapper/pie_flags.go | 43 ----------- compiler_wrapper/pie_flags_test.go | 84 ---------------------- .../testdata/cros_hardened_golden/bisect.json | 6 -- .../clang_ftrapv_maincc_target_specific.json | 18 ----- .../clang_maincc_target_specific.json | 18 ----- .../testdata/cros_hardened_golden/clang_path.json | 24 ------- .../cros_hardened_golden/clang_sanitizer_args.json | 16 ----- .../cros_hardened_golden/clang_specific_args.json | 8 --- .../clang_sysroot_wrapper_common.json | 9 +-- .../testdata/cros_hardened_golden/clangtidy.json | 16 ----- .../cros_hardened_golden/force_disable_werror.json | 10 --- .../cros_hardened_golden/gcc_clang_syntax.json | 14 ---- .../gcc_maincc_target_specific.json | 18 ----- .../testdata/cros_hardened_golden/gcc_path.json | 12 ---- .../cros_hardened_golden/gcc_sanitizer_args.json | 16 ----- .../cros_hardened_golden/gcc_specific_args.json | 6 -- .../gcc_sysroot_wrapper_common.json | 9 +-- .../cros_hardened_llvmnext_golden/bisect.json | 6 -- .../cros_hardened_llvmnext_golden/clang_path.json | 24 ------- .../cros_hardened_llvmnext_golden/clangtidy.json | 16 ----- .../force_disable_werror.json | 10 --- .../gcc_clang_syntax.json | 14 ---- .../cros_hardened_llvmnext_golden/gcc_path.json | 12 ---- .../cros_hardened_noccache_golden/bisect.json | 6 -- .../cros_hardened_noccache_golden/clang_path.json | 24 ------- .../cros_hardened_noccache_golden/clangtidy.json | 16 ----- .../force_disable_werror.json | 10 --- .../gcc_clang_syntax.json | 14 ---- .../cros_hardened_noccache_golden/gcc_path.json | 12 ---- .../clang_sysroot_wrapper_common.json | 1 + .../gcc_sysroot_wrapper_common.json | 1 + 34 files changed, 5 insertions(+), 493 deletions(-) delete mode 100644 compiler_wrapper/pie_flags.go delete mode 100644 compiler_wrapper/pie_flags_test.go diff --git a/compiler_wrapper/compiler_wrapper.go b/compiler_wrapper/compiler_wrapper.go index 2581cb0b..1fe3eb70 100644 --- a/compiler_wrapper/compiler_wrapper.go +++ b/compiler_wrapper/compiler_wrapper.go @@ -354,7 +354,6 @@ func calcCommonPreUserArgs(builder *commandBuilder) { builder.addPreUserArgs(builder.cfg.commonFlags...) 
if !builder.cfg.isHostWrapper { processLibGCCFlags(builder) - processPieFlags(builder) processThumbCodeFlags(builder) processStackProtectorFlags(builder) processX86Flags(builder) diff --git a/compiler_wrapper/config.go b/compiler_wrapper/config.go index 25df476f..9f49e259 100644 --- a/compiler_wrapper/config.go +++ b/compiler_wrapper/config.go @@ -145,8 +145,6 @@ var crosHardenedConfig = config{ commonFlags: []string{ "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", }, diff --git a/compiler_wrapper/config_test.go b/compiler_wrapper/config_test.go index 1d2cafaf..207c0312 100644 --- a/compiler_wrapper/config_test.go +++ b/compiler_wrapper/config_test.go @@ -119,7 +119,7 @@ func TestRealConfigWithConfigNameFlag(t *testing.T) { func isSysrootHardened(cfg *config) bool { for _, arg := range cfg.commonFlags { - if arg == "-pie" { + if arg == "-D_FORTIFY_SOURCE=2" { return true } } diff --git a/compiler_wrapper/pie_flags.go b/compiler_wrapper/pie_flags.go deleted file mode 100644 index e4110827..00000000 --- a/compiler_wrapper/pie_flags.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2019 The ChromiumOS Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package main - -func processPieFlags(builder *commandBuilder) { - fpieMap := map[string]bool{"-D__KERNEL__": true, "-fPIC": true, "-fPIE": true, "-fno-PIC": true, "-fno-PIE": true, - "-fno-pic": true, "-fno-pie": true, "-fpic": true, "-fpie": true, "-nopie": true, - "-nostartfiles": true, "-nostdlib": true, "-pie": true, "-static": true} - - pieMap := map[string]bool{"-D__KERNEL__": true, "-A": true, "-fno-PIC": true, "-fno-PIE": true, "-fno-pic": true, "-fno-pie": true, - "-nopie": true, "-nostartfiles": true, "-nostdlib": true, "-pie": true, "-r": true, "--shared": true, - "-shared": true, "-static": true} - - pie := false - fpie := false - if builder.target.abi != "eabi" { - for _, arg := range builder.args { - if arg.fromUser { - if fpieMap[arg.value] { - fpie = true - } - if pieMap[arg.value] { - pie = true - } - } - } - } - builder.transformArgs(func(arg builderArg) string { - // Remove -nopie as it is a non-standard flag. - if arg.value == "-nopie" { - return "" - } - if fpie && !arg.fromUser && arg.value == "-fPIE" { - return "" - } - if pie && !arg.fromUser && arg.value == "-pie" { - return "" - } - return arg.value - }) -} diff --git a/compiler_wrapper/pie_flags_test.go b/compiler_wrapper/pie_flags_test.go deleted file mode 100644 index d0be08fe..00000000 --- a/compiler_wrapper/pie_flags_test.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2019 The ChromiumOS Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package main - -import ( - "testing" -) - -func TestAddPieFlags(t *testing.T) { - withTestContext(t, func(ctx *testContext) { - initPieConfig(ctx.cfg) - cmd := ctx.must(callCompiler(ctx, ctx.cfg, - ctx.newCommand(gccX86_64, mainCc))) - if err := verifyArgOrder(cmd, "-pie", mainCc); err != nil { - t.Error(err) - } - if err := verifyArgOrder(cmd, "-fPIE", mainCc); err != nil { - t.Error(err) - } - }) -} - -func TestOmitPieFlagsWhenNoPieArgGiven(t *testing.T) { - withTestContext(t, func(ctx *testContext) { - initPieConfig(ctx.cfg) - cmd := ctx.must(callCompiler(ctx, ctx.cfg, - ctx.newCommand(gccX86_64, "-nopie", mainCc))) - if err := verifyArgCount(cmd, 0, "-nopie"); err != nil { - t.Error(err) - } - if err := verifyArgCount(cmd, 0, "-pie"); err != nil { - t.Error(err) - } - if err := verifyArgCount(cmd, 0, "-fPIE"); err != nil { - t.Error(err) - } - - cmd = ctx.must(callCompiler(ctx, ctx.cfg, - ctx.newCommand(gccX86_64, "-fno-pie", mainCc))) - if err := verifyArgCount(cmd, 0, "-pie"); err != nil { - t.Error(err) - } - if err := verifyArgCount(cmd, 0, "-fPIE"); err != nil { - t.Error(err) - } - }) -} - -func TestOmitPieFlagsWhenKernelDefined(t *testing.T) { - withTestContext(t, func(ctx *testContext) { - initPieConfig(ctx.cfg) - cmd := ctx.must(callCompiler(ctx, ctx.cfg, - ctx.newCommand(gccX86_64, "-D__KERNEL__", mainCc))) - if err := verifyArgCount(cmd, 0, "-pie"); err != nil { - t.Error(err) - } - if err := verifyArgCount(cmd, 0, "-fPIE"); err != nil { - t.Error(err) - } - }) -} - -func TestAddPieFlagsForEabiEvenIfNoPieGiven(t *testing.T) { - withTestContext(t, func(ctx *testContext) { - initPieConfig(ctx.cfg) - cmd := ctx.must(callCompiler(ctx, ctx.cfg, - ctx.newCommand(gccX86_64Eabi, "-nopie", mainCc))) - if err := verifyArgCount(cmd, 0, "-nopie"); err != nil { - t.Error(err) - } - if err := verifyArgCount(cmd, 1, "-pie"); err != nil { - t.Error(err) - } - if err := verifyArgCount(cmd, 1, "-fPIE"); err != nil { - t.Error(err) - } - }) -} - -func initPieConfig(cfg *config) { - cfg.commonFlags = []string{"-fPIE", "-pie"} -} diff --git a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json index 52301eb5..a397865c 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json @@ -45,8 +45,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -119,8 +117,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -196,8 +192,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json index a2513f22..da680ac7 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json @@ -36,8 +36,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", 
"-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -100,8 +98,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -164,8 +160,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -228,8 +222,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-static-libgcc", "-mthumb", @@ -291,8 +283,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -354,8 +344,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-static-libgcc", "-mthumb", @@ -417,8 +405,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-static-libgcc", "-mthumb", @@ -480,8 +466,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -543,8 +527,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-static-libgcc", "-mthumb", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json index 93f5c5af..ab79c13b 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json @@ -35,8 +35,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -97,8 +95,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -159,8 +155,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -221,8 +215,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-static-libgcc", "-mthumb", @@ -282,8 +274,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -343,8 +333,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-static-libgcc", "-mthumb", @@ -404,8 +392,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-static-libgcc", "-mthumb", @@ -465,8 +451,6 @@ 
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -526,8 +510,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-static-libgcc", "-mthumb", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json index f147f5a1..5c0bdc71 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json @@ -35,8 +35,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -100,8 +98,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -165,8 +161,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -230,8 +224,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -302,8 +294,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -379,8 +369,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -451,8 +439,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -518,8 +504,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -580,8 +564,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -642,8 +624,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -704,8 +684,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -769,8 +747,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json index 69f6f0af..0e92dbe0 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json +++ 
b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json @@ -37,8 +37,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-fno-omit-frame-pointer", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -101,8 +99,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-fno-omit-frame-pointer", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -165,8 +161,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-fno-omit-frame-pointer", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -229,8 +223,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-fno-omit-frame-pointer", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -292,8 +284,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-fno-omit-frame-pointer", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -356,8 +346,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-fno-omit-frame-pointer", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -420,8 +408,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-fno-omit-frame-pointer", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", @@ -483,8 +469,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json index 0d7e87d1..e4cc49ee 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json @@ -45,8 +45,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -116,8 +114,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -180,8 +176,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -244,8 +238,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json index 0fe640fa..93e7f0e2 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json @@ -21,8 +21,6 @@ "-Wno-maybe-uninitialized", 
"-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -73,8 +71,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -138,8 +134,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -200,6 +194,7 @@ "-fno-omit-frame-pointer", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", + "-nopie", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", @@ -379,8 +374,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json index dcb42440..dd96e907 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json @@ -50,8 +50,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -93,8 +91,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -166,8 +162,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -210,8 +204,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -285,8 +277,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -332,8 +322,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -408,8 +396,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -452,8 +438,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json index 2d1752a7..659253f7 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json @@ -38,8 +38,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -103,8 +101,6 
@@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -154,8 +150,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -223,8 +217,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -274,8 +266,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json index fabd3347..e1e146e9 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json @@ -35,8 +35,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -67,8 +65,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -124,8 +120,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -156,8 +150,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -208,8 +200,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -273,8 +263,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -305,8 +293,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_maincc_target_specific.json index 0cc3d8a9..63b7da18 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_maincc_target_specific.json @@ -21,8 +21,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -60,8 +58,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -99,8 +95,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -138,8 +132,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", 
"-D_FORTIFY_SOURCE=2", "-static-libgcc", "-mthumb", @@ -176,8 +168,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -214,8 +204,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-static-libgcc", "-mthumb", @@ -252,8 +240,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-static-libgcc", "-mthumb", @@ -290,8 +276,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -328,8 +312,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-static-libgcc", "-mthumb", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_path.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_path.json index 5b129d7d..6e06ce18 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_path.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_path.json @@ -21,8 +21,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -63,8 +61,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -105,8 +101,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -144,8 +138,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -183,8 +175,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -225,8 +215,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_sanitizer_args.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_sanitizer_args.json index a61ead8e..9aaf9c25 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_sanitizer_args.json @@ -23,8 +23,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -65,8 +63,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -107,8 +103,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -149,8 +143,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -190,8 +182,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -232,8 +222,6 @@ "-Wno-maybe-uninitialized", "-fcommon", 
"-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -274,8 +262,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -315,8 +301,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_specific_args.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_specific_args.json index f7b20001..4ee90cbc 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_specific_args.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_specific_args.json @@ -22,8 +22,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -63,8 +61,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -104,8 +100,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_sysroot_wrapper_common.json index 299b46b6..9397333d 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_sysroot_wrapper_common.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_sysroot_wrapper_common.json @@ -21,8 +21,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -59,8 +57,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -101,8 +97,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -140,6 +134,7 @@ "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", + "-nopie", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-mno-movbe" @@ -251,8 +246,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json index 52301eb5..a397865c 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json @@ -45,8 +45,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -119,8 +117,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -196,8 +192,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff 
--git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json index f147f5a1..5c0bdc71 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json @@ -35,8 +35,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -100,8 +98,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -165,8 +161,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -230,8 +224,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -302,8 +294,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -379,8 +369,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -451,8 +439,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -518,8 +504,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -580,8 +564,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -642,8 +624,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -704,8 +684,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -769,8 +747,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json index dcb42440..dd96e907 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json @@ -50,8 +50,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -93,8 +91,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", 
"-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -166,8 +162,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -210,8 +204,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -285,8 +277,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -332,8 +322,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -408,8 +396,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -452,8 +438,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json index 2d1752a7..659253f7 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json @@ -38,8 +38,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -103,8 +101,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -154,8 +150,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -223,8 +217,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -274,8 +266,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json index fabd3347..e1e146e9 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json @@ -35,8 +35,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -67,8 +65,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", 
"-fno-omit-frame-pointer", "-static-libgcc", @@ -124,8 +120,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -156,8 +150,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -208,8 +200,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -273,8 +263,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -305,8 +293,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_path.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_path.json index 5b129d7d..6e06ce18 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_path.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_path.json @@ -21,8 +21,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -63,8 +61,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -105,8 +101,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -144,8 +138,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -183,8 +175,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -225,8 +215,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json index fbb684b2..d834d581 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json @@ -44,8 +44,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -114,8 +112,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -187,8 +183,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json 
b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json index 6ff25b0e..d1ef5fe5 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json @@ -34,8 +34,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -93,8 +91,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -152,8 +148,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -211,8 +205,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -277,8 +269,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -349,8 +339,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -420,8 +408,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -481,8 +467,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -537,8 +521,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -593,8 +575,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -649,8 +629,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -708,8 +686,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json index dcb42440..dd96e907 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json @@ -50,8 +50,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -93,8 +91,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", 
"-static-libgcc", @@ -166,8 +162,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -210,8 +204,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -285,8 +277,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -332,8 +322,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -408,8 +396,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -452,8 +438,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json index e1c0ec31..0818bca1 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json @@ -37,8 +37,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -96,8 +94,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -141,8 +137,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -204,8 +198,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -249,8 +241,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json index c8c9bed8..ff82f64e 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json @@ -35,8 +35,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -66,8 +64,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -119,8 +115,6 @@ 
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -151,8 +145,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -203,8 +195,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -268,8 +258,6 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -299,8 +287,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_path.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_path.json index 9393c4b3..fb877370 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_path.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_path.json @@ -20,8 +20,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -57,8 +55,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -94,8 +90,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -128,8 +122,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -162,8 +154,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", @@ -199,8 +189,6 @@ "-Wno-maybe-uninitialized", "-fcommon", "-fstack-protector-strong", - "-fPIE", - "-pie", "-D_FORTIFY_SOURCE=2", "-fno-omit-frame-pointer", "-static-libgcc", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json index d983bd46..860fc798 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json @@ -170,6 +170,7 @@ "-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics", "-static-libgcc", "--prefix=../../bin/x86_64-cros-linux-gnu-", + "-nopie", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_sysroot_wrapper_common.json index ba2f292c..25411f20 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_sysroot_wrapper_common.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_sysroot_wrapper_common.json @@ -122,6 +122,7 @@ "-Wno-deprecated-declarations", "-Wtrampolines", "-static-libgcc", + "-nopie", "main.cc", "-L/usr/x86_64-cros-linux-gnu/usr/lib64", 
"-mno-movbe" -- cgit v1.2.3 From 6a0ae540fb1feb543c61389dfe33f982501b4e32 Mon Sep 17 00:00:00 2001 From: Manoj Gupta <manojgupta@google.com> Date: Thu, 23 Jun 2022 14:59:10 -0700 Subject: afdo_metadata: Publish the new kernel profiles Update chromeos-kernel-4.4 Update chromeos-kernel-4.14 Update chromeos-kernel-4.19 Update chromeos-kernel-5.4 Update chromeos-kernel-5.10 BUG=None TEST=Verified in kernel-release-afdo-verify-orchestrator Change-Id: I3e39be51d01147f17059f4c94df085185eea1561 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3722453 Reviewed-by: Denis Nikitin <denik@chromium.org> Commit-Queue: Denis Nikitin <denik@chromium.org> Tested-by: Manoj Gupta <manojgupta@chromium.org> Auto-Submit: Manoj Gupta <manojgupta@chromium.org> Commit-Queue: Manoj Gupta <manojgupta@chromium.org> --- afdo_metadata/kernel_afdo.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json index 19007ac5..ee4b6248 100644 --- a/afdo_metadata/kernel_afdo.json +++ b/afdo_metadata/kernel_afdo.json @@ -1,17 +1,17 @@ { "chromeos-kernel-4_4": { - "name": "R105-14816.49-1655113527" + "name": "R105-14816.64-1655717501" }, "chromeos-kernel-4_14": { - "name": "R105-14816.49-1655113751" + "name": "R105-14909.11-1655717804" }, "chromeos-kernel-4_19": { - "name": "R105-14816.49-1655113639" + "name": "R105-14909.11-1655717582" }, "chromeos-kernel-5_4": { - "name": "R105-14816.49-1655113428" + "name": "R105-14909.11-1655717651" }, "chromeos-kernel-5_10": { - "name": "R105-14816.51-1655113658" + "name": "R105-14909.11-1655717748" } } -- cgit v1.2.3 From d0fe2198f2c4bb8aa769c26209d9423fb40b32ed Mon Sep 17 00:00:00 2001 From: Michael Benfield <mbenfield@google.com> Date: Wed, 22 Jun 2022 00:04:11 +0000 Subject: crate_ebuild_help.py: add BUG=None TEST=manually running the script Change-Id: I53fa857210b5f13edb318aba407b945a5de98f0d Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3715833 Reviewed-by: George Burgess <gbiv@chromium.org> Tested-by: Michael Benfield <mbenfield@google.com> Commit-Queue: Michael Benfield <mbenfield@google.com> --- crate_ebuild_help.py | 164 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 164 insertions(+) create mode 100755 crate_ebuild_help.py diff --git a/crate_ebuild_help.py b/crate_ebuild_help.py new file mode 100755 index 00000000..e8ad48b7 --- /dev/null +++ b/crate_ebuild_help.py @@ -0,0 +1,164 @@ +#!/usr/bin/env python3 +# Copyright 2022 The ChromiumOS Authors. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Help creating a Rust ebuild with CRATES. + +This script is meant to help someone creating a Rust ebuild of the type +currently used by sys-apps/ripgrep and sys-apps/rust-analyzer. + +In these ebuilds, the CRATES variable is used to list all dependencies, rather +than creating an ebuild for each dependency. This style of ebuild can be used +for a crate which is only intended for use in the chromiumos SDK, and which has +many dependencies which otherwise won't be used. + +To create such an ebuild, there are essentially two tasks that must be done: + +1. Determine all transitive dependent crates and version and list them in the +CRATES variable. Ignore crates that are already included in the main crate's +repository. + +2. 
Find which dependent crates are not already on a chromeos mirror, retrieve +them from crates.io, and upload them to `gs://chromeos-localmirror/distfiles`. + +This script parses the crate's lockfile to list transitive dependent crates, +and either lists crates to be uploaded or actually uploads them. + +Of course these can be done manually instead. If you choose to do these steps +manually, I recommend *not* using the `cargo download` tool, and instead obtain +dependent crates at +`https://crates.io/api/v1/crates/{crate_name}/{crate_version}/download`. + +Example usage: + + # Here we instruct the script to ignore crateA and crateB, presumably + # because they are already included in the same repository as some-crate. + # This will not actually upload any crates to `gs`. + python3 crate_ebuild_help.py --lockfile some-crate/Cargo.lock \ + --ignore crateA --ignore crateB --dry-run + + # Similar to the above, but here we'll actually carry out the uploads. + python3 crate_ebuild_help.py --lockfile some-crate/Cargo.lock \ + --ignore crateA --ignore crateB + +See the ebuild files for ripgrep or rust-analyzer for other details. +""" + +import argparse +import concurrent.futures +from pathlib import Path +import subprocess +import tempfile +from typing import List, Tuple +import urllib.request + +# Python 3.11 has `tomllib`, so maybe eventually we can switch to that. +import toml + + +def run(args: List[str]) -> bool: + result = subprocess.run(args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=False) + return result.returncode == 0 + + +def run_check(args: List[str]): + subprocess.run(args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=True) + + +def gs_address_exists(address: str) -> bool: + # returns False if the file isn't there + return run(['gsutil.py', 'ls', address]) + + +def crate_already_uploaded(crate_name: str, crate_version: str) -> bool: + filename = f'{crate_name}-{crate_version}.crate' + return gs_address_exists( + f'gs://chromeos-localmirror/distfiles/{filename}') or gs_address_exists( + f'gs://chromeos-mirror/gentoo/distfiles/{filename}') + + +def download_crate(crate_name: str, crate_version: str, localpath: Path): + urllib.request.urlretrieve( + f'https://crates.io/api/v1/crates/{crate_name}/{crate_version}/download', + localpath) + + +def upload_crate(crate_name: str, crate_version: str, localpath: Path): + run_check([ + 'gsutil.py', 'cp', '-n', '-a', 'public-read', + str(localpath), + f'gs://chromeos-localmirror/distfiles/{crate_name}-{crate_version}.crate' + ]) + + +def main(): + parser = argparse.ArgumentParser( + description='Help prepare a Rust crate for an ebuild.') + parser.add_argument('--lockfile', + type=str, + required=True, + help='Path to the lockfile of the crate in question.') + parser.add_argument( + '--ignore', + type=str, + action='append', + required=False, + default=[], + help='Ignore the crate by this name (may be used multiple times).') + parser.add_argument( + '--dry-run', + action='store_true', + help="Don't actually download/upload crates, just print their names.") + ns = parser.parse_args() + + to_ignore = set(ns.ignore) + + toml_contents = toml.load(ns.lockfile) + packages = toml_contents['package'] + + crates = [(pkg['name'], pkg['version']) for pkg in packages + if pkg['name'] not in to_ignore] + crates.sort() + + print('Dependent crates:') + for name, version in crates: + print(f'{name}-{version}') + print() + + if ns.dry_run: + print('Crates that would be uploaded (skipping ones already uploaded):') + else: + 
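+    # The real work happens in maybe_upload() below: each crate is checked
+    # against the mirrors and, when missing, downloaded from crates.io and
+    # uploaded to the localmirror.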
print('Uploading crates (skipping ones already uploaded):') + + def maybe_upload(crate: Tuple[str, str]) -> str: + name, version = crate + if crate_already_uploaded(name, version): + return '' + if not ns.dry_run: + with tempfile.TemporaryDirectory() as temp_dir: + path = Path(temp_dir.name, f'{name}-{version}.crate') + download_crate(name, version, path) + upload_crate(name, version, path) + return f'{name}-{version}' + + # Simple benchmarking on my machine with rust-analyzer's Cargo.lock, using + # the --dry-run option, gives a wall time of 277 seconds with max_workers=1 + # and 70 seconds with max_workers=4. + with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor: + crates_len = len(crates) + for i, s in enumerate(executor.map(maybe_upload, crates)): + if s: + j = i + 1 + print(f'[{j}/{crates_len}] {s}') + print() + + +if __name__ == '__main__': + main() -- cgit v1.2.3 From d31ac0b99b083f51dbb88031e8329f336d0a666b Mon Sep 17 00:00:00 2001 From: Adrian Dole <adriandole@google.com> Date: Fri, 17 Jun 2022 21:46:03 +0000 Subject: get_upstream_patch: Validate patch application Currently, get_upstream_patch does not validate that a patch applies to the current LLVM state. Add validation before modifying PATCHES.json. Move several functions into patch_utils to avoid depending on patch_manager. BUG=b:227216280 TEST=./get_upstream_patch.py --platform chromiumos --sha [patch SHA] Change-Id: I97e7d401e7f8fc6d85dbfb9a310e4a77205ef444 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3711269 Reviewed-by: Adrian Dole <adriandole@google.com> Commit-Queue: Adrian Dole <adriandole@google.com> Auto-Submit: Adrian Dole <adriandole@google.com> Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Tested-by: Adrian Dole <adriandole@google.com> --- llvm_tools/get_upstream_patch.py | 37 ++++++ llvm_tools/llvm_patch_management.py | 6 +- llvm_tools/llvm_patch_management_unittest.py | 4 +- llvm_tools/patch_manager.py | 184 +++------------------------ llvm_tools/patch_manager_unittest.py | 29 +---- llvm_tools/patch_utils.py | 161 ++++++++++++++++++++++- llvm_tools/patch_utils_unittest.py | 24 ++++ 7 files changed, 248 insertions(+), 197 deletions(-) diff --git a/llvm_tools/get_upstream_patch.py b/llvm_tools/get_upstream_patch.py index a2327c4d..b5b61153 100755 --- a/llvm_tools/get_upstream_patch.py +++ b/llvm_tools/get_upstream_patch.py @@ -12,6 +12,7 @@ from datetime import datetime import json import logging import os +from pathlib import Path import shlex import subprocess import sys @@ -21,6 +22,7 @@ import chroot import get_llvm_hash import git import git_llvm_rev +import patch_utils import update_chromeos_llvm_hash @@ -39,6 +41,36 @@ class CherrypickVersionError(ValueError): """A ValueError that highlights the cherry-pick is before the start_sha""" +class PatchApplicationError(ValueError): + """A ValueError indicating that a test patch application was unsuccessful""" + + +def validate_patch_application(llvm_dir: Path, svn_version: int, + patches_json_fp: Path, patch_props): + + start_sha = get_llvm_hash.GetGitHashFrom(llvm_dir, svn_version) + subprocess.run(['git', '-C', llvm_dir, 'checkout', start_sha], check=True) + + predecessor_apply_results = patch_utils.apply_all_from_json( + svn_version, llvm_dir, patches_json_fp, continue_on_failure=True) + + if predecessor_apply_results.failed_patches: + logging.error('Failed to apply patches from PATCHES.json:') + for p in predecessor_apply_results.failed_patches: + logging.error(f'Patch title: 
{p.title()}') + raise PatchApplicationError('Failed to apply patch from PATCHES.json') + + patch_entry = patch_utils.PatchEntry.from_dict(patches_json_fp.parent, + patch_props) + test_apply_result = patch_entry.test_apply(Path(llvm_dir)) + + if not test_apply_result: + logging.error('Could not apply requested patch') + logging.error(test_apply_result.failure_info()) + raise PatchApplicationError( + f'Failed to apply patch: {patch_props["metadata"]["title"]}') + + def add_patch(patches_json_path: str, patches_dir: str, relative_patches_dir: str, start_version: git_llvm_rev.Rev, llvm_dir: str, rev: t.Union[git_llvm_rev.Rev, str], sha: str, @@ -121,6 +153,11 @@ def add_patch(patches_json_path: str, patches_dir: str, 'until': end_vers, }, } + + with patch_utils.git_clean_context(Path(llvm_dir)): + validate_patch_application(Path(llvm_dir), start_version.number, + Path(patches_json_path), patch_props) + patches_json.append(patch_props) temp_file = patches_json_path + '.tmp' diff --git a/llvm_tools/llvm_patch_management.py b/llvm_tools/llvm_patch_management.py index b7ac1973..46ddb867 100755 --- a/llvm_tools/llvm_patch_management.py +++ b/llvm_tools/llvm_patch_management.py @@ -13,12 +13,14 @@ from __future__ import print_function import argparse import os -from failure_modes import FailureModes import chroot +from failure_modes import FailureModes import get_llvm_hash import patch_manager +import patch_utils import subprocess_helpers + # If set to `True`, then the contents of `stdout` after executing a command will # be displayed to the terminal. verbose = False @@ -228,7 +230,7 @@ def UpdatePackagesPatchMetadataFile(chroot_path, svn_version, # Make sure the patch metadata path is valid. _CheckPatchMetadataPath(patch_metadata_path) - patch_manager.CleanSrcTree(src_path) + patch_utils.clean_src_tree(src_path) # Get the patch results for the current package. patches_info = patch_manager.HandlePatches(svn_version, diff --git a/llvm_tools/llvm_patch_management_unittest.py b/llvm_tools/llvm_patch_management_unittest.py index 78d55259..52117c93 100755 --- a/llvm_tools/llvm_patch_management_unittest.py +++ b/llvm_tools/llvm_patch_management_unittest.py @@ -9,6 +9,7 @@ """Unit tests when creating the arguments for the patch manager.""" from __future__ import print_function + from collections import namedtuple import os import unittest @@ -18,6 +19,7 @@ from failure_modes import FailureModes import get_llvm_hash import llvm_patch_management import patch_manager +import patch_utils import subprocess_helpers @@ -217,7 +219,7 @@ class LlvmPatchManagementTest(unittest.TestCase): # Simulate `CleanSrcTree()` when successfully removed changes from the # worktree. - @mock.patch.object(patch_manager, 'CleanSrcTree') + @mock.patch.object(patch_utils, 'clean_src_tree') # Simulate `GetGitHashFrom()` when successfully retrieved the git hash # of the version passed in. 
@mock.patch.object(get_llvm_hash, diff --git a/llvm_tools/patch_manager.py b/llvm_tools/patch_manager.py index 82ba65d1..056757fe 100755 --- a/llvm_tools/patch_manager.py +++ b/llvm_tools/patch_manager.py @@ -6,15 +6,13 @@ """A manager for patches.""" import argparse -import contextlib -import dataclasses import enum import json import os from pathlib import Path import subprocess import sys -from typing import Any, Dict, IO, Iterable, List, Optional, Tuple +from typing import Any, Dict, IO, Iterable, List, Tuple from failure_modes import FailureModes import get_llvm_hash @@ -23,26 +21,6 @@ from subprocess_helpers import check_call from subprocess_helpers import check_output -@dataclasses.dataclass(frozen=True) -class PatchInfo: - """Holds info for a round of patch applications.""" - # str types are legacy. Patch lists should - # probably be PatchEntries, - applied_patches: List[patch_utils.PatchEntry] - failed_patches: List[patch_utils.PatchEntry] - # Can be deleted once legacy code is removed. - non_applicable_patches: List[str] - # Can be deleted once legacy code is removed. - disabled_patches: List[str] - # Can be deleted once legacy code is removed. - removed_patches: List[str] - # Can be deleted once legacy code is removed. - modified_metadata: Optional[str] - - def _asdict(self): - return dataclasses.asdict(self) - - class GitBisectionCode(enum.IntEnum): """Git bisection exit codes. @@ -382,18 +360,6 @@ def PerformBisection(src_path, good_commit, bad_commit, svn_version, return version -def CleanSrcTree(src_path): - """Cleans the source tree of the changes made in 'src_path'.""" - - reset_src_tree_cmd = ['git', '-C', src_path, 'reset', 'HEAD', '--hard'] - - check_output(reset_src_tree_cmd) - - clean_src_tree_cmd = ['git', '-C', src_path, 'clean', '-fd'] - - check_output(clean_src_tree_cmd) - - def SaveSrcTreeState(src_path): """Stashes the changes made so far to the source tree.""" @@ -414,81 +380,6 @@ def RestoreSrcTreeState(src_path, bad_commit_hash): check_output(get_changes_cmd) -def ApplyAllFromJson(svn_version: int, - llvm_src_dir: Path, - patches_json_fp: Path, - continue_on_failure: bool = False) -> PatchInfo: - """Attempt to apply some patches to a given LLVM source tree. - - This relies on a PATCHES.json file to be the primary way - the patches are applied. - - Args: - svn_version: LLVM Subversion revision to patch. - llvm_src_dir: llvm-project root-level source directory to patch. - patches_json_fp: Filepath to the PATCHES.json file. - continue_on_failure: Skip any patches which failed to apply, - rather than throw an Exception. - """ - with patches_json_fp.open(encoding='utf-8') as f: - patches = patch_utils.json_to_patch_entries(patches_json_fp.parent, f) - skipped_patches = [] - failed_patches = [] - applied_patches = [] - for pe in patches: - applied, failed_hunks = ApplySinglePatchEntry(svn_version, llvm_src_dir, - pe) - if applied: - applied_patches.append(pe) - continue - if failed_hunks is not None: - if continue_on_failure: - failed_patches.append(pe) - continue - else: - _PrintFailedPatch(pe, failed_hunks) - raise RuntimeError('failed to apply patch ' - f'{pe.patch_path()}: {pe.title()}') - # Didn't apply, didn't fail, it was skipped. 
- skipped_patches.append(pe) - return PatchInfo( - non_applicable_patches=skipped_patches, - applied_patches=applied_patches, - failed_patches=failed_patches, - disabled_patches=[], - removed_patches=[], - modified_metadata=None, - ) - - -def ApplySinglePatchEntry( - svn_version: int, - llvm_src_dir: Path, - pe: patch_utils.PatchEntry, - ignore_version_range: bool = False -) -> Tuple[bool, Optional[Dict[str, List[patch_utils.Hunk]]]]: - """Try to apply a single PatchEntry object. - - Returns: - Tuple where the first element indicates whether the patch applied, - and the second element is a faild hunk mapping from file name to lists of - hunks (if the patch didn't apply). - """ - # Don't apply patches outside of the version range. - if not ignore_version_range and not pe.can_patch_version(svn_version): - return False, None - # Test first to avoid making changes. - test_application = pe.test_apply(llvm_src_dir) - if not test_application: - return False, test_application.failed_hunks - # Now actually make changes. - application_result = pe.apply(llvm_src_dir) - if not application_result: - # This should be very rare/impossible. - return False, application_result.failed_hunks - return True, None - - def RemoveOldPatches(svn_version: int, llvm_src_dir: Path, patches_json_fp: Path): """Remove patches that don't and will never apply for the future. @@ -536,7 +427,7 @@ def UpdateVersionRanges(svn_version: int, llvm_src_dir: Path, f, ) modified_entries: List[patch_utils.PatchEntry] = [] - with _GitCleanContext(llvm_src_dir): + with patch_utils.git_clean_context(llvm_src_dir): for pe in patch_entries: test_result = pe.test_apply(llvm_src_dir) if not test_result: @@ -556,18 +447,6 @@ def UpdateVersionRanges(svn_version: int, llvm_src_dir: Path, f'for r{svn_version}') -def IsGitDirty(git_root_dir: Path) -> bool: - """Return whether the given git directory has uncommitted changes.""" - if not git_root_dir.is_dir(): - raise ValueError(f'git_root_dir {git_root_dir} is not a directory') - cmd = ['git', 'ls-files', '-m', '--other', '--exclude-standard'] - return (subprocess.run(cmd, - stdout=subprocess.PIPE, - check=True, - cwd=git_root_dir, - encoding='utf-8').stdout != '') - - def CheckPatchApplies(svn_version: int, llvm_src_dir: Path, patches_json_fp: Path, rel_patch_path: str) -> GitBisectionCode: @@ -590,7 +469,7 @@ def CheckPatchApplies(svn_version: int, llvm_src_dir: Path, patches_json_fp.parent, f, ) - with _GitCleanContext(llvm_src_dir): + with patch_utils.git_clean_context(llvm_src_dir): success, _, failed_patches = ApplyPatchAndPrior( svn_version, llvm_src_dir, @@ -635,7 +514,7 @@ def ApplyPatchAndPrior( # as patches can stack. for pe in patch_entries: is_patch_of_interest = pe.rel_patch_path == rel_patch_path - applied, failed_hunks = ApplySinglePatchEntry( + applied, failed_hunks = patch_utils.apply_single_patch_entry( svn_version, src_dir, pe, ignore_version_range=is_patch_of_interest) meant_to_apply = bool(failed_hunks) or is_patch_of_interest if is_patch_of_interest: @@ -658,36 +537,6 @@ def ApplyPatchAndPrior( 'Does it exist?') -def _PrintFailedPatch(pe: patch_utils.PatchEntry, - failed_hunks: Dict[str, List[patch_utils.Hunk]]): - """Print information about a single failing PatchEntry. - - Args: - pe: A PatchEntry that failed. - failed_hunks: Hunks for pe which failed as dict: - filepath: [Hunk...] 
- """ - print(f'Could not apply {pe.rel_patch_path}: {pe.title()}', file=sys.stderr) - for fp, hunks in failed_hunks.items(): - print(f'{fp}:', file=sys.stderr) - for h in hunks: - print( - f'- {pe.rel_patch_path} ' - f'l:{h.patch_hunk_lineno_begin}...{h.patch_hunk_lineno_end}', - file=sys.stderr) - - -@contextlib.contextmanager -def _GitCleanContext(git_root_dir: Path): - """Cleans up a git directory when the context exits.""" - if IsGitDirty(git_root_dir): - raise RuntimeError('Cannot setup clean context; git_root_dir is dirty') - try: - yield - finally: - CleanSrcTree(git_root_dir) - - def HandlePatches(svn_version, patch_metadata_file, filesdir_path, @@ -871,7 +720,7 @@ def HandlePatches(svn_version, # Need a clean source tree for `git bisect run` to avoid unnecessary # fails for patches. - CleanSrcTree(src_path) + patch_utils.clean_src_tree(src_path) print('\nStarting to bisect patch %s for SVN version %d:\n' % (os.path.basename( @@ -904,7 +753,7 @@ def HandlePatches(svn_version, UpdatePatchMetadataFile(patch_metadata_file, patch_file_contents) # Clear the changes made to the source tree by `git bisect run`. - CleanSrcTree(src_path) + patch_utils.clean_src_tree(src_path) if not continue_bisection: # Exiting program early because 'continue_bisection' is not set. @@ -952,7 +801,7 @@ def HandlePatches(svn_version, # Changes to the source tree need to be removed, otherwise some # patches may fail when applying the patch to the source tree when # `git bisect run` calls this script again. - CleanSrcTree(src_path) + patch_utils.clean_src_tree(src_path) # The last patch in the interval [0, N] failed to apply, so let # `git bisect run` know that the last patch (the patch that failed @@ -974,17 +823,18 @@ def HandlePatches(svn_version, # complain that the changes would need to be 'stashed' or 'removed' in # order to reset HEAD back to the bad commit's git hash, so HEAD will remain # on the last git hash used by `git bisect run`. - CleanSrcTree(src_path) + patch_utils.clean_src_tree(src_path) # NOTE: Exit code 0 is similar to `git bisect good`. sys.exit(0) - patch_info = PatchInfo(applied_patches=applied_patches, - failed_patches=failed_patches, - non_applicable_patches=non_applicable_patches, - disabled_patches=disabled_patches, - removed_patches=removed_patches, - modified_metadata=modified_metadata) + patch_info = patch_utils.PatchInfo( + applied_patches=applied_patches, + failed_patches=failed_patches, + non_applicable_patches=non_applicable_patches, + disabled_patches=disabled_patches, + removed_patches=removed_patches, + modified_metadata=modified_metadata) # Determine post actions after iterating through the patches. if mode == FailureModes.REMOVE_PATCHES: @@ -1007,7 +857,7 @@ def HandlePatches(svn_version, return patch_info -def PrintPatchResults(patch_info: PatchInfo): +def PrintPatchResults(patch_info: patch_utils.PatchInfo): """Prints the results of handling the patches of a package. 
Args: @@ -1049,7 +899,7 @@ def main(): args_output = GetCommandLineArgs() def _apply_all(args): - result = ApplyAllFromJson( + result = patch_utils.apply_all_from_json( svn_version=args.svn_version, llvm_src_dir=Path(args.src_path), patches_json_fp=Path(args.patch_metadata_file), diff --git a/llvm_tools/patch_manager_unittest.py b/llvm_tools/patch_manager_unittest.py index b77c3022..f74480c2 100755 --- a/llvm_tools/patch_manager_unittest.py +++ b/llvm_tools/patch_manager_unittest.py @@ -245,31 +245,8 @@ class PatchManagerTest(unittest.TestCase): for r, a in cases: _t(dirname, r, a) - def testIsGitDirty(self): - """Test if a git directory has uncommitted changes.""" - with tempfile.TemporaryDirectory( - prefix='patch_manager_unittest') as dirname: - dirpath = Path(dirname) - - def _run_h(cmd): - subprocess.run(cmd, cwd=dirpath, stdout=subprocess.DEVNULL, check=True) - - _run_h(['git', 'init']) - self.assertFalse(patch_manager.IsGitDirty(dirpath)) - test_file = dirpath / 'test_file' - test_file.touch() - self.assertTrue(patch_manager.IsGitDirty(dirpath)) - _run_h(['git', 'add', '.']) - _run_h(['git', 'commit', '-m', 'test']) - self.assertFalse(patch_manager.IsGitDirty(dirpath)) - test_file.touch() - self.assertFalse(patch_manager.IsGitDirty(dirpath)) - with test_file.open('w', encoding='utf-8'): - test_file.write_text('abc') - self.assertTrue(patch_manager.IsGitDirty(dirpath)) - @mock.patch('builtins.print') - @mock.patch.object(patch_manager, '_GitCleanContext') + @mock.patch.object(patch_utils, 'git_clean_context') def testCheckPatchApplies(self, _, mock_git_clean_context): """Tests whether we can apply a single patch for a given svn_version.""" mock_git_clean_context.return_value = mock.MagicMock() @@ -334,8 +311,8 @@ class PatchManagerTest(unittest.TestCase): def _harness2(version: int, application_func: Callable, expected: patch_manager.GitBisectionCode): with mock.patch.object( - patch_manager, - 'ApplySinglePatchEntry', + patch_utils, + 'apply_single_patch_entry', application_func, ): result = patch_manager.CheckPatchApplies( diff --git a/llvm_tools/patch_utils.py b/llvm_tools/patch_utils.py index cdf9f215..003990be 100644 --- a/llvm_tools/patch_utils.py +++ b/llvm_tools/patch_utils.py @@ -12,7 +12,7 @@ from pathlib import Path import re import subprocess import sys -from typing import Any, Dict, IO, List, Optional, Union +from typing import Any, Dict, IO, List, Optional, Tuple, Union CHECKED_FILE_RE = re.compile(r'^checking file\s+(.*)$') @@ -139,6 +139,17 @@ class PatchResult: def __bool__(self): return self.succeeded + def failure_info(self) -> str: + if self.succeeded: + return '' + s = '' + for file, hunks in self.failed_hunks.items(): + s += f'{file}:\n' + for h in hunks: + s += f'Lines {h.orig_start} to {h.orig_start + h.orig_hunk_len}\n' + s += '--------------------\n' + return s + @dataclasses.dataclass class PatchEntry: @@ -250,6 +261,26 @@ class PatchEntry: return self.metadata.get('title', '') +@dataclasses.dataclass(frozen=True) +class PatchInfo: + """Holds info for a round of patch applications.""" + # str types are legacy. Patch lists should + # probably be PatchEntries, + applied_patches: List[PatchEntry] + failed_patches: List[PatchEntry] + # Can be deleted once legacy code is removed. + non_applicable_patches: List[str] + # Can be deleted once legacy code is removed. + disabled_patches: List[str] + # Can be deleted once legacy code is removed. + removed_patches: List[str] + # Can be deleted once legacy code is removed. 
+ modified_metadata: Optional[str] + + def _asdict(self): + return dataclasses.asdict(self) + + def json_to_patch_entries(workdir: Path, json_fd: IO[str]) -> List[PatchEntry]: """Convert a json IO object to List[PatchEntry]. @@ -258,3 +289,131 @@ def json_to_patch_entries(workdir: Path, json_fd: IO[str]) -> List[PatchEntry]: >>> patch_entries = json_to_patch_entries(Path(), f) """ return [PatchEntry.from_dict(workdir, d) for d in json.load(json_fd)] + + +def _print_failed_patch(pe: PatchEntry, failed_hunks: Dict[str, List[Hunk]]): + """Print information about a single failing PatchEntry. + + Args: + pe: A PatchEntry that failed. + failed_hunks: Hunks for pe which failed as dict: + filepath: [Hunk...] + """ + print(f'Could not apply {pe.rel_patch_path}: {pe.title()}', file=sys.stderr) + for fp, hunks in failed_hunks.items(): + print(f'{fp}:', file=sys.stderr) + for h in hunks: + print( + f'- {pe.rel_patch_path} ' + f'l:{h.patch_hunk_lineno_begin}...{h.patch_hunk_lineno_end}', + file=sys.stderr) + + +def apply_all_from_json(svn_version: int, + llvm_src_dir: Path, + patches_json_fp: Path, + continue_on_failure: bool = False) -> PatchInfo: + """Attempt to apply some patches to a given LLVM source tree. + + This relies on a PATCHES.json file to be the primary way + the patches are applied. + + Args: + svn_version: LLVM Subversion revision to patch. + llvm_src_dir: llvm-project root-level source directory to patch. + patches_json_fp: Filepath to the PATCHES.json file. + continue_on_failure: Skip any patches which failed to apply, + rather than throw an Exception. + """ + with patches_json_fp.open(encoding='utf-8') as f: + patches = json_to_patch_entries(patches_json_fp.parent, f) + skipped_patches = [] + failed_patches = [] + applied_patches = [] + for pe in patches: + applied, failed_hunks = apply_single_patch_entry(svn_version, llvm_src_dir, + pe) + if applied: + applied_patches.append(pe) + continue + if failed_hunks is not None: + if continue_on_failure: + failed_patches.append(pe) + continue + else: + _print_failed_patch(pe, failed_hunks) + raise RuntimeError('failed to apply patch ' + f'{pe.patch_path()}: {pe.title()}') + # Didn't apply, didn't fail, it was skipped. + skipped_patches.append(pe) + return PatchInfo( + non_applicable_patches=skipped_patches, + applied_patches=applied_patches, + failed_patches=failed_patches, + disabled_patches=[], + removed_patches=[], + modified_metadata=None, + ) + + +def apply_single_patch_entry( + svn_version: int, + llvm_src_dir: Path, + pe: PatchEntry, + ignore_version_range: bool = False +) -> Tuple[bool, Optional[Dict[str, List[Hunk]]]]: + """Try to apply a single PatchEntry object. + + Returns: + Tuple where the first element indicates whether the patch applied, + and the second element is a faild hunk mapping from file name to lists of + hunks (if the patch didn't apply). + """ + # Don't apply patches outside of the version range. + if not ignore_version_range and not pe.can_patch_version(svn_version): + return False, None + # Test first to avoid making changes. + test_application = pe.test_apply(llvm_src_dir) + if not test_application: + return False, test_application.failed_hunks + # Now actually make changes. + application_result = pe.apply(llvm_src_dir) + if not application_result: + # This should be very rare/impossible. 
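+    # (test_apply() above already succeeded, so a failure here would mean
+    # the source tree changed between the dry run and the real
+    # application.)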
+ return False, application_result.failed_hunks + return True, None + + +def is_git_dirty(git_root_dir: Path) -> bool: + """Return whether the given git directory has uncommitted changes.""" + if not git_root_dir.is_dir(): + raise ValueError(f'git_root_dir {git_root_dir} is not a directory') + cmd = ['git', 'ls-files', '-m', '--other', '--exclude-standard'] + return (subprocess.run(cmd, + stdout=subprocess.PIPE, + check=True, + cwd=git_root_dir, + encoding='utf-8').stdout != '') + + +def clean_src_tree(src_path): + """Cleans the source tree of the changes made in 'src_path'.""" + + reset_src_tree_cmd = ['git', '-C', src_path, 'reset', 'HEAD', '--hard'] + + subprocess.run(reset_src_tree_cmd, check=True) + + clean_src_tree_cmd = ['git', '-C', src_path, 'clean', '-fd'] + + subprocess.run(clean_src_tree_cmd, check=True) + + +@contextlib.contextmanager +def git_clean_context(git_root_dir: Path): + """Cleans up a git directory when the context exits.""" + if is_git_dirty(git_root_dir): + raise RuntimeError('Cannot setup clean context; git_root_dir is dirty') + try: + yield + finally: + clean_src_tree(git_root_dir) diff --git a/llvm_tools/patch_utils_unittest.py b/llvm_tools/patch_utils_unittest.py index 3a6409b9..f73ee751 100755 --- a/llvm_tools/patch_utils_unittest.py +++ b/llvm_tools/patch_utils_unittest.py @@ -7,6 +7,7 @@ import io from pathlib import Path +import subprocess import tempfile import unittest import unittest.mock as mock @@ -159,6 +160,29 @@ Hunk #1 SUCCEEDED at 96 with fuzz 1. self.assertEqual(result['x/y/z.h'], [4]) self.assertNotIn('works.cpp', result) + def test_is_git_dirty(self): + """Test if a git directory has uncommitted changes.""" + with tempfile.TemporaryDirectory( + prefix='patch_utils_unittest') as dirname: + dirpath = Path(dirname) + + def _run_h(cmd): + subprocess.run(cmd, cwd=dirpath, stdout=subprocess.DEVNULL, check=True) + + _run_h(['git', 'init']) + self.assertFalse(pu.is_git_dirty(dirpath)) + test_file = dirpath / 'test_file' + test_file.touch() + self.assertTrue(pu.is_git_dirty(dirpath)) + _run_h(['git', 'add', '.']) + _run_h(['git', 'commit', '-m', 'test']) + self.assertFalse(pu.is_git_dirty(dirpath)) + test_file.touch() + self.assertFalse(pu.is_git_dirty(dirpath)) + with test_file.open('w', encoding='utf-8'): + test_file.write_text('abc') + self.assertTrue(pu.is_git_dirty(dirpath)) + @staticmethod def _default_json_dict(): return { -- cgit v1.2.3 From 3c36449dd4c66f7415f0a23b7908f5496b0dd743 Mon Sep 17 00:00:00 2001 From: George Burgess IV <gbiv@google.com> Date: Tue, 21 Jun 2022 17:06:52 -0700 Subject: compiler_wrapper: add autocrash logic As outlined in the attached bug, we want to be able to crash the compiler based on a handful of heuristics. Crashing Clang helps get us self-contained reproducers fairly easily. This CL provides (off-by-default) functionality to do the above. The expectation is that a SWE will hack at it to make it work as they need to. BUG=b:236736327 TEST=Installed the new wrapper; observed autocrashes. 
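(For reference: the wrapper induces the crash by appending a parser-crash
pragma to Clang's stdin; see `autocrashProgramLine` in crash_builds.go
below. A minimal by-hand sketch of the same mechanism, assuming any
reasonably recent Clang:

  $ echo '#pragma clang __debug parser_crash' | clang -x c -c - -o /dev/null

This should crash Clang and leave behind the "diagnostic msg:" reproducer
files described in the README change below.)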
Change-Id: I76ec753ec37baa5e9b6dab92668081fa7c605725 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3714885 Reviewed-by: Manoj Gupta <manojgupta@chromium.org> Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Commit-Queue: George Burgess <gbiv@chromium.org> Tested-by: George Burgess <gbiv@chromium.org> --- compiler_wrapper/README.md | 28 ++++ compiler_wrapper/ccache_flag.go | 7 +- compiler_wrapper/compiler_wrapper.go | 6 + compiler_wrapper/crash_builds.go | 154 ++++++++++++++++++++ compiler_wrapper/crash_builds_test.go | 260 ++++++++++++++++++++++++++++++++++ 5 files changed, 454 insertions(+), 1 deletion(-) create mode 100644 compiler_wrapper/crash_builds.go create mode 100644 compiler_wrapper/crash_builds_test.go diff --git a/compiler_wrapper/README.md b/compiler_wrapper/README.md index 0228e27d..bb63798a 100644 --- a/compiler_wrapper/README.md +++ b/compiler_wrapper/README.md @@ -73,3 +73,31 @@ Generated wrappers are stored here: `/usr/bin/clang_host_wrapper` - Gcc host wrapper: `/usr/x86_64-pc-linux-gnu/gcc-bin/10.2.0/host_wrapper` + +## Using the compiler wrapper to crash arbitrary compilations + +When Clang crashes, its output can be extremely useful. Often, it will provide +the user with a stack trace, and messages like: + +``` +clang-15: unable to execute command: Illegal instruction +clang-15: note: diagnostic msg: /tmp/clang_crash_diagnostics/foo-5420d2.c +clang-15: note: diagnostic msg: /tmp/clang_crash_diagnostics/foo-5420d2.sh +``` + +Where the artifacts at `/tmp/clang_crash_diagnostics/foo-*` are a full, +self-contained reproducer of the inputs that caused the crash in question. +Often, such a reproducer is very valuable to have even for cases where a crash +_doesn't_ happen (e.g., maybe Clang is now emitting an error where it used to +not do so, and we want to bisect upstream LLVM with that info). Normally, +collecting and crafting such a reproducer is a multi-step process, and can be +error-prone; compile commands may rely on env vars, they may be done within +`chroot`s, they may rely on being executed in a particular directory, they may +rely on intermediate state, etc. + +Because of the usefulness of these crash reports, our wrapper supports crashing +Clang even on files that ordinarily don't cause Clang to crash. For various +reasons (b/236736327), this support currently requires rebuilding and +redeploying the wrapper in order to work. That said, this could be a valuable +tool for devs interested in creating a self-contained reproducer without having +to manually reproduce the environment in which a particular build was performed. diff --git a/compiler_wrapper/ccache_flag.go b/compiler_wrapper/ccache_flag.go index 7d19da88..2c966fd6 100644 --- a/compiler_wrapper/ccache_flag.go +++ b/compiler_wrapper/ccache_flag.go @@ -4,6 +4,11 @@ package main +func isInConfigureStage(env env) bool { + val, present := env.getenv("EBUILD_PHASE") + return present && val == "configure" +} + func processCCacheFlag(builder *commandBuilder) { // We should be able to share the objects across compilers as // the pre-processed output will differ. This allows boards @@ -22,7 +27,7 @@ func processCCacheFlag(builder *commandBuilder) { // Disable ccache during portage's src_configure phase. Using ccache here is generally a // waste of time, since these files are very small. Experimentally, this speeds up // configuring by ~13%. 
- if val, present := builder.env.getenv("EBUILD_PHASE"); present && val == "configure" { + if isInConfigureStage(builder.env) { useCCache = false } diff --git a/compiler_wrapper/compiler_wrapper.go b/compiler_wrapper/compiler_wrapper.go index 1fe3eb70..28d22471 100644 --- a/compiler_wrapper/compiler_wrapper.go +++ b/compiler_wrapper/compiler_wrapper.go @@ -201,6 +201,12 @@ func callCompilerInternal(env env, cfg *config, inputCmd *command) (exitCode int } } + // If builds matching some heuristic should crash, crash them. Since this is purely a + // debugging tool, don't offer any nice features with it (e.g., rusage, ...). + if shouldUseCrashBuildsHeuristic && mainBuilder.target.compilerType == clangType { + return buildWithAutocrash(env, cfg, compilerCmd) + } + bisectStage := getBisectStage(env) if rusageEnabled { diff --git a/compiler_wrapper/crash_builds.go b/compiler_wrapper/crash_builds.go new file mode 100644 index 00000000..147fb369 --- /dev/null +++ b/compiler_wrapper/crash_builds.go @@ -0,0 +1,154 @@ +// Copyright 2022 The ChromiumOS Authors. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "io" + "regexp" +) + +// ** HEY YOU, PERSON READING THIS! ** +// +// Are you a dev who wants to make this work locally? Awesome! Please note that this **only** works +// for Clang. If that's OK, here's a checklist for you: +// [ ] Set `shouldUseCrashBuildsHeuristic = true` below. +// [ ] If you want this heuristic to operate during `src_configure` (rare), also set +// `allowAutoCrashInConfigure` to true. +// [ ] Modify `shouldAutocrashPostExec` to return `true` when the compiler's output/flags match what +// you want to crash on, and `false` otherwise. +// [ ] Run `./install_compiler_wrapper.sh` to install the updated wrapper. +// [ ] Run whatever command reproduces the error. +// +// If you need to make changes to your heuristic, repeat the above steps starting at +// `./install_compiler_wrapper.sh` until things seem to do what you want. +const ( + // Set this to true to use autocrashing logic. + shouldUseCrashBuildsHeuristic = false + // Set this to true to allow `shouldAutocrashPostExec` to check+crash configure steps. + allowAutoCrashInConfigure = false +) + +// shouldAutocrashPostExec returns true if we should automatically crash the compiler. This is +// called after the compiler is run. If it returns true, we'll re-execute the compiler with the bit +// of extra code necessary to crash it. +func shouldAutocrashPostExec(env env, cfg *config, originalCmd *command, runInfo compilerExecInfo) bool { + // ** TODO, DEAR READER: ** Fill this in. Below are a few `if false {` blocks that should + // work for common use-cases. You're encouraged to change them to `if true {` if they suit + // your needs. + + // Return true if `error: some error message` is contained in the run's stderr. + if false { + return bytes.Contains(runInfo.stderr, []byte("error: some error message")) + } + + // Return true if `foo.c:${line_number}: error: some error message` appears in the run's + // stderr. Otherwise, return false. + if false { + r := regexp.MustCompile(`foo\.c:\d+: error: some error message`) + return r.Match(runInfo.stderr) + } + + // Return true if there's a `-fjust-give-up` flag in the compiler's invocation. 
+ if false { + for _, flag := range originalCmd.Args { + if flag == "-fjust-give-up" { + return true + } + } + + return false + } + + panic("Please fill in `shouldAutocrashPostExec` with meaningful logic.") +} + +type compilerExecInfo struct { + exitCode int + stdout, stderr []byte +} + +// ** Below here are implementation details. If all you want is autocrashing behavior, you don't +// need to keep reading. ** +const ( + autocrashProgramLine = "\n#pragma clang __debug parser_crash" +) + +type buildWithAutocrashPredicates struct { + allowInConfigure bool + shouldAutocrash func(env, *config, *command, compilerExecInfo) bool +} + +func buildWithAutocrash(env env, cfg *config, originalCmd *command) (exitCode int, err error) { + return buildWithAutocrashImpl(env, cfg, originalCmd, buildWithAutocrashPredicates{ + allowInConfigure: allowAutoCrashInConfigure, + shouldAutocrash: shouldAutocrashPostExec, + }) +} + +func buildWithAutocrashImpl(env env, cfg *config, originalCmd *command, preds buildWithAutocrashPredicates) (exitCode int, err error) { + stdinBuffer := (*bytes.Buffer)(nil) + subprocStdin := io.Reader(nil) + invocationUsesStdinAsAFile := needStdinTee(originalCmd) + if invocationUsesStdinAsAFile { + stdinBuffer = &bytes.Buffer{} + if _, err := stdinBuffer.ReadFrom(env.stdin()); err != nil { + return 0, wrapErrorwithSourceLocf(err, "prebuffering stdin") + } + subprocStdin = stdinBuffer + } else { + subprocStdin = env.stdin() + } + + stdoutBuffer := &bytes.Buffer{} + stderrBuffer := &bytes.Buffer{} + exitCode, err = wrapSubprocessErrorWithSourceLoc(originalCmd, + env.run(originalCmd, subprocStdin, stdoutBuffer, stderrBuffer)) + if err != nil { + return 0, err + } + + autocrashAllowed := preds.allowInConfigure || !isInConfigureStage(env) + crash := autocrashAllowed && preds.shouldAutocrash(env, cfg, originalCmd, compilerExecInfo{ + exitCode: exitCode, + stdout: stdoutBuffer.Bytes(), + stderr: stderrBuffer.Bytes(), + }) + if !crash { + stdoutBuffer.WriteTo(env.stdout()) + stderrBuffer.WriteTo(env.stderr()) + return exitCode, nil + } + + fmt.Fprintln(env.stderr(), "** Autocrash requested; crashing the compiler...**") + + // `stdinBuffer == nil` implies that `-` wasn't used as a flag. If `-` isn't used as a + // flag, clang will ignore stdin. We want to write our #pragma to stdin, since we can't + // reasonably modify the files we're currently compiling. + if stdinBuffer == nil { + newArgs := []string{} + // Clang can't handle `-o ${target}` when handed multiple input files. Since + // we expect to crash before emitting anything, remove `-o ${file}` entirely. + for i, e := 0, len(originalCmd.Args); i < e; i++ { + a := originalCmd.Args[i] + if a == "-o" { + // Skip the -o here, then skip the following arg in the loop header. + i++ + } else { + newArgs = append(newArgs, a) + } + } + // And now add args that instruct clang to read from stdin. In this case, we also + // need to tell Clang what language the file is written in; C is as good as anything + // for this. + originalCmd.Args = append(newArgs, "-x", "c", "-") + stdinBuffer = &bytes.Buffer{} + } + + stdinBuffer.WriteString(autocrashProgramLine) + return wrapSubprocessErrorWithSourceLoc(originalCmd, + env.run(originalCmd, stdinBuffer, env.stdout(), env.stderr())) +} diff --git a/compiler_wrapper/crash_builds_test.go b/compiler_wrapper/crash_builds_test.go new file mode 100644 index 00000000..a4b2b99b --- /dev/null +++ b/compiler_wrapper/crash_builds_test.go @@ -0,0 +1,260 @@ +// Copyright 2022 The ChromiumOS Authors. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package main + +import ( + "bytes" + "io" + "strings" + "testing" +) + +func TestBuildWithAutoCrashDoesNothingIfCrashIsNotRequested(t *testing.T) { + withTestContext(t, func(ctx *testContext) { + neverAutoCrash := buildWithAutocrashPredicates{ + allowInConfigure: true, + shouldAutocrash: func(env, *config, *command, compilerExecInfo) bool { + return false + }, + } + + exitCode, err := buildWithAutocrashImpl(ctx, ctx.cfg, ctx.newCommand(clangX86_64, mainCc), neverAutoCrash) + if err != nil { + t.Fatalf("unexpectedly failed with %v", err) + } + ctx.must(exitCode) + if ctx.cmdCount != 1 { + t.Errorf("expected 1 call. Got: %d", ctx.cmdCount) + } + }) +} + +func TestBuildWithAutoCrashSkipsAutocrashLogicIfInConfigureAndConfigureChecksDisabled(t *testing.T) { + withTestContext(t, func(ctx *testContext) { + alwaysAutocrash := buildWithAutocrashPredicates{ + allowInConfigure: false, + shouldAutocrash: func(env, *config, *command, compilerExecInfo) bool { + return true + }, + } + + ctx.env = append(ctx.env, "EBUILD_PHASE=configure") + exitCode, err := buildWithAutocrashImpl(ctx, ctx.cfg, ctx.newCommand(clangX86_64, mainCc), alwaysAutocrash) + if err != nil { + t.Fatalf("unexpectedly failed with %v", err) + } + ctx.must(exitCode) + if ctx.cmdCount != 1 { + t.Errorf("expected 1 call. Got: %d", ctx.cmdCount) + } + }) +} + +func TestBuildWithAutoCrashRerunsIfPredicateRequestsCrash(t *testing.T) { + withTestContext(t, func(ctx *testContext) { + autocrashPostCmd := buildWithAutocrashPredicates{ + allowInConfigure: true, + shouldAutocrash: func(env, *config, *command, compilerExecInfo) bool { + return true + }, + } + + ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error { + hasDash := false + for _, arg := range cmd.Args { + if arg == "-" { + hasDash = true + break + } + } + + switch ctx.cmdCount { + case 1: + if hasDash { + t.Error("Got `-` on command 1; didn't want that.") + } + return nil + case 2: + if !hasDash { + t.Error("Didn't get `-` on command 2; wanted that.") + } else { + input := stdin.(*bytes.Buffer) + if s := input.String(); !strings.Contains(s, autocrashProgramLine) { + t.Errorf("Input was %q; expected %q to be in it", s, autocrashProgramLine) + } + } + return nil + default: + t.Fatalf("Unexpected command count: %d", ctx.cmdCount) + panic("Unreachable") + } + } + + exitCode, err := buildWithAutocrashImpl(ctx, ctx.cfg, ctx.newCommand(clangX86_64, mainCc), autocrashPostCmd) + if err != nil { + t.Fatalf("unexpectedly failed with %v", err) + } + ctx.must(exitCode) + + if ctx.cmdCount != 2 { + t.Errorf("expected 2 calls. 
Got: %d", ctx.cmdCount) + } + }) +} + +func TestBuildWithAutoCrashAddsDashAndWritesToStdinIfInputFileIsNotStdin(t *testing.T) { + withTestContext(t, func(ctx *testContext) { + autocrashPostCmd := buildWithAutocrashPredicates{ + allowInConfigure: true, + shouldAutocrash: func(env, *config, *command, compilerExecInfo) bool { + return true + }, + } + + ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error { + numDashes := 0 + for _, arg := range cmd.Args { + if arg == "-" { + numDashes++ + } + } + + switch ctx.cmdCount { + case 1: + if numDashes != 0 { + t.Errorf("Got %d dashes on command 1; want 0", numDashes) + } + return nil + case 2: + if numDashes != 1 { + t.Errorf("Got %d dashes on command 2; want 1", numDashes) + } + + input := stdin.(*bytes.Buffer).String() + stdinHasAutocrashLine := strings.Contains(input, autocrashProgramLine) + if !stdinHasAutocrashLine { + t.Error("Got no autocrash line on the second command; wanted that") + } + return nil + default: + t.Fatalf("Unexpected command count: %d", ctx.cmdCount) + panic("Unreachable") + } + } + + exitCode, err := buildWithAutocrashImpl(ctx, ctx.cfg, ctx.newCommand(clangX86_64, mainCc), autocrashPostCmd) + if err != nil { + t.Fatalf("unexpectedly failed with %v", err) + } + ctx.must(exitCode) + + if ctx.cmdCount != 2 { + t.Errorf("expected 2 calls. Got: %d", ctx.cmdCount) + } + }) +} + +func TestBuildWithAutoCrashAppendsToStdinIfStdinIsTheOnlyInputFile(t *testing.T) { + withTestContext(t, func(ctx *testContext) { + autocrashPostCmd := buildWithAutocrashPredicates{ + allowInConfigure: true, + shouldAutocrash: func(env, *config, *command, compilerExecInfo) bool { + return true + }, + } + + ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error { + numDashes := 0 + for _, arg := range cmd.Args { + if arg == "-" { + numDashes++ + } + } + + if numDashes != 1 { + t.Errorf("Got %d dashes on command %d (args: %#v); want 1", numDashes, ctx.cmdCount, cmd.Args) + } + + input := stdin.(*bytes.Buffer).String() + stdinHasAutocrashLine := strings.Contains(input, autocrashProgramLine) + + switch ctx.cmdCount { + case 1: + if stdinHasAutocrashLine { + t.Error("Got autocrash line on the first command; did not want that") + } + return nil + case 2: + if !stdinHasAutocrashLine { + t.Error("Got no autocrash line on the second command; wanted that") + } + return nil + default: + t.Fatalf("Unexpected command count: %d", ctx.cmdCount) + panic("Unreachable") + } + } + + exitCode, err := buildWithAutocrashImpl(ctx, ctx.cfg, ctx.newCommand(clangX86_64, "-x", "c", "-"), autocrashPostCmd) + if err != nil { + t.Fatalf("unexpectedly failed with %v", err) + } + ctx.must(exitCode) + + if ctx.cmdCount != 2 { + t.Errorf("expected 2 calls. Got: %d", ctx.cmdCount) + } + }) +} + +func TestCrashBuildFiltersObjectFileOptionOnCrashes(t *testing.T) { + withTestContext(t, func(ctx *testContext) { + autocrashPostCmd := buildWithAutocrashPredicates{ + allowInConfigure: true, + shouldAutocrash: func(env, *config, *command, compilerExecInfo) bool { + return true + }, + } + + const outputFileName = "/path/to/foo.o" + + ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error { + cmdOutputArg := (*string)(nil) + for i, e := range cmd.Args { + if e == "-o" { + // Assume something follows. If not, we'll crash and the + // test will fail. 
+ cmdOutputArg = &cmd.Args[i+1] + } + } + + switch ctx.cmdCount { + case 1: + if cmdOutputArg == nil || *cmdOutputArg != outputFileName { + t.Errorf("Got command args %q; want `-o %q` in them", cmd.Args, outputFileName) + } + return nil + case 2: + if cmdOutputArg != nil { + t.Errorf("Got command args %q; want no mention of `-o %q` in them", cmd.Args, outputFileName) + } + return nil + default: + t.Fatalf("Unexpected command count: %d", ctx.cmdCount) + panic("Unreachable") + } + } + + exitCode, err := buildWithAutocrashImpl(ctx, ctx.cfg, ctx.newCommand(clangX86_64, "-o", outputFileName, mainCc), autocrashPostCmd) + if err != nil { + t.Fatalf("unexpectedly failed with %v", err) + } + ctx.must(exitCode) + + if ctx.cmdCount != 2 { + t.Errorf("expected 2 calls. Got: %d", ctx.cmdCount) + } + }) +} -- cgit v1.2.3 From 758eaa4e1e9270a1d0c99ca5aaedf76d8c1511b4 Mon Sep 17 00:00:00 2001 From: Ryan Beltran <ryanbeltran@chromium.org> Date: Fri, 1 Jul 2022 23:25:51 +0000 Subject: update OWNERS.toolchain This CL updates OWNERS.toolchain with recent changes to the ChromeOS Toolchain Team. BUG=b:237711871 TEST=presubmit Change-Id: I66b02236664f04b49cd5a7a81c17ccb5b6fc86e3 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3741464 Tested-by: George Burgess <gbiv@chromium.org> Reviewed-by: George Burgess <gbiv@chromium.org> Commit-Queue: George Burgess <gbiv@chromium.org> --- OWNERS.toolchain | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/OWNERS.toolchain b/OWNERS.toolchain index 67a4cd07..e8cdbf98 100644 --- a/OWNERS.toolchain +++ b/OWNERS.toolchain @@ -1,12 +1,9 @@ +adriandole@google.com ajordanr@google.com cjdb@google.com denik@chromium.org gbiv@chromium.org inglorion@chromium.org -llozano@chromium.org manojgupta@chromium.org mbenfield@google.com ryanbeltran@chromium.org - -# Temporary; see comment #2 on crbug.com/982498 -llozano@google.com -- cgit v1.2.3 From 36dcdff746b5708985c4ff1771dca3b1002cd3e3 Mon Sep 17 00:00:00 2001 From: Denis Nikitin <denik@google.com> Date: Wed, 6 Jul 2022 09:38:13 -0700 Subject: afdo_metadata: Publish the new kernel profiles Update chromeos-kernel-4.4 Update chromeos-kernel-4.14 Update chromeos-kernel-4.19 Update chromeos-kernel-5.4 Update chromeos-kernel-5.10 BUG=None TEST=Verified in kernel-release-afdo-verify-orchestrator Change-Id: I65702c5f9e1342eaba5707546f049df6f7eb5fd6 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3748693 Tested-by: Denis Nikitin <denik@chromium.org> Commit-Queue: Christopher Di Bella <cjdb@google.com> Reviewed-by: Christopher Di Bella <cjdb@google.com> Auto-Submit: Denis Nikitin <denik@chromium.org> --- afdo_metadata/kernel_afdo.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json index ee4b6248..55831e01 100644 --- a/afdo_metadata/kernel_afdo.json +++ b/afdo_metadata/kernel_afdo.json @@ -1,17 +1,17 @@ { "chromeos-kernel-4_4": { - "name": "R105-14816.64-1655717501" + "name": "R105-14943.0-1656927124" }, "chromeos-kernel-4_14": { - "name": "R105-14909.11-1655717804" + "name": "R105-14943.0-1656927138" }, "chromeos-kernel-4_19": { - "name": "R105-14909.11-1655717582" + "name": "R105-14943.0-1656927254" }, "chromeos-kernel-5_4": { - "name": "R105-14909.11-1655717651" + "name": "R105-14943.0-1656927279" }, "chromeos-kernel-5_10": { - "name": "R105-14909.11-1655717748" + "name": "R105-14909.34-1656927210" } } -- cgit v1.2.3 From 
4e4720974f409d48e8d862c291f63b511ce10e3a Mon Sep 17 00:00:00 2001
From: Jordan R Abrahams-Whitehead <ajordanr@google.com>
Date: Mon, 11 Jul 2022 20:04:15 +0000
Subject: patch_sync: Add copyright lines to patch_sync

These copyright lines should be added, and they're currently missing.
Presubmit checks fail without this change.

BUG=None
TEST=Presubmit checks pass

Change-Id: I9cd3b9e26a4d7284974705aa96cb6050db25e767
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3756344
Auto-Submit: Jordan Abrahams-Whitehead <ajordanr@google.com>
Reviewed-by: George Burgess <gbiv@chromium.org>
Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com>
Commit-Queue: George Burgess <gbiv@chromium.org>
---
 llvm_tools/patch_sync/src/android_utils.rs   | 4 ++++
 llvm_tools/patch_sync/src/main.rs            | 4 ++++
 llvm_tools/patch_sync/src/patch_parsing.rs   | 4 ++++
 llvm_tools/patch_sync/src/version_control.rs | 4 ++++
 4 files changed, 16 insertions(+)

diff --git a/llvm_tools/patch_sync/src/android_utils.rs b/llvm_tools/patch_sync/src/android_utils.rs
index 77cb4b8a..7b0e5849 100644
--- a/llvm_tools/patch_sync/src/android_utils.rs
+++ b/llvm_tools/patch_sync/src/android_utils.rs
@@ -1,3 +1,7 @@
+// Copyright 2022 The ChromiumOS Authors.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
 use std::path::Path;
 use std::process::Command;
 
diff --git a/llvm_tools/patch_sync/src/main.rs b/llvm_tools/patch_sync/src/main.rs
index 5c11b453..2141721a 100644
--- a/llvm_tools/patch_sync/src/main.rs
+++ b/llvm_tools/patch_sync/src/main.rs
@@ -1,3 +1,7 @@
+// Copyright 2022 The ChromiumOS Authors.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
 mod android_utils;
 mod patch_parsing;
 mod version_control;
 
diff --git a/llvm_tools/patch_sync/src/patch_parsing.rs b/llvm_tools/patch_sync/src/patch_parsing.rs
index f1ad52fb..2f5d6d69 100644
--- a/llvm_tools/patch_sync/src/patch_parsing.rs
+++ b/llvm_tools/patch_sync/src/patch_parsing.rs
@@ -1,3 +1,7 @@
+// Copyright 2022 The ChromiumOS Authors.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
 use std::collections::{BTreeMap, BTreeSet};
 use std::fs::{copy, File};
 use std::io::{BufRead, BufReader, Read, Write};
 
diff --git a/llvm_tools/patch_sync/src/version_control.rs b/llvm_tools/patch_sync/src/version_control.rs
index f8ddbcaa..125d5cbc 100644
--- a/llvm_tools/patch_sync/src/version_control.rs
+++ b/llvm_tools/patch_sync/src/version_control.rs
@@ -1,3 +1,7 @@
+// Copyright 2022 The ChromiumOS Authors.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
 use anyhow::{anyhow, bail, ensure, Context, Result};
 use regex::Regex;
 use std::ffi::OsStr;
--
cgit v1.2.3

From fd100eeff847d43ac84b69606cda9309a913070a Mon Sep 17 00:00:00 2001
From: Jordan R Abrahams-Whitehead <ajordanr@google.com>
Date: Mon, 11 Jul 2022 19:53:03 +0000
Subject: patch_sync: Sync version range changes

At present, patch_sync intentionally does not copy over version changes
to existing patches; it only copies new patches to the other
repositories. This commit changes that behaviour at the request of the
Android toolchain team: version range changes are now also copied over
(but nothing else).
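Roughly, the new transpose flow looks like the following (a simplified
sketch using only names introduced in this change — `new_patches`,
`filter_version_changes`, and `update_version_ranges`; error handling
and the symmetric Android-to-ChromiumOS direction are elided):

```
// Sketch only: version updates found on the ChromiumOS side are filtered
// down to patches that also exist in the Android collection, then applied
// to that collection before any new patches are transposed onto it.
let diff = patch_parsing::new_patches(&cros_patches_path, &old_contents, "chromiumos")?;
let updates = filter_version_changes(diff.version_updates, &cur_android_collection);
let cur_android_collection = cur_android_collection.update_version_ranges(&updates);
```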
BUG=b:237030928 TEST=cargo test TEST=patch_sync locally with version range changes Change-Id: I115fa02012cdf663a2b5b5657e769f513241dedd Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3756345 Reviewed-by: George Burgess <gbiv@chromium.org> Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com> --- llvm_tools/patch_sync/src/main.rs | 65 ++++++++++- llvm_tools/patch_sync/src/patch_parsing.rs | 166 ++++++++++++++++++++++++++--- 2 files changed, 214 insertions(+), 17 deletions(-) diff --git a/llvm_tools/patch_sync/src/main.rs b/llvm_tools/patch_sync/src/main.rs index 2141721a..c3c9a61b 100644 --- a/llvm_tools/patch_sync/src/main.rs +++ b/llvm_tools/patch_sync/src/main.rs @@ -13,7 +13,7 @@ use std::path::{Path, PathBuf}; use anyhow::{Context, Result}; use structopt::StructOpt; -use patch_parsing::{filter_patches_by_platform, PatchCollection, PatchDictSchema}; +use patch_parsing::{filter_patches_by_platform, PatchCollection, PatchDictSchema, VersionRange}; use version_control::RepoSetupContext; fn main() -> Result<()> { @@ -137,13 +137,21 @@ fn transpose_subcmd(args: TransposeOpt) -> Result<()> { let android_patches_path = ctx.android_patches_path(); // Get new Patches ------------------------------------------------------- - let (cur_cros_collection, new_cros_patches) = patch_parsing::new_patches( + let patch_parsing::PatchTemporalDiff { + cur_collection: cur_cros_collection, + new_patches: new_cros_patches, + version_updates: cros_version_updates, + } = patch_parsing::new_patches( &cros_patches_path, &ctx.old_cros_patch_contents(&args.old_cros_ref)?, "chromiumos", ) .context("finding new patches for chromiumos")?; - let (cur_android_collection, new_android_patches) = patch_parsing::new_patches( + let patch_parsing::PatchTemporalDiff { + cur_collection: cur_android_collection, + new_patches: new_android_patches, + version_updates: android_version_updates, + } = patch_parsing::new_patches( &android_patches_path, &ctx.old_android_patch_contents(&args.old_android_ref)?, "android", @@ -176,9 +184,17 @@ fn transpose_subcmd(args: TransposeOpt) -> Result<()> { } }); + // Need to filter version updates to only existing patches to the other platform. 
+ let cros_version_updates = + filter_version_changes(cros_version_updates, &cur_android_collection); + let android_version_updates = + filter_version_changes(android_version_updates, &cur_cros_collection); + if args.verbose { display_patches("New patches from ChromiumOS", &new_cros_patches); + display_version_updates("Version updates from ChromiumOS", &cros_version_updates); display_patches("New patches from Android", &new_android_patches); + display_version_updates("Version updates from Android", &android_version_updates); } if args.dry_run { @@ -192,9 +208,11 @@ fn transpose_subcmd(args: TransposeOpt) -> Result<()> { ModifyOpt { new_cros_patches, cur_cros_collection, + cros_version_updates, cros_reviewers: args.cros_reviewers, new_android_patches, cur_android_collection, + android_version_updates, android_reviewers: args.android_reviewers, }, ) @@ -203,9 +221,11 @@ fn transpose_subcmd(args: TransposeOpt) -> Result<()> { struct ModifyOpt { new_cros_patches: PatchCollection, cur_cros_collection: PatchCollection, + cros_version_updates: Vec<(String, Option<VersionRange>)>, cros_reviewers: Vec<String>, new_android_patches: PatchCollection, cur_android_collection: PatchCollection, + android_version_updates: Vec<(String, Option<VersionRange>)>, android_reviewers: Vec<String>, } @@ -217,11 +237,16 @@ fn modify_repos(ctx: &RepoSetupContext, no_commit: bool, opt: ModifyOpt) -> Resu // Transpose Patches ----------------------------------------------------- let mut cur_android_collection = opt.cur_android_collection; let mut cur_cros_collection = opt.cur_cros_collection; - if !opt.new_cros_patches.is_empty() { + // Apply any version ranges and new patches, then write out. + if !opt.new_cros_patches.is_empty() || !opt.cros_version_updates.is_empty() { + cur_android_collection = + cur_android_collection.update_version_ranges(&opt.cros_version_updates); opt.new_cros_patches .transpose_write(&mut cur_android_collection)?; } - if !opt.new_android_patches.is_empty() { + if !opt.new_android_patches.is_empty() || !opt.android_version_updates.is_empty() { + cur_cros_collection = + cur_cros_collection.update_version_ranges(&opt.android_version_updates); opt.new_android_patches .transpose_write(&mut cur_cros_collection)?; } @@ -250,6 +275,25 @@ fn modify_repos(ctx: &RepoSetupContext, no_commit: bool, opt: ModifyOpt) -> Resu Ok(()) } +/// Filter version changes that can't apply to a given collection. 
+fn filter_version_changes<T>( + version_updates: T, + other_platform_collection: &PatchCollection, +) -> Vec<(String, Option<VersionRange>)> +where + T: IntoIterator<Item = (String, Option<VersionRange>)>, +{ + version_updates + .into_iter() + .filter(|(rel_patch_path, _)| { + other_platform_collection + .patches + .iter() + .any(|p| &p.rel_patch_path == rel_patch_path) + }) + .collect() +} + fn display_patches(prelude: &str, collection: &PatchCollection) { println!("{}", prelude); if collection.patches.is_empty() { @@ -259,6 +303,17 @@ fn display_patches(prelude: &str, collection: &PatchCollection) { println!("{}", collection); } +fn display_version_updates(prelude: &str, version_updates: &[(String, Option<VersionRange>)]) { + println!("{}", prelude); + if version_updates.is_empty() { + println!(" [No Version Changes]"); + return; + } + for (rel_patch_path, _) in version_updates { + println!("* {}", rel_patch_path); + } +} + #[derive(Debug, structopt::StructOpt)] #[structopt(name = "patch_sync", about = "A pipeline for syncing the patch code")] enum Opt { diff --git a/llvm_tools/patch_sync/src/patch_parsing.rs b/llvm_tools/patch_sync/src/patch_parsing.rs index 2f5d6d69..0f4cb741 100644 --- a/llvm_tools/patch_sync/src/patch_parsing.rs +++ b/llvm_tools/patch_sync/src/patch_parsing.rs @@ -12,7 +12,7 @@ use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; /// JSON serde struct. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] pub struct PatchDictSchema { pub metadata: Option<BTreeMap<String, serde_json::Value>>, #[serde(default, skip_serializing_if = "BTreeSet::is_empty")] @@ -21,7 +21,7 @@ pub struct PatchDictSchema { pub version_range: Option<VersionRange>, } -#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)] pub struct VersionRange { pub from: Option<u64>, pub until: Option<u64>, @@ -122,6 +122,61 @@ impl PatchCollection { ) } + /// Vec of every PatchDictSchema with differing + /// version ranges but the same rel_patch_paths. + fn version_range_diffs(&self, other: &Self) -> Vec<(String, Option<VersionRange>)> { + let other_map: BTreeMap<_, _> = other + .patches + .iter() + .map(|p| (p.rel_patch_path.clone(), p)) + .collect(); + self.patches + .iter() + .filter_map(|ours| match other_map.get(&ours.rel_patch_path) { + Some(theirs) => { + if ours.get_from_version() != theirs.get_from_version() + || ours.get_until_version() != theirs.get_until_version() + { + Some((ours.rel_patch_path.clone(), ours.version_range)) + } else { + None + } + } + _ => None, + }) + .collect() + } + + /// Given a vector of tuples with (rel_patch_path, Option<VersionRange>), replace + /// all version ranges in this collection with a matching one in the new_versions parameter. + pub fn update_version_ranges(&self, new_versions: &[(String, Option<VersionRange>)]) -> Self { + // new_versions should be really tiny (len() <= 2 for the most part), so + // the overhead of O(1) lookups is not worth it. + let get_updated_version = |rel_patch_path: &str| -> Option<Option<VersionRange>> { + // The first Option indicates whether we are updating it at all. + // The second Option indicates we can update it with None. 
+ new_versions + .iter() + .find(|i| i.0 == rel_patch_path) + .map(|x| x.1) + }; + let cloned_patches = self + .patches + .iter() + .map(|p| match get_updated_version(&p.rel_patch_path) { + Some(version_range) => PatchDictSchema { + version_range, + ..p.clone() + }, + _ => p.clone(), + }) + .collect(); + Self { + workdir: self.workdir.clone(), + patches: cloned_patches, + } + } + fn union_helper( &self, other: &Self, @@ -255,25 +310,38 @@ impl std::fmt::Display for PatchCollection { } } +/// Represents information which changed between now and an old version of a PATCHES.json file. +pub struct PatchTemporalDiff { + pub cur_collection: PatchCollection, + pub new_patches: PatchCollection, + // Store version_updates as a vec, not a map, as it's likely to be very small (<=2), + // and the overhead of using a O(1) look up structure isn't worth it. + pub version_updates: Vec<(String, Option<VersionRange>)>, +} + /// Generate a PatchCollection incorporating only the diff between current patches and old patch /// contents. pub fn new_patches( patches_path: &Path, old_patch_contents: &str, platform: &str, -) -> Result<(PatchCollection, PatchCollection)> { +) -> Result<PatchTemporalDiff> { + // Set up the current patch collection. let cur_collection = PatchCollection::parse_from_file(patches_path) .with_context(|| format!("parsing {} PATCHES.json", platform))?; let cur_collection = filter_patches_by_platform(&cur_collection, platform); let cur_collection = cur_collection.filter_patches(|p| cur_collection.patch_exists(p)); - let new_patches: PatchCollection = { - let old_collection = PatchCollection::parse_from_str( - patches_path.parent().unwrap().to_path_buf(), - old_patch_contents, - )?; - let old_collection = old_collection.filter_patches(|p| old_collection.patch_exists(p)); - cur_collection.subtract(&old_collection)? - }; + + // Set up the old patch collection. 
+ let old_collection = PatchCollection::parse_from_str( + patches_path.parent().unwrap().to_path_buf(), + old_patch_contents, + )?; + let old_collection = old_collection.filter_patches(|p| old_collection.patch_exists(p)); + + // Set up the differential values + let version_updates = cur_collection.version_range_diffs(&old_collection); + let new_patches: PatchCollection = cur_collection.subtract(&old_collection)?; let new_patches = new_patches.map_patches(|p| { let mut platforms = BTreeSet::new(); platforms.extend(["android".to_string(), "chromiumos".to_string()]); @@ -282,7 +350,11 @@ pub fn new_patches( ..p.to_owned() } }); - Ok((cur_collection, new_patches)) + Ok(PatchTemporalDiff { + cur_collection, + new_patches, + version_updates, + }) } /// Create a new collection with only the patches that apply to the @@ -446,4 +518,74 @@ mod test { assert_eq!(union.patches.len(), 1); assert_eq!(union.patches[0].platforms.len(), 0); } + + #[test] + fn test_version_differentials() { + let fixture = version_range_fixture(); + let diff = fixture[0].version_range_diffs(&fixture[1]); + assert_eq!(diff.len(), 1); + assert_eq!( + &diff, + &[( + "a".to_string(), + Some(VersionRange { + from: Some(0), + until: Some(1) + }) + )] + ); + let diff = fixture[1].version_range_diffs(&fixture[2]); + assert_eq!(diff.len(), 0); + } + + #[test] + fn test_version_updates() { + let fixture = version_range_fixture(); + let collection = fixture[0].update_version_ranges(&[("a".into(), None)]); + assert_eq!(collection.patches[0].version_range, None); + assert_eq!(collection.patches[1], fixture[1].patches[1]); + let new_version_range = Some(VersionRange { + from: Some(42), + until: Some(43), + }); + let collection = fixture[0].update_version_ranges(&[("a".into(), new_version_range)]); + assert_eq!(collection.patches[0].version_range, new_version_range); + assert_eq!(collection.patches[1], fixture[1].patches[1]); + } + + fn version_range_fixture() -> Vec<PatchCollection> { + let patch1 = PatchDictSchema { + rel_patch_path: "a".into(), + metadata: None, + platforms: Default::default(), + version_range: Some(VersionRange { + from: Some(0), + until: Some(1), + }), + }; + let patch1_updated = PatchDictSchema { + version_range: Some(VersionRange { + from: Some(0), + until: Some(3), + }), + ..patch1.clone() + }; + let patch2 = PatchDictSchema { + rel_patch_path: "b".into(), + ..patch1.clone() + }; + let collection1 = PatchCollection { + workdir: PathBuf::new(), + patches: vec![patch1, patch2.clone()], + }; + let collection2 = PatchCollection { + workdir: PathBuf::new(), + patches: vec![patch1_updated, patch2.clone()], + }; + let collection3 = PatchCollection { + workdir: PathBuf::new(), + patches: vec![patch2], + }; + vec![collection1, collection2, collection3] + } } -- cgit v1.2.3 From ad8fdba512d0e5547e112742c1f1b86d4f463a78 Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead <ajordanr@google.com> Date: Wed, 13 Jul 2022 23:25:22 +0000 Subject: llvm_tools: Clean patch_manager dead code, tests At present, patch_manager.py and related files had lots of dead code (>50% of the lines were not used). This commit removes them, and removes the tests which tested that unused code. Test concepts that were useful were either already covered, or have been added as well in this commit. This commit also cleans up some imports and moves some functions for better access to testing. 
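For instance, the surviving entry points now take explicit arguments
rather than reading sys.argv and fixed paths themselves, so tests can
drive the script directly. A hypothetical invocation (the revision and
paths below are placeholders, not values from this change):

```
# Sketch only: drive patch_manager's main() with an explicit argv list,
# the way a test (or another script) now can.
import patch_manager

patch_manager.main([
    '--svn_version', '458507',
    '--patch_metadata_file', '/path/to/PATCHES.json',
    '--src_path', '/path/to/llvm-project',
])
```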
BUG=b:188465085 TEST=Preupload tests pass TEST=llvm builds successfully locally with copied llvm_tools/ Change-Id: I202952a1f5d53a1182c1c6c51b8072573c5cbc5d Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3761449 Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com> Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Reviewed-by: George Burgess <gbiv@chromium.org> Reviewed-by: Manoj Gupta <manojgupta@chromium.org> --- llvm_tools/patch_manager.py | 667 ++---------------------- llvm_tools/patch_manager_unittest.py | 969 ++--------------------------------- llvm_tools/patch_utils.py | 14 +- llvm_tools/patch_utils_unittest.py | 32 +- 4 files changed, 140 insertions(+), 1542 deletions(-) diff --git a/llvm_tools/patch_manager.py b/llvm_tools/patch_manager.py index 056757fe..2893d611 100755 --- a/llvm_tools/patch_manager.py +++ b/llvm_tools/patch_manager.py @@ -10,14 +10,12 @@ import enum import json import os from pathlib import Path -import subprocess import sys -from typing import Any, Dict, IO, Iterable, List, Tuple +from typing import Any, Dict, IO, Iterable, List, Optional, Tuple from failure_modes import FailureModes import get_llvm_hash import patch_utils -from subprocess_helpers import check_call from subprocess_helpers import check_output @@ -35,115 +33,31 @@ class GitBisectionCode(enum.IntEnum): SKIP = 125 -def is_directory(dir_path): - """Validates that the argument passed into 'argparse' is a directory.""" - - if not os.path.isdir(dir_path): - raise ValueError('Path is not a directory: %s' % dir_path) - - return dir_path - - -def is_patch_metadata_file(patch_metadata_file): - """Valides the argument into 'argparse' is a patch file.""" - - if not os.path.isfile(patch_metadata_file): - raise ValueError('Invalid patch metadata file provided: %s' % - patch_metadata_file) - - if not patch_metadata_file.endswith('.json'): - raise ValueError('Patch metadata file does not end in ".json": %s' % - patch_metadata_file) - - return patch_metadata_file - - -def is_valid_failure_mode(failure_mode): - """Validates that the failure mode passed in is correct.""" - - cur_failure_modes = [mode.value for mode in FailureModes] - - if failure_mode not in cur_failure_modes: - raise ValueError('Invalid failure mode provided: %s' % failure_mode) - - return failure_mode - - -def EnsureBisectModeAndSvnVersionAreSpecifiedTogether(failure_mode, - good_svn_version): - """Validates that 'good_svn_version' is passed in only for bisection.""" - - if failure_mode != FailureModes.BISECT_PATCHES.value and good_svn_version: - raise ValueError('"good_svn_version" is only available for bisection.') - elif (failure_mode == FailureModes.BISECT_PATCHES.value - and not good_svn_version): - raise ValueError('A good SVN version is required for bisection (used by' - '"git bisect start".') - - -def GetCommandLineArgs(): +def GetCommandLineArgs(sys_argv: Optional[List[str]]): """Get the required arguments from the command line.""" # Create parser and add optional command-line arguments. parser = argparse.ArgumentParser(description='A manager for patches.') - # Add argument for the last good SVN version which is required by - # `git bisect start` (only valid for bisection mode). - parser.add_argument('--good_svn_version', - type=int, - help='INTERNAL USE ONLY... (used for bisection.)') - - # Add argument for the number of patches it iterate. Only used when performing - # `git bisect run`. 
- parser.add_argument('--num_patches_to_iterate', - type=int, - help=argparse.SUPPRESS) - - # Add argument for whether bisection should continue. Only used for - # 'bisect_patches.' - parser.add_argument( - '--continue_bisection', - type=bool, - default=False, - help='Determines whether bisection should continue after successfully ' - 'bisecting a patch (default: %(default)s) - only used for ' - '"bisect_patches"') - - # Trust src_path HEAD and svn_version. - parser.add_argument( - '--use_src_head', - action='store_true', - help='Use the HEAD of src_path directory as is, not necessarily the same ' - 'as the svn_version of upstream.') - # Add argument for the LLVM version to use for patch management. parser.add_argument( '--svn_version', type=int, - required=True, help='the LLVM svn version to use for patch management (determines ' - 'whether a patch is applicable)') + 'whether a patch is applicable). Required when not bisecting.') # Add argument for the patch metadata file that is in $FILESDIR. parser.add_argument( '--patch_metadata_file', required=True, - type=is_patch_metadata_file, + type=Path, help='the absolute path to the .json file in "$FILESDIR/" of the ' 'package which has all the patches and their metadata if applicable') - # Add argument for the absolute path to the ebuild's $FILESDIR path. - # Example: '.../sys-devel/llvm/files/'. - parser.add_argument( - '--filesdir_path', - required=True, - type=is_directory, - help='the absolute path to the ebuild "files/" directory') - # Add argument for the absolute path to the unpacked sources. parser.add_argument('--src_path', required=True, - type=is_directory, + type=Path, help='the absolute path to the unpacked LLVM sources') # Add argument for the mode of the patch manager when handling failing @@ -161,12 +75,7 @@ def GetCommandLineArgs(): 'application of. Not used in other modes.') # Parse the command line. - args_output = parser.parse_args() - - EnsureBisectModeAndSvnVersionAreSpecifiedTogether( - args_output.failure_mode, args_output.good_svn_version) - - return args_output + return parser.parse_args(sys_argv) def GetHEADSVNVersion(src_path): @@ -181,129 +90,6 @@ def GetHEADSVNVersion(src_path): return version -def VerifyHEADIsTheSameAsSVNVersion(src_path, svn_version): - """Verifies that HEAD's SVN version matches 'svn_version'.""" - - head_svn_version = GetHEADSVNVersion(src_path) - - if head_svn_version != svn_version: - raise ValueError('HEAD\'s SVN version %d does not match "svn_version"' - ' %d, please move HEAD to "svn_version"s\' git hash.' % - (head_svn_version, svn_version)) - - -def GetPathToPatch(filesdir_path, rel_patch_path): - """Gets the absolute path to a patch in $FILESDIR. - - Args: - filesdir_path: The absolute path to $FILESDIR. - rel_patch_path: The relative path to the patch in '$FILESDIR/'. - - Returns: - The absolute path to the patch in $FILESDIR. - - Raises: - ValueError: Unable to find the path to the patch in $FILESDIR. - """ - - if not os.path.isdir(filesdir_path): - raise ValueError('Invalid path to $FILESDIR provided: %s' % filesdir_path) - - # Combine $FILESDIR + relative path of patch to $FILESDIR. - patch_path = os.path.join(filesdir_path, rel_patch_path) - - if not os.path.isfile(patch_path): - raise ValueError('The absolute path %s to the patch %s does not exist' % - (patch_path, rel_patch_path)) - - return patch_path - - -def GetPatchMetadata(patch_dict): - """Gets the patch's metadata. - - Args: - patch_dict: A dictionary that has the patch metadata. 
- - Returns: - A tuple that contains the metadata values. - """ - - if 'version_range' in patch_dict: - from_version = patch_dict['version_range'].get('from', 0) - until_version = patch_dict['version_range'].get('until', None) - else: - from_version = 0 - until_version = None - is_critical = patch_dict.get('is_critical', False) - - return from_version, until_version, is_critical - - -def ApplyPatch(src_path, patch_path): - """Attempts to apply the patch. - - Args: - src_path: The absolute path to the unpacked sources of the package. - patch_path: The absolute path to the patch in $FILESDIR/ - - Returns: - A boolean where 'True' means that the patch applied fine or 'False' means - that the patch failed to apply. - """ - - if not os.path.isdir(src_path): - raise ValueError('Invalid src path provided: %s' % src_path) - - if not os.path.isfile(patch_path): - raise ValueError('Invalid patch file provided: %s' % patch_path) - - # Test the patch with '--dry-run' before actually applying the patch. - test_patch_cmd = [ - 'patch', '--dry-run', '-d', src_path, '-f', '-p1', '-E', - '--no-backup-if-mismatch', '-i', patch_path - ] - - # Cmd to apply a patch in the src unpack path. - apply_patch_cmd = [ - 'patch', '-d', src_path, '-f', '-p1', '-E', '--no-backup-if-mismatch', - '-i', patch_path - ] - - try: - check_output(test_patch_cmd) - - # If the mode is 'continue', then catching the exception makes sure that - # the program does not exit on the first failed applicable patch. - except subprocess.CalledProcessError: - # Test run on the patch failed to apply. - return False - - # Test run succeeded on the patch. - check_output(apply_patch_cmd) - - return True - - -def UpdatePatchMetadataFile(patch_metadata_file, patches): - """Updates the .json file with unchanged and at least one changed patch. - - Args: - patch_metadata_file: The absolute path to the .json file that has all the - patches and its metadata. - patches: A list of patches whose metadata were or were not updated. - - Raises: - ValueError: The patch metadata file does not have the correct extension. - """ - - if not patch_metadata_file.endswith('.json'): - raise ValueError('File does not end in ".json": %s' % patch_metadata_file) - - with open(patch_metadata_file, 'w') as patch_file: - _WriteJsonChanges(patches, patch_file) - - def _WriteJsonChanges(patches: List[Dict[str, Any]], file_io: IO[str]): """Write JSON changes to file, does not acquire new file lock.""" json.dump(patches, file_io, indent=4, separators=(',', ': ')) @@ -321,65 +107,6 @@ def GetCommitHashesForBisection(src_path, good_svn_version, bad_svn_version): return good_commit_hash, bad_commit_hash -def PerformBisection(src_path, good_commit, bad_commit, svn_version, - patch_metadata_file, filesdir_path, num_patches): - """Performs bisection to determine where a patch stops applying.""" - - start_cmd = [ - 'git', '-C', src_path, 'bisect', 'start', bad_commit, good_commit - ] - - check_output(start_cmd) - - run_cmd = [ - 'git', '-C', src_path, 'bisect', 'run', - os.path.abspath(__file__), '--svn_version', - '%d' % svn_version, '--patch_metadata_file', patch_metadata_file, - '--filesdir_path', filesdir_path, '--src_path', src_path, - '--failure_mode', 'internal_bisection', '--num_patches_to_iterate', - '%d' % num_patches - ] - - check_call(run_cmd) - - # Successfully bisected the patch, so retrieve the SVN version from the - # commit message. 
- get_bad_commit_hash_cmd = [ - 'git', '-C', src_path, 'rev-parse', 'refs/bisect/bad' - ] - - git_hash = check_output(get_bad_commit_hash_cmd) - - end_cmd = ['git', '-C', src_path, 'bisect', 'reset'] - - check_output(end_cmd) - - # `git bisect run` returns the bad commit hash and the commit message. - version = get_llvm_hash.GetVersionFrom(src_path, git_hash.rstrip()) - - return version - - -def SaveSrcTreeState(src_path): - """Stashes the changes made so far to the source tree.""" - - save_src_tree_cmd = ['git', '-C', src_path, 'stash', '-a'] - - check_output(save_src_tree_cmd) - - -def RestoreSrcTreeState(src_path, bad_commit_hash): - """Restores the changes made to the source tree.""" - - checkout_cmd = ['git', '-C', src_path, 'checkout', bad_commit_hash] - - check_output(checkout_cmd) - - get_changes_cmd = ['git', '-C', src_path, 'stash', 'pop'] - - check_output(get_changes_cmd) - - def RemoveOldPatches(svn_version: int, llvm_src_dir: Path, patches_json_fp: Path): """Remove patches that don't and will never apply for the future. @@ -426,6 +153,32 @@ def UpdateVersionRanges(svn_version: int, llvm_src_dir: Path, patches_json_fp.parent, f, ) + modified_entries = UpdateVersionRangesWithEntries(svn_version, llvm_src_dir, + patch_entries) + with patch_utils.atomic_write(patches_json_fp, encoding='utf-8') as f: + _WriteJsonChanges([p.to_dict() for p in patch_entries], f) + for entry in modified_entries: + print(f'Stopped applying {entry.rel_patch_path} ({entry.title()}) ' + f'for r{svn_version}') + + +def UpdateVersionRangesWithEntries( + svn_version: int, llvm_src_dir: Path, + patch_entries: Iterable[patch_utils.PatchEntry] +) -> List[patch_utils.PatchEntry]: + """Test-able helper for UpdateVersionRanges. + + Args: + svn_version: LLVM revision number. + llvm_src_dir: llvm-project directory path. + patch_entries: PatchEntry objects to modify. + + Returns: + A list of PatchEntry objects which were modified. + + Post: + Modifies patch_entries in place. + """ modified_entries: List[patch_utils.PatchEntry] = [] with patch_utils.git_clean_context(llvm_src_dir): for pe in patch_entries: @@ -440,11 +193,7 @@ def UpdateVersionRanges(svn_version: int, llvm_src_dir: Path, # will stack properly. if not pe.apply(llvm_src_dir).succeeded: raise RuntimeError('Could not apply patch that dry ran successfully') - with patch_utils.atomic_write(patches_json_fp, encoding='utf-8') as f: - _WriteJsonChanges([p.to_dict() for p in patch_entries], f) - for entry in modified_entries: - print(f'Stopped applying {entry.rel_patch_path} ({entry.title()}) ' - f'for r{svn_version}') + return modified_entries def CheckPatchApplies(svn_version: int, llvm_src_dir: Path, @@ -537,326 +286,6 @@ def ApplyPatchAndPrior( 'Does it exist?') -def HandlePatches(svn_version, - patch_metadata_file, - filesdir_path, - src_path, - mode, - good_svn_version=None, - num_patches_to_iterate=None, - continue_bisection=False): - """Handles the patches in the .json file for the package. - - Args: - svn_version: The LLVM version to use for patch management. - patch_metadata_file: The absolute path to the .json file in '$FILESDIR/' - that has all the patches and their metadata. - filesdir_path: The absolute path to $FILESDIR. - src_path: The absolute path to the unpacked destination of the package. - mode: The action to take when an applicable patch failed to apply. - Ex: 'FailureModes.FAIL' - good_svn_version: Only used by 'bisect_patches' which tells - `git bisect start` the good version. 
- num_patches_to_iterate: The number of patches to iterate in the .JSON file - (internal use). Only used by `git bisect run`. - continue_bisection: Only used for 'bisect_patches' mode. If flag is set, - then bisection will continue to the next patch when successfully bisected a - patch. - - Returns: - Depending on the mode, 'None' would be returned if everything went well or - the .json file was not updated. Otherwise, a list or multiple lists would - be returned that indicates what has changed. - - Raises: - ValueError: The patch metadata file does not exist or does not end with - '.json' or the absolute path to $FILESDIR does not exist or the unpacked - path does not exist or if the mode is 'fail', then an applicable patch - failed to apply. - """ - - # A flag for whether the mode specified would possible modify the patches. - can_modify_patches = False - - # 'fail' or 'continue' mode would not modify a patch's metadata, so the .json - # file would stay the same. - if mode != FailureModes.FAIL and mode != FailureModes.CONTINUE: - can_modify_patches = True - - # A flag that determines whether at least one patch's metadata was - # updated due to the mode that is passed in. - updated_patch = False - - # A list of patches that will be in the updated .json file. - applicable_patches = [] - - # A list of patches that successfully applied. - applied_patches = [] - - # A list of patches that were disabled. - disabled_patches = [] - - # A list of bisected patches. - bisected_patches = [] - - # A list of non applicable patches. - non_applicable_patches = [] - - # A list of patches that will not be included in the updated .json file - removed_patches = [] - - # Whether the patch metadata file was modified where 'None' means that the - # patch metadata file was not modified otherwise the absolute path to the - # patch metadata file is stored. - modified_metadata = None - - # A list of patches that failed to apply. - failed_patches = [] - - with open(patch_metadata_file) as patch_file: - patch_file_contents = json.load(patch_file) - - if mode == FailureModes.BISECT_PATCHES: - # A good and bad commit are required by `git bisect start`. - good_commit, bad_commit = GetCommitHashesForBisection( - src_path, good_svn_version, svn_version) - - # Patch format: - # { - # "rel_patch_path" : "[REL_PATCH_PATH_FROM_$FILESDIR]" - # [PATCH_METADATA] if available. - # } - # - # For each patch, find the path to it in $FILESDIR and get its metadata if - # available, then check if the patch is applicable. - for patch_dict_index, cur_patch_dict in enumerate(patch_file_contents): - # Used by the internal bisection. All the patches in the interval [0, N] - # have been iterated. - if (num_patches_to_iterate - and (patch_dict_index + 1) > num_patches_to_iterate): - break - - # Get the absolute path to the patch in $FILESDIR. - path_to_patch = GetPathToPatch(filesdir_path, - cur_patch_dict['rel_patch_path']) - - # Get the patch's metadata. - # - # Index information of 'patch_metadata': - # [0]: from_version - # [1]: until_version - # [2]: is_critical - patch_metadata = GetPatchMetadata(cur_patch_dict) - - if not patch_metadata[1]: - # Patch does not have an 'until' value which implies - # 'until' == 'inf' ('svn_version' will always be less - # than 'until'), so the patch is applicable if - # 'svn_version' >= 'from'. - patch_applicable = svn_version >= patch_metadata[0] - else: - # Patch is applicable if 'svn_version' >= 'from' && - # "svn_version" < "until". 
- patch_applicable = (svn_version >= patch_metadata[0] - and svn_version < patch_metadata[1]) - - if can_modify_patches: - # Add to the list only if the mode can potentially modify a patch. - # - # If the mode is 'remove_patches', then all patches that are - # applicable or are from the future will be added to the updated .json - # file and all patches that are not applicable will be added to the - # remove patches list which will not be included in the updated .json - # file. - if (patch_applicable or svn_version < patch_metadata[0] - or mode != FailureModes.REMOVE_PATCHES): - applicable_patches.append(cur_patch_dict) - elif mode == FailureModes.REMOVE_PATCHES: - removed_patches.append(path_to_patch) - - if not modified_metadata: - # At least one patch will be removed from the .json file. - modified_metadata = patch_metadata_file - - if not patch_applicable: - non_applicable_patches.append(os.path.basename(path_to_patch)) - - # There is no need to apply patches in 'remove_patches' mode because the - # mode removes patches that do not apply anymore based off of - # 'svn_version.' - if patch_applicable and mode != FailureModes.REMOVE_PATCHES: - patch_applied = ApplyPatch(src_path, path_to_patch) - - if not patch_applied: # Failed to apply patch. - failed_patches.append(os.path.basename(path_to_patch)) - - # Check the mode to determine what action to take on the failing - # patch. - if mode == FailureModes.DISABLE_PATCHES: - # Set the patch's 'until' to 'svn_version' so the patch - # would not be applicable anymore (i.e. the patch's 'until' - # would not be greater than 'svn_version'). - - # Last element in 'applicable_patches' is the current patch. - new_version_range = applicable_patches[-1].get('version_range', {}) - new_version_range['until'] = svn_version - applicable_patches[-1]['version_range'] = new_version_range - - disabled_patches.append(os.path.basename(path_to_patch)) - - if not updated_patch: - # At least one patch has been modified, so the .json file - # will be updated with the new patch metadata. - updated_patch = True - - modified_metadata = patch_metadata_file - elif mode == FailureModes.BISECT_PATCHES: - # Figure out where the patch's stops applying and set the patch's - # 'until' to that version. - - # Do not want to overwrite the changes to the current progress of - # 'bisect_patches' on the source tree. - SaveSrcTreeState(src_path) - - # Need a clean source tree for `git bisect run` to avoid unnecessary - # fails for patches. - patch_utils.clean_src_tree(src_path) - - print('\nStarting to bisect patch %s for SVN version %d:\n' % - (os.path.basename( - cur_patch_dict['rel_patch_path']), svn_version)) - - # Performs the bisection: calls `git bisect start` and - # `git bisect run`, where `git bisect run` is going to call this - # script as many times as needed with specific arguments. - bad_svn_version = PerformBisection(src_path, good_commit, - bad_commit, svn_version, - patch_metadata_file, - filesdir_path, - patch_dict_index + 1) - - print('\nSuccessfully bisected patch %s, starts to fail to apply ' - 'at %d\n' % (os.path.basename( - cur_patch_dict['rel_patch_path']), bad_svn_version)) - - # Overwrite the .JSON file with the new 'until' for the - # current failed patch so that if there are other patches that - # fail to apply, then the 'until' for the current patch could - # be applicable when `git bisect run` is performed on the next - # failed patch because the same .JSON file is used for `git bisect - # run`. 
- new_version_range = patch_file_contents[patch_dict_index].get( - 'version_range', {}) - new_version_range['until'] = bad_svn_version - patch_file_contents[patch_dict_index][ - 'version_range'] = new_version_range - UpdatePatchMetadataFile(patch_metadata_file, patch_file_contents) - - # Clear the changes made to the source tree by `git bisect run`. - patch_utils.clean_src_tree(src_path) - - if not continue_bisection: - # Exiting program early because 'continue_bisection' is not set. - sys.exit(0) - - bisected_patches.append( - '%s starts to fail to apply at %d' % (os.path.basename( - cur_patch_dict['rel_patch_path']), bad_svn_version)) - - # Continue where 'bisect_patches' left off. - RestoreSrcTreeState(src_path, bad_commit) - - if not modified_metadata: - # At least one patch's 'until' has been updated. - modified_metadata = patch_metadata_file - - elif mode == FailureModes.FAIL: - if applied_patches: - print('The following patches applied successfully up to the ' - 'failed patch:') - print('\n'.join(applied_patches)) - - # Throw an exception on the first patch that failed to apply. - raise ValueError('Failed to apply patch: %s' % - os.path.basename(path_to_patch)) - elif mode == FailureModes.INTERNAL_BISECTION: - # Determine the exit status for `git bisect run` because of the - # failed patch in the interval [0, N]. - # - # NOTE: `git bisect run` exit codes are as follows: - # 130: Terminates the bisection. - # 1: Similar as `git bisect bad`. - - # Some patch in the interval [0, N) failed, so terminate bisection - # (the patch stack is broken). - if (patch_dict_index + 1) != num_patches_to_iterate: - print('\nTerminating bisection due to patch %s failed to apply ' - 'on SVN version %d.\n' % (os.path.basename( - cur_patch_dict['rel_patch_path']), svn_version)) - - # Man page for `git bisect run` states that any value over 127 - # terminates it. - sys.exit(130) - - # Changes to the source tree need to be removed, otherwise some - # patches may fail when applying the patch to the source tree when - # `git bisect run` calls this script again. - patch_utils.clean_src_tree(src_path) - - # The last patch in the interval [0, N] failed to apply, so let - # `git bisect run` know that the last patch (the patch that failed - # originally which led to `git bisect run` to be invoked) is bad - # with exit code 1. - sys.exit(1) - else: # Successfully applied patch - applied_patches.append(os.path.basename(path_to_patch)) - - # All patches in the interval [0, N] applied successfully, so let - # `git bisect run` know that the program exited with exit code 0 (good). - if mode == FailureModes.INTERNAL_BISECTION: - # Changes to the source tree need to be removed, otherwise some - # patches may fail when applying the patch to the source tree when - # `git bisect run` calls this script again. - # - # Also, if `git bisect run` will NOT call this script again (terminated) and - # if the source tree changes are not removed, `git bisect reset` will - # complain that the changes would need to be 'stashed' or 'removed' in - # order to reset HEAD back to the bad commit's git hash, so HEAD will remain - # on the last git hash used by `git bisect run`. - patch_utils.clean_src_tree(src_path) - - # NOTE: Exit code 0 is similar to `git bisect good`. 
- sys.exit(0) - - patch_info = patch_utils.PatchInfo( - applied_patches=applied_patches, - failed_patches=failed_patches, - non_applicable_patches=non_applicable_patches, - disabled_patches=disabled_patches, - removed_patches=removed_patches, - modified_metadata=modified_metadata) - - # Determine post actions after iterating through the patches. - if mode == FailureModes.REMOVE_PATCHES: - if removed_patches: - UpdatePatchMetadataFile(patch_metadata_file, applicable_patches) - elif mode == FailureModes.DISABLE_PATCHES: - if updated_patch: - UpdatePatchMetadataFile(patch_metadata_file, applicable_patches) - elif mode == FailureModes.BISECT_PATCHES: - PrintPatchResults(patch_info) - if modified_metadata: - print('\nThe following patches have been bisected:') - print('\n'.join(bisected_patches)) - - # Exiting early because 'bisect_patches' will not be called from other - # scripts, only this script uses 'bisect_patches'. The intent is to provide - # bisection information on the patches and aid in the bisection process. - sys.exit(0) - - return patch_info - - def PrintPatchResults(patch_info: patch_utils.PatchInfo): """Prints the results of handling the patches of a package. @@ -893,35 +322,41 @@ def PrintPatchResults(patch_info: patch_utils.PatchInfo): print('%s' % os.path.basename(cur_patch_path)) -def main(): +def main(sys_argv: List[str]): """Applies patches to the source tree and takes action on a failed patch.""" - args_output = GetCommandLineArgs() + args_output = GetCommandLineArgs(sys_argv) + + llvm_src_dir = Path(args_output.src_path) + if not llvm_src_dir.is_dir(): + raise ValueError(f'--src_path arg {llvm_src_dir} is not a directory') + patches_json_fp = Path(args_output.patch_metadata_file) + if not patches_json_fp.is_file(): + raise ValueError('--patch_metadata_file arg ' + f'{patches_json_fp} is not a file') def _apply_all(args): + if args.svn_version is None: + raise ValueError('--svn_version must be set when applying patches') result = patch_utils.apply_all_from_json( svn_version=args.svn_version, - llvm_src_dir=Path(args.src_path), - patches_json_fp=Path(args.patch_metadata_file), + llvm_src_dir=llvm_src_dir, + patches_json_fp=patches_json_fp, continue_on_failure=args.failure_mode == FailureModes.CONTINUE) PrintPatchResults(result) def _remove(args): - RemoveOldPatches(args.svn_version, Path(args.src_path), - Path(args.patch_metadata_file)) + RemoveOldPatches(args.svn_version, llvm_src_dir, patches_json_fp) def _disable(args): - UpdateVersionRanges(args.svn_version, Path(args.src_path), - Path(args.patch_metadata_file)) + UpdateVersionRanges(args.svn_version, llvm_src_dir, patches_json_fp) def _test_single(args): if not args.test_patch: raise ValueError('Running with bisect_patches requires the ' '--test_patch flag.') - llvm_src_dir = Path(args.src_path) svn_version = GetHEADSVNVersion(llvm_src_dir) - error_code = CheckPatchApplies(svn_version, llvm_src_dir, - Path(args.patch_metadata_file), + error_code = CheckPatchApplies(svn_version, llvm_src_dir, patches_json_fp, args.test_patch) # Since this is for bisection, we want to exit with the # GitBisectionCode enum. 
@@ -940,4 +375,4 @@ def main(): if __name__ == '__main__': - main() + main(sys.argv[1:]) diff --git a/llvm_tools/patch_manager_unittest.py b/llvm_tools/patch_manager_unittest.py index f74480c2..238fd781 100755 --- a/llvm_tools/patch_manager_unittest.py +++ b/llvm_tools/patch_manager_unittest.py @@ -6,190 +6,50 @@ """Unit tests when handling patches.""" import json -import os from pathlib import Path -import subprocess import tempfile from typing import Callable import unittest -import unittest.mock as mock +from unittest import mock -from failure_modes import FailureModes import patch_manager import patch_utils -from test_helpers import CallCountsToMockFunctions -from test_helpers import CreateTemporaryJsonFile -from test_helpers import WritePrettyJsonFile class PatchManagerTest(unittest.TestCase): """Test class when handling patches of packages.""" # Simulate behavior of 'os.path.isdir()' when the path is not a directory. - @mock.patch.object(os.path, 'isdir', return_value=False) + @mock.patch.object(Path, 'is_dir', return_value=False) def testInvalidDirectoryPassedAsCommandLineArgument(self, mock_isdir): - test_dir = '/some/path/that/is/not/a/directory' + src_dir = '/some/path/that/is/not/a/directory' + patch_metadata_file = '/some/path/that/is/not/a/file' # Verify the exception is raised when the command line argument for # '--filesdir_path' or '--src_path' is not a directory. - with self.assertRaises(ValueError) as err: - patch_manager.is_directory(test_dir) - - self.assertEqual(str(err.exception), 'Path is not a directory: ' - '%s' % test_dir) - - mock_isdir.assert_called_once() - - # Simulate the behavior of 'os.path.isdir()' when a path to a directory is - # passed as the command line argument for '--filesdir_path' or '--src_path'. - @mock.patch.object(os.path, 'isdir', return_value=True) - def testValidDirectoryPassedAsCommandLineArgument(self, mock_isdir): - test_dir = '/some/path/that/is/a/directory' - - self.assertEqual(patch_manager.is_directory(test_dir), test_dir) - + with self.assertRaises(ValueError): + patch_manager.main([ + '--src_path', src_dir, '--patch_metadata_file', patch_metadata_file + ]) mock_isdir.assert_called_once() # Simulate behavior of 'os.path.isfile()' when the patch metadata file is does # not exist. - @mock.patch.object(os.path, 'isfile', return_value=False) + @mock.patch.object(Path, 'is_file', return_value=False) def testInvalidPathToPatchMetadataFilePassedAsCommandLineArgument( self, mock_isfile): - - abs_path_to_patch_file = '/abs/path/to/PATCHES.json' + src_dir = '/some/path/that/is/not/a/directory' + patch_metadata_file = '/some/path/that/is/not/a/file' # Verify the exception is raised when the command line argument for - # '--patch_metadata_file' does not exist or is not a file. - with self.assertRaises(ValueError) as err: - patch_manager.is_patch_metadata_file(abs_path_to_patch_file) - - self.assertEqual( - str(err.exception), 'Invalid patch metadata file provided: ' - '%s' % abs_path_to_patch_file) - - mock_isfile.assert_called_once() - - # Simulate the behavior of 'os.path.isfile()' when the path to the patch - # metadata file exists and is a file. - @mock.patch.object(os.path, 'isfile', return_value=True) - def testPatchMetadataFileDoesNotEndInJson(self, mock_isfile): - abs_path_to_patch_file = '/abs/path/to/PATCHES' - - # Verify the exception is raises when the command line argument for - # '--patch_metadata_file' exists and is a file but does not end in - # '.json'. 
- with self.assertRaises(ValueError) as err: - patch_manager.is_patch_metadata_file(abs_path_to_patch_file) - - self.assertEqual( - str(err.exception), 'Patch metadata file does not end in ".json": ' - '%s' % abs_path_to_patch_file) - - mock_isfile.assert_called_once() - - # Simulate the behavior of 'os.path.isfile()' when the command line argument - # for '--patch_metadata_file' exists and is a file. - @mock.patch.object(os.path, 'isfile', return_value=True) - def testValidPatchMetadataFilePassedAsCommandLineArgument(self, mock_isfile): - abs_path_to_patch_file = '/abs/path/to/PATCHES.json' - - self.assertEqual( - patch_manager.is_patch_metadata_file(abs_path_to_patch_file), - '%s' % abs_path_to_patch_file) - - mock_isfile.assert_called_once() - - # Simulate behavior of 'os.path.isdir()' when the path to $FILESDIR - # does not exist. - @mock.patch.object(os.path, 'isdir', return_value=False) - def testInvalidPathToFilesDirWhenConstructingPathToPatch(self, mock_isdir): - abs_path_to_filesdir = '/abs/path/to/filesdir' - - rel_patch_path = 'cherry/fixes_stdout.patch' - - # Verify the exception is raised when the the absolute path to $FILESDIR of - # a package is not a directory. - with self.assertRaises(ValueError) as err: - patch_manager.GetPathToPatch(abs_path_to_filesdir, rel_patch_path) - - self.assertEqual( - str(err.exception), 'Invalid path to $FILESDIR provided: ' - '%s' % abs_path_to_filesdir) - - mock_isdir.assert_called_once() - - # Simulate behavior of 'os.path.isdir()' when the absolute path to the - # $FILESDIR of a package exists and is a directory. - @mock.patch.object(os.path, 'isdir', return_value=True) - # Simulate the behavior of 'os.path.isfile()' when the absolute path to the - # patch does not exist. - @mock.patch.object(os.path, 'isfile', return_value=False) - def testConstructedPathToPatchDoesNotExist(self, mock_isfile, mock_isdir): - abs_path_to_filesdir = '/abs/path/to/filesdir' - - rel_patch_path = 'cherry/fixes_stdout.patch' - - abs_patch_path = os.path.join(abs_path_to_filesdir, rel_patch_path) - - # Verify the exception is raised when the absolute path to the patch does - # not exist. - with self.assertRaises(ValueError) as err: - patch_manager.GetPathToPatch(abs_path_to_filesdir, rel_patch_path) - - self.assertEqual( - str(err.exception), 'The absolute path %s to the patch %s does not ' - 'exist' % (abs_patch_path, rel_patch_path)) - - mock_isdir.assert_called_once() - - mock_isfile.assert_called_once() - - # Simulate behavior of 'os.path.isdir()' when the absolute path to the - # $FILESDIR of a package exists and is a directory. - @mock.patch.object(os.path, 'isdir', return_value=True) - # Simulate behavior of 'os.path.isfile()' when the absolute path to the - # patch exists and is a file. - @mock.patch.object(os.path, 'isfile', return_value=True) - def testConstructedPathToPatchSuccessfully(self, mock_isfile, mock_isdir): - abs_path_to_filesdir = '/abs/path/to/filesdir' - - rel_patch_path = 'cherry/fixes_stdout.patch' - - abs_patch_path = os.path.join(abs_path_to_filesdir, rel_patch_path) - - self.assertEqual( - patch_manager.GetPathToPatch(abs_path_to_filesdir, rel_patch_path), - abs_patch_path) - - mock_isdir.assert_called_once() - + # '--filesdir_path' or '--src_path' is not a directory. 
+ with mock.patch.object(Path, 'is_dir', return_value=True): + with self.assertRaises(ValueError): + patch_manager.main([ + '--src_path', src_dir, '--patch_metadata_file', patch_metadata_file + ]) mock_isfile.assert_called_once() - def testSuccessfullyGetPatchMetadataForPatchWithNoMetadata(self): - expected_patch_metadata = 0, None, False - - test_patch = { - 'comment': 'Redirects output to stdout', - 'rel_patch_path': 'cherry/fixes_stdout.patch' - } - - self.assertEqual(patch_manager.GetPatchMetadata(test_patch), - expected_patch_metadata) - - def testSuccessfullyGetPatchMetdataForPatchWithSomeMetadata(self): - expected_patch_metadata = 0, 1000, False - - test_patch = { - 'comment': 'Redirects output to stdout', - 'rel_patch_path': 'cherry/fixes_stdout.patch', - 'version_range': { - 'until': 1000, - } - } - - self.assertEqual(patch_manager.GetPatchMetadata(test_patch), - expected_patch_metadata) - @mock.patch('builtins.print') def testRemoveOldPatches(self, _): """Can remove old patches from PATCHES.json.""" @@ -364,767 +224,42 @@ class PatchManagerTest(unittest.TestCase): patch_manager.GitBisectionCode.SKIP, ) - def testFailedToApplyPatchWhenInvalidSrcPathIsPassedIn(self): - src_path = '/abs/path/to/src' - - abs_patch_path = '/abs/path/to/filesdir/cherry/fixes_stdout.patch' - - # Verify the exception is raised when the absolute path to the unpacked - # sources of a package is not a directory. - with self.assertRaises(ValueError) as err: - patch_manager.ApplyPatch(src_path, abs_patch_path) - - self.assertEqual(str(err.exception), - 'Invalid src path provided: %s' % src_path) - - # Simulate behavior of 'os.path.isdir()' when the absolute path to the - # unpacked sources of the package is valid and exists. - @mock.patch.object(os.path, 'isdir', return_value=True) - def testFailedToApplyPatchWhenPatchPathIsInvalid(self, mock_isdir): - src_path = '/abs/path/to/src' - - abs_patch_path = '/abs/path/to/filesdir/cherry/fixes_stdout.patch' - - # Verify the exception is raised when the absolute path to the patch does - # not exist or is not a file. - with self.assertRaises(ValueError) as err: - patch_manager.ApplyPatch(src_path, abs_patch_path) - - self.assertEqual(str(err.exception), 'Invalid patch file provided: ' - '%s' % abs_patch_path) - - mock_isdir.assert_called_once() - - # Simulate behavior of 'os.path.isdir()' when the absolute path to the - # unpacked sources of the package is valid and exists. - @mock.patch.object(os.path, 'isdir', return_value=True) - @mock.patch.object(os.path, 'isfile', return_value=True) - # Simulate behavior of 'os.path.isfile()' when the absolute path to the - # patch exists and is a file. - @mock.patch.object(patch_manager, 'check_output') - def testFailedToApplyPatchInDryRun(self, mock_dry_run, mock_isfile, - mock_isdir): - - # Simulate behavior of 'subprocess.check_output()' when '--dry-run' - # fails on the applying patch. - def FailedToApplyPatch(test_patch_cmd): - # First argument is the return error code, the second argument is the - # command that was run, and the third argument is the output. 
- raise subprocess.CalledProcessError(1, test_patch_cmd, None) - - mock_dry_run.side_effect = FailedToApplyPatch - - src_path = '/abs/path/to/src' - - abs_patch_path = '/abs/path/to/filesdir/cherry/fixes_stdout.patch' - - self.assertEqual(patch_manager.ApplyPatch(src_path, abs_patch_path), False) - - mock_isdir.assert_called_once() - - mock_isfile.assert_called_once() - - mock_dry_run.assert_called_once() - - # Simulate behavior of 'os.path.isdir()' when the absolute path to the - # unpacked sources of the package is valid and exists. - @mock.patch.object(os.path, 'isdir', return_value=True) - @mock.patch.object(os.path, 'isfile', return_value=True) - # Simulate behavior of 'os.path.isfile()' when the absolute path to the - # patch exists and is a file. - @mock.patch.object(patch_manager, 'check_output') - def testSuccessfullyAppliedPatch(self, mock_dry_run, mock_isfile, - mock_isdir): - src_path = '/abs/path/to/src' - - abs_patch_path = '/abs/path/to/filesdir/cherry/fixes_stdout.patch' - - self.assertEqual(patch_manager.ApplyPatch(src_path, abs_patch_path), True) - - mock_isdir.assert_called_once() - - mock_isfile.assert_called_once() - - self.assertEqual(mock_dry_run.call_count, 2) - - def testFailedToUpdatePatchMetadataFileWhenPatchFileNotEndInJson(self): - patch = [{ - 'comment': 'Redirects output to stdout', - 'rel_patch_path': 'cherry/fixes_output.patch', - 'version_range': { - 'from': 10, - }, - }] - - abs_patch_path = '/abs/path/to/filesdir/PATCHES' - - # Verify the exception is raised when the absolute path to the patch - # metadata file does not end in '.json'. - with self.assertRaises(ValueError) as err: - patch_manager.UpdatePatchMetadataFile(abs_patch_path, patch) - - self.assertEqual(str(err.exception), 'File does not end in ".json": ' - '%s' % abs_patch_path) - - def testSuccessfullyUpdatedPatchMetadataFile(self): - test_updated_patch_metadata = [{ - 'comment': 'Redirects output to stdout', - 'rel_patch_path': 'cherry/fixes_output.patch', - 'version_range': { - 'from': 10, - } - }] - - expected_patch_metadata = { - 'comment': 'Redirects output to stdout', - 'rel_patch_path': 'cherry/fixes_output.patch', - 'version_range': { - 'from': 10, - } - } - - with CreateTemporaryJsonFile() as json_test_file: - patch_manager.UpdatePatchMetadataFile(json_test_file, - test_updated_patch_metadata) - - # Make sure the updated patch metadata was written into the temporary - # .json file. - with open(json_test_file) as patch_file: - patch_contents = json.load(patch_file) - - self.assertEqual(len(patch_contents), 1) - - self.assertDictEqual(patch_contents[0], expected_patch_metadata) - - @mock.patch.object(patch_manager, 'GetPathToPatch') - def testExceptionThrownWhenHandlingPatches(self, mock_get_path_to_patch): - filesdir_path = '/abs/path/to/filesdir' - - abs_patch_path = '/abs/path/to/filesdir/cherry/fixes_output.patch' - - rel_patch_path = 'cherry/fixes_output.patch' - - # Simulate behavior of 'GetPathToPatch()' when the absolute path to the - # patch does not exist. - def PathToPatchDoesNotExist(filesdir_path, rel_patch_path): - raise ValueError('The absolute path to %s does not exist' % - os.path.join(filesdir_path, rel_patch_path)) - - # Use the test function to simulate the behavior of 'GetPathToPatch()'. 
- mock_get_path_to_patch.side_effect = PathToPatchDoesNotExist - - test_patch_metadata = [{ - 'comment': 'Redirects output to stdout', - 'rel_patch_path': rel_patch_path, - 'version_range': { - 'from': 10, - } - }] - - with CreateTemporaryJsonFile() as json_test_file: - # Write the test patch metadata to the temporary .json file. - with open(json_test_file, 'w') as json_file: - WritePrettyJsonFile(test_patch_metadata, json_file) - - src_path = '/some/path/to/src' - - revision = 1000 - - # Verify the exception is raised when the absolute path to a patch does - # not exist. - with self.assertRaises(ValueError) as err: - patch_manager.HandlePatches(revision, json_test_file, filesdir_path, - src_path, FailureModes.FAIL) - - self.assertEqual(str(err.exception), - 'The absolute path to %s does not exist' % abs_patch_path) - - mock_get_path_to_patch.assert_called_once_with(filesdir_path, - rel_patch_path) - - @mock.patch.object(patch_manager, 'GetPathToPatch') - # Simulate behavior for 'ApplyPatch()' when an applicable patch failed to - # apply. - @mock.patch.object(patch_manager, 'ApplyPatch', return_value=False) - def testExceptionThrownOnAFailedPatchInFailMode(self, mock_apply_patch, - mock_get_path_to_patch): - filesdir_path = '/abs/path/to/filesdir' - - abs_patch_path = '/abs/path/to/filesdir/cherry/fixes_output.patch' - - rel_patch_path = 'cherry/fixes_output.patch' - - # Simulate behavior for 'GetPathToPatch()' when successfully constructed the - # absolute path to the patch and the patch exists. - mock_get_path_to_patch.return_value = abs_patch_path - - test_patch_metadata = [{ - 'comment': 'Redirects output to stdout', - 'rel_patch_path': rel_patch_path, - 'version_range': { - 'from': 1000, - }, - }] - - with CreateTemporaryJsonFile() as json_test_file: - # Write the test patch metadata to the temporary .json file. - with open(json_test_file, 'w') as json_file: - WritePrettyJsonFile(test_patch_metadata, json_file) - - src_path = '/some/path/to/src' - - revision = 1000 - - patch_name = 'fixes_output.patch' - - # Verify the exception is raised when the mode is 'fail' and an applicable - # patch fails to apply. 
- with self.assertRaises(ValueError) as err: - patch_manager.HandlePatches(revision, json_test_file, filesdir_path, - src_path, FailureModes.FAIL) - - self.assertEqual(str(err.exception), - 'Failed to apply patch: %s' % patch_name) - - mock_get_path_to_patch.assert_called_once_with(filesdir_path, - rel_patch_path) - - mock_apply_patch.assert_called_once_with(src_path, abs_patch_path) - - @mock.patch.object(patch_manager, 'GetPathToPatch') - @mock.patch.object(patch_manager, 'ApplyPatch') - def testSomePatchesFailedToApplyInContinueMode(self, mock_apply_patch, - mock_get_path_to_patch): - - test_patch_1 = { - 'comment': 'Redirects output to stdout', - 'rel_patch_path': 'cherry/fixes_output.patch', - 'version_range': { - 'from': 1000, - 'until': 1250 - } - } - - test_patch_2 = { - 'comment': 'Fixes input', - 'rel_patch_path': 'cherry/fixes_input.patch', - 'version_range': { - 'from': 1000 - } - } - - test_patch_3 = { - 'comment': 'Adds a warning', - 'rel_patch_path': 'add_warning.patch', - 'version_range': { - 'from': 750, - 'until': 1500 - } - } - - test_patch_4 = { - 'comment': 'Adds a helper function', - 'rel_patch_path': 'add_helper.patch', - 'version_range': { - 'from': 20, - 'until': 900 - } - } - - test_patch_metadata = [ - test_patch_1, test_patch_2, test_patch_3, test_patch_4 - ] - - abs_path_to_filesdir = '/abs/path/to/filesdir' - - # Simulate behavior for 'GetPathToPatch()' when successfully constructed the - # absolute path to the patch and the patch exists. - @CallCountsToMockFunctions - def MultipleCallsToGetPatchPath(call_count, filesdir_path, rel_patch_path): - self.assertEqual(filesdir_path, abs_path_to_filesdir) - - if call_count < 4: - self.assertEqual(rel_patch_path, - test_patch_metadata[call_count]['rel_patch_path']) - - return os.path.join(abs_path_to_filesdir, - test_patch_metadata[call_count]['rel_patch_path']) - - assert False, 'Unexpectedly called more than 4 times.' - - # Simulate behavior for 'ApplyPatch()' when applying multiple applicable - # patches. - @CallCountsToMockFunctions - def MultipleCallsToApplyPatches(call_count, _src_path, path_to_patch): - if call_count < 3: - self.assertEqual( - path_to_patch, - os.path.join(abs_path_to_filesdir, - test_patch_metadata[call_count]['rel_patch_path'])) - - # Simulate that the first patch successfully applied. - return call_count == 0 - - # 'ApplyPatch()' was called more times than expected (3 times). - assert False, 'Unexpectedly called more than 3 times.' - - # Use test functions to simulate behavior. - mock_get_path_to_patch.side_effect = MultipleCallsToGetPatchPath - mock_apply_patch.side_effect = MultipleCallsToApplyPatches - - expected_applied_patches = ['fixes_output.patch'] - expected_failed_patches = ['fixes_input.patch', 'add_warning.patch'] - expected_non_applicable_patches = ['add_helper.patch'] - - expected_patch_info_dict = { - 'applied_patches': expected_applied_patches, - 'failed_patches': expected_failed_patches, - 'non_applicable_patches': expected_non_applicable_patches, - 'disabled_patches': [], - 'removed_patches': [], - 'modified_metadata': None - } - - with CreateTemporaryJsonFile() as json_test_file: - # Write the test patch metadata to the temporary .json file. 
- with open(json_test_file, 'w') as json_file: - WritePrettyJsonFile(test_patch_metadata, json_file) - - src_path = '/some/path/to/src/' - - revision = 1000 - - patch_info = patch_manager.HandlePatches(revision, json_test_file, - abs_path_to_filesdir, src_path, - FailureModes.CONTINUE) - - self.assertDictEqual(patch_info._asdict(), expected_patch_info_dict) - - self.assertEqual(mock_get_path_to_patch.call_count, 4) - - self.assertEqual(mock_apply_patch.call_count, 3) - - @mock.patch.object(patch_manager, 'GetPathToPatch') - @mock.patch.object(patch_manager, 'ApplyPatch') - def testSomePatchesAreDisabled(self, mock_apply_patch, - mock_get_path_to_patch): - - test_patch_1 = { - 'comment': 'Redirects output to stdout', - 'rel_patch_path': 'cherry/fixes_output.patch', - 'version_range': { - 'from': 1000, - 'until': 1190 - } - } - - test_patch_2 = { - 'comment': 'Fixes input', - 'rel_patch_path': 'cherry/fixes_input.patch', - 'version_range': { - 'from': 1000 - } - } - - test_patch_3 = { - 'comment': 'Adds a warning', - 'rel_patch_path': 'add_warning.patch', - 'version_range': { - 'from': 750, - 'until': 1500 - } - } - - test_patch_4 = { - 'comment': 'Adds a helper function', - 'rel_patch_path': 'add_helper.patch', - 'version_range': { - 'from': 20, - 'until': 2000 - } - } - - test_patch_metadata = [ - test_patch_1, test_patch_2, test_patch_3, test_patch_4 - ] - - abs_path_to_filesdir = '/abs/path/to/filesdir' - - # Simulate behavior for 'GetPathToPatch()' when successfully constructed the - # absolute path to the patch and the patch exists. - @CallCountsToMockFunctions - def MultipleCallsToGetPatchPath(call_count, filesdir_path, rel_patch_path): - self.assertEqual(filesdir_path, abs_path_to_filesdir) - - if call_count < 4: - self.assertEqual(rel_patch_path, - test_patch_metadata[call_count]['rel_patch_path']) - - return os.path.join(abs_path_to_filesdir, - test_patch_metadata[call_count]['rel_patch_path']) - - # 'GetPathToPatch()' was called more times than expected (4 times). - assert False, 'Unexpectedly called more than 4 times.' - - # Simulate behavior for 'ApplyPatch()' when applying multiple applicable - # patches. - @CallCountsToMockFunctions - def MultipleCallsToApplyPatches(call_count, _src_path, path_to_patch): - if call_count < 3: - self.assertEqual( - path_to_patch, - os.path.join(abs_path_to_filesdir, - test_patch_metadata[call_count + - 1]['rel_patch_path'])) - - # Simulate that the second patch applied successfully. - return call_count == 1 - - # 'ApplyPatch()' was called more times than expected (3 times). - assert False, 'Unexpectedly called more than 3 times.' - - # Use test functions to simulate behavior. - mock_get_path_to_patch.side_effect = MultipleCallsToGetPatchPath - mock_apply_patch.side_effect = MultipleCallsToApplyPatches - - expected_applied_patches = ['add_warning.patch'] - expected_failed_patches = ['fixes_input.patch', 'add_helper.patch'] - expected_disabled_patches = ['fixes_input.patch', 'add_helper.patch'] - expected_non_applicable_patches = ['fixes_output.patch'] - - # Assigned 'None' for now, but it is expected that the patch metadata file - # will be modified, so the 'expected_patch_info_dict's' value for the - # key 'modified_metadata' will get updated to the temporary .json file once - # the file is created. 
- expected_modified_metadata_file = None - - expected_patch_info_dict = { - 'applied_patches': expected_applied_patches, - 'failed_patches': expected_failed_patches, - 'non_applicable_patches': expected_non_applicable_patches, - 'disabled_patches': expected_disabled_patches, - 'removed_patches': [], - 'modified_metadata': expected_modified_metadata_file - } - - with CreateTemporaryJsonFile() as json_test_file: - # Write the test patch metadata to the temporary .json file. - with open(json_test_file, 'w') as json_file: - WritePrettyJsonFile(test_patch_metadata, json_file) - - expected_patch_info_dict['modified_metadata'] = json_test_file - - src_path = '/some/path/to/src/' - - revision = 1200 - - patch_info = patch_manager.HandlePatches(revision, json_test_file, - abs_path_to_filesdir, src_path, - FailureModes.DISABLE_PATCHES) - - self.assertDictEqual(patch_info._asdict(), expected_patch_info_dict) - - # 'test_patch_1' and 'test_patch_3' were not modified/disabled, so their - # dictionary is the same, but 'test_patch_2' and 'test_patch_4' were - # disabled, so their 'until' would be set to 1200, which was the - # value passed into 'HandlePatches()' for the 'svn_version'. - test_patch_2['version_range']['until'] = 1200 - test_patch_4['version_range']['until'] = 1200 - - expected_json_file = [ - test_patch_1, test_patch_2, test_patch_3, test_patch_4 + @mock.patch('patch_utils.git_clean_context', mock.MagicMock) + def testUpdateVersionRanges(self): + """Test the UpdateVersionRanges function.""" + with tempfile.TemporaryDirectory( + prefix='patch_manager_unittest') as dirname: + dirpath = Path(dirname) + patches = [ + patch_utils.PatchEntry(workdir=dirpath, + rel_patch_path='x.patch', + metadata=None, + platforms=None, + version_range={ + 'from': 0, + 'until': 2, + }), + patch_utils.PatchEntry(workdir=dirpath, + rel_patch_path='y.patch', + metadata=None, + platforms=None, + version_range={ + 'from': 0, + 'until': 2, + }), ] - - # Make sure the updated patch metadata was written into the temporary - # .json file. - with open(json_test_file) as patch_file: - new_json_file_contents = json.load(patch_file) - - self.assertListEqual(new_json_file_contents, expected_json_file) - - self.assertEqual(mock_get_path_to_patch.call_count, 4) - - self.assertEqual(mock_apply_patch.call_count, 3) - - @mock.patch.object(patch_manager, 'GetPathToPatch') - @mock.patch.object(patch_manager, 'ApplyPatch') - def testSomePatchesAreRemoved(self, mock_apply_patch, - mock_get_path_to_patch): - # For the 'remove_patches' mode, this patch is expected to be in the - # 'non_applicable_patches' list and 'removed_patches' list because - # the 'svn_version' (1500) >= 'until' (1190). - test_patch_1 = { - 'comment': 'Redirects output to stdout', - 'rel_patch_path': 'cherry/fixes_output.patch', - 'version_range': { - 'from': 1000, - 'until': 1190 - } - } - - # For the 'remove_patches' mode, this patch is expected to be in the - # 'applicable_patches' list (which is the list that the .json file will be - # updated with) because the 'svn_version' < 'inf' (this patch does not have - # an 'until' value which implies 'until' == 'inf'). - test_patch_2 = { - 'comment': 'Fixes input', - 'rel_patch_path': 'cherry/fixes_input.patch', - 'version_range': { - 'from': 1000 - } - } - - # For the 'remove_patches' mode, this patch is expected to be in the - # 'non_applicable_patches' list and 'removed_patches' list because - # the 'svn_version' (1500) >= 'until' (1500). 
- test_patch_3 = { - 'comment': 'Adds a warning', - 'rel_patch_path': 'add_warning.patch', - 'version_range': { - 'from': 750, - 'until': 1500 - } - } - - # For the 'remove_patches' mode, this patch is expected to be in the - # 'non_applicable_patches' list and 'removed_patches' list because - # the 'svn_version' (1500) >= 'until' (1400). - test_patch_4 = { - 'comment': 'Adds a helper function', - 'rel_patch_path': 'add_helper.patch', - 'version_range': { - 'from': 20, - 'until': 1400 - } - } - - test_patch_metadata = [ - test_patch_1, test_patch_2, test_patch_3, test_patch_4 - ] - - abs_path_to_filesdir = '/abs/path/to/filesdir' - - # Simulate behavior for 'GetPathToPatch()' when successfully constructed the - # absolute path to the patch and the patch exists. - @CallCountsToMockFunctions - def MultipleCallsToGetPatchPath(call_count, filesdir_path, rel_patch_path): - self.assertEqual(filesdir_path, abs_path_to_filesdir) - - if call_count < 4: - self.assertEqual(rel_patch_path, - test_patch_metadata[call_count]['rel_patch_path']) - - return os.path.join(abs_path_to_filesdir, - test_patch_metadata[call_count]['rel_patch_path']) - - assert False, 'Unexpectedly called more than 4 times.' - - # Use the test function to simulate behavior of 'GetPathToPatch()'. - mock_get_path_to_patch.side_effect = MultipleCallsToGetPatchPath - - expected_applied_patches = [] - expected_failed_patches = [] - expected_disabled_patches = [] - expected_non_applicable_patches = [ - 'fixes_output.patch', 'add_warning.patch', 'add_helper.patch' - ] - expected_removed_patches = [ - '/abs/path/to/filesdir/cherry/fixes_output.patch', - '/abs/path/to/filesdir/add_warning.patch', - '/abs/path/to/filesdir/add_helper.patch' - ] - - # Assigned 'None' for now, but it is expected that the patch metadata file - # will be modified, so the 'expected_patch_info_dict's' value for the - # key 'modified_metadata' will get updated to the temporary .json file once - # the file is created. - expected_modified_metadata_file = None - - expected_patch_info_dict = { - 'applied_patches': expected_applied_patches, - 'failed_patches': expected_failed_patches, - 'non_applicable_patches': expected_non_applicable_patches, - 'disabled_patches': expected_disabled_patches, - 'removed_patches': expected_removed_patches, - 'modified_metadata': expected_modified_metadata_file - } - - with CreateTemporaryJsonFile() as json_test_file: - # Write the test patch metadata to the temporary .json file. - with open(json_test_file, 'w') as json_file: - WritePrettyJsonFile(test_patch_metadata, json_file) - - expected_patch_info_dict['modified_metadata'] = json_test_file - - abs_path_to_filesdir = '/abs/path/to/filesdir' - - src_path = '/some/path/to/src/' - - revision = 1500 - - patch_info = patch_manager.HandlePatches(revision, json_test_file, - abs_path_to_filesdir, src_path, - FailureModes.REMOVE_PATCHES) - - self.assertDictEqual(patch_info._asdict(), expected_patch_info_dict) - - # 'test_patch_2' was an applicable patch, so this patch will be the only - # patch that is in temporary .json file. The other patches were not - # applicable (they failed the applicable check), so they will not be in - # the .json file. - expected_json_file = [test_patch_2] - - # Make sure the updated patch metadata was written into the temporary - # .json file. 
- with open(json_test_file) as patch_file: - new_json_file_contents = json.load(patch_file) - - self.assertListEqual(new_json_file_contents, expected_json_file) - - self.assertEqual(mock_get_path_to_patch.call_count, 4) - - mock_apply_patch.assert_not_called() - - @mock.patch.object(patch_manager, 'GetPathToPatch') - @mock.patch.object(patch_manager, 'ApplyPatch') - def testSuccessfullyDidNotRemoveAFuturePatch(self, mock_apply_patch, - mock_get_path_to_patch): - - # For the 'remove_patches' mode, this patch is expected to be in the - # 'non_applicable_patches' list and 'removed_patches' list because - # the 'svn_version' (1200) >= 'until' (1190). - test_patch_1 = { - 'comment': 'Redirects output to stdout', - 'rel_patch_path': 'cherry/fixes_output.patch', - 'version_range': { - 'from': 1000, - 'until': 1190 - } - } - - # For the 'remove_patches' mode, this patch is expected to be in the - # 'applicable_patches' list (which is the list that the .json file will be - # updated with) because the 'svn_version' < 'inf' (this patch does not have - # an 'until' value which implies 'until' == 'inf'). - test_patch_2 = { - 'comment': 'Fixes input', - 'rel_patch_path': 'cherry/fixes_input.patch', - 'version_range': { - 'from': 1000, - } - } - - # For the 'remove_patches' mode, this patch is expected to be in the - # 'applicable_patches' list because 'svn_version' >= 'from' and - # 'svn_version' < 'until'. - test_patch_3 = { - 'comment': 'Adds a warning', - 'rel_patch_path': 'add_warning.patch', - 'version_range': { - 'from': 750, - 'until': 1500 - } - } - - # For the 'remove_patches' mode, this patch is expected to be in the - # 'applicable_patches' list because the patch is from the future (e.g. - # 'from' > 'svn_version' (1200), so it should NOT be removed. - test_patch_4 = { - 'comment': 'Adds a helper function', - 'rel_patch_path': 'add_helper.patch', - 'version_range': { - 'from': 1600, - 'until': 2000 - } - } - - test_patch_metadata = [ - test_patch_1, test_patch_2, test_patch_3, test_patch_4 - ] - - abs_path_to_filesdir = '/abs/path/to/filesdir' - - # Simulate behavior for 'GetPathToPatch()' when successfully constructed the - # absolute path to the patch and the patch exists. - @CallCountsToMockFunctions - def MultipleCallsToGetPatchPath(call_count, filesdir_path, rel_patch_path): - self.assertEqual(filesdir_path, abs_path_to_filesdir) - - if call_count < 4: - self.assertEqual(rel_patch_path, - test_patch_metadata[call_count]['rel_patch_path']) - - return os.path.join(abs_path_to_filesdir, - test_patch_metadata[call_count]['rel_patch_path']) - - # 'GetPathToPatch()' was called more times than expected (4 times). - assert False, 'Unexpectedly called more than 4 times.' - - # Use the test function to simulate behavior of 'GetPathToPatch()'. - mock_get_path_to_patch.side_effect = MultipleCallsToGetPatchPath - - expected_applied_patches = [] - expected_failed_patches = [] - expected_disabled_patches = [] - - # 'add_helper.patch' is still a 'non applicable' patch meaning it does not - # apply in revision 1200 but it will NOT be removed because it is a future - # patch. - expected_non_applicable_patches = [ - 'fixes_output.patch', 'add_helper.patch' - ] - expected_removed_patches = [ - '/abs/path/to/filesdir/cherry/fixes_output.patch' - ] - - # Assigned 'None' for now, but it is expected that the patch metadata file - # will be modified, so the 'expected_patch_info_dict's' value for the - # key 'modified_metadata' will get updated to the temporary .json file once - # the file is created. 
- expected_modified_metadata_file = None - - expected_patch_info_dict = { - 'applied_patches': expected_applied_patches, - 'failed_patches': expected_failed_patches, - 'non_applicable_patches': expected_non_applicable_patches, - 'disabled_patches': expected_disabled_patches, - 'removed_patches': expected_removed_patches, - 'modified_metadata': expected_modified_metadata_file - } - - with CreateTemporaryJsonFile() as json_test_file: - # Write the test patch metadata to the temporary .json file. - with open(json_test_file, 'w') as json_file: - WritePrettyJsonFile(test_patch_metadata, json_file) - - expected_patch_info_dict['modified_metadata'] = json_test_file - - src_path = '/some/path/to/src/' - - revision = 1200 - - patch_info = patch_manager.HandlePatches(revision, json_test_file, - abs_path_to_filesdir, src_path, - FailureModes.REMOVE_PATCHES) - - self.assertDictEqual(patch_info._asdict(), expected_patch_info_dict) - - # 'test_patch_2' was an applicable patch, so this patch will be the only - # patch that is in temporary .json file. The other patches were not - # applicable (they failed the applicable check), so they will not be in - # the .json file. - expected_json_file = [test_patch_2, test_patch_3, test_patch_4] - - # Make sure the updated patch metadata was written into the temporary - # .json file. - with open(json_test_file) as patch_file: - new_json_file_contents = json.load(patch_file) - - self.assertListEqual(new_json_file_contents, expected_json_file) - - self.assertEqual(mock_get_path_to_patch.call_count, 4) - - mock_apply_patch.assert_not_called() + patches[0].apply = mock.MagicMock(return_value=patch_utils.PatchResult( + succeeded=False, failed_hunks={'a/b/c': []})) + patches[1].apply = mock.MagicMock(return_value=patch_utils.PatchResult( + succeeded=True)) + results = patch_manager.UpdateVersionRangesWithEntries( + 1, dirpath, patches) + # We should only have updated the version_range of the first patch, + # as that one failed to apply. + self.assertEqual(len(results), 1) + self.assertEqual(results[0].version_range, {'from': 0, 'until': 1}) + self.assertEqual(patches[0].version_range, {'from': 0, 'until': 1}) + self.assertEqual(patches[1].version_range, {'from': 0, 'until': 2}) if __name__ == '__main__': diff --git a/llvm_tools/patch_utils.py b/llvm_tools/patch_utils.py index 003990be..4c602027 100644 --- a/llvm_tools/patch_utils.py +++ b/llvm_tools/patch_utils.py @@ -232,10 +232,18 @@ class PatchEntry: if not extra_args: extra_args = [] # Cmd to apply a patch in the src unpack path. 
+ abs_patch_path = self.patch_path().absolute() + if not abs_patch_path.is_file(): + raise RuntimeError(f'Cannot apply: patch {abs_patch_path} is not a file') cmd = [ - 'patch', '-d', - root_dir.absolute(), '-f', '-p1', '--no-backup-if-mismatch', '-i', - self.patch_path().absolute() + 'patch', + '-d', + root_dir.absolute(), + '-f', + '-p1', + '--no-backup-if-mismatch', + '-i', + abs_patch_path, ] + extra_args try: subprocess.run(cmd, encoding='utf-8', check=True, stdout=subprocess.PIPE) diff --git a/llvm_tools/patch_utils_unittest.py b/llvm_tools/patch_utils_unittest.py index f73ee751..04541ae0 100755 --- a/llvm_tools/patch_utils_unittest.py +++ b/llvm_tools/patch_utils_unittest.py @@ -10,7 +10,7 @@ from pathlib import Path import subprocess import tempfile import unittest -import unittest.mock as mock +from unittest import mock import patch_utils as pu @@ -56,6 +56,14 @@ class TestPatchUtils(unittest.TestCase): e = pu.PatchEntry.from_dict(TestPatchUtils._mock_dir(), d) self.assertEqual(d, e.to_dict()) + def test_patch_path(self): + """Test that we can get the full path from a PatchEntry.""" + d = TestPatchUtils._default_json_dict() + with mock.patch.object(Path, 'is_dir', return_value=True): + entry = pu.PatchEntry.from_dict(Path('/home/dir'), d) + self.assertEqual(entry.patch_path(), + Path('/home/dir') / d['rel_patch_path']) + def test_can_patch_version(self): """Test that patch application based on version is correct.""" base_dict = TestPatchUtils._default_json_dict() @@ -134,13 +142,22 @@ class TestPatchUtils(unittest.TestCase): self.assertEqual(len(hunk_list1), 1) self.assertEqual(len(hunk_list2), 2) + def test_apply_when_patch_nonexistent(self): + """Test that we error out when we try to apply a non-existent patch.""" + src_dir = TestPatchUtils._mock_dir('somewhere/llvm-project') + patch_dir = TestPatchUtils._mock_dir() + e = pu.PatchEntry.from_dict(patch_dir, TestPatchUtils._default_json_dict()) + with mock.patch('subprocess.run', mock.MagicMock()): + self.assertRaises(RuntimeError, lambda: e.apply(src_dir)) + def test_apply_success(self): """Test that we can call apply.""" src_dir = TestPatchUtils._mock_dir('somewhere/llvm-project') patch_dir = TestPatchUtils._mock_dir() e = pu.PatchEntry.from_dict(patch_dir, TestPatchUtils._default_json_dict()) - with mock.patch('subprocess.run', mock.MagicMock()): - result = e.apply(src_dir) + with mock.patch('pathlib.Path.is_file', return_value=True): + with mock.patch('subprocess.run', mock.MagicMock()): + result = e.apply(src_dir) self.assertTrue(result.succeeded) def test_parse_failed_patch_output(self): @@ -162,12 +179,15 @@ Hunk #1 SUCCEEDED at 96 with fuzz 1. def test_is_git_dirty(self): """Test if a git directory has uncommitted changes.""" - with tempfile.TemporaryDirectory( - prefix='patch_utils_unittest') as dirname: + with tempfile.TemporaryDirectory(prefix='patch_utils_unittest') as dirname: dirpath = Path(dirname) def _run_h(cmd): - subprocess.run(cmd, cwd=dirpath, stdout=subprocess.DEVNULL, check=True) + subprocess.run(cmd, + cwd=dirpath, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + check=True) _run_h(['git', 'init']) self.assertFalse(pu.is_git_dirty(dirpath)) -- cgit v1.2.3 From c37602934cbc1280c50e7cdc54ac15ab72e5d1ed Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead <ajordanr@google.com> Date: Thu, 14 Jul 2022 22:33:03 +0000 Subject: llvm_tools: Remove unused llvm_patch_management.py This code is not used by anything on the ChromeOS side, and is just a wrapper around patch_manager.py itself. 
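As a purely illustrative sketch (none of this code exists in the tree), the wrapper's
job could be folded into an argparse sub-command along these lines, which is roughly
the direction suggested below:

```
import argparse
import sys


def main(argv):
  parser = argparse.ArgumentParser(description='Patch management.')
  subparsers = parser.add_subparsers(dest='command', required=True)
  # Hypothetical sub-command standing in for the old wrapper script.
  auto = subparsers.add_parser(
      'auto-fill', help='derive patch_manager arguments from package names')
  auto.add_argument('--packages', nargs='+', default=['sys-devel/llvm'])
  auto.set_defaults(run=lambda args: print('would patch:', args.packages))
  args = parser.parse_args(argv)
  args.run(args)


if __name__ == '__main__':
  main(sys.argv[1:])
```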
The README implies that it auto-fills the command line arguments,
but this is somewhat unnecessary, and if we want to add support
for this workflow, it should likely be in patch_manager.py itself
as a subcommand.

This commit removes llvm_patch_management.py and
llvm_patch_management_unittest.py, and also removes their section
from the README.md. Also removes other incorrect flags from the
README.md.

BUG=b:188465085
TEST=./run_tests_for.py llvm_tools/*

Change-Id: Ibf5f77977f70a8b7334e111a92a8fec5be462201
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3765939
Reviewed-by: Manoj Gupta <manojgupta@chromium.org>
Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com>
Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com>
Reviewed-by: George Burgess <gbiv@chromium.org>
---
 llvm_tools/README.md                         |  34 ---
 llvm_tools/llvm_patch_management.py          | 281 ------------------------
 llvm_tools/llvm_patch_management_unittest.py | 311 ---------------------------
 3 files changed, 626 deletions(-)
 delete mode 100755 llvm_tools/llvm_patch_management.py
 delete mode 100755 llvm_tools/llvm_patch_management_unittest.py

diff --git a/llvm_tools/README.md b/llvm_tools/README.md
index 74fad6c9..86a4b778 100644
--- a/llvm_tools/README.md
+++ b/llvm_tools/README.md
@@ -124,35 +124,6 @@ $ ./update_chromeos_llvm_hash.py \
     --failure_mode disable_patches
 ```
 
-## `llvm_patch_management.py`
-
-### Usage
-
-This script is used to test whether a newly added patch in a package's patch
-metadata file would apply successfully. The script is also used to make sure
-the patches of a package applied successfully, failed, etc., depending on the
-failure mode specified.
-
-An example of using this script is when multiple packages would like to be
-tested when a new patch was added to their patch metadata file.
-
-For example:
-
-```
-$ ./llvm_patch_management.py \
-  --packages sys-devel/llvm sys-libs/compiler-rt \
-  --failure_mode continue
-```
-
-The above example tests sys-devel/llvm and sys-libs/compiler-rt patch metadata
-file with the failure mode `continue`.
- -For help with the command line arguments of the script, run: - -``` -$ ./llvm_patch_management.py --help -``` - ## `patch_manager.py` ### Usage @@ -172,7 +143,6 @@ For example, to see all the failed (if any) patches: $ ./patch_manager.py \ --svn_version 367622 \ --patch_metadata_file /abs/path/to/patch/file \ - --filesdir_path /abs/path/to/$FILESDIR \ --src_path /abs/path/to/src/tree \ --failure_mode continue ``` @@ -183,7 +153,6 @@ For example, to disable all patches that failed to apply: $ ./patch_manager.py \ --svn_version 367622 \ --patch_metadata_file /abs/path/to/patch/file \ - --filesdir_path /abs/path/to/$FILESDIR \ --src_path /abs/path/to/src/tree \ --failure_mode disable_patches ``` @@ -194,7 +163,6 @@ For example, to remove all patches that no longer apply: $ ./patch_manager.py \ --svn_version 367622 \ --patch_metadata_file /abs/path/to/patch/file \ - --filesdir_path /abs/path/to/$FILESDIR \ --src_path /abs/path/to/src/tree \ --failure_mode remove_patches ``` @@ -205,7 +173,6 @@ For example, to bisect a failing patch and stop at the first bisected patch: $ ./patch_manager.py \ --svn_version 367622 \ --patch_metadata_file /abs/path/to/patch/file \ - --filesdir_path /abs/path/to/$FILESDIR \ --src_path /abs/path/to/src/tree \ --failure_mode bisect_patches \ --good_svn_version 365631 @@ -218,7 +185,6 @@ the failed patches: $ ./patch_manager.py \ --svn_version 367622 \ --patch_metadata_file /abs/path/to/patch/file \ - --filesdir_path /abs/path/to/$FILESDIR \ --src_path /abs/path/to/src/tree \ --failure_mode bisect_patches \ --good_svn_version 365631 \ diff --git a/llvm_tools/llvm_patch_management.py b/llvm_tools/llvm_patch_management.py deleted file mode 100755 index 46ddb867..00000000 --- a/llvm_tools/llvm_patch_management.py +++ /dev/null @@ -1,281 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. -# -# pylint: disable=global-statement - -"""Creates the arguments for the patch manager for LLVM.""" - -from __future__ import print_function - -import argparse -import os - -import chroot -from failure_modes import FailureModes -import get_llvm_hash -import patch_manager -import patch_utils -import subprocess_helpers - - -# If set to `True`, then the contents of `stdout` after executing a command will -# be displayed to the terminal. -verbose = False - - -def GetCommandLineArgs(): - """Parses the commandline for the optional commandline arguments. - - Returns: - An argument parser object that contains all the commandline arguments. - """ - - # Default path to the chroot if a path is not specified. - cros_root = os.path.expanduser('~') - cros_root = os.path.join(cros_root, 'chromiumos') - - # Create parser and add optional command-line arguments. - parser = argparse.ArgumentParser( - description='Patch management for packages.') - - # Add argument for a specific chroot path. - parser.add_argument( - '--chroot_path', - type=patch_manager.is_directory, - default=cros_root, - help='the absolute path to the chroot (default: %(default)s)') - - # Add argument for which packages to manage their patches. - parser.add_argument( - '--packages', - required=False, - nargs='+', - default=['sys-devel/llvm'], - help='the packages to manage their patches (default: %(default)s)') - - # Add argument for whether to display command contents to `stdout`. 
- parser.add_argument('--verbose', - action='store_true', - help='display contents of a command to the terminal ' - '(default: %(default)s)') - - # Add argument for the LLVM version to use for patch management. - parser.add_argument( - '--llvm_version', - type=int, - help='the LLVM version to use for patch management. Alternatively, you ' - 'can pass "google3" or "google3-unstable". (Default: "google3")') - - # Add argument for the mode of the patch management when handling patches. - parser.add_argument( - '--failure_mode', - default=FailureModes.FAIL.value, - choices=[FailureModes.FAIL.value, FailureModes.CONTINUE.value, - FailureModes.DISABLE_PATCHES.value, - FailureModes.REMOVE_PATCHES.value], - help='the mode of the patch manager when handling failed patches ' \ - '(default: %(default)s)') - - # Add argument for the patch metadata file in $FILESDIR of LLVM. - parser.add_argument( - '--patch_metadata_file', - default='PATCHES.json', - help='the .json file in $FILESDIR that has all the patches and their ' - 'metadata if applicable (default: %(default)s)') - - # Parse the command line. - args_output = parser.parse_args() - - global verbose - - verbose = args_output.verbose - - unique_packages = list(set(args_output.packages)) - - # Duplicate packages were passed into the command line - if len(unique_packages) != len(args_output.packages): - raise ValueError('Duplicate packages were passed in: %s' % - ' '.join(args_output.packages)) - - args_output.packages = unique_packages - - return args_output - - -def GetPathToFilesDirectory(chroot_path, package): - """Gets the absolute path to $FILESDIR of the package. - - Args: - chroot_path: The absolute path to the chroot. - package: The package to find its absolute path to $FILESDIR. - - Returns: - The absolute path to $FILESDIR. - - Raises: - ValueError: An invalid chroot path has been provided. - """ - - if not os.path.isdir(chroot_path): - raise ValueError('Invalid chroot provided: %s' % chroot_path) - - # Get the absolute chroot path to the ebuild. - chroot_ebuild_path = subprocess_helpers.ChrootRunCommand( - chroot_path, ['equery', 'w', package], verbose=verbose) - - # Get the absolute chroot path to $FILESDIR's parent directory. - filesdir_parent_path = os.path.dirname(chroot_ebuild_path.strip()) - - # Get the relative path to $FILESDIR's parent directory. - rel_path = _GetRelativePathOfChrootPath(filesdir_parent_path) - - # Construct the absolute path to the package's 'files' directory. - return os.path.join(chroot_path, rel_path, 'files/') - - -def _GetRelativePathOfChrootPath(chroot_path): - """Gets the relative path of the chroot path passed in. - - Args: - chroot_path: The chroot path to get its relative path. - - Returns: - The relative path after '/mnt/host/source/'. - - Raises: - ValueError: The prefix of 'chroot_path' did not match '/mnt/host/source/'. - """ - - chroot_prefix = '/mnt/host/source/' - - if not chroot_path.startswith(chroot_prefix): - raise ValueError('Invalid prefix for the chroot path: %s' % chroot_path) - - return chroot_path[len(chroot_prefix):] - - -def _CheckPatchMetadataPath(patch_metadata_path): - """Checks that the patch metadata path is valid. - - Args: - patch_metadata_path: The absolute path to the .json file that has the - patches and their metadata. - - Raises: - ValueError: The file does not exist or the file does not end in '.json'. 
- """ - - if not os.path.isfile(patch_metadata_path): - raise ValueError('Invalid file provided: %s' % patch_metadata_path) - - if not patch_metadata_path.endswith('.json'): - raise ValueError('File does not end in ".json": %s' % patch_metadata_path) - - -def _MoveSrcTreeHEADToGitHash(src_path, git_hash): - """Moves HEAD to 'git_hash'.""" - - move_head_cmd = ['git', '-C', src_path, 'checkout', git_hash] - - subprocess_helpers.ExecCommandAndCaptureOutput(move_head_cmd, - verbose=verbose) - - -def UpdatePackagesPatchMetadataFile(chroot_path, svn_version, - patch_metadata_file, packages, mode): - """Updates the packages metadata file. - - Args: - chroot_path: The absolute path to the chroot. - svn_version: The version to use for patch management. - patch_metadata_file: The patch metadta file where all the patches and - their metadata are. - packages: All the packages to update their patch metadata file. - mode: The mode for the patch manager to use when an applicable patch - fails to apply. - Ex: 'FailureModes.FAIL' - - Returns: - A dictionary where the key is the package name and the value is a dictionary - that has information on the patches. - """ - - # A dictionary where the key is the package name and the value is a dictionary - # that has information on the patches. - package_info = {} - - llvm_hash = get_llvm_hash.LLVMHash() - - with llvm_hash.CreateTempDirectory() as temp_dir: - with get_llvm_hash.CreateTempLLVMRepo(temp_dir) as src_path: - # Ensure that 'svn_version' exists in the chromiumum mirror of LLVM by - # finding its corresponding git hash. - git_hash = get_llvm_hash.GetGitHashFrom(src_path, svn_version) - - # Git hash of 'svn_version' exists, so move the source tree's HEAD to - # 'git_hash' via `git checkout`. - _MoveSrcTreeHEADToGitHash(src_path, git_hash) - - for cur_package in packages: - # Get the absolute path to $FILESDIR of the package. - filesdir_path = GetPathToFilesDirectory(chroot_path, cur_package) - - # Construct the absolute path to the patch metadata file where all the - # patches and their metadata are. - patch_metadata_path = os.path.join(filesdir_path, patch_metadata_file) - - # Make sure the patch metadata path is valid. - _CheckPatchMetadataPath(patch_metadata_path) - - patch_utils.clean_src_tree(src_path) - - # Get the patch results for the current package. - patches_info = patch_manager.HandlePatches(svn_version, - patch_metadata_path, - filesdir_path, src_path, - mode) - - package_info[cur_package] = patches_info._asdict() - - return package_info - - -def main(): - """Updates the patch metadata file of each package if possible. - - Raises: - AssertionError: The script was run inside the chroot. - """ - - chroot.VerifyOutsideChroot() - - args_output = GetCommandLineArgs() - - # Get the google3 LLVM version if a LLVM version was not provided. - llvm_version = args_output.llvm_version - if llvm_version in ('', 'google3', 'google3-unstable'): - llvm_version = get_llvm_hash.GetGoogle3LLVMVersion( - stable=llvm_version != 'google3-unstable') - - UpdatePackagesPatchMetadataFile(args_output.chroot_path, llvm_version, - args_output.patch_metadata_file, - args_output.packages, - FailureModes(args_output.failure_mode)) - - # Only 'disable_patches' and 'remove_patches' can potentially modify the patch - # metadata file. 
- if (args_output.failure_mode == FailureModes.DISABLE_PATCHES.value - or args_output.failure_mode == FailureModes.REMOVE_PATCHES.value): - print('The patch file %s has been modified for the packages:' % - args_output.patch_metadata_file) - print('\n'.join(args_output.packages)) - else: - print('Applicable patches in %s applied successfully.' % - args_output.patch_metadata_file) - - -if __name__ == '__main__': - main() diff --git a/llvm_tools/llvm_patch_management_unittest.py b/llvm_tools/llvm_patch_management_unittest.py deleted file mode 100755 index 52117c93..00000000 --- a/llvm_tools/llvm_patch_management_unittest.py +++ /dev/null @@ -1,311 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -# pylint: disable=protected-access - -"""Unit tests when creating the arguments for the patch manager.""" - -from __future__ import print_function - -from collections import namedtuple -import os -import unittest -import unittest.mock as mock - -from failure_modes import FailureModes -import get_llvm_hash -import llvm_patch_management -import patch_manager -import patch_utils -import subprocess_helpers - - -class LlvmPatchManagementTest(unittest.TestCase): - """Test class when constructing the arguments for the patch manager.""" - - # Simulate the behavior of `os.path.isdir()` when the chroot path does not - # exist or is not a directory. - @mock.patch.object(os.path, 'isdir', return_value=False) - def testInvalidChrootPathWhenGetPathToFilesDir(self, mock_isdir): - chroot_path = '/some/path/to/chroot' - package = 'sys-devel/llvm' - - # Verify the exception is raised when an invalid absolute path to the chroot - # is passed in. - with self.assertRaises(ValueError) as err: - llvm_patch_management.GetPathToFilesDirectory(chroot_path, package) - - self.assertEqual(str(err.exception), - 'Invalid chroot provided: %s' % chroot_path) - - mock_isdir.assert_called_once() - - # Simulate the behavior of 'os.path.isdir()' when a valid chroot path is - # passed in. - @mock.patch.object(os.path, 'isdir', return_value=True) - @mock.patch.object(subprocess_helpers, 'ChrootRunCommand') - @mock.patch.object(llvm_patch_management, '_GetRelativePathOfChrootPath') - def testSuccessfullyGetPathToFilesDir(self, - mock_get_relative_path_of_chroot_path, - mock_chroot_cmd, mock_isdir): - - package_chroot_path = '/mnt/host/source/path/to/llvm/llvm.ebuild' - - # Simulate behavior of 'ChrootRunCommand()' when successfully - # retrieved the absolute chroot path to the package's ebuild. - mock_chroot_cmd.return_value = package_chroot_path - - # Simulate behavior of '_GetRelativePathOfChrootPath()' when successfully - # removed '/mnt/host/source' of the absolute chroot path to the package's - # ebuild. - # - # Returns relative path after '/mnt/host/source/'. 
- mock_get_relative_path_of_chroot_path.return_value = 'path/to/llvm' - - chroot_path = '/some/path/to/chroot' - - package = 'sys-devel/llvm' - - self.assertEqual( - llvm_patch_management.GetPathToFilesDirectory(chroot_path, package), - '/some/path/to/chroot/path/to/llvm/files/') - - mock_isdir.assert_called_once() - - mock_chroot_cmd.assert_called_once() - - mock_get_relative_path_of_chroot_path.assert_called_once_with( - '/mnt/host/source/path/to/llvm') - - def testInvalidPrefixForChrootPath(self): - package_chroot_path = '/path/to/llvm' - - # Verify the exception is raised when the chroot path does not start with - # '/mnt/host/source/'. - with self.assertRaises(ValueError) as err: - llvm_patch_management._GetRelativePathOfChrootPath(package_chroot_path) - - self.assertEqual( - str(err.exception), - 'Invalid prefix for the chroot path: %s' % package_chroot_path) - - def testValidPrefixForChrootPath(self): - package_chroot_path = '/mnt/host/source/path/to/llvm' - - package_rel_path = 'path/to/llvm' - - self.assertEqual( - llvm_patch_management._GetRelativePathOfChrootPath( - package_chroot_path), package_rel_path) - - # Simulate behavior of 'os.path.isfile()' when the patch metadata file does - # not exist. - @mock.patch.object(os.path, 'isfile', return_value=False) - def testInvalidFileForPatchMetadataPath(self, mock_isfile): - abs_path_to_patch_file = '/abs/path/to/files/test.json' - - # Verify the exception is raised when the absolute path to the patch - # metadata file does not exist. - with self.assertRaises(ValueError) as err: - llvm_patch_management._CheckPatchMetadataPath(abs_path_to_patch_file) - - self.assertEqual(str(err.exception), - 'Invalid file provided: %s' % abs_path_to_patch_file) - - mock_isfile.assert_called_once() - - # Simulate behavior of 'os.path.isfile()' when the absolute path to the - # patch metadata file exists. - @mock.patch.object(os.path, 'isfile', return_value=True) - def testPatchMetadataFileDoesNotEndInJson(self, mock_isfile): - abs_path_to_patch_file = '/abs/path/to/files/PATCHES' - - # Verify the exception is raised when the patch metadata file does not end - # in '.json'. - with self.assertRaises(ValueError) as err: - llvm_patch_management._CheckPatchMetadataPath(abs_path_to_patch_file) - - self.assertEqual( - str(err.exception), - 'File does not end in ".json": %s' % abs_path_to_patch_file) - - mock_isfile.assert_called_once() - - @mock.patch.object(os.path, 'isfile') - def testValidPatchMetadataFile(self, mock_isfile): - abs_path_to_patch_file = '/abs/path/to/files/PATCHES.json' - - # Simulate behavior of 'os.path.isfile()' when the absolute path to the - # patch metadata file exists. - mock_isfile.return_value = True - - llvm_patch_management._CheckPatchMetadataPath(abs_path_to_patch_file) - - mock_isfile.assert_called_once() - - # Simulate `GetGitHashFrom()` when successfully retrieved the git hash - # of the version passed in. - @mock.patch.object(get_llvm_hash, - 'GetGitHashFrom', - return_value='a123testhash1') - # Simulate `CreateTempLLVMRepo()` when successfully created a work tree from - # the LLVM repo copy in `llvm_tools` directory. - @mock.patch.object(get_llvm_hash, 'CreateTempLLVMRepo') - # Simulate behavior of `_MoveSrcTreeHEADToGitHash()` when successfully moved - # the head pointer to the git hash of the revision. 
- @mock.patch.object(llvm_patch_management, '_MoveSrcTreeHEADToGitHash') - @mock.patch.object(llvm_patch_management, 'GetPathToFilesDirectory') - @mock.patch.object(llvm_patch_management, '_CheckPatchMetadataPath') - def testExceptionIsRaisedWhenUpdatingAPackagesMetadataFile( - self, mock_check_patch_metadata_path, mock_get_filesdir_path, - mock_move_head_pointer, mock_create_temp_llvm_repo, mock_get_git_hash): - - abs_path_to_patch_file = ( - '/some/path/to/chroot/some/path/to/filesdir/PATCHES') - - # Simulate the behavior of '_CheckPatchMetadataPath()' when the patch - # metadata file in $FILESDIR does not exist or does not end in '.json'. - def InvalidPatchMetadataFile(patch_metadata_path): - self.assertEqual(patch_metadata_path, abs_path_to_patch_file) - - raise ValueError('File does not end in ".json": %s' % - abs_path_to_patch_file) - - # Use the test function to simulate behavior of '_CheckPatchMetadataPath()'. - mock_check_patch_metadata_path.side_effect = InvalidPatchMetadataFile - - abs_path_to_filesdir = '/some/path/to/chroot/some/path/to/filesdir' - - # Simulate the behavior of 'GetPathToFilesDirectory()' when successfully - # constructed the absolute path to $FILESDIR of a package. - mock_get_filesdir_path.return_value = abs_path_to_filesdir - - temp_work_tree = '/abs/path/to/tmpWorkTree' - - # Simulate the behavior of returning the absolute path to a worktree via - # `git worktree add`. - mock_create_temp_llvm_repo.return_value.__enter__.return_value.name = ( - temp_work_tree) - - chroot_path = '/some/path/to/chroot' - revision = 1000 - patch_file_name = 'PATCHES' - package_name = 'test-package/package1' - - # Verify the exception is raised when a package is constructing the - # arguments for the patch manager to update its patch metadata file and an - # exception is raised in the process. - with self.assertRaises(ValueError) as err: - llvm_patch_management.UpdatePackagesPatchMetadataFile( - chroot_path, revision, patch_file_name, [package_name], - FailureModes.FAIL) - - self.assertEqual( - str(err.exception), - 'File does not end in ".json": %s' % abs_path_to_patch_file) - - mock_get_filesdir_path.assert_called_once_with(chroot_path, package_name) - - mock_get_git_hash.assert_called_once() - - mock_check_patch_metadata_path.assert_called_once() - - mock_move_head_pointer.assert_called_once() - - mock_create_temp_llvm_repo.assert_called_once() - - # Simulate `CleanSrcTree()` when successfully removed changes from the - # worktree. - @mock.patch.object(patch_utils, 'clean_src_tree') - # Simulate `GetGitHashFrom()` when successfully retrieved the git hash - # of the version passed in. - @mock.patch.object(get_llvm_hash, - 'GetGitHashFrom', - return_value='a123testhash1') - # Simulate `CreateTempLLVMRepo()` when successfully created a work tree from - # the LLVM repo copy in `llvm_tools` directory. - @mock.patch.object(get_llvm_hash, 'CreateTempLLVMRepo') - # Simulate behavior of `_MoveSrcTreeHEADToGitHash()` when successfully moved - # the head pointer to the git hash of the revision. 
- @mock.patch.object(llvm_patch_management, '_MoveSrcTreeHEADToGitHash') - @mock.patch.object(llvm_patch_management, 'GetPathToFilesDirectory') - @mock.patch.object(llvm_patch_management, '_CheckPatchMetadataPath') - @mock.patch.object(patch_manager, 'HandlePatches') - def testSuccessfullyRetrievedPatchResults( - self, mock_handle_patches, mock_check_patch_metadata_path, - mock_get_filesdir_path, mock_move_head_pointer, - mock_create_temp_llvm_repo, mock_get_git_hash, mock_clean_src_tree): - - abs_path_to_filesdir = '/some/path/to/chroot/some/path/to/filesdir' - - abs_path_to_patch_file = ( - '/some/path/to/chroot/some/path/to/filesdir/PATCHES.json') - - # Simulate the behavior of 'GetPathToFilesDirectory()' when successfully - # constructed the absolute path to $FILESDIR of a package. - mock_get_filesdir_path.return_value = abs_path_to_filesdir - - PatchInfo = namedtuple('PatchInfo', [ - 'applied_patches', 'failed_patches', 'non_applicable_patches', - 'disabled_patches', 'removed_patches', 'modified_metadata' - ]) - - # Simulate the behavior of 'HandlePatches()' when successfully iterated - # through every patch in the patch metadata file and a dictionary is - # returned that contains information about the patches' status. - mock_handle_patches.return_value = PatchInfo( - applied_patches=['fixes_something.patch'], - failed_patches=['disables_output.patch'], - non_applicable_patches=[], - disabled_patches=[], - removed_patches=[], - modified_metadata=None) - - temp_work_tree = '/abs/path/to/tmpWorkTree' - - # Simulate the behavior of returning the absolute path to a worktree via - # `git worktree add`. - mock_create_temp_llvm_repo.return_value.__enter__.return_value.name = ( - temp_work_tree) - - expected_patch_results = { - 'applied_patches': ['fixes_something.patch'], - 'failed_patches': ['disables_output.patch'], - 'non_applicable_patches': [], - 'disabled_patches': [], - 'removed_patches': [], - 'modified_metadata': None - } - - chroot_path = '/some/path/to/chroot' - revision = 1000 - patch_file_name = 'PATCHES.json' - package_name = 'test-package/package2' - - patch_info = llvm_patch_management.UpdatePackagesPatchMetadataFile( - chroot_path, revision, patch_file_name, [package_name], - FailureModes.CONTINUE) - - self.assertDictEqual(patch_info, {package_name: expected_patch_results}) - - mock_get_filesdir_path.assert_called_once_with(chroot_path, package_name) - - mock_check_patch_metadata_path.assert_called_once_with( - abs_path_to_patch_file) - - mock_handle_patches.assert_called_once() - - mock_create_temp_llvm_repo.assert_called_once() - - mock_get_git_hash.assert_called_once() - - mock_move_head_pointer.assert_called_once() - - mock_clean_src_tree.assert_called_once() - - -if __name__ == '__main__': - unittest.main() -- cgit v1.2.3 From d1360003a478eaf067aa469b8f11eb5aaa599b0a Mon Sep 17 00:00:00 2001 From: Denis Nikitin <denik@google.com> Date: Fri, 15 Jul 2022 11:02:50 -0700 Subject: afdo_metadata: Publish the new kernel profiles Update chromeos-kernel-4.4 Update chromeos-kernel-4.14 Update chromeos-kernel-4.19 Update chromeos-kernel-5.4 Update chromeos-kernel-5.10 BUG=None TEST=Verified in kernel-release-afdo-verify-orchestrator Change-Id: Ic6bea362338745eff5f67b50207b14c1447f36ac Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3765945 Commit-Queue: Manoj Gupta <manojgupta@chromium.org> Reviewed-by: Manoj Gupta <manojgupta@chromium.org> Tested-by: Denis Nikitin <denik@chromium.org> Auto-Submit: Denis Nikitin <denik@chromium.org> 
--- afdo_metadata/kernel_afdo.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json index 55831e01..1d2f1cc2 100644 --- a/afdo_metadata/kernel_afdo.json +++ b/afdo_metadata/kernel_afdo.json @@ -1,17 +1,17 @@ { "chromeos-kernel-4_4": { - "name": "R105-14943.0-1656927124" + "name": "R105-14943.0-1657532004" }, "chromeos-kernel-4_14": { - "name": "R105-14943.0-1656927138" + "name": "R105-14943.0-1657532105" }, "chromeos-kernel-4_19": { - "name": "R105-14943.0-1656927254" + "name": "R105-14943.0-1657532036" }, "chromeos-kernel-5_4": { - "name": "R105-14943.0-1656927279" + "name": "R105-14943.0-1657531926" }, "chromeos-kernel-5_10": { - "name": "R105-14909.34-1656927210" + "name": "R105-14943.0-1657531932" } } -- cgit v1.2.3 From 197d2f545c0ad3d2e06deab2f1eac7d2b9bce1ee Mon Sep 17 00:00:00 2001 From: Michael Benfield <mbenfield@google.com> Date: Tue, 21 Jun 2022 21:29:50 +0000 Subject: rust-analyzer-chromiumos-wrapper: add This is a wrapper program enabling use of `rust-analyzer` running in the chroot with an editor outside the chroot. BUG=b:235120448 TEST=tested with Neovim Change-Id: I64287071ce6cc26c6848b6fb09743f6df9fac311 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3715832 Reviewed-by: George Burgess <gbiv@chromium.org> Commit-Queue: Michael Benfield <mbenfield@google.com> Tested-by: Michael Benfield <mbenfield@google.com> Reviewed-by: Michael Benfield <mbenfield@google.com> --- .gitignore | 1 + rust-analyzer-chromiumos-wrapper/Cargo.lock | 147 ++++++++++++ rust-analyzer-chromiumos-wrapper/Cargo.toml | 17 ++ rust-analyzer-chromiumos-wrapper/README.md | 61 +++++ rust-analyzer-chromiumos-wrapper/src/main.rs | 330 +++++++++++++++++++++++++++ 5 files changed, 556 insertions(+) create mode 100644 rust-analyzer-chromiumos-wrapper/Cargo.lock create mode 100644 rust-analyzer-chromiumos-wrapper/Cargo.toml create mode 100644 rust-analyzer-chromiumos-wrapper/README.md create mode 100644 rust-analyzer-chromiumos-wrapper/src/main.rs diff --git a/.gitignore b/.gitignore index 072bd9e4..92ee4cb9 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ logs .mypy_cache/ llvm-project-copy/ compiler_wrapper/compiler_wrapper +/rust-analyzer-chromiumos-wrapper/target diff --git a/rust-analyzer-chromiumos-wrapper/Cargo.lock b/rust-analyzer-chromiumos-wrapper/Cargo.lock new file mode 100644 index 00000000..aedf8bfc --- /dev/null +++ b/rust-analyzer-chromiumos-wrapper/Cargo.lock @@ -0,0 +1,147 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "anyhow" +version = "1.0.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "itoa" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" + +[[package]] +name = "libc" +version = "0.2.126" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" + +[[package]] +name = "log" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "num_threads" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +dependencies = [ + "libc", +] + +[[package]] +name = "rust-analyzer-chromiumos-wrapper" +version = "0.1.0" +dependencies = [ + "anyhow", + "log", + "serde_json", + "simplelog", +] + +[[package]] +name = "ryu" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" + +[[package]] +name = "serde" +version = "1.0.137" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" + +[[package]] +name = "serde_json" +version = "1.0.81" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "simplelog" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48dfff04aade74dd495b007c831cd6f4e0cee19c344dd9dc0884c0289b70a786" +dependencies = [ + "log", + "termcolor", + "time", +] + +[[package]] +name = "termcolor" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "time" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72c91f41dcb2f096c05f0873d667dceec1087ce5bcf984ec8ffb19acddbb3217" +dependencies = [ + "itoa", + "libc", + "num_threads", + "time-macros", +] + +[[package]] +name = "time-macros" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" 
+version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/rust-analyzer-chromiumos-wrapper/Cargo.toml b/rust-analyzer-chromiumos-wrapper/Cargo.toml new file mode 100644 index 00000000..91d0f9a9 --- /dev/null +++ b/rust-analyzer-chromiumos-wrapper/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "rust-analyzer-chromiumos-wrapper" +version = "0.1.0" +edition = "2021" + +[profile.release] +panic = "abort" + +[dependencies] +anyhow = "1.0" +log = { version = "0.4.17" } +serde_json = "1.0" +simplelog = { version = "0.12.0" } + +[features] +default = ["no_debug_log"] +no_debug_log = ["log/max_level_off", "log/release_max_level_off"] diff --git a/rust-analyzer-chromiumos-wrapper/README.md b/rust-analyzer-chromiumos-wrapper/README.md new file mode 100644 index 00000000..e834ff34 --- /dev/null +++ b/rust-analyzer-chromiumos-wrapper/README.md @@ -0,0 +1,61 @@ +# rust-analyzer-chromiumos-wrapper + +## Intro + +rust-analyzer is an LSP server for the Rust language. It allows editors like +vim, emacs, or VS Code to provide IDE-like features for Rust. + +This program, `rust-analyzer-chromiumos-wrapper`, is a wrapper around +`rust-analyzer`. It exists to translate paths between an instance of +rust-analyzer running inside the chromiumos chroot and a client running outside +the chroot. + +It is of course possible to simply run `rust-analyzer` outside the chroot, but +version mismatch issues may lead to a suboptimal experience. + +It should run outside the chroot. If invoked in a `chromiumos` repo in a +subdirectory of either `chromiumos/src` or `chromiumos/chroot`, it will attempt +to invoke `rust-analyzer` inside the chroot and translate paths. Otherwise, it +will attempt to invoke a `rust-analyzer` outside the chroot and will not +translate paths. + +It supports none of rust-analyzer's command line options, which aren't +necessary for acting as a LSP server anyway. + +## Quickstart + +*Outside* the chroot, install the `rust-analyzer-chromiumos-wrapper` binary: + +``` +cargo install --path /path-to-a-chromiumos-checkout/src/third_party/toolchain-utils/rust-analyzer-chromiumos-wrapper +``` + +Make sure `~/.cargo/bin' is in your PATH, or move/symlink `~/.cargo/bin/rust-analyzer-chromiumos-wrapper` to a location in your PATH. + +Configure your editor to use the binary `rust-analyzer-chromiumos-wrapper` as +`rust-analyzer`. In Neovim, if you're using +[nvim-lspconfig](https://github.com/neovim/nvim-lspconfig), this can be done by +putting the following in your `init.lua`: + +``` +require('lspconfig')['rust_analyzer'].setup { + cmd = {'rust-analyzer-chromiumos-wrapper'}, +} +``` + +This configuration is specific to your editor, but see the +[Rust analyzer manual](https://rust-analyzer.github.io/manual.html) for +more about several different editors. + +Once the above general configuration is set up, you'll need to install +`rust-analyzer` inside each chroot where you want to edit code: +``` +sudo emerge rust-analyzer +``` + +## Misc + +A wrapper isn't necessary for clangd, because clangd supports the option +`--path-mappings` to translate paths. In principle a similar option could be +added to `rust-analyzer`, obviating the need for this wrapper. 
diff --git a/rust-analyzer-chromiumos-wrapper/src/main.rs b/rust-analyzer-chromiumos-wrapper/src/main.rs
new file mode 100644
index 00000000..7bc52d26
--- /dev/null
+++ b/rust-analyzer-chromiumos-wrapper/src/main.rs
@@ -0,0 +1,330 @@
+// Copyright 2022 The ChromiumOS Authors.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::env;
+use std::fs::File;
+use std::io::{self, BufRead, BufReader, BufWriter, Write};
+use std::os::unix::process::CommandExt;
+use std::path::{Path, PathBuf};
+use std::process::{self, Child};
+use std::str::from_utf8;
+use std::thread;
+
+use anyhow::{anyhow, bail, Context, Result};
+
+use log::trace;
+
+use simplelog::{Config, LevelFilter, WriteLogger};
+
+use serde_json::{from_slice, to_writer, Value};
+
+fn main() -> Result<()> {
+    if env::args().len() > 1 {
+        bail!("rust-analyzer-chromiumos-wrapper doesn't support command line arguments");
+    }
+
+    init_log()?;
+
+    let d = env::current_dir()?;
+    let chromiumos_root = match find_chromiumos_root(&d) {
+        Some(x) => x,
+        None => {
+            // It doesn't appear that we're in a chroot. Run the
+            // regular rust-analyzer.
+            return Err(process::Command::new("rust-analyzer").exec())?;
+        }
+    };
+
+    let outside_prefix: &'static str = {
+        let path = chromiumos_root
+            .to_str()
+            .ok_or_else(|| anyhow!("Path is not valid UTF-8"))?;
+
+        let mut tmp = format!("file://{}", path);
+        if Some(&b'/') != tmp.as_bytes().last() {
+            tmp.push('/');
+        }
+
+        // No need to ever free this memory, so let's get a static reference.
+        Box::leak(tmp.into_boxed_str())
+    };
+
+    trace!("Found chromiumos root {}", outside_prefix);
+
+    let inside_prefix: &'static str = "file:///mnt/host/source/";
+
+    let cmd = "cros_sdk";
+    let args: [&str; 2] = ["--", "rust-analyzer"];
+    let mut child = KillOnDrop(run_command(cmd, args)?);
+
+    let mut child_stdin = BufWriter::new(child.0.stdin.take().unwrap());
+    let mut child_stdout = BufReader::new(child.0.stdout.take().unwrap());
+
+    let join_handle = {
+        thread::spawn(move || {
+            let mut stdin = io::stdin().lock();
+            stream_with_replacement(&mut stdin, &mut child_stdin, outside_prefix, inside_prefix)
+                .context("Streaming from stdin into rust-analyzer")
+        })
+    };
+
+    let mut stdout = BufWriter::new(io::stdout().lock());
+    stream_with_replacement(
+        &mut child_stdout,
+        &mut stdout,
+        inside_prefix,
+        outside_prefix,
+    )
+    .context("Streaming from rust-analyzer into stdout")?;
+
+    join_handle.join().unwrap()?;
+
+    let code = child.0.wait().context("Running rust-analyzer")?.code();
+    std::process::exit(code.unwrap_or(127));
+}
+
+fn init_log() -> Result<()> {
+    if !cfg!(feature = "no_debug_log") {
+        let filename = env::var("RUST_ANALYZER_CHROMIUMOS_WRAPPER_LOG")
+            .context("Obtaining RUST_ANALYZER_CHROMIUMOS_WRAPPER_LOG environment variable")?;
+        let file = File::create(&filename).with_context(|| {
+            format!(
+                "Opening log file `{}` (value of RUST_ANALYZER_CHROMIUMOS_WRAPPER_LOG)",
+                filename
+            )
+        })?;
+        WriteLogger::init(LevelFilter::Trace, Config::default(), file)
+            .with_context(|| format!("Creating WriteLogger with log file `{}`", filename))?;
+    }
+    Ok(())
+}
+
+#[derive(Debug, Default)]
+struct Header {
+    length: Option<usize>,
+    other_fields: Vec<u8>,
+}
+
+/// Read the `Content-Length` (if present) into `header.length`, and the text of every other header
+/// field into `header.other_fields`.
+fn read_header<R: BufRead>(r: &mut R, header: &mut Header) -> Result<()> { + header.length = None; + header.other_fields.clear(); + const CONTENT_LENGTH: &[u8] = b"Content-Length:"; + let slen = CONTENT_LENGTH.len(); + loop { + let index = header.other_fields.len(); + + // HTTP header spec says line endings are supposed to be '\r\n' but recommends + // implementations accept just '\n', so let's not worry whether a '\r' is present. + r.read_until(b'\n', &mut header.other_fields) + .context("Reading a header")?; + + let new_len = header.other_fields.len(); + + if new_len <= index + 2 { + // Either we've just received EOF, or just a newline, indicating end of the header. + return Ok(()); + } + if header + .other_fields + .get(index..index + slen) + .map_or(false, |v| v == CONTENT_LENGTH) + { + let s = from_utf8(&header.other_fields[index + slen..]) + .context("Parsing Content-Length")?; + header.length = Some(s.trim().parse().context("Parsing Content-Length")?); + header.other_fields.truncate(index); + } + } +} + +/// Extend `dest` with `contents`, replacing any occurrence of `pattern` in a json string in +/// `contents` with `replacement`. +fn replace(contents: &[u8], pattern: &str, replacement: &str, dest: &mut Vec<u8>) -> Result<()> { + fn map_value(val: Value, pattern: &str, replacement: &str) -> Value { + match val { + Value::String(s) => + // `s.replace` is very likely doing more work than necessary. Probably we only need + // to look for the pattern at the beginning of the string. + { + Value::String(s.replace(pattern, replacement)) + } + Value::Array(mut v) => { + for val_ref in v.iter_mut() { + let value = std::mem::replace(val_ref, Value::Null); + *val_ref = map_value(value, pattern, replacement); + } + Value::Array(v) + } + Value::Object(mut map) => { + // Surely keys can't be paths. + for val_ref in map.values_mut() { + let value = std::mem::replace(val_ref, Value::Null); + *val_ref = map_value(value, pattern, replacement); + } + Value::Object(map) + } + x => x, + } + } + + let init_val: Value = from_slice(contents).with_context(|| match from_utf8(contents) { + Err(_) => format!( + "JSON parsing content of length {} that's not valid UTF-8", + contents.len() + ), + Ok(s) => format!("JSON parsing content of length {}:\n{}", contents.len(), s), + })?; + let mapped_val = map_value(init_val, pattern, replacement); + to_writer(dest, &mapped_val)?; + Ok(()) +} + +/// Read LSP messages from `r`, replacing each occurrence of `pattern` in a json string in the +/// payload with `replacement`, adjusting the `Content-Length` in the header to match, and writing +/// the result to `w`. +fn stream_with_replacement<R: BufRead, W: Write>( + r: &mut R, + w: &mut W, + pattern: &str, + replacement: &str, +) -> Result<()> { + let mut head = Header::default(); + let mut buf = Vec::with_capacity(1024); + let mut buf2 = Vec::with_capacity(1024); + loop { + read_header(r, &mut head)?; + if head.length.is_none() && head.other_fields.len() == 0 { + // No content in the header means we're apparently done. + return Ok(()); + } + let len = head + .length + .ok_or_else(|| anyhow!("No Content-Length in header"))?; + + trace!("Received header with length {}", head.length.unwrap()); + trace!( + "Received header with contents\n{}", + from_utf8(&head.other_fields)? 
+ ); + + buf.resize(len, 0); + r.read_exact(&mut buf) + .with_context(|| format!("Reading payload expecting size {}", len))?; + + trace!("Received payload\n{}", from_utf8(&buf)?); + + buf2.clear(); + replace(&buf, pattern, replacement, &mut buf2)?; + + trace!("After replacements payload\n{}", from_utf8(&buf2)?); + + write!(w, "Content-Length: {}\r\n", buf2.len())?; + w.write_all(&head.other_fields)?; + w.write_all(&buf2)?; + w.flush()?; + } +} + +fn run_command<'a, I>(cmd: &'a str, args: I) -> Result<process::Child> +where + I: IntoIterator<Item = &'a str>, +{ + Ok(process::Command::new(cmd) + .args(args) + .stdin(process::Stdio::piped()) + .stdout(process::Stdio::piped()) + .spawn()?) +} + +fn find_chromiumos_root(start: &Path) -> Option<PathBuf> { + let mut buf = start.to_path_buf(); + loop { + buf.push(".chroot_lock"); + if buf.exists() { + buf.pop(); + return Some(buf); + } + buf.pop(); + if !buf.pop() { + return None; + } + } +} + +struct KillOnDrop(Child); + +impl Drop for KillOnDrop { + fn drop(&mut self) { + let _ = self.0.kill(); + } +} + +#[cfg(test)] +mod test { + use super::*; + + fn test_stream_with_replacement( + read: &str, + pattern: &str, + replacement: &str, + json_expected: &str, + ) -> Result<()> { + let mut w = Vec::<u8>::with_capacity(read.len()); + stream_with_replacement(&mut read.as_bytes(), &mut w, pattern, replacement)?; + + // serde_json may not format the json output the same as we do, so we can't just compare + // as strings or slices. + + let (w1, w2) = { + let mut split = w.rsplitn(2, |&c| c == b'\n'); + let w2 = split.next().unwrap(); + (split.next().unwrap(), w2) + }; + + assert_eq!( + from_utf8(w1)?, + format!("Content-Length: {}\r\n\r", w2.len()) + ); + + let v1: Value = from_slice(w2)?; + let v2: Value = serde_json::from_str(json_expected)?; + assert_eq!(v1, v2); + + Ok(()) + } + + #[test] + fn test_stream_with_replacement_1() -> Result<()> { + test_stream_with_replacement( + // read + "Content-Length: 93\r\n\r\n{\"somekey\": {\"somepath\": \"XYZXYZabc\",\ + \"anotherpath\": \"somestring\"}, \"anotherkey\": \"XYZXYZdef\"}", + // pattern + "XYZXYZ", + // replacement + "REPLACE", + // json_expected + "{\"somekey\": {\"somepath\": \"REPLACEabc\", \"anotherpath\": \"somestring\"},\ + \"anotherkey\": \"REPLACEdef\"}", + ) + } + + #[test] + fn test_stream_with_replacement_2() -> Result<()> { + test_stream_with_replacement( + // read + "Content-Length: 83\r\n\r\n{\"key0\": \"sometextABCDEF\",\ + \"key1\": {\"key2\": 5, \"key3\": \"moreABCDEFtext\"}, \"key4\": 1}", + // pattern + "ABCDEF", + // replacement + "replacement", + // json_expected + "{\"key0\": \"sometextreplacement\", \"key1\": {\"key2\": 5,\ + \"key3\": \"morereplacementtext\"}, \"key4\": 1}", + ) + } +} -- cgit v1.2.3 From b3b2c8514865be8650dc9c4f42fd081a255643b8 Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead <ajordanr@google.com> Date: Wed, 20 Jul 2022 17:40:09 +0000 Subject: llvm_tools: Fix missing import At present, programs which import the now deleted llvm_patch_management break. This commit removes the dependency, and moves the one relevant function from llvm_patch_management to update_chromeos_llvm_hash. Some other developer quality of life additions have been added, such as type hints, to get the new patch_manager implementation to validate static checks. BUG=b:239280701, b:239279349, b:237870186 TEST=get_upstream_patch.py ... 
TEST=run_tests_for llvm_tools/* Change-Id: I2a7880c9f3b285109f6b115ad8b22eccbf0b714f Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3777524 Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com> Reviewed-by: Christopher Di Bella <cjdb@google.com> Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Reviewed-by: George Burgess <gbiv@chromium.org> --- llvm_tools/update_chromeos_llvm_hash.py | 82 ++++++++++++++++++++---- llvm_tools/update_chromeos_llvm_hash_unittest.py | 13 ++-- 2 files changed, 77 insertions(+), 18 deletions(-) diff --git a/llvm_tools/update_chromeos_llvm_hash.py b/llvm_tools/update_chromeos_llvm_hash.py index 50d8ecfb..efa84ead 100755 --- a/llvm_tools/update_chromeos_llvm_hash.py +++ b/llvm_tools/update_chromeos_llvm_hash.py @@ -16,14 +16,18 @@ import argparse import datetime import enum import os +from pathlib import Path import re import subprocess +from typing import Dict, List import chroot import failure_modes import get_llvm_hash import git -import llvm_patch_management +import patch_utils +import subprocess_helpers + DEFAULT_PACKAGES = [ 'dev-util/lldb-server', @@ -48,7 +52,7 @@ class LLVMVariant(enum.Enum): verbose = False -def defaultCrosRoot(): +def defaultCrosRoot() -> Path: """Get default location of chroot_path. The logic assumes that the cros_root is ~/chromiumos, unless llvm_tools is @@ -59,8 +63,8 @@ def defaultCrosRoot(): """ llvm_tools_path = os.path.realpath(os.path.dirname(__file__)) if llvm_tools_path.endswith('src/third_party/toolchain-utils/llvm_tools'): - return os.path.join(llvm_tools_path, '../../../../') - return '~/chromiumos' + return Path(llvm_tools_path).parent.parent.parent.parent + return Path.home() / 'chromiumos' def GetCommandLineArgs(): @@ -79,6 +83,7 @@ def GetCommandLineArgs(): # Add argument for a specific chroot path. parser.add_argument('--chroot_path', + type=Path, default=defaultCrosRoot(), help='the path to the chroot (default: %(default)s)') @@ -461,9 +466,9 @@ def StagePackagesPatchResultsForCommit(package_info_dict, commit_messages): return commit_messages -def UpdatePackages(packages, llvm_variant, git_hash, svn_version, chroot_path, - patch_metadata_file, mode, git_hash_source, - extra_commit_msg): +def UpdatePackages(packages, llvm_variant, git_hash, svn_version, + chroot_path: Path, patch_metadata_file, mode, + git_hash_source, extra_commit_msg): """Updates an LLVM hash and uprevs the ebuild of the packages. A temporary repo is created for the changes. The changes are @@ -489,9 +494,6 @@ def UpdatePackages(packages, llvm_variant, git_hash, svn_version, chroot_path, Gerrit commit URL and the second pair is the change list number. """ - # Determines whether to print the result of each executed command. - llvm_patch_management.verbose = verbose - # Construct a dictionary where the key is the absolute path of the symlink to # the package and the value is the absolute path to the ebuild of the package. paths_dict = CreatePathDictionaryFromPackages(chroot_path, packages) @@ -546,8 +548,8 @@ def UpdatePackages(packages, llvm_variant, git_hash, svn_version, chroot_path, EnsurePackageMaskContains(chroot_path, git_hash) # Handle the patches for each package. - package_info_dict = llvm_patch_management.UpdatePackagesPatchMetadataFile( - chroot_path, svn_version, patch_metadata_file, packages, mode) + package_info_dict = UpdatePackagesPatchMetadataFile( + chroot_path, svn_version, packages, mode) # Update the commit message if changes were made to a package's patches. 
   commit_messages = StagePackagesPatchResultsForCommit(
@@ -589,6 +591,62 @@ def EnsurePackageMaskContains(chroot_path, git_hash):
   subprocess.check_output(['git', '-C', overlay_dir, 'add', mask_path])
 
 
+def UpdatePackagesPatchMetadataFile(
+    chroot_path: Path, svn_version: int, packages: List[str],
+    mode: failure_modes.FailureModes) -> Dict[str, patch_utils.PatchInfo]:
+  """Updates the packages metadata file.
+
+  Args:
+    chroot_path: The absolute path to the chroot.
+    svn_version: The version to use for patch management.
+    packages: All the packages to update their patch metadata file.
+    mode: The mode for the patch manager to use when an applicable patch
+      fails to apply.
+      Ex: 'FailureModes.FAIL'
+
+  Returns:
+    A dictionary where the key is the package name and the value is a dictionary
+    that has information on the patches.
+  """
+
+  # A dictionary where the key is the package name and the value is a dictionary
+  # that has information on the patches.
+  package_info = {}
+
+  llvm_hash = get_llvm_hash.LLVMHash()
+
+  with llvm_hash.CreateTempDirectory() as temp_dir:
+    with get_llvm_hash.CreateTempLLVMRepo(temp_dir) as src_path:
+      # Ensure that 'svn_version' exists in the chromium mirror of LLVM by
+      # finding its corresponding git hash.
+      git_hash = get_llvm_hash.GetGitHashFrom(src_path, svn_version)
+      move_head_cmd = ['git', '-C', src_path, 'checkout', git_hash, '-q']
+      subprocess.run(move_head_cmd, stdout=subprocess.DEVNULL, check=True)
+
+      for cur_package in packages:
+        # Get the absolute path to $FILESDIR of the package.
+        chroot_ebuild_str = subprocess_helpers.ChrootRunCommand(
+            chroot_path, ['equery', 'w', cur_package]).strip()
+        if not chroot_ebuild_str:
+          raise RuntimeError(f'could not find ebuild for {cur_package}')
+        chroot_ebuild_path = Path(
+            chroot.ConvertChrootPathsToAbsolutePaths(chroot_path,
+                                                     [chroot_ebuild_str])[0])
+        patches_json_fp = chroot_ebuild_path.parent / 'files' / 'PATCHES.json'
+        if not patches_json_fp.is_file():
+          raise RuntimeError(f'patches file {patches_json_fp} is not a file')
+
+        patches_info = patch_utils.apply_all_from_json(
+            svn_version=svn_version,
+            llvm_src_dir=Path(src_path),
+            patches_json_fp=patches_json_fp,
+            continue_on_failure=mode == failure_modes.FailureModes.CONTINUE,
+        )
+        package_info[cur_package] = patches_info._asdict()
+
+  return package_info
+
+
 def main():
   """Updates the LLVM next hash for each package.
diff --git a/llvm_tools/update_chromeos_llvm_hash_unittest.py b/llvm_tools/update_chromeos_llvm_hash_unittest.py
index c1efc910..569fdcc4 100755
--- a/llvm_tools/update_chromeos_llvm_hash_unittest.py
+++ b/llvm_tools/update_chromeos_llvm_hash_unittest.py
@@ -11,6 +11,7 @@ from __future__ import print_function
 import collections
 import datetime
 import os
+from pathlib import Path
 import subprocess
 import unittest
 import unittest.mock as mock
@@ -19,10 +20,10 @@ import chroot
 import failure_modes
 import get_llvm_hash
 import git
-import llvm_patch_management
 import test_helpers
 import update_chromeos_llvm_hash
 
+
 # These are unittests; protected access is OK to a point.
 # pylint: disable=protected-access
 
@@ -35,13 +36,13 @@ class UpdateLLVMHashTest(unittest.TestCase):
     llvm_tools_path = '/path/to/cros/src/third_party/toolchain-utils/llvm_tools'
     mock_llvm_tools.return_value = llvm_tools_path
     self.assertEqual(update_chromeos_llvm_hash.defaultCrosRoot(),
-                     '%s/../../../../' % llvm_tools_path)
+                     Path('/path/to/cros'))
 
   @mock.patch.object(os.path, 'realpath')
   def testDefaultCrosRootFromOutsideCrOSCheckout(self, mock_llvm_tools):
     mock_llvm_tools.return_value = '~/toolchain-utils/llvm_tools'
     self.assertEqual(update_chromeos_llvm_hash.defaultCrosRoot(),
-                     '~/chromiumos')
+                     Path.home() / 'chromiumos')
 
   # Simulate behavior of 'os.path.isfile()' when the ebuild path to a package
   # does not exist.
@@ -692,7 +693,8 @@ class UpdateLLVMHashTest(unittest.TestCase):
   @mock.patch.object(update_chromeos_llvm_hash, 'UprevEbuildSymlink')
   @mock.patch.object(git, 'UploadChanges')
   @mock.patch.object(git, 'DeleteBranch')
-  @mock.patch.object(llvm_patch_management, 'UpdatePackagesPatchMetadataFile')
+  @mock.patch.object(update_chromeos_llvm_hash,
+                     'UpdatePackagesPatchMetadataFile')
   @mock.patch.object(update_chromeos_llvm_hash,
                      'StagePatchMetadataFileForCommit')
   def testSuccessfullyUpdatedPackages(
@@ -727,8 +729,7 @@ class UpdateLLVMHashTest(unittest.TestCase):
     # Test function to simulate 'UpdatePackagesPatchMetadataFile()' when the
     # patch results contains a disabled patch in 'disable_patches' mode.
-    def RetrievedPatchResults(chroot_path, svn_version, patch_metadata_file,
-                              packages, mode):
+    def RetrievedPatchResults(chroot_path, svn_version, packages, mode):
 
       self.assertEqual(chroot_path, '/some/path/to/chroot')
       self.assertEqual(svn_version, 1000)
-- cgit v1.2.3


From 081dbfe709ba03290ee6203098b844b78317961c Mon Sep 17 00:00:00 2001
From: Jordan R Abrahams-Whitehead <ajordanr@google.com>
Date: Thu, 21 Jul 2022 20:28:59 +0000
Subject: llvm_tools: Clean up on apply_all_from_json

At present, update_chromeos_llvm_hash.py keeps trying to apply patches
from separate llvm subprojects to the same LLVM dir. This causes
patches to stack, so that the second time the same patch is applied,
it fails to apply.

This commit fixes the issue by cleaning up the git directory at every
iteration of the loop.

This commit also fixes some type casts.
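The clean-up wraps each package's patch application in a context manager.
As a minimal sketch, one plausible shape for such a helper (the real one is
patch_utils.git_clean_context, whose implementation may differ):

```
import contextlib
import subprocess
from pathlib import Path


@contextlib.contextmanager
def git_clean_context(git_root_dir: Path):
  """Restore a git worktree to HEAD once the caller is done with it."""
  try:
    yield
  finally:
    # Undo any applied patches and delete untracked files, so the next
    # package starts from a pristine tree.
    subprocess.run(['git', '-C', str(git_root_dir), 'reset', '--hard'],
                   check=True)
    subprocess.run(['git', '-C', str(git_root_dir), 'clean', '-fd'],
                   check=True)
```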
BUG=b:239279349, b:239280701
TEST=./update_chromeos_llvm_hash_unittest.py
TEST=./update_packages_and_run_tests.py \
  --extra_change_lists 1394249 1986966 \
  --chroot_path $CROS_ROOT \
  --llvm_version google3 cq

Change-Id: I6cc853d80c47fefaba4ff0b5133787b716177567
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3780679
Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com>
Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com>
Reviewed-by: Christopher Di Bella <cjdb@google.com>
Commit-Queue: Christopher Di Bella <cjdb@google.com>
Reviewed-by: Manoj Gupta <manojgupta@chromium.org>
---
 llvm_tools/update_chromeos_llvm_hash.py          | 20 +++++++++++---------
 llvm_tools/update_chromeos_llvm_hash_unittest.py |  6 +++---
 2 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/llvm_tools/update_chromeos_llvm_hash.py b/llvm_tools/update_chromeos_llvm_hash.py
index efa84ead..fe13f708 100755
--- a/llvm_tools/update_chromeos_llvm_hash.py
+++ b/llvm_tools/update_chromeos_llvm_hash.py
@@ -616,11 +616,11 @@ def UpdatePackagesPatchMetadataFile(
   llvm_hash = get_llvm_hash.LLVMHash()
 
   with llvm_hash.CreateTempDirectory() as temp_dir:
-    with get_llvm_hash.CreateTempLLVMRepo(temp_dir) as src_path:
+    with get_llvm_hash.CreateTempLLVMRepo(temp_dir) as dirname:
       # Ensure that 'svn_version' exists in the chromium mirror of LLVM by
       # finding its corresponding git hash.
-      git_hash = get_llvm_hash.GetGitHashFrom(src_path, svn_version)
-      move_head_cmd = ['git', '-C', src_path, 'checkout', git_hash, '-q']
+      git_hash = get_llvm_hash.GetGitHashFrom(dirname, svn_version)
+      move_head_cmd = ['git', '-C', dirname, 'checkout', git_hash, '-q']
       subprocess.run(move_head_cmd, stdout=subprocess.DEVNULL, check=True)
 
       for cur_package in packages:
@@ -636,12 +636,14 @@ def UpdatePackagesPatchMetadataFile(
         if not patches_json_fp.is_file():
           raise RuntimeError(f'patches file {patches_json_fp} is not a file')
 
-        patches_info = patch_utils.apply_all_from_json(
-            svn_version=svn_version,
-            llvm_src_dir=Path(src_path),
-            patches_json_fp=patches_json_fp,
-            continue_on_failure=mode == failure_modes.FailureModes.CONTINUE,
-        )
+        src_path = Path(dirname)
+        with patch_utils.git_clean_context(src_path):
+          patches_info = patch_utils.apply_all_from_json(
+              svn_version=svn_version,
+              llvm_src_dir=src_path,
+              patches_json_fp=patches_json_fp,
+              continue_on_failure=mode == failure_modes.FailureModes.CONTINUE,
+          )
         package_info[cur_package] = patches_info._asdict()
 
   return package_info
diff --git a/llvm_tools/update_chromeos_llvm_hash_unittest.py b/llvm_tools/update_chromeos_llvm_hash_unittest.py
index 569fdcc4..35872324 100755
--- a/llvm_tools/update_chromeos_llvm_hash_unittest.py
+++ b/llvm_tools/update_chromeos_llvm_hash_unittest.py
@@ -653,7 +653,7 @@ class UpdateLLVMHashTest(unittest.TestCase):
     llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
     git_hash = 'a123testhash4'
     svn_version = 1000
-    chroot_path = '/some/path/to/chroot'
+    chroot_path = Path('/some/path/to/chroot')
     patch_metadata_file = 'PATCHES.json'
     git_hash_source = 'google3'
     branch = 'update-LLVM_NEXT_HASH-a123testhash4'
@@ -731,7 +731,7 @@ class UpdateLLVMHashTest(unittest.TestCase):
     # patch results contains a disabled patch in 'disable_patches' mode.
def RetrievedPatchResults(chroot_path, svn_version, packages, mode): - self.assertEqual(chroot_path, '/some/path/to/chroot') + self.assertEqual(chroot_path, Path('/some/path/to/chroot')) self.assertEqual(svn_version, 1000) self.assertEqual(patch_metadata_file, 'PATCHES.json') self.assertListEqual(packages, ['path/to']) @@ -786,8 +786,8 @@ class UpdateLLVMHashTest(unittest.TestCase): llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next git_hash = 'a123testhash5' svn_version = 1000 - chroot_path = '/some/path/to/chroot' patch_metadata_file = 'PATCHES.json' + chroot_path = Path('/some/path/to/chroot') git_hash_source = 'tot' branch = 'update-LLVM_NEXT_HASH-a123testhash5' extra_commit_msg = '\ncommit-message-end' -- cgit v1.2.3 From 06c7bfa5c6e52e01dc2fdcb24b7062f1d06f9bdf Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead <ajordanr@google.com> Date: Thu, 21 Jul 2022 20:38:10 +0000 Subject: llvm_tools: Remove unused patch_metadata_file arg This argument is not actually used anymore. Technically it was never actually needed and it was passed in other ways to the patch_manager.py even before the refactoring. This just removes it from one function's arguments. It's hard to clean the forwarded patch_metadata_file arg in every situation, so let's just do it for this function that we introduced. BUG=None TEST=./run_tests_for llvm_tools/ Change-Id: I5c3f2a217df082cd51362aa70c8b585ad5135e47 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3780680 Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com> Reviewed-by: Manoj Gupta <manojgupta@chromium.org> --- llvm_tools/modify_a_tryjob.py | 1 - llvm_tools/update_chromeos_llvm_hash.py | 6 +----- llvm_tools/update_chromeos_llvm_hash_unittest.py | 11 ++++------- llvm_tools/update_packages_and_run_tests.py | 4 +--- 4 files changed, 6 insertions(+), 16 deletions(-) diff --git a/llvm_tools/modify_a_tryjob.py b/llvm_tools/modify_a_tryjob.py index cea81069..bed99933 100755 --- a/llvm_tools/modify_a_tryjob.py +++ b/llvm_tools/modify_a_tryjob.py @@ -119,7 +119,6 @@ def GetCLAfterUpdatingPackages(packages, git_hash, svn_version, chroot_path, git_hash, svn_version, chroot_path, - patch_metadata_file, failure_modes.FailureModes.DISABLE_PATCHES, svn_option, extra_commit_msg=None) diff --git a/llvm_tools/update_chromeos_llvm_hash.py b/llvm_tools/update_chromeos_llvm_hash.py index fe13f708..88257740 100755 --- a/llvm_tools/update_chromeos_llvm_hash.py +++ b/llvm_tools/update_chromeos_llvm_hash.py @@ -467,8 +467,7 @@ def StagePackagesPatchResultsForCommit(package_info_dict, commit_messages): def UpdatePackages(packages, llvm_variant, git_hash, svn_version, - chroot_path: Path, patch_metadata_file, mode, - git_hash_source, extra_commit_msg): + chroot_path: Path, mode, git_hash_source, extra_commit_msg): """Updates an LLVM hash and uprevs the ebuild of the packages. A temporary repo is created for the changes. The changes are @@ -480,8 +479,6 @@ def UpdatePackages(packages, llvm_variant, git_hash, svn_version, git_hash: The new git hash. svn_version: The SVN-style revision number of git_hash. chroot_path: The absolute path to the chroot. - patch_metadata_file: The name of the .json file in '$FILESDIR/' that has - the patches and its metadata. mode: The mode of the patch manager when handling an applicable patch that failed to apply. Ex. 
'FailureModes.FAIL' @@ -674,7 +671,6 @@ def main(): git_hash, svn_version, args_output.chroot_path, - args_output.patch_metadata_file, failure_modes.FailureModes( args_output.failure_mode), git_hash_source, diff --git a/llvm_tools/update_chromeos_llvm_hash_unittest.py b/llvm_tools/update_chromeos_llvm_hash_unittest.py index 35872324..d4fbfb21 100755 --- a/llvm_tools/update_chromeos_llvm_hash_unittest.py +++ b/llvm_tools/update_chromeos_llvm_hash_unittest.py @@ -654,7 +654,6 @@ class UpdateLLVMHashTest(unittest.TestCase): git_hash = 'a123testhash4' svn_version = 1000 chroot_path = Path('/some/path/to/chroot') - patch_metadata_file = 'PATCHES.json' git_hash_source = 'google3' branch = 'update-LLVM_NEXT_HASH-a123testhash4' extra_commit_msg = None @@ -664,8 +663,7 @@ class UpdateLLVMHashTest(unittest.TestCase): with self.assertRaises(ValueError) as err: update_chromeos_llvm_hash.UpdatePackages( packages_to_update, llvm_variant, git_hash, svn_version, chroot_path, - patch_metadata_file, failure_modes.FailureModes.FAIL, - git_hash_source, extra_commit_msg) + failure_modes.FailureModes.FAIL, git_hash_source, extra_commit_msg) self.assertEqual(str(err.exception), 'Failed to uprev the ebuild.') @@ -733,10 +731,10 @@ class UpdateLLVMHashTest(unittest.TestCase): self.assertEqual(chroot_path, Path('/some/path/to/chroot')) self.assertEqual(svn_version, 1000) - self.assertEqual(patch_metadata_file, 'PATCHES.json') self.assertListEqual(packages, ['path/to']) self.assertEqual(mode, failure_modes.FailureModes.DISABLE_PATCHES) + patch_metadata_file = 'PATCHES.json' PatchInfo = collections.namedtuple('PatchInfo', [ 'applied_patches', 'failed_patches', 'non_applicable_patches', 'disabled_patches', 'removed_patches', 'modified_metadata' @@ -786,7 +784,6 @@ class UpdateLLVMHashTest(unittest.TestCase): llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next git_hash = 'a123testhash5' svn_version = 1000 - patch_metadata_file = 'PATCHES.json' chroot_path = Path('/some/path/to/chroot') git_hash_source = 'tot' branch = 'update-LLVM_NEXT_HASH-a123testhash5' @@ -794,8 +791,8 @@ class UpdateLLVMHashTest(unittest.TestCase): change_list = update_chromeos_llvm_hash.UpdatePackages( packages_to_update, llvm_variant, git_hash, svn_version, chroot_path, - patch_metadata_file, failure_modes.FailureModes.DISABLE_PATCHES, - git_hash_source, extra_commit_msg) + failure_modes.FailureModes.DISABLE_PATCHES, git_hash_source, + extra_commit_msg) self.assertEqual(change_list.url, 'https://some_name/path/to/commit/+/12345') diff --git a/llvm_tools/update_packages_and_run_tests.py b/llvm_tools/update_packages_and_run_tests.py index 258a3950..1d122800 100755 --- a/llvm_tools/update_packages_and_run_tests.py +++ b/llvm_tools/update_packages_and_run_tests.py @@ -19,6 +19,7 @@ import failure_modes import get_llvm_hash import update_chromeos_llvm_hash + VALID_CQ_TRYBOTS = ['llvm', 'llvm-next', 'llvm-tot'] @@ -395,8 +396,6 @@ def main(): args_output = GetCommandLineArgs() - patch_metadata_file = 'PATCHES.json' - svn_option = args_output.llvm_version git_hash, svn_version = get_llvm_hash.GetLLVMHashAndVersionFromSVNOption( @@ -442,7 +441,6 @@ def main(): git_hash, svn_version, args_output.chroot_path, - patch_metadata_file, failure_modes.FailureModes.DISABLE_PATCHES, svn_option, extra_commit_msg=extra_commit_msg) -- cgit v1.2.3 From 5f84460959d28b1bf80e429eede18aaa6ef20ccf Mon Sep 17 00:00:00 2001 From: George Burgess IV <gbiv@google.com> Date: Mon, 25 Jul 2022 12:30:08 -0700 Subject: buildbot_utils: remove out-of-date code It's well past 
2019. Let's remove this.

Style changes are courtesy of `yapf`.

BUG=None
TEST=None

Change-Id: Ic70f01775958fc61790901028569945f76eeb763
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3785491
Tested-by: George Burgess <gbiv@chromium.org>
Commit-Queue: George Burgess <gbiv@chromium.org>
Auto-Submit: George Burgess <gbiv@chromium.org>
Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com>
---
 cros_utils/buildbot_utils.py | 26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)

diff --git a/cros_utils/buildbot_utils.py b/cros_utils/buildbot_utils.py
index 2da5c5e4..3cff9d93 100644
--- a/cros_utils/buildbot_utils.py
+++ b/cros_utils/buildbot_utils.py
@@ -17,6 +17,7 @@ import time
 from cros_utils import command_executer
 from cros_utils import logger
 
+
 INITIAL_SLEEP_TIME = 7200  # 2 hours; wait time before polling buildbot.
 SLEEP_TIME = 600  # 10 minutes; time between polling of buildbot.
 
@@ -177,8 +178,8 @@ def GetTrybotImage(chromeos_root,
       else:
         wait_msg = 'Unable to find build result; job may be running.'
       logger.GetLogger().LogOutput(wait_msg)
-      logger.GetLogger().LogOutput('{0} minutes elapsed.'.format(elapsed / 60))
-      logger.GetLogger().LogOutput('Sleeping {0} seconds.'.format(SLEEP_TIME))
+      logger.GetLogger().LogOutput(f'{elapsed / 60} minutes elapsed.')
+      logger.GetLogger().LogOutput(f'Sleeping {SLEEP_TIME} seconds.')
       time.sleep(SLEEP_TIME)
       elapsed += SLEEP_TIME
     else:
@@ -243,8 +244,9 @@ def GetLatestImage(chromeos_root, path):
 
   ce = command_executer.GetCommandExecuter()
  command = ('gsutil ls gs://chromeos-image-archive/%s' % path)
-  ret, out, _ = ce.ChrootRunCommandWOutput(
-      chromeos_root, command, print_to_console=False)
+  ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root,
+                                           command,
+                                           print_to_console=False)
   if ret != 0:
     raise RuntimeError('Failed to list buckets with command: %s.' % command)
   candidates = [l.split('/')[-2] for l in out.split()]
@@ -253,11 +255,6 @@ def GetLatestImage(chromeos_root, path):
   candidates.sort(reverse=True)
   for c in candidates:
     build = '%s/R%d-%d.%d.%d' % (path, c[0], c[1], c[2], c[3])
-    # Denylist "R79-12384.0.0" image released by mistake.
-    # TODO(crbug.com/992242): Remove the filter by 2019-09-05.
-    if c == [79, 12384, 0, 0]:
-      continue
-
     if DoesImageExist(chromeos_root, build):
       return build
 
@@ -273,16 +270,15 @@ def GetLatestRecipeImage(chromeos_root, path):
 
   ce = command_executer.GetCommandExecuter()
   command = ('gsutil ls gs://chromeos-image-archive/%s' % path)
-  ret, out, _ = ce.ChrootRunCommandWOutput(
-      chromeos_root, command, print_to_console=False)
+  ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root,
+                                           command,
+                                           print_to_console=False)
   if ret != 0:
     raise RuntimeError('Failed to list buckets with command: %s.' % command)
   candidates = [l.split('/')[-2] for l in out.split()]
   candidates = [(fmt.match(c), c) for c in candidates]
-  candidates = [([int(r)
-                  for r in m[0].group(1, 2, 3, 4, 5)], m[1])
-                for m in candidates
-                if m]
+  candidates = [([int(r) for r in m[0].group(1, 2, 3, 4, 5)], m[1])
+                for m in candidates if m]
   candidates.sort(key=lambda x: x[0], reverse=True)
   # Try to get only the last two days of images since nightly tests are run
   # once every other day.
-- cgit v1.2.3


From f1c66c20439fd3465b3033289300a93f2cfc228e Mon Sep 17 00:00:00 2001
From: Michael Benfield <mbenfield@google.com>
Date: Tue, 26 Jul 2022 23:08:24 +0000
Subject: rust-analyzer-chromiumos-wrapper: Support some command line arguments.
VS Code (and possibly other editors) like to pass `--version` and other
arguments, so forward them to `rust-analyzer`.

BUG=b:240341002
TEST=Build, test on VS Code

Change-Id: Icadf05088c4220f058fcdce68489d266368ab1e7
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3785120
Commit-Queue: George Burgess <gbiv@chromium.org>
Auto-Submit: Michael Benfield <mbenfield@google.com>
Reviewed-by: George Burgess <gbiv@chromium.org>
Tested-by: Michael Benfield <mbenfield@google.com>
---
 rust-analyzer-chromiumos-wrapper/src/main.rs | 50 +++++++++++++++++++++++-----
 1 file changed, 42 insertions(+), 8 deletions(-)

diff --git a/rust-analyzer-chromiumos-wrapper/src/main.rs b/rust-analyzer-chromiumos-wrapper/src/main.rs
index 7bc52d26..626108ad 100644
--- a/rust-analyzer-chromiumos-wrapper/src/main.rs
+++ b/rust-analyzer-chromiumos-wrapper/src/main.rs
@@ -20,11 +20,7 @@ use simplelog::{Config, LevelFilter, WriteLogger};
 use serde_json::{from_slice, to_writer, Value};
 
 fn main() -> Result<()> {
-    if env::args().len() > 1 {
-        bail!("rust-analyzer-chromiumos-wrapper doesn't support command line arguments");
-    }
-
-    init_log()?;
+    let args = env::args().skip(1);
 
     let d = env::current_dir()?;
     let chromiumos_root = match find_chromiumos_root(&d) {
@@ -32,10 +28,46 @@ fn main() -> Result<()> {
         None => {
             // It doesn't appear that we're in a chroot. Run the
             // regular rust-analyzer.
-            return Err(process::Command::new("rust-analyzer").exec())?;
+            return Err(process::Command::new("rust-analyzer").args(args).exec())?;
         }
     };
 
+    let args: Vec<String> = args.collect();
+    if !args.is_empty() {
+        // We've received command line arguments, and there are 3 possibilities:
+        // * We just forward the arguments to rust-analyzer and exit.
+        // * We don't support the arguments, so we bail.
+        // * We still need to do our path translation in the LSP protocol.
+        fn run(args: &[String]) -> Result<()> {
+            return Err(process::Command::new("cros_sdk")
+                .args(["--", "rust-analyzer"])
+                .args(args)
+                .exec())?;
+        }
+
+        if args.iter().any(|x| match x.as_str() {
+            "--version" | "--help" | "-h" | "--print-config-schema" => true,
+            _ => false,
+        }) {
+            // With any of these options rust-analyzer will just print something and exit.
+            return run(&args);
+        }
+
+        if !args[0].starts_with("-") {
+            // It's a subcommand, and seemingly none of these need the path translation
+            // rust-analyzer-chromiumos-wrapper provides.
+            return run(&args);
+        }
+
+        if args.iter().any(|x| x == "--log-file") {
+            bail!("rust-analyzer-chromiumos-wrapper doesn't support --log-file");
+        }
+
+        // Otherwise it seems we're probably OK to proceed.
+ } + + init_log()?; + let outside_prefix: &'static str = { let path = chromiumos_root .to_str() @@ -55,8 +87,10 @@ fn main() -> Result<()> { let inside_prefix: &'static str = "file:///mnt/host/source/"; let cmd = "cros_sdk"; - let args: [&str; 2] = ["--", "rust-analyzer"]; - let mut child = KillOnDrop(run_command(cmd, args)?); + let all_args = ["--", "rust-analyzer"] + .into_iter() + .chain(args.iter().map(|x| x.as_str())); + let mut child = KillOnDrop(run_command(cmd, all_args)?); let mut child_stdin = BufWriter::new(child.0.stdin.take().unwrap()); let mut child_stdout = BufReader::new(child.0.stdout.take().unwrap()); -- cgit v1.2.3 From 14668ed47d28f2541792809231cfd08598c12f0c Mon Sep 17 00:00:00 2001 From: Bob Haarman <inglorion@chromium.org> Date: Tue, 26 Jul 2022 15:08:15 -0700 Subject: rust_uprev: yapf and isort When making some changes, I got some complaints from the formatters. Fixing the formatting first, before making edits. BUG=None TEST=./run_tests_for.py rust_tools/rust_uprev.py Change-Id: I4802b40dfeb3348cf2c060737992250b75136e0e Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3788604 Commit-Queue: Bob Haarman <inglorion@chromium.org> Reviewed-by: Michael Benfield <mbenfield@google.com> Tested-by: Bob Haarman <inglorion@chromium.org> --- rust_tools/rust_uprev.py | 69 +++++++++++++++++++++++-------------------- rust_tools/rust_uprev_test.py | 3 +- 2 files changed, 38 insertions(+), 34 deletions(-) diff --git a/rust_tools/rust_uprev.py b/rust_tools/rust_uprev.py index 9ab7e99d..49df7172 100755 --- a/rust_tools/rust_uprev.py +++ b/rust_tools/rust_uprev.py @@ -34,18 +34,20 @@ See `--help` for all available options. """ import argparse -import pathlib import json import logging import os +import pathlib +from pathlib import Path import re import shutil import subprocess import sys -from pathlib import Path from typing import Any, Callable, Dict, List, NamedTuple, Optional, T, Tuple -from llvm_tools import chroot, git +from llvm_tools import chroot +from llvm_tools import git + EQUERY = 'equery' GSUTIL = 'gsutil.py' @@ -275,8 +277,9 @@ def parse_commandline_args() -> argparse.Namespace: return args -def prepare_uprev(rust_version: RustVersion, template: Optional[RustVersion] - ) -> Optional[Tuple[RustVersion, str, RustVersion]]: +def prepare_uprev( + rust_version: RustVersion, template: Optional[RustVersion] +) -> Optional[Tuple[RustVersion, str, RustVersion]]: if template is None: ebuild_path = find_ebuild_for_package('rust') ebuild_name = os.path.basename(ebuild_path) @@ -378,7 +381,8 @@ def flip_mirror_in_ebuild(ebuild_file: Path, add: bool) -> None: f.write(new_contents) -def ebuild_actions(package: str, actions: List[str], +def ebuild_actions(package: str, + actions: List[str], sudo: bool = False) -> None: ebuild_path_inchroot = find_ebuild_for_package(package) cmd = ['ebuild', ebuild_path_inchroot] + actions @@ -574,27 +578,27 @@ def create_rust_uprev(rust_version: RustVersion, 'fetch bootstrap distfiles', lambda: fetch_bootstrap_distfiles( old_bootstrap_version, template_version)) run_step('fetch rust distfiles', lambda: fetch_rust_distfiles(rust_version)) - run_step('update bootstrap ebuild', lambda: update_bootstrap_ebuild( - template_version)) + run_step('update bootstrap ebuild', + lambda: update_bootstrap_ebuild(template_version)) run_step( 'update bootstrap manifest', lambda: update_manifest(rust_bootstrap_path( ).joinpath(f'rust-bootstrap-{template_version}.ebuild'))) - run_step('copy patches', lambda: 
copy_patches(RUST_PATH, template_version, - rust_version)) - ebuild_file = run_step( - 'create ebuild', lambda: create_ebuild(template_ebuild, rust_version)) - run_step( - 'update ebuild', lambda: update_ebuild(ebuild_file, template_version)) - run_step('update manifest to add new version', lambda: update_manifest( - Path(ebuild_file))) + run_step('copy patches', + lambda: copy_patches(RUST_PATH, template_version, rust_version)) + ebuild_file = run_step('create ebuild', + lambda: create_ebuild(template_ebuild, rust_version)) + run_step('update ebuild', + lambda: update_ebuild(ebuild_file, template_version)) + run_step('update manifest to add new version', + lambda: update_manifest(Path(ebuild_file))) if not skip_compile: run_step( - 'emerge rust', lambda: subprocess.check_call( - ['sudo', 'emerge', 'dev-lang/rust'])) - run_step('insert version into rust packages', lambda: update_rust_packages( - rust_version, add=True)) - run_step('upgrade virtual/rust', lambda: update_virtual_rust( - template_version, rust_version)) + 'emerge rust', + lambda: subprocess.check_call(['sudo', 'emerge', 'dev-lang/rust'])) + run_step('insert version into rust packages', + lambda: update_rust_packages(rust_version, add=True)) + run_step('upgrade virtual/rust', + lambda: update_virtual_rust(template_version, rust_version)) def find_rust_versions_in_chroot() -> List[Tuple[RustVersion, str]]: @@ -628,15 +632,16 @@ def remove_files(filename: str, path: str) -> None: def remove_rust_bootstrap_version(version: RustVersion, run_step: Callable[[], T]) -> None: prefix = f'rust-bootstrap-{version}' - run_step('remove old bootstrap ebuild', lambda: remove_files( - f'{prefix}*.ebuild', rust_bootstrap_path())) + run_step('remove old bootstrap ebuild', + lambda: remove_files(f'{prefix}*.ebuild', rust_bootstrap_path())) ebuild_file = find_ebuild_for_package('rust-bootstrap') - run_step('update bootstrap manifest to delete old version', lambda: - update_manifest(ebuild_file)) + run_step('update bootstrap manifest to delete old version', + lambda: update_manifest(ebuild_file)) def remove_rust_uprev(rust_version: Optional[RustVersion], run_step: Callable[[], T]) -> None: + def find_desired_rust_version(): if rust_version: return rust_version, find_ebuild_for_rust_version(rust_version) @@ -652,14 +657,14 @@ def remove_rust_uprev(rust_version: Optional[RustVersion], result_from_json=find_desired_rust_version_from_json, ) run_step( - 'remove patches', lambda: remove_files( - f'files/rust-{delete_version}-*.patch', RUST_PATH)) + 'remove patches', + lambda: remove_files(f'files/rust-{delete_version}-*.patch', RUST_PATH)) run_step('remove ebuild', lambda: remove_files(delete_ebuild, RUST_PATH)) ebuild_file = find_ebuild_for_package('rust') - run_step('update manifest to delete old version', lambda: update_manifest( - ebuild_file)) - run_step('remove version from rust packages', lambda: update_rust_packages( - delete_version, add=False)) + run_step('update manifest to delete old version', + lambda: update_manifest(ebuild_file)) + run_step('remove version from rust packages', + lambda: update_rust_packages(delete_version, add=False)) run_step('remove virtual/rust', lambda: remove_virtual_rust(delete_version)) diff --git a/rust_tools/rust_uprev_test.py b/rust_tools/rust_uprev_test.py index 743e6130..90f59e4b 100755 --- a/rust_tools/rust_uprev_test.py +++ b/rust_tools/rust_uprev_test.py @@ -7,15 +7,14 @@ """Tests for rust_uprev.py""" import os +from pathlib import Path import shutil import subprocess import tempfile import unittest -from 
pathlib import Path from unittest import mock from llvm_tools import git - import rust_uprev from rust_uprev import RustVersion -- cgit v1.2.3 From f1c66c20439fd3465b3033289300a93f2cfc228e Mon Sep 17 00:00:00 2001 From: Bob Haarman <inglorion@chromium.org> Date: Tue, 19 Apr 2022 11:41:38 -0700 Subject: rust_uprev: Remove flip_mirror_in_ebuild Previously, we temporarily switched on RESTRICT="mirror" for an ebuild before updating the manifest. This allows us to fetch distfiles from their original locations (rather than the default, which is to require them to be fetched from mirrors). We don't actually need this, so this change removes the code that does this. BUG=None TEST=unit tests, also tested on rust-1.60.0 uprev Change-Id: I5f29ffad83a5826dbe523db4657d9ea17c43bcff Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3594132 Commit-Queue: Bob Haarman <inglorion@chromium.org> Reviewed-by: George Burgess <gbiv@chromium.org> Tested-by: Bob Haarman <inglorion@chromium.org> --- rust_tools/rust_uprev.py | 25 ------------------------- rust_tools/rust_uprev_test.py | 39 +-------------------------------------- 2 files changed, 1 insertion(+), 63 deletions(-) diff --git a/rust_tools/rust_uprev.py b/rust_tools/rust_uprev.py index 49df7172..7e170443 100755 --- a/rust_tools/rust_uprev.py +++ b/rust_tools/rust_uprev.py @@ -360,27 +360,6 @@ def update_ebuild(ebuild_file: str, new_bootstrap_version) -def flip_mirror_in_ebuild(ebuild_file: Path, add: bool) -> None: - restrict_re = re.compile( - r'(?P<before>RESTRICT=")(?P<values>"[^"]*"|.*)(?P<after>")') - with open(ebuild_file, encoding='utf-8') as f: - contents = f.read() - m = restrict_re.search(contents) - assert m, 'failed to find RESTRICT variable in Rust ebuild' - values = m.group('values') - if add: - if 'mirror' in values: - return - values += ' mirror' - else: - if 'mirror' not in values: - return - values = values.replace(' mirror', '') - new_contents = restrict_re.sub(r'\g<before>%s\g<after>' % values, contents) - with open(ebuild_file, 'w', encoding='utf-8') as f: - f.write(new_contents) - - def ebuild_actions(package: str, actions: List[str], sudo: bool = False) -> None: @@ -482,11 +461,7 @@ def get_distdir() -> os.PathLike: def update_manifest(ebuild_file: os.PathLike) -> None: """Updates the MANIFEST for the ebuild at the given path.""" ebuild = Path(ebuild_file) - logging.info('Added "mirror" to RESTRICT to %s', ebuild.name) - flip_mirror_in_ebuild(ebuild, add=True) ebuild_actions(ebuild.parent.name, ['manifest']) - logging.info('Removed "mirror" to RESTRICT from %s', ebuild.name) - flip_mirror_in_ebuild(ebuild, add=False) def update_rust_packages(rust_version: RustVersion, add: bool) -> None: diff --git a/rust_tools/rust_uprev_test.py b/rust_tools/rust_uprev_test.py index 90f59e4b..2e6e8713 100755 --- a/rust_tools/rust_uprev_test.py +++ b/rust_tools/rust_uprev_test.py @@ -234,48 +234,11 @@ BOOTSTRAP_VERSION="1.3.6" class UpdateManifestTest(unittest.TestCase): """Tests for update_manifest step in rust_uprev""" - # pylint: disable=protected-access - def _run_test_flip_mirror(self, before, after, add, expect_write): - mock_open = mock.mock_open(read_data=f'RESTRICT="{before}"') - with mock.patch('builtins.open', mock_open): - rust_uprev.flip_mirror_in_ebuild('', add=add) - if expect_write: - mock_open.return_value.__enter__().write.assert_called_once_with( - f'RESTRICT="{after}"') - - def test_add_mirror_in_ebuild(self): - self._run_test_flip_mirror(before='variable1 variable2', - 
after='variable1 variable2 mirror', - add=True, - expect_write=True) - - def test_remove_mirror_in_ebuild(self): - self._run_test_flip_mirror(before='variable1 variable2 mirror', - after='variable1 variable2', - add=False, - expect_write=True) - - def test_add_mirror_when_exists(self): - self._run_test_flip_mirror(before='variable1 variable2 mirror', - after='variable1 variable2 mirror', - add=True, - expect_write=False) - - def test_remove_mirror_when_not_exists(self): - self._run_test_flip_mirror(before='variable1 variable2', - after='variable1 variable2', - add=False, - expect_write=False) - - @mock.patch.object(rust_uprev, 'flip_mirror_in_ebuild') @mock.patch.object(rust_uprev, 'ebuild_actions') - def test_update_manifest(self, mock_run, mock_flip): + def test_update_manifest(self, mock_run): ebuild_file = Path('/path/to/rust/rust-1.1.1.ebuild') rust_uprev.update_manifest(ebuild_file) mock_run.assert_called_once_with('rust', ['manifest']) - mock_flip.assert_has_calls( - [mock.call(ebuild_file, add=True), - mock.call(ebuild_file, add=False)]) class UpdateBootstrapEbuildTest(unittest.TestCase): -- cgit v1.2.3 From d7a612f02a7118897045e9664a1aa4d498b7f5ec Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead <ajordanr@google.com> Date: Mon, 8 Aug 2022 23:16:51 +0000 Subject: crosperf: Get crosperf autolock to pass presubmits This just resolves linting issues for our pre-upload hooks. When I wrote this I didn't know how to use the preupload hints properly. :) BUG=b:232827380 TEST=Passes preupload hints; successfully runs crosperf Change-Id: I11329bb52155850dfae8e9b88a6b7a5e36f839ba Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3816791 Reviewed-by: George Burgess <gbiv@chromium.org> Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com> --- crosperf/crosperf_autolock.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/crosperf/crosperf_autolock.py b/crosperf/crosperf_autolock.py index 7fb86b2c..92168599 100755 --- a/crosperf/crosperf_autolock.py +++ b/crosperf/crosperf_autolock.py @@ -6,14 +6,15 @@ """Wrapper script to automatically lock devices for crosperf.""" -import os -import sys import argparse -import subprocess import contextlib -import json -from typing import Optional, Any import dataclasses +import json +import os +import subprocess +import sys +from typing import Any, Dict, List, Optional, Tuple + # Have to do sys.path hackery because crosperf relies on PYTHONPATH # modifications. @@ -21,7 +22,7 @@ PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(PARENT_DIR) -def main(sys_args: list[str]) -> Optional[str]: +def main(sys_args: List[str]) -> Optional[str]: """Run crosperf_autolock. 
Returns error msg or None""" args, leftover_args = parse_args(sys_args) fleet_params = [ @@ -48,7 +49,7 @@ def main(sys_args: list[str]) -> Optional[str]: return None -def parse_args(args: list[str]) -> tuple[Any, list]: +def parse_args(args: List[str]) -> Tuple[Any, List]: """Parse the CLI arguments.""" parser = argparse.ArgumentParser( 'crosperf_autolock', @@ -115,8 +116,8 @@ def _eprint(*msg, **kwargs): print(*msg, file=sys.stderr, **kwargs) -def _run_crosperf(crosfleet_params: list[CrosfleetParams], lock_timeout: float, - leftover_args: list[str]): +def _run_crosperf(crosfleet_params: List[CrosfleetParams], lock_timeout: float, + leftover_args: List[str]): """Autolock devices and run crosperf with leftover arguments. Raises: @@ -167,7 +168,7 @@ def _run_crosperf(crosfleet_params: list[CrosfleetParams], lock_timeout: float, def crosfleet_machine_ctx(board: str, lease_minutes: int, lock_timeout: float, - dims: dict[str, Any], + dims: Dict[str, Any], abandon_timeout: float = 120.0) -> Any: """Acquire dut from crosfleet, and release once it leaves the context. @@ -176,7 +177,7 @@ def crosfleet_machine_ctx(board: str, lease_minutes: Length of lease, in minutes. lock_timeout: How long to wait for a lock until quitting. dims: Dictionary of dimension arguments to pass to crosfleet's '-dims' - abandon_timeout (optional): How long to wait for releasing until quitting. + abandon_timeout: How long to wait for releasing until quitting. Yields: A string representing the crosfleet DUT hostname. @@ -195,7 +196,7 @@ def crosfleet_machine_ctx(board: str, crosfleet_release(dut_hostname, abandon_timeout) -def crosfleet_autolock(board: str, lease_minutes: int, dims: dict[str, Any], +def crosfleet_autolock(board: str, lease_minutes: int, dims: Dict[str, Any], timeout_sec: float) -> str: """Lock a device using crosfleet, paramaterized by the board type. @@ -221,7 +222,7 @@ def crosfleet_autolock(board: str, lease_minutes: int, dims: dict[str, Any], f'-minutes={lease_minutes}', ] if dims: - dims_arg = ','.join('{}={}'.format(k, v) for k, v in dims.items()) + dims_arg = ','.join(f'{k}={v}' for k, v in dims.items()) crosfleet_cmd_args.extend(['-dims', f'{dims_arg}']) try: -- cgit v1.2.3 From c54fcb81c239264388222af4fdf5e4127f064c94 Mon Sep 17 00:00:00 2001 From: Michael Benfield <mbenfield@google.com> Date: Thu, 14 Jul 2022 19:59:38 +0000 Subject: pgo_rust.py: add This is a Python script to handle all aspects of creating and benchmarking PGO profiles for the Rust compiler. Note that this script is not usable without crrev.com/c/3762719, but as no one else will need to use the script until the next Rust uprev, there seems to be no need for Cq-Depend. BUG=b:234046818 TEST=Used the script to enable PGO for Rust 1.62.1 Change-Id: I9a35cee3a2d6350a3a29b85c8d62ef450ce8fc22 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3763629 Commit-Queue: Michael Benfield <mbenfield@google.com> Tested-by: Michael Benfield <mbenfield@google.com> Reviewed-by: George Burgess <gbiv@chromium.org> --- pgo_tools_rust/pgo_rust.py | 541 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 541 insertions(+) create mode 100755 pgo_tools_rust/pgo_rust.py diff --git a/pgo_tools_rust/pgo_rust.py b/pgo_tools_rust/pgo_rust.py new file mode 100755 index 00000000..5e09c1c0 --- /dev/null +++ b/pgo_tools_rust/pgo_rust.py @@ -0,0 +1,541 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 The ChromiumOS Authors. 
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Handle most aspects of creating and benchmarking PGO profiles for Rust.
+
+This is meant to be done at Rust uprev time. Ultimately profdata files need
+to be placed at
+
+gs://chromeos-localmirror/distfiles/rust-pgo-{rust_version}-frontend.profdata.xz
+and
+gs://chromeos-localmirror/distfiles/rust-pgo-{rust_version}-llvm.profdata.xz
+
+The intended flow is that you first get the new Rust version in a shape so that
+it builds, for instance modifying or adding patches as necessary. Note that you
+may need to generate manifests for dev-lang/rust and dev-lang/rust-host before
+the profdata files are created, which will cause the `ebuild manifest` command
+to fail. One way to handle this is to temporarily delete the lines of the
+variable SRC_URI in cros-rustc.eclass which refer to profdata files.
+
+After you have a new working Rust version, you can run the following.
+
+```
+$ ./pgo_rust.py generate         # generate profdata files
+$ ./pgo_rust.py benchmark-pgo    # benchmark with PGO
+$ ./pgo_rust.py benchmark-nopgo  # benchmark without PGO
+$ ./pgo_rust.py upload-profdata  # upload profdata to localmirror
+```
+
+The benchmark steps aren't strictly necessary, but are recommended and will
+upload benchmark data to
+
+gs://chromeos-toolchain-artifacts/rust-pgo/benchmarks/{rust_version}/
+
+Currently by default ripgrep 13.0.0 is used as both the crate to build using an
+instrumented Rust while generating profdata, and the crate to build to
+benchmark Rust. You may wish to experiment with other crates for either role.
+In that case upload your crate to
+
+gs://chromeos-toolchain-artifacts/rust-pgo/crates/{name}-{version}.tar.xz
+
+and use `--crate-name` and `--crate-version` to indicate which crate to build
+to generate profdata (or which crate's generated profdata to use), and
+`--bench-crate-name` to indicate which crate to build in benchmarks.
+
+Notes on various local and GS locations follow.
+
+Note that currently we need to keep separate profdata files for the LLVM and
+frontend components of Rust. This is because the LLVM component is
+instrumented with the system LLVM, but Rust's frontend is instrumented with
+its own bundled LLVM, which may use a different profdata format.
+
+profdata files accessed by ebuilds must be stored in
+
+gs://chromeos-localmirror/distfiles
+
+Specifically, they go to
+
+gs://chromeos-localmirror/distfiles/rust-pgo-{rust-version}-llvm.profdata.xz
+
+gs://chromeos-localmirror/distfiles/
+  rust-pgo-{rust-version}-frontend.profdata.xz
+
+But we can store other data elsewhere, like gs://chromeos-toolchain-artifacts.
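+
+For example, for a Rust 1.62.1 uprev the two uploads would be named
+
+gs://chromeos-localmirror/distfiles/rust-pgo-1.62.1-llvm.profdata.xz
+and
+gs://chromeos-localmirror/distfiles/rust-pgo-1.62.1-frontend.profdata.xz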
+ +GS locations: + +{GS_BASE}/crates/ - store crates we may use for generating profiles or +benchmarking PGO optimized Rust compilers + +{GS_BASE}/benchmarks/{rust_version}/nopgo/ + {bench_crate_name}-{bench_crate_version}-{triple} + +{GS_BASE}/benchmarks/{rust_version}/{crate_name}-{crate_version}/ + {bench_crate_name}-{bench_crate_version}-{triple} + +Local locations: + +{LOCAL_BASE}/crates/ + +{LOCAL_BASE}/llvm-profraw/ + +{LOCAL_BASE}/frontend-profraw/ + +{LOCAL_BASE}/profdata/{crate_name}-{crate_version}/llvm.profdata + +{LOCAL_BASE}/profdata/{crate_name}-{crate_version}/frontend.profdata + +{LOCAL_BASE}/benchmarks/{rust_version}/nopgo/ + {bench_crate_name}-{bench_crate_version}-{triple} + +{LOCAL_BASE}/benchmarks/{rust_version}/{crate_name}-{crate_version}/ + {bench_crate_name}-{bench_crate_version}-{triple} + +{LOCAL_BASE}/llvm.profdata - must go here to be used by Rust ebuild +{LOCAL_BASE}/frontend.profdata - must go here to be used by Rust ebuild +""" + +import argparse +import contextlib +import logging +import os +from pathlib import Path +from pathlib import PurePosixPath +import re +import shutil +import subprocess +import sys +from typing import Dict, List, Optional + + +TARGET_TRIPLES = [ + 'x86_64-cros-linux-gnu', + 'x86_64-pc-linux-gnu', + 'armv7a-cros-linux-gnueabihf', + 'aarch64-cros-linux-gnu', +] + +LOCAL_BASE = Path('/tmp/rust-pgo') + +GS_BASE = PurePosixPath('/chromeos-toolchain-artifacts/rust-pgo') + +GS_DISTFILES = PurePosixPath('/chromeos-localmirror/distfiles') + +CRATE_NAME = 'ripgrep' + +CRATE_VERSION = '13.0.0' + + +@contextlib.contextmanager +def chdir(new_directory: Path): + initial_directory = Path.cwd() + os.chdir(new_directory) + try: + yield + finally: + os.chdir(initial_directory) + + +def run(args: List, + *, + indent: int = 4, + env: Optional[Dict[str, str]] = None, + capture_stdout: bool = False, + message: bool = True) -> Optional[str]: + args = [str(arg) for arg in args] + + if env is None: + new_env = os.environ + else: + new_env = os.environ.copy() + new_env.update(env) + + if message: + if env is None: + logging.info('Running %s', args) + else: + logging.info('Running %s in environment %s', args, env) + + result = subprocess.run(args, + env=new_env, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + encoding='utf-8', + check=False) + + stdout = result.stdout + stderr = result.stderr + if indent != 0: + stdout = re.sub('^', ' ' * indent, stdout, flags=re.MULTILINE) + stderr = re.sub('^', ' ' * indent, stderr, flags=re.MULTILINE) + + if capture_stdout: + ret = result.stdout + else: + logging.info('STDOUT:') + logging.info(stdout) + logging.info('STDERR:') + logging.info(stderr) + ret = None + + result.check_returncode() + + if message: + if env is None: + logging.info('Ran %s\n', args) + else: + logging.info('Ran %s in environment %s\n', args, env) + + return ret + + +def get_rust_version() -> str: + s = run(['rustc', '--version'], capture_stdout=True) + m = re.search(r'\d+\.\d+\.\d+', s) + assert m is not None, repr(s) + return m.group(0) + + +def download_unpack_crate(*, crate_name: str, crate_version: str): + filename_no_extension = f'{crate_name}-{crate_version}' + gs_path = GS_BASE / 'crates' / f'{filename_no_extension}.tar.xz' + local_path = LOCAL_BASE / 'crates' + shutil.rmtree(local_path / f'{crate_name}-{crate_version}', + ignore_errors=True) + with chdir(local_path): + run(['gsutil.py', 'cp', f'gs:/{gs_path}', '.']) + run(['xz', '-d', f'{filename_no_extension}.tar.xz']) + run(['tar', 'xvf', f'{filename_no_extension}.tar']) + + +def 
build_crate(*, + crate_name: str, + crate_version: str, + target_triple: str, + time_file: Optional[str] = None): + local_path = LOCAL_BASE / 'crates' / f'{crate_name}-{crate_version}' + with chdir(local_path): + Path('.cargo').mkdir(exist_ok=True) + with open('.cargo/config.toml', 'w') as f: + f.write('\n'.join(( + '[source.crates-io]', + 'replace-with = "vendored-sources"', + '', + '[source.vendored-sources]', + 'directory = "vendor"', + '', + f'[target.{target_triple}]', + f'linker = "{target_triple}-clang"', + '', + "[target.'cfg(all())']", + 'rustflags = [', + ' "-Clto=thin",', + ' "-Cembed-bitcode=yes",', + ']', + ))) + + run(['cargo', 'clean']) + + cargo_cmd = ['cargo', 'build', '--release', '--target', target_triple] + + if time_file is None: + run(cargo_cmd) + else: + time_cmd = [ + '/usr/bin/time', f'--output={time_file}', + '--format=wall time (s) %e\nuser time (s) %U\nmax RSS %M\n' + ] + run(time_cmd + cargo_cmd) + + +def build_rust(*, + generate_frontend_profile: bool = False, + generate_llvm_profile: bool = False, + use_frontend_profile: bool = False, + use_llvm_profile: bool = False): + + if use_frontend_profile or use_llvm_profile: + assert not generate_frontend_profile and not generate_llvm_profile, ( + "Can't build a compiler to both use profile information and generate it" + ) + + assert not generate_frontend_profile or not generate_llvm_profile, ( + "Can't generate both frontend and LLVM profile information") + + use = '-rust_profile_frontend_use -rust_profile_llvm_use ' + if generate_frontend_profile: + use += 'rust_profile_frontend_generate ' + if generate_llvm_profile: + use += 'rust_profile_llvm_generate ' + if use_frontend_profile: + use += 'rust_profile_frontend_use_local ' + if use_llvm_profile: + use += 'rust_profile_llvm_use_local ' + + # -E to preserve our USE environment variable. + run(['sudo', '-E', 'emerge', 'dev-lang/rust', 'dev-lang/rust-host'], + env={'USE': use}) + + +def merge_profdata(llvm_or_frontend, *, source_directory: Path, dest: Path): + assert llvm_or_frontend in ('llvm', 'frontend') + + # The two `llvm-profdata` programs come from different LLVM versions, and may + # support different versions of the profdata format, so make sure to use the + # right one. 
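+  # Merging collapses every *.profraw file under source_directory into a
+  # single indexed .profdata file at `dest`, which is what the ebuilds
+  # ultimately consume.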
+ llvm_profdata = ('/usr/bin/llvm-profdata' if llvm_or_frontend == 'llvm' else + '/usr/libexec/rust/llvm-profdata') + + dest.parent.mkdir(parents=True, exist_ok=True) + + files = list(source_directory.glob('*.profraw')) + run([llvm_profdata, 'merge', f'--output={dest}'] + files) + + +def do_upload_profdata(*, source: Path, dest: PurePosixPath): + new_path = source.parent / source.name / '.xz' + run(['xz', '--keep', '--compress', '--force', source]) + upload_file(source=new_path, dest=dest, public_read=True) + + +def upload_file(*, + source: Path, + dest: PurePosixPath, + public_read: bool = False): + if public_read: + run(['gsutil.py', 'cp', '-a', 'public-read', source, f'gs:/{dest}']) + else: + run(['gsutil.py', 'cp', source, f'gs:/{dest}']) + + +def maybe_download_crate(*, crate_name: str, crate_version: str): + directory = LOCAL_BASE / 'crates' / f'{crate_name}-{crate_version}' + if directory.is_dir(): + logging.info('Crate already downloaded') + else: + logging.info('Downloading crate') + download_unpack_crate(crate_name=crate_name, crate_version=crate_version) + + +def generate(args): + maybe_download_crate(crate_name=args.crate_name, + crate_version=args.crate_version) + + llvm_dir = LOCAL_BASE / 'llvm-profraw' + shutil.rmtree(llvm_dir, ignore_errors=True) + frontend_dir = LOCAL_BASE / 'frontend-profraw' + shutil.rmtree(frontend_dir, ignore_errors=True) + + logging.info('Building Rust instrumented for llvm') + build_rust(generate_llvm_profile=True) + + llvm_dir.mkdir(parents=True, exist_ok=True) + for triple in TARGET_TRIPLES: + logging.info('Building crate with LLVM instrumentation, for triple %s', + triple) + build_crate(crate_name=args.crate_name, + crate_version=args.crate_version, + target_triple=triple) + + logging.info('Merging LLVM profile data') + merge_profdata( + 'llvm', + source_directory=LOCAL_BASE / 'llvm-profraw', + dest=(LOCAL_BASE / 'profdata' / + f'{args.crate_name}-{args.crate_version}' / 'llvm.profdata')) + + logging.info('Building Rust instrumented for frontend') + build_rust(generate_frontend_profile=True) + + frontend_dir.mkdir(parents=True, exist_ok=True) + for triple in TARGET_TRIPLES: + logging.info('Building crate with frontend instrumentation, for triple %s', + triple) + build_crate(crate_name=args.crate_name, + crate_version=args.crate_version, + target_triple=triple) + + logging.info('Merging frontend profile data') + merge_profdata( + 'frontend', + source_directory=LOCAL_BASE / 'frontend-profraw', + dest=(LOCAL_BASE / 'profdata' / + f'{args.crate_name}-{args.crate_version}' / 'frontend.profdata')) + + +def benchmark_nopgo(args): + logging.info('Building Rust, no PGO') + build_rust() + + time_directory = LOCAL_BASE / 'benchmarks' / 'nopgo' + logging.info('Benchmarking crate build with no PGO') + time_directory.mkdir(parents=True, exist_ok=True) + for triple in TARGET_TRIPLES: + build_crate( + crate_name=args.bench_crate_name, + crate_version=args.bench_crate_version, + target_triple=triple, + time_file=( + time_directory / + f'{args.bench_crate_name}-{args.bench_crate_version}-{triple}')) + + rust_version = get_rust_version() + dest_directory = GS_BASE / 'benchmarks' / rust_version / 'nopgo' + logging.info('Uploading benchmark data') + for file in time_directory.iterdir(): + upload_file(source=time_directory / file.name, + dest=dest_directory / file.name) + + +def benchmark_pgo(args): + maybe_download_crate(crate_name=args.bench_crate_name, + crate_version=args.bench_crate_version) + + files_dir = 
Path('/mnt/host/source/src/third_party/chromiumos-overlay', + 'dev-lang/rust/files') + + logging.info('Copying profile data to be used in building Rust') + run([ + 'cp', + (LOCAL_BASE / 'profdata' / f'{args.crate_name}-{args.crate_version}' / + 'llvm.profdata'), files_dir + ]) + run([ + 'cp', + (LOCAL_BASE / 'profdata' / f'{args.crate_name}-{args.crate_version}' / + 'frontend.profdata'), files_dir + ]) + + logging.info('Building Rust with PGO') + build_rust(use_llvm_profile=True, use_frontend_profile=True) + + time_directory = (LOCAL_BASE / 'benchmarks' / + f'{args.crate_name}-{args.crate_version}') + time_directory.mkdir(parents=True, exist_ok=True) + logging.info('Benchmarking crate built with PGO') + for triple in TARGET_TRIPLES: + build_crate( + crate_name=args.bench_crate_name, + crate_version=args.bench_crate_version, + target_triple=triple, + time_file=( + time_directory / + f'{args.bench_crate_name}-{args.bench_crate_version}-{triple}')) + + rust_version = get_rust_version() + dest_directory = (GS_BASE / 'benchmarks' / rust_version / + f'{args.crate_name}-{args.crate_version}') + logging.info('Uploading benchmark data') + for file in time_directory.iterdir(): + upload_file(source=time_directory / file.name, + dest=dest_directory / file.name) + + +def upload_profdata(args): + directory = (LOCAL_BASE / 'profdata /' + f'{args.crate_name}-{args.crate_version}') + rust_version = get_rust_version() + + logging.info('Uploading LLVM profdata') + do_upload_profdata(source=directory / 'llvm.profdata', + dest=(GS_DISTFILES / + f'rust-pgo-{rust_version}-llvm.profdata.xz')) + + logging.info('Uploading frontend profdata') + do_upload_profdata(source=directory / 'frontend.profdata', + dest=(GS_DISTFILES / + f'rust-pgo-{rust_version}-frontend.profdata.xz')) + + +def main(): + logging.basicConfig(stream=sys.stdout, + level=logging.NOTSET, + format='%(message)s') + + parser = argparse.ArgumentParser( + prog=sys.argv[0], + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command', help='') + subparsers.required = True + + parser_generate = subparsers.add_parser( + 'generate', + help='Generate LLVM and frontend profdata files by building ' + 'instrumented Rust compilers, and using them to build the ' + 'indicated crate (downloading the crate if necessary).') + parser_generate.set_defaults(func=generate) + parser_generate.add_argument('--crate-name', + default=CRATE_NAME, + help='Name of the crate to build') + parser_generate.add_argument('--crate-version', + default=CRATE_VERSION, + help='Version of the crate to build') + + parser_benchmark_nopgo = subparsers.add_parser( + 'benchmark-nopgo', + help='Build the Rust compiler without PGO, benchmark ' + 'the build of the indicated crate, and upload ' + 'the benchmark data.') + parser_benchmark_nopgo.set_defaults(func=benchmark_nopgo) + parser_benchmark_nopgo.add_argument( + '--bench-crate-name', + default=CRATE_NAME, + help='Name of the crate whose build to benchmark') + parser_benchmark_nopgo.add_argument( + '--bench-crate-version', + default=CRATE_VERSION, + help='Version of the crate whose benchmark to build') + + parser_benchmark_pgo = subparsers.add_parser( + 'benchmark-pgo', + help='Build the Rust compiler using PGO with the indicated ' + 'profdata files, benchmark the build of the indicated crate, ' + 'and upload the benchmark data.') + parser_benchmark_pgo.set_defaults(func=benchmark_pgo) + parser_benchmark_pgo.add_argument( + '--bench-crate-name', + default=CRATE_NAME, + 
help='Name of the crate whose build to benchmark') + parser_benchmark_pgo.add_argument( + '--bench-crate-version', + default=CRATE_VERSION, + help='Version of the crate whose benchmark to build') + parser_benchmark_pgo.add_argument( + '--crate-name', + default=CRATE_NAME, + help='Name of the crate whose profile to use') + parser_benchmark_pgo.add_argument( + '--crate-version', + default=CRATE_VERSION, + help='Version of the crate whose profile to use') + + parser_upload_profdata = subparsers.add_parser( + 'upload-profdata', help='Upload the profdata files') + parser_upload_profdata.set_defaults(func=upload_profdata) + parser_upload_profdata.add_argument( + '--crate-name', + default=CRATE_NAME, + help='Name of the crate whose profile to use') + parser_upload_profdata.add_argument( + '--crate-version', + default=CRATE_VERSION, + help='Version of the crate whose profile to use') + + args = parser.parse_args() + + (LOCAL_BASE / 'crates').mkdir(parents=True, exist_ok=True) + (LOCAL_BASE / 'llvm-profraw').mkdir(parents=True, exist_ok=True) + (LOCAL_BASE / 'frontend-profraw').mkdir(parents=True, exist_ok=True) + (LOCAL_BASE / 'benchmarks').mkdir(parents=True, exist_ok=True) + + args.func(args) + + return 0 + + +if __name__ == '__main__': + sys.exit(main()) -- cgit v1.2.3 From 6bf6e8bb14f3943f1d050c3651ac037e699cee40 Mon Sep 17 00:00:00 2001 From: Manoj Gupta <manojgupta@google.com> Date: Mon, 15 Aug 2022 23:46:04 +0000 Subject: afdo_metadata: Publish the new kernel profiles Update chromeos-kernel-4.4 Update chromeos-kernel-4.14 Update chromeos-kernel-4.19 Update chromeos-kernel-5.4 Update chromeos-kernel-5.10 BUG=None TEST=Verified in kernel-release-afdo-verify-orchestrator Change-Id: Idbaf0b4080834a449fd14c8b6294cdde3ac9d8c5 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3832268 Reviewed-by: George Burgess <gbiv@chromium.org> Tested-by: Manoj Gupta <manojgupta@chromium.org> Auto-Submit: Manoj Gupta <manojgupta@chromium.org> Commit-Queue: George Burgess <gbiv@chromium.org> --- afdo_metadata/kernel_afdo.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json index 1d2f1cc2..043b9607 100644 --- a/afdo_metadata/kernel_afdo.json +++ b/afdo_metadata/kernel_afdo.json @@ -1,17 +1,17 @@ { "chromeos-kernel-4_4": { - "name": "R105-14943.0-1657532004" + "name": "R106-14989.36-1660555931" }, "chromeos-kernel-4_14": { - "name": "R105-14943.0-1657532105" + "name": "R106-14989.36-1660556022" }, "chromeos-kernel-4_19": { - "name": "R105-14943.0-1657532036" + "name": "R106-14989.36-1660555896" }, "chromeos-kernel-5_4": { - "name": "R105-14943.0-1657531926" + "name": "R106-14989.36-1660555930" }, "chromeos-kernel-5_10": { - "name": "R105-14943.0-1657531932" + "name": "R106-14989.36-1660555953" } } -- cgit v1.2.3 From c06ff64d26f90e7bb8047358d2d4c6c25619c6d3 Mon Sep 17 00:00:00 2001 From: Adrian Dole <adriandole@google.com> Date: Wed, 17 Aug 2022 21:35:02 +0000 Subject: toolchain_utils: remove libcxxabi BUG=b:235023022 TEST=./update_chromeos_llvm_hash_unittest.py Change-Id: I4d015daeefff57087852c5aae0faad7d439d9267 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3834534 Tested-by: Adrian Dole <adriandole@google.com> Commit-Queue: Adrian Dole <adriandole@google.com> Reviewed-by: Adrian Dole <adriandole@google.com> Auto-Submit: Adrian Dole <adriandole@google.com> Reviewed-by: Manoj Gupta <manojgupta@chromium.org> --- 
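Note: after this change, DEFAULT_PACKAGES in update_chromeos_llvm_hash.py is
expected to be the following (a sketch based on the removal hunk below; the
exact formatting in the file may differ):

    DEFAULT_PACKAGES = [
        'sys-devel/llvm',
        'sys-libs/compiler-rt',
        'sys-libs/libcxx',
        'sys-libs/llvm-libunwind',
    ]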
llvm_tools/README.md | 9 ++++----- llvm_tools/update_chromeos_llvm_hash.py | 1 - 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/llvm_tools/README.md b/llvm_tools/README.md index 86a4b778..e2ef34f1 100644 --- a/llvm_tools/README.md +++ b/llvm_tools/README.md @@ -19,10 +19,9 @@ password)** ### Usage This script is used for updating a package's LLVM hash (sys-devel/llvm, -sys-libs/compiler-rt, sys-libs/libcxx, sys-libs/libcxxabi, and -sys-libs/llvm-libunwind) and then run tests after updating the git hash. -There are three ways to test the change, including starting tryjobs, -recipe builders or using cq+1. +sys-libs/compiler-rt, sys-libs/libcxx, and sys-libs/llvm-libunwind) +and then run tests after updating the git hash. There are three ways to test +the change, including starting tryjobs, recipe builders or using cq+1. An example when this script should be run is when certain boards would like to be tested with the updated `LLVM_NEXT_HASH`. @@ -118,7 +117,7 @@ For example, to create a roll CL to the git hash of revision 367622: ``` $ ./update_chromeos_llvm_hash.py \ --update_packages sys-devel/llvm sys-libs/compiler-rt \ - sys-libs/libcxx sys-libs/libcxxabi sys-libs/llvm-libunwind \ + sys-libs/libcxx sys-libs/llvm-libunwind \ 'dev-util/lldb-server' \ --llvm_version 367622 \ --failure_mode disable_patches diff --git a/llvm_tools/update_chromeos_llvm_hash.py b/llvm_tools/update_chromeos_llvm_hash.py index 88257740..5d5a888f 100755 --- a/llvm_tools/update_chromeos_llvm_hash.py +++ b/llvm_tools/update_chromeos_llvm_hash.py @@ -34,7 +34,6 @@ DEFAULT_PACKAGES = [ 'sys-devel/llvm', 'sys-libs/compiler-rt', 'sys-libs/libcxx', - 'sys-libs/libcxxabi', 'sys-libs/llvm-libunwind', ] -- cgit v1.2.3 From 0abb0b34ef29c06f20208f7bc28040868169b0c1 Mon Sep 17 00:00:00 2001 From: Adrian Dole <adriandole@google.com> Date: Tue, 23 Aug 2022 21:52:30 +0000 Subject: toolchain-utils: update manifest when updating LLVM BUG=b:234635394 TEST=./update_chromeos_llvm_hash_unittest.py Change-Id: Iff52984a44b41b230a74ebef7b5f8dd25b581237 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3852368 Tested-by: Adrian Dole <adriandole@google.com> Commit-Queue: Adrian Dole <adriandole@google.com> Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Auto-Submit: Adrian Dole <adriandole@google.com> Reviewed-by: Adrian Dole <adriandole@google.com> --- llvm_tools/update_chromeos_llvm_hash.py | 121 ++++++++++++++--------- llvm_tools/update_chromeos_llvm_hash_unittest.py | 41 ++++++-- 2 files changed, 110 insertions(+), 52 deletions(-) diff --git a/llvm_tools/update_chromeos_llvm_hash.py b/llvm_tools/update_chromeos_llvm_hash.py index 5d5a888f..3a2ce2cf 100755 --- a/llvm_tools/update_chromeos_llvm_hash.py +++ b/llvm_tools/update_chromeos_llvm_hash.py @@ -10,8 +10,6 @@ For each package, a temporary repo is created and the changes are uploaded for review. """ -from __future__ import print_function - import argparse import datetime import enum @@ -37,6 +35,8 @@ DEFAULT_PACKAGES = [ 'sys-libs/llvm-libunwind', ] +DEFAULT_MANIFEST_PACKAGES = ['sys-devel/llvm'] + # Specify which LLVM hash to update class LLVMVariant(enum.Enum): @@ -87,11 +87,15 @@ def GetCommandLineArgs(): help='the path to the chroot (default: %(default)s)') # Add argument for specific builds to uprev and update their llvm-next hash. 
- parser.add_argument('--update_packages', - default=DEFAULT_PACKAGES, - required=False, - nargs='+', - help='the ebuilds to update their hash for llvm-next ' + parser.add_argument( + '--update_packages', + default=','.join(DEFAULT_PACKAGES), + help='Comma-separated ebuilds to update llvm-next hash for ' + '(default: %(default)s)') + + parser.add_argument('--manifest_packages', + default=','.join(DEFAULT_MANIFEST_PACKAGES), + help='Comma-separated ebuilds to update manifests for ' '(default: %(default)s)') # Add argument for whether to display command contents to `stdout`. @@ -113,7 +117,7 @@ def GetCommandLineArgs(): type=get_llvm_hash.IsSvnOption, required=True, help='which git hash to use. Either a svn revision, or one ' - 'of %s' % sorted(get_llvm_hash.KNOWN_HASH_SOURCES)) + f'of {sorted(get_llvm_hash.KNOWN_HASH_SOURCES)}') # Add argument for the mode of the patch management when handling patches. parser.add_argument( @@ -175,7 +179,7 @@ def GetEbuildPathsFromSymLinkPaths(symlinks): # then add the ebuild path to the dict. for cur_symlink in symlinks: if not os.path.islink(cur_symlink): - raise ValueError('Invalid symlink provided: %s' % cur_symlink) + raise ValueError(f'Invalid symlink provided: {cur_symlink}') # Construct the absolute path to the ebuild. ebuild_path = os.path.realpath(cur_symlink) @@ -211,9 +215,9 @@ def UpdateEbuildLLVMHash(ebuild_path, llvm_variant, git_hash, svn_version): # gets updated to the temporary file. if not os.path.isfile(ebuild_path): - raise ValueError('Invalid ebuild path provided: %s' % ebuild_path) + raise ValueError(f'Invalid ebuild path provided: {ebuild_path}') - temp_ebuild_file = '%s.temp' % ebuild_path + temp_ebuild_file = f'{ebuild_path}.temp' with open(ebuild_path) as ebuild_file: # write updates to a temporary file in case of interrupts @@ -248,15 +252,14 @@ def ReplaceLLVMHash(ebuild_lines, llvm_variant, git_hash, svn_version): for cur_line in ebuild_lines: if not is_updated and llvm_regex.search(cur_line): # Update the git hash and revision number. 
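+      # For illustration, with llvm-next and revision 367622 this yields a
+      # line like: LLVM_NEXT_HASH="<git hash>" # r367622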
- cur_line = '%s=\"%s\" # r%d\n' % (llvm_variant.value, git_hash, - svn_version) + cur_line = f'{llvm_variant.value}=\"{git_hash}\" # r{svn_version}\n' is_updated = True yield cur_line if not is_updated: - raise ValueError('Failed to update %s' % llvm_variant.value) + raise ValueError(f'Failed to update {llvm_variant.value}') def UprevEbuildSymlink(symlink): @@ -273,7 +276,7 @@ def UprevEbuildSymlink(symlink): """ if not os.path.islink(symlink): - raise ValueError('Invalid symlink provided: %s' % symlink) + raise ValueError(f'Invalid symlink provided: {symlink}') new_symlink, is_changed = re.subn( r'r([0-9]+).ebuild', @@ -307,7 +310,7 @@ def UprevEbuildToVersion(symlink, svn_version, git_hash): """ if not os.path.islink(symlink): - raise ValueError('Invalid symlink provided: %s' % symlink) + raise ValueError(f'Invalid symlink provided: {symlink}') ebuild = os.path.realpath(symlink) llvm_major_version = get_llvm_hash.GetLLVMMajorVersion(git_hash) @@ -343,7 +346,7 @@ def UprevEbuildToVersion(symlink, svn_version, git_hash): subprocess.check_output(['ln', '-s', '-r', new_ebuild, new_symlink]) if not os.path.islink(new_symlink): - raise ValueError('Invalid symlink name: %s' % new_ebuild[:-len('.ebuild')]) + raise ValueError(f'Invalid symlink name: {new_ebuild[:-len(".ebuild")]}') subprocess.check_output(['git', '-C', symlink_dir, 'add', new_symlink]) @@ -403,8 +406,8 @@ def StagePatchMetadataFileForCommit(patch_metadata_file_path): """ if not os.path.isfile(patch_metadata_file_path): - raise ValueError('Invalid patch metadata file provided: %s' % - patch_metadata_file_path) + raise ValueError( + f'Invalid patch metadata file provided: {patch_metadata_file_path}') # Cmd to stage the patch metadata file for commit. subprocess.check_output([ @@ -435,14 +438,15 @@ def StagePackagesPatchResultsForCommit(package_info_dict, commit_messages): if (patch_info_dict['disabled_patches'] or patch_info_dict['removed_patches'] or patch_info_dict['modified_metadata']): - cur_package_header = '\nFor the package %s:' % package_name + cur_package_header = f'\nFor the package {package_name}:' commit_messages.append(cur_package_header) # Add to the commit message that the patch metadata file was modified. if patch_info_dict['modified_metadata']: patch_metadata_path = patch_info_dict['modified_metadata'] - commit_messages.append('The patch metadata file %s was modified' % - os.path.basename(patch_metadata_path)) + metadata_file_name = os.path.basename(patch_metadata_path) + commit_messages.append( + f'The patch metadata file {metadata_file_name} was modified') StagePatchMetadataFileForCommit(patch_metadata_path) @@ -465,8 +469,25 @@ def StagePackagesPatchResultsForCommit(package_info_dict, commit_messages): return commit_messages -def UpdatePackages(packages, llvm_variant, git_hash, svn_version, - chroot_path: Path, mode, git_hash_source, extra_commit_msg): +def UpdateManifests(packages: List[str], chroot_path: Path): + """Updates manifest files for packages. + + Args: + packages: A list of packages to update manifests for. + chroot_path: The absolute path to the chroot. + + Raises: + CalledProcessError: ebuild failed to update manifest. 
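+
+  Example (chroot path illustrative):
+    UpdateManifests(['sys-devel/llvm'], Path('/path/to/chroot'))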
+ """ + manifest_ebuilds = chroot.GetChrootEbuildPaths(chroot_path, packages) + for ebuild_path in manifest_ebuilds: + subprocess_helpers.ChrootRunCommand(chroot_path, + ['ebuild', ebuild_path, 'manifest']) + + +def UpdatePackages(packages, manifest_packages: List[str], llvm_variant, + git_hash, svn_version, chroot_path: Path, mode, + git_hash_source, extra_commit_msg): """Updates an LLVM hash and uprevs the ebuild of the packages. A temporary repo is created for the changes. The changes are @@ -474,6 +495,7 @@ def UpdatePackages(packages, llvm_variant, git_hash, svn_version, Args: packages: A list of all the packages that are going to be updated. + manifest_packages: A list of packages to update manifests for. llvm_variant: The LLVM hash to update. git_hash: The new git hash. svn_version: The SVN-style revision number of git_hash. @@ -505,13 +527,12 @@ def UpdatePackages(packages, llvm_variant, git_hash, svn_version, if llvm_variant == LLVMVariant.next: commit_message_header = 'llvm-next' if git_hash_source in get_llvm_hash.KNOWN_HASH_SOURCES: - commit_message_header += ('/%s: upgrade to %s (r%d)' % - (git_hash_source, git_hash, svn_version)) + commit_message_header += ( + f'/{git_hash_source}: upgrade to {git_hash} (r{svn_version})') else: - commit_message_header += (': upgrade to %s (r%d)' % - (git_hash, svn_version)) + commit_message_header += (f': upgrade to {git_hash} (r{svn_version})') - commit_messages = [ + commit_lines = [ commit_message_header + '\n', 'The following packages have been updated:', ] @@ -538,8 +559,13 @@ def UpdatePackages(packages, llvm_variant, git_hash, svn_version, cur_dir_name = os.path.basename(path_to_ebuild_dir) parent_dir_name = os.path.basename(os.path.dirname(path_to_ebuild_dir)) - packages.append('%s/%s' % (parent_dir_name, cur_dir_name)) - commit_messages.append('%s/%s' % (parent_dir_name, cur_dir_name)) + packages.append(f'{parent_dir_name}/{cur_dir_name}') + commit_lines.append(f'{parent_dir_name}/{cur_dir_name}') + + if manifest_packages: + UpdateManifests(manifest_packages, chroot_path) + commit_lines.append('Updated manifest for:') + commit_lines.extend(manifest_packages) EnsurePackageMaskContains(chroot_path, git_hash) @@ -548,13 +574,13 @@ def UpdatePackages(packages, llvm_variant, git_hash, svn_version, chroot_path, svn_version, packages, mode) # Update the commit message if changes were made to a package's patches. 
- commit_messages = StagePackagesPatchResultsForCommit( - package_info_dict, commit_messages) + commit_lines = StagePackagesPatchResultsForCommit(package_info_dict, + commit_lines) if extra_commit_msg: - commit_messages.append(extra_commit_msg) + commit_lines.append(extra_commit_msg) - change_list = git.UploadChanges(repo_path, branch, commit_messages) + change_list = git.UploadChanges(repo_path, branch, commit_lines) finally: git.DeleteBranch(repo_path, branch) @@ -580,7 +606,7 @@ def EnsurePackageMaskContains(chroot_path, git_hash): 'profiles/targets/chromeos/package.mask') with open(mask_path, 'r+') as mask_file: mask_contents = mask_file.read() - expected_line = '=sys-devel/llvm-%s.0_pre*\n' % llvm_major_version + expected_line = f'=sys-devel/llvm-{llvm_major_version}.0_pre*\n' if expected_line not in mask_contents: mask_file.write(expected_line) @@ -665,19 +691,22 @@ def main(): git_hash, svn_version = get_llvm_hash.GetLLVMHashAndVersionFromSVNOption( git_hash_source) - change_list = UpdatePackages(args_output.update_packages, - llvm_variant, - git_hash, - svn_version, - args_output.chroot_path, - failure_modes.FailureModes( + packages = args_output.update_packages.split(',') + manifest_packages = args_output.manifest_packages.split(',') + change_list = UpdatePackages(packages=packages, + manifest_packages=manifest_packages, + llvm_variant=llvm_variant, + git_hash=git_hash, + svn_version=svn_version, + chroot_path=args_output.chroot_path, + mode=failure_modes.FailureModes( args_output.failure_mode), - git_hash_source, + git_hash_source=git_hash_source, extra_commit_msg=None) - print('Successfully updated packages to %s (%d)' % (git_hash, svn_version)) - print('Gerrit URL: %s' % change_list.url) - print('Change list number: %d' % change_list.cl_number) + print(f'Successfully updated packages to {git_hash} ({svn_version})') + print(f'Gerrit URL: {change_list.url}') + print(f'Change list number: {change_list.cl_number}') if __name__ == '__main__': diff --git a/llvm_tools/update_chromeos_llvm_hash_unittest.py b/llvm_tools/update_chromeos_llvm_hash_unittest.py index d4fbfb21..9a51b62a 100755 --- a/llvm_tools/update_chromeos_llvm_hash_unittest.py +++ b/llvm_tools/update_chromeos_llvm_hash_unittest.py @@ -20,10 +20,10 @@ import chroot import failure_modes import get_llvm_hash import git +import subprocess_helpers import test_helpers import update_chromeos_llvm_hash - # These are unittests; protected access is OK to a point. # pylint: disable=protected-access @@ -311,6 +311,22 @@ class UpdateLLVMHashTest(unittest.TestCase): self.assertEqual(mock_command_output.call_args_list[3], mock.call(expected_cmd)) + @mock.patch.object(chroot, + 'GetChrootEbuildPaths', + return_value=['/chroot/path/test.ebuild']) + @mock.patch.object(subprocess, 'check_output', return_value='') + def testManifestUpdate(self, mock_subprocess, mock_ebuild_paths): + manifest_packages = ['sys-devel/llvm'] + chroot_path = '/path/to/chroot' + update_chromeos_llvm_hash.UpdateManifests(manifest_packages, chroot_path) + + args = mock_subprocess.call_args[0][-1] + manifest_cmd = [ + 'cros_sdk', '--', 'ebuild', '/chroot/path/test.ebuild', 'manifest' + ] + self.assertEqual(args, manifest_cmd) + mock_ebuild_paths.assert_called_once() + @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion') @mock.patch.object(os.path, 'islink', return_value=True) @mock.patch.object(os.path, 'realpath') @@ -662,8 +678,15 @@ class UpdateLLVMHashTest(unittest.TestCase): # the 'try' block by UprevEbuildSymlink function. 
with self.assertRaises(ValueError) as err: update_chromeos_llvm_hash.UpdatePackages( - packages_to_update, llvm_variant, git_hash, svn_version, chroot_path, - failure_modes.FailureModes.FAIL, git_hash_source, extra_commit_msg) + packages=packages_to_update, + manifest_packages=[], + llvm_variant=llvm_variant, + git_hash=git_hash, + svn_version=svn_version, + chroot_path=chroot_path, + mode=failure_modes.FailureModes.FAIL, + git_hash_source=git_hash_source, + extra_commit_msg=extra_commit_msg) self.assertEqual(str(err.exception), 'Failed to uprev the ebuild.') @@ -790,9 +813,15 @@ class UpdateLLVMHashTest(unittest.TestCase): extra_commit_msg = '\ncommit-message-end' change_list = update_chromeos_llvm_hash.UpdatePackages( - packages_to_update, llvm_variant, git_hash, svn_version, chroot_path, - failure_modes.FailureModes.DISABLE_PATCHES, git_hash_source, - extra_commit_msg) + packages=packages_to_update, + manifest_packages=[], + llvm_variant=llvm_variant, + git_hash=git_hash, + svn_version=svn_version, + chroot_path=chroot_path, + mode=failure_modes.FailureModes.DISABLE_PATCHES, + git_hash_source=git_hash_source, + extra_commit_msg=extra_commit_msg) self.assertEqual(change_list.url, 'https://some_name/path/to/commit/+/12345') -- cgit v1.2.3 From a077084e5356ed2074d4fa76b4333b76ccd1a26e Mon Sep 17 00:00:00 2001 From: Adrian Dole <adriandole@google.com> Date: Wed, 31 Aug 2022 18:25:52 +0000 Subject: toolchain_utils: fix uses of UpdatePackages BUG=b:243897875 TEST=./modify_a_tryjob_unittest.py && ./update_packages_and_run_tests_unittest.py Change-Id: I46bb7577134134e44e5f0571261031475ebdef7d Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3867948 Auto-Submit: Adrian Dole <adriandole@google.com> Tested-by: Adrian Dole <adriandole@google.com> Commit-Queue: Adrian Dole <adriandole@google.com> Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Reviewed-by: Adrian Dole <adriandole@google.com> --- llvm_tools/modify_a_tryjob.py | 15 ++++++++------- llvm_tools/update_packages_and_run_tests.py | 15 ++++++++------- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/llvm_tools/modify_a_tryjob.py b/llvm_tools/modify_a_tryjob.py index bed99933..53f783ba 100755 --- a/llvm_tools/modify_a_tryjob.py +++ b/llvm_tools/modify_a_tryjob.py @@ -114,13 +114,14 @@ def GetCLAfterUpdatingPackages(packages, git_hash, svn_version, chroot_path, """Updates the packages' LLVM_NEXT.""" change_list = update_chromeos_llvm_hash.UpdatePackages( - packages, - update_chromeos_llvm_hash.LLVMVariant.next, - git_hash, - svn_version, - chroot_path, - failure_modes.FailureModes.DISABLE_PATCHES, - svn_option, + packages=packages, + manifest_packages=[], + llvm_variant=update_chromeos_llvm_hash.LLVMVariant.next, + git_hash=git_hash, + svn_version=svn_version, + chroot_path=chroot_path, + mode=failure_modes.FailureModes.DISABLE_PATCHES, + git_hash_source=svn_option, extra_commit_msg=None) print('\nSuccessfully updated packages to %d' % svn_version) diff --git a/llvm_tools/update_packages_and_run_tests.py b/llvm_tools/update_packages_and_run_tests.py index 1d122800..477caa61 100755 --- a/llvm_tools/update_packages_and_run_tests.py +++ b/llvm_tools/update_packages_and_run_tests.py @@ -436,13 +436,14 @@ def main(): extra_commit_msg += cq_trybot_msg change_list = update_chromeos_llvm_hash.UpdatePackages( - update_chromeos_llvm_hash.DEFAULT_PACKAGES, - llvm_variant, - git_hash, - svn_version, - args_output.chroot_path, - failure_modes.FailureModes.DISABLE_PATCHES, - 
svn_option,
+        packages=update_chromeos_llvm_hash.DEFAULT_PACKAGES,
+        manifest_packages=[],
+        llvm_variant=llvm_variant,
+        git_hash=git_hash,
+        svn_version=svn_version,
+        chroot_path=args_output.chroot_path,
+        mode=failure_modes.FailureModes.DISABLE_PATCHES,
+        git_hash_source=svn_option,
         extra_commit_msg=extra_commit_msg)

     AddReviewers(change_list.cl_number, args_output.reviewers,
-- 
cgit v1.2.3


From 0665748ca334615e7f41978c36b8fba7817ed2c7 Mon Sep 17 00:00:00 2001
From: Ryan Beltran <ryanbeltran@chromium.org>
Date: Thu, 1 Sep 2022 17:51:48 +0000
Subject: compiler_wrapper: redisable unused-but-set-var

This CL adds the Wno-unused-but-set-variable flag back into the
compiler wrapper. It was previously removed, but there are still
occurrences of the warning, so we were never able to bundle that change
into chromiumos-overlay/llvm. An attempt to revert the original CL
failed due to a merge conflict.

BUG=b:227655984
TEST=None

Change-Id: Ic148a6ae577d5e4394249693ce9b09b95cb9df16
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3868978
Commit-Queue: Ryan Beltran <ryanbeltran@chromium.org>
Reviewed-by: George Burgess <gbiv@chromium.org>
Tested-by: Ryan Beltran <ryanbeltran@chromium.org>
Reviewed-by: Manoj Gupta <manojgupta@chromium.org>
---
 compiler_wrapper/config.go | 1 +
 compiler_wrapper/testdata/cros_clang_host_golden/bisect.json | 3 +++
 .../clang_ftrapv_maincc_target_specific.json | 9 +++++++++
 .../testdata/cros_clang_host_golden/clang_host_wrapper.json | 1 +
 .../cros_clang_host_golden/clang_maincc_target_specific.json | 9 +++++++++
 .../testdata/cros_clang_host_golden/clang_path.json | 12 ++++++++++++
 .../cros_clang_host_golden/clang_sanitizer_args.json | 8 ++++++++
 .../testdata/cros_clang_host_golden/clang_specific_args.json | 4 ++++
 .../testdata/cros_clang_host_golden/clangtidy.json | 8 ++++++++
 .../cros_clang_host_golden/force_disable_werror.json | 5 +++++
 compiler_wrapper/testdata/cros_hardened_golden/bisect.json | 3 +++
 .../clang_ftrapv_maincc_target_specific.json | 9 +++++++++
 .../cros_hardened_golden/clang_maincc_target_specific.json | 9 +++++++++
 .../testdata/cros_hardened_golden/clang_path.json | 12 ++++++++++++
 .../testdata/cros_hardened_golden/clang_sanitizer_args.json | 8 ++++++++
 .../testdata/cros_hardened_golden/clang_specific_args.json | 4 ++++
 .../cros_hardened_golden/clang_sysroot_wrapper_common.json | 6 ++++++
 .../testdata/cros_hardened_golden/clangtidy.json | 8 ++++++++
 .../testdata/cros_hardened_golden/force_disable_werror.json | 5 +++++
 .../testdata/cros_hardened_golden/gcc_clang_syntax.json | 4 ++++
 .../testdata/cros_hardened_llvmnext_golden/bisect.json | 3 +++
 .../testdata/cros_hardened_llvmnext_golden/clang_path.json | 12 ++++++++++++
 .../testdata/cros_hardened_llvmnext_golden/clangtidy.json | 8 ++++++++
 .../cros_hardened_llvmnext_golden/force_disable_werror.json | 5 +++++
 .../cros_hardened_llvmnext_golden/gcc_clang_syntax.json | 4 ++++
 .../testdata/cros_hardened_noccache_golden/bisect.json | 3 +++
 .../testdata/cros_hardened_noccache_golden/clang_path.json | 12 ++++++++++++
 .../testdata/cros_hardened_noccache_golden/clangtidy.json | 8 ++++++++
 .../cros_hardened_noccache_golden/force_disable_werror.json | 5 +++++
 .../cros_hardened_noccache_golden/gcc_clang_syntax.json | 4 ++++
 .../testdata/cros_nonhardened_golden/bisect.json | 3 +++
 .../clang_ftrapv_maincc_target_specific.json | 9 +++++++++
 .../clang_maincc_target_specific.json | 9 +++++++++
 .../testdata/cros_nonhardened_golden/clang_path.json | 12 ++++++++++++
.../cros_nonhardened_golden/clang_sanitizer_args.json | 8 ++++++++ .../cros_nonhardened_golden/clang_specific_args.json | 4 ++++ .../clang_sysroot_wrapper_common.json | 6 ++++++ .../testdata/cros_nonhardened_golden/clangtidy.json | 8 ++++++++ .../cros_nonhardened_golden/force_disable_werror.json | 5 +++++ .../testdata/cros_nonhardened_golden/gcc_clang_syntax.json | 4 ++++ 40 files changed, 260 insertions(+) diff --git a/compiler_wrapper/config.go b/compiler_wrapper/config.go index 9f49e259..6717d503 100644 --- a/compiler_wrapper/config.go +++ b/compiler_wrapper/config.go @@ -130,6 +130,7 @@ func crosCommonClangPostFlags() []string { return []string{ "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", } diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json b/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json index 2762eb64..6993499c 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json @@ -44,6 +44,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ], @@ -100,6 +101,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ], @@ -159,6 +161,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ], diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json index ab4a2fb1..26654033 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json @@ -35,6 +35,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -78,6 +79,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -121,6 +123,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -164,6 +167,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -207,6 +211,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -250,6 +255,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -293,6 +299,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -336,6 +343,7 @@ "main.cc", 
"-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -379,6 +387,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json index de144078..b151051c 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json @@ -34,6 +34,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json index 288a9a81..396bb95c 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json @@ -34,6 +34,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -76,6 +77,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -118,6 +120,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -160,6 +163,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -202,6 +206,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -244,6 +249,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -286,6 +292,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -328,6 +335,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -370,6 +378,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json index dd4bc1e4..f32a7045 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json @@ -34,6 +34,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" 
] @@ -79,6 +80,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -124,6 +126,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -169,6 +172,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -223,6 +227,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -280,6 +285,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -337,6 +343,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -382,6 +389,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -424,6 +432,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -466,6 +475,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -508,6 +518,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -553,6 +564,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json index 4836dda4..8df51096 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json @@ -37,6 +37,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -82,6 +83,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -127,6 +129,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -172,6 +175,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -216,6 +220,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -262,6 +267,7 @@ "main.cc", "-Wno-compound-token-split-by-space", 
"-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -306,6 +312,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -350,6 +357,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json index 4bd6a4d1..7c296af9 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json @@ -52,6 +52,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -96,6 +97,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -140,6 +142,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -184,6 +187,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json index 4191fc6c..c1cf0507 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json @@ -50,6 +50,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -79,6 +80,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -138,6 +140,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -167,6 +170,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -228,6 +232,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -260,6 +265,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -322,6 +328,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -351,6 +358,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] diff --git 
a/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json index ddcaa2e7..ad290b45 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json @@ -37,6 +37,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -82,6 +83,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -113,6 +115,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-Wno-error", @@ -162,6 +165,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation" ] @@ -193,6 +197,7 @@ "main.cc", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-Wno-error", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json index a397865c..8bd823ed 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json @@ -53,6 +53,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -125,6 +126,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -200,6 +202,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json index da680ac7..56f78efa 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json @@ -45,6 +45,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -107,6 +108,7 @@ "-L/usr/x86_64-cros-eabi/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -169,6 +171,7 @@ "-L/usr/x86_64-cros-win-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -231,6 +234,7 @@ "-L/usr/armv7m-cros-linux-gnu/usr/lib", 
"-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -292,6 +296,7 @@ "-L/usr/armv7m-cros-eabi/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -353,6 +358,7 @@ "-L/usr/armv7m-cros-win-gnu/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -414,6 +420,7 @@ "-L/usr/armv8m-cros-linux-gnu/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -475,6 +482,7 @@ "-L/usr/armv8m-cros-eabi/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -536,6 +544,7 @@ "-L/usr/armv8m-cros-win-gnu/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json index ab79c13b..db2f59aa 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json @@ -43,6 +43,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -103,6 +104,7 @@ "-L/usr/x86_64-cros-eabi/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -163,6 +165,7 @@ "-L/usr/x86_64-cros-win-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -223,6 +226,7 @@ "-L/usr/armv7m-cros-linux-gnu/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -282,6 +286,7 @@ "-L/usr/armv7m-cros-eabi/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -341,6 +346,7 @@ "-L/usr/armv7m-cros-win-gnu/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -400,6 +406,7 @@ "-L/usr/armv8m-cros-linux-gnu/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -459,6 +466,7 @@ "-L/usr/armv8m-cros-eabi/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", 
"-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -518,6 +526,7 @@ "-L/usr/armv8m-cros-win-gnu/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json index 5c0bdc71..186a16ed 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json @@ -43,6 +43,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -106,6 +107,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -169,6 +171,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -232,6 +235,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -304,6 +308,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -379,6 +384,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -449,6 +455,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -512,6 +519,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -572,6 +580,7 @@ "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -632,6 +641,7 @@ "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -692,6 +702,7 @@ "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -755,6 +766,7 @@ "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff 
--git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json index 0e92dbe0..6a38e845 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json @@ -45,6 +45,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -107,6 +108,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -169,6 +171,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -231,6 +234,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -292,6 +296,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -355,6 +360,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -416,6 +422,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -478,6 +485,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json index e4cc49ee..ec91216d 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json @@ -61,6 +61,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -123,6 +124,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -185,6 +187,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -247,6 +250,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", 
"-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json index 93e7f0e2..0ad97b7a 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json @@ -79,6 +79,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -142,6 +143,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -199,6 +201,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -261,6 +264,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -323,6 +327,7 @@ "-L/usr/armv7a-cros-linux-gnueabihf/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -383,6 +388,7 @@ "-Lxyz/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json index dd96e907..f7438940 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json @@ -58,6 +58,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -99,6 +100,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -170,6 +172,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -212,6 +215,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -285,6 +289,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -330,6 +335,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ 
-404,6 +410,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -446,6 +453,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json index 659253f7..9cd72611 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json @@ -46,6 +46,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -109,6 +110,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -158,6 +160,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -225,6 +228,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -274,6 +278,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json index e1e146e9..d9b532f0 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json @@ -43,6 +43,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -128,6 +129,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -208,6 +210,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -271,6 +274,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json index a397865c..8bd823ed 100644 --- 
a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json @@ -53,6 +53,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -125,6 +126,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -200,6 +202,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json index 5c0bdc71..186a16ed 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json @@ -43,6 +43,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -106,6 +107,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -169,6 +171,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -232,6 +235,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -304,6 +308,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -379,6 +384,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -449,6 +455,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -512,6 +519,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -572,6 +580,7 @@ "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -632,6 +641,7 @@ "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", 
"-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -692,6 +702,7 @@ "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -755,6 +766,7 @@ "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json index dd96e907..f7438940 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json @@ -58,6 +58,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -99,6 +100,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -170,6 +172,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -212,6 +215,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -285,6 +289,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -330,6 +335,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -404,6 +410,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -446,6 +453,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json index 659253f7..9cd72611 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json @@ -46,6 +46,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -109,6 +110,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", 
"-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -158,6 +160,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -225,6 +228,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -274,6 +278,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json index e1e146e9..d9b532f0 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json @@ -43,6 +43,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -128,6 +129,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -208,6 +210,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -271,6 +274,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json index d834d581..da522b99 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json @@ -52,6 +52,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -120,6 +121,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -191,6 +193,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json index d1ef5fe5..287833cf 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json +++ 
b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json @@ -42,6 +42,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -99,6 +100,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -156,6 +158,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -213,6 +216,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -279,6 +283,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -349,6 +354,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -418,6 +424,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -475,6 +482,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -529,6 +537,7 @@ "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -583,6 +592,7 @@ "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -637,6 +647,7 @@ "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -694,6 +705,7 @@ "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json index dd96e907..f7438940 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json @@ -58,6 +58,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", 
"-mno-movbe", @@ -99,6 +100,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -170,6 +172,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -212,6 +215,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -285,6 +289,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -330,6 +335,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -404,6 +410,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -446,6 +453,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json index 0818bca1..54943fb3 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json @@ -45,6 +45,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -102,6 +103,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -145,6 +147,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -206,6 +209,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -249,6 +253,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json index ff82f64e..1704cd17 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json 
+++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json @@ -43,6 +43,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -123,6 +124,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -203,6 +205,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -266,6 +269,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json b/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json index 4c025df8..96fd88cf 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json @@ -46,6 +46,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -111,6 +112,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -179,6 +181,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json index 4c99f2bb..35f90b1e 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json @@ -38,6 +38,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -93,6 +94,7 @@ "-L/usr/x86_64-cros-eabi/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -148,6 +150,7 @@ "-L/usr/x86_64-cros-win-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -204,6 +207,7 @@ "-L/usr/armv7m-cros-linux-gnu/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -258,6 +262,7 @@ "-L/usr/armv7m-cros-eabi/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + 
"-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -313,6 +318,7 @@ "-L/usr/armv7m-cros-win-gnu/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -368,6 +374,7 @@ "-L/usr/armv8m-cros-linux-gnu/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -422,6 +429,7 @@ "-L/usr/armv8m-cros-eabi/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -477,6 +485,7 @@ "-L/usr/armv8m-cros-win-gnu/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json index dbed527c..e2479e9b 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json @@ -36,6 +36,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -89,6 +90,7 @@ "-L/usr/x86_64-cros-eabi/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -142,6 +144,7 @@ "-L/usr/x86_64-cros-win-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -196,6 +199,7 @@ "-L/usr/armv7m-cros-linux-gnu/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -248,6 +252,7 @@ "-L/usr/armv7m-cros-eabi/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -301,6 +306,7 @@ "-L/usr/armv7m-cros-win-gnu/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -354,6 +360,7 @@ "-L/usr/armv8m-cros-linux-gnu/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -406,6 +413,7 @@ "-L/usr/armv8m-cros-eabi/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -459,6 +467,7 @@ "-L/usr/armv8m-cros-win-gnu/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", 
"-Wno-string-concatenation", "-B../../bin", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json index 6fb3b088..11c566b6 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json @@ -36,6 +36,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -92,6 +93,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -148,6 +150,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -204,6 +207,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -269,6 +273,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -337,6 +342,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -400,6 +406,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -456,6 +463,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -509,6 +517,7 @@ "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -562,6 +571,7 @@ "-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -615,6 +625,7 @@ "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -671,6 +682,7 @@ "-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json index 9ec3cd06..fe0df74d 100644 --- 
a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json @@ -39,6 +39,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -95,6 +96,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -151,6 +153,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -207,6 +210,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -262,6 +266,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -319,6 +324,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -374,6 +380,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -429,6 +436,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json index 537df6ff..b744e8c6 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json @@ -54,6 +54,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -109,6 +110,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -164,6 +166,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -219,6 +222,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json 
b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json index 860fc798..ed3a6ef1 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json @@ -69,6 +69,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -125,6 +126,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -175,6 +177,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -231,6 +234,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -288,6 +292,7 @@ "-L/usr/armv7a-cros-linux-gnueabihf/usr/lib", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-B../../bin", @@ -341,6 +346,7 @@ "-Lxyz/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json index 30f9466d..830abee6 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json @@ -51,6 +51,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -85,6 +86,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -149,6 +151,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -184,6 +187,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -250,6 +254,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -288,6 +293,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -355,6 +361,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", 
"-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -390,6 +397,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json index df6e8c7f..07c20900 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json @@ -39,6 +39,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -95,6 +96,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -137,6 +139,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -197,6 +200,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -239,6 +243,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json index 77225038..9dd5687b 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json @@ -36,6 +36,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -111,6 +112,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -181,6 +183,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", @@ -237,6 +240,7 @@ "-L/usr/x86_64-cros-linux-gnu/usr/lib64", "-Wno-compound-token-split-by-space", "-Wno-deprecated-copy", + "-Wno-unused-but-set-variable", "-Wno-implicit-int-float-conversion", "-Wno-string-concatenation", "-mno-movbe", -- cgit v1.2.3 From 7d2a6951f9361849982537ed1ea0825f3588f157 Mon Sep 17 00:00:00 2001 From: Denis Nikitin <denik@google.com> Date: Thu, 1 Sep 2022 14:41:40 -0700 Subject: toolchain-utils: Fix lint errors Move the variable with comments away from import. 
It causes a conflict between yapf and lint/isort. BUG=b:244644217 TEST=repo hooks pass Change-Id: I50942a6355d30e3d39e9e60fc26e8eba687eba05 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3869850 Tested-by: Denis Nikitin <denik@chromium.org> Auto-Submit: Denis Nikitin <denik@chromium.org> Reviewed-by: George Burgess <gbiv@chromium.org> Commit-Queue: George Burgess <gbiv@chromium.org> --- buildbot_test_toolchains.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/buildbot_test_toolchains.py b/buildbot_test_toolchains.py index 3594fddb..4caa906e 100755 --- a/buildbot_test_toolchains.py +++ b/buildbot_test_toolchains.py @@ -26,13 +26,10 @@ import shutil import sys import time +from cros_utils import buildbot_utils from cros_utils import command_executer from cros_utils import logger -from cros_utils import buildbot_utils - -# CL that uses LLVM-Next to build the images (includes chrome). -USE_LLVM_NEXT_PATCH = '513590' CROSTC_ROOT = '/usr/local/google/crostc' NIGHTLY_TESTS_DIR = os.path.join(CROSTC_ROOT, 'nightly-tests') @@ -73,6 +70,9 @@ RECIPE_IMAGE_RE = RECIPE_IMAGE_FS.format(**RECIPE_IMAGE_RE_GROUPS) TELEMETRY_AQUARIUM_UNSUPPORTED = ['bob', 'elm', 'veyron_tiger'] +# CL that uses LLVM-Next to build the images (includes chrome). +USE_LLVM_NEXT_PATCH = '513590' + class ToolchainComparator(object): """Class for doing the nightly tests work.""" @@ -226,13 +226,10 @@ class ToolchainComparator(object): crosperf = os.path.join(TOOLCHAIN_DIR, 'crosperf', 'crosperf') noschedv2_opts = '--noschedv2' if self._noschedv2 else '' - command = ('{crosperf} --no_email={no_email} --results_dir={r_dir} ' - '--logging_level=verbose --json_report=True {noschedv2_opts} ' - '{exp_file}').format(crosperf=crosperf, - no_email=not self._test, - r_dir=self._reports_dir, - noschedv2_opts=noschedv2_opts, - exp_file=experiment_file) + no_email = not self._test + command = (f'{crosperf} --no_email={no_email} ' + f'--results_dir={self._reports_dir} --logging_level=verbose ' + f'--json_report=True {noschedv2_opts} {experiment_file}') return self._ce.RunCommand(command) -- cgit v1.2.3 From 99cc6852aabf6e00a1f156c917e2d3eaca78a595 Mon Sep 17 00:00:00 2001 From: Denis Nikitin <denik@google.com> Date: Thu, 1 Sep 2022 14:43:05 -0700 Subject: toolchain-utils: Remove rendering.desktop tests Aquarium stories were recently disabled in telemetry rendering.desktop. Remove the tests to unblock nightly tests. BUG=b:244607231 TEST=cq Change-Id: I12fd75d874228ef08949ee2e8ba27ee81dc48c1f Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3869851 Reviewed-by: George Burgess <gbiv@chromium.org> Tested-by: Denis Nikitin <denik@chromium.org> Commit-Queue: Denis Nikitin <denik@chromium.org> --- buildbot_test_toolchains.py | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/buildbot_test_toolchains.py b/buildbot_test_toolchains.py index 4caa906e..f957066a 100755 --- a/buildbot_test_toolchains.py +++ b/buildbot_test_toolchains.py @@ -68,8 +68,6 @@ RECIPE_IMAGE_RE_GROUPS = { } RECIPE_IMAGE_RE = RECIPE_IMAGE_FS.format(**RECIPE_IMAGE_RE_GROUPS) -TELEMETRY_AQUARIUM_UNSUPPORTED = ['bob', 'elm', 'veyron_tiger'] - # CL that uses LLVM-Next to build the images (includes chrome). 
USE_LLVM_NEXT_PATCH = '513590' @@ -161,6 +159,7 @@ class ToolchainComparator(object): remote: %s retries: 1 """ % (self._board, self._remotes) + # TODO(b/244607231): Add graphic benchmarks removed in crrev.com/c/3869851. experiment_tests = """ benchmark: all_toolchain_perf { suite: telemetry_Crosperf @@ -176,29 +175,11 @@ class ToolchainComparator(object): retries: 0 } """ - telemetry_aquarium_tests = """ - benchmark: rendering.desktop { - run_local: False - suite: telemetry_Crosperf - test_args: --story-filter=aquarium$ - iterations: 5 - } - - benchmark: rendering.desktop { - run_local: False - suite: telemetry_Crosperf - test_args: --story-filter=aquarium_20k$ - iterations: 3 - } - """ with open(experiment_file, 'w', encoding='utf-8') as f: f.write(experiment_header) f.write(experiment_tests) - if self._board not in TELEMETRY_AQUARIUM_UNSUPPORTED: - f.write(telemetry_aquarium_tests) - # Now add vanilla to test file. official_image = """ vanilla_image { -- cgit v1.2.3 From 0d7d68f9b28904714edb7f0f7759330a8b9425a2 Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead <ajordanr@google.com> Date: Wed, 7 Sep 2022 01:35:25 +0000 Subject: afdo_metadata: Publish the new kernel profiles Update chromeos-kernel-4.4 Update chromeos-kernel-4.14 Update chromeos-kernel-4.19 Update chromeos-kernel-5.4 Update chromeos-kernel-5.10 BUG=None TEST=Verified in kernel-release-afdo-verify-orchestrator Change-Id: I740f7221827c9697d4d0565bcbef9d747a7cbf7b Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3877530 Commit-Queue: Denis Nikitin <denik@chromium.org> Reviewed-by: Denis Nikitin <denik@chromium.org> Auto-Submit: Jordan Abrahams-Whitehead <ajordanr@google.com> Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com> --- afdo_metadata/kernel_afdo.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json index 043b9607..fb13f97a 100644 --- a/afdo_metadata/kernel_afdo.json +++ b/afdo_metadata/kernel_afdo.json @@ -1,17 +1,17 @@ { "chromeos-kernel-4_4": { - "name": "R106-14989.36-1660555931" + "name": "R107-15081.0-1662370474" }, "chromeos-kernel-4_14": { - "name": "R106-14989.36-1660556022" + "name": "R107-15081.0-1662370440" }, "chromeos-kernel-4_19": { - "name": "R106-14989.36-1660555896" + "name": "R107-15054.18-1662370521" }, "chromeos-kernel-5_4": { - "name": "R106-14989.36-1660555930" + "name": "R107-15054.18-1662370576" }, "chromeos-kernel-5_10": { - "name": "R106-14989.36-1660555953" + "name": "R107-15080.0-1662370502" } } -- cgit v1.2.3 From e82af106cc9b2914429c4d29f1d3a17fd86c816b Mon Sep 17 00:00:00 2001 From: George Burgess IV <gbiv@google.com> Date: Fri, 2 Sep 2022 14:14:08 -0700 Subject: check-presubmit: add support for the new python formatter, black This is redundant with the `check_black:` flag that we could add to our `PRESUBMIT.cfg`, but `check_black` doesn't offer to autofix formatting. BUG=b:244644217 TEST=Ran the presubmit on this python file. It failed. 
:) Change-Id: Ie1d392e430ca64c3136eb2d6ab0ed62c550104dc Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3877334 Tested-by: George Burgess <gbiv@chromium.org> Commit-Queue: George Burgess <gbiv@chromium.org> Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Reviewed-by: Ryan Beltran <ryanbeltran@chromium.org> --- toolchain_utils_githooks/check-presubmit.py | 110 ++++++++++++++-------------- 1 file changed, 56 insertions(+), 54 deletions(-) diff --git a/toolchain_utils_githooks/check-presubmit.py b/toolchain_utils_githooks/check-presubmit.py index 5302cf80..e99e70ae 100755 --- a/toolchain_utils_githooks/check-presubmit.py +++ b/toolchain_utils_githooks/check-presubmit.py @@ -12,7 +12,6 @@ import datetime import multiprocessing import multiprocessing.pool import os -from pathlib import Path import re import shlex import shutil @@ -21,6 +20,7 @@ import sys import threading import traceback import typing as t +from pathlib import Path def run_command_unchecked(command: t.List[str], @@ -164,71 +164,77 @@ def check_isort(toolchain_utils_root: str, ) -def check_yapf(toolchain_utils_root: str, yapf: Path, - python_files: t.Iterable[str]) -> CheckResult: - """Subchecker of check_py_format. Checks python file formats with yapf""" - # Folks have been bitten by accidentally using multiple yapf versions in the - # past. This is an issue, since newer versions of yapf sometimes format - # things differently. Make the version obvious. - command = [yapf, '--version'] +def check_black(toolchain_utils_root: str, black: Path, + python_files: t.Iterable[str]) -> CheckResult: + """Subchecker of check_py_format. Checks python file formats with black""" + # Folks have been bitten by accidentally using multiple formatter versions in + # the past. This is an issue, since newer versions of black may format things + # differently. Make the version obvious. + command = [black, '--version'] exit_code, stdout_and_stderr = run_command_unchecked( command, cwd=toolchain_utils_root) if exit_code: return CheckResult( ok=False, - output=f'Failed getting yapf version; stdstreams: {stdout_and_stderr}', + output=f'Failed getting black version; stdstreams: {stdout_and_stderr}', autofix_commands=[], ) - yapf_version = stdout_and_stderr.strip() - # This is the depot_tools version. If folks have this, things will break for - # them. Ask them to upgrade. Peephole this rather than making some - # complicated version-parsing scheme, since it's likely that everyone with a - # too-old version is using specifically the depot_tools one. - if yapf_version == 'yapf 0.27.0': - return CheckResult( - ok=False, - output='YAPF is too old; please upgrade it: `pip install --user yapf`', - autofix_commands=[], - ) - - command = [yapf, '-d'] + python_files + black_version = stdout_and_stderr.strip() + command = [black, '--line-length=80', '--check'] + python_files exit_code, stdout_and_stderr = run_command_unchecked( command, cwd=toolchain_utils_root) - - # yapf fails when files are poorly formatted. + # black fails when files are poorly formatted. if exit_code == 0: return CheckResult( ok=True, - output=f'Using {yapf_version}, no issues were found.', + output=f'Using {black_version!r}, no issues were found.', + autofix_commands=[], + ) + + # Output format looks something like: + # f'{complaints}\nOh no!{emojis}\n{summary}' + # Whittle it down to complaints. 
+ complaints = stdout_and_stderr.split('\nOh no!', 1) + if len(complaints) != 2: + return CheckResult( + ok=False, + output=f'Unparseable `black` output:\n{stdout_and_stderr}', autofix_commands=[], ) bad_files = [] - bad_file_re = re.compile(r'^--- (.*)\s+\(original\)\s*$') - for line in stdout_and_stderr.splitlines(): - m = bad_file_re.match(line) - if not m: + errors = [] + refmt_prefix = 'would reformat ' + for line in complaints[0].strip().splitlines(): + line = line.strip() + if line.startswith('error:'): + errors.append(line) continue - file_name, = m.groups() - bad_files.append(file_name.strip()) + if not line.startswith(refmt_prefix): + return CheckResult( + ok=False, + output=f'Unparseable `black` output:\n{stdout_and_stderr}', + autofix_commands=[], + ) - # ... and doesn't really differentiate "your files have broken formatting" - # errors from general ones. So if we see nothing diffed, assume that a - # general error happened. - if not bad_files: + bad_files.append(line[len(refmt_prefix):].strip()) + + # If black had internal errors that it could handle, print them out and exit + # without an autofix. + if errors: + err_str = "\n".join(errors) return CheckResult( ok=False, - output='`%s` failed; stdout/stderr:\n%s' % - (escape_command(command), stdout_and_stderr), + output=f'Using {black_version!r} had the following errors:\n{err_str}', autofix_commands=[], ) - autofix = [str(yapf), '-i'] + bad_files + autofix = [black] + bad_files return CheckResult( ok=False, - output=f'Using {yapf_version}, these file(s) have formatting errors: ' + output=f'Using {black_version!r}, these file(s) have formatting errors: ' f'{bad_files}', autofix_commands=[autofix], ) @@ -280,18 +286,14 @@ def check_py_format(toolchain_utils_root: str, thread_pool: multiprocessing.pool.ThreadPool, files: t.Iterable[str]) -> t.List[CheckResult]: """Runs yapf on files to check for style bugs. Also checks for #!s.""" - pip_yapf = Path('~/.local/bin/yapf').expanduser() - if pip_yapf.exists(): - yapf = pip_yapf - else: - yapf = 'yapf' - if not has_executable_on_path(yapf): - return CheckResult( - ok=False, - output="yapf isn't available on your $PATH. Please either " - 'enter a chroot, or place depot_tools on your $PATH.', - autofix_commands=[], - ) + black = 'black' + if not has_executable_on_path(black): + return CheckResult( + ok=False, + output="black isn't available on your $PATH. Please either " + 'enter a chroot, or place depot_tools on your $PATH.', + autofix_commands=[], + ) python_files = [f for f in remove_deleted_files(files) if f.endswith('.py')] if not python_files: @@ -302,9 +304,9 @@ def check_py_format(toolchain_utils_root: str, ) tasks = [ - ('check_yapf', - thread_pool.apply_async(check_yapf, - (toolchain_utils_root, yapf, python_files))), + ('check_black', + thread_pool.apply_async(check_black, + (toolchain_utils_root, black, python_files))), ('check_isort', thread_pool.apply_async(check_isort, (toolchain_utils_root, python_files))), -- cgit v1.2.3 From 9e7d8714190f8f14a6124b80995ec6180824c3b1 Mon Sep 17 00:00:00 2001 From: George Burgess IV <gbiv@google.com> Date: Tue, 6 Sep 2022 11:22:22 -0700 Subject: run_tests_for: run our new python formatter, `black` This runs our new python formatter on `run_tests_for`, which is necessary for a later commit prior to the mass-reformat. 
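
For reference, the reformat amounts to an invocation along these lines (a
sketch only; the exact command used here is not recorded, but the line
length mirrors what check-presubmit's check_black enforces):

    # Sketch: reformat one file with the new formatter. The
    # --line-length=80 flag matches the check_black presubmit above;
    # treat the exact invocation as an assumption, not a record.
    import subprocess

    subprocess.run(
        ["black", "--line-length=80", "run_tests_for.py"],
        check=True,
    )
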
BUG=b:244644217 TEST=Ran run_tests_for.py Change-Id: I3ae240dfe5bdb4343d49ba8b954ab9fa731c052d Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3877335 Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Tested-by: George Burgess <gbiv@chromium.org> Reviewed-by: Ryan Beltran <ryanbeltran@chromium.org> Commit-Queue: George Burgess <gbiv@chromium.org> --- run_tests_for.py | 398 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 206 insertions(+), 192 deletions(-) diff --git a/run_tests_for.py b/run_tests_for.py index 8d24d3bf..92e00fd6 100755 --- a/run_tests_for.py +++ b/run_tests_for.py @@ -35,249 +35,263 @@ import pipes import subprocess import sys -TestSpec = collections.namedtuple('TestSpec', ['directory', 'command']) +TestSpec = collections.namedtuple("TestSpec", ["directory", "command"]) # List of python scripts that are not test with relative path to # toolchain-utils. non_test_py_files = { - 'debug_info_test/debug_info_test.py', + "debug_info_test/debug_info_test.py", } def _make_relative_to_toolchain_utils(toolchain_utils, path): - """Cleans & makes a path relative to toolchain_utils. + """Cleans & makes a path relative to toolchain_utils. - Raises if that path isn't under toolchain_utils. - """ - # abspath has the nice property that it removes any markers like './'. - as_abs = os.path.abspath(path) - result = os.path.relpath(as_abs, start=toolchain_utils) + Raises if that path isn't under toolchain_utils. + """ + # abspath has the nice property that it removes any markers like './'. + as_abs = os.path.abspath(path) + result = os.path.relpath(as_abs, start=toolchain_utils) - if result.startswith('../'): - raise ValueError('Non toolchain-utils directory found: %s' % result) - return result + if result.startswith("../"): + raise ValueError("Non toolchain-utils directory found: %s" % result) + return result def _filter_python_tests(test_files, toolchain_utils): - """Returns all files that are real python tests.""" - python_tests = [] - for test_file in test_files: - rel_path = _make_relative_to_toolchain_utils(toolchain_utils, test_file) - if rel_path not in non_test_py_files: - python_tests.append(_python_test_to_spec(test_file)) - else: - print('## %s ... NON_TEST_PY_FILE' % rel_path) - return python_tests + """Returns all files that are real python tests.""" + python_tests = [] + for test_file in test_files: + rel_path = _make_relative_to_toolchain_utils(toolchain_utils, test_file) + if rel_path not in non_test_py_files: + python_tests.append(_python_test_to_spec(test_file)) + else: + print("## %s ... 
NON_TEST_PY_FILE" % rel_path) + return python_tests def _gather_python_tests_in(rel_subdir, toolchain_utils): - """Returns all files that appear to be Python tests in a given directory.""" - subdir = os.path.join(toolchain_utils, rel_subdir) - test_files = ( - os.path.join(subdir, file_name) for file_name in os.listdir(subdir) - if file_name.endswith('_test.py') or file_name.endswith('_unittest.py')) - return _filter_python_tests(test_files, toolchain_utils) + """Returns all files that appear to be Python tests in a given directory.""" + subdir = os.path.join(toolchain_utils, rel_subdir) + test_files = ( + os.path.join(subdir, file_name) + for file_name in os.listdir(subdir) + if file_name.endswith("_test.py") or file_name.endswith("_unittest.py") + ) + return _filter_python_tests(test_files, toolchain_utils) def _run_test(test_spec): - """Runs a test.""" - p = subprocess.Popen(test_spec.command, - cwd=test_spec.directory, - stdin=open('/dev/null'), - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - encoding='utf-8') - stdout, _ = p.communicate() - exit_code = p.wait() - return exit_code, stdout + """Runs a test.""" + p = subprocess.Popen( + test_spec.command, + cwd=test_spec.directory, + stdin=open("/dev/null"), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding="utf-8", + ) + stdout, _ = p.communicate() + exit_code = p.wait() + return exit_code, stdout def _python_test_to_spec(test_file): - """Given a .py file, convert it to a TestSpec.""" - # Run tests in the directory they exist in, since some of them are sensitive - # to that. - test_directory = os.path.dirname(os.path.abspath(test_file)) - file_name = os.path.basename(test_file) - - if os.access(test_file, os.X_OK): - command = ['./' + file_name] - else: - # Assume the user wanted py3. - command = ['python3', file_name] + """Given a .py file, convert it to a TestSpec.""" + # Run tests in the directory they exist in, since some of them are sensitive + # to that. + test_directory = os.path.dirname(os.path.abspath(test_file)) + file_name = os.path.basename(test_file) + + if os.access(test_file, os.X_OK): + command = ["./" + file_name] + else: + # Assume the user wanted py3. 
+ command = ["python3", file_name] - return TestSpec(directory=test_directory, command=command) + return TestSpec(directory=test_directory, command=command) def _autodetect_python_tests_for(test_file, toolchain_utils): - """Given a test file, detect if there may be related tests.""" - if not test_file.endswith('.py'): - return [] - - test_prefixes = ('test_', 'unittest_') - test_suffixes = ('_test.py', '_unittest.py') - - test_file_name = os.path.basename(test_file) - test_file_is_a_test = ( - any(test_file_name.startswith(x) for x in test_prefixes) - or any(test_file_name.endswith(x) for x in test_suffixes)) - - if test_file_is_a_test: - test_files = [test_file] - else: - test_file_no_suffix = test_file[:-3] - candidates = [test_file_no_suffix + x for x in test_suffixes] + """Given a test file, detect if there may be related tests.""" + if not test_file.endswith(".py"): + return [] - dir_name = os.path.dirname(test_file) - candidates += (os.path.join(dir_name, x + test_file_name) - for x in test_prefixes) - test_files = (x for x in candidates if os.path.exists(x)) - return _filter_python_tests(test_files, toolchain_utils) + test_prefixes = ("test_", "unittest_") + test_suffixes = ("_test.py", "_unittest.py") + test_file_name = os.path.basename(test_file) + test_file_is_a_test = any( + test_file_name.startswith(x) for x in test_prefixes + ) or any(test_file_name.endswith(x) for x in test_suffixes) -def _run_test_scripts(all_tests, show_successful_output=False): - """Runs a list of TestSpecs. Returns whether all of them succeeded.""" - with contextlib.closing(multiprocessing.pool.ThreadPool()) as pool: - results = [pool.apply_async(_run_test, (test, )) for test in all_tests] - - failures = [] - for i, (test, future) in enumerate(zip(all_tests, results)): - # Add a bit more spacing between outputs. - if show_successful_output and i: - print('\n') - - pretty_test = ' '.join(pipes.quote(test_arg) for test_arg in test.command) - pretty_directory = os.path.relpath(test.directory) - if pretty_directory == '.': - test_message = pretty_test + if test_file_is_a_test: + test_files = [test_file] else: - test_message = '%s in %s/' % (pretty_test, pretty_directory) - - print('## %s ... ' % test_message, end='') - # Be sure that the users sees which test is running. - sys.stdout.flush() + test_file_no_suffix = test_file[:-3] + candidates = [test_file_no_suffix + x for x in test_suffixes] - exit_code, stdout = future.get() - if not exit_code: - print('PASS') - else: - print('FAIL') - failures.append(pretty_test) + dir_name = os.path.dirname(test_file) + candidates += ( + os.path.join(dir_name, x + test_file_name) for x in test_prefixes + ) + test_files = (x for x in candidates if os.path.exists(x)) + return _filter_python_tests(test_files, toolchain_utils) - if show_successful_output or exit_code: - sys.stdout.write(stdout) - if failures: - word = 'tests' if len(failures) > 1 else 'test' - print('%d %s failed: %s' % (len(failures), word, failures)) - - return not failures +def _run_test_scripts(all_tests, show_successful_output=False): + """Runs a list of TestSpecs. Returns whether all of them succeeded.""" + with contextlib.closing(multiprocessing.pool.ThreadPool()) as pool: + results = [pool.apply_async(_run_test, (test,)) for test in all_tests] + + failures = [] + for i, (test, future) in enumerate(zip(all_tests, results)): + # Add a bit more spacing between outputs. 
+ if show_successful_output and i: + print("\n") + + pretty_test = " ".join( + pipes.quote(test_arg) for test_arg in test.command + ) + pretty_directory = os.path.relpath(test.directory) + if pretty_directory == ".": + test_message = pretty_test + else: + test_message = "%s in %s/" % (pretty_test, pretty_directory) + + print("## %s ... " % test_message, end="") + # Be sure that the users sees which test is running. + sys.stdout.flush() + + exit_code, stdout = future.get() + if not exit_code: + print("PASS") + else: + print("FAIL") + failures.append(pretty_test) + + if show_successful_output or exit_code: + sys.stdout.write(stdout) + + if failures: + word = "tests" if len(failures) > 1 else "test" + print("%d %s failed: %s" % (len(failures), word, failures)) + + return not failures def _compress_list(l): - """Removes consecutive duplicate elements from |l|. - - >>> _compress_list([]) - [] - >>> _compress_list([1, 1]) - [1] - >>> _compress_list([1, 2, 1]) - [1, 2, 1] - """ - result = [] - for e in l: - if result and result[-1] == e: - continue - result.append(e) - return result + """Removes consecutive duplicate elements from |l|. + + >>> _compress_list([]) + [] + >>> _compress_list([1, 1]) + [1] + >>> _compress_list([1, 2, 1]) + [1, 2, 1] + """ + result = [] + for e in l: + if result and result[-1] == e: + continue + result.append(e) + return result def _fix_python_path(toolchain_utils): - pypath = os.environ.get('PYTHONPATH', '') - if pypath: - pypath = ':' + pypath - os.environ['PYTHONPATH'] = toolchain_utils + pypath + pypath = os.environ.get("PYTHONPATH", "") + if pypath: + pypath = ":" + pypath + os.environ["PYTHONPATH"] = toolchain_utils + pypath def _find_forced_subdir_python_tests(test_paths, toolchain_utils): - assert all(os.path.isabs(path) for path in test_paths) + assert all(os.path.isabs(path) for path in test_paths) - # Directories under toolchain_utils for which any change will cause all tests - # in that directory to be rerun. Includes changes in subdirectories. - all_dirs = { - 'crosperf', - 'cros_utils', - } + # Directories under toolchain_utils for which any change will cause all tests + # in that directory to be rerun. Includes changes in subdirectories. + all_dirs = { + "crosperf", + "cros_utils", + } - relative_paths = [ - _make_relative_to_toolchain_utils(toolchain_utils, path) - for path in test_paths - ] + relative_paths = [ + _make_relative_to_toolchain_utils(toolchain_utils, path) + for path in test_paths + ] - gather_test_dirs = set() + gather_test_dirs = set() - for path in relative_paths: - top_level_dir = path.split('/')[0] - if top_level_dir in all_dirs: - gather_test_dirs.add(top_level_dir) + for path in relative_paths: + top_level_dir = path.split("/")[0] + if top_level_dir in all_dirs: + gather_test_dirs.add(top_level_dir) - results = [] - for d in sorted(gather_test_dirs): - results += _gather_python_tests_in(d, toolchain_utils) - return results + results = [] + for d in sorted(gather_test_dirs): + results += _gather_python_tests_in(d, toolchain_utils) + return results def _find_go_tests(test_paths): - """Returns TestSpecs for the go folders of the given files""" - assert all(os.path.isabs(path) for path in test_paths) + """Returns TestSpecs for the go folders of the given files""" + assert all(os.path.isabs(path) for path in test_paths) - dirs_with_gofiles = set( - os.path.dirname(p) for p in test_paths if p.endswith('.go')) - command = ['go', 'test', '-vet=all'] - # Note: We sort the directories to be deterministic. 
- return [ - TestSpec(directory=d, command=command) for d in sorted(dirs_with_gofiles) - ] + dirs_with_gofiles = set( + os.path.dirname(p) for p in test_paths if p.endswith(".go") + ) + command = ["go", "test", "-vet=all"] + # Note: We sort the directories to be deterministic. + return [ + TestSpec(directory=d, command=command) + for d in sorted(dirs_with_gofiles) + ] def main(argv): - default_toolchain_utils = os.path.abspath(os.path.dirname(__file__)) - - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument('--show_all_output', - action='store_true', - help='show stdout of successful tests') - parser.add_argument('--toolchain_utils', - default=default_toolchain_utils, - help='directory of toolchain-utils. Often auto-detected') - parser.add_argument('file', - nargs='*', - help='a file that we should run tests for') - args = parser.parse_args(argv) - - modified_files = [os.path.abspath(f) for f in args.file] - show_all_output = args.show_all_output - toolchain_utils = args.toolchain_utils - - if not modified_files: - print('No files given. Exit.') - return 0 - - _fix_python_path(toolchain_utils) - - tests_to_run = _find_forced_subdir_python_tests(modified_files, - toolchain_utils) - for f in modified_files: - tests_to_run += _autodetect_python_tests_for(f, toolchain_utils) - tests_to_run += _find_go_tests(modified_files) - - # TestSpecs have lists, so we can't use a set. We'd likely want to keep them - # sorted for determinism anyway. - tests_to_run.sort() - tests_to_run = _compress_list(tests_to_run) - - success = _run_test_scripts(tests_to_run, show_all_output) - return 0 if success else 1 - - -if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) + default_toolchain_utils = os.path.abspath(os.path.dirname(__file__)) + + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--show_all_output", + action="store_true", + help="show stdout of successful tests", + ) + parser.add_argument( + "--toolchain_utils", + default=default_toolchain_utils, + help="directory of toolchain-utils. Often auto-detected", + ) + parser.add_argument( + "file", nargs="*", help="a file that we should run tests for" + ) + args = parser.parse_args(argv) + + modified_files = [os.path.abspath(f) for f in args.file] + show_all_output = args.show_all_output + toolchain_utils = args.toolchain_utils + + if not modified_files: + print("No files given. Exit.") + return 0 + + _fix_python_path(toolchain_utils) + + tests_to_run = _find_forced_subdir_python_tests( + modified_files, toolchain_utils + ) + for f in modified_files: + tests_to_run += _autodetect_python_tests_for(f, toolchain_utils) + tests_to_run += _find_go_tests(modified_files) + + # TestSpecs have lists, so we can't use a set. We'd likely want to keep them + # sorted for determinism anyway. + tests_to_run.sort() + tests_to_run = _compress_list(tests_to_run) + + success = _run_test_scripts(tests_to_run, show_all_output) + return 0 if success else 1 + + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) -- cgit v1.2.3 From 8448c60a6a2337ec993923837e1d55b41f49dabc Mon Sep 17 00:00:00 2001 From: George Burgess IV <gbiv@google.com> Date: Tue, 6 Sep 2022 11:31:25 -0700 Subject: run_tests_for: add per-test timeout Looks like some of our tests for older projects are failing by way of running forever, weirdly. Those need to be addressed, but we should also handle timing out tests more gracefully. Implement that. BUG=b:244644217 TEST=Ran across all tests. Timeout WAI. 
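
In outline, the approach is the process-group pattern sketched below (a
minimal, self-contained sketch of the idea, not the exact helper added to
run_tests_for.py; the two-second grace period mirrors the one chosen
there):

    # Run the child in its own process group so that, on timeout, the
    # whole tree of subprocesses can be signalled and cleaned up.
    import os
    import signal
    import subprocess

    def run_with_timeout(argv, timeout):
        p = subprocess.Popen(
            argv,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            encoding="utf-8",
            preexec_fn=lambda: os.setpgid(0, 0),
        )
        try:
            out, _ = p.communicate(timeout=timeout)
            return p.returncode, out
        except subprocess.TimeoutExpired:
            # Ask the group to stop gracefully, then force-kill stragglers.
            os.killpg(p.pid, signal.SIGINT)
            try:
                p.wait(timeout=2)
            except subprocess.TimeoutExpired:
                os.killpg(p.pid, signal.SIGKILL)
            out, _ = p.communicate()
            return None, out  # None means "timed out".
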
Change-Id: I7a8e2a809a64d2a07db52cfc59c4a9dc6e9e9e76 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3877336 Commit-Queue: George Burgess <gbiv@chromium.org> Reviewed-by: Ryan Beltran <ryanbeltran@chromium.org> Tested-by: George Burgess <gbiv@chromium.org> Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com> --- run_tests_for.py | 84 ++++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 67 insertions(+), 17 deletions(-) diff --git a/run_tests_for.py b/run_tests_for.py index 92e00fd6..93d48984 100755 --- a/run_tests_for.py +++ b/run_tests_for.py @@ -28,12 +28,14 @@ from __future__ import print_function import argparse import collections -import contextlib +import signal import multiprocessing.pool import os import pipes import subprocess import sys +from typing import Tuple, Optional + TestSpec = collections.namedtuple("TestSpec", ["directory", "command"]) @@ -81,19 +83,49 @@ def _gather_python_tests_in(rel_subdir, toolchain_utils): return _filter_python_tests(test_files, toolchain_utils) -def _run_test(test_spec): - """Runs a test.""" +def _run_test(test_spec: TestSpec, timeout: int) -> Tuple[Optional[int], str]: + """Runs a test. + + Returns a tuple indicating the process' exit code, and the combined + stdout+stderr of the process. If the exit code is None, the process timed + out. + """ + # Each subprocess gets its own process group, since many of these tests + # spawn subprocesses for a variety of reasons. If these tests time out, we + # want to be able to clean up all of the children swiftly. p = subprocess.Popen( test_spec.command, cwd=test_spec.directory, - stdin=open("/dev/null"), + stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8", + preexec_fn=lambda: os.setpgid(0, 0), ) - stdout, _ = p.communicate() - exit_code = p.wait() - return exit_code, stdout + + child_pgid = p.pid + try: + out, _ = p.communicate(timeout=timeout) + return p.returncode, out + except BaseException as e: + # Try to shut the processes down gracefully. + os.killpg(child_pgid, signal.SIGINT) + try: + # 2 seconds is arbitrary, but given that these are unittests, + # should be plenty of time for them to shut down. + p.wait(timeout=2) + except subprocess.TimeoutExpired: + os.killpg(child_pgid, signal.SIGKILL) + except: + os.killpg(child_pgid, signal.SIGKILL) + raise + + if isinstance(e, subprocess.TimeoutExpired): + # We just killed the entire process group. This should complete + # ~immediately. If it doesn't, something is very wrong. + out, _ = p.communicate(timeout=5) + return (None, out) + raise def _python_test_to_spec(test_file): @@ -139,10 +171,11 @@ def _autodetect_python_tests_for(test_file, toolchain_utils): return _filter_python_tests(test_files, toolchain_utils) -def _run_test_scripts(all_tests, show_successful_output=False): +def _run_test_scripts(pool, all_tests, timeout, show_successful_output=False): """Runs a list of TestSpecs. 
Returns whether all of them succeeded.""" - with contextlib.closing(multiprocessing.pool.ThreadPool()) as pool: - results = [pool.apply_async(_run_test, (test,)) for test in all_tests] + results = [ + pool.apply_async(_run_test, (test, timeout)) for test in all_tests + ] failures = [] for i, (test, future) in enumerate(zip(all_tests, results)): @@ -164,18 +197,25 @@ def _run_test_scripts(all_tests, show_successful_output=False): sys.stdout.flush() exit_code, stdout = future.get() - if not exit_code: + if exit_code == 0: print("PASS") + is_failure = False else: - print("FAIL") - failures.append(pretty_test) + print("TIMEOUT" if exit_code is None else "FAIL") + failures.append(test_message) + is_failure = True - if show_successful_output or exit_code: - sys.stdout.write(stdout) + if show_successful_output or is_failure: + if stdout: + print("-- Stdout:\n", stdout) + else: + print("-- No stdout was produced.") if failures: word = "tests" if len(failures) > 1 else "test" - print("%d %s failed: %s" % (len(failures), word, failures)) + print(f"{len(failures)} {word} failed:") + for failure in failures: + print(f"\t{failure}") return not failures @@ -265,6 +305,13 @@ def main(argv): parser.add_argument( "file", nargs="*", help="a file that we should run tests for" ) + parser.add_argument( + "--timeout", + default=120, + type=int, + help="Time to allow a test to execute before timing it out, in " + "seconds.", + ) args = parser.parse_args(argv) modified_files = [os.path.abspath(f) for f in args.file] @@ -289,7 +336,10 @@ def main(argv): tests_to_run.sort() tests_to_run = _compress_list(tests_to_run) - success = _run_test_scripts(tests_to_run, show_all_output) + with multiprocessing.pool.ThreadPool() as pool: + success = _run_test_scripts( + pool, tests_to_run, args.timeout, show_all_output + ) return 0 if success else 1 -- cgit v1.2.3 From 74bd380a27f4f0e8e90ff2dc1cef0b502d74961b Mon Sep 17 00:00:00 2001 From: George Burgess IV <gbiv@google.com> Date: Fri, 2 Sep 2022 16:59:27 -0700 Subject: Autoformat all Python code This autoformats all Python code with our new Python formatter, `black`. 
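
After this lands, the tree should be clean under the new formatter. A
quick way to verify that (a sketch, relying on black exiting non-zero
when any file would still be reformatted):

    # Sketch: confirm no Python file in the tree would be reformatted.
    import subprocess

    result = subprocess.run(["black", "--line-length=80", "--check", "."])
    assert result.returncode == 0, "some files are not black-formatted"
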
BUG=b:244644217 TEST=None Change-Id: I15ee49233d98fb6295c0c53c129bbf8e78e0d9ff Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3877337 Tested-by: George Burgess <gbiv@chromium.org> Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Commit-Queue: George Burgess <gbiv@chromium.org> --- afdo_redaction/redact_profile.py | 306 +- afdo_redaction/redact_profile_test.py | 225 +- afdo_redaction/remove_cold_functions.py | 291 +- afdo_redaction/remove_cold_functions_test.py | 159 +- afdo_redaction/remove_indirect_calls.py | 87 +- afdo_redaction/remove_indirect_calls_test.py | 30 +- afdo_tools/bisection/afdo_prof_analysis.py | 781 ++-- .../bisection/afdo_prof_analysis_e2e_test.py | 502 +-- afdo_tools/bisection/afdo_prof_analysis_test.py | 287 +- afdo_tools/generate_afdo_from_tryjob.py | 263 +- afdo_tools/run_afdo_tryjob.py | 228 +- auto_delete_nightly_test_data.py | 583 +-- bestflags/example_algorithms.py | 270 +- bestflags/flags.py | 215 +- bestflags/flags_test.py | 235 +- bestflags/flags_util.py | 161 +- bestflags/generation.py | 183 +- bestflags/generation_test.py | 65 +- bestflags/genetic_algorithm.py | 507 +-- bestflags/hill_climb_best_neighbor.py | 171 +- bestflags/iterative_elimination.py | 230 +- bestflags/mock_task.py | 123 +- bestflags/pipeline_process.py | 228 +- bestflags/pipeline_process_test.py | 102 +- bestflags/pipeline_worker.py | 243 +- bestflags/pipeline_worker_test.py | 201 +- bestflags/steering.py | 204 +- bestflags/steering_test.py | 242 +- bestflags/task.py | 784 ++-- bestflags/task_test.py | 283 +- bestflags/testing_batch.py | 648 ++-- binary_search_tool/binary_search_perforce.py | 974 ++--- binary_search_tool/binary_search_state.py | 1776 +++++----- binary_search_tool/bisect_driver.py | 650 ++-- binary_search_tool/common.py | 502 +-- binary_search_tool/compiler_wrapper.py | 51 +- .../cros_pkg/create_cleanup_script.py | 171 +- binary_search_tool/pass_mapping.py | 24 +- binary_search_tool/run_bisect.py | 774 ++-- binary_search_tool/run_bisect_tests.py | 276 +- binary_search_tool/sysroot_wrapper/testing_test.py | 38 +- binary_search_tool/test/binary_search_tool_test.py | 1102 +++--- binary_search_tool/test/cmd_script.py | 113 +- binary_search_tool/test/cmd_script_no_support.py | 23 +- binary_search_tool/test/common.py | 40 +- binary_search_tool/test/gen_init_list.py | 17 +- binary_search_tool/test/gen_obj.py | 166 +- binary_search_tool/test/generate_cmd.py | 18 +- binary_search_tool/test/is_good.py | 24 +- binary_search_tool/test/is_good_noinc_prune.py | 42 +- binary_search_tool/test/switch_tmp.py | 24 +- binary_search_tool/test/switch_to_bad.py | 22 +- .../test/switch_to_bad_noinc_prune.py | 24 +- binary_search_tool/test/switch_to_bad_set_file.py | 28 +- binary_search_tool/test/switch_to_good.py | 18 +- .../test/switch_to_good_noinc_prune.py | 20 +- binary_search_tool/test/switch_to_good_set_file.py | 24 +- binary_search_tool/test/test_setup.py | 14 +- binary_search_tool/test/test_setup_bad.py | 8 +- build_chromeos.py | 594 ++-- build_tc.py | 709 ++-- buildbot_test_llvm.py | 274 +- buildbot_test_toolchains.py | 609 ++-- chromiumos_image_diff.py | 693 ++-- compiler_wrapper/build.py | 188 +- compiler_wrapper/bundle.py | 109 +- crate_ebuild_help.py | 185 +- cros_utils/bugs.py | 166 +- cros_utils/bugs_test.py | 209 +- cros_utils/buildbot_utils.py | 475 +-- cros_utils/buildbot_utils_unittest.py | 381 +- cros_utils/command_executer.py | 1416 ++++---- cros_utils/command_executer_timeout_test.py | 25 +- 
cros_utils/command_executer_unittest.py | 32 +- cros_utils/constants.py | 6 +- cros_utils/device_setup_utils.py | 1109 +++--- cros_utils/device_setup_utils_unittest.py | 1318 +++---- cros_utils/email_sender.py | 510 +-- cros_utils/email_sender_unittest.py | 201 +- cros_utils/file_utils.py | 137 +- cros_utils/html_tools.py | 45 +- cros_utils/locks.py | 64 +- cros_utils/logger.py | 676 ++-- cros_utils/machines.py | 26 +- cros_utils/misc.py | 783 ++-- cros_utils/misc_test.py | 87 +- cros_utils/no_pseudo_terminal_test.py | 71 +- cros_utils/perf_diff.py | 588 +-- cros_utils/tabulator.py | 2699 +++++++------- cros_utils/timeline.py | 76 +- cros_utils/timeline_test.py | 89 +- cros_utils/tiny_render.py | 168 +- cros_utils/tiny_render_test.py | 338 +- crosperf/benchmark.py | 124 +- crosperf/benchmark_run.py | 563 +-- crosperf/benchmark_run_unittest.py | 936 ++--- crosperf/benchmark_unittest.py | 109 +- crosperf/column_chart.py | 98 +- crosperf/compare_machines.py | 99 +- crosperf/config.py | 4 +- crosperf/config_unittest.py | 56 +- crosperf/crosperf.py | 218 +- crosperf/crosperf_autolock.py | 487 +-- crosperf/crosperf_unittest.py | 98 +- crosperf/download_images.py | 669 ++-- crosperf/download_images_buildid_test.py | 199 +- crosperf/download_images_unittest.py | 552 +-- crosperf/experiment.py | 457 +-- crosperf/experiment_factory.py | 1061 +++--- crosperf/experiment_factory_unittest.py | 832 +++-- crosperf/experiment_file.py | 412 ++- crosperf/experiment_file_unittest.py | 200 +- crosperf/experiment_runner.py | 688 ++-- crosperf/experiment_runner_unittest.py | 963 ++--- crosperf/experiment_status.py | 286 +- crosperf/field.py | 245 +- crosperf/flag_test_unittest.py | 44 +- crosperf/generate_report.py | 401 ++- crosperf/generate_report_unittest.py | 290 +- crosperf/help.py | 67 +- crosperf/image_checksummer.py | 110 +- crosperf/label.py | 354 +- crosperf/machine_image_manager.py | 341 +- crosperf/machine_image_manager_unittest.py | 521 +-- crosperf/machine_manager.py | 1250 ++++--- crosperf/machine_manager_unittest.py | 1453 ++++---- crosperf/mock_instance.py | 231 +- crosperf/results_cache.py | 2993 ++++++++-------- crosperf/results_cache_unittest.py | 3747 ++++++++++---------- crosperf/results_organizer.py | 371 +- crosperf/results_organizer_unittest.py | 272 +- crosperf/results_report.py | 1471 ++++---- crosperf/results_report_templates.py | 126 +- crosperf/results_report_unittest.py | 815 +++-- crosperf/schedv2.py | 794 +++-- crosperf/schedv2_unittest.py | 333 +- crosperf/settings.py | 150 +- crosperf/settings_factory.py | 929 +++-- crosperf/settings_factory_unittest.py | 183 +- crosperf/settings_unittest.py | 496 +-- crosperf/suite_runner.py | 652 ++-- crosperf/suite_runner_unittest.py | 661 ++-- crosperf/test_flag.py | 4 +- crosperf/translate_xbuddy.py | 42 +- cwp/cr-os/fetch_gn_descs.py | 297 +- cwp/cr-os/fetch_gn_descs_test.py | 195 +- debug_info_test/allowlist.py | 68 +- debug_info_test/check_cus.py | 104 +- debug_info_test/check_exist.py | 137 +- debug_info_test/check_icf.py | 78 +- debug_info_test/check_ngcc.py | 26 +- debug_info_test/debug_info_test.py | 84 +- file_lock_machine.py | 687 ++-- file_lock_machine_test.py | 205 +- go/chromeos/setup_chromeos_testing.py | 363 +- heatmaps/heat_map.py | 338 +- heatmaps/heat_map_test.py | 278 +- heatmaps/heatmap_generator.py | 929 ++--- heatmaps/heatmap_generator_test.py | 572 +-- image_chromeos.py | 902 ++--- llvm_extra/create_ebuild_file.py | 102 +- llvm_tools/auto_llvm_bisection.py | 274 +- llvm_tools/auto_llvm_bisection_unittest.py | 483 +-- 
llvm_tools/bisect_clang_crashes.py | 238 +- llvm_tools/bisect_clang_crashes_unittest.py | 133 +- llvm_tools/check_clang_diags.py | 336 +- llvm_tools/check_clang_diags_test.py | 178 +- llvm_tools/chroot.py | 110 +- llvm_tools/chroot_unittest.py | 72 +- llvm_tools/copy_helpers_to_chromiumos_overlay.py | 73 +- llvm_tools/custom_script_example.py | 111 +- llvm_tools/failure_modes.py | 16 +- llvm_tools/fetch_cros_sdk_rolls.py | 160 +- llvm_tools/get_llvm_hash.py | 587 +-- llvm_tools/get_llvm_hash_unittest.py | 246 +- llvm_tools/get_upstream_patch.py | 950 ++--- llvm_tools/git.py | 186 +- llvm_tools/git_llvm_rev.py | 644 ++-- llvm_tools/git_llvm_rev_test.py | 233 +- llvm_tools/git_unittest.py | 255 +- llvm_tools/llvm_bisection.py | 754 ++-- llvm_tools/llvm_bisection_unittest.py | 999 +++--- llvm_tools/llvm_project.py | 81 +- llvm_tools/modify_a_tryjob.py | 598 ++-- llvm_tools/modify_a_tryjob_unittest.py | 800 +++-- llvm_tools/nightly_revert_checker.py | 767 ++-- llvm_tools/nightly_revert_checker_test.py | 372 +- llvm_tools/patch_manager.py | 658 ++-- llvm_tools/patch_manager_unittest.py | 491 +-- llvm_tools/patch_utils.py | 741 ++-- llvm_tools/patch_utils_unittest.py | 368 +- llvm_tools/revert_checker.py | 416 ++- llvm_tools/subprocess_helpers.py | 47 +- llvm_tools/test_helpers.py | 94 +- llvm_tools/update_chromeos_llvm_hash.py | 1181 +++--- llvm_tools/update_chromeos_llvm_hash_unittest.py | 1927 +++++----- llvm_tools/update_packages_and_run_tests.py | 785 ++-- .../update_packages_and_run_tests_unittest.py | 936 ++--- llvm_tools/update_tryjob_status.py | 460 +-- llvm_tools/update_tryjob_status_unittest.py | 971 ++--- llvm_tools/upload_lexan_crashes_to_forcey.py | 411 +-- llvm_tools/upload_lexan_crashes_to_forcey_test.py | 250 +- lock_machine.py | 973 ++--- make_root_writable.py | 390 +- orderfile/post_process_orderfile.py | 94 +- orderfile/post_process_orderfile_test.py | 136 +- pgo_tools/merge_profdata_and_upload.py | 618 ++-- pgo_tools/monitor_pgo_profiles.py | 171 +- pgo_tools/monitor_pgo_profiles_unittest.py | 152 +- pgo_tools_rust/pgo_rust.py | 837 +++-- remote_test.py | 168 +- rust_tools/rust_uprev.py | 1296 +++---- rust_tools/rust_uprev_test.py | 885 +++-- rust_tools/rust_watch.py | 615 ++-- rust_tools/rust_watch_test.py | 302 +- .../mass_seccomp_editor/mass_seccomp_editor.py | 461 +-- .../test_mass_seccomp_editor.py | 24 +- tc_enter_chroot.py | 578 +-- toolchain_utils_githooks/check-presubmit.py | 1256 +++---- update_telemetry_defaults.py | 310 +- 220 files changed, 49792 insertions(+), 43544 deletions(-) diff --git a/afdo_redaction/redact_profile.py b/afdo_redaction/redact_profile.py index 285dbf53..f37199e3 100755 --- a/afdo_redaction/redact_profile.py +++ b/afdo_redaction/redact_profile.py @@ -24,7 +24,8 @@ It reads a textual AFDO profile from stdin, and prints a 'fixed' version of it to stdout. A summary of what the script actually did is printed to stderr. 
""" -from __future__ import division, print_function +from __future__ import division +from __future__ import print_function import collections import re @@ -32,23 +33,23 @@ import sys def _count_samples(samples): - """Count the total number of samples in a function.""" - line_re = re.compile(r'^(\s*)\d+(?:\.\d+)?: (\d+)\s*$') + """Count the total number of samples in a function.""" + line_re = re.compile(r"^(\s*)\d+(?:\.\d+)?: (\d+)\s*$") - top_level_samples = 0 - all_samples = 0 - for line in samples: - m = line_re.match(line) - if not m: - continue + top_level_samples = 0 + all_samples = 0 + for line in samples: + m = line_re.match(line) + if not m: + continue - spaces, n = m.groups() - n = int(n) - all_samples += n - if len(spaces) == 1: - top_level_samples += n + spaces, n = m.groups() + n = int(n) + all_samples += n + if len(spaces) == 1: + top_level_samples += n - return top_level_samples, all_samples + return top_level_samples, all_samples # A ProfileRecord is a set of samples for a top-level symbol in a textual AFDO @@ -80,70 +81,75 @@ def _count_samples(samples): # And samples look like one of: # arbitrary_number: sample_count # arbitrary_number: inlined_function_symbol:inlined_entry_count -ProfileRecord = collections.namedtuple('ProfileRecord', - ['function_line', 'samples']) +ProfileRecord = collections.namedtuple( + "ProfileRecord", ["function_line", "samples"] +) def _normalize_samples(samples): - """Normalizes the samples in the given function body. - - Normalization just means that we redact inlined function names. This is - done so that a bit of templating doesn't make two function bodies look - distinct. Namely: - - template <typename T> - __attribute__((noinline)) - int getNumber() { return 1; } - - template <typename T> - __attribute__((noinline)) - int getNumberIndirectly() { return getNumber<T>(); } - - int main() { - return getNumber<int>() + getNumber<float>(); - } - - If the profile has the mangled name for getNumber<float> in - getNumberIndirectly<float> (and similar for <int>), we'll consider them to - be distinct when they're not. - """ - - # I'm not actually sure if this ends up being an issue in practice, but it's - # simple enough to guard against. - inlined_re = re.compile(r'(^\s*\d+): [^:]+:(\s*\d+)\s*$') - result = [] - for s in samples: - m = inlined_re.match(s) - if m: - result.append('%s: __REDACTED__:%s' % m.groups()) - else: - result.append(s) - return tuple(result) + """Normalizes the samples in the given function body. + + Normalization just means that we redact inlined function names. This is + done so that a bit of templating doesn't make two function bodies look + distinct. Namely: + + template <typename T> + __attribute__((noinline)) + int getNumber() { return 1; } + + template <typename T> + __attribute__((noinline)) + int getNumberIndirectly() { return getNumber<T>(); } + + int main() { + return getNumber<int>() + getNumber<float>(); + } + + If the profile has the mangled name for getNumber<float> in + getNumberIndirectly<float> (and similar for <int>), we'll consider them to + be distinct when they're not. + """ + + # I'm not actually sure if this ends up being an issue in practice, but it's + # simple enough to guard against. 
+ inlined_re = re.compile(r"(^\s*\d+): [^:]+:(\s*\d+)\s*$") + result = [] + for s in samples: + m = inlined_re.match(s) + if m: + result.append("%s: __REDACTED__:%s" % m.groups()) + else: + result.append(s) + return tuple(result) def _read_textual_afdo_profile(stream): - """Parses an AFDO profile from a line stream into ProfileRecords.""" - # ProfileRecords are actually nested, due to inlining. For the purpose of - # this script, that doesn't matter. - lines = (line.rstrip() for line in stream) - function_line = None - samples = [] - for line in lines: - if not line: - continue - - if line[0].isspace(): - assert function_line is not None, 'sample exists outside of a function?' - samples.append(line) - continue - - if function_line is not None: - yield ProfileRecord(function_line=function_line, samples=tuple(samples)) - function_line = line + """Parses an AFDO profile from a line stream into ProfileRecords.""" + # ProfileRecords are actually nested, due to inlining. For the purpose of + # this script, that doesn't matter. + lines = (line.rstrip() for line in stream) + function_line = None samples = [] + for line in lines: + if not line: + continue + + if line[0].isspace(): + assert ( + function_line is not None + ), "sample exists outside of a function?" + samples.append(line) + continue + + if function_line is not None: + yield ProfileRecord( + function_line=function_line, samples=tuple(samples) + ) + function_line = line + samples = [] - if function_line is not None: - yield ProfileRecord(function_line=function_line, samples=tuple(samples)) + if function_line is not None: + yield ProfileRecord(function_line=function_line, samples=tuple(samples)) # The default of 100 is arbitrarily selected, but it does make the overwhelming @@ -157,86 +163,96 @@ def _read_textual_afdo_profile(stream): # Non-nm based approaches are superior because they don't require any prior # build artifacts; just an AFDO profile. def dedup_records(profile_records, summary_file, max_repeats=100): - """Removes heavily duplicated records from profile_records. - - profile_records is expected to be an iterable of ProfileRecord. - max_repeats ia how many functions must share identical bodies for us to - consider it 'heavily duplicated' and remove the results. - """ - - # Build a mapping of function structure -> list of functions with identical - # structure and sample counts - counts = collections.defaultdict(list) - for record in profile_records: - counts[_normalize_samples(record.samples)].append(record) - - # Be sure that we didn't see any duplicate functions, since that's bad... - total_functions_recorded = sum(len(records) for records in counts.values()) - - unique_function_names = { - record.function_line.split(':')[0] - for records in counts.values() - for record in records - } - - assert len(unique_function_names) == total_functions_recorded, \ - 'duplicate function names?' 
- - num_kept = 0 - num_samples_kept = 0 - num_top_samples_kept = 0 - num_total = 0 - num_samples_total = 0 - num_top_samples_total = 0 - - for normalized_samples, records in counts.items(): - top_sample_count, all_sample_count = _count_samples(normalized_samples) - top_sample_count *= len(records) - all_sample_count *= len(records) - - num_total += len(records) - num_samples_total += all_sample_count - num_top_samples_total += top_sample_count - - if len(records) >= max_repeats: - continue - - num_kept += len(records) - num_samples_kept += all_sample_count - num_top_samples_kept += top_sample_count - for record in records: - yield record - - print( - 'Retained {:,}/{:,} functions'.format(num_kept, num_total), - file=summary_file) - print( - 'Retained {:,}/{:,} samples, total'.format(num_samples_kept, - num_samples_total), - file=summary_file) - print('Retained {:,}/{:,} top-level samples' \ - .format(num_top_samples_kept, num_top_samples_total), - file=summary_file) + """Removes heavily duplicated records from profile_records. + + profile_records is expected to be an iterable of ProfileRecord. + max_repeats ia how many functions must share identical bodies for us to + consider it 'heavily duplicated' and remove the results. + """ + + # Build a mapping of function structure -> list of functions with identical + # structure and sample counts + counts = collections.defaultdict(list) + for record in profile_records: + counts[_normalize_samples(record.samples)].append(record) + + # Be sure that we didn't see any duplicate functions, since that's bad... + total_functions_recorded = sum(len(records) for records in counts.values()) + + unique_function_names = { + record.function_line.split(":")[0] + for records in counts.values() + for record in records + } + + assert ( + len(unique_function_names) == total_functions_recorded + ), "duplicate function names?" + + num_kept = 0 + num_samples_kept = 0 + num_top_samples_kept = 0 + num_total = 0 + num_samples_total = 0 + num_top_samples_total = 0 + + for normalized_samples, records in counts.items(): + top_sample_count, all_sample_count = _count_samples(normalized_samples) + top_sample_count *= len(records) + all_sample_count *= len(records) + + num_total += len(records) + num_samples_total += all_sample_count + num_top_samples_total += top_sample_count + + if len(records) >= max_repeats: + continue + + num_kept += len(records) + num_samples_kept += all_sample_count + num_top_samples_kept += top_sample_count + for record in records: + yield record + + print( + "Retained {:,}/{:,} functions".format(num_kept, num_total), + file=summary_file, + ) + print( + "Retained {:,}/{:,} samples, total".format( + num_samples_kept, num_samples_total + ), + file=summary_file, + ) + print( + "Retained {:,}/{:,} top-level samples".format( + num_top_samples_kept, num_top_samples_total + ), + file=summary_file, + ) def run(profile_input_file, summary_output_file, profile_output_file): - profile_records = _read_textual_afdo_profile(profile_input_file) + profile_records = _read_textual_afdo_profile(profile_input_file) - # Sort this so we get deterministic output. AFDO doesn't care what order it's - # in. - deduped = sorted( - dedup_records(profile_records, summary_output_file), - key=lambda r: r.function_line) - for function_line, samples in deduped: - print(function_line, file=profile_output_file) - print('\n'.join(samples), file=profile_output_file) + # Sort this so we get deterministic output. AFDO doesn't care what order it's + # in. 
+ deduped = sorted( + dedup_records(profile_records, summary_output_file), + key=lambda r: r.function_line, + ) + for function_line, samples in deduped: + print(function_line, file=profile_output_file) + print("\n".join(samples), file=profile_output_file) def _main(): - run(profile_input_file=sys.stdin, - summary_output_file=sys.stderr, - profile_output_file=sys.stdout) + run( + profile_input_file=sys.stdin, + summary_output_file=sys.stderr, + profile_output_file=sys.stdout, + ) -if __name__ == '__main__': - _main() +if __name__ == "__main__": + _main() diff --git a/afdo_redaction/redact_profile_test.py b/afdo_redaction/redact_profile_test.py index 26fda3fd..154f8f7e 100755 --- a/afdo_redaction/redact_profile_test.py +++ b/afdo_redaction/redact_profile_test.py @@ -6,131 +6,136 @@ """Tests for redact_profile.py.""" -from __future__ import division, print_function +from __future__ import division +from __future__ import print_function import io import unittest from afdo_redaction import redact_profile + _redact_limit = redact_profile.dedup_records.__defaults__[0] def _redact(input_lines, summary_to=None): - if isinstance(input_lines, str): - input_lines = input_lines.splitlines() + if isinstance(input_lines, str): + input_lines = input_lines.splitlines() - if summary_to is None: - summary_to = io.StringIO() + if summary_to is None: + summary_to = io.StringIO() - output_to = io.StringIO() - redact_profile.run( - profile_input_file=input_lines, - summary_output_file=summary_to, - profile_output_file=output_to) - return output_to.getvalue() + output_to = io.StringIO() + redact_profile.run( + profile_input_file=input_lines, + summary_output_file=summary_to, + profile_output_file=output_to, + ) + return output_to.getvalue() def _redact_with_summary(input_lines): - summary = io.StringIO() - result = _redact(input_lines, summary_to=summary) - return result, summary.getvalue() - - -def _generate_repeated_function_body(repeats, fn_name='_some_name'): - # Arbitrary function body ripped from a textual AFDO profile. - function_header = fn_name + ':1234:185' - function_body = [ - ' 6: 83', - ' 15: 126', - ' 62832: 126', - ' 6: _ZNK5blink10PaintLayer14GroupedMappingEv:2349', - ' 1: 206', - ' 1: _ZNK5blink10PaintLayer14GroupedMappersEv:2060', - ' 1: 206', - ' 11: _ZNK5blink10PaintLayer25GetCompositedLayerMappingEv:800', - ' 2.1: 80', - ] - - # Be sure to zfill this, so the functions are output in sorted order. - num_width = len(str(repeats)) - - lines = [] - for i in range(repeats): - num = str(i).zfill(num_width) - lines.append(num + function_header) - lines.extend(function_body) - return lines + summary = io.StringIO() + result = _redact(input_lines, summary_to=summary) + return result, summary.getvalue() + + +def _generate_repeated_function_body(repeats, fn_name="_some_name"): + # Arbitrary function body ripped from a textual AFDO profile. + function_header = fn_name + ":1234:185" + function_body = [ + " 6: 83", + " 15: 126", + " 62832: 126", + " 6: _ZNK5blink10PaintLayer14GroupedMappingEv:2349", + " 1: 206", + " 1: _ZNK5blink10PaintLayer14GroupedMappersEv:2060", + " 1: 206", + " 11: _ZNK5blink10PaintLayer25GetCompositedLayerMappingEv:800", + " 2.1: 80", + ] + + # Be sure to zfill this, so the functions are output in sorted order. 
+ num_width = len(str(repeats)) + + lines = [] + for i in range(repeats): + num = str(i).zfill(num_width) + lines.append(num + function_header) + lines.extend(function_body) + return lines class Tests(unittest.TestCase): - """All of our tests for redact_profile.""" - - def test_no_input_works(self): - self.assertEqual(_redact(''), '') - - def test_single_function_works(self): - lines = _generate_repeated_function_body(1) - result_file = '\n'.join(lines) + '\n' - self.assertEqual(_redact(lines), result_file) - - def test_duplicate_of_single_function_works(self): - lines = _generate_repeated_function_body(2) - result_file = '\n'.join(lines) + '\n' - self.assertEqual(_redact(lines), result_file) - - def test_not_too_many_duplicates_of_single_function_redacts_none(self): - lines = _generate_repeated_function_body(_redact_limit - 1) - result_file = '\n'.join(lines) + '\n' - self.assertEqual(_redact(lines), result_file) - - def test_many_duplicates_of_single_function_redacts_them_all(self): - lines = _generate_repeated_function_body(_redact_limit) - self.assertEqual(_redact(lines), '') - - def test_many_duplicates_of_single_function_leaves_other_functions(self): - kept_lines = _generate_repeated_function_body(1, fn_name='_keep_me') - # Something to distinguish us from the rest. Just bump a random counter. - kept_lines[1] += '1' - - result_file = '\n'.join(kept_lines) + '\n' - - lines = _generate_repeated_function_body( - _redact_limit, fn_name='_discard_me') - self.assertEqual(_redact(kept_lines + lines), result_file) - self.assertEqual(_redact(lines + kept_lines), result_file) - - more_lines = _generate_repeated_function_body( - _redact_limit, fn_name='_and_discard_me') - self.assertEqual(_redact(lines + kept_lines + more_lines), result_file) - self.assertEqual(_redact(lines + more_lines), '') - - def test_correct_summary_is_printed_when_nothing_is_redacted(self): - lines = _generate_repeated_function_body(1) - _, summary = _redact_with_summary(lines) - self.assertIn('Retained 1/1 functions', summary) - self.assertIn('Retained 827/827 samples, total', summary) - # Note that top-level samples == "samples without inlining taken into - # account," not "sum(entry_counts)" - self.assertIn('Retained 335/335 top-level samples', summary) - - def test_correct_summary_is_printed_when_everything_is_redacted(self): - lines = _generate_repeated_function_body(_redact_limit) - _, summary = _redact_with_summary(lines) - self.assertIn('Retained 0/100 functions', summary) - self.assertIn('Retained 0/82,700 samples, total', summary) - self.assertIn('Retained 0/33,500 top-level samples', summary) - - def test_correct_summary_is_printed_when_most_everything_is_redacted(self): - kept_lines = _generate_repeated_function_body(1, fn_name='_keep_me') - kept_lines[1] += '1' - - lines = _generate_repeated_function_body(_redact_limit) - _, summary = _redact_with_summary(kept_lines + lines) - self.assertIn('Retained 1/101 functions', summary) - self.assertIn('Retained 1,575/84,275 samples, total', summary) - self.assertIn('Retained 1,083/34,583 top-level samples', summary) - - -if __name__ == '__main__': - unittest.main() + """All of our tests for redact_profile.""" + + def test_no_input_works(self): + self.assertEqual(_redact(""), "") + + def test_single_function_works(self): + lines = _generate_repeated_function_body(1) + result_file = "\n".join(lines) + "\n" + self.assertEqual(_redact(lines), result_file) + + def test_duplicate_of_single_function_works(self): + lines = _generate_repeated_function_body(2) + result_file = 
"\n".join(lines) + "\n" + self.assertEqual(_redact(lines), result_file) + + def test_not_too_many_duplicates_of_single_function_redacts_none(self): + lines = _generate_repeated_function_body(_redact_limit - 1) + result_file = "\n".join(lines) + "\n" + self.assertEqual(_redact(lines), result_file) + + def test_many_duplicates_of_single_function_redacts_them_all(self): + lines = _generate_repeated_function_body(_redact_limit) + self.assertEqual(_redact(lines), "") + + def test_many_duplicates_of_single_function_leaves_other_functions(self): + kept_lines = _generate_repeated_function_body(1, fn_name="_keep_me") + # Something to distinguish us from the rest. Just bump a random counter. + kept_lines[1] += "1" + + result_file = "\n".join(kept_lines) + "\n" + + lines = _generate_repeated_function_body( + _redact_limit, fn_name="_discard_me" + ) + self.assertEqual(_redact(kept_lines + lines), result_file) + self.assertEqual(_redact(lines + kept_lines), result_file) + + more_lines = _generate_repeated_function_body( + _redact_limit, fn_name="_and_discard_me" + ) + self.assertEqual(_redact(lines + kept_lines + more_lines), result_file) + self.assertEqual(_redact(lines + more_lines), "") + + def test_correct_summary_is_printed_when_nothing_is_redacted(self): + lines = _generate_repeated_function_body(1) + _, summary = _redact_with_summary(lines) + self.assertIn("Retained 1/1 functions", summary) + self.assertIn("Retained 827/827 samples, total", summary) + # Note that top-level samples == "samples without inlining taken into + # account," not "sum(entry_counts)" + self.assertIn("Retained 335/335 top-level samples", summary) + + def test_correct_summary_is_printed_when_everything_is_redacted(self): + lines = _generate_repeated_function_body(_redact_limit) + _, summary = _redact_with_summary(lines) + self.assertIn("Retained 0/100 functions", summary) + self.assertIn("Retained 0/82,700 samples, total", summary) + self.assertIn("Retained 0/33,500 top-level samples", summary) + + def test_correct_summary_is_printed_when_most_everything_is_redacted(self): + kept_lines = _generate_repeated_function_body(1, fn_name="_keep_me") + kept_lines[1] += "1" + + lines = _generate_repeated_function_body(_redact_limit) + _, summary = _redact_with_summary(kept_lines + lines) + self.assertIn("Retained 1/101 functions", summary) + self.assertIn("Retained 1,575/84,275 samples, total", summary) + self.assertIn("Retained 1,083/34,583 top-level samples", summary) + + +if __name__ == "__main__": + unittest.main() diff --git a/afdo_redaction/remove_cold_functions.py b/afdo_redaction/remove_cold_functions.py index 5a1b7439..4b4eaec6 100755 --- a/afdo_redaction/remove_cold_functions.py +++ b/afdo_redaction/remove_cold_functions.py @@ -24,160 +24,193 @@ This is part of the effort to stablize the impact of AFDO profile on Chrome binary size. See crbug.com/1062014 for more context. 
""" -from __future__ import division, print_function +from __future__ import division +from __future__ import print_function import argparse import collections import re import sys -_function_line_re = re.compile(r'^([\w\$\.@]+):(\d+)(?::\d+)?$') + +_function_line_re = re.compile(r"^([\w\$\.@]+):(\d+)(?::\d+)?$") ProfileRecord = collections.namedtuple( - 'ProfileRecord', ['function_count', 'function_body', 'function_name']) + "ProfileRecord", ["function_count", "function_body", "function_name"] +) def _read_sample_count(line): - m = _function_line_re.match(line) - assert m, 'Failed to interpret function line %s' % line - return m.group(1), int(m.group(2)) + m = _function_line_re.match(line) + assert m, "Failed to interpret function line %s" % line + return m.group(1), int(m.group(2)) def _read_textual_afdo_profile(stream): - """Parses an AFDO profile from a line stream into ProfileRecords.""" - # ProfileRecords are actually nested, due to inlining. For the purpose of - # this script, that doesn't matter. - lines = (line.rstrip() for line in stream) - function_line = None - samples = [] - ret = [] - for line in lines: - if not line: - continue - - if line[0].isspace(): - assert function_line is not None, 'sample exists outside of a function?' - samples.append(line) - continue - - if function_line is not None: - name, count = _read_sample_count(function_line) - body = [function_line] + samples - ret.append( - ProfileRecord( - function_count=count, function_body=body, function_name=name)) - function_line = line + """Parses an AFDO profile from a line stream into ProfileRecords.""" + # ProfileRecords are actually nested, due to inlining. For the purpose of + # this script, that doesn't matter. + lines = (line.rstrip() for line in stream) + function_line = None samples = [] + ret = [] + for line in lines: + if not line: + continue + + if line[0].isspace(): + assert ( + function_line is not None + ), "sample exists outside of a function?" 
+ samples.append(line) + continue + + if function_line is not None: + name, count = _read_sample_count(function_line) + body = [function_line] + samples + ret.append( + ProfileRecord( + function_count=count, function_body=body, function_name=name + ) + ) + function_line = line + samples = [] - if function_line is not None: - name, count = _read_sample_count(function_line) - body = [function_line] + samples - ret.append( - ProfileRecord( - function_count=count, function_body=body, function_name=name)) - return ret + if function_line is not None: + name, count = _read_sample_count(function_line) + body = [function_line] + samples + ret.append( + ProfileRecord( + function_count=count, function_body=body, function_name=name + ) + ) + return ret def write_textual_afdo_profile(stream, records): - for r in records: - print('\n'.join(r.function_body), file=stream) + for r in records: + print("\n".join(r.function_body), file=stream) def analyze_functions(records, cwp, benchmark): - cwp_functions = {x.function_name for x in cwp} - benchmark_functions = {x.function_name for x in benchmark} - all_functions = {x.function_name for x in records} - cwp_only_functions = len((all_functions & cwp_functions) - - benchmark_functions) - benchmark_only_functions = len((all_functions & benchmark_functions) - - cwp_functions) - common_functions = len(all_functions & benchmark_functions & cwp_functions) - none_functions = len(all_functions - benchmark_functions - cwp_functions) - - assert not none_functions - return cwp_only_functions, benchmark_only_functions, common_functions + cwp_functions = {x.function_name for x in cwp} + benchmark_functions = {x.function_name for x in benchmark} + all_functions = {x.function_name for x in records} + cwp_only_functions = len( + (all_functions & cwp_functions) - benchmark_functions + ) + benchmark_only_functions = len( + (all_functions & benchmark_functions) - cwp_functions + ) + common_functions = len(all_functions & benchmark_functions & cwp_functions) + none_functions = len(all_functions - benchmark_functions - cwp_functions) + + assert not none_functions + return cwp_only_functions, benchmark_only_functions, common_functions def run(input_stream, output_stream, goal, cwp=None, benchmark=None): - records = _read_textual_afdo_profile(input_stream) - num_functions = len(records) - if not num_functions: - return - assert goal, "It's invalid to remove all functions in the profile" - - if cwp and benchmark: - cwp_records = _read_textual_afdo_profile(cwp) - benchmark_records = _read_textual_afdo_profile(benchmark) - cwp_num, benchmark_num, common_num = analyze_functions( - records, cwp_records, benchmark_records) - - records.sort(key=lambda x: (-x.function_count, x.function_name)) - records = records[:goal] - - print( - 'Retained %d/%d (%.1f%%) functions in the profile' % - (len(records), num_functions, 100.0 * len(records) / num_functions), - file=sys.stderr) - write_textual_afdo_profile(output_stream, records) - - if cwp and benchmark: - cwp_num_after, benchmark_num_after, common_num_after = analyze_functions( - records, cwp_records, benchmark_records) - print( - 'Retained %d/%d (%.1f%%) functions only appear in the CWP profile' % - (cwp_num_after, cwp_num, 100.0 * cwp_num_after / cwp_num), - file=sys.stderr) - print( - 'Retained %d/%d (%.1f%%) functions only appear in the benchmark profile' - % (benchmark_num_after, benchmark_num, - 100.0 * benchmark_num_after / benchmark_num), - file=sys.stderr) + records = _read_textual_afdo_profile(input_stream) + num_functions = 
len(records) + if not num_functions: + return + assert goal, "It's invalid to remove all functions in the profile" + + if cwp and benchmark: + cwp_records = _read_textual_afdo_profile(cwp) + benchmark_records = _read_textual_afdo_profile(benchmark) + cwp_num, benchmark_num, common_num = analyze_functions( + records, cwp_records, benchmark_records + ) + + records.sort(key=lambda x: (-x.function_count, x.function_name)) + records = records[:goal] + print( - 'Retained %d/%d (%.1f%%) functions appear in both CWP and benchmark' - ' profiles' % (common_num_after, common_num, - 100.0 * common_num_after / common_num), - file=sys.stderr) + "Retained %d/%d (%.1f%%) functions in the profile" + % (len(records), num_functions, 100.0 * len(records) / num_functions), + file=sys.stderr, + ) + write_textual_afdo_profile(output_stream, records) + + if cwp and benchmark: + ( + cwp_num_after, + benchmark_num_after, + common_num_after, + ) = analyze_functions(records, cwp_records, benchmark_records) + print( + "Retained %d/%d (%.1f%%) functions only appear in the CWP profile" + % (cwp_num_after, cwp_num, 100.0 * cwp_num_after / cwp_num), + file=sys.stderr, + ) + print( + "Retained %d/%d (%.1f%%) functions only appear in the benchmark profile" + % ( + benchmark_num_after, + benchmark_num, + 100.0 * benchmark_num_after / benchmark_num, + ), + file=sys.stderr, + ) + print( + "Retained %d/%d (%.1f%%) functions appear in both CWP and benchmark" + " profiles" + % ( + common_num_after, + common_num, + 100.0 * common_num_after / common_num, + ), + file=sys.stderr, + ) def main(): - parser = argparse.ArgumentParser( - description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - '--input', - default='/dev/stdin', - help='File to read from. Defaults to stdin.') - parser.add_argument( - '--output', - default='/dev/stdout', - help='File to write to. Defaults to stdout.') - parser.add_argument( - '--number', - type=int, - required=True, - help='Number of functions to retain in the profile.') - parser.add_argument( - '--cwp', help='Textualized CWP profiles, used for further analysis') - parser.add_argument( - '--benchmark', - help='Textualized benchmark profile, used for further analysis') - args = parser.parse_args() - - if not args.number: - parser.error("It's invalid to remove the number of functions to 0.") - - if (args.cwp and not args.benchmark) or (not args.cwp and args.benchmark): - parser.error('Please specify both --cwp and --benchmark') - - with open(args.input) as stdin: - with open(args.output, 'w') as stdout: - # When user specify textualized cwp and benchmark profiles, perform - # the analysis. Otherwise, just trim the cold functions from profile. - if args.cwp and args.benchmark: - with open(args.cwp) as cwp: - with open(args.benchmark) as benchmark: - run(stdin, stdout, args.number, cwp, benchmark) - else: - run(stdin, stdout, args.number) - - -if __name__ == '__main__': - main() + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "--input", + default="/dev/stdin", + help="File to read from. Defaults to stdin.", + ) + parser.add_argument( + "--output", + default="/dev/stdout", + help="File to write to. 
Defaults to stdout.",
+    )
+    parser.add_argument(
+        "--number",
+        type=int,
+        required=True,
+        help="Number of functions to retain in the profile.",
+    )
+    parser.add_argument(
+        "--cwp", help="Textualized CWP profiles, used for further analysis"
+    )
+    parser.add_argument(
+        "--benchmark",
+        help="Textualized benchmark profile, used for further analysis",
+    )
+    args = parser.parse_args()
+
+    if not args.number:
+        parser.error("It's invalid to set the number of functions to 0.")
+
+    if (args.cwp and not args.benchmark) or (not args.cwp and args.benchmark):
+        parser.error("Please specify both --cwp and --benchmark")
+
+    with open(args.input) as stdin:
+        with open(args.output, "w") as stdout:
+            # When the user specifies textualized CWP and benchmark profiles,
+            # perform the analysis; otherwise just trim the cold functions.
+            if args.cwp and args.benchmark:
+                with open(args.cwp) as cwp:
+                    with open(args.benchmark) as benchmark:
+                        run(stdin, stdout, args.number, cwp, benchmark)
+            else:
+                run(stdin, stdout, args.number)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/afdo_redaction/remove_cold_functions_test.py b/afdo_redaction/remove_cold_functions_test.py
index 839e5378..a203ab15 100755
--- a/afdo_redaction/remove_cold_functions_test.py
+++ b/afdo_redaction/remove_cold_functions_test.py
@@ -9,20 +9,20 @@ from __future__ import print_function
 
 import io
-from unittest.mock import patch
 import unittest
+from unittest.mock import patch
 
 from afdo_redaction import remove_cold_functions
 
 
 def _construct_profile(indices=None):
-  real_world_profile_functions = [
-      """SomeFunction1:24150:300
+    real_world_profile_functions = [
+        """SomeFunction1:24150:300
  2: 75
  3: 23850
  39: 225
 """,
-      """SomeFunction2:8925:225
+        """SomeFunction2:8925:225
  0: 225
  0.2: 150
  0.1: SomeFunction2:6300
@@ -30,7 +30,7 @@ def _construct_profile(indices=None):
  0.2: SomeFunction2:150
  3: 75
 """,
-      """SomeFunction3:7500:75
+        """SomeFunction3:7500:75
  0: 75
  0.2: 75
  0.1: SomeFunction3:6600
@@ -38,7 +38,7 @@ def _construct_profile(indices=None):
  0.2: SomeFunction3:75
  1: 75
 """,
-      """LargerFunction4:51450:0
+        """LargerFunction4:51450:0
  1: 0
  3: 0
  3.1: 7350
@@ -59,7 +59,7 @@ def _construct_profile(indices=None):
  8: 0
  9: 0
 """,
-      """SomeFakeFunction5:7500:75
+        """SomeFakeFunction5:7500:75
  0: 75
  0.2: 75
  0.1: SomeFakeFunction5:6600
@@ -67,80 +67,87 @@ def _construct_profile(indices=None):
  0.2: SomeFakeFunction5:75
  1: 75
 """,
-  ]
+    ]
 
-  ret = []
-  if not indices:
-    for x in real_world_profile_functions:
-      ret += x.strip().splitlines()
-    return ret
+    ret = []
+    if not indices:
+        for x in real_world_profile_functions:
+            ret += x.strip().splitlines()
+        return ret
 
-  ret = []
-  for i in indices:
-    ret += real_world_profile_functions[i].strip().splitlines()
-  return ret
+    ret = []
+    for i in indices:
+        ret += real_world_profile_functions[i].strip().splitlines()
+    return ret
 
 
 def _run_test(input_lines, goal, cwp_file=None, benchmark_file=None):
-  input_buf = io.StringIO('\n'.join(input_lines))
-  output_buf = io.StringIO()
-  remove_cold_functions.run(input_buf, output_buf, goal, cwp_file,
-                            benchmark_file)
-  return output_buf.getvalue().splitlines()
+    input_buf = io.StringIO("\n".join(input_lines))
+    output_buf = io.StringIO()
+    remove_cold_functions.run(
+        input_buf, output_buf, goal, cwp_file, benchmark_file
+    )
+    return output_buf.getvalue().splitlines()
 
 
 class Test(unittest.TestCase):
-  """Test functions in remove_cold_functions.py"""
-
-  def test_empty_profile(self):
-    self.assertEqual(_run_test([], 0), [])
-
-  def 
test_remove_all_functions_fail(self): - input_profile_lines = _construct_profile() - with self.assertRaises(Exception) as context: - _run_test(input_profile_lines, 0) - self.assertEqual( - str(context.exception), - "It's invalid to remove all functions in the profile") - - def test_remove_cold_functions_work(self): - input_profile_lines = _construct_profile() - # To make sure the cold functions are removed in order - expected_profile_lines = { - 5: input_profile_lines, - # Entry 4 wins the tie breaker because the name is smaller - # alphabetically. - 4: _construct_profile([0, 1, 3, 4]), - 3: _construct_profile([0, 1, 3]), - 2: _construct_profile([0, 3]), - 1: _construct_profile([3]), - } - - for num in expected_profile_lines: - self.assertCountEqual( - _run_test(input_profile_lines, num), expected_profile_lines[num]) - - def test_analyze_cwp_and_benchmark_work(self): - input_profile_lines = _construct_profile() - cwp_profile = _construct_profile([0, 1, 3, 4]) - benchmark_profile = _construct_profile([1, 2, 3, 4]) - cwp_buf = io.StringIO('\n'.join(cwp_profile)) - benchmark_buf = io.StringIO('\n'.join(benchmark_profile)) - with patch('sys.stderr', new=io.StringIO()) as fake_output: - _run_test(input_profile_lines, 3, cwp_buf, benchmark_buf) - - output = fake_output.getvalue() - self.assertIn('Retained 3/5 (60.0%) functions in the profile', output) - self.assertIn( - 'Retained 1/1 (100.0%) functions only appear in the CWP profile', - output) - self.assertIn( - 'Retained 0/1 (0.0%) functions only appear in the benchmark profile', - output) - self.assertIn( - 'Retained 2/3 (66.7%) functions appear in both CWP and benchmark' - ' profiles', output) - - -if __name__ == '__main__': - unittest.main() + """Test functions in remove_cold_functions.py""" + + def test_empty_profile(self): + self.assertEqual(_run_test([], 0), []) + + def test_remove_all_functions_fail(self): + input_profile_lines = _construct_profile() + with self.assertRaises(Exception) as context: + _run_test(input_profile_lines, 0) + self.assertEqual( + str(context.exception), + "It's invalid to remove all functions in the profile", + ) + + def test_remove_cold_functions_work(self): + input_profile_lines = _construct_profile() + # To make sure the cold functions are removed in order + expected_profile_lines = { + 5: input_profile_lines, + # Entry 4 wins the tie breaker because the name is smaller + # alphabetically. 
+ 4: _construct_profile([0, 1, 3, 4]), + 3: _construct_profile([0, 1, 3]), + 2: _construct_profile([0, 3]), + 1: _construct_profile([3]), + } + + for num in expected_profile_lines: + self.assertCountEqual( + _run_test(input_profile_lines, num), expected_profile_lines[num] + ) + + def test_analyze_cwp_and_benchmark_work(self): + input_profile_lines = _construct_profile() + cwp_profile = _construct_profile([0, 1, 3, 4]) + benchmark_profile = _construct_profile([1, 2, 3, 4]) + cwp_buf = io.StringIO("\n".join(cwp_profile)) + benchmark_buf = io.StringIO("\n".join(benchmark_profile)) + with patch("sys.stderr", new=io.StringIO()) as fake_output: + _run_test(input_profile_lines, 3, cwp_buf, benchmark_buf) + + output = fake_output.getvalue() + self.assertIn("Retained 3/5 (60.0%) functions in the profile", output) + self.assertIn( + "Retained 1/1 (100.0%) functions only appear in the CWP profile", + output, + ) + self.assertIn( + "Retained 0/1 (0.0%) functions only appear in the benchmark profile", + output, + ) + self.assertIn( + "Retained 2/3 (66.7%) functions appear in both CWP and benchmark" + " profiles", + output, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/afdo_redaction/remove_indirect_calls.py b/afdo_redaction/remove_indirect_calls.py index a72e43b5..6d77ba7a 100755 --- a/afdo_redaction/remove_indirect_calls.py +++ b/afdo_redaction/remove_indirect_calls.py @@ -17,21 +17,22 @@ objects as Chrome, this can become problematic, and lead to NaCl doubling in size (or worse). See crbug.com/1005023 and crbug.com/916130. """ -from __future__ import division, print_function +from __future__ import division +from __future__ import print_function import argparse import re def _remove_indirect_call_targets(lines): - # Lines with indirect call targets look like: - # 1.1: 1234 foo:111 bar:122 - # - # Where 1.1 is the line info/discriminator, 1234 is the total number of - # samples seen for that line/discriminator, foo:111 is "111 of the calls here - # went to foo," and bar:122 is "122 of the calls here went to bar." - call_target_re = re.compile( - r""" + # Lines with indirect call targets look like: + # 1.1: 1234 foo:111 bar:122 + # + # Where 1.1 is the line info/discriminator, 1234 is the total number of + # samples seen for that line/discriminator, foo:111 is "111 of the calls here + # went to foo," and bar:122 is "122 of the calls here went to bar." + call_target_re = re.compile( + r""" ^\s+ # Top-level lines are function records. 
\d+(?:\.\d+)?: # Line info/discriminator \s+ @@ -39,42 +40,48 @@ def _remove_indirect_call_targets(lines): \s+ ((?:[^\s:]+:\d+\s*)+) # Indirect call target(s) $ - """, re.VERBOSE) - for line in lines: - line = line.rstrip() + """, + re.VERBOSE, + ) + for line in lines: + line = line.rstrip() - match = call_target_re.match(line) - if not match: - yield line + '\n' - continue + match = call_target_re.match(line) + if not match: + yield line + "\n" + continue - group_start, group_end = match.span(1) - assert group_end == len(line) - yield line[:group_start].rstrip() + '\n' + group_start, group_end = match.span(1) + assert group_end == len(line) + yield line[:group_start].rstrip() + "\n" def run(input_stream, output_stream): - for line in _remove_indirect_call_targets(input_stream): - output_stream.write(line) + for line in _remove_indirect_call_targets(input_stream): + output_stream.write(line) def main(): - parser = argparse.ArgumentParser( - description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - '--input', - default='/dev/stdin', - help='File to read from. Defaults to stdin.') - parser.add_argument( - '--output', - default='/dev/stdout', - help='File to write to. Defaults to stdout.') - args = parser.parse_args() - - with open(args.input) as stdin: - with open(args.output, 'w') as stdout: - run(stdin, stdout) - - -if __name__ == '__main__': - main() + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "--input", + default="/dev/stdin", + help="File to read from. Defaults to stdin.", + ) + parser.add_argument( + "--output", + default="/dev/stdout", + help="File to write to. Defaults to stdout.", + ) + args = parser.parse_args() + + with open(args.input) as stdin: + with open(args.output, "w") as stdout: + run(stdin, stdout) + + +if __name__ == "__main__": + main() diff --git a/afdo_redaction/remove_indirect_calls_test.py b/afdo_redaction/remove_indirect_calls_test.py index 5f8d938c..f3b4c5cc 100755 --- a/afdo_redaction/remove_indirect_calls_test.py +++ b/afdo_redaction/remove_indirect_calls_test.py @@ -15,22 +15,22 @@ from afdo_redaction import remove_indirect_calls def _run_test(input_lines): - input_buf = io.StringIO('\n'.join(input_lines)) - output_buf = io.StringIO() - remove_indirect_calls.run(input_buf, output_buf) - return output_buf.getvalue().splitlines() + input_buf = io.StringIO("\n".join(input_lines)) + output_buf = io.StringIO() + remove_indirect_calls.run(input_buf, output_buf) + return output_buf.getvalue().splitlines() class Test(unittest.TestCase): - """Tests""" + """Tests""" - def test_empty_profile(self): - self.assertEqual(_run_test([]), []) + def test_empty_profile(self): + self.assertEqual(_run_test([]), []) - def test_removal_on_real_world_code(self): - # These are copied from an actual textual AFDO profile, but the names made - # lints unhappy due to their length, so I had to be creative. - profile_lines = """_ZLongSymbolName:52862:1766 + def test_removal_on_real_world_code(self): + # These are copied from an actual textual AFDO profile, but the names made + # lints unhappy due to their length, so I had to be creative. 
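Before the test data below, it may help to see the effect of call_target_re from the hunk above on a single line. Note that the middle of the pattern (the total-sample-count term) falls between the two hunks, so its reconstruction here is an assumption:

    import re

    call_target_re = re.compile(
        r"""
        ^\s+
        \d+(?:\.\d+)?:          # Line info/discriminator
        \s+
        \d+                     # Total samples (assumed; elided between hunks)
        \s+
        ((?:[^\s:]+:\d+\s*)+)   # Indirect call target(s)
        $
        """,
        re.VERBOSE,
    )

    line = "  1.1: 1234 foo:111 bar:122"
    match = call_target_re.match(line)
    group_start, _ = match.span(1)
    print(line[:group_start].rstrip())  # prints "  1.1: 1234"

This mirrors what _remove_indirect_call_targets does: everything from the first call target onward is dropped, leaving only the line info and the total count.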
+ profile_lines = """_ZLongSymbolName:52862:1766 14: 2483 8.1: _SomeInlinedSym:45413 11: _AndAnother:35481 @@ -45,7 +45,7 @@ class Test(unittest.TestCase): 0: 2483 """.strip().splitlines() - expected_lines = """_ZLongSymbolName:52862:1766 + expected_lines = """_ZLongSymbolName:52862:1766 14: 2483 8.1: _SomeInlinedSym:45413 11: _AndAnother:35481 @@ -60,8 +60,8 @@ class Test(unittest.TestCase): 0: 2483 """.strip().splitlines() - self.assertEqual(_run_test(profile_lines), expected_lines) + self.assertEqual(_run_test(profile_lines), expected_lines) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/afdo_tools/bisection/afdo_prof_analysis.py b/afdo_tools/bisection/afdo_prof_analysis.py index 7f7c3cf2..a7bb4a4c 100755 --- a/afdo_tools/bisection/afdo_prof_analysis.py +++ b/afdo_tools/bisection/afdo_prof_analysis.py @@ -35,11 +35,12 @@ from tempfile import mkstemp class StatusEnum(IntEnum): - """Enum of valid statuses returned by profile decider.""" - GOOD_STATUS = 0 - BAD_STATUS = 1 - SKIP_STATUS = 125 - PROBLEM_STATUS = 127 + """Enum of valid statuses returned by profile decider.""" + + GOOD_STATUS = 0 + BAD_STATUS = 1 + SKIP_STATUS = 125 + PROBLEM_STATUS = 127 statuses = StatusEnum.__members__.values() @@ -48,396 +49,442 @@ _NUM_RUNS_RANGE_SEARCH = 20 # how many times range search should run its algo def json_to_text(json_prof): - text_profile = [] - for func in json_prof: - text_profile.append(func) - text_profile.append(json_prof[func]) - return ''.join(text_profile) + text_profile = [] + for func in json_prof: + text_profile.append(func) + text_profile.append(json_prof[func]) + return "".join(text_profile) def text_to_json(f): - """Performs basic parsing of an AFDO text-based profile. - - This parsing expects an input file object with contents of the form generated - by bin/llvm-profdata (within an LLVM build). - """ - results = {} - curr_func = None - curr_data = [] - for line in f: - if not line.startswith(' '): - if curr_func: - results[curr_func] = ''.join(curr_data) - curr_data = [] - curr_func, rest = line.split(':', 1) - curr_func = curr_func.strip() - curr_data.append(':' + rest) - else: - curr_data.append(line) - - if curr_func: - results[curr_func] = ''.join(curr_data) - return results + """Performs basic parsing of an AFDO text-based profile. + + This parsing expects an input file object with contents of the form generated + by bin/llvm-profdata (within an LLVM build). 
+ """ + results = {} + curr_func = None + curr_data = [] + for line in f: + if not line.startswith(" "): + if curr_func: + results[curr_func] = "".join(curr_data) + curr_data = [] + curr_func, rest = line.split(":", 1) + curr_func = curr_func.strip() + curr_data.append(":" + rest) + else: + curr_data.append(line) + + if curr_func: + results[curr_func] = "".join(curr_data) + return results def prof_to_tmp(prof): - """Creates (and returns) temp filename for given JSON-based AFDO profile.""" - fd, temp_path = mkstemp() - text_profile = json_to_text(prof) - with open(temp_path, 'w') as f: - f.write(text_profile) - os.close(fd) - return temp_path + """Creates (and returns) temp filename for given JSON-based AFDO profile.""" + fd, temp_path = mkstemp() + text_profile = json_to_text(prof) + with open(temp_path, "w") as f: + f.write(text_profile) + os.close(fd) + return temp_path class DeciderState(object): - """Class for the external decider.""" - - def __init__(self, state_file, external_decider, seed): - self.accumulated_results = [] # over this run of the script - self.external_decider = external_decider - self.saved_results = [] # imported from a previous run of this script - self.state_file = state_file - self.seed = seed if seed is not None else time.time() - - def load_state(self): - if not os.path.exists(self.state_file): - logging.info('State file %s is empty, starting from beginning', - self.state_file) - return - - with open(self.state_file, encoding='utf-8') as f: - try: - data = json.load(f) - except: - raise ValueError('Provided state file %s to resume from does not' - ' contain a valid JSON.' % self.state_file) - - if 'seed' not in data or 'accumulated_results' not in data: - raise ValueError('Provided state file %s to resume from does not contain' - ' the correct information' % self.state_file) - - self.seed = data['seed'] - self.saved_results = data['accumulated_results'] - logging.info('Restored state from %s...', self.state_file) - - def save_state(self): - state = {'seed': self.seed, 'accumulated_results': self.accumulated_results} - tmp_file = self.state_file + '.new' - with open(tmp_file, 'w', encoding='utf-8') as f: - json.dump(state, f, indent=2) - os.rename(tmp_file, self.state_file) - logging.info('Logged state to %s...', self.state_file) - - def run(self, prof, save_run=True): - """Run the external deciding script on the given profile.""" - if self.saved_results and save_run: - result = self.saved_results.pop(0) - self.accumulated_results.append(result) - self.save_state() - return StatusEnum(result) - - filename = prof_to_tmp(prof) - - try: - return_code = subprocess.call([self.external_decider, filename]) - finally: - os.remove(filename) - - if return_code in statuses: - status = StatusEnum(return_code) - if status == StatusEnum.PROBLEM_STATUS: - prof_file = prof_to_tmp(prof) - raise RuntimeError('Provided decider script returned PROBLEM_STATUS ' - 'when run on profile stored at %s. 
AFDO Profile ' - 'analysis aborting' % prof_file) - if save_run: - self.accumulated_results.append(status.value) - logging.info('Run %d of external script %s returned %s', - len(self.accumulated_results), self.external_decider, - status.name) - self.save_state() - return status - raise ValueError( - 'Provided external script had unexpected return code %d' % return_code) + """Class for the external decider.""" + + def __init__(self, state_file, external_decider, seed): + self.accumulated_results = [] # over this run of the script + self.external_decider = external_decider + self.saved_results = [] # imported from a previous run of this script + self.state_file = state_file + self.seed = seed if seed is not None else time.time() + + def load_state(self): + if not os.path.exists(self.state_file): + logging.info( + "State file %s is empty, starting from beginning", + self.state_file, + ) + return + + with open(self.state_file, encoding="utf-8") as f: + try: + data = json.load(f) + except: + raise ValueError( + "Provided state file %s to resume from does not" + " contain a valid JSON." % self.state_file + ) + + if "seed" not in data or "accumulated_results" not in data: + raise ValueError( + "Provided state file %s to resume from does not contain" + " the correct information" % self.state_file + ) + + self.seed = data["seed"] + self.saved_results = data["accumulated_results"] + logging.info("Restored state from %s...", self.state_file) + + def save_state(self): + state = { + "seed": self.seed, + "accumulated_results": self.accumulated_results, + } + tmp_file = self.state_file + ".new" + with open(tmp_file, "w", encoding="utf-8") as f: + json.dump(state, f, indent=2) + os.rename(tmp_file, self.state_file) + logging.info("Logged state to %s...", self.state_file) + + def run(self, prof, save_run=True): + """Run the external deciding script on the given profile.""" + if self.saved_results and save_run: + result = self.saved_results.pop(0) + self.accumulated_results.append(result) + self.save_state() + return StatusEnum(result) + + filename = prof_to_tmp(prof) + + try: + return_code = subprocess.call([self.external_decider, filename]) + finally: + os.remove(filename) + + if return_code in statuses: + status = StatusEnum(return_code) + if status == StatusEnum.PROBLEM_STATUS: + prof_file = prof_to_tmp(prof) + raise RuntimeError( + "Provided decider script returned PROBLEM_STATUS " + "when run on profile stored at %s. AFDO Profile " + "analysis aborting" % prof_file + ) + if save_run: + self.accumulated_results.append(status.value) + logging.info( + "Run %d of external script %s returned %s", + len(self.accumulated_results), + self.external_decider, + status.name, + ) + self.save_state() + return status + raise ValueError( + "Provided external script had unexpected return code %d" + % return_code + ) def bisect_profiles(decider, good, bad, common_funcs, lo, hi): - """Recursive function which bisects good and bad profiles. - - Args: - decider: function which, given a JSON-based AFDO profile, returns an - element of 'statuses' based on the status of the profile - good: JSON-based good AFDO profile - bad: JSON-based bad AFDO profile - common_funcs: the list of functions which have top-level profiles in both - 'good' and 'bad' - lo: lower bound of range being bisected on - hi: upper bound of range being bisected on - - Returns a dictionary with two keys: 'individuals' and 'ranges'. - 'individuals': a list of individual functions found to make the profile BAD - 'ranges': a list of lists of function names. 
Each list of functions is a list
-    such that including all of those from the bad profile makes the good
-    profile BAD. It may not be the smallest problematic combination, but
-    definitely contains a problematic combination of profiles.
-  """
-
-  results = {'individuals': [], 'ranges': []}
-  if hi - lo <= 1:
-    logging.info('Found %s as a problematic function profile', common_funcs[lo])
-    results['individuals'].append(common_funcs[lo])
-    return results
+    """Recursive function which bisects good and bad profiles.
+
+    Args:
+      decider: function which, given a JSON-based AFDO profile, returns an
+        element of 'statuses' based on the status of the profile
+      good: JSON-based good AFDO profile
+      bad: JSON-based bad AFDO profile
+      common_funcs: the list of functions which have top-level profiles in both
+        'good' and 'bad'
+      lo: lower bound of range being bisected on
+      hi: upper bound of range being bisected on
+
+    Returns a dictionary with two keys: 'individuals' and 'ranges'.
+    'individuals': a list of individual functions found to make the profile BAD
+    'ranges': a list of lists of function names. Each list of functions is a list
+      such that including all of those from the bad profile makes the good
+      profile BAD. It may not be the smallest problematic combination, but
+      definitely contains a problematic combination of profiles.
+    """
+
+    results = {"individuals": [], "ranges": []}
+    if hi - lo <= 1:
+        logging.info(
+            "Found %s as a problematic function profile", common_funcs[lo]
+        )
+        results["individuals"].append(common_funcs[lo])
+        return results
+
+    mid = (lo + hi) // 2
+    lo_mid_prof = good.copy()  # covers bad from lo:mid
+    mid_hi_prof = good.copy()  # covers bad from mid:hi
+    for func in common_funcs[lo:mid]:
+        lo_mid_prof[func] = bad[func]
+    for func in common_funcs[mid:hi]:
+        mid_hi_prof[func] = bad[func]
+
+    lo_mid_verdict = decider.run(lo_mid_prof)
+    mid_hi_verdict = decider.run(mid_hi_prof)
+
+    if lo_mid_verdict == StatusEnum.BAD_STATUS:
+        result = bisect_profiles(decider, good, bad, common_funcs, lo, mid)
+        results["individuals"].extend(result["individuals"])
+        results["ranges"].extend(result["ranges"])
+    if mid_hi_verdict == StatusEnum.BAD_STATUS:
+        result = bisect_profiles(decider, good, bad, common_funcs, mid, hi)
+        results["individuals"].extend(result["individuals"])
+        results["ranges"].extend(result["ranges"])
+
+    # neither half is bad -> the issue is caused by several things occurring
+    # in conjunction, and this combination crosses 'mid'
+    if lo_mid_verdict == mid_hi_verdict == StatusEnum.GOOD_STATUS:
+        problem_range = range_search(decider, good, bad, common_funcs, lo, hi)
+        if problem_range:
+            logging.info(
+                "Found %s as a problematic combination of profiles",
+                str(problem_range),
+            )
+            results["ranges"].append(problem_range)
 
-  mid = (lo + hi) // 2
-  lo_mid_prof = good.copy()  # covers bad from lo:mid
-  mid_hi_prof = good.copy()  # covers bad from mid:hi
-  for func in common_funcs[lo:mid]:
-    lo_mid_prof[func] = bad[func]
-  for func in common_funcs[mid:hi]:
-    mid_hi_prof[func] = bad[func]
-
-  lo_mid_verdict = decider.run(lo_mid_prof)
-  mid_hi_verdict = decider.run(mid_hi_prof)
-
-  if lo_mid_verdict == StatusEnum.BAD_STATUS:
-    result = bisect_profiles(decider, good, bad, common_funcs, lo, mid)
-    results['individuals'].extend(result['individuals'])
-    results['ranges'].extend(result['ranges'])
-  if mid_hi_verdict == StatusEnum.BAD_STATUS:
-    result = bisect_profiles(decider, good, bad, common_funcs, mid, hi)
-    results['individuals'].extend(result['individuals'])
-    
results['ranges'].extend(result['ranges']) - - # neither half is bad -> the issue is caused by several things occuring - # in conjunction, and this combination crosses 'mid' - if lo_mid_verdict == mid_hi_verdict == StatusEnum.GOOD_STATUS: - problem_range = range_search(decider, good, bad, common_funcs, lo, hi) - if problem_range: - logging.info('Found %s as a problematic combination of profiles', - str(problem_range)) - results['ranges'].append(problem_range) - - return results + return results def bisect_profiles_wrapper(decider, good, bad, perform_check=True): - """Wrapper for recursive profile bisection.""" - - # Validate good and bad profiles are such, otherwise bisection reports noise - # Note that while decider is a random mock, these assertions may fail. - if perform_check: - if decider.run(good, save_run=False) != StatusEnum.GOOD_STATUS: - raise ValueError('Supplied good profile is not actually GOOD') - if decider.run(bad, save_run=False) != StatusEnum.BAD_STATUS: - raise ValueError('Supplied bad profile is not actually BAD') - - common_funcs = sorted(func for func in good if func in bad) - if not common_funcs: - return {'ranges': [], 'individuals': []} - - # shuffle because the results of our analysis can be quite order-dependent - # but this list has no inherent ordering. By shuffling each time, the chances - # of finding new, potentially interesting results are increased each time - # the program is run - random.shuffle(common_funcs) - results = bisect_profiles(decider, good, bad, common_funcs, 0, - len(common_funcs)) - results['ranges'].sort() - results['individuals'].sort() - return results + """Wrapper for recursive profile bisection.""" + + # Validate good and bad profiles are such, otherwise bisection reports noise + # Note that while decider is a random mock, these assertions may fail. + if perform_check: + if decider.run(good, save_run=False) != StatusEnum.GOOD_STATUS: + raise ValueError("Supplied good profile is not actually GOOD") + if decider.run(bad, save_run=False) != StatusEnum.BAD_STATUS: + raise ValueError("Supplied bad profile is not actually BAD") + + common_funcs = sorted(func for func in good if func in bad) + if not common_funcs: + return {"ranges": [], "individuals": []} + + # shuffle because the results of our analysis can be quite order-dependent + # but this list has no inherent ordering. By shuffling each time, the chances + # of finding new, potentially interesting results are increased each time + # the program is run + random.shuffle(common_funcs) + results = bisect_profiles( + decider, good, bad, common_funcs, 0, len(common_funcs) + ) + results["ranges"].sort() + results["individuals"].sort() + return results def range_search(decider, good, bad, common_funcs, lo, hi): - """Searches for problematic range crossing mid border. - - The main inner algorithm is the following, which looks for the smallest - possible ranges with problematic combinations. It starts the upper bound at - the midpoint, and increments in halves until it gets a BAD profile. - Then, it increments the lower bound (in halves) until the resultant profile - is GOOD, and then we have a range that causes 'BAD'ness. - - It does this _NUM_RUNS_RANGE_SEARCH times, and shuffles the functions being - looked at uniquely each time to try and get the smallest possible range - of functions in a reasonable timeframe. 
- """ - - average = lambda x, y: int(round((x + y) // 2.0)) - - def find_upper_border(good_copy, funcs, lo, hi, last_bad_val=None): - """Finds the upper border of problematic range.""" - mid = average(lo, hi) - if mid in (lo, hi): - return last_bad_val or hi - - for func in funcs[lo:mid]: - good_copy[func] = bad[func] - verdict = decider.run(good_copy) - - # reset for next iteration - for func in funcs: - good_copy[func] = good[func] - - if verdict == StatusEnum.BAD_STATUS: - return find_upper_border(good_copy, funcs, lo, mid, mid) - return find_upper_border(good_copy, funcs, mid, hi, last_bad_val) - - def find_lower_border(good_copy, funcs, lo, hi, last_bad_val=None): - """Finds the lower border of problematic range.""" - mid = average(lo, hi) - if mid in (lo, hi): - return last_bad_val or lo - - for func in funcs[lo:mid]: - good_copy[func] = good[func] - verdict = decider.run(good_copy) - - # reset for next iteration - for func in funcs: - good_copy[func] = bad[func] - - if verdict == StatusEnum.BAD_STATUS: - return find_lower_border(good_copy, funcs, mid, hi, lo) - return find_lower_border(good_copy, funcs, lo, mid, last_bad_val) - - lo_mid_funcs = [] - mid_hi_funcs = [] - min_range_funcs = [] - for _ in range(_NUM_RUNS_RANGE_SEARCH): - - if min_range_funcs: # only examine range we've already narrowed to - random.shuffle(lo_mid_funcs) - random.shuffle(mid_hi_funcs) - else: # consider lo-mid and mid-hi separately bc must cross border - mid = (lo + hi) // 2 - lo_mid_funcs = common_funcs[lo:mid] - mid_hi_funcs = common_funcs[mid:hi] - - funcs = lo_mid_funcs + mid_hi_funcs - hi = len(funcs) - mid = len(lo_mid_funcs) - lo = 0 - - # because we need the problematic pair to pop up before we can narrow it - prof = good.copy() - for func in lo_mid_funcs: - prof[func] = bad[func] - - upper_border = find_upper_border(prof, funcs, mid, hi) - for func in lo_mid_funcs + funcs[mid:upper_border]: - prof[func] = bad[func] - - lower_border = find_lower_border(prof, funcs, lo, mid) - curr_range_funcs = funcs[lower_border:upper_border] - - if not min_range_funcs or len(curr_range_funcs) < len(min_range_funcs): - min_range_funcs = curr_range_funcs - lo_mid_funcs = lo_mid_funcs[lo_mid_funcs.index(min_range_funcs[0]):] - mid_hi_funcs = mid_hi_funcs[:mid_hi_funcs.index(min_range_funcs[-1]) + 1] - if len(min_range_funcs) == 2: - min_range_funcs.sort() - return min_range_funcs # can't get any smaller - - min_range_funcs.sort() - return min_range_funcs + """Searches for problematic range crossing mid border. + + The main inner algorithm is the following, which looks for the smallest + possible ranges with problematic combinations. It starts the upper bound at + the midpoint, and increments in halves until it gets a BAD profile. + Then, it increments the lower bound (in halves) until the resultant profile + is GOOD, and then we have a range that causes 'BAD'ness. + + It does this _NUM_RUNS_RANGE_SEARCH times, and shuffles the functions being + looked at uniquely each time to try and get the smallest possible range + of functions in a reasonable timeframe. 
+    """
+
+    average = lambda x, y: int(round((x + y) // 2.0))
+
+    def find_upper_border(good_copy, funcs, lo, hi, last_bad_val=None):
+        """Finds the upper border of problematic range."""
+        mid = average(lo, hi)
+        if mid in (lo, hi):
+            return last_bad_val or hi
+
+        for func in funcs[lo:mid]:
+            good_copy[func] = bad[func]
+        verdict = decider.run(good_copy)
+
+        # reset for next iteration
+        for func in funcs:
+            good_copy[func] = good[func]
+
+        if verdict == StatusEnum.BAD_STATUS:
+            return find_upper_border(good_copy, funcs, lo, mid, mid)
+        return find_upper_border(good_copy, funcs, mid, hi, last_bad_val)
+
+    def find_lower_border(good_copy, funcs, lo, hi, last_bad_val=None):
+        """Finds the lower border of problematic range."""
+        mid = average(lo, hi)
+        if mid in (lo, hi):
+            return last_bad_val or lo
+
+        for func in funcs[lo:mid]:
+            good_copy[func] = good[func]
+        verdict = decider.run(good_copy)
+
+        # reset for next iteration
+        for func in funcs:
+            good_copy[func] = bad[func]
+
+        if verdict == StatusEnum.BAD_STATUS:
+            return find_lower_border(good_copy, funcs, mid, hi, lo)
+        return find_lower_border(good_copy, funcs, lo, mid, last_bad_val)
+
+    lo_mid_funcs = []
+    mid_hi_funcs = []
+    min_range_funcs = []
+    for _ in range(_NUM_RUNS_RANGE_SEARCH):
+
+        if min_range_funcs:  # only examine range we've already narrowed to
+            random.shuffle(lo_mid_funcs)
+            random.shuffle(mid_hi_funcs)
+        else:  # consider lo-mid and mid-hi separately bc must cross border
+            mid = (lo + hi) // 2
+            lo_mid_funcs = common_funcs[lo:mid]
+            mid_hi_funcs = common_funcs[mid:hi]
+
+        funcs = lo_mid_funcs + mid_hi_funcs
+        hi = len(funcs)
+        mid = len(lo_mid_funcs)
+        lo = 0
+
+        # because we need the problematic pair to pop up before we can narrow it
+        prof = good.copy()
+        for func in lo_mid_funcs:
+            prof[func] = bad[func]
+
+        upper_border = find_upper_border(prof, funcs, mid, hi)
+        for func in lo_mid_funcs + funcs[mid:upper_border]:
+            prof[func] = bad[func]
+
+        lower_border = find_lower_border(prof, funcs, lo, mid)
+        curr_range_funcs = funcs[lower_border:upper_border]
+
+        if not min_range_funcs or len(curr_range_funcs) < len(min_range_funcs):
+            min_range_funcs = curr_range_funcs
+            lo_mid_funcs = lo_mid_funcs[
+                lo_mid_funcs.index(min_range_funcs[0]) :
+            ]
+            mid_hi_funcs = mid_hi_funcs[
+                : mid_hi_funcs.index(min_range_funcs[-1]) + 1
+            ]
+            if len(min_range_funcs) == 2:
+                min_range_funcs.sort()
+                return min_range_funcs  # can't get any smaller
+
+    min_range_funcs.sort()
+    return min_range_funcs
 
 
 def check_good_not_bad(decider, good, bad):
-  """Check if bad prof becomes GOOD by adding funcs it lacks from good prof"""
-  bad_copy = bad.copy()
-  for func in good:
-    if func not in bad:
-      bad_copy[func] = good[func]
-  return decider.run(bad_copy) == StatusEnum.GOOD_STATUS
+    """Check if bad prof becomes GOOD by adding funcs it lacks from good prof"""
+    bad_copy = bad.copy()
+    for func in good:
+        if func not in bad:
+            bad_copy[func] = good[func]
+    return decider.run(bad_copy) == StatusEnum.GOOD_STATUS
 
 
 def check_bad_not_good(decider, good, bad):
-  """Check if good prof BAD after adding funcs bad prof has that good doesnt"""
-  good_copy = good.copy()
-  for func in bad:
-    if func not in good:
-      good_copy[func] = bad[func]
-  return decider.run(good_copy) == StatusEnum.BAD_STATUS
+    """Check if good prof BAD after adding funcs bad prof has that good doesn't"""
+    good_copy = good.copy()
+    for func in bad:
+        if func not in good:
+            good_copy[func] = bad[func]
+    return decider.run(good_copy) == StatusEnum.BAD_STATUS
 
 
 def parse_args():
-  parser = 
argparse.ArgumentParser( - description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - '--good_prof', - required=True, - help='Text-based "Good" profile for analysis') - parser.add_argument( - '--bad_prof', required=True, help='Text-based "Bad" profile for analysis') - parser.add_argument( - '--external_decider', - required=True, - help='External script that, given an AFDO profile, returns ' - 'GOOD/BAD/SKIP') - parser.add_argument( - '--analysis_output_file', - required=True, - help='File to output JSON results to') - parser.add_argument( - '--state_file', - default='%s/afdo_analysis_state.json' % os.getcwd(), - help='File path containing state to load from initially, and will be ' - 'overwritten with new state on each iteration') - parser.add_argument( - '--no_resume', - action='store_true', - help='If enabled, no initial state will be loaded and the program will ' - 'run from the beginning') - parser.add_argument( - '--remove_state_on_completion', - action='store_true', - help='If enabled, state file will be removed once profile analysis is ' - 'completed') - parser.add_argument( - '--seed', type=float, help='Float specifying seed for randomness') - return parser.parse_args() + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "--good_prof", + required=True, + help='Text-based "Good" profile for analysis', + ) + parser.add_argument( + "--bad_prof", + required=True, + help='Text-based "Bad" profile for analysis', + ) + parser.add_argument( + "--external_decider", + required=True, + help="External script that, given an AFDO profile, returns " + "GOOD/BAD/SKIP", + ) + parser.add_argument( + "--analysis_output_file", + required=True, + help="File to output JSON results to", + ) + parser.add_argument( + "--state_file", + default="%s/afdo_analysis_state.json" % os.getcwd(), + help="File path containing state to load from initially, and will be " + "overwritten with new state on each iteration", + ) + parser.add_argument( + "--no_resume", + action="store_true", + help="If enabled, no initial state will be loaded and the program will " + "run from the beginning", + ) + parser.add_argument( + "--remove_state_on_completion", + action="store_true", + help="If enabled, state file will be removed once profile analysis is " + "completed", + ) + parser.add_argument( + "--seed", type=float, help="Float specifying seed for randomness" + ) + return parser.parse_args() def main(flags): - logging.getLogger().setLevel(logging.INFO) - if not flags.no_resume and flags.seed: # conflicting seeds - raise RuntimeError('Ambiguous seed value; do not resume from existing ' - 'state and also specify seed by command line flag') - - decider = DeciderState( - flags.state_file, flags.external_decider, seed=flags.seed) - if not flags.no_resume: - decider.load_state() - random.seed(decider.seed) - - with open(flags.good_prof) as good_f: - good_items = text_to_json(good_f) - with open(flags.bad_prof) as bad_f: - bad_items = text_to_json(bad_f) - - bisect_results = bisect_profiles_wrapper(decider, good_items, bad_items) - gnb_result = check_good_not_bad(decider, good_items, bad_items) - bng_result = check_bad_not_good(decider, good_items, bad_items) - - results = { - 'seed': decider.seed, - 'bisect_results': bisect_results, - 'good_only_functions': gnb_result, - 'bad_only_functions': bng_result - } - with open(flags.analysis_output_file, 'w', encoding='utf-8') as f: - json.dump(results, f, 
indent=2) - if flags.remove_state_on_completion: - os.remove(flags.state_file) - logging.info('Removed state file %s following completion of script...', - flags.state_file) - else: - completed_state_file = '%s.completed.%s' % (flags.state_file, - str(date.today())) - os.rename(flags.state_file, completed_state_file) - logging.info('Stored completed state file as %s...', completed_state_file) - return results - - -if __name__ == '__main__': - main(parse_args()) + logging.getLogger().setLevel(logging.INFO) + if not flags.no_resume and flags.seed: # conflicting seeds + raise RuntimeError( + "Ambiguous seed value; do not resume from existing " + "state and also specify seed by command line flag" + ) + + decider = DeciderState( + flags.state_file, flags.external_decider, seed=flags.seed + ) + if not flags.no_resume: + decider.load_state() + random.seed(decider.seed) + + with open(flags.good_prof) as good_f: + good_items = text_to_json(good_f) + with open(flags.bad_prof) as bad_f: + bad_items = text_to_json(bad_f) + + bisect_results = bisect_profiles_wrapper(decider, good_items, bad_items) + gnb_result = check_good_not_bad(decider, good_items, bad_items) + bng_result = check_bad_not_good(decider, good_items, bad_items) + + results = { + "seed": decider.seed, + "bisect_results": bisect_results, + "good_only_functions": gnb_result, + "bad_only_functions": bng_result, + } + with open(flags.analysis_output_file, "w", encoding="utf-8") as f: + json.dump(results, f, indent=2) + if flags.remove_state_on_completion: + os.remove(flags.state_file) + logging.info( + "Removed state file %s following completion of script...", + flags.state_file, + ) + else: + completed_state_file = "%s.completed.%s" % ( + flags.state_file, + str(date.today()), + ) + os.rename(flags.state_file, completed_state_file) + logging.info( + "Stored completed state file as %s...", completed_state_file + ) + return results + + +if __name__ == "__main__": + main(parse_args()) diff --git a/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py b/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py index df334317..4fe265c9 100755 --- a/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py +++ b/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py @@ -19,263 +19,273 @@ from afdo_tools.bisection import afdo_prof_analysis as analysis class ObjectWithFields(object): - """Turns kwargs given to the constructor into fields on an object. + """Turns kwargs given to the constructor into fields on an object. 
- Examples: - x = ObjectWithFields(a=1, b=2) - assert x.a == 1 - assert x.b == 2 - """ + Examples: + x = ObjectWithFields(a=1, b=2) + assert x.a == 1 + assert x.b == 2 + """ - def __init__(self, **kwargs): - for key, val in kwargs.items(): - setattr(self, key, val) + def __init__(self, **kwargs): + for key, val in kwargs.items(): + setattr(self, key, val) class AfdoProfAnalysisE2ETest(unittest.TestCase): - """Class for end-to-end testing of AFDO Profile Analysis""" - - # nothing significant about the values, just easier to remember even vs odd - good_prof = { - 'func_a': ':1\n 1: 3\n 3: 5\n 5: 7\n', - 'func_b': ':3\n 3: 5\n 5: 7\n 7: 9\n', - 'func_c': ':5\n 5: 7\n 7: 9\n 9: 11\n', - 'func_d': ':7\n 7: 9\n 9: 11\n 11: 13\n', - 'good_func_a': ':11\n', - 'good_func_b': ':13\n' - } - - bad_prof = { - 'func_a': ':2\n 2: 4\n 4: 6\n 6: 8\n', - 'func_b': ':4\n 4: 6\n 6: 8\n 8: 10\n', - 'func_c': ':6\n 6: 8\n 8: 10\n 10: 12\n', - 'func_d': ':8\n 8: 10\n 10: 12\n 12: 14\n', - 'bad_func_a': ':12\n', - 'bad_func_b': ':14\n' - } - - expected = { - 'good_only_functions': False, - 'bad_only_functions': True, - 'bisect_results': { - 'ranges': [], - 'individuals': ['func_a'] - } - } - - def test_afdo_prof_analysis(self): - # Individual issues take precedence by nature of our algos - # so first, that should be caught - good = self.good_prof.copy() - bad = self.bad_prof.copy() - self.run_check(good, bad, self.expected) - - # Now remove individuals and exclusively BAD, and check that range is caught - bad['func_a'] = good['func_a'] - bad.pop('bad_func_a') - bad.pop('bad_func_b') - - expected_cp = self.expected.copy() - expected_cp['bad_only_functions'] = False - expected_cp['bisect_results'] = { - 'individuals': [], - 'ranges': [['func_b', 'func_c', 'func_d']] + """Class for end-to-end testing of AFDO Profile Analysis""" + + # nothing significant about the values, just easier to remember even vs odd + good_prof = { + "func_a": ":1\n 1: 3\n 3: 5\n 5: 7\n", + "func_b": ":3\n 3: 5\n 5: 7\n 7: 9\n", + "func_c": ":5\n 5: 7\n 7: 9\n 9: 11\n", + "func_d": ":7\n 7: 9\n 9: 11\n 11: 13\n", + "good_func_a": ":11\n", + "good_func_b": ":13\n", + } + + bad_prof = { + "func_a": ":2\n 2: 4\n 4: 6\n 6: 8\n", + "func_b": ":4\n 4: 6\n 6: 8\n 8: 10\n", + "func_c": ":6\n 6: 8\n 8: 10\n 10: 12\n", + "func_d": ":8\n 8: 10\n 10: 12\n 12: 14\n", + "bad_func_a": ":12\n", + "bad_func_b": ":14\n", } - self.run_check(good, bad, expected_cp) - - def test_afdo_prof_state(self): - """Verifies that saved state is correct replication.""" - temp_dir = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) - - good = self.good_prof.copy() - bad = self.bad_prof.copy() - # add more functions to data - for x in range(400): - good['func_%d' % x] = '' - bad['func_%d' % x] = '' - - fd_first, first_result = tempfile.mkstemp(dir=temp_dir) - os.close(fd_first) - fd_state, state_file = tempfile.mkstemp(dir=temp_dir) - os.close(fd_state) - self.run_check( - self.good_prof, - self.bad_prof, - self.expected, - state_file=state_file, - out_file=first_result) - - fd_second, second_result = tempfile.mkstemp(dir=temp_dir) - os.close(fd_second) - completed_state_file = '%s.completed.%s' % (state_file, str(date.today())) - self.run_check( - self.good_prof, - self.bad_prof, - self.expected, - state_file=completed_state_file, - no_resume=False, - out_file=second_result) - - with open(first_result) as f: - initial_run = json.load(f) - with open(second_result) as f: - loaded_run = json.load(f) - self.assertEqual(initial_run, loaded_run) - - 
def test_exit_on_problem_status(self): - temp_dir = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) - - fd_state, state_file = tempfile.mkstemp(dir=temp_dir) - os.close(fd_state) - with self.assertRaises(RuntimeError): - self.run_check( - self.good_prof, - self.bad_prof, - self.expected, - state_file=state_file, - extern_decider='problemstatus_external.sh') - - def test_state_assumption(self): - - def compare_runs(tmp_dir, first_ctr, second_ctr): - """Compares given prof versions between first and second run in test.""" - first_prof = '%s/.first_run_%d' % (tmp_dir, first_ctr) - second_prof = '%s/.second_run_%d' % (tmp_dir, second_ctr) - with open(first_prof) as f: - first_prof_text = f.read() - with open(second_prof) as f: - second_prof_text = f.read() - self.assertEqual(first_prof_text, second_prof_text) - - good_prof = {'func_a': ':1\n3: 3\n5: 7\n'} - bad_prof = {'func_a': ':2\n4: 4\n6: 8\n'} - # add some noise to the profiles; 15 is an arbitrary choice - for x in range(15): - func = 'func_%d' % x - good_prof[func] = ':%d\n' % (x) - bad_prof[func] = ':%d\n' % (x + 1) expected = { - 'bisect_results': { - 'ranges': [], - 'individuals': ['func_a'] - }, - 'good_only_functions': False, - 'bad_only_functions': False + "good_only_functions": False, + "bad_only_functions": True, + "bisect_results": {"ranges": [], "individuals": ["func_a"]}, } - # using a static temp dir rather than a dynamic one because these files are - # shared between the bash scripts and this Python test, and the arguments - # to the bash scripts are fixed by afdo_prof_analysis.py so it would be - # difficult to communicate dynamically generated directory to bash scripts - scripts_tmp_dir = '%s/afdo_test_tmp' % os.getcwd() - os.mkdir(scripts_tmp_dir) - self.addCleanup(shutil.rmtree, scripts_tmp_dir, ignore_errors=True) - - # files used in the bash scripts used as external deciders below - # - count_file tracks the current number of calls to the script in total - # - local_count_file tracks the number of calls to the script without - # interruption - count_file = '%s/.count' % scripts_tmp_dir - local_count_file = '%s/.local_count' % scripts_tmp_dir - - # runs through whole thing at once - initial_seed = self.run_check( - good_prof, - bad_prof, - expected, - extern_decider='state_assumption_external.sh') - with open(count_file) as f: - num_calls = int(f.read()) - os.remove(count_file) # reset counts for second run - finished_state_file = 'afdo_analysis_state.json.completed.%s' % str( - date.today()) - self.addCleanup(os.remove, finished_state_file) - - # runs the same analysis but interrupted each iteration - for i in range(2 * num_calls + 1): - no_resume_run = (i == 0) - seed = initial_seed if no_resume_run else None - try: + def test_afdo_prof_analysis(self): + # Individual issues take precedence by nature of our algos + # so first, that should be caught + good = self.good_prof.copy() + bad = self.bad_prof.copy() + self.run_check(good, bad, self.expected) + + # Now remove individuals and exclusively BAD, and check that range is caught + bad["func_a"] = good["func_a"] + bad.pop("bad_func_a") + bad.pop("bad_func_b") + + expected_cp = self.expected.copy() + expected_cp["bad_only_functions"] = False + expected_cp["bisect_results"] = { + "individuals": [], + "ranges": [["func_b", "func_c", "func_d"]], + } + + self.run_check(good, bad, expected_cp) + + def test_afdo_prof_state(self): + """Verifies that saved state is correct replication.""" + temp_dir = tempfile.mkdtemp() + 
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) + + good = self.good_prof.copy() + bad = self.bad_prof.copy() + # add more functions to data + for x in range(400): + good["func_%d" % x] = "" + bad["func_%d" % x] = "" + + fd_first, first_result = tempfile.mkstemp(dir=temp_dir) + os.close(fd_first) + fd_state, state_file = tempfile.mkstemp(dir=temp_dir) + os.close(fd_state) + self.run_check( + self.good_prof, + self.bad_prof, + self.expected, + state_file=state_file, + out_file=first_result, + ) + + fd_second, second_result = tempfile.mkstemp(dir=temp_dir) + os.close(fd_second) + completed_state_file = "%s.completed.%s" % ( + state_file, + str(date.today()), + ) self.run_check( + self.good_prof, + self.bad_prof, + self.expected, + state_file=completed_state_file, + no_resume=False, + out_file=second_result, + ) + + with open(first_result) as f: + initial_run = json.load(f) + with open(second_result) as f: + loaded_run = json.load(f) + self.assertEqual(initial_run, loaded_run) + + def test_exit_on_problem_status(self): + temp_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) + + fd_state, state_file = tempfile.mkstemp(dir=temp_dir) + os.close(fd_state) + with self.assertRaises(RuntimeError): + self.run_check( + self.good_prof, + self.bad_prof, + self.expected, + state_file=state_file, + extern_decider="problemstatus_external.sh", + ) + + def test_state_assumption(self): + def compare_runs(tmp_dir, first_ctr, second_ctr): + """Compares given prof versions between first and second run in test.""" + first_prof = "%s/.first_run_%d" % (tmp_dir, first_ctr) + second_prof = "%s/.second_run_%d" % (tmp_dir, second_ctr) + with open(first_prof) as f: + first_prof_text = f.read() + with open(second_prof) as f: + second_prof_text = f.read() + self.assertEqual(first_prof_text, second_prof_text) + + good_prof = {"func_a": ":1\n3: 3\n5: 7\n"} + bad_prof = {"func_a": ":2\n4: 4\n6: 8\n"} + # add some noise to the profiles; 15 is an arbitrary choice + for x in range(15): + func = "func_%d" % x + good_prof[func] = ":%d\n" % (x) + bad_prof[func] = ":%d\n" % (x + 1) + expected = { + "bisect_results": {"ranges": [], "individuals": ["func_a"]}, + "good_only_functions": False, + "bad_only_functions": False, + } + + # using a static temp dir rather than a dynamic one because these files are + # shared between the bash scripts and this Python test, and the arguments + # to the bash scripts are fixed by afdo_prof_analysis.py so it would be + # difficult to communicate dynamically generated directory to bash scripts + scripts_tmp_dir = "%s/afdo_test_tmp" % os.getcwd() + os.mkdir(scripts_tmp_dir) + self.addCleanup(shutil.rmtree, scripts_tmp_dir, ignore_errors=True) + + # files used in the bash scripts used as external deciders below + # - count_file tracks the current number of calls to the script in total + # - local_count_file tracks the number of calls to the script without + # interruption + count_file = "%s/.count" % scripts_tmp_dir + local_count_file = "%s/.local_count" % scripts_tmp_dir + + # runs through whole thing at once + initial_seed = self.run_check( good_prof, bad_prof, expected, - no_resume=no_resume_run, - extern_decider='state_assumption_interrupt.sh', - seed=seed) - break - except RuntimeError: - # script was interrupted, so we restart local count - os.remove(local_count_file) - else: - raise RuntimeError('Test failed -- took too many iterations') - - for initial_ctr in range(3): # initial runs unaffected by interruption - compare_runs(scripts_tmp_dir, 
initial_ctr, initial_ctr) - - start = 3 - for ctr in range(start, num_calls): - # second run counter incremented by 4 for each one first run is because - # +2 for performing initial checks on good and bad profs each time - # +1 for PROBLEM_STATUS run which causes error and restart - compare_runs(scripts_tmp_dir, ctr, 6 + (ctr - start) * 4) - - def run_check(self, - good_prof, - bad_prof, - expected, - state_file=None, - no_resume=True, - out_file=None, - extern_decider=None, - seed=None): - - temp_dir = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) - - good_prof_file = '%s/%s' % (temp_dir, 'good_prof.txt') - bad_prof_file = '%s/%s' % (temp_dir, 'bad_prof.txt') - good_prof_text = analysis.json_to_text(good_prof) - bad_prof_text = analysis.json_to_text(bad_prof) - with open(good_prof_file, 'w') as f: - f.write(good_prof_text) - with open(bad_prof_file, 'w') as f: - f.write(bad_prof_text) - - dir_path = os.path.dirname(os.path.realpath(__file__)) # dir of this file - external_script = '%s/%s' % (dir_path, extern_decider or 'e2e_external.sh') - - # FIXME: This test ideally shouldn't be writing to $PWD - if state_file is None: - state_file = '%s/afdo_analysis_state.json' % os.getcwd() - - def rm_state(): - try: - os.unlink(state_file) - except OSError: - # Probably because the file DNE. That's fine. - pass - - self.addCleanup(rm_state) - - actual = analysis.main( - ObjectWithFields( - good_prof=good_prof_file, - bad_prof=bad_prof_file, - external_decider=external_script, - analysis_output_file=out_file or '/dev/null', - state_file=state_file, - no_resume=no_resume, - remove_state_on_completion=False, - seed=seed, - )) - actual_seed = actual.pop('seed') # nothing to check - self.assertEqual(actual, expected) - return actual_seed - - -if __name__ == '__main__': - unittest.main() + extern_decider="state_assumption_external.sh", + ) + with open(count_file) as f: + num_calls = int(f.read()) + os.remove(count_file) # reset counts for second run + finished_state_file = "afdo_analysis_state.json.completed.%s" % str( + date.today() + ) + self.addCleanup(os.remove, finished_state_file) + + # runs the same analysis but interrupted each iteration + for i in range(2 * num_calls + 1): + no_resume_run = i == 0 + seed = initial_seed if no_resume_run else None + try: + self.run_check( + good_prof, + bad_prof, + expected, + no_resume=no_resume_run, + extern_decider="state_assumption_interrupt.sh", + seed=seed, + ) + break + except RuntimeError: + # script was interrupted, so we restart local count + os.remove(local_count_file) + else: + raise RuntimeError("Test failed -- took too many iterations") + + for initial_ctr in range(3): # initial runs unaffected by interruption + compare_runs(scripts_tmp_dir, initial_ctr, initial_ctr) + + start = 3 + for ctr in range(start, num_calls): + # second run counter incremented by 4 for each one first run is because + # +2 for performing initial checks on good and bad profs each time + # +1 for PROBLEM_STATUS run which causes error and restart + compare_runs(scripts_tmp_dir, ctr, 6 + (ctr - start) * 4) + + def run_check( + self, + good_prof, + bad_prof, + expected, + state_file=None, + no_resume=True, + out_file=None, + extern_decider=None, + seed=None, + ): + + temp_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) + + good_prof_file = "%s/%s" % (temp_dir, "good_prof.txt") + bad_prof_file = "%s/%s" % (temp_dir, "bad_prof.txt") + good_prof_text = analysis.json_to_text(good_prof) + bad_prof_text = 
analysis.json_to_text(bad_prof) + with open(good_prof_file, "w") as f: + f.write(good_prof_text) + with open(bad_prof_file, "w") as f: + f.write(bad_prof_text) + + dir_path = os.path.dirname( + os.path.realpath(__file__) + ) # dir of this file + external_script = "%s/%s" % ( + dir_path, + extern_decider or "e2e_external.sh", + ) + + # FIXME: This test ideally shouldn't be writing to $PWD + if state_file is None: + state_file = "%s/afdo_analysis_state.json" % os.getcwd() + + def rm_state(): + try: + os.unlink(state_file) + except OSError: + # Probably because the file DNE. That's fine. + pass + + self.addCleanup(rm_state) + + actual = analysis.main( + ObjectWithFields( + good_prof=good_prof_file, + bad_prof=bad_prof_file, + external_decider=external_script, + analysis_output_file=out_file or "/dev/null", + state_file=state_file, + no_resume=no_resume, + remove_state_on_completion=False, + seed=seed, + ) + ) + actual_seed = actual.pop("seed") # nothing to check + self.assertEqual(actual, expected) + return actual_seed + + +if __name__ == "__main__": + unittest.main() diff --git a/afdo_tools/bisection/afdo_prof_analysis_test.py b/afdo_tools/bisection/afdo_prof_analysis_test.py index 3e6f41e0..6d4b17d3 100755 --- a/afdo_tools/bisection/afdo_prof_analysis_test.py +++ b/afdo_tools/bisection/afdo_prof_analysis_test.py @@ -16,139 +16,154 @@ from afdo_tools.bisection import afdo_prof_analysis as analysis class AfdoProfAnalysisTest(unittest.TestCase): - """Class for testing AFDO Profile Analysis""" - bad_items = {'func_a': '1', 'func_b': '3', 'func_c': '5'} - good_items = {'func_a': '2', 'func_b': '4', 'func_d': '5'} - random.seed(13) # 13 is an arbitrary choice. just for consistency - # add some extra info to make tests more reflective of real scenario - for num in range(128): - func_name = 'func_extra_%d' % num - # 1/3 to both, 1/3 only to good, 1/3 only to bad - rand_val = random.randint(1, 101) - if rand_val < 67: - bad_items[func_name] = 'test_data' - if rand_val < 34 or rand_val >= 67: - good_items[func_name] = 'test_data' - - analysis.random.seed(5) # 5 is an arbitrary choice. 
For consistent testing - - def test_text_to_json(self): - test_data = io.StringIO('deflate_slow:87460059:3\n' - ' 3: 24\n' - ' 14: 54767\n' - ' 15: 664 fill_window:22\n' - ' 16: 661\n' - ' 19: 637\n' - ' 41: 36692 longest_match:36863\n' - ' 44: 36692\n' - ' 44.2: 5861\n' - ' 46: 13942\n' - ' 46.1: 14003\n') - expected = { - 'deflate_slow': ':87460059:3\n' - ' 3: 24\n' - ' 14: 54767\n' - ' 15: 664 fill_window:22\n' - ' 16: 661\n' - ' 19: 637\n' - ' 41: 36692 longest_match:36863\n' - ' 44: 36692\n' - ' 44.2: 5861\n' - ' 46: 13942\n' - ' 46.1: 14003\n' - } - actual = analysis.text_to_json(test_data) - self.assertEqual(actual, expected) - test_data.close() - - def test_text_to_json_empty_afdo(self): - expected = {} - actual = analysis.text_to_json('') - self.assertEqual(actual, expected) - - def test_json_to_text(self): - example_prof = {'func_a': ':1\ndata\n', 'func_b': ':2\nmore data\n'} - expected_text = 'func_a:1\ndata\nfunc_b:2\nmore data\n' - self.assertEqual(analysis.json_to_text(example_prof), expected_text) - - def test_bisect_profiles(self): - - # mock run of external script with arbitrarily-chosen bad profile vals - # save_run specified and unused b/c afdo_prof_analysis.py - # will call with argument explicitly specified - # pylint: disable=unused-argument - class DeciderClass(object): - """Class for this tests's decider.""" - - def run(self, prof, save_run=False): - if '1' in prof['func_a'] or '3' in prof['func_b']: - return analysis.StatusEnum.BAD_STATUS - return analysis.StatusEnum.GOOD_STATUS - - results = analysis.bisect_profiles_wrapper(DeciderClass(), self.good_items, - self.bad_items) - self.assertEqual(results['individuals'], sorted(['func_a', 'func_b'])) - self.assertEqual(results['ranges'], []) - - def test_range_search(self): - - # arbitrarily chosen functions whose values in the bad profile constitute - # a problematic pair - # pylint: disable=unused-argument - class DeciderClass(object): - """Class for this tests's decider.""" - - def run(self, prof, save_run=False): - if '1' in prof['func_a'] and '3' in prof['func_b']: - return analysis.StatusEnum.BAD_STATUS - return analysis.StatusEnum.GOOD_STATUS - - # put the problematic combination in separate halves of the common funcs - # so that non-bisecting search is invoked for its actual use case - common_funcs = [func for func in self.good_items if func in self.bad_items] - common_funcs.remove('func_a') - common_funcs.insert(0, 'func_a') - common_funcs.remove('func_b') - common_funcs.append('func_b') - - problem_range = analysis.range_search(DeciderClass(), self.good_items, - self.bad_items, common_funcs, 0, - len(common_funcs)) - - self.assertEqual(['func_a', 'func_b'], problem_range) - - def test_check_good_not_bad(self): - func_in_good = 'func_c' - - # pylint: disable=unused-argument - class DeciderClass(object): - """Class for this tests's decider.""" - - def run(self, prof, save_run=False): - if func_in_good in prof: - return analysis.StatusEnum.GOOD_STATUS - return analysis.StatusEnum.BAD_STATUS - - self.assertTrue( - analysis.check_good_not_bad(DeciderClass(), self.good_items, - self.bad_items)) - - def test_check_bad_not_good(self): - func_in_bad = 'func_d' - - # pylint: disable=unused-argument - class DeciderClass(object): - """Class for this tests's decider.""" - - def run(self, prof, save_run=False): - if func_in_bad in prof: - return analysis.StatusEnum.BAD_STATUS - return analysis.StatusEnum.GOOD_STATUS - - self.assertTrue( - analysis.check_bad_not_good(DeciderClass(), self.good_items, - self.bad_items)) - - 
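Before the reformatted version of this test file below, one note on the class body: the two threshold comparisons split rand_val three ways, which the "1/3 to both, 1/3 only to good, 1/3 only to bad" comment compresses into one line. A tiny illustrative check, using a hypothetical helper that mirrors those comparisons:

    def _bucket(rand_val):
        # Mirrors the two threshold tests in the class body.
        goes_to_bad = rand_val < 67
        goes_to_good = rand_val < 34 or rand_val >= 67
        return goes_to_good, goes_to_bad

    assert _bucket(10) == (True, True)   # 1..33: both profiles
    assert _bucket(50) == (False, True)  # 34..66: bad profile only
    assert _bucket(80) == (True, False)  # 67..101: good profile only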
-if __name__ == '__main__': - unittest.main() + """Class for testing AFDO Profile Analysis""" + + bad_items = {"func_a": "1", "func_b": "3", "func_c": "5"} + good_items = {"func_a": "2", "func_b": "4", "func_d": "5"} + random.seed(13) # 13 is an arbitrary choice. just for consistency + # add some extra info to make tests more reflective of real scenario + for num in range(128): + func_name = "func_extra_%d" % num + # 1/3 to both, 1/3 only to good, 1/3 only to bad + rand_val = random.randint(1, 101) + if rand_val < 67: + bad_items[func_name] = "test_data" + if rand_val < 34 or rand_val >= 67: + good_items[func_name] = "test_data" + + analysis.random.seed(5) # 5 is an arbitrary choice. For consistent testing + + def test_text_to_json(self): + test_data = io.StringIO( + "deflate_slow:87460059:3\n" + " 3: 24\n" + " 14: 54767\n" + " 15: 664 fill_window:22\n" + " 16: 661\n" + " 19: 637\n" + " 41: 36692 longest_match:36863\n" + " 44: 36692\n" + " 44.2: 5861\n" + " 46: 13942\n" + " 46.1: 14003\n" + ) + expected = { + "deflate_slow": ":87460059:3\n" + " 3: 24\n" + " 14: 54767\n" + " 15: 664 fill_window:22\n" + " 16: 661\n" + " 19: 637\n" + " 41: 36692 longest_match:36863\n" + " 44: 36692\n" + " 44.2: 5861\n" + " 46: 13942\n" + " 46.1: 14003\n" + } + actual = analysis.text_to_json(test_data) + self.assertEqual(actual, expected) + test_data.close() + + def test_text_to_json_empty_afdo(self): + expected = {} + actual = analysis.text_to_json("") + self.assertEqual(actual, expected) + + def test_json_to_text(self): + example_prof = {"func_a": ":1\ndata\n", "func_b": ":2\nmore data\n"} + expected_text = "func_a:1\ndata\nfunc_b:2\nmore data\n" + self.assertEqual(analysis.json_to_text(example_prof), expected_text) + + def test_bisect_profiles(self): + + # mock run of external script with arbitrarily-chosen bad profile vals + # save_run specified and unused b/c afdo_prof_analysis.py + # will call with argument explicitly specified + # pylint: disable=unused-argument + class DeciderClass(object): + """Class for this tests's decider.""" + + def run(self, prof, save_run=False): + if "1" in prof["func_a"] or "3" in prof["func_b"]: + return analysis.StatusEnum.BAD_STATUS + return analysis.StatusEnum.GOOD_STATUS + + results = analysis.bisect_profiles_wrapper( + DeciderClass(), self.good_items, self.bad_items + ) + self.assertEqual(results["individuals"], sorted(["func_a", "func_b"])) + self.assertEqual(results["ranges"], []) + + def test_range_search(self): + + # arbitrarily chosen functions whose values in the bad profile constitute + # a problematic pair + # pylint: disable=unused-argument + class DeciderClass(object): + """Class for this tests's decider.""" + + def run(self, prof, save_run=False): + if "1" in prof["func_a"] and "3" in prof["func_b"]: + return analysis.StatusEnum.BAD_STATUS + return analysis.StatusEnum.GOOD_STATUS + + # put the problematic combination in separate halves of the common funcs + # so that non-bisecting search is invoked for its actual use case + common_funcs = [ + func for func in self.good_items if func in self.bad_items + ] + common_funcs.remove("func_a") + common_funcs.insert(0, "func_a") + common_funcs.remove("func_b") + common_funcs.append("func_b") + + problem_range = analysis.range_search( + DeciderClass(), + self.good_items, + self.bad_items, + common_funcs, + 0, + len(common_funcs), + ) + + self.assertEqual(["func_a", "func_b"], problem_range) + + def test_check_good_not_bad(self): + func_in_good = "func_c" + + # pylint: disable=unused-argument + class 
DeciderClass(object): + """Class for this tests's decider.""" + + def run(self, prof, save_run=False): + if func_in_good in prof: + return analysis.StatusEnum.GOOD_STATUS + return analysis.StatusEnum.BAD_STATUS + + self.assertTrue( + analysis.check_good_not_bad( + DeciderClass(), self.good_items, self.bad_items + ) + ) + + def test_check_bad_not_good(self): + func_in_bad = "func_d" + + # pylint: disable=unused-argument + class DeciderClass(object): + """Class for this tests's decider.""" + + def run(self, prof, save_run=False): + if func_in_bad in prof: + return analysis.StatusEnum.BAD_STATUS + return analysis.StatusEnum.GOOD_STATUS + + self.assertTrue( + analysis.check_bad_not_good( + DeciderClass(), self.good_items, self.bad_items + ) + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/afdo_tools/generate_afdo_from_tryjob.py b/afdo_tools/generate_afdo_from_tryjob.py index 11055146..3c5c7f64 100755 --- a/afdo_tools/generate_afdo_from_tryjob.py +++ b/afdo_tools/generate_afdo_from_tryjob.py @@ -17,149 +17,162 @@ import subprocess import sys import tempfile -_CREATE_LLVM_PROF = 'create_llvm_prof' -_GS_PREFIX = 'gs://' + +_CREATE_LLVM_PROF = "create_llvm_prof" +_GS_PREFIX = "gs://" def _fetch_gs_artifact(remote_name, local_name): - assert remote_name.startswith(_GS_PREFIX) - subprocess.check_call(['gsutil', 'cp', remote_name, local_name]) + assert remote_name.startswith(_GS_PREFIX) + subprocess.check_call(["gsutil", "cp", remote_name, local_name]) def _fetch_and_maybe_unpack(remote_name, local_name): - unpackers = [ - ('.tar.bz2', ['tar', 'xaf']), - ('.bz2', ['bunzip2']), - ('.tar.xz', ['tar', 'xaf']), - ('.xz', ['xz', '-d']), - ] - - unpack_ext = None - unpack_cmd = None - for ext, unpack in unpackers: - if remote_name.endswith(ext): - unpack_ext, unpack_cmd = ext, unpack - break - - download_to = local_name + unpack_ext if unpack_ext else local_name - _fetch_gs_artifact(remote_name, download_to) - if unpack_cmd is not None: - print('Unpacking', download_to) - subprocess.check_output(unpack_cmd + [download_to]) - assert os.path.exists(local_name) + unpackers = [ + (".tar.bz2", ["tar", "xaf"]), + (".bz2", ["bunzip2"]), + (".tar.xz", ["tar", "xaf"]), + (".xz", ["xz", "-d"]), + ] + + unpack_ext = None + unpack_cmd = None + for ext, unpack in unpackers: + if remote_name.endswith(ext): + unpack_ext, unpack_cmd = ext, unpack + break + + download_to = local_name + unpack_ext if unpack_ext else local_name + _fetch_gs_artifact(remote_name, download_to) + if unpack_cmd is not None: + print("Unpacking", download_to) + subprocess.check_output(unpack_cmd + [download_to]) + assert os.path.exists(local_name) def _generate_afdo(perf_profile_loc, tryjob_loc, output_name): - if perf_profile_loc.startswith(_GS_PREFIX): - local_loc = 'perf.data' - _fetch_and_maybe_unpack(perf_profile_loc, local_loc) - perf_profile_loc = local_loc - - chrome_in_debug_loc = 'debug/opt/google/chrome/chrome.debug' - debug_out = 'debug.tgz' - _fetch_gs_artifact(os.path.join(tryjob_loc, 'debug.tgz'), debug_out) - - print('Extracting chrome.debug.') - # This has tons of artifacts, and we only want Chrome; don't waste time - # extracting the rest in _fetch_and_maybe_unpack. - subprocess.check_call(['tar', 'xaf', 'debug.tgz', chrome_in_debug_loc]) - - # Note that the AFDO tool *requires* a binary named `chrome` to be present if - # we're generating a profile for chrome. It's OK for this to be split debug - # information. 
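Stepping back to the unpacker table in _fetch_and_maybe_unpack above: the loop takes the first matching suffix, so compound extensions must precede their single-suffix counterparts. A self-contained sketch of that lookup; the names here are local to the example:

    UNPACKERS = [
        (".tar.bz2", ["tar", "xaf"]),
        (".bz2", ["bunzip2"]),
        (".tar.xz", ["tar", "xaf"]),
        (".xz", ["xz", "-d"]),
    ]

    def pick_unpacker(remote_name):
        # First match wins, so ".tar.bz2" is checked before ".bz2".
        for ext, cmd in UNPACKERS:
            if remote_name.endswith(ext):
                return ext, cmd
        return None, None

    assert pick_unpacker("perf.tar.bz2") == (".tar.bz2", ["tar", "xaf"])
    assert pick_unpacker("perf.bz2") == (".bz2", ["bunzip2"])
    assert pick_unpacker("perf.data") == (None, None)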
- os.rename(chrome_in_debug_loc, 'chrome') - - print('Generating AFDO profile.') - subprocess.check_call([ - _CREATE_LLVM_PROF, '--out=' + output_name, '--binary=chrome', - '--profile=' + perf_profile_loc - ]) + if perf_profile_loc.startswith(_GS_PREFIX): + local_loc = "perf.data" + _fetch_and_maybe_unpack(perf_profile_loc, local_loc) + perf_profile_loc = local_loc + + chrome_in_debug_loc = "debug/opt/google/chrome/chrome.debug" + debug_out = "debug.tgz" + _fetch_gs_artifact(os.path.join(tryjob_loc, "debug.tgz"), debug_out) + + print("Extracting chrome.debug.") + # This has tons of artifacts, and we only want Chrome; don't waste time + # extracting the rest in _fetch_and_maybe_unpack. + subprocess.check_call(["tar", "xaf", "debug.tgz", chrome_in_debug_loc]) + + # Note that the AFDO tool *requires* a binary named `chrome` to be present if + # we're generating a profile for chrome. It's OK for this to be split debug + # information. + os.rename(chrome_in_debug_loc, "chrome") + + print("Generating AFDO profile.") + subprocess.check_call( + [ + _CREATE_LLVM_PROF, + "--out=" + output_name, + "--binary=chrome", + "--profile=" + perf_profile_loc, + ] + ) def _abspath_or_gs_link(path): - if path.startswith(_GS_PREFIX): - return path - return os.path.abspath(path) + if path.startswith(_GS_PREFIX): + return path + return os.path.abspath(path) def _tryjob_arg(tryjob_arg): - # Forward gs args through - if tryjob_arg.startswith(_GS_PREFIX): - return tryjob_arg + # Forward gs args through + if tryjob_arg.startswith(_GS_PREFIX): + return tryjob_arg - # Clicking on the 'Artifacts' link gives us a pantheon link that's basically - # a preamble and gs path. - pantheon = 'https://pantheon.corp.google.com/storage/browser/' - if tryjob_arg.startswith(pantheon): - return _GS_PREFIX + tryjob_arg[len(pantheon):] + # Clicking on the 'Artifacts' link gives us a pantheon link that's basically + # a preamble and gs path. + pantheon = "https://pantheon.corp.google.com/storage/browser/" + if tryjob_arg.startswith(pantheon): + return _GS_PREFIX + tryjob_arg[len(pantheon) :] - # Otherwise, only do things with a tryjob ID (e.g. R75-11965.0.0-b3648595) - if not tryjob_arg.startswith('R'): - raise ValueError('Unparseable tryjob arg; give a tryjob ID, pantheon ' - 'link, or gs:// link. Please see source for more.') + # Otherwise, only do things with a tryjob ID (e.g. R75-11965.0.0-b3648595) + if not tryjob_arg.startswith("R"): + raise ValueError( + "Unparseable tryjob arg; give a tryjob ID, pantheon " + "link, or gs:// link. Please see source for more." + ) - chell_path = 'chromeos-image-archive/chell-chrome-pfq-tryjob/' - # ...And assume it's from chell, since that's the only thing we generate - # profiles with today. - return _GS_PREFIX + chell_path + tryjob_arg + chell_path = "chromeos-image-archive/chell-chrome-pfq-tryjob/" + # ...And assume it's from chell, since that's the only thing we generate + # profiles with today. + return _GS_PREFIX + chell_path + tryjob_arg def main(): - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument( - '--perf_profile', - required=True, - help='Path to our perf profile. Accepts either a gs:// path or local ' - 'filepath.') - parser.add_argument( - '--tryjob', - required=True, - type=_tryjob_arg, - help="Path to our tryjob's artifacts. Accepts a gs:// path, pantheon " - 'link, or tryjob ID, e.g. R75-11965.0.0-b3648595. 
In the last case, ' - 'the assumption is that you ran a chell-chrome-pfq-tryjob.') - parser.add_argument( - '-o', - '--output', - default='afdo.prof', - help='Where to put the AFDO profile. Default is afdo.prof.') - parser.add_argument( - '-k', - '--keep_artifacts_on_failure', - action='store_true', - help="Don't remove the tempdir on failure") - args = parser.parse_args() - - if not distutils.spawn.find_executable(_CREATE_LLVM_PROF): - sys.exit(_CREATE_LLVM_PROF + ' not found; are you in the chroot?') - - profile = _abspath_or_gs_link(args.perf_profile) - afdo_output = os.path.abspath(args.output) - - initial_dir = os.getcwd() - temp_dir = tempfile.mkdtemp(prefix='generate_afdo') - success = True - try: - os.chdir(temp_dir) - _generate_afdo(profile, args.tryjob, afdo_output) - - # The AFDO tooling is happy to generate essentially empty profiles for us. - # Chrome's profiles are often 8+ MB; if we only see a small fraction of - # that, something's off. 512KB was arbitrarily selected. - if os.path.getsize(afdo_output) < 512 * 1024: - raise ValueError('The AFDO profile is suspiciously small for Chrome. ' - 'Something might have gone wrong.') - except: - success = False - raise - finally: - os.chdir(initial_dir) - - if success or not args.keep_artifacts_on_failure: - shutil.rmtree(temp_dir, ignore_errors=True) - else: - print('Artifacts are available at', temp_dir) - - -if __name__ == '__main__': - sys.exit(main()) + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--perf_profile", + required=True, + help="Path to our perf profile. Accepts either a gs:// path or local " + "filepath.", + ) + parser.add_argument( + "--tryjob", + required=True, + type=_tryjob_arg, + help="Path to our tryjob's artifacts. Accepts a gs:// path, pantheon " + "link, or tryjob ID, e.g. R75-11965.0.0-b3648595. In the last case, " + "the assumption is that you ran a chell-chrome-pfq-tryjob.", + ) + parser.add_argument( + "-o", + "--output", + default="afdo.prof", + help="Where to put the AFDO profile. Default is afdo.prof.", + ) + parser.add_argument( + "-k", + "--keep_artifacts_on_failure", + action="store_true", + help="Don't remove the tempdir on failure", + ) + args = parser.parse_args() + + if not distutils.spawn.find_executable(_CREATE_LLVM_PROF): + sys.exit(_CREATE_LLVM_PROF + " not found; are you in the chroot?") + + profile = _abspath_or_gs_link(args.perf_profile) + afdo_output = os.path.abspath(args.output) + + initial_dir = os.getcwd() + temp_dir = tempfile.mkdtemp(prefix="generate_afdo") + success = True + try: + os.chdir(temp_dir) + _generate_afdo(profile, args.tryjob, afdo_output) + + # The AFDO tooling is happy to generate essentially empty profiles for us. + # Chrome's profiles are often 8+ MB; if we only see a small fraction of + # that, something's off. 512KB was arbitrarily selected. + if os.path.getsize(afdo_output) < 512 * 1024: + raise ValueError( + "The AFDO profile is suspiciously small for Chrome. " + "Something might have gone wrong." 
+ ) + except: + success = False + raise + finally: + os.chdir(initial_dir) + + if success or not args.keep_artifacts_on_failure: + shutil.rmtree(temp_dir, ignore_errors=True) + else: + print("Artifacts are available at", temp_dir) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/afdo_tools/run_afdo_tryjob.py b/afdo_tools/run_afdo_tryjob.py index 3c5b0072..5112723e 100755 --- a/afdo_tools/run_afdo_tryjob.py +++ b/afdo_tools/run_afdo_tryjob.py @@ -60,112 +60,124 @@ import time def main(): - parser = argparse.ArgumentParser( - description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - '--force_no_patches', - action='store_true', - help='Run even if no patches are provided') - parser.add_argument( - '--tag_profiles_with_current_time', - action='store_true', - help='Perf profile names will have the current time added to them.') - parser.add_argument( - '--use_afdo_generation_stage', - action='store_true', - help='Perf profiles will be automatically converted to AFDO profiles.') - parser.add_argument( - '-g', - '--patch', - action='append', - default=[], - help='A patch to add to the AFDO run') - parser.add_argument( - '-n', - '--dry_run', - action='store_true', - help='Just print the command that would be run') - args = parser.parse_args() - - dry_run = args.dry_run - force_no_patches = args.force_no_patches - tag_profiles_with_current_time = args.tag_profiles_with_current_time - use_afdo_generation_stage = args.use_afdo_generation_stage - user_patches = args.patch - - if tag_profiles_with_current_time and use_afdo_generation_stage: - raise ValueError("You can't tag profiles with the time + have " - 'afdo-generate') - - if not tag_profiles_with_current_time and not use_afdo_generation_stage: - print('Neither current_time nor afdo_generate asked for. Assuming you ' - 'prefer current time tagging.') - print('You have 5 seconds to cancel and try again.') - print() + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "--force_no_patches", + action="store_true", + help="Run even if no patches are provided", + ) + parser.add_argument( + "--tag_profiles_with_current_time", + action="store_true", + help="Perf profile names will have the current time added to them.", + ) + parser.add_argument( + "--use_afdo_generation_stage", + action="store_true", + help="Perf profiles will be automatically converted to AFDO profiles.", + ) + parser.add_argument( + "-g", + "--patch", + action="append", + default=[], + help="A patch to add to the AFDO run", + ) + parser.add_argument( + "-n", + "--dry_run", + action="store_true", + help="Just print the command that would be run", + ) + args = parser.parse_args() + + dry_run = args.dry_run + force_no_patches = args.force_no_patches + tag_profiles_with_current_time = args.tag_profiles_with_current_time + use_afdo_generation_stage = args.use_afdo_generation_stage + user_patches = args.patch + + if tag_profiles_with_current_time and use_afdo_generation_stage: + raise ValueError( + "You can't tag profiles with the time + have " "afdo-generate" + ) + + if not tag_profiles_with_current_time and not use_afdo_generation_stage: + print( + "Neither current_time nor afdo_generate asked for. Assuming you " + "prefer current time tagging." 
+ ) + print("You have 5 seconds to cancel and try again.") + print() + if not dry_run: + time.sleep(5) + tag_profiles_with_current_time = True + + patches = [ + # Send profiles to localmirror instead of chromeos-prebuilt. This should + # always be done, since sending profiles into production is bad. :) + # https://chromium-review.googlesource.com/c/chromiumos/third_party/autotest/+/1436158 + 1436158, + # Force profile generation. Otherwise, we'll decide to not spawn off the + # perf hwtests. + # https://chromium-review.googlesource.com/c/chromiumos/chromite/+/1313291 + 1313291, + ] + + if tag_profiles_with_current_time: + # Tags the profiles with the current time of day. As detailed in the + # docstring, this is desirable unless you're sure that this is the only + # experimental profile that will be generated today. + # https://chromium-review.googlesource.com/c/chromiumos/third_party/autotest/+/1436157 + patches.append(1436157) + + if use_afdo_generation_stage: + # Make the profile generation stage look in localmirror, instead of having + # it look in chromeos-prebuilt. Without this, we'll never upload + # chrome.debug or try to generate an AFDO profile. + # https://chromium-review.googlesource.com/c/chromiumos/chromite/+/1436583 + patches.append(1436583) + + if not user_patches and not force_no_patches: + raise ValueError( + "No patches given; pass --force_no_patches to force a " "tryjob" + ) + + for patch in user_patches: + # We accept two formats. Either a URL that ends with a number, or a number. + if patch.startswith("http"): + patch = patch.split("/")[-1] + patches.append(int(patch)) + + count = collections.Counter(patches) + too_many = [k for k, v in count.items() if v > 1] + if too_many: + too_many.sort() + raise ValueError( + "Patch(es) asked for application more than once: %s" % too_many + ) + + args = [ + "cros", + "tryjob", + ] + + for patch in patches: + args += ["-g", str(patch)] + + args += [ + "--nochromesdk", + "--hwtest", + "chell-chrome-pfq-tryjob", + ] + + print(" ".join(pipes.quote(a) for a in args)) if not dry_run: - time.sleep(5) - tag_profiles_with_current_time = True - - patches = [ - # Send profiles to localmirror instead of chromeos-prebuilt. This should - # always be done, since sending profiles into production is bad. :) - # https://chromium-review.googlesource.com/c/chromiumos/third_party/autotest/+/1436158 - 1436158, - # Force profile generation. Otherwise, we'll decide to not spawn off the - # perf hwtests. - # https://chromium-review.googlesource.com/c/chromiumos/chromite/+/1313291 - 1313291, - ] - - if tag_profiles_with_current_time: - # Tags the profiles with the current time of day. As detailed in the - # docstring, this is desirable unless you're sure that this is the only - # experimental profile that will be generated today. - # https://chromium-review.googlesource.com/c/chromiumos/third_party/autotest/+/1436157 - patches.append(1436157) - - if use_afdo_generation_stage: - # Make the profile generation stage look in localmirror, instead of having - # it look in chromeos-prebuilt. Without this, we'll never upload - # chrome.debug or try to generate an AFDO profile. - # https://chromium-review.googlesource.com/c/chromiumos/chromite/+/1436583 - patches.append(1436583) - - if not user_patches and not force_no_patches: - raise ValueError('No patches given; pass --force_no_patches to force a ' - 'tryjob') - - for patch in user_patches: - # We accept two formats. Either a URL that ends with a number, or a number. 
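A minimal sketch of that URL-or-number normalization; the helper name is illustrative only:

    def _patch_number(patch):
        # A Gerrit URL's last path component is the CL number; bare
        # numbers pass through unchanged.
        if patch.startswith("http"):
            patch = patch.split("/")[-1]
        return int(patch)

    assert _patch_number("1436158") == 1436158
    assert _patch_number(
        "https://chromium-review.googlesource.com/c/chromiumos/chromite/+/1313291"
    ) == 1313291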
- if patch.startswith('http'): - patch = patch.split('/')[-1] - patches.append(int(patch)) - - count = collections.Counter(patches) - too_many = [k for k, v in count.items() if v > 1] - if too_many: - too_many.sort() - raise ValueError( - 'Patch(es) asked for application more than once: %s' % too_many) - - args = [ - 'cros', - 'tryjob', - ] - - for patch in patches: - args += ['-g', str(patch)] - - args += [ - '--nochromesdk', - '--hwtest', - 'chell-chrome-pfq-tryjob', - ] - - print(' '.join(pipes.quote(a) for a in args)) - if not dry_run: - sys.exit(subprocess.call(args)) - - -if __name__ == '__main__': - main() + sys.exit(subprocess.call(args)) + + +if __name__ == "__main__": + main() diff --git a/auto_delete_nightly_test_data.py b/auto_delete_nightly_test_data.py index b625783f..8023ce33 100755 --- a/auto_delete_nightly_test_data.py +++ b/auto_delete_nightly_test_data.py @@ -7,7 +7,7 @@ """A crontab script to delete night test data.""" -__author__ = 'shenhan@google.com (Han Shen)' +__author__ = "shenhan@google.com (Han Shen)" import argparse import datetime @@ -26,297 +26,344 @@ from cros_utils import constants from cros_utils import misc -DIR_BY_WEEKDAY = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun') -NIGHTLY_TESTS_WORKSPACE = os.path.join(constants.CROSTC_WORKSPACE, - 'nightly-tests') +DIR_BY_WEEKDAY = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun") +NIGHTLY_TESTS_WORKSPACE = os.path.join( + constants.CROSTC_WORKSPACE, "nightly-tests" +) def CleanNumberedDir(s, dry_run=False): - """Deleted directories under each dated_dir.""" - chromeos_dirs = [ - os.path.join(s, x) for x in os.listdir(s) - if misc.IsChromeOsTree(os.path.join(s, x)) - ] - ce = command_executer.GetCommandExecuter(log_level='none') - all_succeeded = True - for cd in chromeos_dirs: - if misc.DeleteChromeOsTree(cd, dry_run=dry_run): - print(f'Successfully removed chromeos tree {cd!r}.') - else: - all_succeeded = False - print(f'Failed to remove chromeos tree {cd!r}, please check.') - - if not all_succeeded: - print('Failed to delete at least one chromeos tree, please check.') - return False - - ## Now delete the numbered dir Before forcibly removing the directory, just - ## check 's' to make sure it matches the expected pattern. A valid dir to be - ## removed must be '/usr/local/google/crostc/(SUN|MON|TUE...|SAT)'. - valid_dir_pattern = ('^' + NIGHTLY_TESTS_WORKSPACE + '/(' + - '|'.join(DIR_BY_WEEKDAY) + ')') - if not re.search(valid_dir_pattern, s): - print(f'Trying to delete an invalid dir {s!r} (must match ' - f'{valid_dir_pattern!r}), please check.') - return False - - cmd = f'rm -fr {s}' - if dry_run: - print(cmd) - else: - if ce.RunCommand(cmd, print_to_console=False, terminated_timeout=480) == 0: - print(f'Successfully removed {s!r}.') + """Deleted directories under each dated_dir.""" + chromeos_dirs = [ + os.path.join(s, x) + for x in os.listdir(s) + if misc.IsChromeOsTree(os.path.join(s, x)) + ] + ce = command_executer.GetCommandExecuter(log_level="none") + all_succeeded = True + for cd in chromeos_dirs: + if misc.DeleteChromeOsTree(cd, dry_run=dry_run): + print(f"Successfully removed chromeos tree {cd!r}.") + else: + all_succeeded = False + print(f"Failed to remove chromeos tree {cd!r}, please check.") + + if not all_succeeded: + print("Failed to delete at least one chromeos tree, please check.") + return False + + ## Now delete the numbered dir Before forcibly removing the directory, just + ## check 's' to make sure it matches the expected pattern. 
A valid dir to be + ## removed must be '/usr/local/google/crostc/(SUN|MON|TUE...|SAT)'. + valid_dir_pattern = ( + "^" + NIGHTLY_TESTS_WORKSPACE + "/(" + "|".join(DIR_BY_WEEKDAY) + ")" + ) + if not re.search(valid_dir_pattern, s): + print( + f"Trying to delete an invalid dir {s!r} (must match " + f"{valid_dir_pattern!r}), please check." + ) + return False + + cmd = f"rm -fr {s}" + if dry_run: + print(cmd) else: - all_succeeded = False - print(f'Failed to remove {s!r}, please check.') - return all_succeeded + if ( + ce.RunCommand(cmd, print_to_console=False, terminated_timeout=480) + == 0 + ): + print(f"Successfully removed {s!r}.") + else: + all_succeeded = False + print(f"Failed to remove {s!r}, please check.") + return all_succeeded def CleanDatedDir(dated_dir, dry_run=False): - # List subdirs under dir - subdirs = [ - os.path.join(dated_dir, x) for x in os.listdir(dated_dir) - if os.path.isdir(os.path.join(dated_dir, x)) - ] - all_succeeded = True - for s in subdirs: - if not CleanNumberedDir(s, dry_run): - all_succeeded = False - return all_succeeded + # List subdirs under dir + subdirs = [ + os.path.join(dated_dir, x) + for x in os.listdir(dated_dir) + if os.path.isdir(os.path.join(dated_dir, x)) + ] + all_succeeded = True + for s in subdirs: + if not CleanNumberedDir(s, dry_run): + all_succeeded = False + return all_succeeded def ProcessArguments(argv): - """Process arguments.""" - parser = argparse.ArgumentParser( - description='Automatically delete nightly test data directories.', - usage='auto_delete_nightly_test_data.py options') - parser.add_argument('-d', - '--dry_run', - dest='dry_run', - default=False, - action='store_true', - help='Only print command line, do not execute anything.') - parser.add_argument('--days_to_preserve', - dest='days_to_preserve', - default=3, - help=('Specify the number of days (not including today),' - ' test data generated on these days will *NOT* be ' - 'deleted. Defaults to 3.')) - options = parser.parse_args(argv) - return options + """Process arguments.""" + parser = argparse.ArgumentParser( + description="Automatically delete nightly test data directories.", + usage="auto_delete_nightly_test_data.py options", + ) + parser.add_argument( + "-d", + "--dry_run", + dest="dry_run", + default=False, + action="store_true", + help="Only print command line, do not execute anything.", + ) + parser.add_argument( + "--days_to_preserve", + dest="days_to_preserve", + default=3, + help=( + "Specify the number of days (not including today)," + " test data generated on these days will *NOT* be " + "deleted. Defaults to 3." + ), + ) + options = parser.parse_args(argv) + return options def RemoveAllSubdirsMatchingPredicate( - base_dir: Path, days_to_preserve: int, dry_run: bool, - is_name_removal_worthy: Callable[[str], bool]) -> int: - """Removes all subdirs of base_dir that match the given predicate.""" - secs_to_preserve = 60 * 60 * 24 * days_to_preserve - now = time.time() - remove_older_than_time = now - secs_to_preserve - - try: - dir_entries = list(base_dir.iterdir()) - except FileNotFoundError as e: - # We get this if the directory itself doesn't exist. Since we're cleaning - # tempdirs, that's as good as a success. Further, the prior approach here - # was using the `find` binary, which exits successfully if nothing is - # found. 
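For the guard in CleanNumberedDir above, a quick illustration of what the pattern accepts and rejects. The workspace path here is a stand-in, since the real value comes from constants.CROSTC_WORKSPACE:

    import re

    DIR_BY_WEEKDAY = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
    NIGHTLY_TESTS_WORKSPACE = "/usr/local/google/crostc/nightly-tests"  # stand-in

    valid_dir_pattern = (
        "^" + NIGHTLY_TESTS_WORKSPACE + "/(" + "|".join(DIR_BY_WEEKDAY) + ")"
    )

    # Dated dirs under the workspace match; anything else is refused.
    assert re.search(valid_dir_pattern, NIGHTLY_TESTS_WORKSPACE + "/Mon/tree1")
    assert not re.search(valid_dir_pattern, "/home/someone/Mon/tree1")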
-    print(f"Error enumerating {base_dir}'s contents; skipping removal: {e}")
-    return 0
-
-  had_errors = False
-  for file in dir_entries:
-    if not is_name_removal_worthy(file.name):
-      continue
+    base_dir: Path,
+    days_to_preserve: int,
+    dry_run: bool,
+    is_name_removal_worthy: Callable[[str], bool],
+) -> int:
+    """Removes all subdirs of base_dir that match the given predicate."""
+    secs_to_preserve = 60 * 60 * 24 * days_to_preserve
+    now = time.time()
+    remove_older_than_time = now - secs_to_preserve
 
-    try:
-      # Take the stat here and use that later, so we only need to check for a
-      # nonexistent file once.
-      st = file.stat()
-    except FileNotFoundError:
-      # This was deleted while were checking; ignore it.
-      continue
-
-    if not stat.S_ISDIR(st.st_mode):
-      continue
-
-    if secs_to_preserve and st.st_atime >= remove_older_than_time:
-      continue
-
-    if dry_run:
-      print(f'Would remove {file}')
-      continue
-
-    this_iteration_had_errors = False
-
-    def OnError(_func, path_name, excinfo):
-      nonlocal this_iteration_had_errors
-      this_iteration_had_errors = True
-      print(f'Failed removing path at {path_name}; traceback:')
-      traceback.print_exception(*excinfo)
-
-    shutil.rmtree(file, onerror=OnError)
-
-    # Some errors can be other processes racing with us to delete things. Don't
-    # count those as an error which we complain loudly about.
-    if this_iteration_had_errors:
-      if file.exists():
-        had_errors = True
-      else:
-        print(f'Discarding removal errors for {file}; dir was still removed.')
-
-  return 1 if had_errors else 0
+    try:
+        dir_entries = list(base_dir.iterdir())
+    except FileNotFoundError as e:
+        # We get this if the directory itself doesn't exist. Since we're cleaning
+        # tempdirs, that's as good as a success. Further, the prior approach here
+        # was using the `find` binary, which exits successfully if nothing is
+        # found.
+        print(f"Error enumerating {base_dir}'s contents; skipping removal: {e}")
+        return 0
+
+    had_errors = False
+    for file in dir_entries:
+        if not is_name_removal_worthy(file.name):
+            continue
+
+        try:
+            # Take the stat here and use that later, so we only need to check for a
+            # nonexistent file once.
+            st = file.stat()
+        except FileNotFoundError:
+            # This was deleted while we were checking; ignore it.
+            continue
+
+        if not stat.S_ISDIR(st.st_mode):
+            continue
+
+        if secs_to_preserve and st.st_atime >= remove_older_than_time:
+            continue
+
+        if dry_run:
+            print(f"Would remove {file}")
+            continue
+
+        this_iteration_had_errors = False
+
+        def OnError(_func, path_name, excinfo):
+            nonlocal this_iteration_had_errors
+            this_iteration_had_errors = True
+            print(f"Failed removing path at {path_name}; traceback:")
+            traceback.print_exception(*excinfo)
+
+        shutil.rmtree(file, onerror=OnError)
+
+        # Some errors can be other processes racing with us to delete things. Don't
+        # count those as an error which we complain loudly about.
+        if this_iteration_had_errors:
+            if file.exists():
+                had_errors = True
+            else:
+                print(
+                    f"Discarding removal errors for {file}; dir was still removed."
+                )
+
+    return 1 if had_errors else 0
 
 
 def IsChromeOsTmpDeletionCandidate(file_name: str):
-  """Returns whether the given basename can be deleted from a chroot's /tmp."""
-  name_prefixes = (
-      'test_that_',
-      'cros-update',
-      'CrAU_temp_data',
-  )
-  if any(file_name.startswith(x) for x in name_prefixes):
-    return True
-  # Remove files that look like `tmpABCDEFGHI`.
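Concretely, the predicate in this hunk answers as follows; the inputs are made up:

    assert IsChromeOsTmpDeletionCandidate("test_that_foo")  # known prefix
    assert IsChromeOsTmpDeletionCandidate("tmpQiKxJ9")      # "tmp" + 6 chars
    assert not IsChromeOsTmpDeletionCandidate("mydata")     # unrelated name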
- return len(file_name) == 9 and file_name.startswith('tmp') - - -def CleanChromeOsTmpFiles(chroot_tmp: str, days_to_preserve: int, - dry_run: bool) -> int: - # Clean chroot/tmp/test_that_* and chroot/tmp/tmpxxxxxx, that were last - # accessed more than specified time ago. - return RemoveAllSubdirsMatchingPredicate( - Path(chroot_tmp), - days_to_preserve, - dry_run, - IsChromeOsTmpDeletionCandidate, - ) - - -def CleanChromeOsImageFiles(chroot_tmp, subdir_suffix, days_to_preserve, - dry_run): - # Clean files that were last accessed more than the specified time. - seconds_delta = days_to_preserve * 24 * 3600 - now = time.time() - errors = 0 - - for tmp_dir in os.listdir(chroot_tmp): - # Directory under /tmp - tmp_dir = os.path.join(chroot_tmp, tmp_dir) - if tmp_dir.endswith(subdir_suffix): - # Tmp directory which ends with subdir_suffix. - for subdir in os.listdir(tmp_dir): - # Subdirectories targeted for deletion. - subdir_path = os.path.join(tmp_dir, subdir) - if now - os.path.getatime(subdir_path) > seconds_delta: - if dry_run: - print(f'Will run:\nshutil.rmtree({subdir_path!r})') - else: - try: - shutil.rmtree(subdir_path) - print('Successfully cleaned chromeos image autotest directories ' - f'from {subdir_path!r}.') - except OSError: - print('Some image autotest directories were not removed from ' - f'"{subdir_path}".') - errors += 1 - - return errors + """Returns whether the given basename can be deleted from a chroot's /tmp.""" + name_prefixes = ( + "test_that_", + "cros-update", + "CrAU_temp_data", + ) + if any(file_name.startswith(x) for x in name_prefixes): + return True + # Remove files that look like `tmpABCDEFGHI`. + return len(file_name) == 9 and file_name.startswith("tmp") + + +def CleanChromeOsTmpFiles( + chroot_tmp: str, days_to_preserve: int, dry_run: bool +) -> int: + # Clean chroot/tmp/test_that_* and chroot/tmp/tmpxxxxxx, that were last + # accessed more than specified time ago. + return RemoveAllSubdirsMatchingPredicate( + Path(chroot_tmp), + days_to_preserve, + dry_run, + IsChromeOsTmpDeletionCandidate, + ) + + +def CleanChromeOsImageFiles( + chroot_tmp, subdir_suffix, days_to_preserve, dry_run +): + # Clean files that were last accessed more than the specified time. + seconds_delta = days_to_preserve * 24 * 3600 + now = time.time() + errors = 0 + + for tmp_dir in os.listdir(chroot_tmp): + # Directory under /tmp + tmp_dir = os.path.join(chroot_tmp, tmp_dir) + if tmp_dir.endswith(subdir_suffix): + # Tmp directory which ends with subdir_suffix. + for subdir in os.listdir(tmp_dir): + # Subdirectories targeted for deletion. + subdir_path = os.path.join(tmp_dir, subdir) + if now - os.path.getatime(subdir_path) > seconds_delta: + if dry_run: + print(f"Will run:\nshutil.rmtree({subdir_path!r})") + else: + try: + shutil.rmtree(subdir_path) + print( + "Successfully cleaned chromeos image autotest directories " + f"from {subdir_path!r}." + ) + except OSError: + print( + "Some image autotest directories were not removed from " + f'"{subdir_path}".' 
+ ) + errors += 1 + + return errors def CleanChromeOsTmpAndImages(days_to_preserve=1, dry_run=False): - """Delete temporaries, images under crostc/chromeos.""" - chromeos_chroot_tmp = os.path.join(constants.CROSTC_WORKSPACE, 'chromeos', - 'chroot', 'tmp') - # Clean files in tmp directory - rv = CleanChromeOsTmpFiles(chromeos_chroot_tmp, days_to_preserve, dry_run) - # Clean image files in *-tryjob directories - rv += CleanChromeOsImageFiles(chromeos_chroot_tmp, '-tryjob', - days_to_preserve, dry_run) - # Clean image files in *-release directories - rv += CleanChromeOsImageFiles(chromeos_chroot_tmp, '-release', - days_to_preserve, dry_run) - # Clean image files in *-pfq directories - rv += CleanChromeOsImageFiles(chromeos_chroot_tmp, '-pfq', days_to_preserve, - dry_run) - # Clean image files in *-llvm-next-nightly directories - rv += CleanChromeOsImageFiles(chromeos_chroot_tmp, '-llvm-next-nightly', - days_to_preserve, dry_run) - - return rv - - -def CleanOldCLs(days_to_preserve='1', dry_run=False): - """Abandon old CLs created by automation tooling.""" - ce = command_executer.GetCommandExecuter() - chromeos_root = os.path.join(constants.CROSTC_WORKSPACE, 'chromeos') - # Find Old CLs. - old_cls_cmd = ('gerrit --raw search "owner:me status:open age:%sd"' % - days_to_preserve) - _, cls, _ = ce.ChrootRunCommandWOutput(chromeos_root, - old_cls_cmd, - print_to_console=False) - # Convert any whitespaces to spaces. - cls = ' '.join(cls.split()) - if not cls: - return 0 - - abandon_cls_cmd = ('gerrit abandon %s' % cls) - if dry_run: - print('Going to execute: %s' % abandon_cls_cmd) - return 0 - - return ce.ChrootRunCommand(chromeos_root, - abandon_cls_cmd, - print_to_console=False) + """Delete temporaries, images under crostc/chromeos.""" + chromeos_chroot_tmp = os.path.join( + constants.CROSTC_WORKSPACE, "chromeos", "chroot", "tmp" + ) + # Clean files in tmp directory + rv = CleanChromeOsTmpFiles(chromeos_chroot_tmp, days_to_preserve, dry_run) + # Clean image files in *-tryjob directories + rv += CleanChromeOsImageFiles( + chromeos_chroot_tmp, "-tryjob", days_to_preserve, dry_run + ) + # Clean image files in *-release directories + rv += CleanChromeOsImageFiles( + chromeos_chroot_tmp, "-release", days_to_preserve, dry_run + ) + # Clean image files in *-pfq directories + rv += CleanChromeOsImageFiles( + chromeos_chroot_tmp, "-pfq", days_to_preserve, dry_run + ) + # Clean image files in *-llvm-next-nightly directories + rv += CleanChromeOsImageFiles( + chromeos_chroot_tmp, "-llvm-next-nightly", days_to_preserve, dry_run + ) + + return rv + + +def CleanOldCLs(days_to_preserve="1", dry_run=False): + """Abandon old CLs created by automation tooling.""" + ce = command_executer.GetCommandExecuter() + chromeos_root = os.path.join(constants.CROSTC_WORKSPACE, "chromeos") + # Find Old CLs. + old_cls_cmd = ( + 'gerrit --raw search "owner:me status:open age:%sd"' % days_to_preserve + ) + _, cls, _ = ce.ChrootRunCommandWOutput( + chromeos_root, old_cls_cmd, print_to_console=False + ) + # Convert any whitespaces to spaces. 
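The `gerrit --raw search` output arrives one CL per line; the split/join idiom on the next line collapses it into the space-separated list that `gerrit abandon` expects:

    raw_cls = "12345\n67890\n"
    assert " ".join(raw_cls.split()) == "12345 67890"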
+ cls = " ".join(cls.split()) + if not cls: + return 0 + + abandon_cls_cmd = "gerrit abandon %s" % cls + if dry_run: + print("Going to execute: %s" % abandon_cls_cmd) + return 0 + + return ce.ChrootRunCommand( + chromeos_root, abandon_cls_cmd, print_to_console=False + ) def CleanChromeTelemetryTmpFiles(dry_run: bool) -> int: - tmp_dir = (Path(constants.CROSTC_WORKSPACE) / 'chromeos' / '.cache' / - 'distfiles' / 'chrome-src-internal' / 'src' / 'tmp') - return RemoveAllSubdirsMatchingPredicate( - tmp_dir, - days_to_preserve=0, - dry_run=dry_run, - is_name_removal_worthy=lambda x: x.startswith('tmp') and x.endswith( - 'telemetry_Crosperf'), - ) + tmp_dir = ( + Path(constants.CROSTC_WORKSPACE) + / "chromeos" + / ".cache" + / "distfiles" + / "chrome-src-internal" + / "src" + / "tmp" + ) + return RemoveAllSubdirsMatchingPredicate( + tmp_dir, + days_to_preserve=0, + dry_run=dry_run, + is_name_removal_worthy=lambda x: x.startswith("tmp") + and x.endswith("telemetry_Crosperf"), + ) def Main(argv): - """Delete nightly test data directories, tmps and test images.""" - options = ProcessArguments(argv) - # Function 'isoweekday' returns 1(Monday) - 7 (Sunday). - d = datetime.datetime.today().isoweekday() - # We go back 1 week, delete from that day till we are - # options.days_to_preserve away from today. - s = d - 7 - e = d - int(options.days_to_preserve) - rv = 0 - for i in range(s + 1, e): - if i <= 0: - ## Wrap around if index is negative. 6 is from i + 7 - 1, because - ## DIR_BY_WEEKDAY starts from 0, while isoweekday is from 1-7. - dated_dir = DIR_BY_WEEKDAY[i + 6] - else: - dated_dir = DIR_BY_WEEKDAY[i - 1] - - rv += 0 if CleanDatedDir(os.path.join(NIGHTLY_TESTS_WORKSPACE, dated_dir), - options.dry_run) else 1 - - ## Clean temporaries, images under crostc/chromeos - rv2 = CleanChromeOsTmpAndImages(int(options.days_to_preserve), - options.dry_run) - - # Clean CLs that are not updated in last 2 weeks. - rv3 = CleanOldCLs('14', options.dry_run) - - # Clean telemetry temporaries from chrome source tree inside chroot. - rv4 = CleanChromeTelemetryTmpFiles(options.dry_run) - - return rv + rv2 + rv3 + rv4 - - -if __name__ == '__main__': - retval = Main(sys.argv[1:]) - sys.exit(retval) + """Delete nightly test data directories, tmps and test images.""" + options = ProcessArguments(argv) + # Function 'isoweekday' returns 1(Monday) - 7 (Sunday). + d = datetime.datetime.today().isoweekday() + # We go back 1 week, delete from that day till we are + # options.days_to_preserve away from today. + s = d - 7 + e = d - int(options.days_to_preserve) + rv = 0 + for i in range(s + 1, e): + if i <= 0: + ## Wrap around if index is negative. 6 is from i + 7 - 1, because + ## DIR_BY_WEEKDAY starts from 0, while isoweekday is from 1-7. + dated_dir = DIR_BY_WEEKDAY[i + 6] + else: + dated_dir = DIR_BY_WEEKDAY[i - 1] + + rv += ( + 0 + if CleanDatedDir( + os.path.join(NIGHTLY_TESTS_WORKSPACE, dated_dir), + options.dry_run, + ) + else 1 + ) + + ## Clean temporaries, images under crostc/chromeos + rv2 = CleanChromeOsTmpAndImages( + int(options.days_to_preserve), options.dry_run + ) + + # Clean CLs that are not updated in last 2 weeks. + rv3 = CleanOldCLs("14", options.dry_run) + + # Clean telemetry temporaries from chrome source tree inside chroot. 
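One step back: the weekday wrap-around at the top of Main is easy to misread. A worked instance, with an illustrative helper; isoweekday() is 1 for Monday through 7 for Sunday:

    DIR_BY_WEEKDAY = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")

    def dated_dir_for(i):
        # Zero and negative indices wrap into the previous week.
        return DIR_BY_WEEKDAY[i + 6] if i <= 0 else DIR_BY_WEEKDAY[i - 1]

    # With d = 1 (Monday) and days_to_preserve = 3: s = -6 and e = -2, so
    # the loop visits i in (-5, -4, -3), i.e. last week's Tue, Wed, Thu.
    assert [dated_dir_for(i) for i in range(-5, -2)] == ["Tue", "Wed", "Thu"]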
+ rv4 = CleanChromeTelemetryTmpFiles(options.dry_run) + + return rv + rv2 + rv3 + rv4 + + +if __name__ == "__main__": + retval = Main(sys.argv[1:]) + sys.exit(retval) diff --git a/bestflags/example_algorithms.py b/bestflags/example_algorithms.py index e16908a5..10136aca 100644 --- a/bestflags/example_algorithms.py +++ b/bestflags/example_algorithms.py @@ -10,7 +10,7 @@ Then it initiates the variables of the generation. Finally, it sets up the processes for different modules and runs the experiment. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import json import multiprocessing @@ -27,170 +27,190 @@ from task import Task from task import TEST_STAGE import testing_batch + parser = OptionParser() -parser.add_option('-f', - '--file', - dest='filename', - help='configuration file FILE input', - metavar='FILE') +parser.add_option( + "-f", + "--file", + dest="filename", + help="configuration file FILE input", + metavar="FILE", +) # The meta data for the genetic algorithm. -BUILD_CMD = 'BUILD_CMD' -TEST_CMD = 'TEST_CMD' -OUTPUT = 'OUTPUT' -DEFAULT_OUTPUT = 'output' -CONF = 'CONF' -DEFAULT_CONF = 'conf' -NUM_BUILDER = 'NUM_BUILDER' +BUILD_CMD = "BUILD_CMD" +TEST_CMD = "TEST_CMD" +OUTPUT = "OUTPUT" +DEFAULT_OUTPUT = "output" +CONF = "CONF" +DEFAULT_CONF = "conf" +NUM_BUILDER = "NUM_BUILDER" DEFAULT_NUM_BUILDER = 1 -NUM_TESTER = 'NUM_TESTER' +NUM_TESTER = "NUM_TESTER" DEFAULT_NUM_TESTER = 1 -STOP_THRESHOLD = 'STOP_THRESHOLD' +STOP_THRESHOLD = "STOP_THRESHOLD" DEFAULT_STOP_THRESHOLD = 1 -NUM_CHROMOSOMES = 'NUM_CHROMOSOMES' +NUM_CHROMOSOMES = "NUM_CHROMOSOMES" DEFAULT_NUM_CHROMOSOMES = 20 -NUM_TRIALS = 'NUM_TRIALS' +NUM_TRIALS = "NUM_TRIALS" DEFAULT_NUM_TRIALS = 20 -MUTATION_RATE = 'MUTATION_RATE' +MUTATION_RATE = "MUTATION_RATE" DEFAULT_MUTATION_RATE = 0.01 def _ProcessGA(meta_data): - """Set up the meta data for the genetic algorithm. + """Set up the meta data for the genetic algorithm. - Args: - meta_data: the meta data for the genetic algorithm. - """ - assert BUILD_CMD in meta_data - build_cmd = meta_data[BUILD_CMD] + Args: + meta_data: the meta data for the genetic algorithm. 
+ """ + assert BUILD_CMD in meta_data + build_cmd = meta_data[BUILD_CMD] - assert TEST_CMD in meta_data - test_cmd = meta_data[TEST_CMD] + assert TEST_CMD in meta_data + test_cmd = meta_data[TEST_CMD] - if OUTPUT not in meta_data: - output_file = DEFAULT_OUTPUT - else: - output_file = meta_data[OUTPUT] + if OUTPUT not in meta_data: + output_file = DEFAULT_OUTPUT + else: + output_file = meta_data[OUTPUT] - if CONF not in meta_data: - conf_file = DEFAULT_CONF - else: - conf_file = meta_data[CONF] + if CONF not in meta_data: + conf_file = DEFAULT_CONF + else: + conf_file = meta_data[CONF] - if NUM_BUILDER not in meta_data: - num_builders = DEFAULT_NUM_BUILDER - else: - num_builders = meta_data[NUM_BUILDER] + if NUM_BUILDER not in meta_data: + num_builders = DEFAULT_NUM_BUILDER + else: + num_builders = meta_data[NUM_BUILDER] - if NUM_TESTER not in meta_data: - num_testers = DEFAULT_NUM_TESTER - else: - num_testers = meta_data[NUM_TESTER] + if NUM_TESTER not in meta_data: + num_testers = DEFAULT_NUM_TESTER + else: + num_testers = meta_data[NUM_TESTER] - if STOP_THRESHOLD not in meta_data: - stop_threshold = DEFAULT_STOP_THRESHOLD - else: - stop_threshold = meta_data[STOP_THRESHOLD] + if STOP_THRESHOLD not in meta_data: + stop_threshold = DEFAULT_STOP_THRESHOLD + else: + stop_threshold = meta_data[STOP_THRESHOLD] - if NUM_CHROMOSOMES not in meta_data: - num_chromosomes = DEFAULT_NUM_CHROMOSOMES - else: - num_chromosomes = meta_data[NUM_CHROMOSOMES] + if NUM_CHROMOSOMES not in meta_data: + num_chromosomes = DEFAULT_NUM_CHROMOSOMES + else: + num_chromosomes = meta_data[NUM_CHROMOSOMES] - if NUM_TRIALS not in meta_data: - num_trials = DEFAULT_NUM_TRIALS - else: - num_trials = meta_data[NUM_TRIALS] + if NUM_TRIALS not in meta_data: + num_trials = DEFAULT_NUM_TRIALS + else: + num_trials = meta_data[NUM_TRIALS] - if MUTATION_RATE not in meta_data: - mutation_rate = DEFAULT_MUTATION_RATE - else: - mutation_rate = meta_data[MUTATION_RATE] + if MUTATION_RATE not in meta_data: + mutation_rate = DEFAULT_MUTATION_RATE + else: + mutation_rate = meta_data[MUTATION_RATE] - specs = flags.ReadConf(conf_file) + specs = flags.ReadConf(conf_file) - # Initiate the build/test command and the log directory. - Task.InitLogCommand(build_cmd, test_cmd, output_file) + # Initiate the build/test command and the log directory. + Task.InitLogCommand(build_cmd, test_cmd, output_file) - # Initiate the build/test command and the log directory. - GAGeneration.InitMetaData(stop_threshold, num_chromosomes, num_trials, specs, - mutation_rate) + # Initiate the build/test command and the log directory. + GAGeneration.InitMetaData( + stop_threshold, num_chromosomes, num_trials, specs, mutation_rate + ) - # Generate the initial generations. - generation_tasks = testing_batch.GenerateRandomGATasks(specs, num_chromosomes, - num_trials) - generations = [GAGeneration(generation_tasks, set([]), 0)] + # Generate the initial generations. + generation_tasks = testing_batch.GenerateRandomGATasks( + specs, num_chromosomes, num_trials + ) + generations = [GAGeneration(generation_tasks, set([]), 0)] - # Execute the experiment. - _StartExperiment(num_builders, num_testers, generations) + # Execute the experiment. + _StartExperiment(num_builders, num_testers, generations) def _ParseJson(file_name): - """Parse the input json file. + """Parse the input json file. - Parse the input json file and call the proper function to perform the - algorithms. + Parse the input json file and call the proper function to perform the + algorithms. 
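The eight if/else ladders in _ProcessGA above all express "use the JSON value
if present, else the module default", which dict.get() states directly. A
condensed sketch (the _read_ga_config helper is hypothetical, not part of the
module; the key and default constants are the ones defined at the top of the
file):

    def _read_ga_config(meta_data):
        return {
            "output_file": meta_data.get(OUTPUT, DEFAULT_OUTPUT),
            "conf_file": meta_data.get(CONF, DEFAULT_CONF),
            "num_builders": meta_data.get(NUM_BUILDER, DEFAULT_NUM_BUILDER),
            "num_testers": meta_data.get(NUM_TESTER, DEFAULT_NUM_TESTER),
            "stop_threshold": meta_data.get(STOP_THRESHOLD, DEFAULT_STOP_THRESHOLD),
            "num_chromosomes": meta_data.get(NUM_CHROMOSOMES, DEFAULT_NUM_CHROMOSOMES),
            "num_trials": meta_data.get(NUM_TRIALS, DEFAULT_NUM_TRIALS),
            "mutation_rate": meta_data.get(MUTATION_RATE, DEFAULT_MUTATION_RATE),
        }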
- Args: - file_name: the input json file name. - """ + Args: + file_name: the input json file name. + """ - experiments = json.load(open(file_name)) + experiments = json.load(open(file_name)) - for experiment in experiments: - if experiment == 'GA': - # An GA experiment - _ProcessGA(experiments[experiment]) + for experiment in experiments: + if experiment == "GA": + # An GA experiment + _ProcessGA(experiments[experiment]) def _StartExperiment(num_builders, num_testers, generations): - """Set up the experiment environment and execute the framework. - - Args: - num_builders: number of concurrent builders. - num_testers: number of concurrent testers. - generations: the initial generation for the framework. - """ - - manager = multiprocessing.Manager() - - # The queue between the steering algorithm and the builder. - steering_build = manager.Queue() - # The queue between the builder and the tester. - build_test = manager.Queue() - # The queue between the tester and the steering algorithm. - test_steering = manager.Queue() - - # Set up the processes for the builder, tester and steering algorithm module. - build_process = PipelineProcess(num_builders, 'builder', {}, BUILD_STAGE, - steering_build, pipeline_worker.Helper, - pipeline_worker.Worker, build_test) - - test_process = PipelineProcess(num_testers, 'tester', {}, TEST_STAGE, - build_test, pipeline_worker.Helper, - pipeline_worker.Worker, test_steering) - - steer_process = multiprocessing.Process( - target=Steering, - args=(set([]), generations, test_steering, steering_build)) - - # Start the processes. - build_process.start() - test_process.start() - steer_process.start() - - # Wait for the processes to finish. - build_process.join() - test_process.join() - steer_process.join() + """Set up the experiment environment and execute the framework. + + Args: + num_builders: number of concurrent builders. + num_testers: number of concurrent testers. + generations: the initial generation for the framework. + """ + + manager = multiprocessing.Manager() + + # The queue between the steering algorithm and the builder. + steering_build = manager.Queue() + # The queue between the builder and the tester. + build_test = manager.Queue() + # The queue between the tester and the steering algorithm. + test_steering = manager.Queue() + + # Set up the processes for the builder, tester and steering algorithm module. + build_process = PipelineProcess( + num_builders, + "builder", + {}, + BUILD_STAGE, + steering_build, + pipeline_worker.Helper, + pipeline_worker.Worker, + build_test, + ) + + test_process = PipelineProcess( + num_testers, + "tester", + {}, + TEST_STAGE, + build_test, + pipeline_worker.Helper, + pipeline_worker.Worker, + test_steering, + ) + + steer_process = multiprocessing.Process( + target=Steering, + args=(set([]), generations, test_steering, steering_build), + ) + + # Start the processes. + build_process.start() + test_process.start() + steer_process.start() + + # Wait for the processes to finish. 
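    # (A compact view of the wiring built above: the three managed queues form
    # a ring, steering -> builder -> tester -> steering, with PipelineProcess
    # fanning each stage out over num_builders/num_testers workers:
    #
    #   steering_build --> build_process --> build_test
    #                                           |
    #   steer_process <-- test_steering <-- test_process
    #
    # Each stage blocks on queue.get(), so the join() calls below return only
    # once the ring drains; the worker and helper callables come from
    # pipeline_worker rather than being hard-coded in each stage.)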
+ build_process.join() + test_process.join() + steer_process.join() def main(argv): - (options, _) = parser.parse_args(argv) - assert options.filename - _ParseJson(options.filename) + (options, _) = parser.parse_args(argv) + assert options.filename + _ParseJson(options.filename) -if __name__ == '__main__': - main(sys.argv) +if __name__ == "__main__": + main(sys.argv) diff --git a/bestflags/flags.py b/bestflags/flags.py index 01a845ca..9ae360af 100644 --- a/bestflags/flags.py +++ b/bestflags/flags.py @@ -21,177 +21,182 @@ Examples: "foo[0-9]bar" will expand to e.g. "foo5bar". """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import random import re + # # This matches a [...] group in the internal representation for a flag # specification, and is used in "filling out" flags - placing values inside # the flag_spec. The internal flag_spec format is like "foo[0]", with # values filled out like 5; this would be transformed by # FormattedForUse() into "foo5". -_FLAG_FILLOUT_VALUE_RE = re.compile(r'\[([^\]]*)\]') +_FLAG_FILLOUT_VALUE_RE = re.compile(r"\[([^\]]*)\]") # This matches a numeric flag flag=[start-end]. -rx = re.compile(r'\[(?P<start>\d+)-(?P<end>\d+)\]') +rx = re.compile(r"\[(?P<start>\d+)-(?P<end>\d+)\]") # Search the numeric flag pattern. def Search(spec): - return rx.search(spec) + return rx.search(spec) class NoSuchFileError(Exception): - """Define an Exception class for user providing invalid input file.""" - pass + """Define an Exception class for user providing invalid input file.""" + + pass def ReadConf(file_name): - """Parse the configuration file. + """Parse the configuration file. - The configuration contains one flag specification in each line. + The configuration contains one flag specification in each line. - Args: - file_name: The name of the configuration file. + Args: + file_name: The name of the configuration file. - Returns: - A list of specs in the configuration file. + Returns: + A list of specs in the configuration file. - Raises: - NoSuchFileError: The caller should provide a valid configuration file. - """ + Raises: + NoSuchFileError: The caller should provide a valid configuration file. + """ - with open(file_name, 'r') as input_file: - lines = input_file.readlines() + with open(file_name, "r") as input_file: + lines = input_file.readlines() - return sorted([line.strip() for line in lines if line.strip()]) + return sorted([line.strip() for line in lines if line.strip()]) - raise NoSuchFileError() + raise NoSuchFileError() class Flag(object): - """A class representing a particular command line flag argument. + """A class representing a particular command line flag argument. - The Flag consists of two parts: The spec and the value. - The spec is a definition of the following form: a string with escaped - sequences of the form [<start>-<end>] where start and end is an positive - integer for a fillable value. + The Flag consists of two parts: The spec and the value. + The spec is a definition of the following form: a string with escaped + sequences of the form [<start>-<end>] where start and end is an positive + integer for a fillable value. - An example of a spec is "foo[0-9]". - There are two kinds of flags, boolean flag and numeric flags. Boolean flags - can either be turned on or off, which numeric flags can have different - positive integer values. For example, -finline-limit=[1-1000] is a numeric - flag and -ftree-vectorize is a boolean flag. + An example of a spec is "foo[0-9]". 
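Concretely, the two regexes above split specs into numeric and boolean flags,
and _FLAG_FILLOUT_VALUE_RE later fills a chosen value into the spec. A
self-contained sketch of both behaviors:

    import re

    _FLAG_FILLOUT_VALUE_RE = re.compile(r"\[([^\]]*)\]")
    rx = re.compile(r"\[(?P<start>\d+)-(?P<end>\d+)\]")

    m = rx.search("-finline-limit=[1-1000]")        # numeric flag
    assert (m.group("start"), m.group("end")) == ("1", "1000")
    assert rx.search("-ftree-vectorize") is None    # boolean flag: no range

    # Filling out a spec, as Flag.FormattedForUse() does:
    assert _FLAG_FILLOUT_VALUE_RE.sub("5", "foo[0-9]") == "foo5"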
+ There are two kinds of flags, boolean flag and numeric flags. Boolean flags + can either be turned on or off, which numeric flags can have different + positive integer values. For example, -finline-limit=[1-1000] is a numeric + flag and -ftree-vectorize is a boolean flag. - A (boolean/numeric) flag is not turned on if it is not selected in the - FlagSet. - """ + A (boolean/numeric) flag is not turned on if it is not selected in the + FlagSet. + """ - def __init__(self, spec, value=-1): - self._spec = spec + def __init__(self, spec, value=-1): + self._spec = spec - # If the value is not specified, generate a random value to use. - if value == -1: - # If creating a boolean flag, the value will be 0. - value = 0 + # If the value is not specified, generate a random value to use. + if value == -1: + # If creating a boolean flag, the value will be 0. + value = 0 - # Parse the spec's expression for the flag value's numeric range. - numeric_flag_match = Search(spec) + # Parse the spec's expression for the flag value's numeric range. + numeric_flag_match = Search(spec) - # If this is a numeric flag, a value is chosen within start and end, start - # inclusive and end exclusive. - if numeric_flag_match: - start = int(numeric_flag_match.group('start')) - end = int(numeric_flag_match.group('end')) + # If this is a numeric flag, a value is chosen within start and end, start + # inclusive and end exclusive. + if numeric_flag_match: + start = int(numeric_flag_match.group("start")) + end = int(numeric_flag_match.group("end")) - assert start < end - value = random.randint(start, end) + assert start < end + value = random.randint(start, end) - self._value = value + self._value = value - def __eq__(self, other): - if isinstance(other, Flag): - return self._spec == other.GetSpec() and self._value == other.GetValue() - return False + def __eq__(self, other): + if isinstance(other, Flag): + return ( + self._spec == other.GetSpec() + and self._value == other.GetValue() + ) + return False - def __hash__(self): - return hash(self._spec) + self._value + def __hash__(self): + return hash(self._spec) + self._value - def GetValue(self): - """Get the value for this flag. + def GetValue(self): + """Get the value for this flag. - Returns: - The value. - """ + Returns: + The value. + """ - return self._value + return self._value - def GetSpec(self): - """Get the spec for this flag. + def GetSpec(self): + """Get the spec for this flag. - Returns: - The spec. - """ + Returns: + The spec. + """ - return self._spec + return self._spec - def FormattedForUse(self): - """Calculate the combination of flag_spec and values. + def FormattedForUse(self): + """Calculate the combination of flag_spec and values. - For e.g. the flag_spec 'foo[0-9]' and the value equals to 5, this will - return 'foo5'. The filled out version of the flag is the text string you use - when you actually want to pass the flag to some binary. + For e.g. the flag_spec 'foo[0-9]' and the value equals to 5, this will + return 'foo5'. The filled out version of the flag is the text string you use + when you actually want to pass the flag to some binary. - Returns: - A string that represent the filled out flag, e.g. the flag with the - FlagSpec '-X[0-9]Y' and value equals to 5 would return '-X5Y'. - """ + Returns: + A string that represent the filled out flag, e.g. the flag with the + FlagSpec '-X[0-9]Y' and value equals to 5 would return '-X5Y'. 
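One subtlety in __init__ above: the docstring promises a value in
[start, end), but random.randint(start, end) is inclusive at both ends, so
end itself can be drawn (testInit later asserts value < end). If the
end-exclusive reading is the intended one, randrange is the closer fit; a
sketch:

    import random

    start, end = 1, 1000
    # randint(a, b) samples [a, b]; randrange(a, b) samples [a, b).
    value = random.randrange(start, end)
    assert start <= value < end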
+ """ - return _FLAG_FILLOUT_VALUE_RE.sub(str(self._value), self._spec) + return _FLAG_FILLOUT_VALUE_RE.sub(str(self._value), self._spec) class FlagSet(object): - """A dictionary of Flag objects. + """A dictionary of Flag objects. - The flags dictionary stores the spec and flag pair. - """ + The flags dictionary stores the spec and flag pair. + """ - def __init__(self, flag_array): - # Store the flags as a dictionary mapping of spec -> flag object - self._flags = dict([(flag.GetSpec(), flag) for flag in flag_array]) + def __init__(self, flag_array): + # Store the flags as a dictionary mapping of spec -> flag object + self._flags = dict([(flag.GetSpec(), flag) for flag in flag_array]) - def __eq__(self, other): - return isinstance(other, FlagSet) and self._flags == other.GetFlags() + def __eq__(self, other): + return isinstance(other, FlagSet) and self._flags == other.GetFlags() - def __hash__(self): - return sum([hash(flag) for flag in self._flags.values()]) + def __hash__(self): + return sum([hash(flag) for flag in self._flags.values()]) - def __getitem__(self, flag_spec): - """Get flag with a particular flag_spec. + def __getitem__(self, flag_spec): + """Get flag with a particular flag_spec. - Args: - flag_spec: The flag_spec to find. + Args: + flag_spec: The flag_spec to find. - Returns: - A flag. - """ + Returns: + A flag. + """ - return self._flags[flag_spec] + return self._flags[flag_spec] - def __contains__(self, flag_spec): - return self._flags.has_key(flag_spec) + def __contains__(self, flag_spec): + return self._flags.has_key(flag_spec) - def GetFlags(self): - return self._flags + def GetFlags(self): + return self._flags - def FormattedForUse(self): - """Format this for use in an application. + def FormattedForUse(self): + """Format this for use in an application. - Returns: - A list of flags, sorted alphabetically and filled in with the values - for each flag. - """ + Returns: + A list of flags, sorted alphabetically and filled in with the values + for each flag. + """ - return sorted([f.FormattedForUse() for f in self._flags.values()]) + return sorted([f.FormattedForUse() for f in self._flags.values()]) diff --git a/bestflags/flags_test.py b/bestflags/flags_test.py index 6e546621..cbb59287 100644 --- a/bestflags/flags_test.py +++ b/bestflags/flags_test.py @@ -6,7 +6,7 @@ Part of the Chrome build flags optimization. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import random import sys @@ -15,176 +15,179 @@ import unittest from flags import Flag from flags import FlagSet + # The number of tests to test. NUM_TESTS = 20 class FlagTest(unittest.TestCase): - """This class tests the Flag class.""" + """This class tests the Flag class.""" - def testInit(self): - """The value generated should fall within start and end of the spec. + def testInit(self): + """The value generated should fall within start and end of the spec. - If the value is not specified, the value generated should fall within start - and end of the spec. - """ + If the value is not specified, the value generated should fall within start + and end of the spec. 
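FlagSet.__contains__ above calls dict.has_key, and the tests below lean on
sys.maxint; both exist only in Python 2, which dates these bestflags modules.
For reference, the Python 3 spellings are the in operator and sys.maxsize; a
sketch of the membership method alone:

    class FlagSet(object):
        def __init__(self, flag_array):
            # Same spec -> flag mapping as above, in dict-comprehension form.
            self._flags = {flag.GetSpec(): flag for flag in flag_array}

        def __contains__(self, flag_spec):
            # Python 3 replacement for self._flags.has_key(flag_spec).
            return flag_spec in self._flags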
+ """ - for _ in range(NUM_TESTS): - start = random.randint(1, sys.maxint - 1) - end = random.randint(start + 1, sys.maxint) + for _ in range(NUM_TESTS): + start = random.randint(1, sys.maxint - 1) + end = random.randint(start + 1, sys.maxint) - spec = 'flag=[%s-%s]' % (start, end) + spec = "flag=[%s-%s]" % (start, end) - test_flag = Flag(spec) + test_flag = Flag(spec) - value = test_flag.GetValue() + value = test_flag.GetValue() - # If the value is not specified when the flag is constructed, a random - # value is chosen. This value should fall within start and end of the - # spec. - assert start <= value and value < end + # If the value is not specified when the flag is constructed, a random + # value is chosen. This value should fall within start and end of the + # spec. + assert start <= value and value < end - def testEqual(self): - """Test the equal operator (==) of the flag. + def testEqual(self): + """Test the equal operator (==) of the flag. - Two flags are equal if and only if their spec and value are equal. - """ + Two flags are equal if and only if their spec and value are equal. + """ - tests = range(NUM_TESTS) + tests = range(NUM_TESTS) - # Two tasks having the same spec and value should be equivalent. - for test in tests: - assert Flag(str(test), test) == Flag(str(test), test) + # Two tasks having the same spec and value should be equivalent. + for test in tests: + assert Flag(str(test), test) == Flag(str(test), test) - # Two tasks having different flag set should be different. - for test in tests: - flag = Flag(str(test), test) - other_flag_sets = [other for other in tests if test != other] - for other_test in other_flag_sets: - assert flag != Flag(str(other_test), other_test) + # Two tasks having different flag set should be different. + for test in tests: + flag = Flag(str(test), test) + other_flag_sets = [other for other in tests if test != other] + for other_test in other_flag_sets: + assert flag != Flag(str(other_test), other_test) - def testFormattedForUse(self): - """Test the FormattedForUse method of the flag. + def testFormattedForUse(self): + """Test the FormattedForUse method of the flag. - The FormattedForUse replaces the string within the [] with the actual value. - """ + The FormattedForUse replaces the string within the [] with the actual value. + """ - for _ in range(NUM_TESTS): - start = random.randint(1, sys.maxint - 1) - end = random.randint(start + 1, sys.maxint) - value = random.randint(start, end - 1) + for _ in range(NUM_TESTS): + start = random.randint(1, sys.maxint - 1) + end = random.randint(start + 1, sys.maxint) + value = random.randint(start, end - 1) - spec = 'flag=[%s-%s]' % (start, end) + spec = "flag=[%s-%s]" % (start, end) - test_flag = Flag(spec, value) + test_flag = Flag(spec, value) - # For numeric flag, the FormattedForUse replaces the string within the [] - # with the actual value. - test_value = test_flag.FormattedForUse() - actual_value = 'flag=%s' % value + # For numeric flag, the FormattedForUse replaces the string within the [] + # with the actual value. + test_value = test_flag.FormattedForUse() + actual_value = "flag=%s" % value - assert test_value == actual_value + assert test_value == actual_value - for _ in range(NUM_TESTS): - value = random.randint(1, sys.maxint - 1) + for _ in range(NUM_TESTS): + value = random.randint(1, sys.maxint - 1) - test_flag = Flag('flag', value) + test_flag = Flag("flag", value) - # For boolean flag, the FormattedForUse returns the spec. 
- test_value = test_flag.FormattedForUse() - actual_value = 'flag' - assert test_value == actual_value + # For boolean flag, the FormattedForUse returns the spec. + test_value = test_flag.FormattedForUse() + actual_value = "flag" + assert test_value == actual_value class FlagSetTest(unittest.TestCase): - """This class test the FlagSet class.""" + """This class test the FlagSet class.""" - def testEqual(self): - """Test the equal method of the Class FlagSet. + def testEqual(self): + """Test the equal method of the Class FlagSet. - Two FlagSet instances are equal if all their flags are equal. - """ + Two FlagSet instances are equal if all their flags are equal. + """ - flag_names = range(NUM_TESTS) + flag_names = range(NUM_TESTS) - # Two flag sets having the same flags should be equivalent. - for flag_name in flag_names: - spec = '%s' % flag_name + # Two flag sets having the same flags should be equivalent. + for flag_name in flag_names: + spec = "%s" % flag_name - assert FlagSet([Flag(spec)]) == FlagSet([Flag(spec)]) + assert FlagSet([Flag(spec)]) == FlagSet([Flag(spec)]) - # Two flag sets having different flags should be different. - for flag_name in flag_names: - spec = '%s' % flag_name - flag_set = FlagSet([Flag(spec)]) - other_flag_sets = [other for other in flag_names if flag_name != other] - for other_name in other_flag_sets: - other_spec = '%s' % other_name - assert flag_set != FlagSet([Flag(other_spec)]) + # Two flag sets having different flags should be different. + for flag_name in flag_names: + spec = "%s" % flag_name + flag_set = FlagSet([Flag(spec)]) + other_flag_sets = [ + other for other in flag_names if flag_name != other + ] + for other_name in other_flag_sets: + other_spec = "%s" % other_name + assert flag_set != FlagSet([Flag(other_spec)]) - def testGetItem(self): - """Test the get item method of the Class FlagSet. + def testGetItem(self): + """Test the get item method of the Class FlagSet. - The flag set is also indexed by the specs. The flag set should return the - appropriate flag given the spec. - """ + The flag set is also indexed by the specs. The flag set should return the + appropriate flag given the spec. + """ - tests = range(NUM_TESTS) + tests = range(NUM_TESTS) - specs = [str(spec) for spec in tests] - flag_array = [Flag(spec) for spec in specs] + specs = [str(spec) for spec in tests] + flag_array = [Flag(spec) for spec in specs] - flag_set = FlagSet(flag_array) + flag_set = FlagSet(flag_array) - # Created a dictionary of spec and flag, the flag set should return the flag - # the same as this dictionary. - spec_flag = dict(zip(specs, flag_array)) + # Created a dictionary of spec and flag, the flag set should return the flag + # the same as this dictionary. + spec_flag = dict(zip(specs, flag_array)) - for spec in spec_flag: - assert flag_set[spec] == spec_flag[spec] + for spec in spec_flag: + assert flag_set[spec] == spec_flag[spec] - def testContain(self): - """Test the contain method of the Class FlagSet. + def testContain(self): + """Test the contain method of the Class FlagSet. - The flag set is also indexed by the specs. The flag set should return true - for spec if it contains a flag containing spec. - """ + The flag set is also indexed by the specs. The flag set should return true + for spec if it contains a flag containing spec. 
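The equality semantics tested above work together with FlagSet.__hash__, which
sums the member flags' hashes and is therefore independent of construction
order; that is what lets equal flag sets deduplicate inside sets and caches. A
sketch, assuming Flag and FlagSet are importable from flags:

    from flags import Flag, FlagSet

    a = FlagSet([Flag("x=[0-9]", 3), Flag("y")])
    b = FlagSet([Flag("y"), Flag("x=[0-9]", 3)])   # same flags, other order
    assert a == b and hash(a) == hash(b)
    assert len({a, b}) == 1                        # collapses to one element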
+ """ - true_tests = range(NUM_TESTS) - false_tests = range(NUM_TESTS, NUM_TESTS * 2) + true_tests = range(NUM_TESTS) + false_tests = range(NUM_TESTS, NUM_TESTS * 2) - specs = [str(spec) for spec in true_tests] + specs = [str(spec) for spec in true_tests] - flag_set = FlagSet([Flag(spec) for spec in specs]) + flag_set = FlagSet([Flag(spec) for spec in specs]) - for spec in specs: - assert spec in flag_set + for spec in specs: + assert spec in flag_set - for spec in false_tests: - assert spec not in flag_set + for spec in false_tests: + assert spec not in flag_set - def testFormattedForUse(self): - """Test the FormattedForUse method of the Class FlagSet. + def testFormattedForUse(self): + """Test the FormattedForUse method of the Class FlagSet. - The output should be a sorted list of strings. - """ + The output should be a sorted list of strings. + """ - flag_names = range(NUM_TESTS) - flag_names.reverse() - flags = [] - result = [] + flag_names = range(NUM_TESTS) + flag_names.reverse() + flags = [] + result = [] - # Construct the flag set. - for flag_name in flag_names: - spec = '%s' % flag_name - flags.append(Flag(spec)) - result.append(spec) + # Construct the flag set. + for flag_name in flag_names: + spec = "%s" % flag_name + flags.append(Flag(spec)) + result.append(spec) - flag_set = FlagSet(flags) + flag_set = FlagSet(flags) - # The results string should be sorted. - assert sorted(result) == flag_set.FormattedForUse() + # The results string should be sorted. + assert sorted(result) == flag_set.FormattedForUse() -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/bestflags/flags_util.py b/bestflags/flags_util.py index 436f9779..088319c5 100644 --- a/bestflags/flags_util.py +++ b/bestflags/flags_util.py @@ -6,90 +6,91 @@ Part of the Chrome build flags optimization. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import flags from flags import Flag def ClimbNext(flags_dict, climb_spec): - """Get the flags that are different from |flags_dict| by |climb_spec|. - - Given a set of flags, |flags_dict|, return a new set of flags that are - adjacent along the flag spec |climb_spec|. - - An example flags_dict is {foo=[1-9]:foo=5, bar=[1-5]:bar=2} and climb_spec is - bar=[1-5]. This method changes the flag that contains the spec bar=[1-5]. The - results are its neighbors dictionaries, i.e., {foo=[1-9]:foo=5, - bar=[1-5]:bar=1} and {foo=[1-9]:foo=5, bar=[1-5]:bar=3}. - - Args: - flags_dict: The dictionary containing the original flags whose neighbors are - to be explored. - climb_spec: The spec in the flags_dict is to be changed. The spec is a - definition in the little language, a string with escaped sequences of the - form [<start>-<end>] where start and end is an positive integer for a - fillable value. An example of a spec is "foo[0-9]". - - Returns: - List of dictionaries of neighbor flags. - """ - - # This method searches for a pattern [start-end] in the spec. If the spec - # contains this pattern, it is a numeric flag. Otherwise it is a boolean flag. - # For example, -finline-limit=[1-1000] is a numeric flag and -falign-jumps is - # a boolean flag. - numeric_flag_match = flags.Search(climb_spec) - - # If the flags do not contain the spec. - if climb_spec not in flags_dict: - results = flags_dict.copy() - - if numeric_flag_match: - # Numeric flags. 
- results[climb_spec] = Flag(climb_spec, - int(numeric_flag_match.group('start'))) + """Get the flags that are different from |flags_dict| by |climb_spec|. + + Given a set of flags, |flags_dict|, return a new set of flags that are + adjacent along the flag spec |climb_spec|. + + An example flags_dict is {foo=[1-9]:foo=5, bar=[1-5]:bar=2} and climb_spec is + bar=[1-5]. This method changes the flag that contains the spec bar=[1-5]. The + results are its neighbors dictionaries, i.e., {foo=[1-9]:foo=5, + bar=[1-5]:bar=1} and {foo=[1-9]:foo=5, bar=[1-5]:bar=3}. + + Args: + flags_dict: The dictionary containing the original flags whose neighbors are + to be explored. + climb_spec: The spec in the flags_dict is to be changed. The spec is a + definition in the little language, a string with escaped sequences of the + form [<start>-<end>] where start and end is an positive integer for a + fillable value. An example of a spec is "foo[0-9]". + + Returns: + List of dictionaries of neighbor flags. + """ + + # This method searches for a pattern [start-end] in the spec. If the spec + # contains this pattern, it is a numeric flag. Otherwise it is a boolean flag. + # For example, -finline-limit=[1-1000] is a numeric flag and -falign-jumps is + # a boolean flag. + numeric_flag_match = flags.Search(climb_spec) + + # If the flags do not contain the spec. + if climb_spec not in flags_dict: + results = flags_dict.copy() + + if numeric_flag_match: + # Numeric flags. + results[climb_spec] = Flag( + climb_spec, int(numeric_flag_match.group("start")) + ) + else: + # Boolean flags. + results[climb_spec] = Flag(climb_spec) + + return [results] + + # The flags contain the spec. + if not numeric_flag_match: + # Boolean flags. + results = flags_dict.copy() + + # Turn off the flag. A flag is turned off if it is not presented in the + # flags_dict. + del results[climb_spec] + return [results] + + # Numeric flags. + flag = flags_dict[climb_spec] + + # The value of the flag having spec. + value = flag.GetValue() + results = [] + + if value + 1 < int(numeric_flag_match.group("end")): + # If the value is not the end value, explore the value that is 1 larger than + # the current value. + neighbor = flags_dict.copy() + neighbor[climb_spec] = Flag(climb_spec, value + 1) + results.append(neighbor) + + if value > int(numeric_flag_match.group("start")): + # If the value is not the start value, explore the value that is 1 lesser + # than the current value. + neighbor = flags_dict.copy() + neighbor[climb_spec] = Flag(climb_spec, value - 1) + results.append(neighbor) else: - # Boolean flags. - results[climb_spec] = Flag(climb_spec) - - return [results] - - # The flags contain the spec. - if not numeric_flag_match: - # Boolean flags. - results = flags_dict.copy() - - # Turn off the flag. A flag is turned off if it is not presented in the - # flags_dict. - del results[climb_spec] - return [results] - - # Numeric flags. - flag = flags_dict[climb_spec] - - # The value of the flag having spec. - value = flag.GetValue() - results = [] - - if value + 1 < int(numeric_flag_match.group('end')): - # If the value is not the end value, explore the value that is 1 larger than - # the current value. - neighbor = flags_dict.copy() - neighbor[climb_spec] = Flag(climb_spec, value + 1) - results.append(neighbor) - - if value > int(numeric_flag_match.group('start')): - # If the value is not the start value, explore the value that is 1 lesser - # than the current value. 
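Tracing ClimbNext above end to end: for a numeric spec already in the
dictionary it returns the value±1 neighbors, dropping the flag entirely when
stepping below start; for a boolean spec it toggles the flag. A usage sketch,
assuming flags and flags_util are importable:

    from flags import Flag
    import flags_util

    spec = "bar=[1-5]"

    neighbors = flags_util.ClimbNext({spec: Flag(spec, 2)}, spec)
    assert sorted(n[spec].GetValue() for n in neighbors) == [1, 3]

    # From the start value, the "step down" neighbor removes the flag instead:
    neighbors = flags_util.ClimbNext({spec: Flag(spec, 1)}, spec)
    assert any(spec not in n for n in neighbors)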
- neighbor = flags_dict.copy() - neighbor[climb_spec] = Flag(climb_spec, value - 1) - results.append(neighbor) - else: - # Delete the value, i.e., turn off the flag. A flag is turned off if it is - # not presented in the flags_dict. - neighbor = flags_dict.copy() - del neighbor[climb_spec] - results.append(neighbor) - - return results + # Delete the value, i.e., turn off the flag. A flag is turned off if it is + # not presented in the flags_dict. + neighbor = flags_dict.copy() + del neighbor[climb_spec] + results.append(neighbor) + + return results diff --git a/bestflags/generation.py b/bestflags/generation.py index 5c9cd649..9ab8edbf 100644 --- a/bestflags/generation.py +++ b/bestflags/generation.py @@ -15,125 +15,126 @@ candidate_pool will contain a current task t being evaluated and the exe_set will contains all the task t's neighbor. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" class NoneOverridingError(Exception): - """Define an Exception class for subclasses not overriding certain methods.""" - pass + """Define an Exception class for subclasses not overriding certain methods.""" + + pass class Generation(object): - """A generation of a framework run. + """A generation of a framework run. - The base class of generation. Concrete subclasses, e.g., GAGeneration should - override the Next and IsImproved method to implement algorithm specific - applications. - """ + The base class of generation. Concrete subclasses, e.g., GAGeneration should + override the Next and IsImproved method to implement algorithm specific + applications. + """ - def __init__(self, exe_set, candidate_pool): - """Set up the tasks set of this generation. + def __init__(self, exe_set, candidate_pool): + """Set up the tasks set of this generation. - Args: - exe_set: A set of tasks to be run. - candidate_pool: A set of tasks to be considered to be used to generate the - next generation. - """ + Args: + exe_set: A set of tasks to be run. + candidate_pool: A set of tasks to be considered to be used to generate the + next generation. + """ - self._exe_set = exe_set - self._candidate_pool = candidate_pool + self._exe_set = exe_set + self._candidate_pool = candidate_pool - # Keeping the record of how many tasks are pending. Pending tasks are the - # ones that have been sent out to the next stage for execution but have not - # finished. A generation is not ready for the reproduction of the new - # generations until all its pending tasks have been executed. - self._pending = len(exe_set) + # Keeping the record of how many tasks are pending. Pending tasks are the + # ones that have been sent out to the next stage for execution but have not + # finished. A generation is not ready for the reproduction of the new + # generations until all its pending tasks have been executed. + self._pending = len(exe_set) - def CandidatePool(self): - """Return the candidate tasks of this generation.""" + def CandidatePool(self): + """Return the candidate tasks of this generation.""" - return self._candidate_pool + return self._candidate_pool - def Pool(self): - """Return the task set of this generation.""" + def Pool(self): + """Return the task set of this generation.""" - return self._exe_set + return self._exe_set - def Done(self): - """All the tasks in this generation are done. + def Done(self): + """All the tasks in this generation are done. - Returns: - True if all the tasks have been executed. That is the number of pending - task is 0. 
- """ + Returns: + True if all the tasks have been executed. That is the number of pending + task is 0. + """ - return self._pending == 0 - - def UpdateTask(self, task): - """Match a task t in this generation that is equal to the input task. - - This method is called when the input task has just finished execution. This - method finds out whether there is a pending task t in the current generation - that is the same as the input task. Two tasks are the same if their flag - options are the same. A task is pending if it has not been performed. - If there is a pending task t that matches the input task, task t will be - substituted with the input task in this generation. In that case, the input - task, as well as its build and test results encapsulated in the task, will - be stored in the current generation. These results could be used to produce - the next generation. - If there is a match, the current generation will have one less pending task. - When there is no pending task, the generation can be used to produce the - next generation. - The caller of this function is responsible for not calling this method on - the same task more than once. - - Args: - task: A task that has its results ready. - - Returns: - Whether the input task belongs to this generation. - """ + return self._pending == 0 - # If there is a match, the input task belongs to this generation. - if task not in self._exe_set: - return False + def UpdateTask(self, task): + """Match a task t in this generation that is equal to the input task. - # Remove the place holder task in this generation and store the new input - # task and its result. - self._exe_set.remove(task) - self._exe_set.add(task) + This method is called when the input task has just finished execution. This + method finds out whether there is a pending task t in the current generation + that is the same as the input task. Two tasks are the same if their flag + options are the same. A task is pending if it has not been performed. + If there is a pending task t that matches the input task, task t will be + substituted with the input task in this generation. In that case, the input + task, as well as its build and test results encapsulated in the task, will + be stored in the current generation. These results could be used to produce + the next generation. + If there is a match, the current generation will have one less pending task. + When there is no pending task, the generation can be used to produce the + next generation. + The caller of this function is responsible for not calling this method on + the same task more than once. - # The current generation will have one less task to wait on. - self._pending -= 1 + Args: + task: A task that has its results ready. - assert self._pending >= 0 + Returns: + Whether the input task belongs to this generation. + """ - return True + # If there is a match, the input task belongs to this generation. + if task not in self._exe_set: + return False - def IsImproved(self): - """True if this generation has improvement upon its parent generation. + # Remove the place holder task in this generation and store the new input + # task and its result. + self._exe_set.remove(task) + self._exe_set.add(task) - Raises: - NoneOverridingError: The subclass should override this method. - """ - raise NoneOverridingError('Must be implemented by child class') + # The current generation will have one less task to wait on. + self._pending -= 1 - def Next(self, _): - """Calculate the next generation. 
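The remove-then-add pair in UpdateTask above is easy to misread as a no-op. It
is not: the finished task compares equal to the pending placeholder (task
equality is by flag set), but carries build/test results the placeholder
lacks, so the swap stores the result-bearing object. The same idiom with a
minimal stand-in class (Result is hypothetical, not the module's Task):

    class Result(object):
        """Equal by key, the way tasks are equal by flag set."""
        def __init__(self, key, payload=None):
            self.key, self.payload = key, payload
        def __eq__(self, other):
            return isinstance(other, Result) and self.key == other.key
        def __hash__(self):
            return hash(self.key)

    pending = set([Result("flags-A")])        # placeholder, no results yet
    done = Result("flags-A", payload=42)      # equal key, results attached

    pending.remove(done)                      # evicts the equal placeholder
    pending.add(done)                         # stores the finished object
    assert next(iter(pending)).payload == 42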
+ assert self._pending >= 0 - This is the core of the framework implementation. It must be overridden by - the concrete subclass to implement algorithm specific generations. + return True - Args: - _: A set of tasks that have been generated before. The overridden method - in the subclasses can use this so as not to generate task that has been - generated before. + def IsImproved(self): + """True if this generation has improvement upon its parent generation. - Returns: - A set of new generations. + Raises: + NoneOverridingError: The subclass should override this method. + """ + raise NoneOverridingError("Must be implemented by child class") - Raises: - NoneOverridingError: The subclass should override this method. - """ + def Next(self, _): + """Calculate the next generation. + + This is the core of the framework implementation. It must be overridden by + the concrete subclass to implement algorithm specific generations. + + Args: + _: A set of tasks that have been generated before. The overridden method + in the subclasses can use this so as not to generate task that has been + generated before. + + Returns: + A set of new generations. + + Raises: + NoneOverridingError: The subclass should override this method. + """ - raise NoneOverridingError('Must be implemented by child class') + raise NoneOverridingError("Must be implemented by child class") diff --git a/bestflags/generation_test.py b/bestflags/generation_test.py index bc5a0b1b..2d9d4680 100644 --- a/bestflags/generation_test.py +++ b/bestflags/generation_test.py @@ -6,7 +6,7 @@ Part of the Chrome build flags optimization. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import random import unittest @@ -14,6 +14,7 @@ import unittest from generation import Generation from mock_task import IdentifierMockTask + # Pick an integer at random. TEST_STAGE = -125 @@ -26,47 +27,47 @@ STRIDE = 7 class GenerationTest(unittest.TestCase): - """This class test the Generation class. + """This class test the Generation class. - Given a set of tasks in the generation, if there is any task that is pending, - then the Done method will return false, and true otherwise. - """ + Given a set of tasks in the generation, if there is any task that is pending, + then the Done method will return false, and true otherwise. + """ - def testDone(self): - """"Test the Done method. + def testDone(self): + """ "Test the Done method. - Produce a generation with a set of tasks. Set the cost of the task one by - one and verify that the Done method returns false before setting the cost - for all the tasks. After the costs of all the tasks are set, the Done method - should return true. - """ + Produce a generation with a set of tasks. Set the cost of the task one by + one and verify that the Done method returns false before setting the cost + for all the tasks. After the costs of all the tasks are set, the Done method + should return true. + """ - random.seed(0) + random.seed(0) - testing_tasks = range(NUM_TASKS) + testing_tasks = range(NUM_TASKS) - # The tasks for the generation to be tested. - tasks = [IdentifierMockTask(TEST_STAGE, t) for t in testing_tasks] + # The tasks for the generation to be tested. + tasks = [IdentifierMockTask(TEST_STAGE, t) for t in testing_tasks] - gen = Generation(set(tasks), None) + gen = Generation(set(tasks), None) - # Permute the list. 
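The permutation built just below only visits every task once because STRIDE
and NUM_TASKS are coprime; with a common factor, (t * STRIDE) % NUM_TASKS
would revisit some indices and skip others. A quick check:

    from math import gcd

    NUM_TASKS, STRIDE = 20, 7
    assert gcd(STRIDE, NUM_TASKS) == 1
    permutation = [(t * STRIDE) % NUM_TASKS for t in range(NUM_TASKS)]
    assert sorted(permutation) == list(range(NUM_TASKS))  # each index once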
- permutation = [(t * STRIDE) % NUM_TASKS for t in range(NUM_TASKS)] - permuted_tasks = [testing_tasks[index] for index in permutation] + # Permute the list. + permutation = [(t * STRIDE) % NUM_TASKS for t in range(NUM_TASKS)] + permuted_tasks = [testing_tasks[index] for index in permutation] - # The Done method of the Generation should return false before all the tasks - # in the permuted list are set. - for testing_task in permuted_tasks: - assert not gen.Done() + # The Done method of the Generation should return false before all the tasks + # in the permuted list are set. + for testing_task in permuted_tasks: + assert not gen.Done() - # Mark a task as done by calling the UpdateTask method of the generation. - # Send the generation the task as well as its results. - gen.UpdateTask(IdentifierMockTask(TEST_STAGE, testing_task)) + # Mark a task as done by calling the UpdateTask method of the generation. + # Send the generation the task as well as its results. + gen.UpdateTask(IdentifierMockTask(TEST_STAGE, testing_task)) - # The Done method should return true after all the tasks in the permuted - # list is set. - assert gen.Done() + # The Done method should return true after all the tasks in the permuted + # list is set. + assert gen.Done() -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/bestflags/genetic_algorithm.py b/bestflags/genetic_algorithm.py index 2a1b68db..0d947067 100644 --- a/bestflags/genetic_algorithm.py +++ b/bestflags/genetic_algorithm.py @@ -6,7 +6,7 @@ Part of the Chrome build flags optimization. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import random @@ -18,278 +18,287 @@ from task import Task def CrossoverWith(first_flag, second_flag): - """Get a crossed over gene. + """Get a crossed over gene. - At present, this just picks either/or of these values. However, it could be - implemented as an integer maskover effort, if required. - - Args: - first_flag: The first gene (Flag) to cross over with. - second_flag: The second gene (Flag) to cross over with. - - Returns: - A Flag that can be considered appropriately randomly blended between the - first and second input flag. - """ - - return first_flag if random.randint(0, 1) else second_flag - - -def RandomMutate(specs, flag_set, mutation_rate): - """Randomly mutate the content of a task. - - Args: - specs: A list of spec from which the flag set is created. - flag_set: The current flag set being mutated - mutation_rate: What fraction of genes to mutate. - - Returns: - A Genetic Task constructed by randomly mutating the input flag set. - """ - - results_flags = [] - - for spec in specs: - # Randomly choose whether this flag should be mutated. - if random.randint(0, int(1 / mutation_rate)): - continue - - # If the flag is not already in the flag set, it is added. - if spec not in flag_set: - results_flags.append(Flag(spec)) - continue - - # If the flag is already in the flag set, it is mutated. - numeric_flag_match = flags.Search(spec) - - # The value of a numeric flag will be changed, and a boolean flag will be - # dropped. - if not numeric_flag_match: - continue - - value = flag_set[spec].GetValue() - - # Randomly select a nearby value of the current value of the flag. 
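A detail of the mutation gate above: random.randint(0, int(1 / mutation_rate))
draws uniformly from int(1 / mutation_rate) + 1 outcomes, and the flag is
mutated only on a draw of 0, so the effective per-flag mutation probability is
1 / (int(1 / mutation_rate) + 1), slightly below the nominal rate (1/101 for
the default 0.01). A quick empirical check:

    import random

    mutation_rate = 0.01
    n = int(1 / mutation_rate)                  # 100
    trials = 200000
    hits = sum(1 for _ in range(trials) if random.randint(0, n) == 0)
    print(hits / float(trials))                 # ~0.0099, i.e. about 1/101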
- rand_arr = [value] - if value + 1 < int(numeric_flag_match.group('end')): - rand_arr.append(value + 1) - - rand_arr.append(value - 1) - value = random.sample(rand_arr, 1)[0] - - # If the value is smaller than the start of the spec, this flag will be - # dropped. - if value != int(numeric_flag_match.group('start')) - 1: - results_flags.append(Flag(spec, value)) - - return GATask(FlagSet(results_flags)) - - -class GATask(Task): - - def __init__(self, flag_set): - Task.__init__(self, flag_set) - - def ReproduceWith(self, other, specs, mutation_rate): - """Reproduce with other FlagSet. + At present, this just picks either/or of these values. However, it could be + implemented as an integer maskover effort, if required. Args: - other: A FlagSet to reproduce with. - specs: A list of spec from which the flag set is created. - mutation_rate: one in mutation_rate flags will be mutated (replaced by a - random version of the same flag, instead of one from either of the - parents). Set to 0 to disable mutation. + first_flag: The first gene (Flag) to cross over with. + second_flag: The second gene (Flag) to cross over with. Returns: - A GA task made by mixing self with other. + A Flag that can be considered appropriately randomly blended between the + first and second input flag. """ - # Get the flag dictionary. - father_flags = self.GetFlags().GetFlags() - mother_flags = other.GetFlags().GetFlags() - - # Flags that are common in both parents and flags that belong to only one - # parent. - self_flags = [] - other_flags = [] - common_flags = [] + return first_flag if random.randint(0, 1) else second_flag - # Find out flags that are common to both parent and flags that belong soly - # to one parent. - for self_flag in father_flags: - if self_flag in mother_flags: - common_flags.append(self_flag) - else: - self_flags.append(self_flag) - for other_flag in mother_flags: - if other_flag not in father_flags: - other_flags.append(other_flag) - - # Randomly select flags that belong to only one parent. - output_flags = [father_flags[f] for f in self_flags if random.randint(0, 1)] - others = [mother_flags[f] for f in other_flags if random.randint(0, 1)] - output_flags.extend(others) - # Turn on flags that belong to both parent. Randomly choose the value of the - # flag from either parent. - for flag in common_flags: - output_flags.append(CrossoverWith(father_flags[flag], mother_flags[flag])) - - # Mutate flags - if mutation_rate: - return RandomMutate(specs, FlagSet(output_flags), mutation_rate) - - return GATask(FlagSet(output_flags)) - - -class GAGeneration(Generation): - """The Genetic Algorithm.""" - - # The value checks whether the algorithm has converged and arrives at a fixed - # point. If STOP_THRESHOLD of generations have not seen any performance - # improvement, the Genetic Algorithm stops. - STOP_THRESHOLD = None - - # Number of tasks in each generation. - NUM_CHROMOSOMES = None - - # The value checks whether the algorithm has converged and arrives at a fixed - # point. If NUM_TRIALS of trials have been attempted to generate a new task - # without a success, the Genetic Algorithm stops. - NUM_TRIALS = None - - # The flags that can be used to generate new tasks. - SPECS = None - - # What fraction of genes to mutate. - MUTATION_RATE = 0 - - @staticmethod - def InitMetaData(stop_threshold, num_chromosomes, num_trials, specs, - mutation_rate): - """Set up the meta data for the Genetic Algorithm. +def RandomMutate(specs, flag_set, mutation_rate): + """Randomly mutate the content of a task. 
Args: - stop_threshold: The number of generations, upon which no performance has - seen, the Genetic Algorithm stops. - num_chromosomes: Number of tasks in each generation. - num_trials: The number of trials, upon which new task has been tried to - generated without success, the Genetic Algorithm stops. - specs: The flags that can be used to generate new tasks. + specs: A list of spec from which the flag set is created. + flag_set: The current flag set being mutated mutation_rate: What fraction of genes to mutate. - """ - - GAGeneration.STOP_THRESHOLD = stop_threshold - GAGeneration.NUM_CHROMOSOMES = num_chromosomes - GAGeneration.NUM_TRIALS = num_trials - GAGeneration.SPECS = specs - GAGeneration.MUTATION_RATE = mutation_rate - def __init__(self, tasks, parents, total_stucks): - """Set up the meta data for the Genetic Algorithm. - - Args: - tasks: A set of tasks to be run. - parents: A set of tasks from which this new generation is produced. This - set also contains the best tasks generated so far. - total_stucks: The number of generations that have not seen improvement. - The Genetic Algorithm will stop once the total_stucks equals to - NUM_TRIALS defined in the GAGeneration class. + Returns: + A Genetic Task constructed by randomly mutating the input flag set. """ - Generation.__init__(self, tasks, parents) - self._total_stucks = total_stucks + results_flags = [] - def IsImproved(self): - """True if this generation has improvement upon its parent generation.""" + for spec in specs: + # Randomly choose whether this flag should be mutated. + if random.randint(0, int(1 / mutation_rate)): + continue - tasks = self.Pool() - parents = self.CandidatePool() + # If the flag is not already in the flag set, it is added. + if spec not in flag_set: + results_flags.append(Flag(spec)) + continue - # The first generate does not have parents. - if not parents: - return True + # If the flag is already in the flag set, it is mutated. + numeric_flag_match = flags.Search(spec) - # Found out whether a task has improvement upon the best task in the - # parent generation. - best_parent = sorted(parents, key=lambda task: task.GetTestResult())[0] - best_current = sorted(tasks, key=lambda task: task.GetTestResult())[0] + # The value of a numeric flag will be changed, and a boolean flag will be + # dropped. + if not numeric_flag_match: + continue - # At least one task has improvement. - if best_current.IsImproved(best_parent): - self._total_stucks = 0 - return True + value = flag_set[spec].GetValue() - # If STOP_THRESHOLD of generations have no improvement, the algorithm stops. - if self._total_stucks >= GAGeneration.STOP_THRESHOLD: - return False + # Randomly select a nearby value of the current value of the flag. + rand_arr = [value] + if value + 1 < int(numeric_flag_match.group("end")): + rand_arr.append(value + 1) - self._total_stucks += 1 - return True + rand_arr.append(value - 1) + value = random.sample(rand_arr, 1)[0] - def Next(self, cache): - """Calculate the next generation. + # If the value is smaller than the start of the spec, this flag will be + # dropped. + if value != int(numeric_flag_match.group("start")) - 1: + results_flags.append(Flag(spec, value)) - Generate a new generation from the a set of tasks. This set contains the - best set seen so far and the tasks executed in the parent generation. + return GATask(FlagSet(results_flags)) - Args: - cache: A set of tasks that have been generated before. - Returns: - A set of new generations. 
- """ +class GATask(Task): + def __init__(self, flag_set): + Task.__init__(self, flag_set) + + def ReproduceWith(self, other, specs, mutation_rate): + """Reproduce with other FlagSet. + + Args: + other: A FlagSet to reproduce with. + specs: A list of spec from which the flag set is created. + mutation_rate: one in mutation_rate flags will be mutated (replaced by a + random version of the same flag, instead of one from either of the + parents). Set to 0 to disable mutation. + + Returns: + A GA task made by mixing self with other. + """ + + # Get the flag dictionary. + father_flags = self.GetFlags().GetFlags() + mother_flags = other.GetFlags().GetFlags() + + # Flags that are common in both parents and flags that belong to only one + # parent. + self_flags = [] + other_flags = [] + common_flags = [] + + # Find out flags that are common to both parent and flags that belong soly + # to one parent. + for self_flag in father_flags: + if self_flag in mother_flags: + common_flags.append(self_flag) + else: + self_flags.append(self_flag) + + for other_flag in mother_flags: + if other_flag not in father_flags: + other_flags.append(other_flag) + + # Randomly select flags that belong to only one parent. + output_flags = [ + father_flags[f] for f in self_flags if random.randint(0, 1) + ] + others = [mother_flags[f] for f in other_flags if random.randint(0, 1)] + output_flags.extend(others) + # Turn on flags that belong to both parent. Randomly choose the value of the + # flag from either parent. + for flag in common_flags: + output_flags.append( + CrossoverWith(father_flags[flag], mother_flags[flag]) + ) + + # Mutate flags + if mutation_rate: + return RandomMutate(specs, FlagSet(output_flags), mutation_rate) + + return GATask(FlagSet(output_flags)) + - target_len = GAGeneration.NUM_CHROMOSOMES - specs = GAGeneration.SPECS - mutation_rate = GAGeneration.MUTATION_RATE - - # Collect a set of size target_len of tasks. This set will be used to - # produce a new generation of tasks. - gen_tasks = [task for task in self.Pool()] - - parents = self.CandidatePool() - if parents: - gen_tasks.extend(parents) - - # A set of tasks that are the best. This set will be used as the parent - # generation to produce the next generation. - sort_func = lambda task: task.GetTestResult() - retained_tasks = sorted(gen_tasks, key=sort_func)[:target_len] - - child_pool = set() - for father in retained_tasks: - num_trials = 0 - # Try num_trials times to produce a new child. - while num_trials < GAGeneration.NUM_TRIALS: - # Randomly select another parent. - mother = random.choice(retained_tasks) - # Cross over. - child = mother.ReproduceWith(father, specs, mutation_rate) - if child not in child_pool and child not in cache: - child_pool.add(child) - break - else: - num_trials += 1 - - num_trials = 0 - - while len(child_pool) < target_len and num_trials < GAGeneration.NUM_TRIALS: - for keep_task in retained_tasks: - # Mutation. - child = RandomMutate(specs, keep_task.GetFlags(), mutation_rate) - if child not in child_pool and child not in cache: - child_pool.add(child) - if len(child_pool) >= target_len: - break - else: - num_trials += 1 - - # If NUM_TRIALS of tries have been attempted without generating a set of new - # tasks, the algorithm stops. 
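Because GAGeneration keeps all of its tuning knobs in class-level attributes,
InitMetaData must be called once before any instance is built, which is
exactly what _ProcessGA does. A minimal setup sketch (the spec strings and
numbers are illustrative; in example_algorithms.py they come from the JSON
experiment file):

    specs = ["-ftree-vectorize", "-finline-limit=[1-1000]"]
    GAGeneration.InitMetaData(
        stop_threshold=1,
        num_chromosomes=20,
        num_trials=20,
        specs=specs,
        mutation_rate=0.01,
    )

    tasks = testing_batch.GenerateRandomGATasks(specs, 20, 20)
    first_generation = GAGeneration(tasks, set([]), 0)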
- if num_trials >= GAGeneration.NUM_TRIALS: - return [] - - assert len(child_pool) == target_len - - return [GAGeneration(child_pool, set(retained_tasks), self._total_stucks)] +class GAGeneration(Generation): + """The Genetic Algorithm.""" + + # The value checks whether the algorithm has converged and arrives at a fixed + # point. If STOP_THRESHOLD of generations have not seen any performance + # improvement, the Genetic Algorithm stops. + STOP_THRESHOLD = None + + # Number of tasks in each generation. + NUM_CHROMOSOMES = None + + # The value checks whether the algorithm has converged and arrives at a fixed + # point. If NUM_TRIALS of trials have been attempted to generate a new task + # without a success, the Genetic Algorithm stops. + NUM_TRIALS = None + + # The flags that can be used to generate new tasks. + SPECS = None + + # What fraction of genes to mutate. + MUTATION_RATE = 0 + + @staticmethod + def InitMetaData( + stop_threshold, num_chromosomes, num_trials, specs, mutation_rate + ): + """Set up the meta data for the Genetic Algorithm. + + Args: + stop_threshold: The number of generations, upon which no performance has + seen, the Genetic Algorithm stops. + num_chromosomes: Number of tasks in each generation. + num_trials: The number of trials, upon which new task has been tried to + generated without success, the Genetic Algorithm stops. + specs: The flags that can be used to generate new tasks. + mutation_rate: What fraction of genes to mutate. + """ + + GAGeneration.STOP_THRESHOLD = stop_threshold + GAGeneration.NUM_CHROMOSOMES = num_chromosomes + GAGeneration.NUM_TRIALS = num_trials + GAGeneration.SPECS = specs + GAGeneration.MUTATION_RATE = mutation_rate + + def __init__(self, tasks, parents, total_stucks): + """Set up the meta data for the Genetic Algorithm. + + Args: + tasks: A set of tasks to be run. + parents: A set of tasks from which this new generation is produced. This + set also contains the best tasks generated so far. + total_stucks: The number of generations that have not seen improvement. + The Genetic Algorithm will stop once the total_stucks equals to + NUM_TRIALS defined in the GAGeneration class. + """ + + Generation.__init__(self, tasks, parents) + self._total_stucks = total_stucks + + def IsImproved(self): + """True if this generation has improvement upon its parent generation.""" + + tasks = self.Pool() + parents = self.CandidatePool() + + # The first generate does not have parents. + if not parents: + return True + + # Found out whether a task has improvement upon the best task in the + # parent generation. + best_parent = sorted(parents, key=lambda task: task.GetTestResult())[0] + best_current = sorted(tasks, key=lambda task: task.GetTestResult())[0] + + # At least one task has improvement. + if best_current.IsImproved(best_parent): + self._total_stucks = 0 + return True + + # If STOP_THRESHOLD of generations have no improvement, the algorithm stops. + if self._total_stucks >= GAGeneration.STOP_THRESHOLD: + return False + + self._total_stucks += 1 + return True + + def Next(self, cache): + """Calculate the next generation. + + Generate a new generation from the a set of tasks. This set contains the + best set seen so far and the tasks executed in the parent generation. + + Args: + cache: A set of tasks that have been generated before. + + Returns: + A set of new generations. + """ + + target_len = GAGeneration.NUM_CHROMOSOMES + specs = GAGeneration.SPECS + mutation_rate = GAGeneration.MUTATION_RATE + + # Collect a set of size target_len of tasks. 
This set will be used to + # produce a new generation of tasks. + gen_tasks = [task for task in self.Pool()] + + parents = self.CandidatePool() + if parents: + gen_tasks.extend(parents) + + # A set of tasks that are the best. This set will be used as the parent + # generation to produce the next generation. + sort_func = lambda task: task.GetTestResult() + retained_tasks = sorted(gen_tasks, key=sort_func)[:target_len] + + child_pool = set() + for father in retained_tasks: + num_trials = 0 + # Try num_trials times to produce a new child. + while num_trials < GAGeneration.NUM_TRIALS: + # Randomly select another parent. + mother = random.choice(retained_tasks) + # Cross over. + child = mother.ReproduceWith(father, specs, mutation_rate) + if child not in child_pool and child not in cache: + child_pool.add(child) + break + else: + num_trials += 1 + + num_trials = 0 + + while ( + len(child_pool) < target_len + and num_trials < GAGeneration.NUM_TRIALS + ): + for keep_task in retained_tasks: + # Mutation. + child = RandomMutate(specs, keep_task.GetFlags(), mutation_rate) + if child not in child_pool and child not in cache: + child_pool.add(child) + if len(child_pool) >= target_len: + break + else: + num_trials += 1 + + # If NUM_TRIALS of tries have been attempted without generating a set of new + # tasks, the algorithm stops. + if num_trials >= GAGeneration.NUM_TRIALS: + return [] + + assert len(child_pool) == target_len + + return [ + GAGeneration(child_pool, set(retained_tasks), self._total_stucks) + ] diff --git a/bestflags/hill_climb_best_neighbor.py b/bestflags/hill_climb_best_neighbor.py index dc8d15d1..51e30369 100644 --- a/bestflags/hill_climb_best_neighbor.py +++ b/bestflags/hill_climb_best_neighbor.py @@ -10,7 +10,7 @@ neighbor gives better performance than the current task, it explores the best neighbor. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" from flags import FlagSet import flags_util @@ -19,89 +19,92 @@ from task import Task class HillClimbingBestBranch(Generation): - """A variation of the hill climbing algorithm. + """A variation of the hill climbing algorithm. - Given a task, it explores all its neighbors. Pick the best neighbor for the - next iteration. - """ - - def __init__(self, exe_set, parents, specs): - """Set up the tasks set of this generation. - - Args: - exe_set: A set of tasks to be run. - parents: A set of tasks to be used to check whether their neighbors have - improved upon them. - specs: A list of specs to explore. The spec specifies the flags that can - be changed to find neighbors of a task. - """ - - Generation.__init__(self, exe_set, parents) - self._specs = specs - - # This variable will be used, by the Next method, to generate the tasks for - # the next iteration. This self._next_task contains the best task in the - # current iteration and it will be set by the IsImproved method. The tasks - # of the next iteration are the neighbor of self._next_task. - self._next_task = None - - def IsImproved(self): - """True if this generation has improvement over its parent generation. - - If this generation improves upon the previous generation, this method finds - out the best task in this generation and sets it to _next_task for the - method Next to use. - - Returns: - True if the best neighbor improves upon the parent task. - """ - - # Find the best neighbor. 
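# Aside: "find the best neighbor" reduces to a single comparison step,
# sketched below with a standalone helper. The helper name and the scores
# are hypothetical; in bestflags the comparison is Task.IsImproved, and the
# sorted(...)[0] usage elsewhere suggests that a lower result is better.
def sketch_best_neighbor(current_score, neighbor_scores):
    """Return the best neighbor score if it improves on current, else None."""
    best = min(neighbor_scores)
    return best if best < current_score else None

# Hill climbing then iterates: sketch_best_neighbor(10, [9, 12, 11]) moves
# the climb to 9, and sketch_best_neighbor(9, [12, 11]) returns None, so the
# climb stops at a local optimum.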
- best_task = None - for task in self._exe_set: - if not best_task or task.IsImproved(best_task): - best_task = task - - if not best_task: - return False - - # The first generation may not have parent generation. - parents = list(self._candidate_pool) - if parents: - assert len(parents) == 1 - self._next_task = best_task - # If the best neighbor improves upon the parent task. - return best_task.IsImproved(parents[0]) - - self._next_task = best_task - return True - - def Next(self, cache): - """Calculate the next generation. - - The best neighbor b of the current task is the parent of the next - generation. The neighbors of b will be the set of tasks to be evaluated - next. - - Args: - cache: A set of tasks that have been generated before. - - Returns: - A set of new generations. + Given a task, it explores all its neighbors. Pick the best neighbor for the + next iteration. """ - # The best neighbor. - current_task = self._next_task - flag_set = current_task.GetFlags() - - # The neighbors of the best neighbor. - children_tasks = set([]) - for spec in self._specs: - for next_flag in flags_util.ClimbNext(flag_set.GetFlags(), spec): - new_task = Task(FlagSet(next_flag.values())) - - if new_task not in cache: - children_tasks.add(new_task) - - return [HillClimbingBestBranch(children_tasks, set([current_task]), - self._specs)] + def __init__(self, exe_set, parents, specs): + """Set up the tasks set of this generation. + + Args: + exe_set: A set of tasks to be run. + parents: A set of tasks to be used to check whether their neighbors have + improved upon them. + specs: A list of specs to explore. The spec specifies the flags that can + be changed to find neighbors of a task. + """ + + Generation.__init__(self, exe_set, parents) + self._specs = specs + + # This variable will be used, by the Next method, to generate the tasks for + # the next iteration. This self._next_task contains the best task in the + # current iteration and it will be set by the IsImproved method. The tasks + # of the next iteration are the neighbor of self._next_task. + self._next_task = None + + def IsImproved(self): + """True if this generation has improvement over its parent generation. + + If this generation improves upon the previous generation, this method finds + out the best task in this generation and sets it to _next_task for the + method Next to use. + + Returns: + True if the best neighbor improves upon the parent task. + """ + + # Find the best neighbor. + best_task = None + for task in self._exe_set: + if not best_task or task.IsImproved(best_task): + best_task = task + + if not best_task: + return False + + # The first generation may not have parent generation. + parents = list(self._candidate_pool) + if parents: + assert len(parents) == 1 + self._next_task = best_task + # If the best neighbor improves upon the parent task. + return best_task.IsImproved(parents[0]) + + self._next_task = best_task + return True + + def Next(self, cache): + """Calculate the next generation. + + The best neighbor b of the current task is the parent of the next + generation. The neighbors of b will be the set of tasks to be evaluated + next. + + Args: + cache: A set of tasks that have been generated before. + + Returns: + A set of new generations. + """ + + # The best neighbor. + current_task = self._next_task + flag_set = current_task.GetFlags() + + # The neighbors of the best neighbor. 
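# Aside: conceptually, the children computed below are every flag set that
# differs from the best neighbor in exactly one spec. A toy version over
# plain dictionaries follows; it is only a sketch with hypothetical names,
# since the real enumeration is delegated to flags_util.ClimbNext, which
# also understands numeric flag ranges such as -finline-limit=[1-1000].
def sketch_neighbors(flag_values, specs):
    """Yield dicts differing from flag_values in exactly one boolean spec."""
    for spec in specs:
        for variant in (0, 1):  # toy model: a flag is either off (0) or on (1)
            if flag_values.get(spec, 0) != variant:
                neighbor = dict(flag_values)
                neighbor[spec] = variant
                yield neighbor

# For example, sketch_neighbors({'-fa': 1}, ['-fa', '-fb']) yields
# {'-fa': 0} and {'-fa': 1, '-fb': 1}.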
+ children_tasks = set([]) + for spec in self._specs: + for next_flag in flags_util.ClimbNext(flag_set.GetFlags(), spec): + new_task = Task(FlagSet(next_flag.values())) + + if new_task not in cache: + children_tasks.add(new_task) + + return [ + HillClimbingBestBranch( + children_tasks, set([current_task]), self._specs + ) + ] diff --git a/bestflags/iterative_elimination.py b/bestflags/iterative_elimination.py index 581a855c..7ba19633 100644 --- a/bestflags/iterative_elimination.py +++ b/bestflags/iterative_elimination.py @@ -24,7 +24,7 @@ lower value or removing the boolean flag -fstrict-aliasing produce a better fitness value. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import flags from generation import Generation @@ -32,146 +32,148 @@ import task def _DecreaseFlag(flags_dict, spec): - """Decrease the value of the flag that has the specification spec. - - If the flag that contains the spec is a boolean flag, it is eliminated. - Otherwise the flag is a numeric flag, its value will be reduced by one. - - Args: - flags_dict: The dictionary containing the original flags whose neighbors are - to be explored. - spec: The spec in the flags_dict is to be changed. - - Returns: - Dictionary of neighbor flag that is only different from the original - dictionary by the spec. - """ - - # The specification must be held by one of the flags. - assert spec in flags_dict - - # The results this method returns. - results = flags_dict.copy() - - # This method searches for a pattern [start-end] in the spec. If the spec - # contains this pattern, it is a numeric flag. Otherwise it is a boolean flag. - # For example, -finline-limit=[1-1000] is a numeric flag and -falign-jumps is - # a boolean flag. - numeric_flag_match = flags.Search(spec) - - if numeric_flag_match: - # numeric flag - val = results[spec].GetValue() - - # If the value of the flag is the lower boundary of the specification, this - # flag will be turned off. Because it already contains the lowest value and - # can not be decreased any more. - if val == int(numeric_flag_match.group('start')): - # Turn off the flag. A flag is turned off if it is not presented in the - # flags_dict. - del results[spec] + """Decrease the value of the flag that has the specification spec. + + If the flag that contains the spec is a boolean flag, it is eliminated. + Otherwise the flag is a numeric flag, its value will be reduced by one. + + Args: + flags_dict: The dictionary containing the original flags whose neighbors are + to be explored. + spec: The spec in the flags_dict is to be changed. + + Returns: + Dictionary of neighbor flag that is only different from the original + dictionary by the spec. + """ + + # The specification must be held by one of the flags. + assert spec in flags_dict + + # The results this method returns. + results = flags_dict.copy() + + # This method searches for a pattern [start-end] in the spec. If the spec + # contains this pattern, it is a numeric flag. Otherwise it is a boolean flag. + # For example, -finline-limit=[1-1000] is a numeric flag and -falign-jumps is + # a boolean flag. + numeric_flag_match = flags.Search(spec) + + if numeric_flag_match: + # numeric flag + val = results[spec].GetValue() + + # If the value of the flag is the lower boundary of the specification, this + # flag will be turned off. Because it already contains the lowest value and + # can not be decreased any more. + if val == int(numeric_flag_match.group("start")): + # Turn off the flag. 
A flag is turned off if it is not presented in the + # flags_dict. + del results[spec] + else: + results[spec] = flags.Flag(spec, val - 1) else: - results[spec] = flags.Flag(spec, val - 1) - else: - # Turn off the flag. A flag is turned off if it is not presented in the - # flags_dict. - del results[spec] + # Turn off the flag. A flag is turned off if it is not presented in the + # flags_dict. + del results[spec] - return results + return results class IterativeEliminationGeneration(Generation): - """The negative flag iterative elimination algorithm.""" + """The negative flag iterative elimination algorithm.""" - def __init__(self, exe_set, parent_task): - """Set up the base line parent task. + def __init__(self, exe_set, parent_task): + """Set up the base line parent task. - The parent task is the base line against which the new tasks are compared. - The new tasks are only different from the base line from one flag f by - either turning this flag f off, or lower the flag value by 1. - If a new task is better than the base line, one flag is identified that - gives degradation. The flag that give the worst degradation will be removed - or lower the value by 1 in the base in each iteration. + The parent task is the base line against which the new tasks are compared. + The new tasks are only different from the base line from one flag f by + either turning this flag f off, or lower the flag value by 1. + If a new task is better than the base line, one flag is identified that + gives degradation. The flag that give the worst degradation will be removed + or lower the value by 1 in the base in each iteration. - Args: - exe_set: A set of tasks to be run. Each one only differs from the - parent_task by one flag. - parent_task: The base line task, against which the new tasks in exe_set - are compared. - """ + Args: + exe_set: A set of tasks to be run. Each one only differs from the + parent_task by one flag. + parent_task: The base line task, against which the new tasks in exe_set + are compared. + """ - Generation.__init__(self, exe_set, None) - self._parent_task = parent_task + Generation.__init__(self, exe_set, None) + self._parent_task = parent_task - def IsImproved(self): - """Whether any new task has improvement upon the parent task.""" + def IsImproved(self): + """Whether any new task has improvement upon the parent task.""" - parent = self._parent_task - # Whether there is any new task that has improvement over the parent base - # line task. - for curr in [curr for curr in self.Pool() if curr != parent]: - if curr.IsImproved(parent): - return True + parent = self._parent_task + # Whether there is any new task that has improvement over the parent base + # line task. + for curr in [curr for curr in self.Pool() if curr != parent]: + if curr.IsImproved(parent): + return True - return False + return False - def Next(self, cache): - """Find out the flag that gives the worst degradation. + def Next(self, cache): + """Find out the flag that gives the worst degradation. - Found out the flag that gives the worst degradation. Turn that flag off from - the base line and use the new base line for the new generation. + Found out the flag that gives the worst degradation. Turn that flag off from + the base line and use the new base line for the new generation. - Args: - cache: A set of tasks that have been generated before. + Args: + cache: A set of tasks that have been generated before. - Returns: - A set of new generations. - """ - parent_task = self._parent_task + Returns: + A set of new generations. 
+ """ + parent_task = self._parent_task - # Find out the task that gives the worst degradation. - worst_task = parent_task + # Find out the task that gives the worst degradation. + worst_task = parent_task - for curr in [curr for curr in self.Pool() if curr != parent_task]: - # The method IsImproved, which is supposed to be called before, ensures - # that there is at least a task that improves upon the parent_task. - if curr.IsImproved(worst_task): - worst_task = curr + for curr in [curr for curr in self.Pool() if curr != parent_task]: + # The method IsImproved, which is supposed to be called before, ensures + # that there is at least a task that improves upon the parent_task. + if curr.IsImproved(worst_task): + worst_task = curr - assert worst_task != parent_task + assert worst_task != parent_task - # The flags_set of the worst task. - work_flags_set = worst_task.GetFlags().GetFlags() + # The flags_set of the worst task. + work_flags_set = worst_task.GetFlags().GetFlags() - results = set([]) + results = set([]) - # If the flags_set contains no flag, i.e., all the flags have been - # eliminated, the algorithm stops. - if not work_flags_set: - return [] + # If the flags_set contains no flag, i.e., all the flags have been + # eliminated, the algorithm stops. + if not work_flags_set: + return [] - # Turn of the remaining flags one by one for the next generation. - for spec in work_flags_set: - flag_set = flags.FlagSet(_DecreaseFlag(work_flags_set, spec).values()) - new_task = task.Task(flag_set) - if new_task not in cache: - results.add(new_task) + # Turn of the remaining flags one by one for the next generation. + for spec in work_flags_set: + flag_set = flags.FlagSet( + _DecreaseFlag(work_flags_set, spec).values() + ) + new_task = task.Task(flag_set) + if new_task not in cache: + results.add(new_task) - return [IterativeEliminationGeneration(results, worst_task)] + return [IterativeEliminationGeneration(results, worst_task)] class IterativeEliminationFirstGeneration(IterativeEliminationGeneration): - """The first iteration of the iterative elimination algorithm. + """The first iteration of the iterative elimination algorithm. - The first iteration also evaluates the base line task. The base line tasks in - the subsequent iterations have been evaluated. Therefore, - IterativeEliminationGeneration does not include the base line task in the - execution set. - """ + The first iteration also evaluates the base line task. The base line tasks in + the subsequent iterations have been evaluated. Therefore, + IterativeEliminationGeneration does not include the base line task in the + execution set. + """ - def IsImproved(self): - # Find out the base line task in the execution set. - parent = next(task for task in self.Pool() if task == self._parent_task) - self._parent_task = parent + def IsImproved(self): + # Find out the base line task in the execution set. + parent = next(task for task in self.Pool() if task == self._parent_task) + self._parent_task = parent - return IterativeEliminationGeneration.IsImproved(self) + return IterativeEliminationGeneration.IsImproved(self) diff --git a/bestflags/mock_task.py b/bestflags/mock_task.py index 39ebf50c..3d6a4acf 100644 --- a/bestflags/mock_task.py +++ b/bestflags/mock_task.py @@ -6,87 +6,88 @@ Part of the Chrome build flags optimization. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" # Pick an integer at random. POISONPILL = 975 class MockTask(object): - """This class emulates an actual task. 
-
-  It does not do the actual work, but simply returns the result as given when
-  this task is constructed.
-  """
-
-  def __init__(self, stage, identifier, cost=0):
-    """Set up the results for this task.
-
-    Args:
-      stage: the stage of this test is in.
-      identifier: the identifier of this task.
-      cost: the mock cost of this task.
-
-    The _cost field stored the cost. Once this task is performed, i.e., by
-    calling the work method or by setting the result from other task, the
-    _cost field will have this cost. The stage field verifies that the module
-    being tested and the unitest are in the same stage. If the unitest does
-    not care about cost of this task, the cost parameter should be leaved
-    blank.
+    """This class emulates an actual task.
+
+    It does not do the actual work, but simply returns the result as given when
+    this task is constructed.
     """

-    self._identifier = identifier
-    self._cost = cost
-    self._stage = stage
+    def __init__(self, stage, identifier, cost=0):
+        """Set up the results for this task.
+
+        Args:
+          stage: the stage of this test is in.
+          identifier: the identifier of this task.
+          cost: the mock cost of this task.
+
+        The _cost field stores the cost. Once this task is performed, i.e., by
+        calling the work method or by setting the result from another task, the
+        _cost field will have this cost. The stage field verifies that the module
+        being tested and the unittest are in the same stage. If the unittest does
+        not care about the cost of this task, the cost parameter should be left
+        blank.
+        """

-    # Indicate that this method has not been performed yet.
-    self._performed = False
+        self._identifier = identifier
+        self._cost = cost
+        self._stage = stage

-  def __eq__(self, other):
-    if isinstance(other, MockTask):
-      return (self._identifier == other.GetIdentifier(self._stage) and
-              self._cost == other.GetResult(self._stage))
-    return False
+        # Indicate that this method has not been performed yet.
+        self._performed = False

-  def GetIdentifier(self, stage):
-    assert stage == self._stage
-    return self._identifier
+    def __eq__(self, other):
+        if isinstance(other, MockTask):
+            return self._identifier == other.GetIdentifier(
+                self._stage
+            ) and self._cost == other.GetResult(self._stage)
+        return False

-  def SetResult(self, stage, cost):
-    assert stage == self._stage
-    self._cost = cost
-    self._performed = True
+    def GetIdentifier(self, stage):
+        assert stage == self._stage
+        return self._identifier

-  def Work(self, stage):
-    assert stage == self._stage
-    self._performed = True
+    def SetResult(self, stage, cost):
+        assert stage == self._stage
+        self._cost = cost
+        self._performed = True

-  def GetResult(self, stage):
-    assert stage == self._stage
-    return self._cost
+    def Work(self, stage):
+        assert stage == self._stage
+        self._performed = True

-  def Done(self, stage):
-    """Indicates whether the task has been performed."""
+    def GetResult(self, stage):
+        assert stage == self._stage
+        return self._cost

-    assert stage == self._stage
-    return self._performed
+    def Done(self, stage):
+        """Indicates whether the task has been performed."""

-  def LogSteeringCost(self):
-    pass
+        assert stage == self._stage
+        return self._performed
+
+    def LogSteeringCost(self):
+        pass


 class IdentifierMockTask(MockTask):
-  """This class defines the mock task that does not consider the cost.
+    """This class defines the mock task that does not consider the cost.

-  The task instances will be inserted into a set. Therefore the hash and the
-  equal methods are overridden.
The unittests that compares identities of the - tasks for equality can use this mock task instead of the base mock tack. - """ + The task instances will be inserted into a set. Therefore the hash and the + equal methods are overridden. The unittests that compares identities of the + tasks for equality can use this mock task instead of the base mock tack. + """ - def __hash__(self): - return self._identifier + def __hash__(self): + return self._identifier - def __eq__(self, other): - if isinstance(other, MockTask): - return self._identifier == other.GetIdentifier(self._stage) - return False + def __eq__(self, other): + if isinstance(other, MockTask): + return self._identifier == other.GetIdentifier(self._stage) + return False diff --git a/bestflags/pipeline_process.py b/bestflags/pipeline_process.py index 97230b9f..6b061a10 100644 --- a/bestflags/pipeline_process.py +++ b/bestflags/pipeline_process.py @@ -8,116 +8,138 @@ Part of the Chrome build flags optimization. The actual stages include the builder and the executor. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import multiprocessing + # Pick an integer at random. POISONPILL = 975 class PipelineProcess(multiprocessing.Process): - """A process that encapsulates the actual content pipeline stage. - - The actual pipeline stage can be the builder or the tester. This process - continuously pull tasks from the queue until a poison pill is received. - Once a job is received, it will hand it to the actual stage for processing. - - Each pipeline stage contains three modules. - The first module continuously pulls task from the input queue. It searches the - cache to check whether the task has encountered before. If so, duplicate - computation can be avoided. - The second module consists of a pool of workers that do the actual work, e.g., - the worker will compile the source code and get the image in the builder - pipeline stage. - The third module is a helper that put the result cost to the cost field of the - duplicate tasks. For example, if two tasks are equivalent, only one task, say - t1 will be executed and the other task, say t2 will not be executed. The third - mode gets the result from t1, when it is available and set the cost of t2 to - be the same as that of t1. - """ - - def __init__(self, num_processes, name, cache, stage, task_queue, helper, - worker, result_queue): - """Set up input/output queue and the actual method to be called. - - Args: - num_processes: Number of helpers subprocessors this stage has. - name: The name of this stage. - cache: The computed tasks encountered before. - stage: An int value that specifies the stage for this pipeline stage, for - example, build stage or test stage. This value will be used to retrieve - the keys in different stage. I.e., the flags set is the key in build - stage and the checksum is the key in the test stage. The key is used to - detect duplicates. - task_queue: The input task queue for this pipeline stage. - helper: The method hosted by the helper module to fill up the cost of the - duplicate tasks. - worker: The method hosted by the worker pools to do the actual work, e.g., - compile the image. - result_queue: The output task queue for this pipeline stage. 
- """ - - multiprocessing.Process.__init__(self) - - self._name = name - self._task_queue = task_queue - self._result_queue = result_queue - - self._helper = helper - self._worker = worker - - self._cache = cache - self._stage = stage - self._num_processes = num_processes - - # the queues used by the modules for communication - manager = multiprocessing.Manager() - self._helper_queue = manager.Queue() - self._work_queue = manager.Queue() - - def run(self): - """Busy pulling the next task from the queue for execution. - - Once a job is pulled, this stage invokes the actual stage method and submits - the result to the next pipeline stage. - - The process will terminate on receiving the poison pill from previous stage. + """A process that encapsulates the actual content pipeline stage. + + The actual pipeline stage can be the builder or the tester. This process + continuously pull tasks from the queue until a poison pill is received. + Once a job is received, it will hand it to the actual stage for processing. + + Each pipeline stage contains three modules. + The first module continuously pulls task from the input queue. It searches the + cache to check whether the task has encountered before. If so, duplicate + computation can be avoided. + The second module consists of a pool of workers that do the actual work, e.g., + the worker will compile the source code and get the image in the builder + pipeline stage. + The third module is a helper that put the result cost to the cost field of the + duplicate tasks. For example, if two tasks are equivalent, only one task, say + t1 will be executed and the other task, say t2 will not be executed. The third + mode gets the result from t1, when it is available and set the cost of t2 to + be the same as that of t1. """ - # the worker pool - work_pool = multiprocessing.Pool(self._num_processes) - - # the helper process - helper_process = multiprocessing.Process( - target=self._helper, - args=(self._stage, self._cache, self._helper_queue, self._work_queue, - self._result_queue)) - helper_process.start() - mycache = self._cache.keys() - - while True: - task = self._task_queue.get() - if task == POISONPILL: - # Poison pill means shutdown - self._result_queue.put(POISONPILL) - break - - task_key = task.GetIdentifier(self._stage) - if task_key in mycache: - # The task has been encountered before. It will be sent to the helper - # module for further processing. - self._helper_queue.put(task) - else: - # Let the workers do the actual work. - work_pool.apply_async( - self._worker, - args=(self._stage, task, self._work_queue, self._result_queue)) - mycache.append(task_key) - - # Shutdown the workers pool and the helper process. - work_pool.close() - work_pool.join() - - self._helper_queue.put(POISONPILL) - helper_process.join() + def __init__( + self, + num_processes, + name, + cache, + stage, + task_queue, + helper, + worker, + result_queue, + ): + """Set up input/output queue and the actual method to be called. + + Args: + num_processes: Number of helpers subprocessors this stage has. + name: The name of this stage. + cache: The computed tasks encountered before. + stage: An int value that specifies the stage for this pipeline stage, for + example, build stage or test stage. This value will be used to retrieve + the keys in different stage. I.e., the flags set is the key in build + stage and the checksum is the key in the test stage. The key is used to + detect duplicates. + task_queue: The input task queue for this pipeline stage. 
+ helper: The method hosted by the helper module to fill up the cost of the + duplicate tasks. + worker: The method hosted by the worker pools to do the actual work, e.g., + compile the image. + result_queue: The output task queue for this pipeline stage. + """ + + multiprocessing.Process.__init__(self) + + self._name = name + self._task_queue = task_queue + self._result_queue = result_queue + + self._helper = helper + self._worker = worker + + self._cache = cache + self._stage = stage + self._num_processes = num_processes + + # the queues used by the modules for communication + manager = multiprocessing.Manager() + self._helper_queue = manager.Queue() + self._work_queue = manager.Queue() + + def run(self): + """Busy pulling the next task from the queue for execution. + + Once a job is pulled, this stage invokes the actual stage method and submits + the result to the next pipeline stage. + + The process will terminate on receiving the poison pill from previous stage. + """ + + # the worker pool + work_pool = multiprocessing.Pool(self._num_processes) + + # the helper process + helper_process = multiprocessing.Process( + target=self._helper, + args=( + self._stage, + self._cache, + self._helper_queue, + self._work_queue, + self._result_queue, + ), + ) + helper_process.start() + mycache = self._cache.keys() + + while True: + task = self._task_queue.get() + if task == POISONPILL: + # Poison pill means shutdown + self._result_queue.put(POISONPILL) + break + + task_key = task.GetIdentifier(self._stage) + if task_key in mycache: + # The task has been encountered before. It will be sent to the helper + # module for further processing. + self._helper_queue.put(task) + else: + # Let the workers do the actual work. + work_pool.apply_async( + self._worker, + args=( + self._stage, + task, + self._work_queue, + self._result_queue, + ), + ) + mycache.append(task_key) + + # Shutdown the workers pool and the helper process. + work_pool.close() + work_pool.join() + + self._helper_queue.put(POISONPILL) + helper_process.join() diff --git a/bestflags/pipeline_process_test.py b/bestflags/pipeline_process_test.py index a6d784f5..ec810d13 100644 --- a/bestflags/pipeline_process_test.py +++ b/bestflags/pipeline_process_test.py @@ -6,7 +6,7 @@ Part of the Chrome build flags optimization. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import multiprocessing import unittest @@ -14,6 +14,7 @@ import unittest from mock_task import MockTask import pipeline_process + # Pick an integer at random. ERROR = -334 # Pick an integer at random. @@ -21,69 +22,74 @@ TEST_STAGE = -8 def MockHelper(stage, done_dict, helper_queue, _, result_queue): - """This method echos input to the output.""" + """This method echos input to the output.""" - assert stage == TEST_STAGE - while True: - if not helper_queue.empty(): - task = helper_queue.get() - if task == pipeline_process.POISONPILL: - # Poison pill means shutdown - break + assert stage == TEST_STAGE + while True: + if not helper_queue.empty(): + task = helper_queue.get() + if task == pipeline_process.POISONPILL: + # Poison pill means shutdown + break - if task in done_dict: - # verify that it does not get duplicate "1"s in the test. - result_queue.put(ERROR) - else: - result_queue.put(('helper', task.GetIdentifier(TEST_STAGE))) + if task in done_dict: + # verify that it does not get duplicate "1"s in the test. 
+ result_queue.put(ERROR) + else: + result_queue.put(("helper", task.GetIdentifier(TEST_STAGE))) def MockWorker(stage, task, _, result_queue): - assert stage == TEST_STAGE - result_queue.put(('worker', task.GetIdentifier(TEST_STAGE))) + assert stage == TEST_STAGE + result_queue.put(("worker", task.GetIdentifier(TEST_STAGE))) class PipelineProcessTest(unittest.TestCase): - """This class test the PipelineProcess. + """This class test the PipelineProcess. - All the task inserted into the input queue should be taken out and hand to the - actual pipeline handler, except for the POISON_PILL. All these task should - also be passed to the next pipeline stage via the output queue. - """ + All the task inserted into the input queue should be taken out and hand to the + actual pipeline handler, except for the POISON_PILL. All these task should + also be passed to the next pipeline stage via the output queue. + """ - def testRun(self): - """Test the run method. + def testRun(self): + """Test the run method. - Ensure that all the tasks inserted into the queue are properly handled. - """ + Ensure that all the tasks inserted into the queue are properly handled. + """ - manager = multiprocessing.Manager() - inp = manager.Queue() - output = manager.Queue() + manager = multiprocessing.Manager() + inp = manager.Queue() + output = manager.Queue() - process = pipeline_process.PipelineProcess( - 2, 'testing', {}, TEST_STAGE, inp, MockHelper, MockWorker, output) + process = pipeline_process.PipelineProcess( + 2, "testing", {}, TEST_STAGE, inp, MockHelper, MockWorker, output + ) - process.start() - inp.put(MockTask(TEST_STAGE, 1)) - inp.put(MockTask(TEST_STAGE, 1)) - inp.put(MockTask(TEST_STAGE, 2)) - inp.put(pipeline_process.POISONPILL) - process.join() + process.start() + inp.put(MockTask(TEST_STAGE, 1)) + inp.put(MockTask(TEST_STAGE, 1)) + inp.put(MockTask(TEST_STAGE, 2)) + inp.put(pipeline_process.POISONPILL) + process.join() - # All tasks are processed once and only once. - result = [('worker', 1), ('helper', 1), ('worker', 2), - pipeline_process.POISONPILL] - while result: - task = output.get() + # All tasks are processed once and only once. + result = [ + ("worker", 1), + ("helper", 1), + ("worker", 2), + pipeline_process.POISONPILL, + ] + while result: + task = output.get() - # One "1"s is passed to the worker and one to the helper. - self.assertNotEqual(task, ERROR) + # One "1"s is passed to the worker and one to the helper. + self.assertNotEqual(task, ERROR) - # The messages received should be exactly the same as the result. - self.assertTrue(task in result) - result.remove(task) + # The messages received should be exactly the same as the result. + self.assertTrue(task in result) + result.remove(task) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/bestflags/pipeline_worker.py b/bestflags/pipeline_worker.py index 1ac8ac03..d045dc26 100644 --- a/bestflags/pipeline_worker.py +++ b/bestflags/pipeline_worker.py @@ -13,130 +13,135 @@ to be the same as t1 is referred to as resolving the result of t2. The worker invokes the work method of the tasks that are not duplicate. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import pipeline_process def Helper(stage, done_dict, helper_queue, completed_queue, result_queue): - """Helper that filters duplicate tasks. - - This method Continuously pulls duplicate tasks from the helper_queue. The - duplicate tasks need not be compiled/tested. 
This method also pulls completed - tasks from the worker queue and let the results of the duplicate tasks be the - same as their corresponding finished task. - - Args: - stage: The current stage of the pipeline, for example, build stage or test - stage. - done_dict: A dictionary of tasks that are done. The key of the dictionary is - the identifier of the task. The value of the dictionary is the results of - performing the corresponding task. - helper_queue: A queue of duplicate tasks whose results need to be resolved. - This is a communication channel between the pipeline_process and this - helper process. - completed_queue: A queue of tasks that have been built/tested. The results - of these tasks are needed to resolve the results of the duplicate tasks. - This is the communication channel between the workers and this helper - process. - result_queue: After the results of the duplicate tasks have been resolved, - the duplicate tasks will be sent to the next stage via this queue. - """ - - # The list of duplicate tasks, the results of which need to be resolved. - waiting_list = [] - - while True: - # Pull duplicate task from the helper queue. - if not helper_queue.empty(): - task = helper_queue.get() - - if task == pipeline_process.POISONPILL: - # Poison pill means no more duplicate task from the helper queue. - break - - # The task has not been performed before. - assert not task.Done(stage) - - # The identifier of this task. - identifier = task.GetIdentifier(stage) - - # If a duplicate task comes before the corresponding resolved results from - # the completed_queue, it will be put in the waiting list. If the result - # arrives before the duplicate task, the duplicate task will be resolved - # right away. - if identifier in done_dict: - # This task has been encountered before and the result is available. The - # result can be resolved right away. - task.SetResult(stage, done_dict[identifier]) - result_queue.put(task) - else: - waiting_list.append(task) - - # Check and get completed tasks from completed_queue. - GetResultFromCompletedQueue(stage, completed_queue, done_dict, waiting_list, - result_queue) - - # Wait to resolve the results of the remaining duplicate tasks. - while waiting_list: - GetResultFromCompletedQueue(stage, completed_queue, done_dict, waiting_list, - result_queue) - - -def GetResultFromCompletedQueue(stage, completed_queue, done_dict, waiting_list, - result_queue): - """Pull results from the completed queue and resolves duplicate tasks. - - Args: - stage: The current stage of the pipeline, for example, build stage or test - stage. - completed_queue: A queue of tasks that have been performed. The results of - these tasks are needed to resolve the results of the duplicate tasks. This - is the communication channel between the workers and this method. - done_dict: A dictionary of tasks that are done. The key of the dictionary is - the optimization flags of the task. The value of the dictionary is the - compilation results of the corresponding task. - waiting_list: The list of duplicate tasks, the results of which need to be - resolved. - result_queue: After the results of the duplicate tasks have been resolved, - the duplicate tasks will be sent to the next stage via this queue. - - This helper method tries to pull a completed task from the completed queue. - If it gets a task from the queue, it resolves the results of all the relevant - duplicate tasks in the waiting list. 
Relevant tasks are the tasks that have - the same flags as the currently received results from the completed_queue. - """ - # Pull completed task from the worker queue. - if not completed_queue.empty(): - (identifier, result) = completed_queue.get() - done_dict[identifier] = result - - tasks = [t for t in waiting_list if t.GetIdentifier(stage) == identifier] - for duplicate_task in tasks: - duplicate_task.SetResult(stage, result) - result_queue.put(duplicate_task) - waiting_list.remove(duplicate_task) + """Helper that filters duplicate tasks. + + This method Continuously pulls duplicate tasks from the helper_queue. The + duplicate tasks need not be compiled/tested. This method also pulls completed + tasks from the worker queue and let the results of the duplicate tasks be the + same as their corresponding finished task. + + Args: + stage: The current stage of the pipeline, for example, build stage or test + stage. + done_dict: A dictionary of tasks that are done. The key of the dictionary is + the identifier of the task. The value of the dictionary is the results of + performing the corresponding task. + helper_queue: A queue of duplicate tasks whose results need to be resolved. + This is a communication channel between the pipeline_process and this + helper process. + completed_queue: A queue of tasks that have been built/tested. The results + of these tasks are needed to resolve the results of the duplicate tasks. + This is the communication channel between the workers and this helper + process. + result_queue: After the results of the duplicate tasks have been resolved, + the duplicate tasks will be sent to the next stage via this queue. + """ + + # The list of duplicate tasks, the results of which need to be resolved. + waiting_list = [] + + while True: + # Pull duplicate task from the helper queue. + if not helper_queue.empty(): + task = helper_queue.get() + + if task == pipeline_process.POISONPILL: + # Poison pill means no more duplicate task from the helper queue. + break + + # The task has not been performed before. + assert not task.Done(stage) + + # The identifier of this task. + identifier = task.GetIdentifier(stage) + + # If a duplicate task comes before the corresponding resolved results from + # the completed_queue, it will be put in the waiting list. If the result + # arrives before the duplicate task, the duplicate task will be resolved + # right away. + if identifier in done_dict: + # This task has been encountered before and the result is available. The + # result can be resolved right away. + task.SetResult(stage, done_dict[identifier]) + result_queue.put(task) + else: + waiting_list.append(task) + + # Check and get completed tasks from completed_queue. + GetResultFromCompletedQueue( + stage, completed_queue, done_dict, waiting_list, result_queue + ) + + # Wait to resolve the results of the remaining duplicate tasks. + while waiting_list: + GetResultFromCompletedQueue( + stage, completed_queue, done_dict, waiting_list, result_queue + ) + + +def GetResultFromCompletedQueue( + stage, completed_queue, done_dict, waiting_list, result_queue +): + """Pull results from the completed queue and resolves duplicate tasks. + + Args: + stage: The current stage of the pipeline, for example, build stage or test + stage. + completed_queue: A queue of tasks that have been performed. The results of + these tasks are needed to resolve the results of the duplicate tasks. This + is the communication channel between the workers and this method. 
+ done_dict: A dictionary of tasks that are done. The key of the dictionary is + the optimization flags of the task. The value of the dictionary is the + compilation results of the corresponding task. + waiting_list: The list of duplicate tasks, the results of which need to be + resolved. + result_queue: After the results of the duplicate tasks have been resolved, + the duplicate tasks will be sent to the next stage via this queue. + + This helper method tries to pull a completed task from the completed queue. + If it gets a task from the queue, it resolves the results of all the relevant + duplicate tasks in the waiting list. Relevant tasks are the tasks that have + the same flags as the currently received results from the completed_queue. + """ + # Pull completed task from the worker queue. + if not completed_queue.empty(): + (identifier, result) = completed_queue.get() + done_dict[identifier] = result + + tasks = [ + t for t in waiting_list if t.GetIdentifier(stage) == identifier + ] + for duplicate_task in tasks: + duplicate_task.SetResult(stage, result) + result_queue.put(duplicate_task) + waiting_list.remove(duplicate_task) def Worker(stage, task, helper_queue, result_queue): - """Worker that performs the task. - - This method calls the work method of the input task and distribute the result - to the helper and the next stage. - - Args: - stage: The current stage of the pipeline, for example, build stage or test - stage. - task: Input task that needs to be performed. - helper_queue: Queue that holds the completed tasks and the results. This is - the communication channel between the worker and the helper. - result_queue: Queue that holds the completed tasks and the results. This is - the communication channel between the worker and the next stage. - """ - - # The task has not been completed before. - assert not task.Done(stage) - - task.Work(stage) - helper_queue.put((task.GetIdentifier(stage), task.GetResult(stage))) - result_queue.put(task) + """Worker that performs the task. + + This method calls the work method of the input task and distribute the result + to the helper and the next stage. + + Args: + stage: The current stage of the pipeline, for example, build stage or test + stage. + task: Input task that needs to be performed. + helper_queue: Queue that holds the completed tasks and the results. This is + the communication channel between the worker and the helper. + result_queue: Queue that holds the completed tasks and the results. This is + the communication channel between the worker and the next stage. + """ + + # The task has not been completed before. + assert not task.Done(stage) + + task.Work(stage) + helper_queue.put((task.GetIdentifier(stage), task.GetResult(stage))) + result_queue.put(task) diff --git a/bestflags/pipeline_worker_test.py b/bestflags/pipeline_worker_test.py index 842fc542..3fca0294 100644 --- a/bestflags/pipeline_worker_test.py +++ b/bestflags/pipeline_worker_test.py @@ -8,7 +8,7 @@ Part of the Chrome build flags optimization. This module tests the helper method and the worker method. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import multiprocessing import random @@ -19,110 +19,117 @@ from mock_task import MockTask import pipeline_process import pipeline_worker + # Pick an integer at random. 
TEST_STAGE = -3 def MockTaskCostGenerator(): - """Calls a random number generator and returns a negative number.""" - return random.randint(-sys.maxint - 1, -1) + """Calls a random number generator and returns a negative number.""" + return random.randint(-sys.maxint - 1, -1) class PipelineWorkerTest(unittest.TestCase): - """This class tests the pipeline_worker functions. - - Given the same identifier, the cost should result the same from the - pipeline_worker functions. - """ + """This class tests the pipeline_worker functions. - def testHelper(self): - """"Test the helper. - - Call the helper method twice, and test the results. The results should be - the same, i.e., the cost should be the same. + Given the same identifier, the cost should result the same from the + pipeline_worker functions. """ - # Set up the input, helper and output queue for the helper method. - manager = multiprocessing.Manager() - helper_queue = manager.Queue() - result_queue = manager.Queue() - completed_queue = manager.Queue() - - # Set up the helper process that holds the helper method. - helper_process = multiprocessing.Process( - target=pipeline_worker.Helper, - args=(TEST_STAGE, {}, helper_queue, completed_queue, result_queue)) - helper_process.start() - - # A dictionary defines the mock result to the helper. - mock_result = {1: 1995, 2: 59, 9: 1027} - - # Test if there is a task that is done before, whether the duplicate task - # will have the same result. Here, two different scenarios are tested. That - # is the mock results are added to the completed_queue before and after the - # corresponding mock tasks being added to the input queue. - completed_queue.put((9, mock_result[9])) - - # The output of the helper should contain all the following tasks. - results = [1, 1, 2, 9] - - # Testing the correctness of having tasks having the same identifier, here - # 1. - for result in results: - helper_queue.put(MockTask(TEST_STAGE, result, MockTaskCostGenerator())) - - completed_queue.put((2, mock_result[2])) - completed_queue.put((1, mock_result[1])) - - # Signal there is no more duplicate task. - helper_queue.put(pipeline_process.POISONPILL) - helper_process.join() - - while results: - task = result_queue.get() - identifier = task.GetIdentifier(TEST_STAGE) - self.assertTrue(identifier in results) - if identifier in mock_result: - self.assertTrue(task.GetResult(TEST_STAGE), mock_result[identifier]) - results.remove(identifier) - - def testWorker(self): - """"Test the worker method. - - The worker should process all the input tasks and output the tasks to the - helper and result queue. - """ - - manager = multiprocessing.Manager() - result_queue = manager.Queue() - completed_queue = manager.Queue() - - # A dictionary defines the mock tasks and their corresponding results. - mock_work_tasks = {1: 86, 2: 788} - - mock_tasks = [] - - for flag, cost in mock_work_tasks.iteritems(): - mock_tasks.append(MockTask(TEST_STAGE, flag, cost)) - - # Submit the mock tasks to the worker. - for mock_task in mock_tasks: - pipeline_worker.Worker(TEST_STAGE, mock_task, completed_queue, - result_queue) - - # The tasks, from the output queue, should be the same as the input and - # should be performed. - for task in mock_tasks: - output = result_queue.get() - self.assertEqual(output, task) - self.assertTrue(output.Done(TEST_STAGE)) - - # The tasks, from the completed_queue, should be defined in the - # mock_work_tasks dictionary. 
- for flag, cost in mock_work_tasks.iteritems(): - helper_input = completed_queue.get() - self.assertEqual(helper_input, (flag, cost)) - - -if __name__ == '__main__': - unittest.main() + def testHelper(self): + """ "Test the helper. + + Call the helper method twice, and test the results. The results should be + the same, i.e., the cost should be the same. + """ + + # Set up the input, helper and output queue for the helper method. + manager = multiprocessing.Manager() + helper_queue = manager.Queue() + result_queue = manager.Queue() + completed_queue = manager.Queue() + + # Set up the helper process that holds the helper method. + helper_process = multiprocessing.Process( + target=pipeline_worker.Helper, + args=(TEST_STAGE, {}, helper_queue, completed_queue, result_queue), + ) + helper_process.start() + + # A dictionary defines the mock result to the helper. + mock_result = {1: 1995, 2: 59, 9: 1027} + + # Test if there is a task that is done before, whether the duplicate task + # will have the same result. Here, two different scenarios are tested. That + # is the mock results are added to the completed_queue before and after the + # corresponding mock tasks being added to the input queue. + completed_queue.put((9, mock_result[9])) + + # The output of the helper should contain all the following tasks. + results = [1, 1, 2, 9] + + # Testing the correctness of having tasks having the same identifier, here + # 1. + for result in results: + helper_queue.put( + MockTask(TEST_STAGE, result, MockTaskCostGenerator()) + ) + + completed_queue.put((2, mock_result[2])) + completed_queue.put((1, mock_result[1])) + + # Signal there is no more duplicate task. + helper_queue.put(pipeline_process.POISONPILL) + helper_process.join() + + while results: + task = result_queue.get() + identifier = task.GetIdentifier(TEST_STAGE) + self.assertTrue(identifier in results) + if identifier in mock_result: + self.assertTrue( + task.GetResult(TEST_STAGE), mock_result[identifier] + ) + results.remove(identifier) + + def testWorker(self): + """ "Test the worker method. + + The worker should process all the input tasks and output the tasks to the + helper and result queue. + """ + + manager = multiprocessing.Manager() + result_queue = manager.Queue() + completed_queue = manager.Queue() + + # A dictionary defines the mock tasks and their corresponding results. + mock_work_tasks = {1: 86, 2: 788} + + mock_tasks = [] + + for flag, cost in mock_work_tasks.iteritems(): + mock_tasks.append(MockTask(TEST_STAGE, flag, cost)) + + # Submit the mock tasks to the worker. + for mock_task in mock_tasks: + pipeline_worker.Worker( + TEST_STAGE, mock_task, completed_queue, result_queue + ) + + # The tasks, from the output queue, should be the same as the input and + # should be performed. + for task in mock_tasks: + output = result_queue.get() + self.assertEqual(output, task) + self.assertTrue(output.Done(TEST_STAGE)) + + # The tasks, from the completed_queue, should be defined in the + # mock_work_tasks dictionary. + for flag, cost in mock_work_tasks.iteritems(): + helper_input = completed_queue.get() + self.assertEqual(helper_input, (flag, cost)) + + +if __name__ == "__main__": + unittest.main() diff --git a/bestflags/steering.py b/bestflags/steering.py index 41173e42..a640507d 100644 --- a/bestflags/steering.py +++ b/bestflags/steering.py @@ -6,111 +6,111 @@ Part of the Chrome build flags optimization. 
""" -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import pipeline_process def Steering(cache, generations, input_queue, result_queue): - """The core method template that produces the next generation of tasks to run. - - This method waits for the results of the tasks from the previous generation. - Upon the arrival of all these results, the method uses them to generate the - next generation of tasks. - - The main logic of producing the next generation from previous generation is - application specific. For example, in the genetic algorithm, a task is - produced by combining two parents tasks, while in the hill climbing algorithm, - a task is generated by its immediate neighbor. The method 'Next' is overridden - in the concrete subclasses of the class Generation to produce the next - application-specific generation. The steering method invokes the 'Next' - method, produces the next generation and submits the tasks in this generation - to the next stage, e.g., the build/compilation stage. - - Args: - cache: It stores the experiments that have been conducted before. Used to - avoid duplicate works. - generations: The initial generations of tasks to be run. - input_queue: The input results from the last stage of the framework. These - results will trigger new iteration of the algorithm. - result_queue: The output task queue for this pipeline stage. The new tasks - generated by the steering algorithm will be sent to the next stage via - this queue. - """ - - # Generations that have pending tasks to be executed. Pending tasks are those - # whose results are not ready. The tasks that have their results ready are - # referenced to as ready tasks. Once there is no pending generation, the - # algorithm terminates. - waiting = generations - - # Record how many initial tasks there are. If there is no task at all, the - # algorithm can terminate right away. - num_tasks = 0 - - # Submit all the tasks in the initial generations to the next stage of the - # framework. The next stage can be the build/compilation stage. - for generation in generations: - # Only send the task that has not been performed before to the next stage. - for task in [task for task in generation.Pool() if task not in cache]: - result_queue.put(task) - cache.add(task) - num_tasks += 1 - - # If there is no task to be executed at all, the algorithm returns right away. - if not num_tasks: - # Inform the next stage that there will be no more task. + """The core method template that produces the next generation of tasks to run. + + This method waits for the results of the tasks from the previous generation. + Upon the arrival of all these results, the method uses them to generate the + next generation of tasks. + + The main logic of producing the next generation from previous generation is + application specific. For example, in the genetic algorithm, a task is + produced by combining two parents tasks, while in the hill climbing algorithm, + a task is generated by its immediate neighbor. The method 'Next' is overridden + in the concrete subclasses of the class Generation to produce the next + application-specific generation. The steering method invokes the 'Next' + method, produces the next generation and submits the tasks in this generation + to the next stage, e.g., the build/compilation stage. + + Args: + cache: It stores the experiments that have been conducted before. Used to + avoid duplicate works. + generations: The initial generations of tasks to be run. 
+ input_queue: The input results from the last stage of the framework. These + results will trigger new iteration of the algorithm. + result_queue: The output task queue for this pipeline stage. The new tasks + generated by the steering algorithm will be sent to the next stage via + this queue. + """ + + # Generations that have pending tasks to be executed. Pending tasks are those + # whose results are not ready. The tasks that have their results ready are + # referenced to as ready tasks. Once there is no pending generation, the + # algorithm terminates. + waiting = generations + + # Record how many initial tasks there are. If there is no task at all, the + # algorithm can terminate right away. + num_tasks = 0 + + # Submit all the tasks in the initial generations to the next stage of the + # framework. The next stage can be the build/compilation stage. + for generation in generations: + # Only send the task that has not been performed before to the next stage. + for task in [task for task in generation.Pool() if task not in cache]: + result_queue.put(task) + cache.add(task) + num_tasks += 1 + + # If there is no task to be executed at all, the algorithm returns right away. + if not num_tasks: + # Inform the next stage that there will be no more task. + result_queue.put(pipeline_process.POISONPILL) + return + + # The algorithm is done if there is no pending generation. A generation is + # pending if it has pending task. + while waiting: + # Busy-waiting for the next task. + if input_queue.empty(): + continue + + # If there is a task whose result is ready from the last stage of the + # feedback loop, there will be one less pending task. + + task = input_queue.get() + + # Store the result of this ready task. Intermediate results can be used to + # generate report for final result or be used to reboot from a crash from + # the failure of any module of the framework. + task.LogSteeringCost() + + # Find out which pending generation this ready task belongs to. This pending + # generation will have one less pending task. The "next" expression iterates + # the generations in waiting until the first generation whose UpdateTask + # method returns true. + generation = next(gen for gen in waiting if gen.UpdateTask(task)) + + # If there is still any pending task, do nothing. + if not generation.Done(): + continue + + # All the tasks in the generation are finished. The generation is ready to + # produce the next generation. + waiting.remove(generation) + + # Check whether a generation should generate the next generation. + # A generation may not generate the next generation, e.g., because a + # fixpoint has been reached, there has not been any improvement for a few + # generations or a local maxima is reached. + if not generation.IsImproved(): + continue + + for new_generation in generation.Next(cache): + # Make sure that each generation should contain at least one task. + assert new_generation.Pool() + waiting.append(new_generation) + + # Send the tasks of the new generations to the next stage for execution. + for new_task in new_generation.Pool(): + result_queue.put(new_task) + cache.add(new_task) + + # Steering algorithm is finished and it informs the next stage that there will + # be no more task. result_queue.put(pipeline_process.POISONPILL) - return - - # The algorithm is done if there is no pending generation. A generation is - # pending if it has pending task. - while waiting: - # Busy-waiting for the next task. 
- if input_queue.empty(): - continue - - # If there is a task whose result is ready from the last stage of the - # feedback loop, there will be one less pending task. - - task = input_queue.get() - - # Store the result of this ready task. Intermediate results can be used to - # generate report for final result or be used to reboot from a crash from - # the failure of any module of the framework. - task.LogSteeringCost() - - # Find out which pending generation this ready task belongs to. This pending - # generation will have one less pending task. The "next" expression iterates - # the generations in waiting until the first generation whose UpdateTask - # method returns true. - generation = next(gen for gen in waiting if gen.UpdateTask(task)) - - # If there is still any pending task, do nothing. - if not generation.Done(): - continue - - # All the tasks in the generation are finished. The generation is ready to - # produce the next generation. - waiting.remove(generation) - - # Check whether a generation should generate the next generation. - # A generation may not generate the next generation, e.g., because a - # fixpoint has been reached, there has not been any improvement for a few - # generations or a local maxima is reached. - if not generation.IsImproved(): - continue - - for new_generation in generation.Next(cache): - # Make sure that each generation should contain at least one task. - assert new_generation.Pool() - waiting.append(new_generation) - - # Send the tasks of the new generations to the next stage for execution. - for new_task in new_generation.Pool(): - result_queue.put(new_task) - cache.add(new_task) - - # Steering algorithm is finished and it informs the next stage that there will - # be no more task. - result_queue.put(pipeline_process.POISONPILL) diff --git a/bestflags/steering_test.py b/bestflags/steering_test.py index 8ad0b3cb..ac91e925 100644 --- a/bestflags/steering_test.py +++ b/bestflags/steering_test.py @@ -6,7 +6,7 @@ Part of the Chrome build flags optimization. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import multiprocessing import unittest @@ -16,6 +16,7 @@ from mock_task import IdentifierMockTask import pipeline_process import steering + # Pick an integer at random. STEERING_TEST_STAGE = -8 @@ -31,140 +32,153 @@ STRIDE = 7 class MockGeneration(Generation): - """This class emulates an actual generation. - - It will output the next_generations when the method Next is called. The - next_generations is initiated when the MockGeneration instance is constructed. - """ - - def __init__(self, tasks, next_generations): - """Set up the next generations for this task. + """This class emulates an actual generation. - Args: - tasks: A set of tasks to be run. - next_generations: A list of generations as the next generation of the - current generation. + It will output the next_generations when the method Next is called. The + next_generations is initiated when the MockGeneration instance is constructed. """ - Generation.__init__(self, tasks, None) - self._next_generations = next_generations - def Next(self, _): - return self._next_generations + def __init__(self, tasks, next_generations): + """Set up the next generations for this task. - def IsImproved(self): - if self._next_generations: - return True - return False + Args: + tasks: A set of tasks to be run. + next_generations: A list of generations as the next generation of the + current generation. 
+ """ + Generation.__init__(self, tasks, None) + self._next_generations = next_generations + def Next(self, _): + return self._next_generations -class SteeringTest(unittest.TestCase): - """This class test the steering method. + def IsImproved(self): + if self._next_generations: + return True + return False - The steering algorithm should return if there is no new task in the initial - generation. The steering algorithm should send all the tasks to the next stage - and should terminate once there is no pending generation. A generation is - pending if it contains pending task. A task is pending if its (test) result - is not ready. - """ - def testSteering(self): - """Test that the steering algorithm processes all the tasks properly. +class SteeringTest(unittest.TestCase): + """This class test the steering method. - Test that the steering algorithm sends all the tasks to the next stage. Test - that the steering algorithm terminates once all the tasks have been - processed, i.e., the results for the tasks are all ready. + The steering algorithm should return if there is no new task in the initial + generation. The steering algorithm should send all the tasks to the next stage + and should terminate once there is no pending generation. A generation is + pending if it contains pending task. A task is pending if its (test) result + is not ready. """ - # A list of generations used to test the steering stage. - generations = [] - - task_index = 0 - previous_generations = None - - # Generate a sequence of generations to be tested. Each generation will - # output the next generation in reverse order of the list when the "Next" - # method is called. - for _ in range(NUMBER_OF_GENERATIONS): - # Use a consecutive sequence of numbers as identifiers for the set of - # tasks put into a generation. - test_ranges = range(task_index, task_index + NUMBER_OF_TASKS) - tasks = [IdentifierMockTask(STEERING_TEST_STAGE, t) for t in test_ranges] - steering_tasks = set(tasks) - - # Let the previous generation as the offspring generation of the current - # generation. - current_generation = MockGeneration(steering_tasks, previous_generations) - generations.insert(0, current_generation) - previous_generations = [current_generation] - - task_index += NUMBER_OF_TASKS - - # If there is no generation at all, the unittest returns right away. - if not current_generation: - return - - # Set up the input and result queue for the steering method. - manager = multiprocessing.Manager() - input_queue = manager.Queue() - result_queue = manager.Queue() - - steering_process = multiprocessing.Process( - target=steering.Steering, - args=(set(), [current_generation], input_queue, result_queue)) - steering_process.start() - - # Test that each generation is processed properly. I.e., the generations are - # processed in order. - while generations: - generation = generations.pop(0) - tasks = [task for task in generation.Pool()] - - # Test that all the tasks are processed once and only once. - while tasks: - task = result_queue.get() - - assert task in tasks - tasks.remove(task) - - input_queue.put(task) + def testSteering(self): + """Test that the steering algorithm processes all the tasks properly. + + Test that the steering algorithm sends all the tasks to the next stage. Test + that the steering algorithm terminates once all the tasks have been + processed, i.e., the results for the tasks are all ready. + """ + + # A list of generations used to test the steering stage. 
+ generations = [] + + task_index = 0 + previous_generations = None + + # Generate a sequence of generations to be tested. Each generation will + # output the next generation in reverse order of the list when the "Next" + # method is called. + for _ in range(NUMBER_OF_GENERATIONS): + # Use a consecutive sequence of numbers as identifiers for the set of + # tasks put into a generation. + test_ranges = range(task_index, task_index + NUMBER_OF_TASKS) + tasks = [ + IdentifierMockTask(STEERING_TEST_STAGE, t) for t in test_ranges + ] + steering_tasks = set(tasks) + + # Let the previous generation as the offspring generation of the current + # generation. + current_generation = MockGeneration( + steering_tasks, previous_generations + ) + generations.insert(0, current_generation) + previous_generations = [current_generation] + + task_index += NUMBER_OF_TASKS + + # If there is no generation at all, the unittest returns right away. + if not current_generation: + return + + # Set up the input and result queue for the steering method. + manager = multiprocessing.Manager() + input_queue = manager.Queue() + result_queue = manager.Queue() + + steering_process = multiprocessing.Process( + target=steering.Steering, + args=(set(), [current_generation], input_queue, result_queue), + ) + steering_process.start() + + # Test that each generation is processed properly. I.e., the generations are + # processed in order. + while generations: + generation = generations.pop(0) + tasks = [task for task in generation.Pool()] + + # Test that all the tasks are processed once and only once. + while tasks: + task = result_queue.get() + + assert task in tasks + tasks.remove(task) + + input_queue.put(task) - task = result_queue.get() + task = result_queue.get() - # Test that the steering algorithm returns properly after processing all - # the generations. - assert task == pipeline_process.POISONPILL + # Test that the steering algorithm returns properly after processing all + # the generations. + assert task == pipeline_process.POISONPILL - steering_process.join() + steering_process.join() - def testCache(self): - """The steering algorithm returns immediately if there is no new tasks. + def testCache(self): + """The steering algorithm returns immediately if there is no new tasks. - If all the new tasks have been cached before, the steering algorithm does - not have to execute these tasks again and thus can terminate right away. - """ + If all the new tasks have been cached before, the steering algorithm does + not have to execute these tasks again and thus can terminate right away. + """ - # Put a set of tasks in the cache and add this set to initial generation. - test_ranges = range(NUMBER_OF_TASKS) - tasks = [IdentifierMockTask(STEERING_TEST_STAGE, t) for t in test_ranges] - steering_tasks = set(tasks) + # Put a set of tasks in the cache and add this set to initial generation. + test_ranges = range(NUMBER_OF_TASKS) + tasks = [ + IdentifierMockTask(STEERING_TEST_STAGE, t) for t in test_ranges + ] + steering_tasks = set(tasks) - current_generation = MockGeneration(steering_tasks, None) + current_generation = MockGeneration(steering_tasks, None) - # Set up the input and result queue for the steering method. - manager = multiprocessing.Manager() - input_queue = manager.Queue() - result_queue = manager.Queue() + # Set up the input and result queue for the steering method. 
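The chain construction in testSteering above can be puzzling on first read: the loop builds generations back to front, so each generation's Next() hands back the generation created in the previous iteration. A compressed, runnable illustration follows; ChainGeneration and the integer identifiers are invented stand-ins for MockGeneration and IdentifierMockTask.

class ChainGeneration(object):
    """Stand-in exposing the same Next() contract as MockGeneration above."""

    def __init__(self, tasks, next_generations):
        self._tasks = tasks
        self._next_generations = next_generations

    def Next(self, _):
        return self._next_generations or []


generations = []
previous_generations = None
task_index = 0

# Build three links back to front; each link's Next() yields the link
# created in the previous loop iteration.
for _ in range(3):
    tasks = set(range(task_index, task_index + 2))  # toy identifiers
    current_generation = ChainGeneration(tasks, previous_generations)
    generations.insert(0, current_generation)  # newest link goes first
    previous_generations = [current_generation]
    task_index += 2

# Steering is started from the newest link (current_generation); following
# Next() visits the older links one at a time, so the test can pop its local
# list front to back and stay in step with the steering process.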
+ manager = multiprocessing.Manager() + input_queue = manager.Queue() + result_queue = manager.Queue() - steering_process = multiprocessing.Process( - target=steering.Steering, - args=(steering_tasks, [current_generation], input_queue, result_queue)) + steering_process = multiprocessing.Process( + target=steering.Steering, + args=( + steering_tasks, + [current_generation], + input_queue, + result_queue, + ), + ) - steering_process.start() + steering_process.start() - # Test that the steering method returns right away. - assert result_queue.get() == pipeline_process.POISONPILL - steering_process.join() + # Test that the steering method returns right away. + assert result_queue.get() == pipeline_process.POISONPILL + steering_process.join() -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/bestflags/task.py b/bestflags/task.py index 86a251f9..cd2f0146 100644 --- a/bestflags/task.py +++ b/bestflags/task.py @@ -12,18 +12,19 @@ the image and the checksum field of a Task. The executor module will put the execution output to the execution field. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import os import subprocess import sys from uuid import uuid4 + BUILD_STAGE = 1 TEST_STAGE = 2 # Message indicating that the build or test failed. -ERROR_STRING = 'error' +ERROR_STRING = "error" # The maximum number of tries a build can have. Some compilations may fail due # to unexpected environment circumstance. This variable defines how many tries @@ -38,413 +39,456 @@ TEST_TRIES = 3 # Create the file/directory if it does not already exist. def _CreateDirectory(file_name): - directory = os.path.dirname(file_name) - if not os.path.exists(directory): - os.makedirs(directory) + directory = os.path.dirname(file_name) + if not os.path.exists(directory): + os.makedirs(directory) class Task(object): - """A single reproducing entity. - - A single test of performance with a particular set of flags. It records the - flag set, the image, the check sum of the image and the cost. - """ - - # The command that will be used in the build stage to compile the tasks. - BUILD_COMMAND = None - # The command that will be used in the test stage to test the tasks. - TEST_COMMAND = None - # The directory to log the compilation and test results. - LOG_DIRECTORY = None - - @staticmethod - def InitLogCommand(build_command, test_command, log_directory): - """Set up the build and test command for the task and the log directory. - - This framework is generic. It lets the client specify application specific - compile and test methods by passing different build_command and - test_command. - - Args: - build_command: The command that will be used in the build stage to compile - this task. - test_command: The command that will be used in the test stage to test this - task. - log_directory: The directory to log the compilation and test results. - """ - - Task.BUILD_COMMAND = build_command - Task.TEST_COMMAND = test_command - Task.LOG_DIRECTORY = log_directory - - def __init__(self, flag_set): - """Set up the optimization flag selection for this task. - - Args: - flag_set: The optimization flag set that is encapsulated by this task. - """ - - self._flag_set = flag_set - - # A unique identifier that distinguishes this task from other tasks. - self._task_identifier = uuid4() - - self._log_path = (Task.LOG_DIRECTORY, self._task_identifier) - - # Initiate the hash value. 
The hash value is used so as not to recompute it - # every time the hash method is called. - self._hash_value = None - - # Indicate that the task has not been compiled/tested. - self._build_cost = None - self._exe_cost = None - self._checksum = None - self._image = None - self._file_length = None - self._text_length = None - - def __eq__(self, other): - """Test whether two tasks are equal. - - Two tasks are equal if their flag_set are equal. + """A single reproducing entity. - Args: - other: The other task with which this task is tested equality. - Returns: - True if the encapsulated flag sets are equal. - """ - if isinstance(other, Task): - return self.GetFlags() == other.GetFlags() - return False - - def __hash__(self): - if self._hash_value is None: - # Cache the hash value of the flags, so as not to recompute them. - self._hash_value = hash(self._flag_set) - return self._hash_value - - def GetIdentifier(self, stage): - """Get the identifier of the task in the stage. - - The flag set uniquely identifies a task in the build stage. The checksum of - the image of the task uniquely identifies the task in the test stage. - - Args: - stage: The stage (build/test) in which this method is called. - Returns: - Return the flag set in build stage and return the checksum in test stage. + A single test of performance with a particular set of flags. It records the + flag set, the image, the check sum of the image and the cost. """ - # Define the dictionary for different stage function lookup. - get_identifier_functions = {BUILD_STAGE: self.FormattedFlags, - TEST_STAGE: self.__GetCheckSum} + # The command that will be used in the build stage to compile the tasks. + BUILD_COMMAND = None + # The command that will be used in the test stage to test the tasks. + TEST_COMMAND = None + # The directory to log the compilation and test results. + LOG_DIRECTORY = None + + @staticmethod + def InitLogCommand(build_command, test_command, log_directory): + """Set up the build and test command for the task and the log directory. + + This framework is generic. It lets the client specify application specific + compile and test methods by passing different build_command and + test_command. + + Args: + build_command: The command that will be used in the build stage to compile + this task. + test_command: The command that will be used in the test stage to test this + task. + log_directory: The directory to log the compilation and test results. + """ + + Task.BUILD_COMMAND = build_command + Task.TEST_COMMAND = test_command + Task.LOG_DIRECTORY = log_directory + + def __init__(self, flag_set): + """Set up the optimization flag selection for this task. + + Args: + flag_set: The optimization flag set that is encapsulated by this task. + """ - assert stage in get_identifier_functions - return get_identifier_functions[stage]() + self._flag_set = flag_set + + # A unique identifier that distinguishes this task from other tasks. + self._task_identifier = uuid4() + + self._log_path = (Task.LOG_DIRECTORY, self._task_identifier) + + # Initiate the hash value. The hash value is used so as not to recompute it + # every time the hash method is called. + self._hash_value = None + + # Indicate that the task has not been compiled/tested. + self._build_cost = None + self._exe_cost = None + self._checksum = None + self._image = None + self._file_length = None + self._text_length = None - def GetResult(self, stage): - """Get the performance results of the task in the stage. + def __eq__(self, other): + """Test whether two tasks are equal. 
- Args: - stage: The stage (build/test) in which this method is called. - Returns: - Performance results. - """ + Two tasks are equal if their flag_set are equal. - # Define the dictionary for different stage function lookup. - get_result_functions = {BUILD_STAGE: self.__GetBuildResult, - TEST_STAGE: self.GetTestResult} + Args: + other: The other task with which this task is tested equality. + Returns: + True if the encapsulated flag sets are equal. + """ + if isinstance(other, Task): + return self.GetFlags() == other.GetFlags() + return False - assert stage in get_result_functions + def __hash__(self): + if self._hash_value is None: + # Cache the hash value of the flags, so as not to recompute them. + self._hash_value = hash(self._flag_set) + return self._hash_value - return get_result_functions[stage]() + def GetIdentifier(self, stage): + """Get the identifier of the task in the stage. - def SetResult(self, stage, result): - """Set the performance results of the task in the stage. + The flag set uniquely identifies a task in the build stage. The checksum of + the image of the task uniquely identifies the task in the test stage. - This method is called by the pipeling_worker to set the results for - duplicated tasks. + Args: + stage: The stage (build/test) in which this method is called. + Returns: + Return the flag set in build stage and return the checksum in test stage. + """ - Args: - stage: The stage (build/test) in which this method is called. - result: The performance results of the stage. - """ + # Define the dictionary for different stage function lookup. + get_identifier_functions = { + BUILD_STAGE: self.FormattedFlags, + TEST_STAGE: self.__GetCheckSum, + } - # Define the dictionary for different stage function lookup. - set_result_functions = {BUILD_STAGE: self.__SetBuildResult, - TEST_STAGE: self.__SetTestResult} + assert stage in get_identifier_functions + return get_identifier_functions[stage]() - assert stage in set_result_functions + def GetResult(self, stage): + """Get the performance results of the task in the stage. - set_result_functions[stage](result) + Args: + stage: The stage (build/test) in which this method is called. + Returns: + Performance results. + """ - def Done(self, stage): - """Check whether the stage is done. + # Define the dictionary for different stage function lookup. + get_result_functions = { + BUILD_STAGE: self.__GetBuildResult, + TEST_STAGE: self.GetTestResult, + } - Args: - stage: The stage to be checked, build or test. - Returns: - True if the stage is done. - """ + assert stage in get_result_functions - # Define the dictionary for different result string lookup. - done_string = {BUILD_STAGE: self._build_cost, TEST_STAGE: self._exe_cost} + return get_result_functions[stage]() - assert stage in done_string + def SetResult(self, stage, result): + """Set the performance results of the task in the stage. - return done_string[stage] is not None + This method is called by the pipeling_worker to set the results for + duplicated tasks. - def Work(self, stage): - """Perform the task. + Args: + stage: The stage (build/test) in which this method is called. + result: The performance results of the stage. + """ + + # Define the dictionary for different stage function lookup. + set_result_functions = { + BUILD_STAGE: self.__SetBuildResult, + TEST_STAGE: self.__SetTestResult, + } + + assert stage in set_result_functions + + set_result_functions[stage](result) + + def Done(self, stage): + """Check whether the stage is done. 
+ + Args: + stage: The stage to be checked, build or test. + Returns: + True if the stage is done. + """ + + # Define the dictionary for different result string lookup. + done_string = { + BUILD_STAGE: self._build_cost, + TEST_STAGE: self._exe_cost, + } + + assert stage in done_string + + return done_string[stage] is not None + + def Work(self, stage): + """Perform the task. + + Args: + stage: The stage in which the task is performed, compile or test. + """ + + # Define the dictionary for different stage function lookup. + work_functions = {BUILD_STAGE: self.__Compile, TEST_STAGE: self.__Test} + + assert stage in work_functions + + work_functions[stage]() + + def FormattedFlags(self): + """Format the optimization flag set of this task. + + Returns: + The formatted optimization flag set that is encapsulated by this task. + """ + return str(self._flag_set.FormattedForUse()) + + def GetFlags(self): + """Get the optimization flag set of this task. + + Returns: + The optimization flag set that is encapsulated by this task. + """ + + return self._flag_set + + def __GetCheckSum(self): + """Get the compilation image checksum of this task. + + Returns: + The compilation image checksum of this task. + """ + + # The checksum should be computed before this method is called. + assert self._checksum is not None + return self._checksum + + def __Compile(self): + """Run a compile. + + This method compile an image using the present flags, get the image, + test the existent of the image and gathers monitoring information, and sets + the internal cost (fitness) for this set of flags. + """ + + # Format the flags as a string as input to compile command. The unique + # identifier is passed to the compile command. If concurrent processes are + # used to compile different tasks, these processes can use the identifier to + # write to different file. + flags = self._flag_set.FormattedForUse() + command = "%s %s %s" % ( + Task.BUILD_COMMAND, + " ".join(flags), + self._task_identifier, + ) + + # Try BUILD_TRIES number of times before confirming that the build fails. + for _ in range(BUILD_TRIES): + try: + # Execute the command and get the execution status/results. + p = subprocess.Popen( + command.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + (out, err) = p.communicate() + + if out: + out = out.strip() + if out != ERROR_STRING: + # Each build results contains the checksum of the result image, the + # performance cost of the build, the compilation image, the length + # of the build, and the length of the text section of the build. + ( + checksum, + cost, + image, + file_length, + text_length, + ) = out.split() + # Build successfully. + break + + # Build failed. + cost = ERROR_STRING + except _: + # If there is exception getting the cost information of the build, the + # build failed. + cost = ERROR_STRING + + # Convert the build cost from String to integer. The build cost is used to + # compare a task with another task. Set the build cost of the failing task + # to the max integer. The for loop will keep trying until either there is a + # success or BUILD_TRIES number of tries have been conducted. + self._build_cost = sys.maxint if cost == ERROR_STRING else float(cost) + + self._checksum = checksum + self._file_length = file_length + self._text_length = text_length + self._image = image + + self.__LogBuildCost(err) + + def __Test(self): + """__Test the task against benchmark(s) using the input test command.""" + + # Ensure that the task is compiled before being tested. 
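Task routes every stage-dependent operation above (GetIdentifier, GetResult, SetResult, Done, Work) through a small dictionary keyed by stage rather than an if/else ladder. A stripped-down sketch of the pattern, with invented method names:

BUILD_STAGE = 1
TEST_STAGE = 2


class StageDispatchExample(object):
    """Invented illustration of Task's dictionary-based stage dispatch."""

    def _CompileSketch(self):
        return "compiling"

    def _TestSketch(self):
        return "testing"

    def Work(self, stage):
        # One dict per operation maps each stage to a bound method; an
        # unknown stage fails the assert instead of silently doing nothing.
        work_functions = {
            BUILD_STAGE: self._CompileSketch,
            TEST_STAGE: self._TestSketch,
        }
        assert stage in work_functions
        return work_functions[stage]()


print(StageDispatchExample().Work(TEST_STAGE))  # -> testing

Adding a new stage then touches one dictionary entry per operation rather than a chain of if/elif branches.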
+ assert self._image is not None + + # If the task does not compile, no need to test. + if self._image == ERROR_STRING: + self._exe_cost = ERROR_STRING + return + + # The unique identifier is passed to the test command. If concurrent + # processes are used to compile different tasks, these processes can use the + # identifier to write to different file. + command = "%s %s %s" % ( + Task.TEST_COMMAND, + self._image, + self._task_identifier, + ) + + # Try TEST_TRIES number of times before confirming that the build fails. + for _ in range(TEST_TRIES): + try: + p = subprocess.Popen( + command.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + (out, err) = p.communicate() + + if out: + out = out.strip() + if out != ERROR_STRING: + # The test results contains the performance cost of the test. + cost = out + # Test successfully. + break + + # Test failed. + cost = ERROR_STRING + except _: + # If there is exception getting the cost information of the test, the + # test failed. The for loop will keep trying until either there is a + # success or TEST_TRIES number of tries have been conducted. + cost = ERROR_STRING + + self._exe_cost = sys.maxint if (cost == ERROR_STRING) else float(cost) + + self.__LogTestCost(err) + + def __SetBuildResult( + self, (checksum, build_cost, image, file_length, text_length) + ): + self._checksum = checksum + self._build_cost = build_cost + self._image = image + self._file_length = file_length + self._text_length = text_length + + def __GetBuildResult(self): + return ( + self._checksum, + self._build_cost, + self._image, + self._file_length, + self._text_length, + ) + + def GetTestResult(self): + return self._exe_cost + + def __SetTestResult(self, exe_cost): + self._exe_cost = exe_cost + + def LogSteeringCost(self): + """Log the performance results for the task. + + This method is called by the steering stage and this method writes the + results out to a file. The results include the build and the test results. + """ + + steering_log = "%s/%s/steering.txt" % self._log_path + + _CreateDirectory(steering_log) + + with open(steering_log, "w") as out_file: + # Include the build and the test results. + steering_result = ( + self._flag_set, + self._checksum, + self._build_cost, + self._image, + self._file_length, + self._text_length, + self._exe_cost, + ) + + # Write out the result in the comma-separated format (CSV). + out_file.write("%s,%s,%s,%s,%s,%s,%s\n" % steering_result) + + def __LogBuildCost(self, log): + """Log the build results for the task. + + The build results include the compilation time of the build, the result + image, the checksum, the file length and the text length of the image. + The file length of the image includes the length of the file of the image. + The text length only includes the length of the text section of the image. + + Args: + log: The build log of this task. + """ + + build_result_log = "%s/%s/build.txt" % self._log_path + + _CreateDirectory(build_result_log) + + with open(build_result_log, "w") as out_file: + build_result = ( + self._flag_set, + self._build_cost, + self._image, + self._checksum, + self._file_length, + self._text_length, + ) - Args: - stage: The stage in which the task is performed, compile or test. - """ + # Write out the result in the comma-separated format (CSV). + out_file.write("%s,%s,%s,%s,%s,%s\n" % build_result) - # Define the dictionary for different stage function lookup. - work_functions = {BUILD_STAGE: self.__Compile, TEST_STAGE: self.__Test} + # The build information about running the build. 
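One pre-existing wart survives the reformat: both retry loops read "except _:", and since "_" is the for-loop variable (an int), the clause does not name an exception class and cannot catch a real failure; "except Exception:" is almost certainly what was meant. Likewise sys.maxint exists only in Python 2; sys.maxsize is the Python 3 spelling. A corrected sketch of the same try-N-times pattern, with "echo 42" as an invented stand-in for the build command:

import subprocess
import sys

ERROR_STRING = "error"
BUILD_TRIES = 3


def run_with_retries(command):
    """Retry a build/test command, mirroring the loop above with a real handler."""
    cost = ERROR_STRING
    for _ in range(BUILD_TRIES):
        try:
            out = subprocess.run(
                command.split(), capture_output=True, text=True, check=False
            ).stdout.strip()
            if out and out != ERROR_STRING:
                cost = out  # success: keep the reported cost and stop retrying
                break
            cost = ERROR_STRING
        except Exception:  # what "except _:" was presumably meant to say
            cost = ERROR_STRING
    return sys.maxsize if cost == ERROR_STRING else float(cost)


print(run_with_retries("echo 42"))  # -> 42.0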
+ build_run_log = "%s/%s/build_log.txt" % self._log_path + _CreateDirectory(build_run_log) - assert stage in work_functions + with open(build_run_log, "w") as out_log_file: + # Write out the execution information. + out_log_file.write("%s" % log) - work_functions[stage]() + def __LogTestCost(self, log): + """Log the test results for the task. - def FormattedFlags(self): - """Format the optimization flag set of this task. + The test results include the runtime execution time of the test. - Returns: - The formatted optimization flag set that is encapsulated by this task. - """ - return str(self._flag_set.FormattedForUse()) + Args: + log: The test log of this task. + """ - def GetFlags(self): - """Get the optimization flag set of this task. + test_log = "%s/%s/test.txt" % self._log_path - Returns: - The optimization flag set that is encapsulated by this task. - """ + _CreateDirectory(test_log) - return self._flag_set + with open(test_log, "w") as out_file: + test_result = (self._flag_set, self._checksum, self._exe_cost) - def __GetCheckSum(self): - """Get the compilation image checksum of this task. + # Write out the result in the comma-separated format (CSV). + out_file.write("%s,%s,%s\n" % test_result) - Returns: - The compilation image checksum of this task. - """ - - # The checksum should be computed before this method is called. - assert self._checksum is not None - return self._checksum - - def __Compile(self): - """Run a compile. + # The execution information about running the test. + test_run_log = "%s/%s/test_log.txt" % self._log_path - This method compile an image using the present flags, get the image, - test the existent of the image and gathers monitoring information, and sets - the internal cost (fitness) for this set of flags. - """ - - # Format the flags as a string as input to compile command. The unique - # identifier is passed to the compile command. If concurrent processes are - # used to compile different tasks, these processes can use the identifier to - # write to different file. - flags = self._flag_set.FormattedForUse() - command = '%s %s %s' % (Task.BUILD_COMMAND, ' '.join(flags), - self._task_identifier) - - # Try BUILD_TRIES number of times before confirming that the build fails. - for _ in range(BUILD_TRIES): - try: - # Execute the command and get the execution status/results. - p = subprocess.Popen(command.split(), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - (out, err) = p.communicate() - - if out: - out = out.strip() - if out != ERROR_STRING: - # Each build results contains the checksum of the result image, the - # performance cost of the build, the compilation image, the length - # of the build, and the length of the text section of the build. - (checksum, cost, image, file_length, text_length) = out.split() - # Build successfully. - break - - # Build failed. - cost = ERROR_STRING - except _: - # If there is exception getting the cost information of the build, the - # build failed. - cost = ERROR_STRING - - # Convert the build cost from String to integer. The build cost is used to - # compare a task with another task. Set the build cost of the failing task - # to the max integer. The for loop will keep trying until either there is a - # success or BUILD_TRIES number of tries have been conducted. 
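The three logging helpers above share one layout: each task owns a directory named by its UUID under Task.LOG_DIRECTORY, holding steering.txt, build.txt, build_log.txt, test.txt and test_log.txt, each written as a single CSV row or a raw log dump. A minimal sketch of that convention with made-up values; note that os.makedirs(..., exist_ok=True) avoids the check-then-create race in _CreateDirectory:

import os
import uuid

# Mirrors self._log_path = (Task.LOG_DIRECTORY, self._task_identifier).
log_path = ("log-root", uuid.uuid4())

steering_log = "%s/%s/steering.txt" % log_path
os.makedirs(os.path.dirname(steering_log), exist_ok=True)

with open(steering_log, "w") as out_file:
    # One CSV row: flags, checksum, build cost, image, lengths, exe cost.
    out_file.write(
        "%s,%s,%s,%s,%s,%s,%s\n" % ("-O2", "c0ffee", 1.5, "img", 10, 4, 0.9))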
- self._build_cost = sys.maxint if cost == ERROR_STRING else float(cost) - - self._checksum = checksum - self._file_length = file_length - self._text_length = text_length - self._image = image - - self.__LogBuildCost(err) - - def __Test(self): - """__Test the task against benchmark(s) using the input test command.""" - - # Ensure that the task is compiled before being tested. - assert self._image is not None - - # If the task does not compile, no need to test. - if self._image == ERROR_STRING: - self._exe_cost = ERROR_STRING - return - - # The unique identifier is passed to the test command. If concurrent - # processes are used to compile different tasks, these processes can use the - # identifier to write to different file. - command = '%s %s %s' % (Task.TEST_COMMAND, self._image, - self._task_identifier) - - # Try TEST_TRIES number of times before confirming that the build fails. - for _ in range(TEST_TRIES): - try: - p = subprocess.Popen(command.split(), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - (out, err) = p.communicate() - - if out: - out = out.strip() - if out != ERROR_STRING: - # The test results contains the performance cost of the test. - cost = out - # Test successfully. - break - - # Test failed. - cost = ERROR_STRING - except _: - # If there is exception getting the cost information of the test, the - # test failed. The for loop will keep trying until either there is a - # success or TEST_TRIES number of tries have been conducted. - cost = ERROR_STRING - - self._exe_cost = sys.maxint if (cost == ERROR_STRING) else float(cost) - - self.__LogTestCost(err) - - def __SetBuildResult(self, (checksum, build_cost, image, file_length, - text_length)): - self._checksum = checksum - self._build_cost = build_cost - self._image = image - self._file_length = file_length - self._text_length = text_length - - def __GetBuildResult(self): - return (self._checksum, self._build_cost, self._image, self._file_length, - self._text_length) - - def GetTestResult(self): - return self._exe_cost - - def __SetTestResult(self, exe_cost): - self._exe_cost = exe_cost - - def LogSteeringCost(self): - """Log the performance results for the task. - - This method is called by the steering stage and this method writes the - results out to a file. The results include the build and the test results. - """ + _CreateDirectory(test_run_log) - steering_log = '%s/%s/steering.txt' % self._log_path + with open(test_run_log, "w") as out_log_file: + # Append the test log information. + out_log_file.write("%s" % log) - _CreateDirectory(steering_log) + def IsImproved(self, other): + """Compare the current task with another task. - with open(steering_log, 'w') as out_file: - # Include the build and the test results. - steering_result = (self._flag_set, self._checksum, self._build_cost, - self._image, self._file_length, self._text_length, - self._exe_cost) + Args: + other: The other task against which the current task is compared. - # Write out the result in the comma-separated format (CSV). - out_file.write('%s,%s,%s,%s,%s,%s,%s\n' % steering_result) - - def __LogBuildCost(self, log): - """Log the build results for the task. - - The build results include the compilation time of the build, the result - image, the checksum, the file length and the text length of the image. - The file length of the image includes the length of the file of the image. - The text length only includes the length of the text section of the image. - - Args: - log: The build log of this task. 
- """ - - build_result_log = '%s/%s/build.txt' % self._log_path - - _CreateDirectory(build_result_log) - - with open(build_result_log, 'w') as out_file: - build_result = (self._flag_set, self._build_cost, self._image, - self._checksum, self._file_length, self._text_length) - - # Write out the result in the comma-separated format (CSV). - out_file.write('%s,%s,%s,%s,%s,%s\n' % build_result) - - # The build information about running the build. - build_run_log = '%s/%s/build_log.txt' % self._log_path - _CreateDirectory(build_run_log) - - with open(build_run_log, 'w') as out_log_file: - # Write out the execution information. - out_log_file.write('%s' % log) - - def __LogTestCost(self, log): - """Log the test results for the task. - - The test results include the runtime execution time of the test. - - Args: - log: The test log of this task. - """ - - test_log = '%s/%s/test.txt' % self._log_path - - _CreateDirectory(test_log) - - with open(test_log, 'w') as out_file: - test_result = (self._flag_set, self._checksum, self._exe_cost) - - # Write out the result in the comma-separated format (CSV). - out_file.write('%s,%s,%s\n' % test_result) - - # The execution information about running the test. - test_run_log = '%s/%s/test_log.txt' % self._log_path - - _CreateDirectory(test_run_log) - - with open(test_run_log, 'w') as out_log_file: - # Append the test log information. - out_log_file.write('%s' % log) - - def IsImproved(self, other): - """Compare the current task with another task. - - Args: - other: The other task against which the current task is compared. - - Returns: - True if this task has improvement upon the other task. - """ + Returns: + True if this task has improvement upon the other task. + """ - # The execution costs must have been initiated. - assert self._exe_cost is not None - assert other.GetTestResult() is not None + # The execution costs must have been initiated. + assert self._exe_cost is not None + assert other.GetTestResult() is not None - return self._exe_cost < other.GetTestResult() + return self._exe_cost < other.GetTestResult() diff --git a/bestflags/task_test.py b/bestflags/task_test.py index fa43bc7d..1b559bbb 100644 --- a/bestflags/task_test.py +++ b/bestflags/task_test.py @@ -6,7 +6,7 @@ Part of the Chrome build flags optimization. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import random import sys @@ -15,6 +15,7 @@ import unittest import task from task import Task + # The number of flags be tested. NUM_FLAGS = 20 @@ -26,149 +27,159 @@ RANDOM_TESTRESULT = 100 class MockFlagSet(object): - """This class emulates a set of flags. - - It returns the flags and hash value, when the FormattedForUse method and the - __hash__ method is called, respectively. These values are initialized when the - MockFlagSet instance is constructed. - """ - - def __init__(self, flags=0, hash_value=-1): - self._flags = flags - self._hash_value = hash_value - - def __eq__(self, other): - assert isinstance(other, MockFlagSet) - return self._flags == other.FormattedForUse() - - def FormattedForUse(self): - return self._flags - - def __hash__(self): - return self._hash_value - - def GetHash(self): - return self._hash_value - - -class TaskTest(unittest.TestCase): - """This class test the Task class.""" - - def testEqual(self): - """Test the equal method of the task. - - Two tasks are equal if and only if their encapsulated flag_sets are equal. 
- """ - - flags = range(NUM_FLAGS) - - # Two tasks having the same flag set should be equivalent. - flag_sets = [MockFlagSet(flag) for flag in flags] - for flag_set in flag_sets: - assert Task(flag_set) == Task(flag_set) + """This class emulates a set of flags. - # Two tasks having different flag set should be different. - for flag_set in flag_sets: - test_task = Task(flag_set) - other_flag_sets = [flags for flags in flag_sets if flags != flag_set] - for flag_set1 in other_flag_sets: - assert test_task != Task(flag_set1) - - def testHash(self): - """Test the hash method of the task. - - Two tasks are equal if and only if their encapsulated flag_sets are equal. + It returns the flags and hash value, when the FormattedForUse method and the + __hash__ method is called, respectively. These values are initialized when the + MockFlagSet instance is constructed. """ - # Random identifier that is not relevant in this test. - identifier = random.randint(-sys.maxint - 1, -1) - - flag_sets = [MockFlagSet(identifier, value) for value in range(NUM_FLAGS)] - for flag_set in flag_sets: - # The hash of a task is the same as the hash of its flag set. - hash_task = Task(flag_set) - hash_value = hash(hash_task) - assert hash_value == flag_set.GetHash() - - # The hash of a task does not change. - assert hash_value == hash(hash_task) + def __init__(self, flags=0, hash_value=-1): + self._flags = flags + self._hash_value = hash_value - def testGetIdentifier(self): - """Test the get identifier method of the task. + def __eq__(self, other): + assert isinstance(other, MockFlagSet) + return self._flags == other.FormattedForUse() - The get identifier method should returns the flag set in the build stage. - """ - - flag_sets = [MockFlagSet(flag) for flag in range(NUM_FLAGS)] - for flag_set in flag_sets: - identifier_task = Task(flag_set) + def FormattedForUse(self): + return self._flags - identifier = identifier_task.GetIdentifier(task.BUILD_STAGE) + def __hash__(self): + return self._hash_value - # The task formats the flag set into a string. - assert identifier == str(flag_set.FormattedForUse()) + def GetHash(self): + return self._hash_value - def testGetSetResult(self): - """Test the get and set result methods of the task. - - The get result method should return the same results as were set. - """ - flag_sets = [MockFlagSet(flag) for flag in range(NUM_FLAGS)] - for flag_set in flag_sets: - result_task = Task(flag_set) - - # The get result method should return the same results as were set, in - # build stage. Currently, the build result is a 5-element tuple containing - # the checksum of the result image, the performance cost of the build, the - # compilation image, the length of the build, and the length of the text - # section of the build. - result = tuple([random.randint(0, RANDOM_BUILD_RESULT) for _ in range(5)]) - result_task.SetResult(task.BUILD_STAGE, result) - assert result == result_task.GetResult(task.BUILD_STAGE) - - # The checksum is the identifier of the test stage. - identifier = result_task.GetIdentifier(task.TEST_STAGE) - # The first element of the result tuple is the checksum. - assert identifier == result[0] - - # The get result method should return the same results as were set, in - # test stage. - random_test_result = random.randint(0, RANDOM_TESTRESULT) - result_task.SetResult(task.TEST_STAGE, random_test_result) - test_result = result_task.GetResult(task.TEST_STAGE) - assert test_result == random_test_result - - def testDone(self): - """Test the done methods of the task. 
- - The done method should return false is the task has not perform and return - true after the task is finished. - """ - - flags = range(NUM_FLAGS) - - flag_sets = [MockFlagSet(flag) for flag in flags] - for flag_set in flag_sets: - work_task = Task(flag_set) - - # The task has not been compiled nor tested. - assert not work_task.Done(task.TEST_STAGE) - assert not work_task.Done(task.BUILD_STAGE) - - # After the task has been compiled, it should indicate finished in BUILD - # stage. - result = tuple([random.randint(0, RANDOM_BUILD_RESULT) for _ in range(5)]) - work_task.SetResult(task.BUILD_STAGE, result) - assert not work_task.Done(task.TEST_STAGE) - assert work_task.Done(task.BUILD_STAGE) - - # After the task has been tested, it should indicate finished in TEST - # stage. - work_task.SetResult(task.TEST_STAGE, random.randint(0, RANDOM_TESTRESULT)) - assert work_task.Done(task.TEST_STAGE) - assert work_task.Done(task.BUILD_STAGE) +class TaskTest(unittest.TestCase): + """This class test the Task class.""" + def testEqual(self): + """Test the equal method of the task. + + Two tasks are equal if and only if their encapsulated flag_sets are equal. + """ + + flags = range(NUM_FLAGS) -if __name__ == '__main__': - unittest.main() + # Two tasks having the same flag set should be equivalent. + flag_sets = [MockFlagSet(flag) for flag in flags] + for flag_set in flag_sets: + assert Task(flag_set) == Task(flag_set) + + # Two tasks having different flag set should be different. + for flag_set in flag_sets: + test_task = Task(flag_set) + other_flag_sets = [ + flags for flags in flag_sets if flags != flag_set + ] + for flag_set1 in other_flag_sets: + assert test_task != Task(flag_set1) + + def testHash(self): + """Test the hash method of the task. + + Two tasks are equal if and only if their encapsulated flag_sets are equal. + """ + + # Random identifier that is not relevant in this test. + identifier = random.randint(-sys.maxint - 1, -1) + + flag_sets = [ + MockFlagSet(identifier, value) for value in range(NUM_FLAGS) + ] + for flag_set in flag_sets: + # The hash of a task is the same as the hash of its flag set. + hash_task = Task(flag_set) + hash_value = hash(hash_task) + assert hash_value == flag_set.GetHash() + + # The hash of a task does not change. + assert hash_value == hash(hash_task) + + def testGetIdentifier(self): + """Test the get identifier method of the task. + + The get identifier method should returns the flag set in the build stage. + """ + + flag_sets = [MockFlagSet(flag) for flag in range(NUM_FLAGS)] + for flag_set in flag_sets: + identifier_task = Task(flag_set) + + identifier = identifier_task.GetIdentifier(task.BUILD_STAGE) + + # The task formats the flag set into a string. + assert identifier == str(flag_set.FormattedForUse()) + + def testGetSetResult(self): + """Test the get and set result methods of the task. + + The get result method should return the same results as were set. + """ + + flag_sets = [MockFlagSet(flag) for flag in range(NUM_FLAGS)] + for flag_set in flag_sets: + result_task = Task(flag_set) + + # The get result method should return the same results as were set, in + # build stage. Currently, the build result is a 5-element tuple containing + # the checksum of the result image, the performance cost of the build, the + # compilation image, the length of the build, and the length of the text + # section of the build. 
+ result = tuple( + [random.randint(0, RANDOM_BUILD_RESULT) for _ in range(5)] + ) + result_task.SetResult(task.BUILD_STAGE, result) + assert result == result_task.GetResult(task.BUILD_STAGE) + + # The checksum is the identifier of the test stage. + identifier = result_task.GetIdentifier(task.TEST_STAGE) + # The first element of the result tuple is the checksum. + assert identifier == result[0] + + # The get result method should return the same results as were set, in + # test stage. + random_test_result = random.randint(0, RANDOM_TESTRESULT) + result_task.SetResult(task.TEST_STAGE, random_test_result) + test_result = result_task.GetResult(task.TEST_STAGE) + assert test_result == random_test_result + + def testDone(self): + """Test the done methods of the task. + + The done method should return false is the task has not perform and return + true after the task is finished. + """ + + flags = range(NUM_FLAGS) + + flag_sets = [MockFlagSet(flag) for flag in flags] + for flag_set in flag_sets: + work_task = Task(flag_set) + + # The task has not been compiled nor tested. + assert not work_task.Done(task.TEST_STAGE) + assert not work_task.Done(task.BUILD_STAGE) + + # After the task has been compiled, it should indicate finished in BUILD + # stage. + result = tuple( + [random.randint(0, RANDOM_BUILD_RESULT) for _ in range(5)] + ) + work_task.SetResult(task.BUILD_STAGE, result) + assert not work_task.Done(task.TEST_STAGE) + assert work_task.Done(task.BUILD_STAGE) + + # After the task has been tested, it should indicate finished in TEST + # stage. + work_task.SetResult( + task.TEST_STAGE, random.randint(0, RANDOM_TESTRESULT) + ) + assert work_task.Done(task.TEST_STAGE) + assert work_task.Done(task.BUILD_STAGE) + + +if __name__ == "__main__": + unittest.main() diff --git a/bestflags/testing_batch.py b/bestflags/testing_batch.py index 902500a3..d8a7932f 100644 --- a/bestflags/testing_batch.py +++ b/bestflags/testing_batch.py @@ -9,7 +9,7 @@ Test the best branching hill climbing algorithms, genetic algorithm and iterative elimination algorithm. """ -__author__ = 'yuhenglong@google.com (Yuheng Long)' +__author__ = "yuhenglong@google.com (Yuheng Long)" import multiprocessing import random @@ -29,6 +29,7 @@ from task import BUILD_STAGE from task import Task from task import TEST_STAGE + # The number of flags be tested. NUM_FLAGS = 5 @@ -43,408 +44,413 @@ MUTATION_RATE = 0.03 def _GenerateRandomRasks(specs): - """Generate a task that has random values. + """Generate a task that has random values. - Args: - specs: A list of spec from which the flag set is created. + Args: + specs: A list of spec from which the flag set is created. - Returns: - A set containing a task that has random values. - """ + Returns: + A set containing a task that has random values. + """ - flag_set = [] + flag_set = [] - for spec in specs: - numeric_flag_match = flags.Search(spec) - if numeric_flag_match: - # Numeric flags. - start = int(numeric_flag_match.group('start')) - end = int(numeric_flag_match.group('end')) + for spec in specs: + numeric_flag_match = flags.Search(spec) + if numeric_flag_match: + # Numeric flags. + start = int(numeric_flag_match.group("start")) + end = int(numeric_flag_match.group("end")) - value = random.randint(start - 1, end - 1) - if value != start - 1: - # If the value falls in the range, this flag is enabled. - flag_set.append(Flag(spec, value)) - else: - # Boolean flags. 
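The task generators in testing_batch.py lean on flags.Search to pull the numeric bounds out of a spec such as -finline-limit=[1-1000] via named groups 'start' and 'end'. flags.py is not part of this diff, so the regex below is an assumption about its behavior, but it shows how the random draw enables or disables a numeric flag:

import random
import re

# Assumed shape of the pattern in flags.py: extracts the inclusive bounds.
_NUMERIC_FLAG = re.compile(r"\[(?P<start>\d+)-(?P<end>\d+)\]")

spec = "-finline-limit=[1-1000]"
match = _NUMERIC_FLAG.search(spec)
start, end = int(match.group("start")), int(match.group("end"))

# A draw of start - 1 means "flag disabled"; anything in range enables it.
value = random.randint(start - 1, end - 1)
if value != start - 1:
    print("enabled with value", value)
else:
    print("disabled")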
- if random.randint(0, 1): - flag_set.append(Flag(spec)) + value = random.randint(start - 1, end - 1) + if value != start - 1: + # If the value falls in the range, this flag is enabled. + flag_set.append(Flag(spec, value)) + else: + # Boolean flags. + if random.randint(0, 1): + flag_set.append(Flag(spec)) - return set([Task(FlagSet(flag_set))]) + return set([Task(FlagSet(flag_set))]) def _GenerateAllFlagsTasks(specs): - """Generate a task that all the flags are enable. + """Generate a task that all the flags are enable. - All the boolean flags in the specs will be enabled and all the numeric flag - with have the largest legal value. + All the boolean flags in the specs will be enabled and all the numeric flag + with have the largest legal value. - Args: - specs: A list of spec from which the flag set is created. + Args: + specs: A list of spec from which the flag set is created. - Returns: - A set containing a task that has all flags enabled. - """ + Returns: + A set containing a task that has all flags enabled. + """ - flag_set = [] + flag_set = [] - for spec in specs: - numeric_flag_match = flags.Search(spec) + for spec in specs: + numeric_flag_match = flags.Search(spec) - if numeric_flag_match: - value = (int(numeric_flag_match.group('end')) - 1) - else: - value = -1 - flag_set.append(Flag(spec, value)) + if numeric_flag_match: + value = int(numeric_flag_match.group("end")) - 1 + else: + value = -1 + flag_set.append(Flag(spec, value)) - return set([Task(FlagSet(flag_set))]) + return set([Task(FlagSet(flag_set))]) def _GenerateNoFlagTask(): - return set([Task(FlagSet([]))]) + return set([Task(FlagSet([]))]) def GenerateRandomGATasks(specs, num_tasks, num_trials): - """Generate a set of tasks for the Genetic Algorithm. + """Generate a set of tasks for the Genetic Algorithm. - Args: - specs: A list of spec from which the flag set is created. - num_tasks: number of tasks that should be generated. - num_trials: the maximum number of tries should be attempted to generate the - set of tasks. + Args: + specs: A list of spec from which the flag set is created. + num_tasks: number of tasks that should be generated. + num_trials: the maximum number of tries should be attempted to generate the + set of tasks. - Returns: - A set of randomly generated tasks. - """ + Returns: + A set of randomly generated tasks. + """ - tasks = set([]) + tasks = set([]) - total_trials = 0 - while len(tasks) < num_tasks and total_trials < num_trials: - new_flag = FlagSet([Flag(spec) for spec in specs if random.randint(0, 1)]) - new_task = GATask(new_flag) + total_trials = 0 + while len(tasks) < num_tasks and total_trials < num_trials: + new_flag = FlagSet( + [Flag(spec) for spec in specs if random.randint(0, 1)] + ) + new_task = GATask(new_flag) - if new_task in tasks: - total_trials += 1 - else: - tasks.add(new_task) - total_trials = 0 + if new_task in tasks: + total_trials += 1 + else: + tasks.add(new_task) + total_trials = 0 - return tasks + return tasks def _GenerateInitialFlags(specs, spec): - """Generate the flag_set of a task in the flag elimination algorithm. - - Set the value of all the flags to the largest value, except for the flag that - contains spec. - - For example, if the specs are [-finline-limit=[1-1000], -fstrict-aliasing] and - the spec is -finline-limit=[1-1000], then the result is - [-finline-limit=[1-1000]:-finline-limit=998, - -fstrict-aliasing:-fstrict-aliasing] - - Args: - specs: an array of specifications from which the result flag_set is created. 
- The flag_set contains one and only one flag that contain the specification + """Generate the flag_set of a task in the flag elimination algorithm. + + Set the value of all the flags to the largest value, except for the flag that + contains spec. + + For example, if the specs are [-finline-limit=[1-1000], -fstrict-aliasing] and + the spec is -finline-limit=[1-1000], then the result is + [-finline-limit=[1-1000]:-finline-limit=998, + -fstrict-aliasing:-fstrict-aliasing] + + Args: + specs: an array of specifications from which the result flag_set is created. + The flag_set contains one and only one flag that contain the specification + spec. + spec: The flag containing this spec should have a value that is smaller than + the highest value the flag can have. + + Returns: + An array of flags, each of which contains one spec in specs. All the values + of the flags are the largest values in specs, expect the one that contains spec. - spec: The flag containing this spec should have a value that is smaller than - the highest value the flag can have. - - Returns: - An array of flags, each of which contains one spec in specs. All the values - of the flags are the largest values in specs, expect the one that contains - spec. - """ + """ - flag_set = [] - for other_spec in specs: - numeric_flag_match = flags.Search(other_spec) - # Found the spec in the array specs. - if other_spec == spec: - # Numeric flag will have a value that is smaller than the largest value - # and Boolean flag will be deleted. - if numeric_flag_match: - end = int(numeric_flag_match.group('end')) - flag_set.append(flags.Flag(other_spec, end - 2)) + flag_set = [] + for other_spec in specs: + numeric_flag_match = flags.Search(other_spec) + # Found the spec in the array specs. + if other_spec == spec: + # Numeric flag will have a value that is smaller than the largest value + # and Boolean flag will be deleted. + if numeric_flag_match: + end = int(numeric_flag_match.group("end")) + flag_set.append(flags.Flag(other_spec, end - 2)) - continue + continue - # other_spec != spec - if numeric_flag_match: - # numeric flag - end = int(numeric_flag_match.group('end')) - flag_set.append(flags.Flag(other_spec, end - 1)) - continue + # other_spec != spec + if numeric_flag_match: + # numeric flag + end = int(numeric_flag_match.group("end")) + flag_set.append(flags.Flag(other_spec, end - 1)) + continue - # boolean flag - flag_set.append(flags.Flag(other_spec)) + # boolean flag + flag_set.append(flags.Flag(other_spec)) - return flag_set + return flag_set def _GenerateAllIterativeEliminationTasks(specs): - """Generate the initial tasks for the negative flag elimination algorithm. + """Generate the initial tasks for the negative flag elimination algorithm. - Generate the base line task that turns on all the boolean flags and sets the - value to be the largest value for the numeric flag. + Generate the base line task that turns on all the boolean flags and sets the + value to be the largest value for the numeric flag. - For example, if the specs are [-finline-limit=[1-1000], -fstrict-aliasing], - the base line is [-finline-limit=[1-1000]:-finline-limit=999, - -fstrict-aliasing:-fstrict-aliasing] + For example, if the specs are [-finline-limit=[1-1000], -fstrict-aliasing], + the base line is [-finline-limit=[1-1000]:-finline-limit=999, + -fstrict-aliasing:-fstrict-aliasing] - Generate a set of task, each turns off one of the flag or sets a value that is - smaller than the largest value for the flag. 
+ Generate a set of task, each turns off one of the flag or sets a value that is + smaller than the largest value for the flag. - Args: - specs: an array of specifications from which the result flag_set is created. + Args: + specs: an array of specifications from which the result flag_set is created. - Returns: - An array containing one generation of the initial tasks for the negative - flag elimination algorithm. - """ + Returns: + An array containing one generation of the initial tasks for the negative + flag elimination algorithm. + """ - # The set of tasks to be generated. - results = set([]) - flag_set = [] + # The set of tasks to be generated. + results = set([]) + flag_set = [] - for spec in specs: - numeric_flag_match = flags.Search(spec) - if numeric_flag_match: - # Numeric flag. - end_value = int(numeric_flag_match.group('end')) - flag_set.append(flags.Flag(spec, end_value - 1)) - continue + for spec in specs: + numeric_flag_match = flags.Search(spec) + if numeric_flag_match: + # Numeric flag. + end_value = int(numeric_flag_match.group("end")) + flag_set.append(flags.Flag(spec, end_value - 1)) + continue - # Boolean flag. - flag_set.append(flags.Flag(spec)) + # Boolean flag. + flag_set.append(flags.Flag(spec)) - # The base line task that set all the flags to their largest values. - parent_task = Task(flags.FlagSet(flag_set)) - results.add(parent_task) + # The base line task that set all the flags to their largest values. + parent_task = Task(flags.FlagSet(flag_set)) + results.add(parent_task) - for spec in specs: - results.add(Task(flags.FlagSet(_GenerateInitialFlags(specs, spec)))) + for spec in specs: + results.add(Task(flags.FlagSet(_GenerateInitialFlags(specs, spec)))) - return [IterativeEliminationFirstGeneration(results, parent_task)] + return [IterativeEliminationFirstGeneration(results, parent_task)] def _ComputeCost(cost_func, specs, flag_set): - """Compute the mock cost of the flag_set using the input cost function. + """Compute the mock cost of the flag_set using the input cost function. - All the boolean flags in the specs will be enabled and all the numeric flag - with have the largest legal value. + All the boolean flags in the specs will be enabled and all the numeric flag + with have the largest legal value. - Args: - cost_func: The cost function which is used to compute the mock cost of a - dictionary of flags. - specs: All the specs that are used in the algorithm. This is used to check - whether certain flag is disabled in the flag_set dictionary. - flag_set: a dictionary of the spec and flag pairs. + Args: + cost_func: The cost function which is used to compute the mock cost of a + dictionary of flags. + specs: All the specs that are used in the algorithm. This is used to check + whether certain flag is disabled in the flag_set dictionary. + flag_set: a dictionary of the spec and flag pairs. - Returns: - The mock cost of the input dictionary of the flags. - """ + Returns: + The mock cost of the input dictionary of the flags. + """ - values = [] + values = [] - for spec in specs: - # If a flag is enabled, its value is added. Otherwise a padding 0 is added. - values.append(flag_set[spec].GetValue() if spec in flag_set else 0) + for spec in specs: + # If a flag is enabled, its value is added. Otherwise a padding 0 is added. + values.append(flag_set[spec].GetValue() if spec in flag_set else 0) - # The cost function string can use the values array. - return eval(cost_func) + # The cost function string can use the values array. 
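_ComputeCost turns a user-supplied expression string into a number by exposing a values list and calling eval, which keeps the mock tests declarative: 'sum(values[0:len(values)])' scores a flag set by the sum of its flag values. A standalone sketch of the idea (reasonable for trusted test strings only; eval on untrusted input is unsafe):

def compute_mock_cost(cost_func, values):
    # The expression string may reference the local name 'values',
    # exactly as the mock cost functions in these tests do.
    return eval(cost_func)


print(compute_mock_cost("sum(values[0:len(values)])", [3, 0, 4]))  # -> 7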
+ return eval(cost_func) def _GenerateTestFlags(num_flags, upper_bound, file_name): - """Generate a set of mock flags and write it to a configuration file. + """Generate a set of mock flags and write it to a configuration file. - Generate a set of mock flags + Generate a set of mock flags - Args: - num_flags: Number of numeric flags to be generated. - upper_bound: The value of the upper bound of the range. - file_name: The configuration file name into which the mock flags are put. - """ + Args: + num_flags: Number of numeric flags to be generated. + upper_bound: The value of the upper bound of the range. + file_name: The configuration file name into which the mock flags are put. + """ - with open(file_name, 'w') as output_file: - num_flags = int(num_flags) - upper_bound = int(upper_bound) - for i in range(num_flags): - output_file.write('%s=[1-%d]\n' % (i, upper_bound)) + with open(file_name, "w") as output_file: + num_flags = int(num_flags) + upper_bound = int(upper_bound) + for i in range(num_flags): + output_file.write("%s=[1-%d]\n" % (i, upper_bound)) def _TestAlgorithm(cost_func, specs, generations, best_result): - """Test the best result the algorithm should return. - - Set up the framework, run the input algorithm and verify the result. - - Args: - cost_func: The cost function which is used to compute the mock cost of a - dictionary of flags. - specs: All the specs that are used in the algorithm. This is used to check - whether certain flag is disabled in the flag_set dictionary. - generations: The initial generations to be evaluated. - best_result: The expected best result of the algorithm. If best_result is - -1, the algorithm may or may not return the best value. Therefore, no - assertion will be inserted. - """ - - # Set up the utilities to test the framework. - manager = multiprocessing.Manager() - input_queue = manager.Queue() - output_queue = manager.Queue() - pp_steer = multiprocessing.Process( - target=Steering, - args=(set(), generations, output_queue, input_queue)) - pp_steer.start() - - # The best result of the algorithm so far. - result = sys.maxint - - while True: - task = input_queue.get() - - # POISONPILL signal the ends of the algorithm. - if task == pipeline_process.POISONPILL: - break - - task.SetResult(BUILD_STAGE, (0, 0, 0, 0, 0)) - - # Compute the mock cost for the task. - task_result = _ComputeCost(cost_func, specs, task.GetFlags()) - task.SetResult(TEST_STAGE, task_result) - - # If the mock result of the current task is the best so far, set this - # result to be the best result. - if task_result < result: - result = task_result - - output_queue.put(task) - - pp_steer.join() - - # Only do this test when best_result is not -1. - if best_result != -1: - assert best_result == result - - -class MockAlgorithmsTest(unittest.TestCase): - """This class mock tests different steering algorithms. - - The steering algorithms are responsible for generating the next set of tasks - to run in each iteration. This class does a functional testing on the - algorithms. It mocks out the computation of the fitness function from the - build and test phases by letting the user define the fitness function. - """ - - def _GenerateFlagSpecifications(self): - """Generate the testing specifications.""" - - mock_test_file = 'scale_mock_test' - _GenerateTestFlags(NUM_FLAGS, FLAG_RANGES, mock_test_file) - return flags.ReadConf(mock_test_file) - - def testBestHillClimb(self): - """Test the best hill climb algorithm. - - Test whether it finds the best results as expected. 
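
To make the contract in _ComputeCost above concrete: the cost function travels through this harness as a Python expression string that may reference a local list named values, so any flag assignment can be scored without a real build or test run. Below is a minimal sketch of that protocol; FakeFlag is a hypothetical stand-in for flags.Flag, not a class from the patch.

class FakeFlag(object):
    # Stand-in for flags.Flag; only GetValue() matters for the cost protocol.
    def __init__(self, value):
        self._value = value

    def GetValue(self):
        return self._value


def mock_cost(cost_func, specs, flag_set):
    # Disabled flags contribute a padding 0; enabled flags contribute their value.
    values = [flag_set[s].GetValue() if s in flag_set else 0 for s in specs]
    # The cost-function string may reference the local 'values' list.
    return eval(cost_func)  # pylint: disable=eval-used


specs = ['-finline-limit=[1-1000]', '-fstrict-aliasing']
flag_set = {'-finline-limit=[1-1000]': FakeFlag(999)}
assert mock_cost('sum(values[0:len(values)])', specs, flag_set) == 999
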
+    """Test the best result the algorithm should return.
+
+    Set up the framework, run the input algorithm and verify the result.
+
+    Args:
+      cost_func: The cost function which is used to compute the mock cost of a
+        dictionary of flags.
+      specs: All the specs that are used in the algorithm. This is used to check
+        whether certain flag is disabled in the flag_set dictionary.
+      generations: The initial generations to be evaluated.
+      best_result: The expected best result of the algorithm. If best_result is
+        -1, the algorithm may or may not return the best value. Therefore, no
+        assertion will be inserted.
     """

-    # Initiate the build/test command and the log directory.
-    Task.InitLogCommand(None, None, 'output')
+    # Set up the utilities to test the framework.
+    manager = multiprocessing.Manager()
+    input_queue = manager.Queue()
+    output_queue = manager.Queue()
+    pp_steer = multiprocessing.Process(
+        target=Steering, args=(set(), generations, output_queue, input_queue)
+    )
+    pp_steer.start()

-    # Generate the testing specs.
-    specs = self._GenerateFlagSpecifications()
+    # The best result of the algorithm so far.
+    result = sys.maxint

-    # Generate the initial generations for a test whose cost function is the
-    # summation of the values of all the flags.
-    generation_tasks = _GenerateAllFlagsTasks(specs)
-    generations = [HillClimbingBestBranch(generation_tasks, set([]), specs)]
+    while True:
+        task = input_queue.get()

-    # Test the algorithm. The cost function is the summation of all the values
-    # of all the flags. Therefore, the best value is supposed to be 0, i.e.,
-    # when all the flags are disabled.
-    _TestAlgorithm('sum(values[0:len(values)])', specs, generations, 0)
+        # POISONPILL signals the end of the algorithm.
+        if task == pipeline_process.POISONPILL:
+            break

-    # This test uses a cost function that is the negative of the previous cost
-    # function. Therefore, the best result should be found in task with all the
-    # flags enabled.
-    cost_function = 'sys.maxint - sum(values[0:len(values)])'
-    all_flags = list(generation_tasks)[0].GetFlags()
-    cost = _ComputeCost(cost_function, specs, all_flags)
+        task.SetResult(BUILD_STAGE, (0, 0, 0, 0, 0))

-    # Generate the initial generations.
-    generation_tasks = _GenerateNoFlagTask()
-    generations = [HillClimbingBestBranch(generation_tasks, set([]), specs)]
+        # Compute the mock cost for the task.
+        task_result = _ComputeCost(cost_func, specs, task.GetFlags())
+        task.SetResult(TEST_STAGE, task_result)

-    # Test the algorithm. The cost function is negative of the summation of all
-    # the values of all the flags. Therefore, the best value is supposed to be
-    # 0, i.e., when all the flags are disabled.
-    _TestAlgorithm(cost_function, specs, generations, cost)
+        # If the mock result of the current task is the best so far, set this
+        # result to be the best result.
+        if task_result < result:
+            result = task_result

-  def testGeneticAlgorithm(self):
-    """Test the Genetic Algorithm.
+        output_queue.put(task)

-    Do a functional testing here and see how well it scales.
-    """
-
-    # Initiate the build/test command and the log directory.
-    Task.InitLogCommand(None, None, 'output')
+    pp_steer.join()

-    # Generate the testing specs.
-    specs = self._GenerateFlagSpecifications()
-    # Initiate the build/test command and the log directory.
-    GAGeneration.InitMetaData(STOP_THRESHOLD, NUM_CHROMOSOMES, NUM_TRIALS,
-                              specs, MUTATION_RATE)
+    # Only do this test when best_result is not -1.
+    if best_result != -1:
+        assert best_result == result

-    # Generate the initial generations.
- generation_tasks = GenerateRandomGATasks(specs, NUM_CHROMOSOMES, NUM_TRIALS) - generations = [GAGeneration(generation_tasks, set([]), 0)] - # Test the algorithm. - _TestAlgorithm('sum(values[0:len(values)])', specs, generations, -1) - cost_func = 'sys.maxint - sum(values[0:len(values)])' - _TestAlgorithm(cost_func, specs, generations, -1) - - def testIterativeElimination(self): - """Test the iterative elimination algorithm. +class MockAlgorithmsTest(unittest.TestCase): + """This class mock tests different steering algorithms. - Test whether it finds the best results as expected. + The steering algorithms are responsible for generating the next set of tasks + to run in each iteration. This class does a functional testing on the + algorithms. It mocks out the computation of the fitness function from the + build and test phases by letting the user define the fitness function. """ - # Initiate the build/test command and the log directory. - Task.InitLogCommand(None, None, 'output') - - # Generate the testing specs. - specs = self._GenerateFlagSpecifications() - - # Generate the initial generations. The generation contains the base line - # task that turns on all the flags and tasks that each turn off one of the - # flags. - generations = _GenerateAllIterativeEliminationTasks(specs) - - # Test the algorithm. The cost function is the summation of all the values - # of all the flags. Therefore, the best value is supposed to be 0, i.e., - # when all the flags are disabled. - _TestAlgorithm('sum(values[0:len(values)])', specs, generations, 0) - - # This test uses a cost function that is the negative of the previous cost - # function. Therefore, the best result should be found in task with all the - # flags enabled. - all_flags_tasks = _GenerateAllFlagsTasks(specs) - cost_function = 'sys.maxint - sum(values[0:len(values)])' - # Compute the cost of the task that turns on all the flags. - all_flags = list(all_flags_tasks)[0].GetFlags() - cost = _ComputeCost(cost_function, specs, all_flags) - - # Test the algorithm. The cost function is negative of the summation of all - # the values of all the flags. Therefore, the best value is supposed to be - # 0, i.e., when all the flags are disabled. - # The concrete type of the generation decides how the next generation will - # be generated. - _TestAlgorithm(cost_function, specs, generations, cost) - - -if __name__ == '__main__': - unittest.main() + def _GenerateFlagSpecifications(self): + """Generate the testing specifications.""" + + mock_test_file = "scale_mock_test" + _GenerateTestFlags(NUM_FLAGS, FLAG_RANGES, mock_test_file) + return flags.ReadConf(mock_test_file) + + def testBestHillClimb(self): + """Test the best hill climb algorithm. + + Test whether it finds the best results as expected. + """ + + # Initiate the build/test command and the log directory. + Task.InitLogCommand(None, None, "output") + + # Generate the testing specs. + specs = self._GenerateFlagSpecifications() + + # Generate the initial generations for a test whose cost function is the + # summation of the values of all the flags. + generation_tasks = _GenerateAllFlagsTasks(specs) + generations = [HillClimbingBestBranch(generation_tasks, set([]), specs)] + + # Test the algorithm. The cost function is the summation of all the values + # of all the flags. Therefore, the best value is supposed to be 0, i.e., + # when all the flags are disabled. 
+ _TestAlgorithm("sum(values[0:len(values)])", specs, generations, 0) + + # This test uses a cost function that is the negative of the previous cost + # function. Therefore, the best result should be found in task with all the + # flags enabled. + cost_function = "sys.maxint - sum(values[0:len(values)])" + all_flags = list(generation_tasks)[0].GetFlags() + cost = _ComputeCost(cost_function, specs, all_flags) + + # Generate the initial generations. + generation_tasks = _GenerateNoFlagTask() + generations = [HillClimbingBestBranch(generation_tasks, set([]), specs)] + + # Test the algorithm. The cost function is negative of the summation of all + # the values of all the flags. Therefore, the best value is supposed to be + # 0, i.e., when all the flags are disabled. + _TestAlgorithm(cost_function, specs, generations, cost) + + def testGeneticAlgorithm(self): + """Test the Genetic Algorithm. + + Do a functional testing here and see how well it scales. + """ + + # Initiate the build/test command and the log directory. + Task.InitLogCommand(None, None, "output") + + # Generate the testing specs. + specs = self._GenerateFlagSpecifications() + # Initiate the build/test command and the log directory. + GAGeneration.InitMetaData( + STOP_THRESHOLD, NUM_CHROMOSOMES, NUM_TRIALS, specs, MUTATION_RATE + ) + + # Generate the initial generations. + generation_tasks = GenerateRandomGATasks( + specs, NUM_CHROMOSOMES, NUM_TRIALS + ) + generations = [GAGeneration(generation_tasks, set([]), 0)] + + # Test the algorithm. + _TestAlgorithm("sum(values[0:len(values)])", specs, generations, -1) + cost_func = "sys.maxint - sum(values[0:len(values)])" + _TestAlgorithm(cost_func, specs, generations, -1) + + def testIterativeElimination(self): + """Test the iterative elimination algorithm. + + Test whether it finds the best results as expected. + """ + + # Initiate the build/test command and the log directory. + Task.InitLogCommand(None, None, "output") + + # Generate the testing specs. + specs = self._GenerateFlagSpecifications() + + # Generate the initial generations. The generation contains the base line + # task that turns on all the flags and tasks that each turn off one of the + # flags. + generations = _GenerateAllIterativeEliminationTasks(specs) + + # Test the algorithm. The cost function is the summation of all the values + # of all the flags. Therefore, the best value is supposed to be 0, i.e., + # when all the flags are disabled. + _TestAlgorithm("sum(values[0:len(values)])", specs, generations, 0) + + # This test uses a cost function that is the negative of the previous cost + # function. Therefore, the best result should be found in task with all the + # flags enabled. + all_flags_tasks = _GenerateAllFlagsTasks(specs) + cost_function = "sys.maxint - sum(values[0:len(values)])" + # Compute the cost of the task that turns on all the flags. + all_flags = list(all_flags_tasks)[0].GetFlags() + cost = _ComputeCost(cost_function, specs, all_flags) + + # Test the algorithm. The cost function is negative of the summation of all + # the values of all the flags. Therefore, the best value is supposed to be + # 0, i.e., when all the flags are disabled. + # The concrete type of the generation decides how the next generation will + # be generated. 
+ _TestAlgorithm(cost_function, specs, generations, cost) + + +if __name__ == "__main__": + unittest.main() diff --git a/binary_search_tool/binary_search_perforce.py b/binary_search_tool/binary_search_perforce.py index e60c972a..b4332ab6 100755 --- a/binary_search_tool/binary_search_perforce.py +++ b/binary_search_tool/binary_search_perforce.py @@ -8,8 +8,8 @@ from __future__ import division from __future__ import print_function -import math import argparse +import math import os import re import sys @@ -18,496 +18,562 @@ import tempfile from cros_utils import command_executer from cros_utils import logger + verbose = True def _GetP4ClientSpec(client_name, p4_paths): - p4_string = '' - for p4_path in p4_paths: - if ' ' not in p4_path: - p4_string += ' -a %s' % p4_path - else: - p4_string += ' -a "' + (' //' + client_name + '/').join(p4_path) + '"' - - return p4_string - - -def GetP4Command(client_name, p4_port, p4_paths, checkoutdir, p4_snapshot=''): - command = '' - - if p4_snapshot: - command += 'mkdir -p ' + checkoutdir + p4_string = "" for p4_path in p4_paths: - real_path = p4_path[1] - if real_path.endswith('...'): - real_path = real_path.replace('/...', '') - command += ( - '; mkdir -p ' + checkoutdir + '/' + os.path.dirname(real_path)) - command += ('&& rsync -lr ' + p4_snapshot + '/' + real_path + ' ' + - checkoutdir + '/' + os.path.dirname(real_path)) + if " " not in p4_path: + p4_string += " -a %s" % p4_path + else: + p4_string += ( + ' -a "' + (" //" + client_name + "/").join(p4_path) + '"' + ) + + return p4_string + + +def GetP4Command(client_name, p4_port, p4_paths, checkoutdir, p4_snapshot=""): + command = "" + + if p4_snapshot: + command += "mkdir -p " + checkoutdir + for p4_path in p4_paths: + real_path = p4_path[1] + if real_path.endswith("..."): + real_path = real_path.replace("/...", "") + command += ( + "; mkdir -p " + + checkoutdir + + "/" + + os.path.dirname(real_path) + ) + command += ( + "&& rsync -lr " + + p4_snapshot + + "/" + + real_path + + " " + + checkoutdir + + "/" + + os.path.dirname(real_path) + ) + return command + + command += " export P4CONFIG=.p4config" + command += " && mkdir -p " + checkoutdir + command += " && cd " + checkoutdir + command += " && cp ${HOME}/.p4config ." + command += " && chmod u+w .p4config" + command += ' && echo "P4PORT=' + p4_port + '" >> .p4config' + command += ' && echo "P4CLIENT=' + client_name + '" >> .p4config' + command += " && g4 client " + _GetP4ClientSpec(client_name, p4_paths) + command += " && g4 sync " + command += " && cd -" return command - command += ' export P4CONFIG=.p4config' - command += ' && mkdir -p ' + checkoutdir - command += ' && cd ' + checkoutdir - command += ' && cp ${HOME}/.p4config .' 
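
GetP4Command above builds one long shell command by string concatenation, chaining steps with ' && ' so the sequence stops at the first failure. The sketch below shows the same pattern with a list plus join, which makes the separator handling harder to get wrong; the function and argument values here are illustrative, not from the patch.

def build_p4_setup_command(client_name, p4_port, checkoutdir):
    # ' && ' chains the steps and aborts the sequence on the first failure.
    steps = [
        'export P4CONFIG=.p4config',
        'mkdir -p %s' % checkoutdir,
        'cd %s' % checkoutdir,
        'cp ${HOME}/.p4config .',
        'chmod u+w .p4config',
        'echo "P4PORT=%s" >> .p4config' % p4_port,
        'echo "P4CLIENT=%s" >> .p4config' % client_name,
    ]
    return ' && '.join(steps)


print(build_p4_setup_command('binary-searcher', 'perforce2:2666', '/tmp/checkout'))
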
- command += ' && chmod u+w .p4config' - command += ' && echo "P4PORT=' + p4_port + '" >> .p4config' - command += ' && echo "P4CLIENT=' + client_name + '" >> .p4config' - command += (' && g4 client ' + _GetP4ClientSpec(client_name, p4_paths)) - command += ' && g4 sync ' - command += ' && cd -' - return command - class BinarySearchPoint(object): - """Class of binary search point.""" + """Class of binary search point.""" - def __init__(self, revision, status, tag=None): - self.revision = revision - self.status = status - self.tag = tag + def __init__(self, revision, status, tag=None): + self.revision = revision + self.status = status + self.tag = tag class BinarySearcherForPass(object): - """Class of pass level binary searcher.""" - - def __init__(self, logger_to_set=None): - self.current = 0 - self.lo = 0 - self.hi = 0 - self.total = 0 - if logger_to_set is not None: - self.logger = logger_to_set - else: - self.logger = logger.GetLogger() - - def GetNext(self): - # For the first run, update self.hi with total pass/transformation count - if self.hi == 0: - self.hi = self.total - self.current = (self.hi + self.lo) // 2 - message = ('Bisecting between: (%d, %d)' % (self.lo, self.hi)) - self.logger.LogOutput(message, print_to_console=verbose) - message = ('Current limit number: %d' % self.current) - self.logger.LogOutput(message, print_to_console=verbose) - return self.current - - def SetStatus(self, status): - """Set lo/hi status based on test script result - - If status == 0, it means that runtime error is not introduced until current - pass/transformation, so we need to increase lower bound for binary search. - - If status == 1, it means that runtime error still happens with current pass/ - transformation, so we need to decrease upper bound for binary search. - - Returns: - True if we find the bad pass/transformation, or cannot find bad one after - decreasing to the first pass/transformation. Otherwise False. - """ - assert status in (0, 1, 125), status - - if self.current == 0: - message = ('Runtime error occurs before first pass/transformation. ' - 'Stop binary searching.') - self.logger.LogOutput(message, print_to_console=verbose) - return True - - if status == 0: - message = ('Runtime error is not reproduced, increasing lower bound.') - self.logger.LogOutput(message, print_to_console=verbose) - self.lo = self.current + 1 - elif status == 1: - message = ('Runtime error is reproduced, decreasing upper bound..') - self.logger.LogOutput(message, print_to_console=verbose) - self.hi = self.current - - if self.lo >= self.hi: - return True - - return False + """Class of pass level binary searcher.""" + + def __init__(self, logger_to_set=None): + self.current = 0 + self.lo = 0 + self.hi = 0 + self.total = 0 + if logger_to_set is not None: + self.logger = logger_to_set + else: + self.logger = logger.GetLogger() + + def GetNext(self): + # For the first run, update self.hi with total pass/transformation count + if self.hi == 0: + self.hi = self.total + self.current = (self.hi + self.lo) // 2 + message = "Bisecting between: (%d, %d)" % (self.lo, self.hi) + self.logger.LogOutput(message, print_to_console=verbose) + message = "Current limit number: %d" % self.current + self.logger.LogOutput(message, print_to_console=verbose) + return self.current + + def SetStatus(self, status): + """Set lo/hi status based on test script result + + If status == 0, it means that runtime error is not introduced until current + pass/transformation, so we need to increase lower bound for binary search. 
+
+        If status == 1, it means that runtime error still happens with current pass/
+        transformation, so we need to decrease upper bound for binary search.
+
+        Returns:
+          True if we find the bad pass/transformation, or cannot find a bad one after
+          decreasing to the first pass/transformation. Otherwise False.
+        """
+        assert status in (0, 1, 125), status
+
+        if self.current == 0:
+            message = (
+                "Runtime error occurs before first pass/transformation. "
+                "Stop binary searching."
+            )
+            self.logger.LogOutput(message, print_to_console=verbose)
+            return True
+
+        if status == 0:
+            message = "Runtime error is not reproduced, increasing lower bound."
+            self.logger.LogOutput(message, print_to_console=verbose)
+            self.lo = self.current + 1
+        elif status == 1:
+            message = "Runtime error is reproduced, decreasing upper bound."
+            self.logger.LogOutput(message, print_to_console=verbose)
+            self.hi = self.current
+
+        if self.lo >= self.hi:
+            return True
+
+        return False


 class BinarySearcher(object):
-  """Class of binary searcher."""
-
-  def __init__(self, logger_to_set=None):
-    self.sorted_list = []
-    self.index_log = []
-    self.status_log = []
-    self.skipped_indices = []
-    self.current = 0
-    self.points = {}
-    self.lo = 0
-    self.hi = 0
-    if logger_to_set is not None:
-      self.logger = logger_to_set
-    else:
-      self.logger = logger.GetLogger()
-
-  def SetSortedList(self, sorted_list):
-    assert sorted_list
-    self.sorted_list = sorted_list
-    self.index_log = []
-    self.hi = len(sorted_list) - 1
-    self.lo = 0
-    self.points = {}
-    for i in range(len(self.sorted_list)):
-      bsp = BinarySearchPoint(self.sorted_list[i], -1, 'Not yet done.')
-      self.points[i] = bsp
-
-  def SetStatus(self, status, tag=None):
-    message = ('Revision: %s index: %d returned: %d' %
-               (self.sorted_list[self.current], self.current, status))
-    self.logger.LogOutput(message, print_to_console=verbose)
-    assert status in (0, 1, 125), status
-    self.index_log.append(self.current)
-    self.status_log.append(status)
-    bsp = BinarySearchPoint(self.sorted_list[self.current], status, tag)
-    self.points[self.current] = bsp
-
-    if status == 125:
-      self.skipped_indices.append(self.current)
-
-    if status in (0, 1):
-      if status == 0:
-        self.lo = self.current + 1
-      elif status == 1:
-        self.hi = self.current
-      self.logger.LogOutput('lo: %d hi: %d\n' % (self.lo, self.hi))
-      self.current = (self.lo + self.hi) // 2
-
-    if self.lo == self.hi:
-      message = ('Search complete. First bad version: %s'
-                 ' at index: %d' % (self.sorted_list[self.current], self.lo))
-      self.logger.LogOutput(message)
-      return True
-
-    for index in range(self.lo, self.hi):
-      if index not in self.skipped_indices:
-        return False
-    self.logger.LogOutput(
-        'All skipped indices between: %d and %d\n' % (self.lo, self.hi),
-        print_to_console=verbose)
-    return True
-
-  # Does a better job with chromeos flakiness.
-  def GetNextFlakyBinary(self):
-    t = (self.lo, self.current, self.hi)
-    q = [t]
-    while q:
-      element = q.pop(0)
-      if element[1] in self.skipped_indices:
-        # Go top
-        to_add = (element[0], (element[0] + element[1]) // 2, element[1])
-        q.append(to_add)
-        # Go bottom
-        to_add = (element[1], (element[1] + element[2]) // 2, element[2])
-        q.append(to_add)
-      else:
-        self.current = element[1]
-        return
-    assert q, 'Queue should never be 0-size!'
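
GetNextFlakyBinary above deserves a gloss: when the usual midpoint has already returned status 125 (skipped), it breadth-first searches the midpoints of the two half-intervals until it reaches a testable index. A self-contained sketch under that reading, using collections.deque for the queue:

from collections import deque

def next_testable_index(lo, hi, skipped):
    # Try the midpoint first; on a skipped index, fan out to the midpoints
    # of the lower and upper halves, breadth-first.
    queue = deque([(lo, (lo + hi) // 2, hi)])
    while queue:
        left, mid, right = queue.popleft()
        if mid not in skipped:
            return mid
        queue.append((left, (left + mid) // 2, mid))
        queue.append((mid, (mid + right) // 2, right))
    return None  # every candidate index was skipped

assert next_testable_index(0, 8, skipped={4}) == 2
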
- - def GetNextFlakyLinear(self): - current_hi = self.current - current_lo = self.current - while True: - if current_hi < self.hi and current_hi not in self.skipped_indices: - self.current = current_hi - break - if current_lo >= self.lo and current_lo not in self.skipped_indices: - self.current = current_lo - break - if current_lo < self.lo and current_hi >= self.hi: - break - - current_hi += 1 - current_lo -= 1 - - def GetNext(self): - self.current = (self.hi + self.lo) // 2 - # Try going forward if current is skipped. - if self.current in self.skipped_indices: - self.GetNextFlakyBinary() - - # TODO: Add an estimated time remaining as well. - message = ('Estimated tries: min: %d max: %d\n' % (1 + math.log( - self.hi - self.lo, 2), self.hi - self.lo - len(self.skipped_indices))) - self.logger.LogOutput(message, print_to_console=verbose) - message = ('lo: %d hi: %d current: %d version: %s\n' % - (self.lo, self.hi, self.current, self.sorted_list[self.current])) - self.logger.LogOutput(message, print_to_console=verbose) - self.logger.LogOutput(str(self), print_to_console=verbose) - return self.sorted_list[self.current] - - def SetLoRevision(self, lo_revision): - self.lo = self.sorted_list.index(lo_revision) - - def SetHiRevision(self, hi_revision): - self.hi = self.sorted_list.index(hi_revision) - - def GetAllPoints(self): - to_return = '' - for i in range(len(self.sorted_list)): - to_return += ( - '%d %d %s\n' % (self.points[i].status, i, self.points[i].revision)) - - return to_return - - def __str__(self): - to_return = '' - to_return += 'Current: %d\n' % self.current - to_return += str(self.index_log) + '\n' - revision_log = [] - for index in self.index_log: - revision_log.append(self.sorted_list[index]) - to_return += str(revision_log) + '\n' - to_return += str(self.status_log) + '\n' - to_return += 'Skipped indices:\n' - to_return += str(self.skipped_indices) + '\n' - to_return += self.GetAllPoints() - return to_return + """Class of binary searcher.""" + + def __init__(self, logger_to_set=None): + self.sorted_list = [] + self.index_log = [] + self.status_log = [] + self.skipped_indices = [] + self.current = 0 + self.points = {} + self.lo = 0 + self.hi = 0 + if logger_to_set is not None: + self.logger = logger_to_set + else: + self.logger = logger.GetLogger() + + def SetSortedList(self, sorted_list): + assert sorted_list + self.sorted_list = sorted_list + self.index_log = [] + self.hi = len(sorted_list) - 1 + self.lo = 0 + self.points = {} + for i in range(len(self.sorted_list)): + bsp = BinarySearchPoint(self.sorted_list[i], -1, "Not yet done.") + self.points[i] = bsp + + def SetStatus(self, status, tag=None): + message = "Revision: %s index: %d returned: %d" % ( + self.sorted_list[self.current], + self.current, + status, + ) + self.logger.LogOutput(message, print_to_console=verbose) + assert status in (0, 1, 125), status + self.index_log.append(self.current) + self.status_log.append(status) + bsp = BinarySearchPoint(self.sorted_list[self.current], status, tag) + self.points[self.current] = bsp + + if status == 125: + self.skipped_indices.append(self.current) + + if status in (0, 1): + if status == 0: + self.lo = self.current + 1 + elif status == 1: + self.hi = self.current + self.logger.LogOutput("lo: %d hi: %d\n" % (self.lo, self.hi)) + self.current = (self.lo + self.hi) // 2 + + if self.lo == self.hi: + message = ( + "Search complete. 
First bad version: %s" + " at index: %d" + % ( + self.sorted_list[self.current], + self.lo, + ) + ) + self.logger.LogOutput(message) + return True + + for index in range(self.lo, self.hi): + if index not in self.skipped_indices: + return False + self.logger.LogOutput( + "All skipped indices between: %d and %d\n" % (self.lo, self.hi), + print_to_console=verbose, + ) + return True + + # Does a better job with chromeos flakiness. + def GetNextFlakyBinary(self): + t = (self.lo, self.current, self.hi) + q = [t] + while q: + element = q.pop(0) + if element[1] in self.skipped_indices: + # Go top + to_add = ( + element[0], + (element[0] + element[1]) // 2, + element[1], + ) + q.append(to_add) + # Go bottom + to_add = ( + element[1], + (element[1] + element[2]) // 2, + element[2], + ) + q.append(to_add) + else: + self.current = element[1] + return + assert q, "Queue should never be 0-size!" + + def GetNextFlakyLinear(self): + current_hi = self.current + current_lo = self.current + while True: + if current_hi < self.hi and current_hi not in self.skipped_indices: + self.current = current_hi + break + if current_lo >= self.lo and current_lo not in self.skipped_indices: + self.current = current_lo + break + if current_lo < self.lo and current_hi >= self.hi: + break + + current_hi += 1 + current_lo -= 1 + + def GetNext(self): + self.current = (self.hi + self.lo) // 2 + # Try going forward if current is skipped. + if self.current in self.skipped_indices: + self.GetNextFlakyBinary() + + # TODO: Add an estimated time remaining as well. + message = "Estimated tries: min: %d max: %d\n" % ( + 1 + math.log(self.hi - self.lo, 2), + self.hi - self.lo - len(self.skipped_indices), + ) + self.logger.LogOutput(message, print_to_console=verbose) + message = "lo: %d hi: %d current: %d version: %s\n" % ( + self.lo, + self.hi, + self.current, + self.sorted_list[self.current], + ) + self.logger.LogOutput(message, print_to_console=verbose) + self.logger.LogOutput(str(self), print_to_console=verbose) + return self.sorted_list[self.current] + + def SetLoRevision(self, lo_revision): + self.lo = self.sorted_list.index(lo_revision) + + def SetHiRevision(self, hi_revision): + self.hi = self.sorted_list.index(hi_revision) + + def GetAllPoints(self): + to_return = "" + for i in range(len(self.sorted_list)): + to_return += "%d %d %s\n" % ( + self.points[i].status, + i, + self.points[i].revision, + ) + + return to_return + + def __str__(self): + to_return = "" + to_return += "Current: %d\n" % self.current + to_return += str(self.index_log) + "\n" + revision_log = [] + for index in self.index_log: + revision_log.append(self.sorted_list[index]) + to_return += str(revision_log) + "\n" + to_return += str(self.status_log) + "\n" + to_return += "Skipped indices:\n" + to_return += str(self.skipped_indices) + "\n" + to_return += self.GetAllPoints() + return to_return class RevisionInfo(object): - """Class of reversion info.""" + """Class of reversion info.""" - def __init__(self, date, client, description): - self.date = date - self.client = client - self.description = description - self.status = -1 + def __init__(self, date, client, description): + self.date = date + self.client = client + self.description = description + self.status = -1 class VCSBinarySearcher(object): - """Class of VCS binary searcher.""" + """Class of VCS binary searcher.""" - def __init__(self): - self.bs = BinarySearcher() - self.rim = {} - self.current_ce = None - self.checkout_dir = None - self.current_revision = None + def __init__(self): + self.bs = 
BinarySearcher() + self.rim = {} + self.current_ce = None + self.checkout_dir = None + self.current_revision = None - def Initialize(self): - pass + def Initialize(self): + pass - def GetNextRevision(self): - pass + def GetNextRevision(self): + pass - def CheckoutRevision(self, current_revision): - pass + def CheckoutRevision(self, current_revision): + pass - def SetStatus(self, status): - pass + def SetStatus(self, status): + pass - def Cleanup(self): - pass + def Cleanup(self): + pass - def SetGoodRevision(self, revision): - if revision is None: - return - assert revision in self.bs.sorted_list - self.bs.SetLoRevision(revision) + def SetGoodRevision(self, revision): + if revision is None: + return + assert revision in self.bs.sorted_list + self.bs.SetLoRevision(revision) - def SetBadRevision(self, revision): - if revision is None: - return - assert revision in self.bs.sorted_list - self.bs.SetHiRevision(revision) + def SetBadRevision(self, revision): + if revision is None: + return + assert revision in self.bs.sorted_list + self.bs.SetHiRevision(revision) class P4BinarySearcher(VCSBinarySearcher): - """Class of P4 binary searcher.""" - - def __init__(self, p4_port, p4_paths, test_command): - VCSBinarySearcher.__init__(self) - self.p4_port = p4_port - self.p4_paths = p4_paths - self.test_command = test_command - self.checkout_dir = tempfile.mkdtemp() - self.ce = command_executer.GetCommandExecuter() - self.client_name = 'binary-searcher-$HOSTNAME-$USER' - self.job_log_root = '/home/asharif/www/coreboot_triage/' - self.changes = None - - def Initialize(self): - self.Cleanup() - command = GetP4Command(self.client_name, self.p4_port, self.p4_paths, 1, - self.checkout_dir) - self.ce.RunCommand(command) - command = 'cd %s && g4 changes ...' % self.checkout_dir - _, out, _ = self.ce.RunCommandWOutput(command) - self.changes = re.findall(r'Change (\d+)', out) - change_infos = re.findall( - r'Change (\d+) on ([\d/]+) by ' - r"([^\s]+) ('[^']*')", out) - for change_info in change_infos: - ri = RevisionInfo(change_info[1], change_info[2], change_info[3]) - self.rim[change_info[0]] = ri - # g4 gives changes in reverse chronological order. - self.changes.reverse() - self.bs.SetSortedList(self.changes) - - def SetStatus(self, status): - self.rim[self.current_revision].status = status - return self.bs.SetStatus(status) - - def GetNextRevision(self): - next_revision = self.bs.GetNext() - self.current_revision = next_revision - return next_revision - - def CleanupCLs(self): - if not os.path.isfile(self.checkout_dir + '/.p4config'): - command = 'cd %s' % self.checkout_dir - command += ' && cp ${HOME}/.p4config .' - command += ' && echo "P4PORT=' + self.p4_port + '" >> .p4config' - command += ' && echo "P4CLIENT=' + self.client_name + '" >> .p4config' - self.ce.RunCommand(command) - command = 'cd %s' % self.checkout_dir - command += '; g4 changes -c %s' % self.client_name - _, out, _ = self.ce.RunCommandWOutput(command) - changes = re.findall(r'Change (\d+)', out) - if changes: - command = 'cd %s' % self.checkout_dir - for change in changes: - command += '; g4 revert -c %s' % change - self.ce.RunCommand(command) - - def CleanupClient(self): - command = 'cd %s' % self.checkout_dir - command += '; g4 revert ...' 
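
A note on Initialize() above: the candidate revision list comes from scraping `g4 changes` output with regular expressions, then reversing it, because g4 reports newest first while bisection wants oldest first. The sample output below is invented for illustration; the regexes are the ones used in the code above.

import re

sample = ("Change 44882 on 2010/03/03 by dev2@client 'fix codegen bug'\n"
          "Change 44760 on 2010/02/15 by dev1@client 'update binutils'\n")

changes = re.findall(r'Change (\d+)', sample)
change_infos = re.findall(r"Change (\d+) on ([\d/]+) by ([^\s]+) ('[^']*')", sample)

# g4 lists changes newest first; reverse to get chronological order.
changes.reverse()
assert changes == ['44760', '44882']
assert change_infos[0][1] == '2010/03/03'
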
- command += '; g4 client -d %s' % self.client_name - self.ce.RunCommand(command) - - def Cleanup(self): - self.CleanupCLs() - self.CleanupClient() - - def __str__(self): - to_return = '' - for change in self.changes: - ri = self.rim[change] - if ri.status == -1: - to_return = '%s\t%d\n' % (change, ri.status) - else: - to_return += ('%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\n' % - (change, ri.status, ri.date, ri.client, ri.description, - self.job_log_root + change + '.cmd', self.job_log_root + - change + '.out', self.job_log_root + change + '.err')) - return to_return + """Class of P4 binary searcher.""" + + def __init__(self, p4_port, p4_paths, test_command): + VCSBinarySearcher.__init__(self) + self.p4_port = p4_port + self.p4_paths = p4_paths + self.test_command = test_command + self.checkout_dir = tempfile.mkdtemp() + self.ce = command_executer.GetCommandExecuter() + self.client_name = "binary-searcher-$HOSTNAME-$USER" + self.job_log_root = "/home/asharif/www/coreboot_triage/" + self.changes = None + + def Initialize(self): + self.Cleanup() + command = GetP4Command( + self.client_name, self.p4_port, self.p4_paths, 1, self.checkout_dir + ) + self.ce.RunCommand(command) + command = "cd %s && g4 changes ..." % self.checkout_dir + _, out, _ = self.ce.RunCommandWOutput(command) + self.changes = re.findall(r"Change (\d+)", out) + change_infos = re.findall( + r"Change (\d+) on ([\d/]+) by " r"([^\s]+) ('[^']*')", out + ) + for change_info in change_infos: + ri = RevisionInfo(change_info[1], change_info[2], change_info[3]) + self.rim[change_info[0]] = ri + # g4 gives changes in reverse chronological order. + self.changes.reverse() + self.bs.SetSortedList(self.changes) + + def SetStatus(self, status): + self.rim[self.current_revision].status = status + return self.bs.SetStatus(status) + + def GetNextRevision(self): + next_revision = self.bs.GetNext() + self.current_revision = next_revision + return next_revision + + def CleanupCLs(self): + if not os.path.isfile(self.checkout_dir + "/.p4config"): + command = "cd %s" % self.checkout_dir + command += " && cp ${HOME}/.p4config ." + command += ' && echo "P4PORT=' + self.p4_port + '" >> .p4config' + command += ( + ' && echo "P4CLIENT=' + self.client_name + '" >> .p4config' + ) + self.ce.RunCommand(command) + command = "cd %s" % self.checkout_dir + command += "; g4 changes -c %s" % self.client_name + _, out, _ = self.ce.RunCommandWOutput(command) + changes = re.findall(r"Change (\d+)", out) + if changes: + command = "cd %s" % self.checkout_dir + for change in changes: + command += "; g4 revert -c %s" % change + self.ce.RunCommand(command) + + def CleanupClient(self): + command = "cd %s" % self.checkout_dir + command += "; g4 revert ..." 
+ command += "; g4 client -d %s" % self.client_name + self.ce.RunCommand(command) + + def Cleanup(self): + self.CleanupCLs() + self.CleanupClient() + + def __str__(self): + to_return = "" + for change in self.changes: + ri = self.rim[change] + if ri.status == -1: + to_return = "%s\t%d\n" % (change, ri.status) + else: + to_return += "%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\n" % ( + change, + ri.status, + ri.date, + ri.client, + ri.description, + self.job_log_root + change + ".cmd", + self.job_log_root + change + ".out", + self.job_log_root + change + ".err", + ) + return to_return class P4GCCBinarySearcher(P4BinarySearcher): - """Class of P4 gcc binary searcher.""" - - # TODO: eventually get these patches from g4 instead of creating them manually - def HandleBrokenCLs(self, current_revision): - cr = int(current_revision) - problematic_ranges = [] - problematic_ranges.append([44528, 44539]) - problematic_ranges.append([44528, 44760]) - problematic_ranges.append([44335, 44882]) - command = 'pwd' - for pr in problematic_ranges: - if cr in range(pr[0], pr[1]): - patch_file = '/home/asharif/triage_tool/%d-%d.patch' % (pr[0], pr[1]) - with open(patch_file, encoding='utf-8') as f: - patch = f.read() - files = re.findall('--- (//.*)', patch) - command += '; cd %s' % self.checkout_dir - for f in files: - command += '; g4 open %s' % f - command += '; patch -p2 < %s' % patch_file - self.current_ce.RunCommand(command) - - def CheckoutRevision(self, current_revision): - job_logger = logger.Logger( - self.job_log_root, current_revision, True, subdir='') - self.current_ce = command_executer.GetCommandExecuter(job_logger) - - self.CleanupCLs() - # Change the revision of only the gcc part of the toolchain. - command = ( - 'cd %s/gcctools/google_vendor_src_branch/gcc ' - '&& g4 revert ...; g4 sync @%s' % (self.checkout_dir, current_revision)) - self.current_ce.RunCommand(command) - - self.HandleBrokenCLs(current_revision) + """Class of P4 gcc binary searcher.""" + + # TODO: eventually get these patches from g4 instead of creating them manually + def HandleBrokenCLs(self, current_revision): + cr = int(current_revision) + problematic_ranges = [] + problematic_ranges.append([44528, 44539]) + problematic_ranges.append([44528, 44760]) + problematic_ranges.append([44335, 44882]) + command = "pwd" + for pr in problematic_ranges: + if cr in range(pr[0], pr[1]): + patch_file = "/home/asharif/triage_tool/%d-%d.patch" % ( + pr[0], + pr[1], + ) + with open(patch_file, encoding="utf-8") as f: + patch = f.read() + files = re.findall("--- (//.*)", patch) + command += "; cd %s" % self.checkout_dir + for f in files: + command += "; g4 open %s" % f + command += "; patch -p2 < %s" % patch_file + self.current_ce.RunCommand(command) + + def CheckoutRevision(self, current_revision): + job_logger = logger.Logger( + self.job_log_root, current_revision, True, subdir="" + ) + self.current_ce = command_executer.GetCommandExecuter(job_logger) + + self.CleanupCLs() + # Change the revision of only the gcc part of the toolchain. 
+ command = ( + "cd %s/gcctools/google_vendor_src_branch/gcc " + "&& g4 revert ...; g4 sync @%s" + % (self.checkout_dir, current_revision) + ) + self.current_ce.RunCommand(command) + + self.HandleBrokenCLs(current_revision) def Main(argv): - """The main function.""" - # Common initializations - ### command_executer.InitCommandExecuter(True) - ce = command_executer.GetCommandExecuter() - - parser = argparse.ArgumentParser() - parser.add_argument( - '-n', - '--num_tries', - dest='num_tries', - default='100', - help='Number of tries.') - parser.add_argument( - '-g', - '--good_revision', - dest='good_revision', - help='Last known good revision.') - parser.add_argument( - '-b', - '--bad_revision', - dest='bad_revision', - help='Last known bad revision.') - parser.add_argument( - '-s', '--script', dest='script', help='Script to run for every version.') - options = parser.parse_args(argv) - # First get all revisions - p4_paths = [ - '//depot2/gcctools/google_vendor_src_branch/gcc/gcc-4.4.3/...', - '//depot2/gcctools/google_vendor_src_branch/binutils/' - 'binutils-2.20.1-mobile/...', - '//depot2/gcctools/google_vendor_src_branch/' - 'binutils/binutils-20100303/...' - ] - p4gccbs = P4GCCBinarySearcher('perforce2:2666', p4_paths, '') - - # Main loop: - terminated = False - num_tries = int(options.num_tries) - script = os.path.expanduser(options.script) - - try: - p4gccbs.Initialize() - p4gccbs.SetGoodRevision(options.good_revision) - p4gccbs.SetBadRevision(options.bad_revision) - while not terminated and num_tries > 0: - current_revision = p4gccbs.GetNextRevision() - - # Now run command to get the status - ce = command_executer.GetCommandExecuter() - command = '%s %s' % (script, p4gccbs.checkout_dir) - status = ce.RunCommand(command) - message = ( - 'Revision: %s produced: %d status\n' % (current_revision, status)) - logger.GetLogger().LogOutput(message, print_to_console=verbose) - terminated = p4gccbs.SetStatus(status) - num_tries -= 1 - logger.GetLogger().LogOutput(str(p4gccbs), print_to_console=verbose) - - if not terminated: - logger.GetLogger().LogOutput( - 'Tries: %d expired.' % num_tries, print_to_console=verbose) - logger.GetLogger().LogOutput(str(p4gccbs.bs), print_to_console=verbose) - except (KeyboardInterrupt, SystemExit): - logger.GetLogger().LogOutput('Cleaning up...') - finally: - logger.GetLogger().LogOutput(str(p4gccbs.bs), print_to_console=verbose) - p4gccbs.Cleanup() - - -if __name__ == '__main__': - Main(sys.argv[1:]) + """The main function.""" + # Common initializations + ### command_executer.InitCommandExecuter(True) + ce = command_executer.GetCommandExecuter() + + parser = argparse.ArgumentParser() + parser.add_argument( + "-n", + "--num_tries", + dest="num_tries", + default="100", + help="Number of tries.", + ) + parser.add_argument( + "-g", + "--good_revision", + dest="good_revision", + help="Last known good revision.", + ) + parser.add_argument( + "-b", + "--bad_revision", + dest="bad_revision", + help="Last known bad revision.", + ) + parser.add_argument( + "-s", "--script", dest="script", help="Script to run for every version." 
+ ) + options = parser.parse_args(argv) + # First get all revisions + p4_paths = [ + "//depot2/gcctools/google_vendor_src_branch/gcc/gcc-4.4.3/...", + "//depot2/gcctools/google_vendor_src_branch/binutils/" + "binutils-2.20.1-mobile/...", + "//depot2/gcctools/google_vendor_src_branch/" + "binutils/binutils-20100303/...", + ] + p4gccbs = P4GCCBinarySearcher("perforce2:2666", p4_paths, "") + + # Main loop: + terminated = False + num_tries = int(options.num_tries) + script = os.path.expanduser(options.script) + + try: + p4gccbs.Initialize() + p4gccbs.SetGoodRevision(options.good_revision) + p4gccbs.SetBadRevision(options.bad_revision) + while not terminated and num_tries > 0: + current_revision = p4gccbs.GetNextRevision() + + # Now run command to get the status + ce = command_executer.GetCommandExecuter() + command = "%s %s" % (script, p4gccbs.checkout_dir) + status = ce.RunCommand(command) + message = "Revision: %s produced: %d status\n" % ( + current_revision, + status, + ) + logger.GetLogger().LogOutput(message, print_to_console=verbose) + terminated = p4gccbs.SetStatus(status) + num_tries -= 1 + logger.GetLogger().LogOutput(str(p4gccbs), print_to_console=verbose) + + if not terminated: + logger.GetLogger().LogOutput( + "Tries: %d expired." % num_tries, print_to_console=verbose + ) + logger.GetLogger().LogOutput(str(p4gccbs.bs), print_to_console=verbose) + except (KeyboardInterrupt, SystemExit): + logger.GetLogger().LogOutput("Cleaning up...") + finally: + logger.GetLogger().LogOutput(str(p4gccbs.bs), print_to_console=verbose) + p4gccbs.Cleanup() + + +if __name__ == "__main__": + Main(sys.argv[1:]) diff --git a/binary_search_tool/binary_search_state.py b/binary_search_tool/binary_search_state.py index 1dc2bb20..1ede37f8 100755 --- a/binary_search_tool/binary_search_state.py +++ b/binary_search_tool/binary_search_state.py @@ -30,871 +30,993 @@ from binary_search_tool import pass_mapping from cros_utils import command_executer from cros_utils import logger -GOOD_SET_VAR = 'BISECT_GOOD_SET' -BAD_SET_VAR = 'BISECT_BAD_SET' -STATE_FILE = '%s.state' % sys.argv[0] +GOOD_SET_VAR = "BISECT_GOOD_SET" +BAD_SET_VAR = "BISECT_BAD_SET" + +STATE_FILE = "%s.state" % sys.argv[0] HIDDEN_STATE_FILE = os.path.join( - os.path.dirname(STATE_FILE), '.%s' % os.path.basename(STATE_FILE)) + os.path.dirname(STATE_FILE), ".%s" % os.path.basename(STATE_FILE) +) @contextlib.contextmanager def SetFile(env_var, items): - """Generate set files that can be used by switch/test scripts. - - Generate temporary set file (good/bad) holding contents of good/bad items for - the current binary search iteration. Store the name of each file as an - environment variable so all child processes can access it. - - This function is a contextmanager, meaning it's meant to be used with the - "with" statement in Python. This is so cleanup and setup happens automatically - and cleanly. Execution of the outer "with" statement happens at the "yield" - statement. + """Generate set files that can be used by switch/test scripts. - Args: - env_var: What environment variable to store the file name in. - items: What items are in this set. - """ - with tempfile.NamedTemporaryFile('w', encoding='utf-8') as f: - os.environ[env_var] = f.name - f.write('\n'.join(items)) - f.flush() - yield + Generate temporary set file (good/bad) holding contents of good/bad items for + the current binary search iteration. Store the name of each file as an + environment variable so all child processes can access it. 
- -class BinarySearchState(object): - """The binary search state class.""" - - def __init__(self, get_initial_items, switch_to_good, switch_to_bad, - test_setup_script, test_script, incremental, prune, pass_bisect, - ir_diff, iterations, prune_iterations, verify, file_args, - verbose): - """BinarySearchState constructor, see Run for full args documentation.""" - self.get_initial_items = get_initial_items - self.switch_to_good = switch_to_good - self.switch_to_bad = switch_to_bad - self.test_setup_script = test_setup_script - self.test_script = test_script - self.incremental = incremental - self.prune = prune - self.pass_bisect = pass_bisect - self.ir_diff = ir_diff - self.iterations = iterations - self.prune_iterations = prune_iterations - self.verify = verify - self.file_args = file_args - self.verbose = verbose - - self.l = logger.GetLogger() - self.ce = command_executer.GetCommandExecuter() - - self.resumed = False - self.prune_cycles = 0 - self.search_cycles = 0 - self.binary_search = None - self.all_items = None - self.cmd_script = None - self.mode = None - self.PopulateItemsUsingCommand(self.get_initial_items) - self.currently_good_items = set() - self.currently_bad_items = set() - self.found_items = set() - self.known_good = set() - - self.start_time = time.time() - - def SwitchToGood(self, item_list): - """Switch given items to "good" set.""" - if self.incremental: - self.l.LogOutput( - 'Incremental set. Wanted to switch %s to good' % str(item_list), - print_to_console=self.verbose) - incremental_items = [ - item for item in item_list if item not in self.currently_good_items - ] - item_list = incremental_items - self.l.LogOutput( - 'Incremental set. Actually switching %s to good' % str(item_list), - print_to_console=self.verbose) - - if not item_list: - return - - self.l.LogOutput( - 'Switching %s to good' % str(item_list), print_to_console=self.verbose) - self.RunSwitchScript(self.switch_to_good, item_list) - self.currently_good_items = self.currently_good_items.union(set(item_list)) - self.currently_bad_items.difference_update(set(item_list)) - - def SwitchToBad(self, item_list): - """Switch given items to "bad" set.""" - if self.incremental: - self.l.LogOutput( - 'Incremental set. Wanted to switch %s to bad' % str(item_list), - print_to_console=self.verbose) - incremental_items = [ - item for item in item_list if item not in self.currently_bad_items - ] - item_list = incremental_items - self.l.LogOutput( - 'Incremental set. Actually switching %s to bad' % str(item_list), - print_to_console=self.verbose) - - if not item_list: - return - - self.l.LogOutput( - 'Switching %s to bad' % str(item_list), print_to_console=self.verbose) - self.RunSwitchScript(self.switch_to_bad, item_list) - self.currently_bad_items = self.currently_bad_items.union(set(item_list)) - self.currently_good_items.difference_update(set(item_list)) - - def RunSwitchScript(self, switch_script, item_list): - """Pass given items to switch script. + This function is a contextmanager, meaning it's meant to be used with the + "with" statement in Python. This is so cleanup and setup happens automatically + and cleanly. Execution of the outer "with" statement happens at the "yield" + statement. Args: - switch_script: path to switch script - item_list: list of all items to be switched + env_var: What environment variable to store the file name in. + items: What items are in this set. 
""" - if self.file_args: - with tempfile.NamedTemporaryFile('w', encoding='utf-8') as f: - f.write('\n'.join(item_list)) + with tempfile.NamedTemporaryFile("w", encoding="utf-8") as f: + os.environ[env_var] = f.name + f.write("\n".join(items)) f.flush() - command = '%s %s' % (switch_script, f.name) - ret, _, _ = self.ce.RunCommandWExceptionCleanup( - command, print_to_console=self.verbose) - else: - command = '%s %s' % (switch_script, ' '.join(item_list)) - try: - ret, _, _ = self.ce.RunCommandWExceptionCleanup( - command, print_to_console=self.verbose) - except OSError as e: - if e.errno == errno.E2BIG: - raise RuntimeError('Too many arguments for switch script! Use ' - '--file_args') - assert ret == 0, 'Switch script %s returned %d' % (switch_script, ret) - - def TestScript(self): - """Run test script and return exit code from script.""" - command = self.test_script - ret, _, _ = self.ce.RunCommandWExceptionCleanup(command) - return ret - - def TestSetupScript(self): - """Run test setup script and return exit code from script.""" - if not self.test_setup_script: - return 0 - - command = self.test_setup_script - ret, _, _ = self.ce.RunCommandWExceptionCleanup(command) - return ret - - def GenerateBadCommandScript(self, bad_items): - """Generate command line script for building bad item.""" - assert not self.prune, 'Prune must be false if pass_bisect is set.' - assert len(bad_items) == 1, 'Pruning is off, but number of bad ' \ - 'items found was not 1.' - item = list(bad_items)[0] - command = '%s %s' % (self.pass_bisect, item) - ret, _, _ = self.ce.RunCommandWExceptionCleanup( - command, print_to_console=self.verbose) - return ret - - def DoVerify(self): - """Verify correctness of test environment. - - Verify that a "good" set of items produces a "good" result and that a "bad" - set of items produces a "bad" result. To be run directly before running - DoSearch. If verify is False this step is skipped. - """ - if not self.verify: - return - - self.l.LogOutput('VERIFICATION') - self.l.LogOutput('Beginning tests to verify good/bad sets\n') - - self._OutputProgress('Verifying items from GOOD set\n') - with SetFile(GOOD_SET_VAR, self.all_items), SetFile(BAD_SET_VAR, []): - self.l.LogOutput('Resetting all items to good to verify.') - self.SwitchToGood(self.all_items) - status = self.TestSetupScript() - assert status == 0, 'When reset_to_good, test setup should succeed.' - status = self.TestScript() - assert status == 0, 'When reset_to_good, status should be 0.' - - self._OutputProgress('Verifying items from BAD set\n') - with SetFile(GOOD_SET_VAR, []), SetFile(BAD_SET_VAR, self.all_items): - self.l.LogOutput('Resetting all items to bad to verify.') - self.SwitchToBad(self.all_items) - status = self.TestSetupScript() - # The following assumption is not true; a bad image might not - # successfully push onto a device. - # assert status == 0, 'When reset_to_bad, test setup should succeed.' - if status == 0: - status = self.TestScript() - assert status == 1, 'When reset_to_bad, status should be 1.' - - def DoSearchBadItems(self): - """Perform full search for bad items. - - Perform full search until prune_iterations number of bad items are found. - """ - while (True and len(self.all_items) > 1 and - self.prune_cycles < self.prune_iterations): - terminated = self.DoBinarySearchBadItems() - self.prune_cycles += 1 - if not terminated: - break - # Prune is set. 
- prune_index = self.binary_search.current - - # If found item is last item, no new items can be found - if prune_index == len(self.all_items) - 1: - self.l.LogOutput('First bad item is the last item. Breaking.') - self.l.LogOutput('Bad items are: %s' % self.all_items[-1]) - self.found_items.add(self.all_items[-1]) - break - - # If already seen item we have no new bad items to find, finish up - if self.all_items[prune_index] in self.found_items: - self.l.LogOutput( - 'Found item already found before: %s.' % - self.all_items[prune_index], - print_to_console=self.verbose) - self.l.LogOutput('No more bad items remaining. Done searching.') - self.l.LogOutput('Bad items are: %s' % ' '.join(self.found_items)) - break - - new_all_items = list(self.all_items) - # Move prune item to the end of the list. - new_all_items.append(new_all_items.pop(prune_index)) - self.found_items.add(new_all_items[-1]) - - # Everything below newly found bad item is now known to be a good item. - # Take these good items out of the equation to save time on the next - # search. We save these known good items so they are still sent to the - # switch_to_good script. - if prune_index: - self.known_good.update(new_all_items[:prune_index]) - new_all_items = new_all_items[prune_index:] - - self.l.LogOutput( - 'Old list: %s. New list: %s' % (str(self.all_items), - str(new_all_items)), - print_to_console=self.verbose) - - if not self.prune: - self.l.LogOutput('Not continuning further, --prune is not set') - break - # FIXME: Do we need to Convert the currently good items to bad - self.PopulateItemsUsingList(new_all_items) - - # If pass level bisecting is set, generate a script which contains command - # line options to rebuild bad item. - if self.pass_bisect: - status = self.GenerateBadCommandScript(self.found_items) - if status == 0: - self.cmd_script = os.path.join( - os.path.dirname(self.pass_bisect), 'cmd_script.sh') - self.l.LogOutput('Command script generated at %s.' % self.cmd_script) - else: - raise RuntimeError('Error while generating command script.') - - def DoBinarySearchBadItems(self): - """Perform single iteration of binary search.""" - # If in resume mode don't reset search_cycles - if not self.resumed: - self.search_cycles = 0 - else: - self.resumed = False - - terminated = False - while self.search_cycles < self.iterations and not terminated: - self.SaveState() - self.OutputIterationProgressBadItem() + yield - self.search_cycles += 1 - [bad_items, good_items] = self.GetNextItems() - with SetFile(GOOD_SET_VAR, good_items), SetFile(BAD_SET_VAR, bad_items): - # TODO: bad_items should come first. 
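
The pruning arithmetic in DoSearchBadItems above is compact enough to misread: the first bad item found is rotated to the back of the list so later searches can surface other bad items, and everything in front of it is retired as known-good. The same steps in isolation, as a sketch rather than the class method:

def prune_step(all_items, prune_index, known_good):
    # Rotate the newly found bad item to the end of the list.
    new_items = list(all_items)
    new_items.append(new_items.pop(prune_index))
    # Everything before the bad item tested good; retire it but remember it
    # so it can still be fed to the switch_to_good script.
    known_good.update(new_items[:prune_index])
    return new_items[prune_index:]

known_good = set()
remaining = prune_step(['a', 'b', 'c', 'd'], 2, known_good)
assert remaining == ['d', 'c'] and known_good == {'a', 'b'}
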
- self.SwitchToGood(good_items) - self.SwitchToBad(bad_items) - status = self.TestSetupScript() - if status == 0: - status = self.TestScript() - terminated = self.binary_search.SetStatus(status) +class BinarySearchState(object): + """The binary search state class.""" - if terminated: - self.l.LogOutput('Terminated!', print_to_console=self.verbose) - if not terminated: - self.l.LogOutput('Ran out of iterations searching...') - self.l.LogOutput(str(self), print_to_console=self.verbose) - return terminated + def __init__( + self, + get_initial_items, + switch_to_good, + switch_to_bad, + test_setup_script, + test_script, + incremental, + prune, + pass_bisect, + ir_diff, + iterations, + prune_iterations, + verify, + file_args, + verbose, + ): + """BinarySearchState constructor, see Run for full args documentation.""" + self.get_initial_items = get_initial_items + self.switch_to_good = switch_to_good + self.switch_to_bad = switch_to_bad + self.test_setup_script = test_setup_script + self.test_script = test_script + self.incremental = incremental + self.prune = prune + self.pass_bisect = pass_bisect + self.ir_diff = ir_diff + self.iterations = iterations + self.prune_iterations = prune_iterations + self.verify = verify + self.file_args = file_args + self.verbose = verbose + + self.l = logger.GetLogger() + self.ce = command_executer.GetCommandExecuter() + + self.resumed = False + self.prune_cycles = 0 + self.search_cycles = 0 + self.binary_search = None + self.all_items = None + self.cmd_script = None + self.mode = None + self.PopulateItemsUsingCommand(self.get_initial_items) + self.currently_good_items = set() + self.currently_bad_items = set() + self.found_items = set() + self.known_good = set() + + self.start_time = time.time() + + def SwitchToGood(self, item_list): + """Switch given items to "good" set.""" + if self.incremental: + self.l.LogOutput( + "Incremental set. Wanted to switch %s to good" % str(item_list), + print_to_console=self.verbose, + ) + incremental_items = [ + item + for item in item_list + if item not in self.currently_good_items + ] + item_list = incremental_items + self.l.LogOutput( + "Incremental set. Actually switching %s to good" + % str(item_list), + print_to_console=self.verbose, + ) + + if not item_list: + return - def CollectPassName(self, pass_info): - """Mapping opt-bisect output of pass info to debugcounter name.""" - self.l.LogOutput('Pass info: %s' % pass_info, print_to_console=self.verbose) + self.l.LogOutput( + "Switching %s to good" % str(item_list), + print_to_console=self.verbose, + ) + self.RunSwitchScript(self.switch_to_good, item_list) + self.currently_good_items = self.currently_good_items.union( + set(item_list) + ) + self.currently_bad_items.difference_update(set(item_list)) + + def SwitchToBad(self, item_list): + """Switch given items to "bad" set.""" + if self.incremental: + self.l.LogOutput( + "Incremental set. Wanted to switch %s to bad" % str(item_list), + print_to_console=self.verbose, + ) + incremental_items = [ + item + for item in item_list + if item not in self.currently_bad_items + ] + item_list = incremental_items + self.l.LogOutput( + "Incremental set. 
Actually switching %s to bad" + % str(item_list), + print_to_console=self.verbose, + ) + + if not item_list: + return - for desc in pass_mapping.pass_name: - if desc in pass_info: - return pass_mapping.pass_name[desc] + self.l.LogOutput( + "Switching %s to bad" % str(item_list), + print_to_console=self.verbose, + ) + self.RunSwitchScript(self.switch_to_bad, item_list) + self.currently_bad_items = self.currently_bad_items.union( + set(item_list) + ) + self.currently_good_items.difference_update(set(item_list)) + + def RunSwitchScript(self, switch_script, item_list): + """Pass given items to switch script. + + Args: + switch_script: path to switch script + item_list: list of all items to be switched + """ + if self.file_args: + with tempfile.NamedTemporaryFile("w", encoding="utf-8") as f: + f.write("\n".join(item_list)) + f.flush() + command = "%s %s" % (switch_script, f.name) + ret, _, _ = self.ce.RunCommandWExceptionCleanup( + command, print_to_console=self.verbose + ) + else: + command = "%s %s" % (switch_script, " ".join(item_list)) + try: + ret, _, _ = self.ce.RunCommandWExceptionCleanup( + command, print_to_console=self.verbose + ) + except OSError as e: + if e.errno == errno.E2BIG: + raise RuntimeError( + "Too many arguments for switch script! Use " + "--file_args" + ) + assert ret == 0, "Switch script %s returned %d" % (switch_script, ret) + + def TestScript(self): + """Run test script and return exit code from script.""" + command = self.test_script + ret, _, _ = self.ce.RunCommandWExceptionCleanup(command) + return ret + + def TestSetupScript(self): + """Run test setup script and return exit code from script.""" + if not self.test_setup_script: + return 0 + + command = self.test_setup_script + ret, _, _ = self.ce.RunCommandWExceptionCleanup(command) + return ret + + def GenerateBadCommandScript(self, bad_items): + """Generate command line script for building bad item.""" + assert not self.prune, "Prune must be false if pass_bisect is set." + assert len(bad_items) == 1, ( + "Pruning is off, but number of bad " "items found was not 1." + ) + item = list(bad_items)[0] + command = "%s %s" % (self.pass_bisect, item) + ret, _, _ = self.ce.RunCommandWExceptionCleanup( + command, print_to_console=self.verbose + ) + return ret + + def DoVerify(self): + """Verify correctness of test environment. + + Verify that a "good" set of items produces a "good" result and that a "bad" + set of items produces a "bad" result. To be run directly before running + DoSearch. If verify is False this step is skipped. + """ + if not self.verify: + return + + self.l.LogOutput("VERIFICATION") + self.l.LogOutput("Beginning tests to verify good/bad sets\n") + + self._OutputProgress("Verifying items from GOOD set\n") + with SetFile(GOOD_SET_VAR, self.all_items), SetFile(BAD_SET_VAR, []): + self.l.LogOutput("Resetting all items to good to verify.") + self.SwitchToGood(self.all_items) + status = self.TestSetupScript() + assert status == 0, "When reset_to_good, test setup should succeed." + status = self.TestScript() + assert status == 0, "When reset_to_good, status should be 0." + + self._OutputProgress("Verifying items from BAD set\n") + with SetFile(GOOD_SET_VAR, []), SetFile(BAD_SET_VAR, self.all_items): + self.l.LogOutput("Resetting all items to bad to verify.") + self.SwitchToBad(self.all_items) + status = self.TestSetupScript() + # The following assumption is not true; a bad image might not + # successfully push onto a device. + # assert status == 0, 'When reset_to_bad, test setup should succeed.' 
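As the verification code above shows, the good/bad contract is carried entirely by the test script's exit status: 0 for a good result, 1 for a bad one. A minimal sketch of such a test script (hypothetical paths and command, not part of this change):

    #!/usr/bin/env python3
    # Hypothetical test_script sketch: exit 0 when the build under test
    # behaves correctly ("good"), exit 1 when it reproduces the failure
    # ("bad"). The binary path and flag below are illustrative only.
    import subprocess
    import sys

    result = subprocess.run(["./out/app", "--self-test"], check=False)
    sys.exit(0 if result.returncode == 0 else 1)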
+            if status == 0:
+                status = self.TestScript()
+                assert status == 1, "When reset_to_bad, status should be 1."
+
+    def DoSearchBadItems(self):
+        """Perform full search for bad items.
+
+        Perform full search until prune_iterations number of bad items are
+        found.
+        """
+        while (
+            len(self.all_items) > 1
+            and self.prune_cycles < self.prune_iterations
+        ):
+            terminated = self.DoBinarySearchBadItems()
+            self.prune_cycles += 1
+            if not terminated:
+                break
+            # Prune is set.
+            prune_index = self.binary_search.current
+
+            # If the found item is the last item, no new items can be found.
+            if prune_index == len(self.all_items) - 1:
+                self.l.LogOutput("First bad item is the last item. Breaking.")
+                self.l.LogOutput("Bad items are: %s" % self.all_items[-1])
+                self.found_items.add(self.all_items[-1])
+                break
+
+            # If the item was already seen, there are no new bad items to
+            # find; finish up.
+            if self.all_items[prune_index] in self.found_items:
+                self.l.LogOutput(
+                    "Found item already found before: %s."
+                    % self.all_items[prune_index],
+                    print_to_console=self.verbose,
+                )
+                self.l.LogOutput("No more bad items remaining. Done searching.")
+                self.l.LogOutput(
+                    "Bad items are: %s" % " ".join(self.found_items)
+                )
+                break
+
+            new_all_items = list(self.all_items)
+            # Move the prune item to the end of the list.
+            new_all_items.append(new_all_items.pop(prune_index))
+            self.found_items.add(new_all_items[-1])
+
+            # Everything below the newly found bad item is now known to be a
+            # good item. Take these good items out of the equation to save
+            # time on the next search. We save these known good items so they
+            # are still sent to the switch_to_good script.
+            if prune_index:
+                self.known_good.update(new_all_items[:prune_index])
+                new_all_items = new_all_items[prune_index:]
+
+            self.l.LogOutput(
+                "Old list: %s. New list: %s"
+                % (str(self.all_items), str(new_all_items)),
+                print_to_console=self.verbose,
+            )
+
+            if not self.prune:
+                self.l.LogOutput("Not continuing further, --prune is not set")
+                break
+            # FIXME: Do we need to convert the currently good items to bad?
+            self.PopulateItemsUsingList(new_all_items)
+
+        # If pass-level bisecting is set, generate a script which contains the
+        # command line options to rebuild the bad item.
+        if self.pass_bisect:
+            status = self.GenerateBadCommandScript(self.found_items)
+            if status == 0:
+                self.cmd_script = os.path.join(
+                    os.path.dirname(self.pass_bisect), "cmd_script.sh"
+                )
+                self.l.LogOutput(
+                    "Command script generated at %s." % self.cmd_script
+                )
+            else:
+                raise RuntimeError("Error while generating command script.")
+
+    def DoBinarySearchBadItems(self):
+        """Perform a single iteration of the binary search."""
+        # If in resume mode, don't reset search_cycles.
+        if not self.resumed:
+            self.search_cycles = 0
+        else:
+            self.resumed = False
+
+        terminated = False
+        while self.search_cycles < self.iterations and not terminated:
+            self.SaveState()
+            self.OutputIterationProgressBadItem()
+
+            self.search_cycles += 1
+            [bad_items, good_items] = self.GetNextItems()
+
+            with SetFile(GOOD_SET_VAR, good_items), SetFile(
+                BAD_SET_VAR, bad_items
+            ):
+                # TODO: bad_items should come first.
+                self.SwitchToGood(good_items)
+                self.SwitchToBad(bad_items)
+                status = self.TestSetupScript()
+                if status == 0:
+                    status = self.TestScript()
+                terminated = self.binary_search.SetStatus(status)
+
+            if terminated:
+                self.l.LogOutput("Terminated!", print_to_console=self.verbose)
+        if not terminated:
+            self.l.LogOutput("Ran out of iterations searching...")
+        self.l.LogOutput(str(self), print_to_console=self.verbose)
+        return terminated
+
+    def CollectPassName(self, pass_info):
+        """Map opt-bisect pass info output to a debugcounter name."""
+        self.l.LogOutput(
+            "Pass info: %s" % pass_info, print_to_console=self.verbose
+        )
+
+        for desc in pass_mapping.pass_name:
+            if desc in pass_info:
+                return pass_mapping.pass_name[desc]
+
+        # If the pass is not found, return None.
+        return None
+
+    def BuildWithPassLimit(self, limit, generate_ir=False):
+        """Rebuild the bad item with a pass-level bisect limit.
+
+        Run the command line script generated by GenerateBadCommandScript(),
+        with pass-level limit flags.
+
+        Returns:
+            pass_num: current number of the pass, or total number of passes if
+                limit is set to -1.
+            pass_name: the debugcounter name of the current limit pass.
+        """
+        os.environ["LIMIT_FLAGS"] = "-mllvm -opt-bisect-limit=" + str(limit)
+        if generate_ir:
+            os.environ["LIMIT_FLAGS"] += " -S -emit-llvm"
+        self.l.LogOutput(
+            "Limit flags: %s" % os.environ["LIMIT_FLAGS"],
+            print_to_console=self.verbose,
+        )
+        command = self.cmd_script
+        _, _, msg = self.ce.RunCommandWOutput(command, print_to_console=False)
+
+        # Messages we get will be like this:
+        # BISECT: running pass (9) <Pass Description> on <function> (<file>)
+        # BISECT: running pass (10) <Pass Description> on <module> (<file>)
+        # BISECT: NOT running pass (11) <Pass Description> on <SCG> (<file>)
+        # BISECT: NOT running pass (12) <Pass Description> on <SCG> (<file>)
+        # We want the pass description of the last running pass, to perform
+        # transformation-level bisection on it.
+        if "BISECT: " not in msg:
+            raise RuntimeError(
+                "No bisect info printed, OptBisect may not be "
+                "supported by the compiler."
+            )
+
+        lines = msg.split("\n")
+        pass_num = 0
+        last_pass = ""
+        for l in lines:
+            if "running pass" in l:
+                # For the limit == -1 case, we want the total number of passes.
+                if limit != -1 and "BISECT: NOT " in l:
+                    break
+                pass_num += 1
+                last_pass = l
+        if limit not in (-1, pass_num):
+            raise ValueError(
+                "[Error] While building, limit number does not match."
+            )
+        return pass_num, self.CollectPassName(last_pass)
+
+    def BuildWithTransformLimit(
+        self, limit, pass_name=None, pass_limit=-1, generate_ir=False
+    ):
+        """Rebuild the bad item with a transformation-level bisect limit.
+
+        Run the command line script generated by GenerateBadCommandScript(),
+        with pass-level limit flags and transformation-level limit flags.
+
+        Args:
+            limit: transformation-level limit for the bad item.
+            pass_name: name of the bad pass debugcounter from the pass-level
+                bisect result.
+            pass_limit: pass-level limit from the pass-level bisect result.
+            generate_ir: whether to generate an IR comparison.
+
+        Returns:
+            Total number of transformations if limit is set to -1, else 0.
+ """ + counter_name = pass_name + + os.environ["LIMIT_FLAGS"] = ( + "-mllvm -opt-bisect-limit=" + + str(pass_limit) + + " -mllvm -debug-counter=" + + counter_name + + "-count=" + + str(limit) + + " -mllvm -print-debug-counter" + ) + if generate_ir: + os.environ["LIMIT_FLAGS"] += " -S -emit-llvm" + self.l.LogOutput( + "Limit flags: %s" % os.environ["LIMIT_FLAGS"], + print_to_console=self.verbose, + ) + command = self.cmd_script + _, _, msg = self.ce.RunCommandWOutput(command, print_to_console=False) + + if "Counters and values:" not in msg: + # Print pass level IR diff only if transformation level bisection does + # not work. + if self.ir_diff: + self.PrintIRDiff(pass_limit) + raise RuntimeError( + "No bisect info printed, DebugCounter may not be " + "supported by the compiler." + ) + + # With debugcounter enabled, there will be DebugCounter counting info in + # the output. + lines = msg.split("\n") + for l in lines: + if pass_name in l: + # Output of debugcounter will be like: + # instcombine-visit: {10, 0, 20} + # dce-transform: {1, 0, -1} + # which indicates {Count, Skip, StopAfter}. + # The last number should be the limit we set. + # We want the first number as the total transformation count. + # Split each line by ,|{|} and we can get l_list as: + # ['instcombine: ', '10', '0', '20', ''] + # and we will need the second item in it. + l_list = re.split(",|{|}", l) + count = int(l_list[1]) + if limit == -1: + return count + # The returned value is only useful when limit == -1, which shows total + # transformation count. + return 0 + + def PrintIRDiff(self, pass_index, pass_name=None, trans_index=-1): + bad_item = list(self.found_items)[0] + self.l.LogOutput( + "IR difference before and after bad pass/transformation:", + print_to_console=self.verbose, + ) + + if trans_index == -1: + # Pass level IR diff + self.BuildWithPassLimit(pass_index, self.ir_diff) + good_ir = os.path.join(tempfile.tempdir, "good.s") + shutil.copyfile(bad_item, good_ir) + pass_index += 1 + self.BuildWithPassLimit(pass_index, self.ir_diff) + else: + # Transformation level IR diff + self.BuildWithTransformLimit( + trans_index, pass_name, pass_index, self.ir_diff + ) + good_ir = os.path.join(tempfile.tempdir, "good.s") + shutil.copyfile(bad_item, good_ir) + trans_index += 1 + self.BuildWithTransformLimit( + trans_index, pass_name, pass_index, self.ir_diff + ) + + bad_ir = os.path.join(tempfile.tempdir, "bad.s") + shutil.copyfile(bad_item, bad_ir) + + command = "diff %s %s" % (good_ir, bad_ir) + _, _, _ = self.ce.RunCommandWOutput( + command, print_to_console=self.verbose + ) + + def DoSearchBadPass(self): + """Perform full search for bad pass of bad item.""" + logger.GetLogger().LogOutput( + "Starting to bisect bad pass for bad item." + ) + + # Pass level bisection + self.mode = "pass" + self.binary_search = binary_search_perforce.BinarySearcherForPass( + logger_to_set=self.l + ) + self.binary_search.total, _ = self.BuildWithPassLimit(-1) + logger.GetLogger().LogOutput( + "Total %s number: %d" % (self.mode, self.binary_search.total) + ) + + pass_index, pass_name = self.DoBinarySearchBadPass() + + if not pass_name and pass_index == 0: + raise ValueError("Bisecting passes cannot reproduce good result.") + logger.GetLogger().LogOutput("Bad pass found: %s." % pass_name) + + # Transformation level bisection. + logger.GetLogger().LogOutput( + "Starting to bisect at transformation level." 
+ ) + + self.mode = "transform" + self.binary_search = binary_search_perforce.BinarySearcherForPass( + logger_to_set=self.l + ) + self.binary_search.total = self.BuildWithTransformLimit( + -1, pass_name, pass_index + ) + logger.GetLogger().LogOutput( + "Total %s number: %d" % (self.mode, self.binary_search.total) + ) + + trans_index, _ = self.DoBinarySearchBadPass(pass_index, pass_name) + if trans_index == 0: + raise ValueError( + "Bisecting %s cannot reproduce good result." % pass_name + ) + + if self.ir_diff: + self.PrintIRDiff(pass_index, pass_name, trans_index) + + logger.GetLogger().LogOutput( + "Bisection result for bad item %s:\n" + "Bad pass: %s at number %d\n" + "Bad transformation number: %d" + % (self.found_items, pass_name, pass_index, trans_index) + ) + + def DoBinarySearchBadPass(self, pass_index=-1, pass_name=None): + """Perform single iteration of binary search at pass level + + Args: + pass_index: Works for transformation level bisection, indicates the limit + number of pass from pass level bisecting result. + pass_name: Works for transformation level bisection, indicates + DebugCounter name of the bad pass from pass level bisecting + result. + + Returns: + index: Index of problematic pass/transformation. + pass_name: Works for pass level bisection, returns DebugCounter name for + bad pass. + """ + # If in resume mode don't reset search_cycles + if not self.resumed: + self.search_cycles = 0 + else: + self.resumed = False + + terminated = False + index = 0 + while self.search_cycles < self.iterations and not terminated: + self.SaveState() + self.OutputIterationProgressBadPass() + + self.search_cycles += 1 + current = self.binary_search.GetNext() + + if self.mode == "pass": + index, pass_name = self.BuildWithPassLimit(current) + else: + self.BuildWithTransformLimit(current, pass_name, pass_index) + index = current + + # TODO: Newly generated object should not directly replace original + # one, need to put it somewhere and symbol link original one to it. + # Will update cmd_script to do it. + + status = self.TestSetupScript() + assert status == 0, "Test setup should succeed." + status = self.TestScript() + terminated = self.binary_search.SetStatus(status) + + if terminated: + self.l.LogOutput("Terminated!", print_to_console=self.verbose) + if not terminated: + self.l.LogOutput("Ran out of iterations searching...") + self.l.LogOutput(str(self), print_to_console=self.verbose) + return index, pass_name + + def PopulateItemsUsingCommand(self, command): + """Update all_items and binary search logic from executable. + + This method is mainly required for enumerating the initial list of items + from the get_initial_items script. + + Args: + command: path to executable that will enumerate items. + """ + ce = command_executer.GetCommandExecuter() + _, out, _ = ce.RunCommandWExceptionCleanup( + command, return_output=True, print_to_console=self.verbose + ) + all_items = out.split() + self.PopulateItemsUsingList(all_items) + + def PopulateItemsUsingList(self, all_items): + """Update all_items and binary searching logic from list. + + Args: + all_items: new list of all_items + """ + self.all_items = all_items + self.binary_search = binary_search_perforce.BinarySearcher( + logger_to_set=self.l + ) + self.binary_search.SetSortedList(self.all_items) + + def SaveState(self): + """Save state to STATE_FILE. + + SaveState will create a new unique, hidden state file to hold data from + object. Then atomically overwrite the STATE_FILE symlink to point to the + new data. 
+ + Raises: + OSError if STATE_FILE already exists but is not a symlink. + """ + ce, l = self.ce, self.l + self.ce, self.l, self.binary_search.logger = None, None, None + old_state = None + + _, path = tempfile.mkstemp(prefix=HIDDEN_STATE_FILE, dir=".") + with open(path, "wb") as f: + pickle.dump(self, f) + + if os.path.exists(STATE_FILE): + if os.path.islink(STATE_FILE): + old_state = os.readlink(STATE_FILE) + else: + raise OSError( + ( + "%s already exists and is not a symlink!\n" + "State file saved to %s" % (STATE_FILE, path) + ) + ) + + # Create new link and atomically overwrite old link + temp_link = "%s.link" % HIDDEN_STATE_FILE + os.symlink(path, temp_link) + os.rename(temp_link, STATE_FILE) + + if old_state: + os.remove(old_state) + + self.ce, self.l, self.binary_search.logger = ce, l, l + + @classmethod + def LoadState(cls): + """Create BinarySearchState object from STATE_FILE.""" + if not os.path.isfile(STATE_FILE): + return None + try: + with open(STATE_FILE, "rb") as f: + bss = pickle.load(f) + bss.l = logger.GetLogger() + bss.ce = command_executer.GetCommandExecuter() + bss.binary_search.logger = bss.l + bss.start_time = time.time() + + # Set resumed to be True so we can enter DoBinarySearch without the + # method resetting our current search_cycles to 0. + bss.resumed = True + + # Set currently_good_items and currently_bad_items to empty so that the + # first iteration after resuming will always be non-incremental. This + # is just in case the environment changes, the user makes manual + # changes, or a previous switch_script corrupted the environment. + bss.currently_good_items = set() + bss.currently_bad_items = set() + + binary_search_perforce.verbose = bss.verbose + return bss + except Exception: + return None + + def RemoveState(self): + """Remove STATE_FILE and its symlinked data from file system.""" + if os.path.exists(STATE_FILE): + if os.path.islink(STATE_FILE): + real_file = os.readlink(STATE_FILE) + os.remove(real_file) + os.remove(STATE_FILE) + + def GetNextItems(self): + """Get next items for binary search based on result of the last test run.""" + border_item = self.binary_search.GetNext() + index = self.all_items.index(border_item) + + next_bad_items = self.all_items[: index + 1] + next_good_items = self.all_items[index + 1 :] + list(self.known_good) + + return [next_bad_items, next_good_items] + + def ElapsedTimeString(self): + """Return h m s format of elapsed time since execution has started.""" + diff = int(time.time() - self.start_time) + seconds = diff % 60 + minutes = (diff // 60) % 60 + hours = diff // (60 * 60) + + seconds = str(seconds).rjust(2) + minutes = str(minutes).rjust(2) + hours = str(hours).rjust(2) + + return "%sh %sm %ss" % (hours, minutes, seconds) + + def _OutputProgress(self, progress_text): + """Output current progress of binary search to console and logs. + + Args: + progress_text: The progress to display to the user. 
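The symlink swap in SaveState() above can be reduced to the following standalone sketch (file names are illustrative); the final os.rename() is what makes the state update atomic on POSIX systems:

    import os
    import tempfile

    STATE_FILE = "STATE_FILE_DEMO"  # illustrative name only

    # Write new state to a unique hidden file.
    _, path = tempfile.mkstemp(prefix=".state.", dir=".")
    with open(path, "w", encoding="utf-8") as f:
        f.write("pickled-state-here")

    # Point a temporary symlink at it, then atomically replace STATE_FILE.
    temp_link = "%s.link" % path
    os.symlink(path, temp_link)
    os.rename(temp_link, STATE_FILE)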
+ """ + progress = ( + "\n***** PROGRESS (elapsed time: %s) *****\n" + "%s" + "************************************************" + ) + progress = progress % (self.ElapsedTimeString(), progress_text) + self.l.LogOutput(progress) + + def OutputIterationProgressBadItem(self): + out = ( + "Search %d of estimated %d.\n" + "Prune %d of max %d.\n" + "Current bad items found:\n" + "%s\n" + ) + out = out % ( + self.search_cycles + 1, + math.ceil(math.log(len(self.all_items), 2)), + self.prune_cycles + 1, + self.prune_iterations, + ", ".join(self.found_items), + ) + self._OutputProgress(out) + + def OutputIterationProgressBadPass(self): + out = "Search %d of estimated %d.\n" "Current limit: %s\n" + out = out % ( + self.search_cycles + 1, + math.ceil(math.log(self.binary_search.total, 2)), + self.binary_search.current, + ) + self._OutputProgress(out) + + def __str__(self): + ret = "" + ret += "all: %s\n" % str(self.all_items) + ret += "currently_good: %s\n" % str(self.currently_good_items) + ret += "currently_bad: %s\n" % str(self.currently_bad_items) + ret += str(self.binary_search) + return ret - # If pass not found, return None - return None - def BuildWithPassLimit(self, limit, generate_ir=False): - """Rebuild bad item with pass level bisect limit +class MockBinarySearchState(BinarySearchState): + """Mock class for BinarySearchState.""" + + def __init__(self, **kwargs): + # Initialize all arguments to None + default_kwargs = { + "get_initial_items": 'echo "1"', + "switch_to_good": None, + "switch_to_bad": None, + "test_setup_script": None, + "test_script": None, + "incremental": True, + "prune": False, + "pass_bisect": None, + "ir_diff": False, + "iterations": 50, + "prune_iterations": 100, + "verify": True, + "file_args": False, + "verbose": False, + } + default_kwargs.update(kwargs) + super(MockBinarySearchState, self).__init__(**default_kwargs) - Run command line script generated by GenerateBadCommandScript(), with - pass level limit flags. - Returns: - pass_num: current number of the pass, or total number of passes if - limit set to -1. - pass_name: The debugcounter name of current limit pass. - """ - os.environ['LIMIT_FLAGS'] = '-mllvm -opt-bisect-limit=' + str(limit) - if generate_ir: - os.environ['LIMIT_FLAGS'] += ' -S -emit-llvm' - self.l.LogOutput( - 'Limit flags: %s' % os.environ['LIMIT_FLAGS'], - print_to_console=self.verbose) - command = self.cmd_script - _, _, msg = self.ce.RunCommandWOutput(command, print_to_console=False) - - # Massages we get will be like this: - # BISECT: running pass (9) <Pass Description> on <function> (<file>) - # BISECT: running pass (10) <Pass Description> on <module> (<file>) - # BISECT: NOT running pass (11) <Pass Description> on <SCG> (<file>) - # BISECT: NOT running pass (12) <Pass Description> on <SCG> (<file>) - # We want to get the pass description of last running pass, to have - # transformation level bisect on it. 
- if 'BISECT: ' not in msg: - raise RuntimeError('No bisect info printed, OptBisect may not be ' - 'supported by the compiler.') - - lines = msg.split('\n') - pass_num = 0 - last_pass = '' - for l in lines: - if 'running pass' in l: - # For situation of limit==-1, we want the total number of passes - if limit != -1 and 'BISECT: NOT ' in l: - break - pass_num += 1 - last_pass = l - if limit not in (-1, pass_num): - raise ValueError('[Error] While building, limit number does not match.') - return pass_num, self.CollectPassName(last_pass) - - def BuildWithTransformLimit(self, - limit, - pass_name=None, - pass_limit=-1, - generate_ir=False): - """Rebuild bad item with transformation level bisect limit - - Run command line script generated by GenerateBadCommandScript(), with - pass level limit flags and transformation level limit flags. +def _CanonicalizeScript(script_name): + """Return canonical path to script. Args: - limit: transformation level limit for bad item. - pass_name: name of bad pass debugcounter from pass level bisect result. - pass_limit: pass level limit from pass level bisect result. - generate_ir: Whether to generate IR comparison. + script_name: Relative or absolute path to script Returns: - Total number of transformations if limit set to -1, else return 0. + Canonicalized script path """ - counter_name = pass_name - - os.environ['LIMIT_FLAGS'] = '-mllvm -opt-bisect-limit=' + \ - str(pass_limit) + \ - ' -mllvm -debug-counter=' + counter_name + \ - '-count=' + str(limit) + \ - ' -mllvm -print-debug-counter' - if generate_ir: - os.environ['LIMIT_FLAGS'] += ' -S -emit-llvm' - self.l.LogOutput( - 'Limit flags: %s' % os.environ['LIMIT_FLAGS'], - print_to_console=self.verbose) - command = self.cmd_script - _, _, msg = self.ce.RunCommandWOutput(command, print_to_console=False) - - if 'Counters and values:' not in msg: - # Print pass level IR diff only if transformation level bisection does - # not work. - if self.ir_diff: - self.PrintIRDiff(pass_limit) - raise RuntimeError('No bisect info printed, DebugCounter may not be ' - 'supported by the compiler.') - - # With debugcounter enabled, there will be DebugCounter counting info in - # the output. - lines = msg.split('\n') - for l in lines: - if pass_name in l: - # Output of debugcounter will be like: - # instcombine-visit: {10, 0, 20} - # dce-transform: {1, 0, -1} - # which indicates {Count, Skip, StopAfter}. - # The last number should be the limit we set. - # We want the first number as the total transformation count. - # Split each line by ,|{|} and we can get l_list as: - # ['instcombine: ', '10', '0', '20', ''] - # and we will need the second item in it. - l_list = re.split(',|{|}', l) - count = int(l_list[1]) - if limit == -1: - return count - # The returned value is only useful when limit == -1, which shows total - # transformation count. 
- return 0 - - def PrintIRDiff(self, pass_index, pass_name=None, trans_index=-1): - bad_item = list(self.found_items)[0] - self.l.LogOutput( - 'IR difference before and after bad pass/transformation:', - print_to_console=self.verbose) - - if trans_index == -1: - # Pass level IR diff - self.BuildWithPassLimit(pass_index, self.ir_diff) - good_ir = os.path.join(tempfile.tempdir, 'good.s') - shutil.copyfile(bad_item, good_ir) - pass_index += 1 - self.BuildWithPassLimit(pass_index, self.ir_diff) - else: - # Transformation level IR diff - self.BuildWithTransformLimit(trans_index, pass_name, pass_index, - self.ir_diff) - good_ir = os.path.join(tempfile.tempdir, 'good.s') - shutil.copyfile(bad_item, good_ir) - trans_index += 1 - self.BuildWithTransformLimit(trans_index, pass_name, pass_index, - self.ir_diff) - - bad_ir = os.path.join(tempfile.tempdir, 'bad.s') - shutil.copyfile(bad_item, bad_ir) - - command = 'diff %s %s' % (good_ir, bad_ir) - _, _, _ = self.ce.RunCommandWOutput(command, print_to_console=self.verbose) - - def DoSearchBadPass(self): - """Perform full search for bad pass of bad item.""" - logger.GetLogger().LogOutput('Starting to bisect bad pass for bad item.') - - # Pass level bisection - self.mode = 'pass' - self.binary_search = binary_search_perforce.BinarySearcherForPass( - logger_to_set=self.l) - self.binary_search.total, _ = self.BuildWithPassLimit(-1) - logger.GetLogger().LogOutput( - 'Total %s number: %d' % (self.mode, self.binary_search.total)) - - pass_index, pass_name = self.DoBinarySearchBadPass() - - if (not pass_name and pass_index == 0): - raise ValueError('Bisecting passes cannot reproduce good result.') - logger.GetLogger().LogOutput('Bad pass found: %s.' % pass_name) - - # Transformation level bisection. - logger.GetLogger().LogOutput('Starting to bisect at transformation level.') - - self.mode = 'transform' - self.binary_search = binary_search_perforce.BinarySearcherForPass( - logger_to_set=self.l) - self.binary_search.total = self.BuildWithTransformLimit( - -1, pass_name, pass_index) - logger.GetLogger().LogOutput( - 'Total %s number: %d' % (self.mode, self.binary_search.total)) - - trans_index, _ = self.DoBinarySearchBadPass(pass_index, pass_name) - if trans_index == 0: - raise ValueError('Bisecting %s cannot reproduce good result.' % pass_name) - - if self.ir_diff: - self.PrintIRDiff(pass_index, pass_name, trans_index) - - logger.GetLogger().LogOutput( - 'Bisection result for bad item %s:\n' - 'Bad pass: %s at number %d\n' - 'Bad transformation number: %d' % (self.found_items, pass_name, - pass_index, trans_index)) - - def DoBinarySearchBadPass(self, pass_index=-1, pass_name=None): - """Perform single iteration of binary search at pass level + script_name = os.path.expanduser(script_name) + if not script_name.startswith("/"): + return os.path.join(".", script_name) + + +def Run( + get_initial_items, + switch_to_good, + switch_to_bad, + test_script, + test_setup_script=None, + iterations=50, + prune=False, + pass_bisect=None, + ir_diff=False, + noincremental=False, + file_args=False, + verify=True, + prune_iterations=100, + verbose=False, + resume=False, +): + """Run binary search tool. + + Equivalent to running through terminal. Args: - pass_index: Works for transformation level bisection, indicates the limit - number of pass from pass level bisecting result. - pass_name: Works for transformation level bisection, indicates - DebugCounter name of the bad pass from pass level bisecting - result. 
+        get_initial_items: Script to enumerate all items being binary searched.
+        switch_to_good: Script that will take items as input and switch them to
+            the good set.
+        switch_to_bad: Script that will take items as input and switch them to
+            the bad set.
+        test_script: Script that will determine if the current combination of
+            good and bad items makes a "good" or "bad" result.
+        test_setup_script: Script to do necessary setup (building, compilation,
+            etc.) for test_script.
+        iterations: How many binary search iterations to run before exiting.
+        prune: If False, the binary search tool will stop when the first bad
+            item is found. Otherwise the binary search tool will continue
+            searching until all bad items are found (or prune_iterations is
+            reached).
+        pass_bisect: Script that takes a single bad item from POPULATE_BAD and
+            returns the compiler command used to generate the bad item. This
+            will turn on pass/transformation-level bisection for the bad item.
+            Requires that 'prune' be set to False, and needs support of
+            `-opt-bisect-limit` (pass) and `-print-debug-counter`
+            (transformation) from LLVM.
+        ir_diff: Whether to print IR differences before and after the bad
+            pass/transformation to verbose output. Defaults to False; only
+            works when pass_bisect is enabled.
+        noincremental: Whether to send "diffs" of good/bad items to switch
+            scripts.
+        file_args: If True, arguments to switch scripts will be a file name
+            containing a newline-separated list of the items to switch.
+        verify: If True, run tests to ensure the initial good/bad sets actually
+            produce a good/bad result.
+        prune_iterations: Max number of bad items to search for.
+        verbose: If True, will print extra debug information to the user.
+        resume: If True, will resume using STATE_FILE.
+
+    Returns:
+        0 for success, error otherwise
+    """
+    # Notice that all the argument checks are in the Run() function rather
+    # than in the Main() function. It is not common to do so, but some
+    # wrappers are going to call Run() directly and bypass the checks in the
+    # Main() function.
+    if resume:
+        logger.GetLogger().LogOutput("Resuming from %s" % STATE_FILE)
+        bss = BinarySearchState.LoadState()
+        if not bss:
+            logger.GetLogger().LogOutput(
+                "%s is not a valid binary_search_tool state file, cannot resume!"
+                % STATE_FILE
+            )
+            return 1
+        logger.GetLogger().LogOutput(
+            "Note: resuming from previous state, "
+            "ignoring given options and loading saved "
+            "options instead."
+        )
+    else:
-      self.resumed = False
-
-    terminated = False
-    index = 0
-    while self.search_cycles < self.iterations and not terminated:
-      self.SaveState()
-      self.OutputIterationProgressBadPass()
-
-      self.search_cycles += 1
-      current = self.binary_search.GetNext()
-
-      if self.mode == 'pass':
-        index, pass_name = self.BuildWithPassLimit(current)
-      else:
-        self.BuildWithTransformLimit(current, pass_name, pass_index)
-        index = current
-
-      # TODO: Newly generated object should not directly replace original
-      # one, need to put it somewhere and symbol link original one to it.
-      # Will update cmd_script to do it.
-
-      status = self.TestSetupScript()
-      assert status == 0, 'Test setup should succeed.'
- status = self.TestScript() - terminated = self.binary_search.SetStatus(status) - - if terminated: - self.l.LogOutput('Terminated!', print_to_console=self.verbose) - if not terminated: - self.l.LogOutput('Ran out of iterations searching...') - self.l.LogOutput(str(self), print_to_console=self.verbose) - return index, pass_name - - def PopulateItemsUsingCommand(self, command): - """Update all_items and binary search logic from executable. - - This method is mainly required for enumerating the initial list of items - from the get_initial_items script. - - Args: - command: path to executable that will enumerate items. - """ - ce = command_executer.GetCommandExecuter() - _, out, _ = ce.RunCommandWExceptionCleanup( - command, return_output=True, print_to_console=self.verbose) - all_items = out.split() - self.PopulateItemsUsingList(all_items) - - def PopulateItemsUsingList(self, all_items): - """Update all_items and binary searching logic from list. - - Args: - all_items: new list of all_items - """ - self.all_items = all_items - self.binary_search = binary_search_perforce.BinarySearcher( - logger_to_set=self.l) - self.binary_search.SetSortedList(self.all_items) - - def SaveState(self): - """Save state to STATE_FILE. - - SaveState will create a new unique, hidden state file to hold data from - object. Then atomically overwrite the STATE_FILE symlink to point to the - new data. - - Raises: - OSError if STATE_FILE already exists but is not a symlink. - """ - ce, l = self.ce, self.l - self.ce, self.l, self.binary_search.logger = None, None, None - old_state = None - - _, path = tempfile.mkstemp(prefix=HIDDEN_STATE_FILE, dir='.') - with open(path, 'wb') as f: - pickle.dump(self, f) - - if os.path.exists(STATE_FILE): - if os.path.islink(STATE_FILE): - old_state = os.readlink(STATE_FILE) - else: - raise OSError(('%s already exists and is not a symlink!\n' - 'State file saved to %s' % (STATE_FILE, path))) - - # Create new link and atomically overwrite old link - temp_link = '%s.link' % HIDDEN_STATE_FILE - os.symlink(path, temp_link) - os.rename(temp_link, STATE_FILE) - - if old_state: - os.remove(old_state) - - self.ce, self.l, self.binary_search.logger = ce, l, l - - @classmethod - def LoadState(cls): - """Create BinarySearchState object from STATE_FILE.""" - if not os.path.isfile(STATE_FILE): - return None - try: - with open(STATE_FILE, 'rb') as f: - bss = pickle.load(f) - bss.l = logger.GetLogger() - bss.ce = command_executer.GetCommandExecuter() - bss.binary_search.logger = bss.l - bss.start_time = time.time() - - # Set resumed to be True so we can enter DoBinarySearch without the - # method resetting our current search_cycles to 0. - bss.resumed = True - - # Set currently_good_items and currently_bad_items to empty so that the - # first iteration after resuming will always be non-incremental. This - # is just in case the environment changes, the user makes manual - # changes, or a previous switch_script corrupted the environment. 
- bss.currently_good_items = set() - bss.currently_bad_items = set() - - binary_search_perforce.verbose = bss.verbose - return bss - except Exception: - return None - - def RemoveState(self): - """Remove STATE_FILE and its symlinked data from file system.""" - if os.path.exists(STATE_FILE): - if os.path.islink(STATE_FILE): - real_file = os.readlink(STATE_FILE) - os.remove(real_file) - os.remove(STATE_FILE) - - def GetNextItems(self): - """Get next items for binary search based on result of the last test run.""" - border_item = self.binary_search.GetNext() - index = self.all_items.index(border_item) - - next_bad_items = self.all_items[:index + 1] - next_good_items = self.all_items[index + 1:] + list(self.known_good) - - return [next_bad_items, next_good_items] - - def ElapsedTimeString(self): - """Return h m s format of elapsed time since execution has started.""" - diff = int(time.time() - self.start_time) - seconds = diff % 60 - minutes = (diff // 60) % 60 - hours = diff // (60 * 60) - - seconds = str(seconds).rjust(2) - minutes = str(minutes).rjust(2) - hours = str(hours).rjust(2) - - return '%sh %sm %ss' % (hours, minutes, seconds) - - def _OutputProgress(self, progress_text): - """Output current progress of binary search to console and logs. - - Args: - progress_text: The progress to display to the user. - """ - progress = ('\n***** PROGRESS (elapsed time: %s) *****\n' - '%s' - '************************************************') - progress = progress % (self.ElapsedTimeString(), progress_text) - self.l.LogOutput(progress) - - def OutputIterationProgressBadItem(self): - out = ('Search %d of estimated %d.\n' - 'Prune %d of max %d.\n' - 'Current bad items found:\n' - '%s\n') - out = out % (self.search_cycles + 1, - math.ceil(math.log(len(self.all_items), 2)), self.prune_cycles - + 1, self.prune_iterations, ', '.join(self.found_items)) - self._OutputProgress(out) - - def OutputIterationProgressBadPass(self): - out = ('Search %d of estimated %d.\n' 'Current limit: %s\n') - out = out % (self.search_cycles + 1, - math.ceil(math.log(self.binary_search.total, 2)), - self.binary_search.current) - self._OutputProgress(out) - - def __str__(self): - ret = '' - ret += 'all: %s\n' % str(self.all_items) - ret += 'currently_good: %s\n' % str(self.currently_good_items) - ret += 'currently_bad: %s\n' % str(self.currently_bad_items) - ret += str(self.binary_search) - return ret - - -class MockBinarySearchState(BinarySearchState): - """Mock class for BinarySearchState.""" - - def __init__(self, **kwargs): - # Initialize all arguments to None - default_kwargs = { - 'get_initial_items': 'echo "1"', - 'switch_to_good': None, - 'switch_to_bad': None, - 'test_setup_script': None, - 'test_script': None, - 'incremental': True, - 'prune': False, - 'pass_bisect': None, - 'ir_diff': False, - 'iterations': 50, - 'prune_iterations': 100, - 'verify': True, - 'file_args': False, - 'verbose': False - } - default_kwargs.update(kwargs) - super(MockBinarySearchState, self).__init__(**default_kwargs) - - -def _CanonicalizeScript(script_name): - """Return canonical path to script. 
- - Args: - script_name: Relative or absolute path to script - - Returns: - Canonicalized script path - """ - script_name = os.path.expanduser(script_name) - if not script_name.startswith('/'): - return os.path.join('.', script_name) - - -def Run(get_initial_items, - switch_to_good, - switch_to_bad, - test_script, - test_setup_script=None, - iterations=50, - prune=False, - pass_bisect=None, - ir_diff=False, - noincremental=False, - file_args=False, - verify=True, - prune_iterations=100, - verbose=False, - resume=False): - """Run binary search tool. - - Equivalent to running through terminal. - - Args: - get_initial_items: Script to enumerate all items being binary searched - switch_to_good: Script that will take items as input and switch them to good - set - switch_to_bad: Script that will take items as input and switch them to bad - set - test_script: Script that will determine if the current combination of good - and bad items make a "good" or "bad" result. - test_setup_script: Script to do necessary setup (building, compilation, - etc.) for test_script. - iterations: How many binary search iterations to run before exiting. - prune: If False the binary search tool will stop when the first bad item is - found. Otherwise then binary search tool will continue searching until all - bad items are found (or prune_iterations is reached). - pass_bisect: Script that takes single bad item from POPULATE_BAD and returns - the compiler command used to generate the bad item. This will turn on - pass/ transformation level bisection for the bad item. Requires that - 'prune' be set to False, and needs support of `-opt-bisect-limit`(pass) - and `-print-debug-counter`(transformation) from LLVM. - ir_diff: Whether to print IR differences before and after bad - pass/transformation to verbose output. Defaults to False, only works when - pass_bisect is enabled. - noincremental: Whether to send "diffs" of good/bad items to switch scripts. - file_args: If True then arguments to switch scripts will be a file name - containing a newline separated list of the items to switch. - verify: If True, run tests to ensure initial good/bad sets actually produce - a good/bad result. - prune_iterations: Max number of bad items to search for. - verbose: If True will print extra debug information to user. - resume: If True will resume using STATE_FILE. - - Returns: - 0 for success, error otherwise - """ - # Notice that all the argument checks are in the Run() function rather than - # in the Main() function. It is not common to do so but some wrappers are - # going to call Run() directly and bypass checks in Main() function. - if resume: - logger.GetLogger().LogOutput('Resuming from %s' % STATE_FILE) - bss = BinarySearchState.LoadState() - if not bss: - logger.GetLogger().LogOutput( - '%s is not a valid binary_search_tool state file, cannot resume!' 
% - STATE_FILE) - return 1 - logger.GetLogger().LogOutput('Note: resuming from previous state, ' - 'ignoring given options and loading saved ' - 'options instead.') - else: - if not (get_initial_items and switch_to_good and switch_to_bad and - test_script): - logger.GetLogger().LogOutput('The following options are required: ' - '[-i, -g, -b, -t] | [-r]') - return 1 - if pass_bisect and prune: - logger.GetLogger().LogOutput('"--pass_bisect" only works when ' - '"--prune" is set to be False.') - return 1 - if not pass_bisect and ir_diff: - logger.GetLogger().LogOutput('"--ir_diff" only works when ' - '"--pass_bisect" is enabled.') - - switch_to_good = _CanonicalizeScript(switch_to_good) - switch_to_bad = _CanonicalizeScript(switch_to_bad) - if test_setup_script: - test_setup_script = _CanonicalizeScript(test_setup_script) + if not ( + get_initial_items + and switch_to_good + and switch_to_bad + and test_script + ): + logger.GetLogger().LogOutput( + "The following options are required: " "[-i, -g, -b, -t] | [-r]" + ) + return 1 + if pass_bisect and prune: + logger.GetLogger().LogOutput( + '"--pass_bisect" only works when ' + '"--prune" is set to be False.' + ) + return 1 + if not pass_bisect and ir_diff: + logger.GetLogger().LogOutput( + '"--ir_diff" only works when ' '"--pass_bisect" is enabled.' + ) + + switch_to_good = _CanonicalizeScript(switch_to_good) + switch_to_bad = _CanonicalizeScript(switch_to_bad) + if test_setup_script: + test_setup_script = _CanonicalizeScript(test_setup_script) + if pass_bisect: + pass_bisect = _CanonicalizeScript(pass_bisect) + test_script = _CanonicalizeScript(test_script) + get_initial_items = _CanonicalizeScript(get_initial_items) + incremental = not noincremental + + binary_search_perforce.verbose = verbose + + bss = BinarySearchState( + get_initial_items, + switch_to_good, + switch_to_bad, + test_setup_script, + test_script, + incremental, + prune, + pass_bisect, + ir_diff, + iterations, + prune_iterations, + verify, + file_args, + verbose, + ) + bss.DoVerify() + + bss.DoSearchBadItems() if pass_bisect: - pass_bisect = _CanonicalizeScript(pass_bisect) - test_script = _CanonicalizeScript(test_script) - get_initial_items = _CanonicalizeScript(get_initial_items) - incremental = not noincremental - - binary_search_perforce.verbose = verbose - - bss = BinarySearchState(get_initial_items, switch_to_good, switch_to_bad, - test_setup_script, test_script, incremental, prune, - pass_bisect, ir_diff, iterations, prune_iterations, - verify, file_args, verbose) - bss.DoVerify() - - bss.DoSearchBadItems() - if pass_bisect: - bss.DoSearchBadPass() - bss.RemoveState() - logger.GetLogger().LogOutput( - 'Total execution time: %s' % bss.ElapsedTimeString()) + bss.DoSearchBadPass() + bss.RemoveState() + logger.GetLogger().LogOutput( + "Total execution time: %s" % bss.ElapsedTimeString() + ) - return 0 + return 0 def Main(argv): - """The main function.""" - # Common initializations + """The main function.""" + # Common initializations - parser = argparse.ArgumentParser() - common.BuildArgParser(parser) - logger.GetLogger().LogOutput(' '.join(argv)) - options = parser.parse_args(argv) + parser = argparse.ArgumentParser() + common.BuildArgParser(parser) + logger.GetLogger().LogOutput(" ".join(argv)) + options = parser.parse_args(argv) - # Get dictionary of all options - args = vars(options) - return Run(**args) + # Get dictionary of all options + args = vars(options) + return Run(**args) -if __name__ == '__main__': - sys.exit(Main(sys.argv[1:])) +if __name__ == "__main__": + 
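Since the docstring above notes that some wrappers call Run() directly rather than going through Main(), a brief usage sketch follows (it assumes this module is importable as binary_search_state; the script paths are illustrative):

    import binary_search_state

    # Equivalent to the -i/-g/-b/-t command line options; prune and
    # file_args are optional keyword arguments documented above.
    ret = binary_search_state.Run(
        get_initial_items="./get_initial_items.sh",
        switch_to_good="./switch_to_good.sh",
        switch_to_bad="./switch_to_bad.sh",
        test_script="./test.sh",
        prune=True,
        file_args=True,
    )
    print("binary search returned:", ret)  # 0 on success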
sys.exit(Main(sys.argv[1:])) diff --git a/binary_search_tool/bisect_driver.py b/binary_search_tool/bisect_driver.py index b94266c7..2f6cd85b 100644 --- a/binary_search_tool/bisect_driver.py +++ b/binary_search_tool/bisect_driver.py @@ -25,394 +25,408 @@ import contextlib import fcntl import os import shutil -import subprocess import stat +import subprocess import sys -VALID_MODES = ('POPULATE_GOOD', 'POPULATE_BAD', 'TRIAGE') -GOOD_CACHE = 'good' -BAD_CACHE = 'bad' -LIST_FILE = os.path.join(GOOD_CACHE, '_LIST') -CONTINUE_ON_MISSING = os.environ.get('BISECT_CONTINUE_ON_MISSING', None) == '1' -CONTINUE_ON_REDUNDANCY = os.environ.get('BISECT_CONTINUE_ON_REDUNDANCY', - None) == '1' -WRAPPER_SAFE_MODE = os.environ.get('BISECT_WRAPPER_SAFE_MODE', None) == '1' +VALID_MODES = ("POPULATE_GOOD", "POPULATE_BAD", "TRIAGE") +GOOD_CACHE = "good" +BAD_CACHE = "bad" +LIST_FILE = os.path.join(GOOD_CACHE, "_LIST") + +CONTINUE_ON_MISSING = os.environ.get("BISECT_CONTINUE_ON_MISSING", None) == "1" +CONTINUE_ON_REDUNDANCY = ( + os.environ.get("BISECT_CONTINUE_ON_REDUNDANCY", None) == "1" +) +WRAPPER_SAFE_MODE = os.environ.get("BISECT_WRAPPER_SAFE_MODE", None) == "1" class Error(Exception): - """The general compiler wrapper error class.""" + """The general compiler wrapper error class.""" @contextlib.contextmanager def lock_file(path, mode): - """Lock file and block if other process has lock on file. - - Acquire exclusive lock for file. Only blocks other processes if they attempt - to also acquire lock through this method. If only reading (modes 'r' and 'rb') - then the lock is shared (i.e. many reads can happen concurrently, but only one - process may write at a time). - - This function is a contextmanager, meaning it's meant to be used with the - "with" statement in Python. This is so cleanup and setup happens automatically - and cleanly. Execution of the outer "with" statement happens at the "yield" - statement. Execution resumes after the yield when the outer "with" statement - ends. - - Args: - path: path to file being locked - mode: mode to open file with ('w', 'r', etc.) - """ - with open(path, mode) as f: - # Apply FD_CLOEXEC argument to fd. This ensures that the file descriptor - # won't be leaked to any child processes. - current_args = fcntl.fcntl(f.fileno(), fcntl.F_GETFD) - fcntl.fcntl(f.fileno(), fcntl.F_SETFD, current_args | fcntl.FD_CLOEXEC) - - # Reads can share the lock as no race conditions exist. If write is needed, - # give writing process exclusive access to the file. - if f.mode == 'r' or f.mode == 'rb': - lock_type = fcntl.LOCK_SH - else: - lock_type = fcntl.LOCK_EX - - try: - fcntl.lockf(f, lock_type) - yield f - f.flush() - finally: - fcntl.lockf(f, fcntl.LOCK_UN) + """Lock file and block if other process has lock on file. + + Acquire exclusive lock for file. Only blocks other processes if they attempt + to also acquire lock through this method. If only reading (modes 'r' and 'rb') + then the lock is shared (i.e. many reads can happen concurrently, but only one + process may write at a time). + + This function is a contextmanager, meaning it's meant to be used with the + "with" statement in Python. This is so cleanup and setup happens automatically + and cleanly. Execution of the outer "with" statement happens at the "yield" + statement. Execution resumes after the yield when the outer "with" statement + ends. + + Args: + path: path to file being locked + mode: mode to open file with ('w', 'r', etc.) + """ + with open(path, mode) as f: + # Apply FD_CLOEXEC argument to fd. 
This ensures that the file descriptor + # won't be leaked to any child processes. + current_args = fcntl.fcntl(f.fileno(), fcntl.F_GETFD) + fcntl.fcntl(f.fileno(), fcntl.F_SETFD, current_args | fcntl.FD_CLOEXEC) + + # Reads can share the lock as no race conditions exist. If write is needed, + # give writing process exclusive access to the file. + if f.mode == "r" or f.mode == "rb": + lock_type = fcntl.LOCK_SH + else: + lock_type = fcntl.LOCK_EX + + try: + fcntl.lockf(f, lock_type) + yield f + f.flush() + finally: + fcntl.lockf(f, fcntl.LOCK_UN) def log_to_file(path, execargs, link_from=None, link_to=None): - """Common logging function. + """Common logging function. - Log current working directory, current execargs, and a from-to relationship - between files. - """ - with lock_file(path, 'a') as log: - log.write('cd: %s; %s\n' % (os.getcwd(), ' '.join(execargs))) - if link_from and link_to: - log.write('%s -> %s\n' % (link_from, link_to)) + Log current working directory, current execargs, and a from-to relationship + between files. + """ + with lock_file(path, "a") as log: + log.write("cd: %s; %s\n" % (os.getcwd(), " ".join(execargs))) + if link_from and link_to: + log.write("%s -> %s\n" % (link_from, link_to)) def exec_and_return(execargs): - """Execute process and return. + """Execute process and return. - Execute according to execargs and return immediately. Don't inspect - stderr or stdout. - """ - return subprocess.call(execargs) + Execute according to execargs and return immediately. Don't inspect + stderr or stdout. + """ + return subprocess.call(execargs) def which_cache(obj_file): - """Determine which cache an object belongs to. - - The binary search tool creates two files for each search iteration listing - the full set of bad objects and full set of good objects. We use this to - determine where an object file should be linked from (good or bad). - """ - bad_set_file = os.environ.get('BISECT_BAD_SET') - if in_object_list(obj_file, bad_set_file): - return BAD_CACHE - else: - return GOOD_CACHE + """Determine which cache an object belongs to. + + The binary search tool creates two files for each search iteration listing + the full set of bad objects and full set of good objects. We use this to + determine where an object file should be linked from (good or bad). + """ + bad_set_file = os.environ.get("BISECT_BAD_SET") + if in_object_list(obj_file, bad_set_file): + return BAD_CACHE + else: + return GOOD_CACHE def makedirs(path): - """Try to create directories in path.""" - try: - os.makedirs(path) - except os.error: - if not os.path.isdir(path): - raise + """Try to create directories in path.""" + try: + os.makedirs(path) + except os.error: + if not os.path.isdir(path): + raise def get_obj_path(execargs): - """Get the object path for the object file in the list of arguments. - - Returns: - Absolute object path from execution args (-o argument). If no object being - outputted, then return empty string. -o argument is checked only if -c is - also present. - """ - try: - i = execargs.index('-o') - _ = execargs.index('-c') - except ValueError: - return '' - - obj_path = execargs[i + 1] - # Ignore args that do not create a file. - if obj_path in ( - '-', - '/dev/null', - ): - return '' - # Ignore files ending in .tmp. - if obj_path.endswith(('.tmp',)): - return '' - # Ignore configuration files generated by Automake/Autoconf/CMake etc. 
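A brief usage sketch of the lock_file() contextmanager defined above: append mode takes an exclusive lock, so concurrent compiler-wrapper processes serialize their writes to the shared list file (the path is illustrative):

    # Appends from many wrapper processes are serialized; readers opening
    # the same file with mode "r" take a shared lock instead.
    with lock_file("good/_LIST", "a") as object_list:
        object_list.write("/abs/path/to/foo.o\n")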
- if (obj_path.endswith('conftest.o') or - obj_path.endswith('CMakeFiles/test.o') or - obj_path.find('CMakeTmp') != -1 or - os.path.abspath(obj_path).find('CMakeTmp') != -1): - return '' - - return os.path.abspath(obj_path) + """Get the object path for the object file in the list of arguments. + + Returns: + Absolute object path from execution args (-o argument). If no object being + outputted, then return empty string. -o argument is checked only if -c is + also present. + """ + try: + i = execargs.index("-o") + _ = execargs.index("-c") + except ValueError: + return "" + + obj_path = execargs[i + 1] + # Ignore args that do not create a file. + if obj_path in ( + "-", + "/dev/null", + ): + return "" + # Ignore files ending in .tmp. + if obj_path.endswith((".tmp",)): + return "" + # Ignore configuration files generated by Automake/Autoconf/CMake etc. + if ( + obj_path.endswith("conftest.o") + or obj_path.endswith("CMakeFiles/test.o") + or obj_path.find("CMakeTmp") != -1 + or os.path.abspath(obj_path).find("CMakeTmp") != -1 + ): + return "" + + return os.path.abspath(obj_path) def get_dep_path(execargs): - """Get the dep file path for the dep file in the list of arguments. + """Get the dep file path for the dep file in the list of arguments. - Returns: - Absolute path of dependency file path from execution args (-o argument). If - no dependency being outputted then return empty string. - """ - if '-MD' not in execargs and '-MMD' not in execargs: - return '' + Returns: + Absolute path of dependency file path from execution args (-o argument). If + no dependency being outputted then return empty string. + """ + if "-MD" not in execargs and "-MMD" not in execargs: + return "" - # If -MF is given this is the path of the dependency file. Otherwise the - # dependency file is the value of -o but with a .d extension - if '-MF' in execargs: - i = execargs.index('-MF') - dep_path = execargs[i + 1] - return os.path.abspath(dep_path) + # If -MF is given this is the path of the dependency file. Otherwise the + # dependency file is the value of -o but with a .d extension + if "-MF" in execargs: + i = execargs.index("-MF") + dep_path = execargs[i + 1] + return os.path.abspath(dep_path) - full_obj_path = get_obj_path(execargs) - if not full_obj_path: - return '' + full_obj_path = get_obj_path(execargs) + if not full_obj_path: + return "" - return full_obj_path[:-2] + '.d' + return full_obj_path[:-2] + ".d" def get_dwo_path(execargs): - """Get the dwo file path for the dwo file in the list of arguments. + """Get the dwo file path for the dwo file in the list of arguments. - Returns: - Absolute dwo file path from execution args (-gsplit-dwarf argument) If no - dwo file being outputted then return empty string. - """ - if '-gsplit-dwarf' not in execargs: - return '' + Returns: + Absolute dwo file path from execution args (-gsplit-dwarf argument) If no + dwo file being outputted then return empty string. 
+ """ + if "-gsplit-dwarf" not in execargs: + return "" - full_obj_path = get_obj_path(execargs) - if not full_obj_path: - return '' + full_obj_path = get_obj_path(execargs) + if not full_obj_path: + return "" - return full_obj_path[:-2] + '.dwo' + return full_obj_path[:-2] + ".dwo" def in_object_list(obj_name, list_filename): - """Check if object file name exist in file with object list.""" - if not obj_name: - return False + """Check if object file name exist in file with object list.""" + if not obj_name: + return False - with lock_file(list_filename, 'r') as list_file: - for line in list_file: - if line.strip() == obj_name: - return True + with lock_file(list_filename, "r") as list_file: + for line in list_file: + if line.strip() == obj_name: + return True - return False + return False def get_side_effects(execargs): - """Determine side effects generated by compiler + """Determine side effects generated by compiler - Returns: - List of paths of objects that the compiler generates as side effects. - """ - side_effects = [] + Returns: + List of paths of objects that the compiler generates as side effects. + """ + side_effects = [] - # Cache dependency files - full_dep_path = get_dep_path(execargs) - if full_dep_path: - side_effects.append(full_dep_path) + # Cache dependency files + full_dep_path = get_dep_path(execargs) + if full_dep_path: + side_effects.append(full_dep_path) - # Cache dwo files - full_dwo_path = get_dwo_path(execargs) - if full_dwo_path: - side_effects.append(full_dwo_path) + # Cache dwo files + full_dwo_path = get_dwo_path(execargs) + if full_dwo_path: + side_effects.append(full_dwo_path) - return side_effects + return side_effects def cache_file(execargs, bisect_dir, cache, abs_file_path): - """Cache compiler output file (.o/.d/.dwo). - - Args: - execargs: compiler execution arguments. - bisect_dir: The directory where bisection caches live. - cache: Which cache the file will be cached to (GOOD/BAD). - abs_file_path: Absolute path to file being cached. - - Returns: - True if caching was successful, False otherwise. - """ - # os.path.join fails with absolute paths, use + instead - bisect_path = os.path.join(bisect_dir, cache) + abs_file_path - bisect_path_dir = os.path.dirname(bisect_path) - makedirs(bisect_path_dir) - pop_log = os.path.join(bisect_dir, cache, '_POPULATE_LOG') - log_to_file(pop_log, execargs, abs_file_path, bisect_path) - - try: - if os.path.exists(abs_file_path): - if os.path.exists(bisect_path): - # File exists - population_dir = os.path.join(bisect_dir, cache) - with lock_file(os.path.join(population_dir, '_DUPS'), - 'a') as dup_object_list: - dup_object_list.write('%s\n' % abs_file_path) - if CONTINUE_ON_REDUNDANCY: - return True - raise Exception( - 'Trying to cache file %s multiple times. To avoid the error, set ' \ - 'BISECT_CONTINUE_ON_REDUNDANCY to 1. For reference, the list of ' \ - 'such files will be written to %s' % (abs_file_path, os.path.join( - population_dir, '_DUPS'))) - - shutil.copy2(abs_file_path, bisect_path) - # Set cache object to be read-only so later compilations can't - # accidentally overwrite it. - os.chmod(bisect_path, 0o444) - return True - else: - # File not found (happens when compilation fails but error code is still - # 0) - return False - except Exception: - print('Could not cache file %s' % abs_file_path, file=sys.stderr) - raise + """Cache compiler output file (.o/.d/.dwo). + + Args: + execargs: compiler execution arguments. + bisect_dir: The directory where bisection caches live. 
+ cache: Which cache the file will be cached to (GOOD/BAD). + abs_file_path: Absolute path to file being cached. + + Returns: + True if caching was successful, False otherwise. + """ + # os.path.join fails with absolute paths, use + instead + bisect_path = os.path.join(bisect_dir, cache) + abs_file_path + bisect_path_dir = os.path.dirname(bisect_path) + makedirs(bisect_path_dir) + pop_log = os.path.join(bisect_dir, cache, "_POPULATE_LOG") + log_to_file(pop_log, execargs, abs_file_path, bisect_path) + + try: + if os.path.exists(abs_file_path): + if os.path.exists(bisect_path): + # File exists + population_dir = os.path.join(bisect_dir, cache) + with lock_file( + os.path.join(population_dir, "_DUPS"), "a" + ) as dup_object_list: + dup_object_list.write("%s\n" % abs_file_path) + if CONTINUE_ON_REDUNDANCY: + return True + raise Exception( + "Trying to cache file %s multiple times. To avoid the error, set " + "BISECT_CONTINUE_ON_REDUNDANCY to 1. For reference, the list of " + "such files will be written to %s" + % (abs_file_path, os.path.join(population_dir, "_DUPS")) + ) + + shutil.copy2(abs_file_path, bisect_path) + # Set cache object to be read-only so later compilations can't + # accidentally overwrite it. + os.chmod(bisect_path, 0o444) + return True + else: + # File not found (happens when compilation fails but error code is still + # 0) + return False + except Exception: + print("Could not cache file %s" % abs_file_path, file=sys.stderr) + raise def restore_file(bisect_dir, cache, abs_file_path): - """Restore file from cache (.o/.d/.dwo). - - Args: - bisect_dir: The directory where bisection caches live. - cache: Which cache the file will be restored from (GOOD/BAD). - abs_file_path: Absolute path to file being restored. - """ - # os.path.join fails with absolute paths, use + instead - cached_path = os.path.join(bisect_dir, cache) + abs_file_path - if os.path.exists(cached_path): - if os.path.exists(abs_file_path): - os.remove(abs_file_path) - shutil.copy2(cached_path, abs_file_path) - # Add write permission to the restored object files as some packages - # (such as kernels) may need write permission to delete files. - os.chmod(abs_file_path, os.stat(abs_file_path).st_mode | stat.S_IWUSR) - else: - raise Error(('%s is missing from %s cache! Unsure how to proceed. Make ' - 'will now crash.' % (cache, cached_path))) + """Restore file from cache (.o/.d/.dwo). + + Args: + bisect_dir: The directory where bisection caches live. + cache: Which cache the file will be restored from (GOOD/BAD). + abs_file_path: Absolute path to file being restored. + """ + # os.path.join fails with absolute paths, use + instead + cached_path = os.path.join(bisect_dir, cache) + abs_file_path + if os.path.exists(cached_path): + if os.path.exists(abs_file_path): + os.remove(abs_file_path) + shutil.copy2(cached_path, abs_file_path) + # Add write permission to the restored object files as some packages + # (such as kernels) may need write permission to delete files. + os.chmod(abs_file_path, os.stat(abs_file_path).st_mode | stat.S_IWUSR) + else: + raise Error( + ( + "%s is missing from %s cache! Unsure how to proceed. Make " + "will now crash." % (cache, cached_path) + ) + ) def bisect_populate(execargs, bisect_dir, population_name): - """Add necessary information to the bisect cache for the given execution. - - Extract the necessary information for bisection from the compiler - execution arguments and put it into the bisection cache. 
This
-  includes copying the created object file, adding the object
-  file path to the cache list and keeping a log of the execution.
-
-  Args:
-    execargs: compiler execution arguments.
-    bisect_dir: bisection directory.
-    population_name: name of the cache being populated (good/bad).
-  """
-  retval = exec_and_return(execargs)
-  if retval:
-    return retval
-
-  full_obj_path = get_obj_path(execargs)
-  # This is not a normal compiler call because it doesn't have a -o argument,
-  # or the -o argument has an unusable output file.
-  # It's likely that this compiler call was actually made to invoke the linker,
-  # or as part of a configuratoin test. In this case we want to simply call the
-  # compiler and return.
-  if not full_obj_path:
-    return retval
-
-  # Return if not able to cache the object file
-  if not cache_file(execargs, bisect_dir, population_name, full_obj_path):
-    return retval
+    """Add necessary information to the bisect cache for the given execution.
+
+    Extract the necessary information for bisection from the compiler
+    execution arguments and put it into the bisection cache. This
+    includes copying the created object file, adding the object
+    file path to the cache list and keeping a log of the execution.
+
+    Args:
+      execargs: compiler execution arguments.
+      bisect_dir: bisection directory.
+      population_name: name of the cache being populated (good/bad).
+    """
+    retval = exec_and_return(execargs)
+    if retval:
+        return retval

-  population_dir = os.path.join(bisect_dir, population_name)
-  with lock_file(os.path.join(population_dir, '_LIST'), 'a') as object_list:
-    object_list.write('%s\n' % full_obj_path)
+    full_obj_path = get_obj_path(execargs)
+    # This is not a normal compiler call because it doesn't have a -o argument,
+    # or the -o argument has an unusable output file.
+    # It's likely that this compiler call was actually made to invoke the linker,
+    # or as part of a configuration test. In this case we want to simply call the
+    # compiler and return.
+    if not full_obj_path:
+        return retval

-  for side_effect in get_side_effects(execargs):
-    _ = cache_file(execargs, bisect_dir, population_name, side_effect)
+    # Return if not able to cache the object file
+    if not cache_file(execargs, bisect_dir, population_name, full_obj_path):
+        return retval

-  return retval
+    population_dir = os.path.join(bisect_dir, population_name)
+    with lock_file(os.path.join(population_dir, "_LIST"), "a") as object_list:
+        object_list.write("%s\n" % full_obj_path)

+    for side_effect in get_side_effects(execargs):
+        _ = cache_file(execargs, bisect_dir, population_name, side_effect)

-def bisect_triage(execargs, bisect_dir):
-  """Use object object file from appropriate cache (good/bad).
-
-  Given a populated bisection directory, use the object file saved
-  into one of the caches (good/bad) according to what is specified
-  in the good/bad sets. The good/bad sets are generated by the
-  high level binary search tool. Additionally restore any possible
-  side effects of compiler.
-
-  Args:
-    execargs: compiler execution arguments.
-    bisect_dir: populated bisection directory.
-  """
-  full_obj_path = get_obj_path(execargs)
-  obj_list = os.path.join(bisect_dir, LIST_FILE)
-
-  # If the output isn't an object file just call compiler
-  if not full_obj_path:
-    return exec_and_return(execargs)
-
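
Two implementation details in the functions above are easy to miss, so here is
a short, self-contained sketch of both (every name and path in it is invented
for illustration; this is not code from the change). First, an invocation is
only cached when it looks like a real compile: get_obj_path requires both -c
and -o, and get_dep_path derives the .d path from the object path when
-MD/-MMD is given without -MF. Second, cache_file/restore_file mirror an
absolute object path under the cache root with string concatenation, because
os.path.join would discard everything before an absolute component:

    import os

    def toy_obj_path(execargs):
        # Mirrors get_obj_path: a cacheable compile needs both -c and -o;
        # the object file is the argument right after -o.
        try:
            i = execargs.index("-o")
            _ = execargs.index("-c")
        except ValueError:
            return ""
        return os.path.abspath(execargs[i + 1])

    execargs = ["clang", "-c", "foo.c", "-MD", "-o", "foo.o"]  # made-up call
    obj = toy_obj_path(execargs)
    # -MD without -MF: the dep file sits next to the object, foo.o -> foo.d.
    dep = obj[:-2] + ".d" if obj else ""
    print(obj, dep)

    bisect_dir = "/tmp/sysroot_bisect"  # hypothetical cache root
    cache = "GOOD"
    abs_file_path = "/build/board/foo.o"
    # os.path.join restarts at the absolute component and loses the prefix:
    print(os.path.join(bisect_dir, cache, abs_file_path))
    # -> /build/board/foo.o
    # Concatenation keeps the object's full path under the cache root, which
    # is what cache_file and restore_file rely on:
    print(os.path.join(bisect_dir, cache) + abs_file_path)
    # -> /tmp/sysroot_bisect/GOOD/build/board/foo.o
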
-  # If this isn't a bisected object just call compiler
-  # This shouldn't happen!
-  if not in_object_list(full_obj_path, obj_list):
-    if CONTINUE_ON_MISSING:
-      log_file = os.path.join(bisect_dir, '_MISSING_CACHED_OBJ_LOG')
-      log_to_file(log_file, execargs, '? compiler', full_obj_path)
-      return exec_and_return(execargs)
-    else:
-      raise Error(('%s is missing from cache! To ignore export '
-                   'BISECT_CONTINUE_ON_MISSING=1. See documentation for more '
-                   'details on this option.' % full_obj_path))
-
-  cache = which_cache(full_obj_path)
-
-  # If using safe WRAPPER_SAFE_MODE option call compiler and overwrite the
-  # result from the good/bad cache. This option is safe and covers all compiler
-  # side effects, but is very slow!
-  if WRAPPER_SAFE_MODE:
-    retval = exec_and_return(execargs)
-    if retval:
-      return retval
-    os.remove(full_obj_path)
-    restore_file(bisect_dir, cache, full_obj_path)
     return retval

-  # Generate compiler side effects. Trick Make into thinking compiler was
-  # actually executed.
-  for side_effect in get_side_effects(execargs):
-    restore_file(bisect_dir, cache, side_effect)

-  # If generated object file happened to be pruned/cleaned by Make then link it
-  # over from cache again.
-  if not os.path.exists(full_obj_path):
-    restore_file(bisect_dir, cache, full_obj_path)
-
-  return 0

+def bisect_triage(execargs, bisect_dir):
+    """Use the object file from the appropriate cache (good/bad).
+
+    Given a populated bisection directory, use the object file saved
+    into one of the caches (good/bad) according to what is specified
+    in the good/bad sets. The good/bad sets are generated by the
+    high level binary search tool. Additionally restore any possible
+    side effects of the compiler.
+
+    Args:
+      execargs: compiler execution arguments.
+      bisect_dir: populated bisection directory.
+    """
+    full_obj_path = get_obj_path(execargs)
+    obj_list = os.path.join(bisect_dir, LIST_FILE)
+
+    # If the output isn't an object file just call compiler
+    if not full_obj_path:
+        return exec_and_return(execargs)
+
+    # If this isn't a bisected object just call compiler
+    # This shouldn't happen!
+    if not in_object_list(full_obj_path, obj_list):
+        if CONTINUE_ON_MISSING:
+            log_file = os.path.join(bisect_dir, "_MISSING_CACHED_OBJ_LOG")
+            log_to_file(log_file, execargs, "? compiler", full_obj_path)
+            return exec_and_return(execargs)
+        else:
+            raise Error(
+                (
+                    "%s is missing from cache! To ignore export "
+                    "BISECT_CONTINUE_ON_MISSING=1. See documentation for more "
+                    "details on this option." % full_obj_path
+                )
+            )
+
+    cache = which_cache(full_obj_path)
+
+    # If using safe WRAPPER_SAFE_MODE option call compiler and overwrite the
+    # result from the good/bad cache. This option is safe and covers all compiler
+    # side effects, but is very slow!
+    if WRAPPER_SAFE_MODE:
+        retval = exec_and_return(execargs)
+        if retval:
+            return retval
+        os.remove(full_obj_path)
+        restore_file(bisect_dir, cache, full_obj_path)
+        return retval
+
+    # Generate compiler side effects. Trick Make into thinking compiler was
+    # actually executed.
+    for side_effect in get_side_effects(execargs):
+        restore_file(bisect_dir, cache, side_effect)
+
+    # If generated object file happened to be pruned/cleaned by Make then link it
+    # over from cache again.
+ if not os.path.exists(full_obj_path): + restore_file(bisect_dir, cache, full_obj_path) + + return 0 def bisect_driver(bisect_stage, bisect_dir, execargs): - """Call appropriate bisection stage according to value in bisect_stage.""" - if bisect_stage == 'POPULATE_GOOD': - return bisect_populate(execargs, bisect_dir, GOOD_CACHE) - elif bisect_stage == 'POPULATE_BAD': - return bisect_populate(execargs, bisect_dir, BAD_CACHE) - elif bisect_stage == 'TRIAGE': - return bisect_triage(execargs, bisect_dir) - else: - raise ValueError('wrong value for BISECT_STAGE: %s' % bisect_stage) + """Call appropriate bisection stage according to value in bisect_stage.""" + if bisect_stage == "POPULATE_GOOD": + return bisect_populate(execargs, bisect_dir, GOOD_CACHE) + elif bisect_stage == "POPULATE_BAD": + return bisect_populate(execargs, bisect_dir, BAD_CACHE) + elif bisect_stage == "TRIAGE": + return bisect_triage(execargs, bisect_dir) + else: + raise ValueError("wrong value for BISECT_STAGE: %s" % bisect_stage) diff --git a/binary_search_tool/common.py b/binary_search_tool/common.py index a087ee93..b8a7a1d2 100644 --- a/binary_search_tool/common.py +++ b/binary_search_tool/common.py @@ -27,270 +27,298 @@ import collections import os import sys + # Programatically adding utils python path to PYTHONPATH if os.path.isabs(sys.argv[0]): - utils_pythonpath = os.path.abspath('{0}/..'.format( - os.path.dirname(sys.argv[0]))) + utils_pythonpath = os.path.abspath( + "{0}/..".format(os.path.dirname(sys.argv[0])) + ) else: - wdir = os.getcwd() - utils_pythonpath = os.path.abspath('{0}/{1}/..'.format( - wdir, os.path.dirname(sys.argv[0]))) + wdir = os.getcwd() + utils_pythonpath = os.path.abspath( + "{0}/{1}/..".format(wdir, os.path.dirname(sys.argv[0])) + ) sys.path.append(utils_pythonpath) class ArgumentDict(collections.OrderedDict): - """Wrapper around OrderedDict, represents CLI arguments for program. - - AddArgument enforces the following layout: - { - ['-n', '--iterations'] : { - 'dest': 'iterations', - 'type': int, - 'help': 'Number of iterations to try in the search.', - 'default': 50 - } - [arg_name1, arg_name2, ...] : { - arg_option1 : arg_option_val1, - ... - }, - ... - } - """ - _POSSIBLE_OPTIONS = [ - 'action', 'nargs', 'const', 'default', 'type', 'choices', 'required', - 'help', 'metavar', 'dest' - ] - - def AddArgument(self, *args, **kwargs): - """Add argument to ArgsDict, has same signature as argparse.add_argument - - Emulates the the argparse.add_argument method so the internal OrderedDict - can be safely and easily populated. Each call to this method will have a 1-1 - corresponding call to argparse.add_argument once BuildArgParser is called. - - Args: - *args: The names for the argument (-V, --verbose, etc.) - **kwargs: The options for the argument, corresponds to the args of - argparse.add_argument - - Returns: - None - - Raises: - TypeError: if args is empty or if option in kwargs is not a valid - option for argparse.add_argument. + """Wrapper around OrderedDict, represents CLI arguments for program. + + AddArgument enforces the following layout: + { + ['-n', '--iterations'] : { + 'dest': 'iterations', + 'type': int, + 'help': 'Number of iterations to try in the search.', + 'default': 50 + } + [arg_name1, arg_name2, ...] : { + arg_option1 : arg_option_val1, + ... + }, + ... 
+        }
+    """

-  if not args:
-    raise TypeError('Argument needs at least one name')
-
-  for key in kwargs:
-    if key not in self._POSSIBLE_OPTIONS:
-      raise TypeError('Invalid option "%s" for argument %s' % (key, args[0]))

-  self[args] = kwargs
+    _POSSIBLE_OPTIONS = [
+        "action",
+        "nargs",
+        "const",
+        "default",
+        "type",
+        "choices",
+        "required",
+        "help",
+        "metavar",
+        "dest",
+    ]
+
+    def AddArgument(self, *args, **kwargs):
+        """Add argument to ArgsDict, has same signature as argparse.add_argument
+
+        Emulates the argparse.add_argument method so the internal OrderedDict
+        can be safely and easily populated. Each call to this method will have a 1-1
+        corresponding call to argparse.add_argument once BuildArgParser is called.
+
+        Args:
+          *args: The names for the argument (-V, --verbose, etc.)
+          **kwargs: The options for the argument, corresponds to the args of
+            argparse.add_argument
+
+        Returns:
+          None
+
+        Raises:
+          TypeError: if args is empty or if option in kwargs is not a valid
+            option for argparse.add_argument.
+        """
+        if not args:
+            raise TypeError("Argument needs at least one name")
+
+        for key in kwargs:
+            if key not in self._POSSIBLE_OPTIONS:
+                raise TypeError(
+                    'Invalid option "%s" for argument %s' % (key, args[0])
+                )
+
+        self[args] = kwargs


 _ArgsDict = ArgumentDict()


 def GetArgsDict():
-  """_ArgsDict singleton method"""
-  if not _ArgsDict:
-    _BuildArgsDict(_ArgsDict)
-  return _ArgsDict
+    """_ArgsDict singleton method"""
+    if not _ArgsDict:
+        _BuildArgsDict(_ArgsDict)
+    return _ArgsDict


 def BuildArgParser(parser, override=False):
-  """Add all arguments from singleton ArgsDict to parser.
+    """Add all arguments from singleton ArgsDict to parser.

-  Will take argparse parser and add all arguments in ArgsDict. Will ignore
-  the default and required options if override is set to True.
+    Will take argparse parser and add all arguments in ArgsDict. Will ignore
+    the default and required options if override is set to True.

-  Args:
-    parser: type argparse.ArgumentParser, will call add_argument for every item
-      in _ArgsDict
-    override: True if being called from run_bisect.py. Used to say that default
-      and required options are to be ignored
+    Args:
+      parser: type argparse.ArgumentParser, will call add_argument for every item
+        in _ArgsDict
+      override: True if being called from run_bisect.py.
Used to say that default + and required options are to be ignored - Returns: - None - """ - ArgsDict = GetArgsDict() + Returns: + None + """ + ArgsDict = GetArgsDict() - # Have no defaults when overriding - for arg_names, arg_options in ArgsDict.items(): - if override: - arg_options = arg_options.copy() - arg_options.pop('default', None) - arg_options.pop('required', None) + # Have no defaults when overriding + for arg_names, arg_options in ArgsDict.items(): + if override: + arg_options = arg_options.copy() + arg_options.pop("default", None) + arg_options.pop("required", None) - parser.add_argument(*arg_names, **arg_options) + parser.add_argument(*arg_names, **arg_options) def StrToBool(str_in): - if str_in.lower() in ['true', 't', '1']: - return True - if str_in.lower() in ['false', 'f', '0']: - return False + if str_in.lower() in ["true", "t", "1"]: + return True + if str_in.lower() in ["false", "f", "0"]: + return False - raise AttributeError('%s is not a valid boolean string' % str_in) + raise AttributeError("%s is not a valid boolean string" % str_in) def _BuildArgsDict(args): - """Populate ArgumentDict with all arguments""" - args.AddArgument( - '-n', - '--iterations', - dest='iterations', - type=int, - help='Number of iterations to try in the search.', - default=50) - args.AddArgument( - '-i', - '--get_initial_items', - dest='get_initial_items', - help='Script to run to get the initial objects. ' - 'If your script requires user input ' - 'the --verbose option must be used') - args.AddArgument( - '-g', - '--switch_to_good', - dest='switch_to_good', - help='Script to run to switch to good. ' - 'If your switch script requires user input ' - 'the --verbose option must be used') - args.AddArgument( - '-b', - '--switch_to_bad', - dest='switch_to_bad', - help='Script to run to switch to bad. ' - 'If your switch script requires user input ' - 'the --verbose option must be used') - args.AddArgument( - '-I', - '--test_setup_script', - dest='test_setup_script', - help='Optional script to perform building, flashing, ' - 'and other setup before the test script runs.') - args.AddArgument( - '-t', - '--test_script', - dest='test_script', - help='Script to run to test the ' - 'output after packages are built.') - # No input (evals to False), - # --prune (evals to True), - # --prune=False, - # --prune=True - args.AddArgument( - '-p', - '--prune', - dest='prune', - nargs='?', - const=True, - default=False, - type=StrToBool, - metavar='bool', - help='If True, continue until all bad items are found. ' - 'Defaults to False.') - args.AddArgument( - '-P', - '--pass_bisect', - dest='pass_bisect', - default=None, - help='Script to generate another script for pass level bisect, ' - 'which contains command line options to build bad item. ' - 'This will also turn on pass/transformation level bisection. ' - 'Needs support of `-opt-bisect-limit`(pass) and ' - '`-print-debug-counter`(transformation) from LLVM. ' - 'For now it only supports one single bad item, so to use it, ' - 'prune must be set to False.') - # No input (evals to False), - # --ir_diff (evals to True), - # --ir_diff=False, - # --ir_diff=True - args.AddArgument( - '-d', - '--ir_diff', - dest='ir_diff', - nargs='?', - const=True, - default=False, - type=StrToBool, - metavar='bool', - help='Whether to print IR differences before and after bad ' - 'pass/transformation to verbose output. 
Defaults to False, ' - 'only works when pass_bisect is enabled.') - # No input (evals to False), - # --noincremental (evals to True), - # --noincremental=False, - # --noincremental=True - args.AddArgument( - '-c', - '--noincremental', - dest='noincremental', - nargs='?', - const=True, - default=False, - type=StrToBool, - metavar='bool', - help="If True, don't propagate good/bad changes " - 'incrementally. Defaults to False.') - # No input (evals to False), - # --file_args (evals to True), - # --file_args=False, - # --file_args=True - args.AddArgument( - '-f', - '--file_args', - dest='file_args', - nargs='?', - const=True, - default=False, - type=StrToBool, - metavar='bool', - help='Whether to use a file to pass arguments to scripts. ' - 'Defaults to False.') - # No input (evals to True), - # --verify (evals to True), - # --verify=False, - # --verify=True - args.AddArgument( - '--verify', - dest='verify', - nargs='?', - const=True, - default=True, - type=StrToBool, - metavar='bool', - help='Whether to run verify iterations before searching. ' - 'Defaults to True.') - args.AddArgument( - '-N', - '--prune_iterations', - dest='prune_iterations', - type=int, - help='Number of prune iterations to try in the search.', - default=100) - # No input (evals to False), - # --verbose (evals to True), - # --verbose=False, - # --verbose=True - args.AddArgument( - '-V', - '--verbose', - dest='verbose', - nargs='?', - const=True, - default=False, - type=StrToBool, - metavar='bool', - help='If True, print full output to console.') - args.AddArgument( - '-r', - '--resume', - dest='resume', - action='store_true', - help='Resume bisection tool execution from state file.' - 'Useful if the last bisection was terminated ' - 'before it could properly finish.') + """Populate ArgumentDict with all arguments""" + args.AddArgument( + "-n", + "--iterations", + dest="iterations", + type=int, + help="Number of iterations to try in the search.", + default=50, + ) + args.AddArgument( + "-i", + "--get_initial_items", + dest="get_initial_items", + help="Script to run to get the initial objects. " + "If your script requires user input " + "the --verbose option must be used", + ) + args.AddArgument( + "-g", + "--switch_to_good", + dest="switch_to_good", + help="Script to run to switch to good. " + "If your switch script requires user input " + "the --verbose option must be used", + ) + args.AddArgument( + "-b", + "--switch_to_bad", + dest="switch_to_bad", + help="Script to run to switch to bad. " + "If your switch script requires user input " + "the --verbose option must be used", + ) + args.AddArgument( + "-I", + "--test_setup_script", + dest="test_setup_script", + help="Optional script to perform building, flashing, " + "and other setup before the test script runs.", + ) + args.AddArgument( + "-t", + "--test_script", + dest="test_script", + help="Script to run to test the " "output after packages are built.", + ) + # No input (evals to False), + # --prune (evals to True), + # --prune=False, + # --prune=True + args.AddArgument( + "-p", + "--prune", + dest="prune", + nargs="?", + const=True, + default=False, + type=StrToBool, + metavar="bool", + help="If True, continue until all bad items are found. " + "Defaults to False.", + ) + args.AddArgument( + "-P", + "--pass_bisect", + dest="pass_bisect", + default=None, + help="Script to generate another script for pass level bisect, " + "which contains command line options to build bad item. " + "This will also turn on pass/transformation level bisection. 
" + "Needs support of `-opt-bisect-limit`(pass) and " + "`-print-debug-counter`(transformation) from LLVM. " + "For now it only supports one single bad item, so to use it, " + "prune must be set to False.", + ) + # No input (evals to False), + # --ir_diff (evals to True), + # --ir_diff=False, + # --ir_diff=True + args.AddArgument( + "-d", + "--ir_diff", + dest="ir_diff", + nargs="?", + const=True, + default=False, + type=StrToBool, + metavar="bool", + help="Whether to print IR differences before and after bad " + "pass/transformation to verbose output. Defaults to False, " + "only works when pass_bisect is enabled.", + ) + # No input (evals to False), + # --noincremental (evals to True), + # --noincremental=False, + # --noincremental=True + args.AddArgument( + "-c", + "--noincremental", + dest="noincremental", + nargs="?", + const=True, + default=False, + type=StrToBool, + metavar="bool", + help="If True, don't propagate good/bad changes " + "incrementally. Defaults to False.", + ) + # No input (evals to False), + # --file_args (evals to True), + # --file_args=False, + # --file_args=True + args.AddArgument( + "-f", + "--file_args", + dest="file_args", + nargs="?", + const=True, + default=False, + type=StrToBool, + metavar="bool", + help="Whether to use a file to pass arguments to scripts. " + "Defaults to False.", + ) + # No input (evals to True), + # --verify (evals to True), + # --verify=False, + # --verify=True + args.AddArgument( + "--verify", + dest="verify", + nargs="?", + const=True, + default=True, + type=StrToBool, + metavar="bool", + help="Whether to run verify iterations before searching. " + "Defaults to True.", + ) + args.AddArgument( + "-N", + "--prune_iterations", + dest="prune_iterations", + type=int, + help="Number of prune iterations to try in the search.", + default=100, + ) + # No input (evals to False), + # --verbose (evals to True), + # --verbose=False, + # --verbose=True + args.AddArgument( + "-V", + "--verbose", + dest="verbose", + nargs="?", + const=True, + default=False, + type=StrToBool, + metavar="bool", + help="If True, print full output to console.", + ) + args.AddArgument( + "-r", + "--resume", + dest="resume", + action="store_true", + help="Resume bisection tool execution from state file." 
+ "Useful if the last bisection was terminated " + "before it could properly finish.", + ) diff --git a/binary_search_tool/compiler_wrapper.py b/binary_search_tool/compiler_wrapper.py index a1dcb1b7..acb7f9eb 100755 --- a/binary_search_tool/compiler_wrapper.py +++ b/binary_search_tool/compiler_wrapper.py @@ -28,41 +28,42 @@ import sys from binary_search_tool import bisect_driver -WRAPPED = '%s.real' % sys.argv[0] -BISECT_STAGE = os.environ.get('BISECT_STAGE') -DEFAULT_BISECT_DIR = os.path.expanduser('~/ANDROID_BISECT') -BISECT_DIR = os.environ.get('BISECT_DIR') or DEFAULT_BISECT_DIR + +WRAPPED = "%s.real" % sys.argv[0] +BISECT_STAGE = os.environ.get("BISECT_STAGE") +DEFAULT_BISECT_DIR = os.path.expanduser("~/ANDROID_BISECT") +BISECT_DIR = os.environ.get("BISECT_DIR") or DEFAULT_BISECT_DIR def ProcessArgFile(arg_file): - args = [] - # Read in entire file at once and parse as if in shell - with open(arg_file, 'r', encoding='utf-8') as f: - args.extend(shlex.split(f.read())) + args = [] + # Read in entire file at once and parse as if in shell + with open(arg_file, "r", encoding="utf-8") as f: + args.extend(shlex.split(f.read())) - return args + return args def Main(_): - if not os.path.islink(sys.argv[0]): - print("Compiler wrapper can't be called directly!") - return 1 + if not os.path.islink(sys.argv[0]): + print("Compiler wrapper can't be called directly!") + return 1 - execargs = [WRAPPED] + sys.argv[1:] + execargs = [WRAPPED] + sys.argv[1:] - if BISECT_STAGE not in bisect_driver.VALID_MODES or '-o' not in execargs: - os.execv(WRAPPED, [WRAPPED] + sys.argv[1:]) + if BISECT_STAGE not in bisect_driver.VALID_MODES or "-o" not in execargs: + os.execv(WRAPPED, [WRAPPED] + sys.argv[1:]) - # Handle @file argument syntax with compiler - for idx, _ in enumerate(execargs): - # @file can be nested in other @file arguments, use While to re-evaluate - # the first argument of the embedded file. - while execargs[idx][0] == '@': - args_in_file = ProcessArgFile(execargs[idx][1:]) - execargs = execargs[0:idx] + args_in_file + execargs[idx + 1:] + # Handle @file argument syntax with compiler + for idx, _ in enumerate(execargs): + # @file can be nested in other @file arguments, use While to re-evaluate + # the first argument of the embedded file. + while execargs[idx][0] == "@": + args_in_file = ProcessArgFile(execargs[idx][1:]) + execargs = execargs[0:idx] + args_in_file + execargs[idx + 1 :] - bisect_driver.bisect_driver(BISECT_STAGE, BISECT_DIR, execargs) + bisect_driver.bisect_driver(BISECT_STAGE, BISECT_DIR, execargs) -if __name__ == '__main__': - sys.exit(Main(sys.argv[1:])) +if __name__ == "__main__": + sys.exit(Main(sys.argv[1:])) diff --git a/binary_search_tool/cros_pkg/create_cleanup_script.py b/binary_search_tool/cros_pkg/create_cleanup_script.py index 0c85a88c..aebf523a 100755 --- a/binary_search_tool/cros_pkg/create_cleanup_script.py +++ b/binary_search_tool/cros_pkg/create_cleanup_script.py @@ -20,13 +20,13 @@ import sys def Usage(parser, msg): - print('ERROR: ' + msg) - parser.print_help() - sys.exit(1) + print("ERROR: " + msg) + parser.print_help() + sys.exit(1) def Main(argv): - """Generate a script to undo changes done by setup.sh + """Generate a script to undo changes done by setup.sh The script setup.sh makes a change that needs to be undone, namely it creates a soft link making /build/${board} point @@ -40,80 +40,91 @@ def Main(argv): This function takes arguments that tell it exactly what setup.sh actually did, then generates a script to undo those exact changes. 
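
The @file loop in compiler_wrapper.py above expands response files with shell
quoting rules, and because it re-checks execargs[idx] after every expansion, a
response file whose first entry is itself an @file reference is expanded
recursively. A self-contained sketch of that behaviour (the file contents and
argument values are invented; this is not code from the change):

    import shlex
    import tempfile

    # Write a throwaway response file; a real wrapper receives these paths
    # from the build system.
    with tempfile.NamedTemporaryFile("w", suffix=".rsp", delete=False) as f:
        f.write('-c foo.c -o "dir with spaces/foo.o"')
        rsp = f.name

    execargs = ["clang", "@" + rsp]
    idx = 1
    # Keep expanding as long as the argument at this position is an @file.
    while execargs[idx][0] == "@":
        with open(execargs[idx][1:], encoding="utf-8") as arg_file:
            args_in_file = shlex.split(arg_file.read())
        execargs = execargs[:idx] + args_in_file + execargs[idx + 1:]

    print(execargs)
    # ['clang', '-c', 'foo.c', '-o', 'dir with spaces/foo.o']
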
- """ - - parser = argparse.ArgumentParser() - parser.add_argument( - '--board', - dest='board', - required=True, - help='Chromeos board for packages/image.') - - parser.add_argument( - '--old_tree_missing', - dest='tree_existed', - action='store_false', - help='Did /build/${BOARD} exist.', - default=True) - - parser.add_argument( - '--renamed_tree', - dest='renamed_tree', - action='store_true', - help='Was /build/${BOARD} saved & renamed.', - default=False) - - parser.add_argument( - '--old_link', - dest='old_link', - help=('The original build tree soft link.')) - - options = parser.parse_args(argv[1:]) - - if options.old_link or options.renamed_tree: - if not options.tree_existed: - Usage( - parser, 'If --tree_existed is False, cannot have ' - '--renamed_tree or --old_link') - - if options.old_link and options.renamed_tree: - Usage(parser, '--old_link and --renamed_tree are incompatible options.') - - if options.tree_existed: - if not options.old_link and not options.renamed_tree: - Usage( - parser, 'If --tree_existed is True, then must have either ' - '--old_link or --renamed_tree') - - out_filename = 'cros_pkg/' + options.board + '_cleanup.sh' - - with open(out_filename, 'w', encoding='utf-8') as out_file: - out_file.write('#!/bin/bash\n\n') - # First, remove the 'new' soft link. - out_file.write('sudo rm /build/%s\n' % options.board) + """ + + parser = argparse.ArgumentParser() + parser.add_argument( + "--board", + dest="board", + required=True, + help="Chromeos board for packages/image.", + ) + + parser.add_argument( + "--old_tree_missing", + dest="tree_existed", + action="store_false", + help="Did /build/${BOARD} exist.", + default=True, + ) + + parser.add_argument( + "--renamed_tree", + dest="renamed_tree", + action="store_true", + help="Was /build/${BOARD} saved & renamed.", + default=False, + ) + + parser.add_argument( + "--old_link", + dest="old_link", + help=("The original build tree soft link."), + ) + + options = parser.parse_args(argv[1:]) + + if options.old_link or options.renamed_tree: + if not options.tree_existed: + Usage( + parser, + "If --tree_existed is False, cannot have " + "--renamed_tree or --old_link", + ) + + if options.old_link and options.renamed_tree: + Usage(parser, "--old_link and --renamed_tree are incompatible options.") + if options.tree_existed: - if options.renamed_tree: - # Old build tree existed and was a real tree, so it got - # renamed. Move the renamed tree back to the original tree. - out_file.write('sudo mv /build/%s.save /build/%s\n' % (options.board, - options.board)) - else: - # Old tree existed and was already a soft link. Re-create the - # original soft link. - original_link = options.old_link - if original_link[0] == "'": - original_link = original_link[1:] - if original_link[-1] == "'": - original_link = original_link[:-1] - out_file.write( - 'sudo ln -s %s /build/%s\n' % (original_link, options.board)) - out_file.write('\n') - # Remove common.sh file - out_file.write('rm common/common.sh\n') - - return 0 - - -if __name__ == '__main__': - retval = Main(sys.argv) - sys.exit(retval) + if not options.old_link and not options.renamed_tree: + Usage( + parser, + "If --tree_existed is True, then must have either " + "--old_link or --renamed_tree", + ) + + out_filename = "cros_pkg/" + options.board + "_cleanup.sh" + + with open(out_filename, "w", encoding="utf-8") as out_file: + out_file.write("#!/bin/bash\n\n") + # First, remove the 'new' soft link. 
+ out_file.write("sudo rm /build/%s\n" % options.board) + if options.tree_existed: + if options.renamed_tree: + # Old build tree existed and was a real tree, so it got + # renamed. Move the renamed tree back to the original tree. + out_file.write( + "sudo mv /build/%s.save /build/%s\n" + % (options.board, options.board) + ) + else: + # Old tree existed and was already a soft link. Re-create the + # original soft link. + original_link = options.old_link + if original_link[0] == "'": + original_link = original_link[1:] + if original_link[-1] == "'": + original_link = original_link[:-1] + out_file.write( + "sudo ln -s %s /build/%s\n" % (original_link, options.board) + ) + out_file.write("\n") + # Remove common.sh file + out_file.write("rm common/common.sh\n") + + return 0 + + +if __name__ == "__main__": + retval = Main(sys.argv) + sys.exit(retval) diff --git a/binary_search_tool/pass_mapping.py b/binary_search_tool/pass_mapping.py index 618509b6..c8a616b5 100644 --- a/binary_search_tool/pass_mapping.py +++ b/binary_search_tool/pass_mapping.py @@ -15,20 +15,12 @@ pass_name = { # For users who make local changes to passes, please add a map from pass # description to newly introduced DebugCounter name for transformation # level bisection purpose. - 'Hoist/decompose integer division and remainder': - 'div-rem-pairs-transform', - 'Early CSE': - 'early-cse', - 'Falkor HW Prefetch Fix Late Phase': - 'falkor-hwpf', - 'Combine redundant instructions': - 'instcombine-visit', - 'Machine Copy Propagation Pass': - 'machine-cp-fwd', - 'Global Value Numbering': - 'newgvn-phi', - 'PredicateInfo Printer': - 'predicateinfo-rename', - 'SI Insert Waitcnts': - 'si-insert-waitcnts-forceexp', + "Hoist/decompose integer division and remainder": "div-rem-pairs-transform", + "Early CSE": "early-cse", + "Falkor HW Prefetch Fix Late Phase": "falkor-hwpf", + "Combine redundant instructions": "instcombine-visit", + "Machine Copy Propagation Pass": "machine-cp-fwd", + "Global Value Numbering": "newgvn-phi", + "PredicateInfo Printer": "predicateinfo-rename", + "SI Insert Waitcnts": "si-insert-waitcnts-forceexp", } diff --git a/binary_search_tool/run_bisect.py b/binary_search_tool/run_bisect.py index 18669cc1..eeda98cc 100755 --- a/binary_search_tool/run_bisect.py +++ b/binary_search_tool/run_bisect.py @@ -17,293 +17,336 @@ import sys from binary_search_tool import binary_search_state from binary_search_tool import common - from cros_utils import command_executer from cros_utils import logger class Bisector(object, metaclass=abc.ABCMeta): - """The abstract base class for Bisectors.""" - - def __init__(self, options, overrides=None): - """Constructor for Bisector abstract base class - - Args: - options: positional arguments for specific mode (board, remote, etc.) - overrides: optional dict of overrides for argument defaults - """ - self.options = options - self.overrides = overrides - if not overrides: - self.overrides = {} - self.logger = logger.GetLogger() - self.ce = command_executer.GetCommandExecuter() - - def _PrettyPrintArgs(self, args, overrides): - """Output arguments in a nice, human readable format - - Will print and log all arguments for the bisecting tool and make note of - which arguments have been overridden. 
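
The pass_name table above is what connects the two bisection granularities: a
pass description reported by LLVM's -opt-bisect-limit output is looked up to
find the DebugCounter name used with -print-debug-counter. A minimal sketch of
that lookup, assuming toolchain-utils is on sys.path and with a made-up input
value:

    from binary_search_tool import pass_mapping

    # e.g. a description parsed out of -opt-bisect-limit output (invented):
    pass_description = "Combine redundant instructions"

    counter = pass_mapping.pass_name.get(pass_description)
    if counter is None:
        raise KeyError("no DebugCounter mapping for pass: %s" % pass_description)
    print("bisect transformations via DebugCounter:", counter)
    # -> instcombine-visit
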
- - Example output: - ./run_bisect.py package daisy 172.17.211.184 -I "" -t cros_pkg/my_test.sh - Performing ChromeOS Package bisection - Method Config: - board : daisy - remote : 172.17.211.184 - - Bisection Config: (* = overridden) - get_initial_items : cros_pkg/get_initial_items.sh - switch_to_good : cros_pkg/switch_to_good.sh - switch_to_bad : cros_pkg/switch_to_bad.sh - * test_setup_script : - * test_script : cros_pkg/my_test.sh - prune : True - noincremental : False - file_args : True - - Args: - args: The args to be given to binary_search_state.Run. This represents - how the bisection tool will run (with overridden arguments already - added in). - overrides: The dict of overriden arguments provided by the user. This is - provided so the user can be told which arguments were - overriden and with what value. - """ - # Output method config (board, remote, etc.) - options = vars(self.options) - out = '\nPerforming %s bisection\n' % self.method_name - out += 'Method Config:\n' - max_key_len = max([len(str(x)) for x in options.keys()]) - for key in sorted(options): - val = options[key] - key_str = str(key).rjust(max_key_len) - val_str = str(val) - out += ' %s : %s\n' % (key_str, val_str) - - # Output bisection config (scripts, prune, etc.) - out += '\nBisection Config: (* = overridden)\n' - max_key_len = max([len(str(x)) for x in args.keys()]) - # Print args in common._ArgsDict order - args_order = [x['dest'] for x in common.GetArgsDict().values()] - for key in sorted(args, key=args_order.index): - val = args[key] - key_str = str(key).rjust(max_key_len) - val_str = str(val) - changed_str = '*' if key in overrides else ' ' - - out += ' %s %s : %s\n' % (changed_str, key_str, val_str) - - out += '\n' - self.logger.LogOutput(out) - - def ArgOverride(self, args, overrides, pretty_print=True): - """Override arguments based on given overrides and provide nice output - - Args: - args: dict of arguments to be passed to binary_search_state.Run (runs - dict.update, causing args to be mutated). - overrides: dict of arguments to update args with - pretty_print: if True print out args/overrides to user in pretty format - """ - args.update(overrides) - if pretty_print: - self._PrettyPrintArgs(args, overrides) - - @abc.abstractmethod - def PreRun(self): - pass - - @abc.abstractmethod - def Run(self): - pass - - @abc.abstractmethod - def PostRun(self): - pass + """The abstract base class for Bisectors.""" + + def __init__(self, options, overrides=None): + """Constructor for Bisector abstract base class + + Args: + options: positional arguments for specific mode (board, remote, etc.) + overrides: optional dict of overrides for argument defaults + """ + self.options = options + self.overrides = overrides + if not overrides: + self.overrides = {} + self.logger = logger.GetLogger() + self.ce = command_executer.GetCommandExecuter() + + def _PrettyPrintArgs(self, args, overrides): + """Output arguments in a nice, human readable format + + Will print and log all arguments for the bisecting tool and make note of + which arguments have been overridden. 
+
+        Example output:
+        ./run_bisect.py package daisy 172.17.211.184 -I "" -t cros_pkg/my_test.sh
+        Performing ChromeOS Package bisection
+        Method Config:
+          board : daisy
+          remote : 172.17.211.184
+
+        Bisection Config: (* = overridden)
+          get_initial_items : cros_pkg/get_initial_items.sh
+          switch_to_good : cros_pkg/switch_to_good.sh
+          switch_to_bad : cros_pkg/switch_to_bad.sh
+          * test_setup_script :
+          * test_script : cros_pkg/my_test.sh
+          prune : True
+          noincremental : False
+          file_args : True
+
+        Args:
+          args: The args to be given to binary_search_state.Run. This represents
+            how the bisection tool will run (with overridden arguments already
+            added in).
+          overrides: The dict of overridden arguments provided by the user. This is
+            provided so the user can be told which arguments were
+            overridden and with what value.
+        """
+        # Output method config (board, remote, etc.)
+        options = vars(self.options)
+        out = "\nPerforming %s bisection\n" % self.method_name
+        out += "Method Config:\n"
+        max_key_len = max([len(str(x)) for x in options.keys()])
+        for key in sorted(options):
+            val = options[key]
+            key_str = str(key).rjust(max_key_len)
+            val_str = str(val)
+            out += " %s : %s\n" % (key_str, val_str)
+
+        # Output bisection config (scripts, prune, etc.)
+        out += "\nBisection Config: (* = overridden)\n"
+        max_key_len = max([len(str(x)) for x in args.keys()])
+        # Print args in common._ArgsDict order
+        args_order = [x["dest"] for x in common.GetArgsDict().values()]
+        for key in sorted(args, key=args_order.index):
+            val = args[key]
+            key_str = str(key).rjust(max_key_len)
+            val_str = str(val)
+            changed_str = "*" if key in overrides else " "
+
+            out += " %s %s : %s\n" % (changed_str, key_str, val_str)
+
+        out += "\n"
+        self.logger.LogOutput(out)
+
+    def ArgOverride(self, args, overrides, pretty_print=True):
+        """Override arguments based on given overrides and provide nice output
+
+        Args:
+          args: dict of arguments to be passed to binary_search_state.Run (runs
+            dict.update, causing args to be mutated).
+ overrides: dict of arguments to update args with + pretty_print: if True print out args/overrides to user in pretty format + """ + args.update(overrides) + if pretty_print: + self._PrettyPrintArgs(args, overrides) + + @abc.abstractmethod + def PreRun(self): + pass + + @abc.abstractmethod + def Run(self): + pass + + @abc.abstractmethod + def PostRun(self): + pass class BisectPackage(Bisector): - """The class for package bisection steps.""" - - cros_pkg_setup = 'cros_pkg/setup.sh' - cros_pkg_cleanup = 'cros_pkg/%s_cleanup.sh' - - def __init__(self, options, overrides): - super(BisectPackage, self).__init__(options, overrides) - self.method_name = 'ChromeOS Package' - self.default_kwargs = { - 'get_initial_items': 'cros_pkg/get_initial_items.sh', - 'switch_to_good': 'cros_pkg/switch_to_good.sh', - 'switch_to_bad': 'cros_pkg/switch_to_bad.sh', - 'test_setup_script': 'cros_pkg/test_setup.sh', - 'test_script': 'cros_pkg/interactive_test.sh', - 'noincremental': False, - 'prune': True, - 'file_args': True - } - self.setup_cmd = ' '.join( - (self.cros_pkg_setup, self.options.board, self.options.remote)) - self.ArgOverride(self.default_kwargs, self.overrides) - - def PreRun(self): - ret, _, _ = self.ce.RunCommandWExceptionCleanup( - self.setup_cmd, print_to_console=True) - if ret: - self.logger.LogError('Package bisector setup failed w/ error %d' % ret) - return 1 - return 0 - - def Run(self): - return binary_search_state.Run(**self.default_kwargs) - - def PostRun(self): - cmd = self.cros_pkg_cleanup % self.options.board - ret, _, _ = self.ce.RunCommandWExceptionCleanup(cmd, print_to_console=True) - if ret: - self.logger.LogError('Package bisector cleanup failed w/ error %d' % ret) - return 1 - - self.logger.LogOutput(('Cleanup successful! To restore the bisection ' - 'environment run the following:\n' - ' cd %s; %s') % (os.getcwd(), self.setup_cmd)) - return 0 + """The class for package bisection steps.""" + + cros_pkg_setup = "cros_pkg/setup.sh" + cros_pkg_cleanup = "cros_pkg/%s_cleanup.sh" + + def __init__(self, options, overrides): + super(BisectPackage, self).__init__(options, overrides) + self.method_name = "ChromeOS Package" + self.default_kwargs = { + "get_initial_items": "cros_pkg/get_initial_items.sh", + "switch_to_good": "cros_pkg/switch_to_good.sh", + "switch_to_bad": "cros_pkg/switch_to_bad.sh", + "test_setup_script": "cros_pkg/test_setup.sh", + "test_script": "cros_pkg/interactive_test.sh", + "noincremental": False, + "prune": True, + "file_args": True, + } + self.setup_cmd = " ".join( + (self.cros_pkg_setup, self.options.board, self.options.remote) + ) + self.ArgOverride(self.default_kwargs, self.overrides) + + def PreRun(self): + ret, _, _ = self.ce.RunCommandWExceptionCleanup( + self.setup_cmd, print_to_console=True + ) + if ret: + self.logger.LogError( + "Package bisector setup failed w/ error %d" % ret + ) + return 1 + return 0 + + def Run(self): + return binary_search_state.Run(**self.default_kwargs) + + def PostRun(self): + cmd = self.cros_pkg_cleanup % self.options.board + ret, _, _ = self.ce.RunCommandWExceptionCleanup( + cmd, print_to_console=True + ) + if ret: + self.logger.LogError( + "Package bisector cleanup failed w/ error %d" % ret + ) + return 1 + + self.logger.LogOutput( + ( + "Cleanup successful! 
To restore the bisection " + "environment run the following:\n" + " cd %s; %s" + ) + % (os.getcwd(), self.setup_cmd) + ) + return 0 class BisectObject(Bisector): - """The class for object bisection steps.""" - - sysroot_wrapper_setup = 'sysroot_wrapper/setup.sh' - sysroot_wrapper_cleanup = 'sysroot_wrapper/cleanup.sh' - - def __init__(self, options, overrides): - super(BisectObject, self).__init__(options, overrides) - self.method_name = 'ChromeOS Object' - self.default_kwargs = { - 'get_initial_items': 'sysroot_wrapper/get_initial_items.sh', - 'switch_to_good': 'sysroot_wrapper/switch_to_good.sh', - 'switch_to_bad': 'sysroot_wrapper/switch_to_bad.sh', - 'test_setup_script': 'sysroot_wrapper/test_setup.sh', - 'test_script': 'sysroot_wrapper/interactive_test.sh', - 'noincremental': False, - 'prune': True, - 'file_args': True - } - self.options = options - if options.dir: - os.environ['BISECT_DIR'] = options.dir - self.options.dir = os.environ.get('BISECT_DIR', '/tmp/sysroot_bisect') - self.setup_cmd = ' '.join( - (self.sysroot_wrapper_setup, self.options.board, self.options.remote, - self.options.package, str(self.options.reboot).lower(), - shlex.quote(self.options.use_flags))) - - self.ArgOverride(self.default_kwargs, overrides) - - def PreRun(self): - ret, _, _ = self.ce.RunCommandWExceptionCleanup( - self.setup_cmd, print_to_console=True) - if ret: - self.logger.LogError('Object bisector setup failed w/ error %d' % ret) - return 1 - - os.environ['BISECT_STAGE'] = 'TRIAGE' - return 0 - - def Run(self): - return binary_search_state.Run(**self.default_kwargs) - - def PostRun(self): - cmd = self.sysroot_wrapper_cleanup - ret, _, _ = self.ce.RunCommandWExceptionCleanup(cmd, print_to_console=True) - if ret: - self.logger.LogError('Object bisector cleanup failed w/ error %d' % ret) - return 1 - self.logger.LogOutput(('Cleanup successful! 
To restore the bisection ' - 'environment run the following:\n' - ' cd %s; %s') % (os.getcwd(), self.setup_cmd)) - return 0 + """The class for object bisection steps.""" + + sysroot_wrapper_setup = "sysroot_wrapper/setup.sh" + sysroot_wrapper_cleanup = "sysroot_wrapper/cleanup.sh" + + def __init__(self, options, overrides): + super(BisectObject, self).__init__(options, overrides) + self.method_name = "ChromeOS Object" + self.default_kwargs = { + "get_initial_items": "sysroot_wrapper/get_initial_items.sh", + "switch_to_good": "sysroot_wrapper/switch_to_good.sh", + "switch_to_bad": "sysroot_wrapper/switch_to_bad.sh", + "test_setup_script": "sysroot_wrapper/test_setup.sh", + "test_script": "sysroot_wrapper/interactive_test.sh", + "noincremental": False, + "prune": True, + "file_args": True, + } + self.options = options + if options.dir: + os.environ["BISECT_DIR"] = options.dir + self.options.dir = os.environ.get("BISECT_DIR", "/tmp/sysroot_bisect") + self.setup_cmd = " ".join( + ( + self.sysroot_wrapper_setup, + self.options.board, + self.options.remote, + self.options.package, + str(self.options.reboot).lower(), + shlex.quote(self.options.use_flags), + ) + ) + + self.ArgOverride(self.default_kwargs, overrides) + + def PreRun(self): + ret, _, _ = self.ce.RunCommandWExceptionCleanup( + self.setup_cmd, print_to_console=True + ) + if ret: + self.logger.LogError( + "Object bisector setup failed w/ error %d" % ret + ) + return 1 + + os.environ["BISECT_STAGE"] = "TRIAGE" + return 0 + + def Run(self): + return binary_search_state.Run(**self.default_kwargs) + + def PostRun(self): + cmd = self.sysroot_wrapper_cleanup + ret, _, _ = self.ce.RunCommandWExceptionCleanup( + cmd, print_to_console=True + ) + if ret: + self.logger.LogError( + "Object bisector cleanup failed w/ error %d" % ret + ) + return 1 + self.logger.LogOutput( + ( + "Cleanup successful! 
To restore the bisection " + "environment run the following:\n" + " cd %s; %s" + ) + % (os.getcwd(), self.setup_cmd) + ) + return 0 class BisectAndroid(Bisector): - """The class for Android bisection steps.""" - - android_setup = 'android/setup.sh' - android_cleanup = 'android/cleanup.sh' - default_dir = os.path.expanduser('~/ANDROID_BISECT') - - def __init__(self, options, overrides): - super(BisectAndroid, self).__init__(options, overrides) - self.method_name = 'Android' - self.default_kwargs = { - 'get_initial_items': 'android/get_initial_items.sh', - 'switch_to_good': 'android/switch_to_good.sh', - 'switch_to_bad': 'android/switch_to_bad.sh', - 'test_setup_script': 'android/test_setup.sh', - 'test_script': 'android/interactive_test.sh', - 'prune': True, - 'file_args': True, - 'noincremental': False, - } - self.options = options - if options.dir: - os.environ['BISECT_DIR'] = options.dir - self.options.dir = os.environ.get('BISECT_DIR', self.default_dir) - - num_jobs = "NUM_JOBS='%s'" % self.options.num_jobs - device_id = '' - if self.options.device_id: - device_id = "ANDROID_SERIAL='%s'" % self.options.device_id - - self.setup_cmd = ' '.join( - (num_jobs, device_id, self.android_setup, self.options.android_src)) - - self.ArgOverride(self.default_kwargs, overrides) - - def PreRun(self): - ret, _, _ = self.ce.RunCommandWExceptionCleanup( - self.setup_cmd, print_to_console=True) - if ret: - self.logger.LogError('Android bisector setup failed w/ error %d' % ret) - return 1 - - os.environ['BISECT_STAGE'] = 'TRIAGE' - return 0 - - def Run(self): - return binary_search_state.Run(**self.default_kwargs) - - def PostRun(self): - cmd = self.android_cleanup - ret, _, _ = self.ce.RunCommandWExceptionCleanup(cmd, print_to_console=True) - if ret: - self.logger.LogError('Android bisector cleanup failed w/ error %d' % ret) - return 1 - self.logger.LogOutput(('Cleanup successful! 
To restore the bisection ' - 'environment run the following:\n' - ' cd %s; %s') % (os.getcwd(), self.setup_cmd)) - return 0 + """The class for Android bisection steps.""" + + android_setup = "android/setup.sh" + android_cleanup = "android/cleanup.sh" + default_dir = os.path.expanduser("~/ANDROID_BISECT") + + def __init__(self, options, overrides): + super(BisectAndroid, self).__init__(options, overrides) + self.method_name = "Android" + self.default_kwargs = { + "get_initial_items": "android/get_initial_items.sh", + "switch_to_good": "android/switch_to_good.sh", + "switch_to_bad": "android/switch_to_bad.sh", + "test_setup_script": "android/test_setup.sh", + "test_script": "android/interactive_test.sh", + "prune": True, + "file_args": True, + "noincremental": False, + } + self.options = options + if options.dir: + os.environ["BISECT_DIR"] = options.dir + self.options.dir = os.environ.get("BISECT_DIR", self.default_dir) + + num_jobs = "NUM_JOBS='%s'" % self.options.num_jobs + device_id = "" + if self.options.device_id: + device_id = "ANDROID_SERIAL='%s'" % self.options.device_id + + self.setup_cmd = " ".join( + (num_jobs, device_id, self.android_setup, self.options.android_src) + ) + + self.ArgOverride(self.default_kwargs, overrides) + + def PreRun(self): + ret, _, _ = self.ce.RunCommandWExceptionCleanup( + self.setup_cmd, print_to_console=True + ) + if ret: + self.logger.LogError( + "Android bisector setup failed w/ error %d" % ret + ) + return 1 + + os.environ["BISECT_STAGE"] = "TRIAGE" + return 0 + + def Run(self): + return binary_search_state.Run(**self.default_kwargs) + + def PostRun(self): + cmd = self.android_cleanup + ret, _, _ = self.ce.RunCommandWExceptionCleanup( + cmd, print_to_console=True + ) + if ret: + self.logger.LogError( + "Android bisector cleanup failed w/ error %d" % ret + ) + return 1 + self.logger.LogOutput( + ( + "Cleanup successful! To restore the bisection " + "environment run the following:\n" + " cd %s; %s" + ) + % (os.getcwd(), self.setup_cmd) + ) + return 0 def Run(bisector): - log = logger.GetLogger() + log = logger.GetLogger() - log.LogOutput('Setting up Bisection tool') - ret = bisector.PreRun() - if ret: - return ret + log.LogOutput("Setting up Bisection tool") + ret = bisector.PreRun() + if ret: + return ret - log.LogOutput('Running Bisection tool') - ret = bisector.Run() - if ret: - return ret + log.LogOutput("Running Bisection tool") + ret = bisector.Run() + if ret: + return ret - log.LogOutput('Cleaning up Bisection tool') - ret = bisector.PostRun() - if ret: - return ret + log.LogOutput("Cleaning up Bisection tool") + ret = bisector.PostRun() + if ret: + return ret - return 0 + return 0 _HELP_EPILOG = """ @@ -318,92 +361,113 @@ See below for full override argument reference: def Main(argv): - override_parser = argparse.ArgumentParser( - add_help=False, - argument_default=argparse.SUPPRESS, - usage='run_bisect.py {mode} [options]') - common.BuildArgParser(override_parser, override=True) - - epilog = _HELP_EPILOG + override_parser.format_help() - parser = argparse.ArgumentParser( - epilog=epilog, formatter_class=RawTextHelpFormatter) - subparsers = parser.add_subparsers( - title='Bisect mode', - description=('Which bisection method to ' - 'use. Each method has ' - 'specific setup and ' - 'arguments. 
Please consult ' - 'the README for more ' - 'information.')) - - parser_package = subparsers.add_parser('package') - parser_package.add_argument('board', help='Board to target') - parser_package.add_argument('remote', help='Remote machine to test on') - parser_package.set_defaults(handler=BisectPackage) - - parser_object = subparsers.add_parser('object') - parser_object.add_argument('board', help='Board to target') - parser_object.add_argument('remote', help='Remote machine to test on') - parser_object.add_argument('package', help='Package to emerge and test') - parser_object.add_argument( - '--use_flags', - required=False, - default='', - help='Use flags passed to emerge') - parser_object.add_argument( - '--noreboot', - action='store_false', - dest='reboot', - help='Do not reboot after updating the package (default: False)') - parser_object.add_argument( - '--dir', - help=('Bisection directory to use, sets ' - '$BISECT_DIR if provided. Defaults to ' - 'current value of $BISECT_DIR (or ' - '/tmp/sysroot_bisect if $BISECT_DIR is ' - 'empty).')) - parser_object.set_defaults(handler=BisectObject) - - parser_android = subparsers.add_parser('android') - parser_android.add_argument('android_src', help='Path to android source tree') - parser_android.add_argument( - '--dir', - help=('Bisection directory to use, sets ' - '$BISECT_DIR if provided. Defaults to ' - 'current value of $BISECT_DIR (or ' - '~/ANDROID_BISECT/ if $BISECT_DIR is ' - 'empty).')) - parser_android.add_argument( - '-j', - '--num_jobs', - type=int, - default=1, - help=('Number of jobs that make and various ' - 'scripts for bisector can spawn. Setting ' - 'this value too high can freeze up your ' - 'machine!')) - parser_android.add_argument( - '--device_id', - default='', - help=('Device id for device used for testing. ' - 'Use this if you have multiple Android ' - 'devices plugged into your machine.')) - parser_android.set_defaults(handler=BisectAndroid) - - options, remaining = parser.parse_known_args(argv) - if remaining: - overrides = override_parser.parse_args(remaining) - overrides = vars(overrides) - else: - overrides = {} - - subcmd = options.handler - del options.handler - - bisector = subcmd(options, overrides) - return Run(bisector) - - -if __name__ == '__main__': - os.chdir(os.path.dirname(__file__)) - sys.exit(Main(sys.argv[1:])) + override_parser = argparse.ArgumentParser( + add_help=False, + argument_default=argparse.SUPPRESS, + usage="run_bisect.py {mode} [options]", + ) + common.BuildArgParser(override_parser, override=True) + + epilog = _HELP_EPILOG + override_parser.format_help() + parser = argparse.ArgumentParser( + epilog=epilog, formatter_class=RawTextHelpFormatter + ) + subparsers = parser.add_subparsers( + title="Bisect mode", + description=( + "Which bisection method to " + "use. Each method has " + "specific setup and " + "arguments. Please consult " + "the README for more " + "information." 
+ ), + ) + + parser_package = subparsers.add_parser("package") + parser_package.add_argument("board", help="Board to target") + parser_package.add_argument("remote", help="Remote machine to test on") + parser_package.set_defaults(handler=BisectPackage) + + parser_object = subparsers.add_parser("object") + parser_object.add_argument("board", help="Board to target") + parser_object.add_argument("remote", help="Remote machine to test on") + parser_object.add_argument("package", help="Package to emerge and test") + parser_object.add_argument( + "--use_flags", + required=False, + default="", + help="Use flags passed to emerge", + ) + parser_object.add_argument( + "--noreboot", + action="store_false", + dest="reboot", + help="Do not reboot after updating the package (default: False)", + ) + parser_object.add_argument( + "--dir", + help=( + "Bisection directory to use, sets " + "$BISECT_DIR if provided. Defaults to " + "current value of $BISECT_DIR (or " + "/tmp/sysroot_bisect if $BISECT_DIR is " + "empty)." + ), + ) + parser_object.set_defaults(handler=BisectObject) + + parser_android = subparsers.add_parser("android") + parser_android.add_argument( + "android_src", help="Path to android source tree" + ) + parser_android.add_argument( + "--dir", + help=( + "Bisection directory to use, sets " + "$BISECT_DIR if provided. Defaults to " + "current value of $BISECT_DIR (or " + "~/ANDROID_BISECT/ if $BISECT_DIR is " + "empty)." + ), + ) + parser_android.add_argument( + "-j", + "--num_jobs", + type=int, + default=1, + help=( + "Number of jobs that make and various " + "scripts for bisector can spawn. Setting " + "this value too high can freeze up your " + "machine!" + ), + ) + parser_android.add_argument( + "--device_id", + default="", + help=( + "Device id for device used for testing. " + "Use this if you have multiple Android " + "devices plugged into your machine." 
+ ), + ) + parser_android.set_defaults(handler=BisectAndroid) + + options, remaining = parser.parse_known_args(argv) + if remaining: + overrides = override_parser.parse_args(remaining) + overrides = vars(overrides) + else: + overrides = {} + + subcmd = options.handler + del options.handler + + bisector = subcmd(options, overrides) + return Run(bisector) + + +if __name__ == "__main__": + os.chdir(os.path.dirname(__file__)) + sys.exit(Main(sys.argv[1:])) diff --git a/binary_search_tool/run_bisect_tests.py b/binary_search_tool/run_bisect_tests.py index 097c375b..22092ff9 100755 --- a/binary_search_tool/run_bisect_tests.py +++ b/binary_search_tool/run_bisect_tests.py @@ -14,81 +14,88 @@ import sys from cros_utils import command_executer -TEST_DIR = 'full_bisect_test' -DEFAULT_BISECT_DIR = '/tmp/sysroot_bisect' +TEST_DIR = "full_bisect_test" +DEFAULT_BISECT_DIR = "/tmp/sysroot_bisect" -def populate_good_files(top_dir, ce, bisect_dir=DEFAULT_BISECT_DIR): - # 'make clean' - work_dir = os.path.join(top_dir, TEST_DIR, 'work') - cmd = 'rm -f %s/*.o' % work_dir - status = ce.RunCommand(cmd) - if status != 0: - print('Error trying to clean out work directory: %s' % cmd) - return status - # set up the 'good' source files - script = os.path.join(top_dir, TEST_DIR, 'make_sources_good.sh') - status = ce.RunCommand(script) - if status != 0: - print('Error setting up "good" source files: %s' % script) +def populate_good_files(top_dir, ce, bisect_dir=DEFAULT_BISECT_DIR): + # 'make clean' + work_dir = os.path.join(top_dir, TEST_DIR, "work") + cmd = "rm -f %s/*.o" % work_dir + status = ce.RunCommand(cmd) + if status != 0: + print("Error trying to clean out work directory: %s" % cmd) + return status + + # set up the 'good' source files + script = os.path.join(top_dir, TEST_DIR, "make_sources_good.sh") + status = ce.RunCommand(script) + if status != 0: + print('Error setting up "good" source files: %s' % script) + return status + + export_bisect = "export BISECT_DIR=%s; " % bisect_dir + # build the good source files + script_path = os.path.join(top_dir, TEST_DIR) + if os.path.exists("/usr/bin/x86_64-cros-linux-gnu-gcc"): + build_script = "chromeos_build.sh" + else: + build_script = "build.sh" + cmd = "%s export BISECT_STAGE=POPULATE_GOOD; pushd %s; ./%s; popd" % ( + export_bisect, + script_path, + build_script, + ) + status = ce.RunCommand(cmd) return status - export_bisect = 'export BISECT_DIR=%s; ' % bisect_dir - # build the good source files - script_path = os.path.join(top_dir, TEST_DIR) - if os.path.exists('/usr/bin/x86_64-cros-linux-gnu-gcc'): - build_script = 'chromeos_build.sh' - else: - build_script = 'build.sh' - cmd = ('%s export BISECT_STAGE=POPULATE_GOOD; pushd %s; ./%s; popd' % - (export_bisect, script_path, build_script)) - status = ce.RunCommand(cmd) - return status - def populate_bad_files(top_dir, ce, bisect_dir=DEFAULT_BISECT_DIR): - # 'make clean' - work_dir = os.path.join(top_dir, TEST_DIR, 'work') - cmd = 'rm -f %s/*.o' % work_dir - status = ce.RunCommand(cmd) - if status != 0: - print('Error trying to clean out work directory: %s' % cmd) + # 'make clean' + work_dir = os.path.join(top_dir, TEST_DIR, "work") + cmd = "rm -f %s/*.o" % work_dir + status = ce.RunCommand(cmd) + if status != 0: + print("Error trying to clean out work directory: %s" % cmd) + return status + + # set up the 'bad' source files + script = os.path.join(top_dir, TEST_DIR, "make_sources_bad.sh") + status = ce.RunCommand(script) + if status != 0: + print('Error setting up "bad" source files: %s' % script) + return status + 
+ export_bisect = "export BISECT_DIR=%s; " % bisect_dir + # build the bad source files + script_path = os.path.join(top_dir, TEST_DIR) + if os.path.exists("/usr/bin/x86_64-cros-linux-gnu-gcc"): + build_script = "chromeos_build.sh" + else: + build_script = "build.sh" + cmd = "%s export BISECT_STAGE=POPULATE_BAD; pushd %s; ./%s ; popd" % ( + export_bisect, + script_path, + build_script, + ) + status = ce.RunCommand(cmd) return status - # set up the 'bad' source files - script = os.path.join(top_dir, TEST_DIR, 'make_sources_bad.sh') - status = ce.RunCommand(script) - if status != 0: - print('Error setting up "bad" source files: %s' % script) - return status - - export_bisect = 'export BISECT_DIR=%s; ' % bisect_dir - # build the bad source files - script_path = os.path.join(top_dir, TEST_DIR) - if os.path.exists('/usr/bin/x86_64-cros-linux-gnu-gcc'): - build_script = 'chromeos_build.sh' - else: - build_script = 'build.sh' - cmd = ('%s export BISECT_STAGE=POPULATE_BAD; pushd %s; ./%s ; popd' % - (export_bisect, script_path, build_script)) - status = ce.RunCommand(cmd) - return status - def run_main_bisection_test(top_dir, ce): - test_script = os.path.join(top_dir, TEST_DIR, 'main-bisect-test.sh') - status = ce.RunCommand(test_script) - return status + test_script = os.path.join(top_dir, TEST_DIR, "main-bisect-test.sh") + status = ce.RunCommand(test_script) + return status def verify_compiler_and_wrapper(): - # We don't need to do any special setup if running inside a ChromeOS - # chroot. - if os.path.exists('/usr/bin/x86_64-cros-linux-gnu-gcc'): - return True + # We don't need to do any special setup if running inside a ChromeOS + # chroot. + if os.path.exists("/usr/bin/x86_64-cros-linux-gnu-gcc"): + return True - message = """ + message = """ *** IMPORTANT --- READ THIS CAREFULLY!! *** This test uses the command 'gcc' to compile the good/bad versions of the @@ -100,78 +107,93 @@ thing". Is your compiler wrapper properly set up? [Y/n] """ - print(message) - inp = sys.stdin.readline() - inp = inp.strip() - inp = inp.lower() - return not inp or inp == 'y' or inp == 'yes' + print(message) + inp = sys.stdin.readline() + inp = inp.strip() + inp = inp.lower() + return not inp or inp == "y" or inp == "yes" def Main(argv): - parser = argparse.ArgumentParser() - parser.add_argument( - '--dir', - dest='directory', - help='Bisection work tree, where good & bad object ' - 'files go. Default is /tmp/sysroot_bisect') - - options = parser.parse_args(argv) - - # Make sure the compiler wrapper & soft links are properly set up. - wrapper_is_setup = verify_compiler_and_wrapper() - if not wrapper_is_setup: - print('Exiting now. Please re-run after you have set up the compiler ' - 'wrapper.') - return 0 - - # Make sure we're in the correct directory for running this test. - cwd = os.getcwd() - if not os.path.exists(os.path.join(cwd, 'full_bisect_test')): - print('Error: Wrong directory. This script must be run from the top level' - ' of the binary_search_tool tree (under toolchain_utils).') - return 1 - - ce = command_executer.GetCommandExecuter() - bisect_dir = options.directory - if not bisect_dir: - bisect_dir = DEFAULT_BISECT_DIR - - # Make sure BISECT_DIR is clean - if os.path.exists(bisect_dir): - cmd = 'rm -Rf %s/*' % bisect_dir - retv = ce.RunCommand(cmd) + parser = argparse.ArgumentParser() + parser.add_argument( + "--dir", + dest="directory", + help="Bisection work tree, where good & bad object " + "files go. 
Default is /tmp/sysroot_bisect", + ) + + options = parser.parse_args(argv) + + # Make sure the compiler wrapper & soft links are properly set up. + wrapper_is_setup = verify_compiler_and_wrapper() + if not wrapper_is_setup: + print( + "Exiting now. Please re-run after you have set up the compiler " + "wrapper." + ) + return 0 + + # Make sure we're in the correct directory for running this test. + cwd = os.getcwd() + if not os.path.exists(os.path.join(cwd, "full_bisect_test")): + print( + "Error: Wrong directory. This script must be run from the top level" + " of the binary_search_tool tree (under toolchain_utils)." + ) + return 1 + + ce = command_executer.GetCommandExecuter() + bisect_dir = options.directory + if not bisect_dir: + bisect_dir = DEFAULT_BISECT_DIR + + # Make sure BISECT_DIR is clean + if os.path.exists(bisect_dir): + cmd = "rm -Rf %s/*" % bisect_dir + retv = ce.RunCommand(cmd) + if retv != 0: + return retv + + retv = populate_good_files(cwd, ce, bisect_dir) if retv != 0: - return retv - - retv = populate_good_files(cwd, ce, bisect_dir) - if retv != 0: - return retv + return retv - retv = populate_bad_files(cwd, ce, bisect_dir) - if retv != 0: + retv = populate_bad_files(cwd, ce, bisect_dir) + if retv != 0: + return retv + + # Set up good/bad work soft links + cmd = "rm -f %s/%s/good-objects; ln -s %s/good %s/%s/good-objects" % ( + cwd, + TEST_DIR, + bisect_dir, + cwd, + TEST_DIR, + ) + + status = ce.RunCommand(cmd) + if status != 0: + print("Error executing: %s; exiting now." % cmd) + return status + + cmd = "rm -f %s/%s/bad-objects; ln -s %s/bad %s/%s/bad-objects" % ( + cwd, + TEST_DIR, + bisect_dir, + cwd, + TEST_DIR, + ) + + status = ce.RunCommand(cmd) + if status != 0: + print("Error executing: %s; exiting now." % cmd) + return status + + retv = run_main_bisection_test(cwd, ce) return retv - # Set up good/bad work soft links - cmd = ('rm -f %s/%s/good-objects; ln -s %s/good %s/%s/good-objects' % - (cwd, TEST_DIR, bisect_dir, cwd, TEST_DIR)) - - status = ce.RunCommand(cmd) - if status != 0: - print('Error executing: %s; exiting now.' % cmd) - return status - - cmd = ('rm -f %s/%s/bad-objects; ln -s %s/bad %s/%s/bad-objects' % - (cwd, TEST_DIR, bisect_dir, cwd, TEST_DIR)) - - status = ce.RunCommand(cmd) - if status != 0: - print('Error executing: %s; exiting now.' 
% cmd) - return status - - retv = run_main_bisection_test(cwd, ce) - return retv - -if __name__ == '__main__': - retval = Main(sys.argv[1:]) - sys.exit(retval) +if __name__ == "__main__": + retval = Main(sys.argv[1:]) + sys.exit(retval) diff --git a/binary_search_tool/sysroot_wrapper/testing_test.py b/binary_search_tool/sysroot_wrapper/testing_test.py index 04b69b74..2523c0be 100755 --- a/binary_search_tool/sysroot_wrapper/testing_test.py +++ b/binary_search_tool/sysroot_wrapper/testing_test.py @@ -17,26 +17,30 @@ import subprocess import sys import os -base_path = ('/var/cache/chromeos-chrome/chrome-src-internal/src/out_daisy/' - 'Release/obj/') +base_path = ( + "/var/cache/chromeos-chrome/chrome-src-internal/src/out_daisy/" + "Release/obj/" +) bad_files = [ - os.path.join(base_path, 'base/base.cpu.o'), - os.path.join(base_path, 'base/base.version.o'), - os.path.join(base_path, 'apps/apps.launcher.o') + os.path.join(base_path, "base/base.cpu.o"), + os.path.join(base_path, "base/base.version.o"), + os.path.join(base_path, "apps/apps.launcher.o"), ] -bisect_dir = os.environ.get('BISECT_DIR', '/tmp/sysroot_bisect') +bisect_dir = os.environ.get("BISECT_DIR", "/tmp/sysroot_bisect") def Main(_): - for test_file in bad_files: - test_file = test_file.strip() - cmd = ['grep', test_file, os.path.join(bisect_dir, 'BAD_SET')] - ret = subprocess.call(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - if not ret: - return 1 - return 0 - - -if __name__ == '__main__': - sys.exit(Main(sys.argv[1:])) + for test_file in bad_files: + test_file = test_file.strip() + cmd = ["grep", test_file, os.path.join(bisect_dir, "BAD_SET")] + ret = subprocess.call( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + if not ret: + return 1 + return 0 + + +if __name__ == "__main__": + sys.exit(Main(sys.argv[1:])) diff --git a/binary_search_tool/test/binary_search_tool_test.py b/binary_search_tool/test/binary_search_tool_test.py index 493c2e35..f9070989 100755 --- a/binary_search_tool/test/binary_search_tool_test.py +++ b/binary_search_tool/test/binary_search_tool_test.py @@ -9,7 +9,7 @@ from __future__ import division from __future__ import print_function -__author__ = 'shenhan@google.com (Han Shen)' +__author__ = "shenhan@google.com (Han Shen)" import os import random @@ -25,545 +25,597 @@ from binary_search_tool.test import gen_obj def GenObj(): - obj_num = random.randint(100, 1000) - bad_obj_num = random.randint(obj_num // 100, obj_num // 20) - if bad_obj_num == 0: - bad_obj_num = 1 - gen_obj.Main(['--obj_num', str(obj_num), '--bad_obj_num', str(bad_obj_num)]) + obj_num = random.randint(100, 1000) + bad_obj_num = random.randint(obj_num // 100, obj_num // 20) + if bad_obj_num == 0: + bad_obj_num = 1 + gen_obj.Main(["--obj_num", str(obj_num), "--bad_obj_num", str(bad_obj_num)]) def CleanObj(): - os.remove(common.OBJECTS_FILE) - os.remove(common.WORKING_SET_FILE) - print('Deleted "{0}" and "{1}"'.format(common.OBJECTS_FILE, - common.WORKING_SET_FILE)) + os.remove(common.OBJECTS_FILE) + os.remove(common.WORKING_SET_FILE) + print( + 'Deleted "{0}" and "{1}"'.format( + common.OBJECTS_FILE, common.WORKING_SET_FILE + ) + ) class BisectTest(unittest.TestCase): - """Tests for run_bisect.py""" - - def setUp(self): - with open('./is_setup', 'w', encoding='utf-8'): - pass - - try: - os.remove(binary_search_state.STATE_FILE) - except OSError: - pass - - def tearDown(self): - try: - os.remove('./is_setup') - os.remove(os.readlink(binary_search_state.STATE_FILE)) - os.remove(binary_search_state.STATE_FILE) - except OSError: 
- pass - - class FullBisector(run_bisect.Bisector): - """Test bisector to test run_bisect.py with""" - - def __init__(self, options, overrides): - super(BisectTest.FullBisector, self).__init__(options, overrides) - - def PreRun(self): - GenObj() - return 0 - - def Run(self): - return binary_search_state.Run( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_script='./is_good.py', - prune=True, - file_args=True) - - def PostRun(self): - CleanObj() - return 0 - - def test_full_bisector(self): - ret = run_bisect.Run(self.FullBisector({}, {})) - self.assertEqual(ret, 0) - self.assertFalse(os.path.exists(common.OBJECTS_FILE)) - self.assertFalse(os.path.exists(common.WORKING_SET_FILE)) - - def check_output(self): - _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput( - ('grep "Bad items are: " logs/binary_search_tool_test.py.out | ' - 'tail -n1')) - ls = out.splitlines() - self.assertEqual(len(ls), 1) - line = ls[0] - - _, _, bad_ones = line.partition('Bad items are: ') - bad_ones = bad_ones.split() - expected_result = common.ReadObjectsFile() - - # Reconstruct objects file from bad_ones and compare - actual_result = [0] * len(expected_result) - for bad_obj in bad_ones: - actual_result[int(bad_obj)] = 1 - - self.assertEqual(actual_result, expected_result) + """Tests for run_bisect.py""" + + def setUp(self): + with open("./is_setup", "w", encoding="utf-8"): + pass + + try: + os.remove(binary_search_state.STATE_FILE) + except OSError: + pass + + def tearDown(self): + try: + os.remove("./is_setup") + os.remove(os.readlink(binary_search_state.STATE_FILE)) + os.remove(binary_search_state.STATE_FILE) + except OSError: + pass + + class FullBisector(run_bisect.Bisector): + """Test bisector to test run_bisect.py with""" + + def __init__(self, options, overrides): + super(BisectTest.FullBisector, self).__init__(options, overrides) + + def PreRun(self): + GenObj() + return 0 + + def Run(self): + return binary_search_state.Run( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_script="./is_good.py", + prune=True, + file_args=True, + ) + + def PostRun(self): + CleanObj() + return 0 + + def test_full_bisector(self): + ret = run_bisect.Run(self.FullBisector({}, {})) + self.assertEqual(ret, 0) + self.assertFalse(os.path.exists(common.OBJECTS_FILE)) + self.assertFalse(os.path.exists(common.WORKING_SET_FILE)) + + def check_output(self): + _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput( + ( + 'grep "Bad items are: " logs/binary_search_tool_test.py.out | ' + "tail -n1" + ) + ) + ls = out.splitlines() + self.assertEqual(len(ls), 1) + line = ls[0] + + _, _, bad_ones = line.partition("Bad items are: ") + bad_ones = bad_ones.split() + expected_result = common.ReadObjectsFile() + + # Reconstruct objects file from bad_ones and compare + actual_result = [0] * len(expected_result) + for bad_obj in bad_ones: + actual_result[int(bad_obj)] = 1 + + self.assertEqual(actual_result, expected_result) class BisectingUtilsTest(unittest.TestCase): - """Tests for bisecting tool.""" - - def setUp(self): - """Generate [100-1000] object files, and 1-5% of which are bad ones.""" - GenObj() - - with open('./is_setup', 'w', encoding='utf-8'): - pass - - try: - os.remove(binary_search_state.STATE_FILE) - except OSError: - pass - - def tearDown(self): - """Cleanup temp files.""" - CleanObj() - - try: - 
os.remove(os.readlink(binary_search_state.STATE_FILE)) - except OSError: - pass - - cleanup_list = [ - './is_setup', binary_search_state.STATE_FILE, 'noinc_prune_bad', - 'noinc_prune_good', './cmd_script.sh' - ] - for f in cleanup_list: - if os.path.exists(f): - os.remove(f) - - def runTest(self): - ret = binary_search_state.Run( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_script='./is_good.py', - prune=True, - file_args=True) - self.assertEqual(ret, 0) - self.check_output() - - def test_arg_parse(self): - args = [ - '--get_initial_items', './gen_init_list.py', '--switch_to_good', - './switch_to_good.py', '--switch_to_bad', './switch_to_bad.py', - '--test_script', './is_good.py', '--prune', '--file_args' - ] - ret = binary_search_state.Main(args) - self.assertEqual(ret, 0) - self.check_output() - - def test_test_setup_script(self): - os.remove('./is_setup') - with self.assertRaises(AssertionError): - ret = binary_search_state.Run( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_script='./is_good.py', - prune=True, - file_args=True) - - ret = binary_search_state.Run( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_script='./is_good.py', - test_setup_script='./test_setup.py', - prune=True, - file_args=True) - self.assertEqual(ret, 0) - self.check_output() - - def test_bad_test_setup_script(self): - with self.assertRaises(AssertionError): - binary_search_state.Run( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_script='./is_good.py', - test_setup_script='./test_setup_bad.py', - prune=True, - file_args=True) - - def test_bad_save_state(self): - state_file = binary_search_state.STATE_FILE - hidden_state_file = os.path.basename(binary_search_state.HIDDEN_STATE_FILE) - - with open(state_file, 'w', encoding='utf-8') as f: - f.write('test123') - - bss = binary_search_state.MockBinarySearchState() - with self.assertRaises(OSError): - bss.SaveState() - - with open(state_file, 'r', encoding='utf-8') as f: - self.assertEqual(f.read(), 'test123') - - os.remove(state_file) - - # Cleanup generated save state that has no symlink - files = os.listdir(os.getcwd()) - save_states = [x for x in files if x.startswith(hidden_state_file)] - _ = [os.remove(x) for x in save_states] - - def test_save_state(self): - state_file = binary_search_state.STATE_FILE - - bss = binary_search_state.MockBinarySearchState() - bss.SaveState() - self.assertTrue(os.path.exists(state_file)) - first_state = os.readlink(state_file) - - bss.SaveState() - second_state = os.readlink(state_file) - self.assertTrue(os.path.exists(state_file)) - self.assertTrue(second_state != first_state) - self.assertFalse(os.path.exists(first_state)) - - bss.RemoveState() - self.assertFalse(os.path.islink(state_file)) - self.assertFalse(os.path.exists(second_state)) - - def test_load_state(self): - test_items = [1, 2, 3, 4, 5] - - bss = binary_search_state.MockBinarySearchState() - bss.all_items = test_items - bss.currently_good_items = set([1, 2, 3]) - bss.currently_bad_items = set([4, 5]) - bss.SaveState() - - bss = None - - bss2 = binary_search_state.MockBinarySearchState.LoadState() - self.assertEqual(bss2.all_items, test_items) - self.assertEqual(bss2.currently_good_items, set([])) - self.assertEqual(bss2.currently_bad_items, set([])) - 
- def test_tmp_cleanup(self): - bss = binary_search_state.MockBinarySearchState( - get_initial_items='echo "0\n1\n2\n3"', - switch_to_good='./switch_tmp.py', - file_args=True) - bss.SwitchToGood(['0', '1', '2', '3']) - - tmp_file = None - with open('tmp_file', 'r', encoding='utf-8') as f: - tmp_file = f.read() - os.remove('tmp_file') - - self.assertFalse(os.path.exists(tmp_file)) - ws = common.ReadWorkingSet() - for i in range(3): - self.assertEqual(ws[i], 42) - - def test_verify_fail(self): - bss = binary_search_state.MockBinarySearchState( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_bad.py', - switch_to_bad='./switch_to_good.py', - test_script='./is_good.py', - prune=True, - file_args=True, - verify=True) - with self.assertRaises(AssertionError): - bss.DoVerify() - - def test_early_terminate(self): - bss = binary_search_state.MockBinarySearchState( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_script='./is_good.py', - prune=True, - file_args=True, - iterations=1) - bss.DoSearchBadItems() - self.assertFalse(bss.found_items) - - def test_no_prune(self): - bss = binary_search_state.MockBinarySearchState( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_script='./is_good.py', - test_setup_script='./test_setup.py', - prune=False, - file_args=True) - bss.DoSearchBadItems() - self.assertEqual(len(bss.found_items), 1) - - bad_objs = common.ReadObjectsFile() - found_obj = int(bss.found_items.pop()) - self.assertEqual(bad_objs[found_obj], 1) - - def test_set_file(self): - binary_search_state.Run( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good_set_file.py', - switch_to_bad='./switch_to_bad_set_file.py', - test_script='./is_good.py', - prune=True, - file_args=True, - verify=True) - self.check_output() - - def test_noincremental_prune(self): - ret = binary_search_state.Run( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good_noinc_prune.py', - switch_to_bad='./switch_to_bad_noinc_prune.py', - test_script='./is_good_noinc_prune.py', - test_setup_script='./test_setup.py', - prune=True, - noincremental=True, - file_args=True, - verify=False) - self.assertEqual(ret, 0) - self.check_output() - - def check_output(self): - _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput( - ('grep "Bad items are: " logs/binary_search_tool_test.py.out | ' - 'tail -n1')) - ls = out.splitlines() - self.assertEqual(len(ls), 1) - line = ls[0] - - _, _, bad_ones = line.partition('Bad items are: ') - bad_ones = bad_ones.split() - expected_result = common.ReadObjectsFile() - - # Reconstruct objects file from bad_ones and compare - actual_result = [0] * len(expected_result) - for bad_obj in bad_ones: - actual_result[int(bad_obj)] = 1 - - self.assertEqual(actual_result, expected_result) + """Tests for bisecting tool.""" + + def setUp(self): + """Generate [100-1000] object files, and 1-5% of which are bad ones.""" + GenObj() + + with open("./is_setup", "w", encoding="utf-8"): + pass + + try: + os.remove(binary_search_state.STATE_FILE) + except OSError: + pass + + def tearDown(self): + """Cleanup temp files.""" + CleanObj() + + try: + os.remove(os.readlink(binary_search_state.STATE_FILE)) + except OSError: + pass + + cleanup_list = [ + "./is_setup", + binary_search_state.STATE_FILE, + "noinc_prune_bad", + "noinc_prune_good", + "./cmd_script.sh", + ] + for f in cleanup_list: + if 
os.path.exists(f): + os.remove(f) + + def runTest(self): + ret = binary_search_state.Run( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_script="./is_good.py", + prune=True, + file_args=True, + ) + self.assertEqual(ret, 0) + self.check_output() + + def test_arg_parse(self): + args = [ + "--get_initial_items", + "./gen_init_list.py", + "--switch_to_good", + "./switch_to_good.py", + "--switch_to_bad", + "./switch_to_bad.py", + "--test_script", + "./is_good.py", + "--prune", + "--file_args", + ] + ret = binary_search_state.Main(args) + self.assertEqual(ret, 0) + self.check_output() + + def test_test_setup_script(self): + os.remove("./is_setup") + with self.assertRaises(AssertionError): + ret = binary_search_state.Run( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_script="./is_good.py", + prune=True, + file_args=True, + ) + + ret = binary_search_state.Run( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_script="./is_good.py", + test_setup_script="./test_setup.py", + prune=True, + file_args=True, + ) + self.assertEqual(ret, 0) + self.check_output() + + def test_bad_test_setup_script(self): + with self.assertRaises(AssertionError): + binary_search_state.Run( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_script="./is_good.py", + test_setup_script="./test_setup_bad.py", + prune=True, + file_args=True, + ) + + def test_bad_save_state(self): + state_file = binary_search_state.STATE_FILE + hidden_state_file = os.path.basename( + binary_search_state.HIDDEN_STATE_FILE + ) + + with open(state_file, "w", encoding="utf-8") as f: + f.write("test123") + + bss = binary_search_state.MockBinarySearchState() + with self.assertRaises(OSError): + bss.SaveState() + + with open(state_file, "r", encoding="utf-8") as f: + self.assertEqual(f.read(), "test123") + + os.remove(state_file) + + # Cleanup generated save state that has no symlink + files = os.listdir(os.getcwd()) + save_states = [x for x in files if x.startswith(hidden_state_file)] + _ = [os.remove(x) for x in save_states] + + def test_save_state(self): + state_file = binary_search_state.STATE_FILE + + bss = binary_search_state.MockBinarySearchState() + bss.SaveState() + self.assertTrue(os.path.exists(state_file)) + first_state = os.readlink(state_file) + + bss.SaveState() + second_state = os.readlink(state_file) + self.assertTrue(os.path.exists(state_file)) + self.assertTrue(second_state != first_state) + self.assertFalse(os.path.exists(first_state)) + + bss.RemoveState() + self.assertFalse(os.path.islink(state_file)) + self.assertFalse(os.path.exists(second_state)) + + def test_load_state(self): + test_items = [1, 2, 3, 4, 5] + + bss = binary_search_state.MockBinarySearchState() + bss.all_items = test_items + bss.currently_good_items = set([1, 2, 3]) + bss.currently_bad_items = set([4, 5]) + bss.SaveState() + + bss = None + + bss2 = binary_search_state.MockBinarySearchState.LoadState() + self.assertEqual(bss2.all_items, test_items) + self.assertEqual(bss2.currently_good_items, set([])) + self.assertEqual(bss2.currently_bad_items, set([])) + + def test_tmp_cleanup(self): + bss = binary_search_state.MockBinarySearchState( + get_initial_items='echo "0\n1\n2\n3"', + switch_to_good="./switch_tmp.py", + file_args=True, + ) + bss.SwitchToGood(["0", 
"1", "2", "3"]) + + tmp_file = None + with open("tmp_file", "r", encoding="utf-8") as f: + tmp_file = f.read() + os.remove("tmp_file") + + self.assertFalse(os.path.exists(tmp_file)) + ws = common.ReadWorkingSet() + for i in range(3): + self.assertEqual(ws[i], 42) + + def test_verify_fail(self): + bss = binary_search_state.MockBinarySearchState( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_bad.py", + switch_to_bad="./switch_to_good.py", + test_script="./is_good.py", + prune=True, + file_args=True, + verify=True, + ) + with self.assertRaises(AssertionError): + bss.DoVerify() + + def test_early_terminate(self): + bss = binary_search_state.MockBinarySearchState( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_script="./is_good.py", + prune=True, + file_args=True, + iterations=1, + ) + bss.DoSearchBadItems() + self.assertFalse(bss.found_items) + + def test_no_prune(self): + bss = binary_search_state.MockBinarySearchState( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_script="./is_good.py", + test_setup_script="./test_setup.py", + prune=False, + file_args=True, + ) + bss.DoSearchBadItems() + self.assertEqual(len(bss.found_items), 1) + + bad_objs = common.ReadObjectsFile() + found_obj = int(bss.found_items.pop()) + self.assertEqual(bad_objs[found_obj], 1) + + def test_set_file(self): + binary_search_state.Run( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good_set_file.py", + switch_to_bad="./switch_to_bad_set_file.py", + test_script="./is_good.py", + prune=True, + file_args=True, + verify=True, + ) + self.check_output() + + def test_noincremental_prune(self): + ret = binary_search_state.Run( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good_noinc_prune.py", + switch_to_bad="./switch_to_bad_noinc_prune.py", + test_script="./is_good_noinc_prune.py", + test_setup_script="./test_setup.py", + prune=True, + noincremental=True, + file_args=True, + verify=False, + ) + self.assertEqual(ret, 0) + self.check_output() + + def check_output(self): + _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput( + ( + 'grep "Bad items are: " logs/binary_search_tool_test.py.out | ' + "tail -n1" + ) + ) + ls = out.splitlines() + self.assertEqual(len(ls), 1) + line = ls[0] + + _, _, bad_ones = line.partition("Bad items are: ") + bad_ones = bad_ones.split() + expected_result = common.ReadObjectsFile() + + # Reconstruct objects file from bad_ones and compare + actual_result = [0] * len(expected_result) + for bad_obj in bad_ones: + actual_result[int(bad_obj)] = 1 + + self.assertEqual(actual_result, expected_result) class BisectingUtilsPassTest(BisectingUtilsTest): - """Tests for bisecting tool at pass/transformation level.""" - - def check_pass_output(self, pass_name, pass_num, trans_num): - _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput( - ('grep "Bad pass: " logs/binary_search_tool_test.py.out | ' - 'tail -n1')) - ls = out.splitlines() - self.assertEqual(len(ls), 1) - line = ls[0] - _, _, bad_info = line.partition('Bad pass: ') - actual_info = pass_name + ' at number ' + str(pass_num) - self.assertEqual(actual_info, bad_info) - - _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput( - ('grep "Bad transformation number: ' - '" logs/binary_search_tool_test.py.out | ' - 'tail -n1')) - ls = out.splitlines() - self.assertEqual(len(ls), 1) - 
line = ls[0] - _, _, bad_info = line.partition('Bad transformation number: ') - actual_info = str(trans_num) - self.assertEqual(actual_info, bad_info) - - def test_with_prune(self): - ret = binary_search_state.Run( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_script='./is_good.py', - pass_bisect='./generate_cmd.py', - prune=True, - file_args=True) - self.assertEqual(ret, 1) - - def test_gen_cmd_script(self): - bss = binary_search_state.MockBinarySearchState( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_script='./is_good.py', - pass_bisect='./generate_cmd.py', - prune=False, - file_args=True) - bss.DoSearchBadItems() - cmd_script_path = bss.cmd_script - self.assertTrue(os.path.exists(cmd_script_path)) - - def test_no_pass_support(self): - bss = binary_search_state.MockBinarySearchState( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_script='./is_good.py', - pass_bisect='./generate_cmd.py', - prune=False, - file_args=True) - bss.cmd_script = './cmd_script_no_support.py' - # No support for -opt-bisect-limit - with self.assertRaises(RuntimeError): - bss.BuildWithPassLimit(-1) - - def test_no_transform_support(self): - bss = binary_search_state.MockBinarySearchState( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_script='./is_good.py', - pass_bisect='./generate_cmd.py', - prune=False, - file_args=True) - bss.cmd_script = './cmd_script_no_support.py' - # No support for -print-debug-counter - with self.assertRaises(RuntimeError): - bss.BuildWithTransformLimit(-1, 'counter_name') - - def test_pass_transform_bisect(self): - bss = binary_search_state.MockBinarySearchState( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_script='./is_good.py', - pass_bisect='./generate_cmd.py', - prune=False, - file_args=True) - pass_num = 4 - trans_num = 19 - bss.cmd_script = './cmd_script.py %d %d' % (pass_num, trans_num) - bss.DoSearchBadPass() - self.check_pass_output('instcombine-visit', pass_num, trans_num) - - def test_result_not_reproduced_pass(self): - bss = binary_search_state.MockBinarySearchState( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_script='./is_good.py', - pass_bisect='./generate_cmd.py', - prune=False, - file_args=True) - # Fails reproducing at pass level. - pass_num = 0 - trans_num = 19 - bss.cmd_script = './cmd_script.py %d %d' % (pass_num, trans_num) - with self.assertRaises(ValueError): - bss.DoSearchBadPass() - - def test_result_not_reproduced_transform(self): - bss = binary_search_state.MockBinarySearchState( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_script='./is_good.py', - pass_bisect='./generate_cmd.py', - prune=False, - file_args=True) - # Fails reproducing at transformation level. 
- pass_num = 4 - trans_num = 0 - bss.cmd_script = './cmd_script.py %d %d' % (pass_num, trans_num) - with self.assertRaises(ValueError): - bss.DoSearchBadPass() + """Tests for bisecting tool at pass/transformation level.""" + + def check_pass_output(self, pass_name, pass_num, trans_num): + _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput( + ( + 'grep "Bad pass: " logs/binary_search_tool_test.py.out | ' + "tail -n1" + ) + ) + ls = out.splitlines() + self.assertEqual(len(ls), 1) + line = ls[0] + _, _, bad_info = line.partition("Bad pass: ") + actual_info = pass_name + " at number " + str(pass_num) + self.assertEqual(actual_info, bad_info) + + _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput( + ( + 'grep "Bad transformation number: ' + '" logs/binary_search_tool_test.py.out | ' + "tail -n1" + ) + ) + ls = out.splitlines() + self.assertEqual(len(ls), 1) + line = ls[0] + _, _, bad_info = line.partition("Bad transformation number: ") + actual_info = str(trans_num) + self.assertEqual(actual_info, bad_info) + + def test_with_prune(self): + ret = binary_search_state.Run( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_script="./is_good.py", + pass_bisect="./generate_cmd.py", + prune=True, + file_args=True, + ) + self.assertEqual(ret, 1) + + def test_gen_cmd_script(self): + bss = binary_search_state.MockBinarySearchState( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_script="./is_good.py", + pass_bisect="./generate_cmd.py", + prune=False, + file_args=True, + ) + bss.DoSearchBadItems() + cmd_script_path = bss.cmd_script + self.assertTrue(os.path.exists(cmd_script_path)) + + def test_no_pass_support(self): + bss = binary_search_state.MockBinarySearchState( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_script="./is_good.py", + pass_bisect="./generate_cmd.py", + prune=False, + file_args=True, + ) + bss.cmd_script = "./cmd_script_no_support.py" + # No support for -opt-bisect-limit + with self.assertRaises(RuntimeError): + bss.BuildWithPassLimit(-1) + + def test_no_transform_support(self): + bss = binary_search_state.MockBinarySearchState( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_script="./is_good.py", + pass_bisect="./generate_cmd.py", + prune=False, + file_args=True, + ) + bss.cmd_script = "./cmd_script_no_support.py" + # No support for -print-debug-counter + with self.assertRaises(RuntimeError): + bss.BuildWithTransformLimit(-1, "counter_name") + + def test_pass_transform_bisect(self): + bss = binary_search_state.MockBinarySearchState( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_script="./is_good.py", + pass_bisect="./generate_cmd.py", + prune=False, + file_args=True, + ) + pass_num = 4 + trans_num = 19 + bss.cmd_script = "./cmd_script.py %d %d" % (pass_num, trans_num) + bss.DoSearchBadPass() + self.check_pass_output("instcombine-visit", pass_num, trans_num) + + def test_result_not_reproduced_pass(self): + bss = binary_search_state.MockBinarySearchState( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_script="./is_good.py", + pass_bisect="./generate_cmd.py", + prune=False, + 
file_args=True, + ) + # Fails reproducing at pass level. + pass_num = 0 + trans_num = 19 + bss.cmd_script = "./cmd_script.py %d %d" % (pass_num, trans_num) + with self.assertRaises(ValueError): + bss.DoSearchBadPass() + + def test_result_not_reproduced_transform(self): + bss = binary_search_state.MockBinarySearchState( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_script="./is_good.py", + pass_bisect="./generate_cmd.py", + prune=False, + file_args=True, + ) + # Fails reproducing at transformation level. + pass_num = 4 + trans_num = 0 + bss.cmd_script = "./cmd_script.py %d %d" % (pass_num, trans_num) + with self.assertRaises(ValueError): + bss.DoSearchBadPass() class BisectStressTest(unittest.TestCase): - """Stress tests for bisecting tool.""" - - def test_every_obj_bad(self): - amt = 25 - gen_obj.Main(['--obj_num', str(amt), '--bad_obj_num', str(amt)]) - ret = binary_search_state.Run( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_script='./is_good.py', - prune=True, - file_args=True, - verify=False) - self.assertEqual(ret, 0) - self.check_output() - - def test_every_index_is_bad(self): - amt = 25 - for i in range(amt): - obj_list = ['0'] * amt - obj_list[i] = '1' - obj_list = ','.join(obj_list) - gen_obj.Main(['--obj_list', obj_list]) - ret = binary_search_state.Run( - get_initial_items='./gen_init_list.py', - switch_to_good='./switch_to_good.py', - switch_to_bad='./switch_to_bad.py', - test_setup_script='./test_setup.py', - test_script='./is_good.py', - prune=True, - file_args=True) - self.assertEqual(ret, 0) - self.check_output() - - def check_output(self): - _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput( - ('grep "Bad items are: " logs/binary_search_tool_test.py.out | ' - 'tail -n1')) - ls = out.splitlines() - self.assertEqual(len(ls), 1) - line = ls[0] - - _, _, bad_ones = line.partition('Bad items are: ') - bad_ones = bad_ones.split() - expected_result = common.ReadObjectsFile() - - # Reconstruct objects file from bad_ones and compare - actual_result = [0] * len(expected_result) - for bad_obj in bad_ones: - actual_result[int(bad_obj)] = 1 - - self.assertEqual(actual_result, expected_result) + """Stress tests for bisecting tool.""" + + def test_every_obj_bad(self): + amt = 25 + gen_obj.Main(["--obj_num", str(amt), "--bad_obj_num", str(amt)]) + ret = binary_search_state.Run( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_script="./is_good.py", + prune=True, + file_args=True, + verify=False, + ) + self.assertEqual(ret, 0) + self.check_output() + + def test_every_index_is_bad(self): + amt = 25 + for i in range(amt): + obj_list = ["0"] * amt + obj_list[i] = "1" + obj_list = ",".join(obj_list) + gen_obj.Main(["--obj_list", obj_list]) + ret = binary_search_state.Run( + get_initial_items="./gen_init_list.py", + switch_to_good="./switch_to_good.py", + switch_to_bad="./switch_to_bad.py", + test_setup_script="./test_setup.py", + test_script="./is_good.py", + prune=True, + file_args=True, + ) + self.assertEqual(ret, 0) + self.check_output() + + def check_output(self): + _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput( + ( + 'grep "Bad items are: " logs/binary_search_tool_test.py.out | ' + "tail -n1" + ) + ) + ls = out.splitlines() + self.assertEqual(len(ls), 1) + line = ls[0] + + _, _, bad_ones = line.partition("Bad 
items are: ") + bad_ones = bad_ones.split() + expected_result = common.ReadObjectsFile() + + # Reconstruct objects file from bad_ones and compare + actual_result = [0] * len(expected_result) + for bad_obj in bad_ones: + actual_result[int(bad_obj)] = 1 + + self.assertEqual(actual_result, expected_result) def Main(argv): - num_tests = 2 - if len(argv) > 1: - num_tests = int(argv[1]) - - suite = unittest.TestSuite() - for _ in range(0, num_tests): - suite.addTest(BisectingUtilsTest()) - suite.addTest(BisectingUtilsTest('test_arg_parse')) - suite.addTest(BisectingUtilsTest('test_test_setup_script')) - suite.addTest(BisectingUtilsTest('test_bad_test_setup_script')) - suite.addTest(BisectingUtilsTest('test_bad_save_state')) - suite.addTest(BisectingUtilsTest('test_save_state')) - suite.addTest(BisectingUtilsTest('test_load_state')) - suite.addTest(BisectingUtilsTest('test_tmp_cleanup')) - suite.addTest(BisectingUtilsTest('test_verify_fail')) - suite.addTest(BisectingUtilsTest('test_early_terminate')) - suite.addTest(BisectingUtilsTest('test_no_prune')) - suite.addTest(BisectingUtilsTest('test_set_file')) - suite.addTest(BisectingUtilsTest('test_noincremental_prune')) - suite.addTest(BisectingUtilsPassTest('test_with_prune')) - suite.addTest(BisectingUtilsPassTest('test_gen_cmd_script')) - suite.addTest(BisectingUtilsPassTest('test_no_pass_support')) - suite.addTest(BisectingUtilsPassTest('test_no_transform_support')) - suite.addTest(BisectingUtilsPassTest('test_pass_transform_bisect')) - suite.addTest(BisectingUtilsPassTest('test_result_not_reproduced_pass')) - suite.addTest(BisectingUtilsPassTest('test_result_not_reproduced_transform')) - suite.addTest(BisectTest('test_full_bisector')) - suite.addTest(BisectStressTest('test_every_obj_bad')) - suite.addTest(BisectStressTest('test_every_index_is_bad')) - runner = unittest.TextTestRunner() - runner.run(suite) - - -if __name__ == '__main__': - Main(sys.argv) + num_tests = 2 + if len(argv) > 1: + num_tests = int(argv[1]) + + suite = unittest.TestSuite() + for _ in range(0, num_tests): + suite.addTest(BisectingUtilsTest()) + suite.addTest(BisectingUtilsTest("test_arg_parse")) + suite.addTest(BisectingUtilsTest("test_test_setup_script")) + suite.addTest(BisectingUtilsTest("test_bad_test_setup_script")) + suite.addTest(BisectingUtilsTest("test_bad_save_state")) + suite.addTest(BisectingUtilsTest("test_save_state")) + suite.addTest(BisectingUtilsTest("test_load_state")) + suite.addTest(BisectingUtilsTest("test_tmp_cleanup")) + suite.addTest(BisectingUtilsTest("test_verify_fail")) + suite.addTest(BisectingUtilsTest("test_early_terminate")) + suite.addTest(BisectingUtilsTest("test_no_prune")) + suite.addTest(BisectingUtilsTest("test_set_file")) + suite.addTest(BisectingUtilsTest("test_noincremental_prune")) + suite.addTest(BisectingUtilsPassTest("test_with_prune")) + suite.addTest(BisectingUtilsPassTest("test_gen_cmd_script")) + suite.addTest(BisectingUtilsPassTest("test_no_pass_support")) + suite.addTest(BisectingUtilsPassTest("test_no_transform_support")) + suite.addTest(BisectingUtilsPassTest("test_pass_transform_bisect")) + suite.addTest(BisectingUtilsPassTest("test_result_not_reproduced_pass")) + suite.addTest( + BisectingUtilsPassTest("test_result_not_reproduced_transform") + ) + suite.addTest(BisectTest("test_full_bisector")) + suite.addTest(BisectStressTest("test_every_obj_bad")) + suite.addTest(BisectStressTest("test_every_index_is_bad")) + runner = unittest.TextTestRunner() + runner.run(suite) + + +if __name__ == "__main__": + Main(sys.argv) 
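The test driver above assembles its suite by hand rather than relying on unittest discovery: constructing a TestCase subclass with no arguments runs its runTest method, passing a method name yields a one-test instance, and the whole batch is repeated num_tests times before being handed to a TextTestRunner. A minimal, self-contained sketch of that pattern (the class, method names, and counts below are illustrative stand-ins, not code from this repository):

import unittest


class SampleTest(unittest.TestCase):
    """Stand-in for the BisectingUtilsTest cases above."""

    def runTest(self):
        # Default method, used when the case is constructed with no name,
        # as in suite.addTest(BisectingUtilsTest()).
        self.assertTrue(True)

    def test_extra(self):
        # Named method, as in suite.addTest(BisectingUtilsTest("test_arg_parse")).
        self.assertEqual(1 + 1, 2)


def build_and_run(num_tests=2):
    suite = unittest.TestSuite()
    for _ in range(num_tests):
        suite.addTest(SampleTest())
        suite.addTest(SampleTest("test_extra"))
    unittest.TextTestRunner().run(suite)


if __name__ == "__main__":
    build_and_run()

Running this prints the usual dot-per-test unittest output num_tests times over, mirroring how the driver above re-runs the randomized bisection tests several rounds per invocation.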
diff --git a/binary_search_tool/test/cmd_script.py b/binary_search_tool/test/cmd_script.py index 2f026edd..b668280e 100755 --- a/binary_search_tool/test/cmd_script.py +++ b/binary_search_tool/test/cmd_script.py @@ -20,57 +20,62 @@ from binary_search_tool.test import common def Main(argv): - if not os.path.exists('./is_setup'): - return 1 - - if len(argv) != 3: - return 1 - - limit_flags = os.environ['LIMIT_FLAGS'] - opt_bisect_exist = False - debug_counter_exist = False - - for option in limit_flags.split(): - if '-opt-bisect-limit' in option: - opt_bisect_limit = int(option.split('=')[-1]) - opt_bisect_exist = True - if '-debug-counter=' in option: - debug_counter = int(option.split('=')[-1]) - debug_counter_exist = True - - if not opt_bisect_exist: - return 1 - - # Manually set total number and bad number - total_pass = 10 - total_transform = 20 - bad_pass = int(argv[1]) - bad_transform = int(argv[2]) - - if opt_bisect_limit == -1: - opt_bisect_limit = total_pass - - for i in range(1, total_pass + 1): - bisect_str = 'BISECT: %srunning pass (%d) Combine redundant ' \ - 'instructions on function (f1)' \ - % ('NOT ' if i > opt_bisect_limit else '', i) - print(bisect_str, file=sys.stderr) - - if debug_counter_exist: - print('Counters and values:', file=sys.stderr) - print( - 'instcombine-visit : {%d, 0, %d}' % (total_transform, debug_counter), - file=sys.stderr) - - if opt_bisect_limit > bad_pass or \ - (debug_counter_exist and debug_counter > bad_transform): - common.WriteWorkingSet([1]) - else: - common.WriteWorkingSet([0]) - - return 0 - - -if __name__ == '__main__': - retval = Main(sys.argv) - sys.exit(retval) + if not os.path.exists("./is_setup"): + return 1 + + if len(argv) != 3: + return 1 + + limit_flags = os.environ["LIMIT_FLAGS"] + opt_bisect_exist = False + debug_counter_exist = False + + for option in limit_flags.split(): + if "-opt-bisect-limit" in option: + opt_bisect_limit = int(option.split("=")[-1]) + opt_bisect_exist = True + if "-debug-counter=" in option: + debug_counter = int(option.split("=")[-1]) + debug_counter_exist = True + + if not opt_bisect_exist: + return 1 + + # Manually set total number and bad number + total_pass = 10 + total_transform = 20 + bad_pass = int(argv[1]) + bad_transform = int(argv[2]) + + if opt_bisect_limit == -1: + opt_bisect_limit = total_pass + + for i in range(1, total_pass + 1): + bisect_str = ( + "BISECT: %srunning pass (%d) Combine redundant " + "instructions on function (f1)" + % ("NOT " if i > opt_bisect_limit else "", i) + ) + print(bisect_str, file=sys.stderr) + + if debug_counter_exist: + print("Counters and values:", file=sys.stderr) + print( + "instcombine-visit : {%d, 0, %d}" + % (total_transform, debug_counter), + file=sys.stderr, + ) + + if opt_bisect_limit > bad_pass or ( + debug_counter_exist and debug_counter > bad_transform + ): + common.WriteWorkingSet([1]) + else: + common.WriteWorkingSet([0]) + + return 0 + + +if __name__ == "__main__": + retval = Main(sys.argv) + sys.exit(retval) diff --git a/binary_search_tool/test/cmd_script_no_support.py b/binary_search_tool/test/cmd_script_no_support.py index 0cc9fedc..d2c8c39b 100644 --- a/binary_search_tool/test/cmd_script_no_support.py +++ b/binary_search_tool/test/cmd_script_no_support.py @@ -16,14 +16,15 @@ import sys def Main(): - if not os.path.exists('./is_setup'): - return 1 - print( - 'No support for -opt-bisect-limit or -print-debug-counter.', - file=sys.stderr) - return 0 - - -if __name__ == '__main__': - retval = Main() - sys.exit(retval) + if not 
os.path.exists("./is_setup"): + return 1 + print( + "No support for -opt-bisect-limit or -print-debug-counter.", + file=sys.stderr, + ) + return 0 + + +if __name__ == "__main__": + retval = Main() + sys.exit(retval) diff --git a/binary_search_tool/test/common.py b/binary_search_tool/test/common.py index 98f40096..fa33f20c 100755 --- a/binary_search_tool/test/common.py +++ b/binary_search_tool/test/common.py @@ -8,35 +8,35 @@ DEFAULT_OBJECT_NUMBER = 1238 DEFAULT_BAD_OBJECT_NUMBER = 23 -OBJECTS_FILE = 'objects.txt' -WORKING_SET_FILE = 'working_set.txt' +OBJECTS_FILE = "objects.txt" +WORKING_SET_FILE = "working_set.txt" def ReadWorkingSet(): - working_set = [] - with open(WORKING_SET_FILE, 'r', encoding='utf-8') as f: - for l in f: - working_set.append(int(l)) - return working_set + working_set = [] + with open(WORKING_SET_FILE, "r", encoding="utf-8") as f: + for l in f: + working_set.append(int(l)) + return working_set def WriteWorkingSet(working_set): - with open(WORKING_SET_FILE, 'w', encoding='utf-8') as f: - for o in working_set: - f.write('{0}\n'.format(o)) + with open(WORKING_SET_FILE, "w", encoding="utf-8") as f: + for o in working_set: + f.write("{0}\n".format(o)) def ReadObjectsFile(): - objects_file = [] - with open(OBJECTS_FILE, 'r', encoding='utf-8') as f: - for l in f: - objects_file.append(int(l)) - return objects_file + objects_file = [] + with open(OBJECTS_FILE, "r", encoding="utf-8") as f: + for l in f: + objects_file.append(int(l)) + return objects_file def ReadObjectIndex(filename): - object_index = [] - with open(filename, 'r', encoding='utf-8') as f: - for o in f: - object_index.append(int(o)) - return object_index + object_index = [] + with open(filename, "r", encoding="utf-8") as f: + for o in f: + object_index.append(int(o)) + return object_index diff --git a/binary_search_tool/test/gen_init_list.py b/binary_search_tool/test/gen_init_list.py index 718ac877..1fe1b43e 100755 --- a/binary_search_tool/test/gen_init_list.py +++ b/binary_search_tool/test/gen_init_list.py @@ -15,13 +15,14 @@ from binary_search_tool.test import common def Main(): - ce = command_executer.GetCommandExecuter() - _, l, _ = ce.RunCommandWOutput( - 'cat {0} | wc -l'.format(common.OBJECTS_FILE), print_to_console=False) - for i in range(0, int(l)): - print(i) + ce = command_executer.GetCommandExecuter() + _, l, _ = ce.RunCommandWOutput( + "cat {0} | wc -l".format(common.OBJECTS_FILE), print_to_console=False + ) + for i in range(0, int(l)): + print(i) -if __name__ == '__main__': - Main() - sys.exit(0) +if __name__ == "__main__": + Main() + sys.exit(0) diff --git a/binary_search_tool/test/gen_obj.py b/binary_search_tool/test/gen_obj.py index 7ea91788..aa9a9344 100755 --- a/binary_search_tool/test/gen_obj.py +++ b/binary_search_tool/test/gen_obj.py @@ -21,81 +21,91 @@ from binary_search_tool.test import common def Main(argv): - """Generates a list, the value of each element is 0 or 1. - - The number of 1s in the list is specified by bad_obj_num. - The others are all 0s. The total number of 0s and 1s is specified by obj_num. - - Args: - argv: argument from command line - - Returns: - 0 always. - """ - parser = argparse.ArgumentParser() - parser.add_argument( - '-n', - '--obj_num', - dest='obj_num', - default=common.DEFAULT_OBJECT_NUMBER, - help=('Number of total objects.')) - parser.add_argument( - '-b', - '--bad_obj_num', - dest='bad_obj_num', - default=common.DEFAULT_BAD_OBJECT_NUMBER, - help=('Number of bad objects. 
Must be great than or '
-            'equal to zero and less than total object '
-            'number.'))
-  parser.add_argument(
-      '-o',
-      '--obj_list',
-      dest='obj_list',
-      default='',
-      help=('List of comma seperated objects to generate. '
-            'A 0 means the object is good, a 1 means the '
-            'object is bad.'))
-  options = parser.parse_args(argv)
-
-  obj_num = int(options.obj_num)
-  bad_obj_num = int(options.bad_obj_num)
-  bad_to_gen = int(options.bad_obj_num)
-  obj_list = options.obj_list
-  if not obj_list:
-    obj_list = []
-    for i in range(obj_num):
-      if bad_to_gen > 0 and random.randint(1, obj_num) <= bad_obj_num:
-        obj_list.append(1)
-        bad_to_gen -= 1
-      else:
-        obj_list.append(0)
-    while bad_to_gen > 0:
-      t = random.randint(0, obj_num - 1)
-      if obj_list[t] == 0:
-        obj_list[t] = 1
-        bad_to_gen -= 1
-  else:
-    obj_list = obj_list.split(',')
-
-  if os.path.isfile(common.OBJECTS_FILE):
-    os.remove(common.OBJECTS_FILE)
-  if os.path.isfile(common.WORKING_SET_FILE):
-    os.remove(common.WORKING_SET_FILE)
-
-  with open(common.OBJECTS_FILE, 'w', encoding='utf-8') as f:
-    with open(common.WORKING_SET_FILE, 'w', encoding='utf-8') as w:
-      for i in obj_list:
-        f.write('{0}\n'.format(i))
-        w.write('{0}\n'.format(i))
-
-  obj_num = len(obj_list)
-  bad_obj_num = obj_list.count(1)
-  print('Generated {0} object files, with {1} bad ones.'.format(
-      obj_num, bad_obj_num))
-
-  return 0
-
-
-if __name__ == '__main__':
-  retval = Main(sys.argv[1:])
-  sys.exit(retval)
+    """Generates a list, the value of each element is 0 or 1.
+
+    The number of 1s in the list is specified by bad_obj_num.
+    The others are all 0s. The total number of 0s and 1s is specified by obj_num.
+
+    Args:
+      argv: argument from command line
+
+    Returns:
+      0 always.
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "-n",
+        "--obj_num",
+        dest="obj_num",
+        default=common.DEFAULT_OBJECT_NUMBER,
+        help=("Number of total objects."),
+    )
+    parser.add_argument(
+        "-b",
+        "--bad_obj_num",
+        dest="bad_obj_num",
+        default=common.DEFAULT_BAD_OBJECT_NUMBER,
+        help=(
+            "Number of bad objects. Must be greater than or "
+            "equal to zero and less than the total object "
+            "number."
+        ),
+    )
+    parser.add_argument(
+        "-o",
+        "--obj_list",
+        dest="obj_list",
+        default="",
+        help=(
+            "List of comma separated objects to generate. "
+            "A 0 means the object is good, a 1 means the "
+            "object is bad."
+ ), + ) + options = parser.parse_args(argv) + + obj_num = int(options.obj_num) + bad_obj_num = int(options.bad_obj_num) + bad_to_gen = int(options.bad_obj_num) + obj_list = options.obj_list + if not obj_list: + obj_list = [] + for i in range(obj_num): + if bad_to_gen > 0 and random.randint(1, obj_num) <= bad_obj_num: + obj_list.append(1) + bad_to_gen -= 1 + else: + obj_list.append(0) + while bad_to_gen > 0: + t = random.randint(0, obj_num - 1) + if obj_list[t] == 0: + obj_list[t] = 1 + bad_to_gen -= 1 + else: + obj_list = obj_list.split(",") + + if os.path.isfile(common.OBJECTS_FILE): + os.remove(common.OBJECTS_FILE) + if os.path.isfile(common.WORKING_SET_FILE): + os.remove(common.WORKING_SET_FILE) + + with open(common.OBJECTS_FILE, "w", encoding="utf-8") as f: + with open(common.WORKING_SET_FILE, "w", encoding="utf-8") as w: + for i in obj_list: + f.write("{0}\n".format(i)) + w.write("{0}\n".format(i)) + + obj_num = len(obj_list) + bad_obj_num = obj_list.count(1) + print( + "Generated {0} object files, with {1} bad ones.".format( + obj_num, bad_obj_num + ) + ) + + return 0 + + +if __name__ == "__main__": + retval = Main(sys.argv[1:]) + sys.exit(retval) diff --git a/binary_search_tool/test/generate_cmd.py b/binary_search_tool/test/generate_cmd.py index 08b8c646..bcfe176d 100755 --- a/binary_search_tool/test/generate_cmd.py +++ b/binary_search_tool/test/generate_cmd.py @@ -17,14 +17,14 @@ import sys def Main(): - if not os.path.exists('./is_setup'): - return 1 - file_name = 'cmd_script.sh' - with open(file_name, 'w', encoding='utf-8') as f: - f.write('Generated by generate_cmd.py') - return 0 + if not os.path.exists("./is_setup"): + return 1 + file_name = "cmd_script.sh" + with open(file_name, "w", encoding="utf-8") as f: + f.write("Generated by generate_cmd.py") + return 0 -if __name__ == '__main__': - retval = Main() - sys.exit(retval) +if __name__ == "__main__": + retval = Main() + sys.exit(retval) diff --git a/binary_search_tool/test/is_good.py b/binary_search_tool/test/is_good.py index 8212aede..3be7248f 100755 --- a/binary_search_tool/test/is_good.py +++ b/binary_search_tool/test/is_good.py @@ -15,15 +15,15 @@ from binary_search_tool.test import common def Main(): - if not os.path.exists('./is_setup'): - return 1 - working_set = common.ReadWorkingSet() - for w in working_set: - if w == 1: - return 1 ## False, linking failure - return 0 - - -if __name__ == '__main__': - retval = Main() - sys.exit(retval) + if not os.path.exists("./is_setup"): + return 1 + working_set = common.ReadWorkingSet() + for w in working_set: + if w == 1: + return 1 ## False, linking failure + return 0 + + +if __name__ == "__main__": + retval = Main() + sys.exit(retval) diff --git a/binary_search_tool/test/is_good_noinc_prune.py b/binary_search_tool/test/is_good_noinc_prune.py index 6329f493..4e520162 100755 --- a/binary_search_tool/test/is_good_noinc_prune.py +++ b/binary_search_tool/test/is_good_noinc_prune.py @@ -21,31 +21,31 @@ from binary_search_tool.test import common def Main(): - working_set = common.ReadWorkingSet() + working_set = common.ReadWorkingSet() - with open('noinc_prune_good', 'r', encoding='utf-8') as good_args: - num_good_args = len(good_args.readlines()) + with open("noinc_prune_good", "r", encoding="utf-8") as good_args: + num_good_args = len(good_args.readlines()) - with open('noinc_prune_bad', 'r', encoding='utf-8') as bad_args: - num_bad_args = len(bad_args.readlines()) + with open("noinc_prune_bad", "r", encoding="utf-8") as bad_args: + num_bad_args = len(bad_args.readlines()) - 
num_args = num_good_args + num_bad_args - if num_args != len(working_set): - print('Only %d args, expected %d' % (num_args, len(working_set))) - print('%d good args, %d bad args' % (num_good_args, num_bad_args)) - return 3 + num_args = num_good_args + num_bad_args + if num_args != len(working_set): + print("Only %d args, expected %d" % (num_args, len(working_set))) + print("%d good args, %d bad args" % (num_good_args, num_bad_args)) + return 3 - os.remove('noinc_prune_bad') - os.remove('noinc_prune_good') + os.remove("noinc_prune_bad") + os.remove("noinc_prune_good") - if not os.path.exists('./is_setup'): - return 1 - for w in working_set: - if w == 1: - return 1 ## False, linking failure - return 0 + if not os.path.exists("./is_setup"): + return 1 + for w in working_set: + if w == 1: + return 1 ## False, linking failure + return 0 -if __name__ == '__main__': - retval = Main() - sys.exit(retval) +if __name__ == "__main__": + retval = Main() + sys.exit(retval) diff --git a/binary_search_tool/test/switch_tmp.py b/binary_search_tool/test/switch_tmp.py index 1d4ccc88..2ff35427 100755 --- a/binary_search_tool/test/switch_tmp.py +++ b/binary_search_tool/test/switch_tmp.py @@ -20,20 +20,20 @@ from binary_search_tool.test import common def Main(argv): - working_set = common.ReadWorkingSet() - object_index = common.ReadObjectIndex(argv[1]) + working_set = common.ReadWorkingSet() + object_index = common.ReadObjectIndex(argv[1]) - # Random number so the results can be checked - for oi in object_index: - working_set[int(oi)] = 42 + # Random number so the results can be checked + for oi in object_index: + working_set[int(oi)] = 42 - common.WriteWorkingSet(working_set) - with open('tmp_file', 'w', encoding='utf-8') as f: - f.write(argv[1]) + common.WriteWorkingSet(working_set) + with open("tmp_file", "w", encoding="utf-8") as f: + f.write(argv[1]) - return 0 + return 0 -if __name__ == '__main__': - retval = Main(sys.argv) - sys.exit(retval) +if __name__ == "__main__": + retval = Main(sys.argv) + sys.exit(retval) diff --git a/binary_search_tool/test/switch_to_bad.py b/binary_search_tool/test/switch_to_bad.py index 3a1ec84f..17061dd3 100755 --- a/binary_search_tool/test/switch_to_bad.py +++ b/binary_search_tool/test/switch_to_bad.py @@ -14,19 +14,19 @@ from binary_search_tool.test import common def Main(argv): - """Switch part of the objects file in working set to (possible) bad ones.""" - working_set = common.ReadWorkingSet() - objects_file = common.ReadObjectsFile() - object_index = common.ReadObjectIndex(argv[1]) + """Switch part of the objects file in working set to (possible) bad ones.""" + working_set = common.ReadWorkingSet() + objects_file = common.ReadObjectsFile() + object_index = common.ReadObjectIndex(argv[1]) - for oi in object_index: - working_set[oi] = objects_file[oi] + for oi in object_index: + working_set[oi] = objects_file[oi] - common.WriteWorkingSet(working_set) + common.WriteWorkingSet(working_set) - return 0 + return 0 -if __name__ == '__main__': - retval = Main(sys.argv) - sys.exit(retval) +if __name__ == "__main__": + retval = Main(sys.argv) + sys.exit(retval) diff --git a/binary_search_tool/test/switch_to_bad_noinc_prune.py b/binary_search_tool/test/switch_to_bad_noinc_prune.py index a390e9e2..dd57324f 100755 --- a/binary_search_tool/test/switch_to_bad_noinc_prune.py +++ b/binary_search_tool/test/switch_to_bad_noinc_prune.py @@ -27,21 +27,21 @@ from binary_search_tool.test import common def Main(argv): - """Switch part of the objects file in working set to (possible) bad ones.""" 
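# Combined sketch of the test-script contract shown above: switch scripts flip
# the working-set entries named by an index file (one integer per line), and
# is_good-style scripts exit nonzero while any bad object remains. The helper
# names and the file format here are assumptions based on the calls above.
def read_object_index(path):
    with open(path, encoding="utf-8") as f:
        return [int(line) for line in f if line.strip()]

def switch_to_good(working_set, index_file):
    for oi in read_object_index(index_file):
        working_set[oi] = 0  # 0 marks the object as good
    return working_set

def is_good(working_set):
    return 1 if any(w == 1 for w in working_set) else 0  # nonzero == bad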
- working_set = common.ReadWorkingSet() - objects_file = common.ReadObjectsFile() - object_index = common.ReadObjectIndex(argv[1]) + """Switch part of the objects file in working set to (possible) bad ones.""" + working_set = common.ReadWorkingSet() + objects_file = common.ReadObjectsFile() + object_index = common.ReadObjectIndex(argv[1]) - for oi in object_index: - working_set[oi] = objects_file[oi] + for oi in object_index: + working_set[oi] = objects_file[oi] - shutil.copy(argv[1], './noinc_prune_bad') + shutil.copy(argv[1], "./noinc_prune_bad") - common.WriteWorkingSet(working_set) + common.WriteWorkingSet(working_set) - return 0 + return 0 -if __name__ == '__main__': - retval = Main(sys.argv) - sys.exit(retval) +if __name__ == "__main__": + retval = Main(sys.argv) + sys.exit(retval) diff --git a/binary_search_tool/test/switch_to_bad_set_file.py b/binary_search_tool/test/switch_to_bad_set_file.py index a0dbb67b..6a4f9131 100755 --- a/binary_search_tool/test/switch_to_bad_set_file.py +++ b/binary_search_tool/test/switch_to_bad_set_file.py @@ -19,24 +19,24 @@ from binary_search_tool.test import common def Main(_): - """Switch part of the objects file in working set to (possible) bad ones.""" - working_set = common.ReadWorkingSet() - objects_file = common.ReadObjectsFile() + """Switch part of the objects file in working set to (possible) bad ones.""" + working_set = common.ReadWorkingSet() + objects_file = common.ReadObjectsFile() - if not os.path.exists(os.environ['BISECT_BAD_SET']): - print('Bad set file does not exist!') - return 1 + if not os.path.exists(os.environ["BISECT_BAD_SET"]): + print("Bad set file does not exist!") + return 1 - object_index = common.ReadObjectIndex(os.environ['BISECT_BAD_SET']) + object_index = common.ReadObjectIndex(os.environ["BISECT_BAD_SET"]) - for oi in object_index: - working_set[int(oi)] = objects_file[oi] + for oi in object_index: + working_set[int(oi)] = objects_file[oi] - common.WriteWorkingSet(working_set) + common.WriteWorkingSet(working_set) - return 0 + return 0 -if __name__ == '__main__': - retval = Main(sys.argv) - sys.exit(retval) +if __name__ == "__main__": + retval = Main(sys.argv) + sys.exit(retval) diff --git a/binary_search_tool/test/switch_to_good.py b/binary_search_tool/test/switch_to_good.py index 50e0ddff..bcbe5c28 100755 --- a/binary_search_tool/test/switch_to_good.py +++ b/binary_search_tool/test/switch_to_good.py @@ -19,17 +19,17 @@ from binary_search_tool.test import common def Main(argv): - working_set = common.ReadWorkingSet() - object_index = common.ReadObjectIndex(argv[1]) + working_set = common.ReadWorkingSet() + object_index = common.ReadObjectIndex(argv[1]) - for oi in object_index: - working_set[int(oi)] = 0 + for oi in object_index: + working_set[int(oi)] = 0 - common.WriteWorkingSet(working_set) + common.WriteWorkingSet(working_set) - return 0 + return 0 -if __name__ == '__main__': - retval = Main(sys.argv) - sys.exit(retval) +if __name__ == "__main__": + retval = Main(sys.argv) + sys.exit(retval) diff --git a/binary_search_tool/test/switch_to_good_noinc_prune.py b/binary_search_tool/test/switch_to_good_noinc_prune.py index 5e00a634..37976668 100755 --- a/binary_search_tool/test/switch_to_good_noinc_prune.py +++ b/binary_search_tool/test/switch_to_good_noinc_prune.py @@ -27,19 +27,19 @@ from binary_search_tool.test import common def Main(argv): - working_set = common.ReadWorkingSet() - object_index = common.ReadObjectIndex(argv[1]) + working_set = common.ReadWorkingSet() + object_index = common.ReadObjectIndex(argv[1]) 
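# The *_set_file variants take the object-index file from an environment
# variable rather than argv[1]; a minimal sketch of that lookup, mirroring the
# existence check above.
import os

def bad_set_path():
    path = os.environ.get("BISECT_BAD_SET", "")
    if not path or not os.path.exists(path):
        print("Bad set file does not exist!")
        return None
    return path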
- for oi in object_index: - working_set[int(oi)] = 0 + for oi in object_index: + working_set[int(oi)] = 0 - shutil.copy(argv[1], './noinc_prune_good') + shutil.copy(argv[1], "./noinc_prune_good") - common.WriteWorkingSet(working_set) + common.WriteWorkingSet(working_set) - return 0 + return 0 -if __name__ == '__main__': - retval = Main(sys.argv) - sys.exit(retval) +if __name__ == "__main__": + retval = Main(sys.argv) + sys.exit(retval) diff --git a/binary_search_tool/test/switch_to_good_set_file.py b/binary_search_tool/test/switch_to_good_set_file.py index cc884ddc..89b8bf17 100755 --- a/binary_search_tool/test/switch_to_good_set_file.py +++ b/binary_search_tool/test/switch_to_good_set_file.py @@ -23,22 +23,22 @@ from binary_search_tool.test import common def Main(_): - working_set = common.ReadWorkingSet() + working_set = common.ReadWorkingSet() - if not os.path.exists(os.environ['BISECT_GOOD_SET']): - print('Good set file does not exist!') - return 1 + if not os.path.exists(os.environ["BISECT_GOOD_SET"]): + print("Good set file does not exist!") + return 1 - object_index = common.ReadObjectIndex(os.environ['BISECT_GOOD_SET']) + object_index = common.ReadObjectIndex(os.environ["BISECT_GOOD_SET"]) - for oi in object_index: - working_set[int(oi)] = 0 + for oi in object_index: + working_set[int(oi)] = 0 - common.WriteWorkingSet(working_set) + common.WriteWorkingSet(working_set) - return 0 + return 0 -if __name__ == '__main__': - retval = Main(sys.argv) - sys.exit(retval) +if __name__ == "__main__": + retval = Main(sys.argv) + sys.exit(retval) diff --git a/binary_search_tool/test/test_setup.py b/binary_search_tool/test/test_setup.py index fa4743a7..4fe8c661 100755 --- a/binary_search_tool/test/test_setup.py +++ b/binary_search_tool/test/test_setup.py @@ -12,13 +12,13 @@ import sys def Main(): - # create ./is_setup - with open('./is_setup', 'w', encoding='utf-8'): - pass + # create ./is_setup + with open("./is_setup", "w", encoding="utf-8"): + pass - return 0 + return 0 -if __name__ == '__main__': - retval = Main() - sys.exit(retval) +if __name__ == "__main__": + retval = Main() + sys.exit(retval) diff --git a/binary_search_tool/test/test_setup_bad.py b/binary_search_tool/test/test_setup_bad.py index 1421009b..f34753bf 100755 --- a/binary_search_tool/test/test_setup_bad.py +++ b/binary_search_tool/test/test_setup_bad.py @@ -12,9 +12,9 @@ import sys def Main(): - return 1 ## False, flashing failure + return 1 ## False, flashing failure -if __name__ == '__main__': - retval = Main() - sys.exit(retval) +if __name__ == "__main__": + retval = Main() + sys.exit(retval) diff --git a/build_chromeos.py b/build_chromeos.py index 6f9c3682..3a6a17e1 100755 --- a/build_chromeos.py +++ b/build_chromeos.py @@ -13,10 +13,13 @@ particular release of ChromeOS. 
from __future__ import print_function -__author__ = ('asharif@google.com (Ahmad Sharif) ' - 'llozano@google.com (Luis Lozano) ' - 'raymes@google.com (Raymes Khoury) ' - 'shenhan@google.com (Han Shen)') + +__author__ = ( + "asharif@google.com (Ahmad Sharif) " + "llozano@google.com (Luis Lozano) " + "raymes@google.com (Raymes Khoury) " + "shenhan@google.com (Han Shen)" +) import argparse import os @@ -28,262 +31,349 @@ from cros_utils import misc def Usage(parser, message): - print('ERROR: %s' % message) - parser.print_help() - sys.exit(0) + print("ERROR: %s" % message) + parser.print_help() + sys.exit(0) def Main(argv): - """Build ChromeOS.""" - # Common initializations - cmd_executer = command_executer.GetCommandExecuter() - - parser = argparse.ArgumentParser() - parser.add_argument( - '--chromeos_root', - dest='chromeos_root', - help='Target directory for ChromeOS installation.') - parser.add_argument( - '--clobber_chroot', - dest='clobber_chroot', - action='store_true', - help='Delete the chroot and start fresh', - default=False) - parser.add_argument( - '--clobber_board', - dest='clobber_board', - action='store_true', - help='Delete the board and start fresh', - default=False) - parser.add_argument( - '--rebuild', - dest='rebuild', - action='store_true', - help='Rebuild all board packages except the toolchain.', - default=False) - parser.add_argument( - '--cflags', - dest='cflags', - default='', - help='CFLAGS for the ChromeOS packages') - parser.add_argument( - '--cxxflags', - dest='cxxflags', - default='', - help='CXXFLAGS for the ChromeOS packages') - parser.add_argument( - '--ldflags', - dest='ldflags', - default='', - help='LDFLAGS for the ChromeOS packages') - parser.add_argument( - '--board', dest='board', help='ChromeOS target board, e.g. x86-generic') - parser.add_argument( - '--package', dest='package', help='The package needs to be built') - parser.add_argument( - '--label', - dest='label', - help='Optional label symlink to point to build dir.') - parser.add_argument( - '--dev', - dest='dev', - default=False, - action='store_true', - help=('Make the final image in dev mode (eg writable, ' - 'more space on image). Defaults to False.')) - parser.add_argument( - '--debug', - dest='debug', - default=False, - action='store_true', - help=('Optional. Build chrome browser with "-g -O0". ' + """Build ChromeOS.""" + # Common initializations + cmd_executer = command_executer.GetCommandExecuter() + + parser = argparse.ArgumentParser() + parser.add_argument( + "--chromeos_root", + dest="chromeos_root", + help="Target directory for ChromeOS installation.", + ) + parser.add_argument( + "--clobber_chroot", + dest="clobber_chroot", + action="store_true", + help="Delete the chroot and start fresh", + default=False, + ) + parser.add_argument( + "--clobber_board", + dest="clobber_board", + action="store_true", + help="Delete the board and start fresh", + default=False, + ) + parser.add_argument( + "--rebuild", + dest="rebuild", + action="store_true", + help="Rebuild all board packages except the toolchain.", + default=False, + ) + parser.add_argument( + "--cflags", + dest="cflags", + default="", + help="CFLAGS for the ChromeOS packages", + ) + parser.add_argument( + "--cxxflags", + dest="cxxflags", + default="", + help="CXXFLAGS for the ChromeOS packages", + ) + parser.add_argument( + "--ldflags", + dest="ldflags", + default="", + help="LDFLAGS for the ChromeOS packages", + ) + parser.add_argument( + "--board", dest="board", help="ChromeOS target board, e.g. 
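# The Usage helper at the top of build_chromeos.py prints the error, dumps the
# argparse help text, and exits; a self-contained sketch of the same pattern.
# Note that it exits with status 0 even on error, mirroring the code above.
import argparse
import sys

def usage(parser, message):
    print("ERROR: %s" % message)
    parser.print_help()
    sys.exit(0)

parser = argparse.ArgumentParser()
parser.add_argument("--board", dest="board", help="ChromeOS target board")
options = parser.parse_args([])
if options.board is None:
    usage(parser, "--board must be set")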
x86-generic" + ) + parser.add_argument( + "--package", dest="package", help="The package needs to be built" + ) + parser.add_argument( + "--label", + dest="label", + help="Optional label symlink to point to build dir.", + ) + parser.add_argument( + "--dev", + dest="dev", + default=False, + action="store_true", + help=( + "Make the final image in dev mode (eg writable, " + "more space on image). Defaults to False." + ), + ) + parser.add_argument( + "--debug", + dest="debug", + default=False, + action="store_true", + help=( + 'Optional. Build chrome browser with "-g -O0". ' "Notice, this also turns on '--dev'. " - 'Defaults to False.')) - parser.add_argument( - '--env', dest='env', default='', help='Env to pass to build_packages.') - parser.add_argument( - '--vanilla', - dest='vanilla', - default=False, - action='store_true', - help='Use default ChromeOS toolchain.') - parser.add_argument( - '--vanilla_image', - dest='vanilla_image', - default=False, - action='store_true', - help=('Use prebuild packages for building the image. ' - 'It also implies the --vanilla option is set.')) - - options = parser.parse_args(argv[1:]) - - if options.chromeos_root is None: - Usage(parser, '--chromeos_root must be set') - options.chromeos_root = os.path.expanduser(options.chromeos_root) - scripts_dir = os.path.join(options.chromeos_root, 'src', 'scripts') - if not os.path.isdir(scripts_dir): - Usage(parser, '--chromeos_root must be set up first. Use setup_chromeos.py') - - if options.board is None: - Usage(parser, '--board must be set') - - if options.debug: - options.dev = True - - build_packages_env = options.env - if build_packages_env.find('EXTRA_BOARD_FLAGS=') != -1: - logger.GetLogger().LogFatal( - ('Passing "EXTRA_BOARD_FLAGS" in "--env" is not supported. ' - 'This flags is used internally by this script. ' - 'Contact the author for more detail.')) - - if options.rebuild: - build_packages_env += ' EXTRA_BOARD_FLAGS=-e' - # EXTRA_BOARD_FLAGS=-e should clean up the object files for the chrome - # browser but it doesn't. So do it here. - misc.RemoveChromeBrowserObjectFiles(options.chromeos_root, options.board) - - # Build with afdo_use by default. - # To change the default use --env="USE=-afdo_use". - build_packages_env = misc.MergeEnvStringWithDict( - build_packages_env, {'USE': 'chrome_internal afdo_use -cros-debug'}) - - build_packages_command = misc.GetBuildPackagesCommand( - board=options.board, usepkg=options.vanilla_image, debug=options.debug) - - if options.package: - build_packages_command += ' {0}'.format(options.package) - - build_image_command = misc.GetBuildImageCommand(options.board, options.dev) - - if options.vanilla or options.vanilla_image: - command = misc.GetSetupBoardCommand( - options.board, - usepkg=options.vanilla_image, - force=options.clobber_board) - command += '; ' + build_packages_env + ' ' + build_packages_command - command += '&& ' + build_packages_env + ' ' + build_image_command - ret = cmd_executer.ChrootRunCommand(options.chromeos_root, command) - return ret - - # Setup board - if not os.path.isdir(options.chromeos_root + '/chroot/build/' + - options.board) or options.clobber_board: - # Run build_tc.py from binary package - ret = cmd_executer.ChrootRunCommand( - options.chromeos_root, - misc.GetSetupBoardCommand(options.board, force=options.clobber_board)) - logger.GetLogger().LogFatalIf(ret, 'setup_board failed') - else: - logger.GetLogger().LogOutput('Did not setup_board ' - 'because it already exists') + "Defaults to False." 
+ ), + ) + parser.add_argument( + "--env", dest="env", default="", help="Env to pass to build_packages." + ) + parser.add_argument( + "--vanilla", + dest="vanilla", + default=False, + action="store_true", + help="Use default ChromeOS toolchain.", + ) + parser.add_argument( + "--vanilla_image", + dest="vanilla_image", + default=False, + action="store_true", + help=( + "Use prebuild packages for building the image. " + "It also implies the --vanilla option is set." + ), + ) + + options = parser.parse_args(argv[1:]) + + if options.chromeos_root is None: + Usage(parser, "--chromeos_root must be set") + options.chromeos_root = os.path.expanduser(options.chromeos_root) + scripts_dir = os.path.join(options.chromeos_root, "src", "scripts") + if not os.path.isdir(scripts_dir): + Usage( + parser, + "--chromeos_root must be set up first. Use setup_chromeos.py", + ) + + if options.board is None: + Usage(parser, "--board must be set") + + if options.debug: + options.dev = True + + build_packages_env = options.env + if build_packages_env.find("EXTRA_BOARD_FLAGS=") != -1: + logger.GetLogger().LogFatal( + ( + 'Passing "EXTRA_BOARD_FLAGS" in "--env" is not supported. ' + "This flags is used internally by this script. " + "Contact the author for more detail." + ) + ) - if options.debug: - # Perform 2-step build_packages to build a debug chrome browser. - - # Firstly, build everything that chromeos-chrome depends on normally. if options.rebuild: - # Give warning about "--rebuild" and "--debug". Under this combination, - # only dependencies of "chromeos-chrome" get rebuilt. - logger.GetLogger().LogWarning( - '--rebuild" does not correctly re-build every package when ' - '"--debug" is enabled. ') - - # Replace EXTRA_BOARD_FLAGS=-e with "-e --onlydeps" - build_packages_env = build_packages_env.replace( - 'EXTRA_BOARD_FLAGS=-e', 'EXTRA_BOARD_FLAGS="-e --onlydeps"') + build_packages_env += " EXTRA_BOARD_FLAGS=-e" + # EXTRA_BOARD_FLAGS=-e should clean up the object files for the chrome + # browser but it doesn't. So do it here. + misc.RemoveChromeBrowserObjectFiles( + options.chromeos_root, options.board + ) + + # Build with afdo_use by default. + # To change the default use --env="USE=-afdo_use". 
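# Hedged sketch of what misc.MergeEnvStringWithDict (called just below) is
# assumed to do: fold default variables into the shell-style --env string,
# combining values for keys present in both. This illustrates the call site's
# apparent contract, not the cros_utils implementation.
def merge_env_string_with_dict(env_string, defaults):
    merged = dict(defaults)
    for assignment in env_string.split():
        key, _, value = assignment.partition("=")
        # Assumption: user-supplied values are appended to the defaults; the
        # real helper's precedence rules may differ.
        merged[key] = "%s %s" % (merged[key], value) if key in merged else value
    return " ".join('%s="%s"' % kv for kv in merged.items())

# e.g. USE becomes 'chrome_internal afdo_use -cros-debug -afdo_use'
print(merge_env_string_with_dict(
    "USE=-afdo_use", {"USE": "chrome_internal afdo_use -cros-debug"}))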
+ build_packages_env = misc.MergeEnvStringWithDict( + build_packages_env, {"USE": "chrome_internal afdo_use -cros-debug"} + ) + + build_packages_command = misc.GetBuildPackagesCommand( + board=options.board, usepkg=options.vanilla_image, debug=options.debug + ) + + if options.package: + build_packages_command += " {0}".format(options.package) + + build_image_command = misc.GetBuildImageCommand(options.board, options.dev) + + if options.vanilla or options.vanilla_image: + command = misc.GetSetupBoardCommand( + options.board, + usepkg=options.vanilla_image, + force=options.clobber_board, + ) + command += "; " + build_packages_env + " " + build_packages_command + command += "&& " + build_packages_env + " " + build_image_command + ret = cmd_executer.ChrootRunCommand(options.chromeos_root, command) + return ret + + # Setup board + if ( + not os.path.isdir( + options.chromeos_root + "/chroot/build/" + options.board + ) + or options.clobber_board + ): + # Run build_tc.py from binary package + ret = cmd_executer.ChrootRunCommand( + options.chromeos_root, + misc.GetSetupBoardCommand( + options.board, force=options.clobber_board + ), + ) + logger.GetLogger().LogFatalIf(ret, "setup_board failed") else: - build_packages_env += ' EXTRA_BOARD_FLAGS=--onlydeps' - + logger.GetLogger().LogOutput( + "Did not setup_board " "because it already exists" + ) + + if options.debug: + # Perform 2-step build_packages to build a debug chrome browser. + + # Firstly, build everything that chromeos-chrome depends on normally. + if options.rebuild: + # Give warning about "--rebuild" and "--debug". Under this combination, + # only dependencies of "chromeos-chrome" get rebuilt. + logger.GetLogger().LogWarning( + '--rebuild" does not correctly re-build every package when ' + '"--debug" is enabled. ' + ) + + # Replace EXTRA_BOARD_FLAGS=-e with "-e --onlydeps" + build_packages_env = build_packages_env.replace( + "EXTRA_BOARD_FLAGS=-e", 'EXTRA_BOARD_FLAGS="-e --onlydeps"' + ) + else: + build_packages_env += " EXTRA_BOARD_FLAGS=--onlydeps" + + ret = cmd_executer.ChrootRunCommand( + options.chromeos_root, + 'CFLAGS="$(portageq-%s envvar CFLAGS) %s" ' + 'CXXFLAGS="$(portageq-%s envvar CXXFLAGS) %s" ' + 'LDFLAGS="$(portageq-%s envvar LDFLAGS) %s" ' + "CHROME_ORIGIN=SERVER_SOURCE " + "%s " + "%s --skip_chroot_upgrade" + "chromeos-chrome" + % ( + options.board, + options.cflags, + options.board, + options.cxxflags, + options.board, + options.ldflags, + build_packages_env, + build_packages_command, + ), + ) + + logger.GetLogger().LogFatalIf( + ret, + "build_packages failed while trying to build chromeos-chrome deps.", + ) + + # Secondly, build chromeos-chrome using debug mode. + # Replace '--onlydeps' with '--nodeps'. 
+ if options.rebuild: + build_packages_env = build_packages_env.replace( + 'EXTRA_BOARD_FLAGS="-e --onlydeps"', + "EXTRA_BOARD_FLAGS=--nodeps", + ) + else: + build_packages_env = build_packages_env.replace( + "EXTRA_BOARD_FLAGS=--onlydeps", "EXTRA_BOARD_FLAGS=--nodeps" + ) + ret = cmd_executer.ChrootRunCommand( + options.chromeos_root, + 'CFLAGS="$(portageq-%s envvar CFLAGS) %s" ' + 'CXXFLAGS="$(portageq-%s envvar CXXFLAGS) %s" ' + 'LDFLAGS="$(portageq-%s envvar LDFLAGS) %s" ' + "CHROME_ORIGIN=SERVER_SOURCE BUILDTYPE=Debug " + "%s " + "%s --skip_chroot_upgrade" + "chromeos-chrome" + % ( + options.board, + options.cflags, + options.board, + options.cxxflags, + options.board, + options.ldflags, + build_packages_env, + build_packages_command, + ), + ) + logger.GetLogger().LogFatalIf( + ret, + "build_packages failed while trying to build debug chromeos-chrome.", + ) + + # Now, we have built chromeos-chrome and all dependencies. + # Finally, remove '-e' from EXTRA_BOARD_FLAGS, + # otherwise, chromeos-chrome gets rebuilt. + build_packages_env = build_packages_env.replace( + "EXTRA_BOARD_FLAGS=--nodeps", "" + ) + + # Up to now, we have a debug built chromos-chrome browser. + # Fall through to build the rest of the world. + + # Build packages ret = cmd_executer.ChrootRunCommand( - options.chromeos_root, 'CFLAGS="$(portageq-%s envvar CFLAGS) %s" ' + options.chromeos_root, + 'CFLAGS="$(portageq-%s envvar CFLAGS) %s" ' 'CXXFLAGS="$(portageq-%s envvar CXXFLAGS) %s" ' 'LDFLAGS="$(portageq-%s envvar LDFLAGS) %s" ' - 'CHROME_ORIGIN=SERVER_SOURCE ' - '%s ' - '%s --skip_chroot_upgrade' - 'chromeos-chrome' % (options.board, options.cflags, options.board, - options.cxxflags, options.board, options.ldflags, - build_packages_env, build_packages_command)) - - logger.GetLogger().LogFatalIf(\ - ret, 'build_packages failed while trying to build chromeos-chrome deps.') - - # Secondly, build chromeos-chrome using debug mode. - # Replace '--onlydeps' with '--nodeps'. - if options.rebuild: - build_packages_env = build_packages_env.replace( - 'EXTRA_BOARD_FLAGS="-e --onlydeps"', 'EXTRA_BOARD_FLAGS=--nodeps') - else: - build_packages_env = build_packages_env.replace( - 'EXTRA_BOARD_FLAGS=--onlydeps', 'EXTRA_BOARD_FLAGS=--nodeps') + "CHROME_ORIGIN=SERVER_SOURCE " + "%s " + "%s --skip_chroot_upgrade" + % ( + options.board, + options.cflags, + options.board, + options.cxxflags, + options.board, + options.ldflags, + build_packages_env, + build_packages_command, + ), + ) + + logger.GetLogger().LogFatalIf(ret, "build_packages failed") + if options.package: + return 0 + # Build image ret = cmd_executer.ChrootRunCommand( - options.chromeos_root, 'CFLAGS="$(portageq-%s envvar CFLAGS) %s" ' - 'CXXFLAGS="$(portageq-%s envvar CXXFLAGS) %s" ' - 'LDFLAGS="$(portageq-%s envvar LDFLAGS) %s" ' - 'CHROME_ORIGIN=SERVER_SOURCE BUILDTYPE=Debug ' - '%s ' - '%s --skip_chroot_upgrade' - 'chromeos-chrome' % (options.board, options.cflags, options.board, - options.cxxflags, options.board, options.ldflags, - build_packages_env, build_packages_command)) - logger.GetLogger().LogFatalIf( - ret, - 'build_packages failed while trying to build debug chromeos-chrome.') - - # Now, we have built chromeos-chrome and all dependencies. - # Finally, remove '-e' from EXTRA_BOARD_FLAGS, - # otherwise, chromeos-chrome gets rebuilt. - build_packages_env = build_packages_env.replace(\ - 'EXTRA_BOARD_FLAGS=--nodeps', '') - - # Up to now, we have a debug built chromos-chrome browser. - # Fall through to build the rest of the world. 
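# The long ChrootRunCommand strings above rely on Python concatenating adjacent
# string literals at compile time, so each piece needs its own trailing space;
# a missing one silently fuses the neighbouring tokens into a single argument.
# A tiny illustration with placeholder values:
cmd = ("CHROME_ORIGIN=SERVER_SOURCE "
       "%s "
       "%s --skip_chroot_upgrade "
       "chromeos-chrome") % ("USE=afdo_use", "build_packages")
assert cmd == ("CHROME_ORIGIN=SERVER_SOURCE USE=afdo_use "
               "build_packages --skip_chroot_upgrade chromeos-chrome")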
- - # Build packages - ret = cmd_executer.ChrootRunCommand( - options.chromeos_root, 'CFLAGS="$(portageq-%s envvar CFLAGS) %s" ' - 'CXXFLAGS="$(portageq-%s envvar CXXFLAGS) %s" ' - 'LDFLAGS="$(portageq-%s envvar LDFLAGS) %s" ' - 'CHROME_ORIGIN=SERVER_SOURCE ' - '%s ' - '%s --skip_chroot_upgrade' % - (options.board, options.cflags, options.board, options.cxxflags, - options.board, options.ldflags, build_packages_env, - build_packages_command)) - - logger.GetLogger().LogFatalIf(ret, 'build_packages failed') - if options.package: - return 0 - # Build image - ret = cmd_executer.ChrootRunCommand( - options.chromeos_root, build_packages_env + ' ' + build_image_command) - - logger.GetLogger().LogFatalIf(ret, 'build_image failed') - - flags_file_name = 'flags.txt' - flags_file_path = ('%s/src/build/images/%s/latest/%s' % - (options.chromeos_root, options.board, flags_file_name)) - with open(flags_file_path, 'w', encoding='utf-8') as flags_file: - flags_file.write('CFLAGS=%s\n' % options.cflags) - flags_file.write('CXXFLAGS=%s\n' % options.cxxflags) - flags_file.write('LDFLAGS=%s\n' % options.ldflags) - - if options.label: - image_dir_path = ('%s/src/build/images/%s/latest' % (options.chromeos_root, - options.board)) - real_image_dir_path = os.path.realpath(image_dir_path) - command = ('ln -sf -T %s %s/%s' % (os.path.basename(real_image_dir_path), - os.path.dirname(real_image_dir_path), - options.label)) - - ret = cmd_executer.RunCommand(command) - logger.GetLogger().LogFatalIf( - ret, 'Failed to apply symlink label %s' % options.label) - - return ret - - -if __name__ == '__main__': - retval = Main(sys.argv) - sys.exit(retval) + options.chromeos_root, build_packages_env + " " + build_image_command + ) + + logger.GetLogger().LogFatalIf(ret, "build_image failed") + + flags_file_name = "flags.txt" + flags_file_path = "%s/src/build/images/%s/latest/%s" % ( + options.chromeos_root, + options.board, + flags_file_name, + ) + with open(flags_file_path, "w", encoding="utf-8") as flags_file: + flags_file.write("CFLAGS=%s\n" % options.cflags) + flags_file.write("CXXFLAGS=%s\n" % options.cxxflags) + flags_file.write("LDFLAGS=%s\n" % options.ldflags) + + if options.label: + image_dir_path = "%s/src/build/images/%s/latest" % ( + options.chromeos_root, + options.board, + ) + real_image_dir_path = os.path.realpath(image_dir_path) + command = "ln -sf -T %s %s/%s" % ( + os.path.basename(real_image_dir_path), + os.path.dirname(real_image_dir_path), + options.label, + ) + + ret = cmd_executer.RunCommand(command) + logger.GetLogger().LogFatalIf( + ret, "Failed to apply symlink label %s" % options.label + ) + + return ret + + +if __name__ == "__main__": + retval = Main(sys.argv) + sys.exit(retval) diff --git a/build_tc.py b/build_tc.py index 00065f85..8eed86ee 100755 --- a/build_tc.py +++ b/build_tc.py @@ -11,7 +11,8 @@ This script sets up the toolchain if you give it the gcctools directory. 
from __future__ import print_function -__author__ = 'asharif@google.com (Ahmad Sharif)' + +__author__ = "asharif@google.com (Ahmad Sharif)" import argparse import getpass @@ -19,336 +20,400 @@ import os import sys import tempfile -import tc_enter_chroot from cros_utils import command_executer from cros_utils import constants from cros_utils import misc +import tc_enter_chroot class ToolchainPart(object): - """Class to hold the toolchain pieces.""" - - def __init__(self, - name, - source_path, - chromeos_root, - board, - incremental, - build_env, - gcc_enable_ccache=False): - self._name = name - self._source_path = misc.CanonicalizePath(source_path) - self._chromeos_root = chromeos_root - self._board = board - self._ctarget = misc.GetCtargetFromBoard(self._board, self._chromeos_root) - self._gcc_libs_dest = misc.GetGccLibsDestForBoard(self._board, - self._chromeos_root) - self.tag = '%s-%s' % (name, self._ctarget) - self._ce = command_executer.GetCommandExecuter() - self._mask_file = os.path.join( - self._chromeos_root, 'chroot', - 'etc/portage/package.mask/cross-%s' % self._ctarget) - self._new_mask_file = None - - self._chroot_source_path = os.path.join(constants.MOUNTED_TOOLCHAIN_ROOT, - self._name).lstrip('/') - self._incremental = incremental - self._build_env = build_env - self._gcc_enable_ccache = gcc_enable_ccache - - def RunSetupBoardIfNecessary(self): - cross_symlink = os.path.join(self._chromeos_root, 'chroot', - 'usr/local/bin/emerge-%s' % self._board) - if not os.path.exists(cross_symlink): - command = 'setup_board --board=%s' % self._board - self._ce.ChrootRunCommand(self._chromeos_root, command) - - def Build(self): - rv = 1 - try: - self.UninstallTool() - self.MoveMaskFile() - self.MountSources(False) - self.RemoveCompiledFile() - rv = self.BuildTool() - finally: - self.UnMoveMaskFile() - return rv - - def RemoveCompiledFile(self): - compiled_file = os.path.join(self._chromeos_root, 'chroot', - 'var/tmp/portage/cross-%s' % self._ctarget, - '%s-9999' % self._name, '.compiled') - command = 'rm -f %s' % compiled_file - self._ce.RunCommand(command) - - def MountSources(self, unmount_source): - mount_points = [] - mounted_source_path = os.path.join(self._chromeos_root, 'chroot', - self._chroot_source_path) - src_mp = tc_enter_chroot.MountPoint(self._source_path, mounted_source_path, - getpass.getuser(), 'ro') - mount_points.append(src_mp) - - build_suffix = 'build-%s' % self._ctarget - build_dir = '%s-%s' % (self._source_path, build_suffix) - - if not self._incremental and os.path.exists(build_dir): - command = 'rm -rf %s/*' % build_dir - self._ce.RunCommand(command) - - # Create a -build directory for the objects. - command = 'mkdir -p %s' % build_dir - self._ce.RunCommand(command) - - mounted_build_dir = os.path.join( - self._chromeos_root, 'chroot', - '%s-%s' % (self._chroot_source_path, build_suffix)) - build_mp = tc_enter_chroot.MountPoint(build_dir, mounted_build_dir, - getpass.getuser()) - mount_points.append(build_mp) - - if unmount_source: - unmount_statuses = [mp.UnMount() == 0 for mp in mount_points] - assert all(unmount_statuses), 'Could not unmount all mount points!' - else: - mount_statuses = [mp.DoMount() == 0 for mp in mount_points] - - if not all(mount_statuses): - mounted = [ - mp for mp, status in zip(mount_points, mount_statuses) if status - ] - unmount_statuses = [mp.UnMount() == 0 for mp in mounted] - assert all(unmount_statuses), 'Could not unmount all mount points!' 
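# Sketch of the mount rollback in MountSources above: attempt every mount, and
# if any fails, unmount only the ones that succeeded. MountPoint here is a
# reduced stand-in for tc_enter_chroot.MountPoint, limited to the two methods
# this logic needs.
class MountPoint(object):
    def __init__(self, name, should_fail=False):
        self.name = name
        self._should_fail = should_fail

    def DoMount(self):
        return 1 if self._should_fail else 0  # 0 == success, like the real class

    def UnMount(self):
        return 0

mount_points = [MountPoint("src"), MountPoint("build", should_fail=True)]
mount_statuses = [mp.DoMount() == 0 for mp in mount_points]
if not all(mount_statuses):
    mounted = [mp for mp, ok in zip(mount_points, mount_statuses) if ok]
    assert all(mp.UnMount() == 0 for mp in mounted), \
        "Could not unmount all mount points!"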
- - def UninstallTool(self): - command = 'sudo CLEAN_DELAY=0 emerge -C cross-%s/%s' % (self._ctarget, - self._name) - self._ce.ChrootRunCommand(self._chromeos_root, command) - - def BuildTool(self): - env = self._build_env - # FEATURES=buildpkg adds minutes of time so we disable it. - # TODO(shenhan): keep '-sandbox' for a while for compatibility, then remove - # it after a while. - features = ('nostrip userpriv userfetch -usersandbox -sandbox noclean ' - '-buildpkg') - env['FEATURES'] = features - - if self._incremental: - env['FEATURES'] += ' keepwork' - - if 'USE' in env: - env['USE'] += ' multislot mounted_%s' % self._name - else: - env['USE'] = 'multislot mounted_%s' % self._name - - # Disable ccache in our compilers. cache may be problematic for us. - # It ignores compiler environments settings and it is not clear if - # the cache hit algorithm verifies all the compiler binaries or - # just the driver. - if self._name == 'gcc' and not self._gcc_enable_ccache: - env['USE'] += ' -wrapper_ccache' - - env['%s_SOURCE_PATH' % self._name.upper()] = ( - os.path.join('/', self._chroot_source_path)) - env['ACCEPT_KEYWORDS'] = '~*' - env_string = ' '.join(['%s="%s"' % var for var in env.items()]) - command = 'emerge =cross-%s/%s-9999' % (self._ctarget, self._name) - full_command = 'sudo %s %s' % (env_string, command) - rv = self._ce.ChrootRunCommand(self._chromeos_root, full_command) - if rv != 0: - return rv - if self._name == 'gcc': - command = ('sudo cp -r /usr/lib/gcc/%s %s' % (self._ctarget, - self._gcc_libs_dest)) - rv = self._ce.ChrootRunCommand(self._chromeos_root, command) - return rv - - def MoveMaskFile(self): - self._new_mask_file = None - if os.path.isfile(self._mask_file): - self._new_mask_file = tempfile.mktemp() - command = 'sudo mv %s %s' % (self._mask_file, self._new_mask_file) - self._ce.RunCommand(command) - - def UnMoveMaskFile(self): - if self._new_mask_file: - command = 'sudo mv %s %s' % (self._new_mask_file, self._mask_file) - self._ce.RunCommand(command) + """Class to hold the toolchain pieces.""" + + def __init__( + self, + name, + source_path, + chromeos_root, + board, + incremental, + build_env, + gcc_enable_ccache=False, + ): + self._name = name + self._source_path = misc.CanonicalizePath(source_path) + self._chromeos_root = chromeos_root + self._board = board + self._ctarget = misc.GetCtargetFromBoard( + self._board, self._chromeos_root + ) + self._gcc_libs_dest = misc.GetGccLibsDestForBoard( + self._board, self._chromeos_root + ) + self.tag = "%s-%s" % (name, self._ctarget) + self._ce = command_executer.GetCommandExecuter() + self._mask_file = os.path.join( + self._chromeos_root, + "chroot", + "etc/portage/package.mask/cross-%s" % self._ctarget, + ) + self._new_mask_file = None + + self._chroot_source_path = os.path.join( + constants.MOUNTED_TOOLCHAIN_ROOT, self._name + ).lstrip("/") + self._incremental = incremental + self._build_env = build_env + self._gcc_enable_ccache = gcc_enable_ccache + + def RunSetupBoardIfNecessary(self): + cross_symlink = os.path.join( + self._chromeos_root, + "chroot", + "usr/local/bin/emerge-%s" % self._board, + ) + if not os.path.exists(cross_symlink): + command = "setup_board --board=%s" % self._board + self._ce.ChrootRunCommand(self._chromeos_root, command) + + def Build(self): + rv = 1 + try: + self.UninstallTool() + self.MoveMaskFile() + self.MountSources(False) + self.RemoveCompiledFile() + rv = self.BuildTool() + finally: + self.UnMoveMaskFile() + return rv + + def RemoveCompiledFile(self): + compiled_file = os.path.join( + 
self._chromeos_root, + "chroot", + "var/tmp/portage/cross-%s" % self._ctarget, + "%s-9999" % self._name, + ".compiled", + ) + command = "rm -f %s" % compiled_file + self._ce.RunCommand(command) + + def MountSources(self, unmount_source): + mount_points = [] + mounted_source_path = os.path.join( + self._chromeos_root, "chroot", self._chroot_source_path + ) + src_mp = tc_enter_chroot.MountPoint( + self._source_path, mounted_source_path, getpass.getuser(), "ro" + ) + mount_points.append(src_mp) + + build_suffix = "build-%s" % self._ctarget + build_dir = "%s-%s" % (self._source_path, build_suffix) + + if not self._incremental and os.path.exists(build_dir): + command = "rm -rf %s/*" % build_dir + self._ce.RunCommand(command) + + # Create a -build directory for the objects. + command = "mkdir -p %s" % build_dir + self._ce.RunCommand(command) + + mounted_build_dir = os.path.join( + self._chromeos_root, + "chroot", + "%s-%s" % (self._chroot_source_path, build_suffix), + ) + build_mp = tc_enter_chroot.MountPoint( + build_dir, mounted_build_dir, getpass.getuser() + ) + mount_points.append(build_mp) + + if unmount_source: + unmount_statuses = [mp.UnMount() == 0 for mp in mount_points] + assert all(unmount_statuses), "Could not unmount all mount points!" + else: + mount_statuses = [mp.DoMount() == 0 for mp in mount_points] + + if not all(mount_statuses): + mounted = [ + mp + for mp, status in zip(mount_points, mount_statuses) + if status + ] + unmount_statuses = [mp.UnMount() == 0 for mp in mounted] + assert all( + unmount_statuses + ), "Could not unmount all mount points!" + + def UninstallTool(self): + command = "sudo CLEAN_DELAY=0 emerge -C cross-%s/%s" % ( + self._ctarget, + self._name, + ) + self._ce.ChrootRunCommand(self._chromeos_root, command) + + def BuildTool(self): + env = self._build_env + # FEATURES=buildpkg adds minutes of time so we disable it. + # TODO(shenhan): keep '-sandbox' for a while for compatibility, then remove + # it after a while. + features = ( + "nostrip userpriv userfetch -usersandbox -sandbox noclean " + "-buildpkg" + ) + env["FEATURES"] = features + + if self._incremental: + env["FEATURES"] += " keepwork" + + if "USE" in env: + env["USE"] += " multislot mounted_%s" % self._name + else: + env["USE"] = "multislot mounted_%s" % self._name + + # Disable ccache in our compilers. cache may be problematic for us. + # It ignores compiler environments settings and it is not clear if + # the cache hit algorithm verifies all the compiler binaries or + # just the driver. 
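# The emerge command assembled in BuildTool below is prefixed with VAR="value"
# pairs rendered from the env dict; a standalone sketch of that join, with
# example values (the cross target here is illustrative).
env = {"FEATURES": "nostrip userpriv", "USE": "multislot mounted_gcc"}
env_string = " ".join('%s="%s"' % var for var in env.items())
# -> FEATURES="nostrip userpriv" USE="multislot mounted_gcc"
full_command = "sudo %s %s" % (
    env_string, "emerge =cross-armv7a-cros-linux-gnueabi/gcc-9999")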
+ if self._name == "gcc" and not self._gcc_enable_ccache: + env["USE"] += " -wrapper_ccache" + + env["%s_SOURCE_PATH" % self._name.upper()] = os.path.join( + "/", self._chroot_source_path + ) + env["ACCEPT_KEYWORDS"] = "~*" + env_string = " ".join(['%s="%s"' % var for var in env.items()]) + command = "emerge =cross-%s/%s-9999" % (self._ctarget, self._name) + full_command = "sudo %s %s" % (env_string, command) + rv = self._ce.ChrootRunCommand(self._chromeos_root, full_command) + if rv != 0: + return rv + if self._name == "gcc": + command = "sudo cp -r /usr/lib/gcc/%s %s" % ( + self._ctarget, + self._gcc_libs_dest, + ) + rv = self._ce.ChrootRunCommand(self._chromeos_root, command) + return rv + + def MoveMaskFile(self): + self._new_mask_file = None + if os.path.isfile(self._mask_file): + self._new_mask_file = tempfile.mktemp() + command = "sudo mv %s %s" % (self._mask_file, self._new_mask_file) + self._ce.RunCommand(command) + + def UnMoveMaskFile(self): + if self._new_mask_file: + command = "sudo mv %s %s" % (self._new_mask_file, self._mask_file) + self._ce.RunCommand(command) def Main(argv): - """The main function.""" - # Common initializations - parser = argparse.ArgumentParser() - parser.add_argument( - '-c', - '--chromeos_root', - dest='chromeos_root', - default='../../', - help=('ChromeOS root checkout directory' - ' uses ../.. if none given.')) - parser.add_argument( - '-g', - '--gcc_dir', - dest='gcc_dir', - help='The directory where gcc resides.') - parser.add_argument( - '--binutils_dir', - dest='binutils_dir', - help='The directory where binutils resides.') - parser.add_argument( - '-x', - '--gdb_dir', - dest='gdb_dir', - help='The directory where gdb resides.') - parser.add_argument( - '-b', - '--board', - dest='board', - default='x86-alex', - help='The target board.') - parser.add_argument( - '-n', - '--noincremental', - dest='noincremental', - default=False, - action='store_true', - help='Use FEATURES=keepwork to do incremental builds.') - parser.add_argument( - '--cflags', - dest='cflags', - default='', - help='Build a compiler with specified CFLAGS') - parser.add_argument( - '--cxxflags', - dest='cxxflags', - default='', - help='Build a compiler with specified CXXFLAGS') - parser.add_argument( - '--cflags_for_target', - dest='cflags_for_target', - default='', - help='Build the target libraries with specified flags') - parser.add_argument( - '--cxxflags_for_target', - dest='cxxflags_for_target', - default='', - help='Build the target libraries with specified flags') - parser.add_argument( - '--ldflags', - dest='ldflags', - default='', - help='Build a compiler with specified LDFLAGS') - parser.add_argument( - '-d', - '--debug', - dest='debug', - default=False, - action='store_true', - help='Build a compiler with -g3 -O0 appended to both' - ' CFLAGS and CXXFLAGS.') - parser.add_argument( - '-m', - '--mount_only', - dest='mount_only', - default=False, - action='store_true', - help='Just mount the tool directories.') - parser.add_argument( - '-u', - '--unmount_only', - dest='unmount_only', - default=False, - action='store_true', - help='Just unmount the tool directories.') - parser.add_argument( - '--extra_use_flags', - dest='extra_use_flags', - default='', - help='Extra flag for USE, to be passed to the ebuild. 
' - "('multislot' and 'mounted_<tool>' are always passed.)") - parser.add_argument( - '--gcc_enable_ccache', - dest='gcc_enable_ccache', - default=False, - action='store_true', - help='Enable ccache for the gcc invocations') - - options = parser.parse_args(argv) - - chromeos_root = misc.CanonicalizePath(options.chromeos_root) - if options.gcc_dir: - gcc_dir = misc.CanonicalizePath(options.gcc_dir) - assert gcc_dir and os.path.isdir(gcc_dir), 'gcc_dir does not exist!' - if options.binutils_dir: - binutils_dir = misc.CanonicalizePath(options.binutils_dir) - assert os.path.isdir(binutils_dir), 'binutils_dir does not exist!' - if options.gdb_dir: - gdb_dir = misc.CanonicalizePath(options.gdb_dir) - assert os.path.isdir(gdb_dir), 'gdb_dir does not exist!' - if options.unmount_only: - options.mount_only = False - elif options.mount_only: - options.unmount_only = False - build_env = {} - if options.cflags: - build_env['CFLAGS'] = '`portageq envvar CFLAGS` ' + options.cflags - if options.cxxflags: - build_env['CXXFLAGS'] = '`portageq envvar CXXFLAGS` ' + options.cxxflags - if options.cflags_for_target: - build_env['CFLAGS_FOR_TARGET'] = options.cflags_for_target - if options.cxxflags_for_target: - build_env['CXXFLAGS_FOR_TARGET'] = options.cxxflags_for_target - if options.ldflags: - build_env['LDFLAGS'] = options.ldflags - if options.debug: - debug_flags = '-g3 -O0' - if 'CFLAGS' in build_env: - build_env['CFLAGS'] += ' %s' % (debug_flags) - else: - build_env['CFLAGS'] = debug_flags - if 'CXXFLAGS' in build_env: - build_env['CXXFLAGS'] += ' %s' % (debug_flags) - else: - build_env['CXXFLAGS'] = debug_flags - if options.extra_use_flags: - build_env['USE'] = options.extra_use_flags - - # Create toolchain parts - toolchain_parts = {} - for board in options.board.split(','): + """The main function.""" + # Common initializations + parser = argparse.ArgumentParser() + parser.add_argument( + "-c", + "--chromeos_root", + dest="chromeos_root", + default="../../", + help=("ChromeOS root checkout directory" " uses ../.. 
if none given."), + ) + parser.add_argument( + "-g", + "--gcc_dir", + dest="gcc_dir", + help="The directory where gcc resides.", + ) + parser.add_argument( + "--binutils_dir", + dest="binutils_dir", + help="The directory where binutils resides.", + ) + parser.add_argument( + "-x", + "--gdb_dir", + dest="gdb_dir", + help="The directory where gdb resides.", + ) + parser.add_argument( + "-b", + "--board", + dest="board", + default="x86-alex", + help="The target board.", + ) + parser.add_argument( + "-n", + "--noincremental", + dest="noincremental", + default=False, + action="store_true", + help="Use FEATURES=keepwork to do incremental builds.", + ) + parser.add_argument( + "--cflags", + dest="cflags", + default="", + help="Build a compiler with specified CFLAGS", + ) + parser.add_argument( + "--cxxflags", + dest="cxxflags", + default="", + help="Build a compiler with specified CXXFLAGS", + ) + parser.add_argument( + "--cflags_for_target", + dest="cflags_for_target", + default="", + help="Build the target libraries with specified flags", + ) + parser.add_argument( + "--cxxflags_for_target", + dest="cxxflags_for_target", + default="", + help="Build the target libraries with specified flags", + ) + parser.add_argument( + "--ldflags", + dest="ldflags", + default="", + help="Build a compiler with specified LDFLAGS", + ) + parser.add_argument( + "-d", + "--debug", + dest="debug", + default=False, + action="store_true", + help="Build a compiler with -g3 -O0 appended to both" + " CFLAGS and CXXFLAGS.", + ) + parser.add_argument( + "-m", + "--mount_only", + dest="mount_only", + default=False, + action="store_true", + help="Just mount the tool directories.", + ) + parser.add_argument( + "-u", + "--unmount_only", + dest="unmount_only", + default=False, + action="store_true", + help="Just unmount the tool directories.", + ) + parser.add_argument( + "--extra_use_flags", + dest="extra_use_flags", + default="", + help="Extra flag for USE, to be passed to the ebuild. " + "('multislot' and 'mounted_<tool>' are always passed.)", + ) + parser.add_argument( + "--gcc_enable_ccache", + dest="gcc_enable_ccache", + default=False, + action="store_true", + help="Enable ccache for the gcc invocations", + ) + + options = parser.parse_args(argv) + + chromeos_root = misc.CanonicalizePath(options.chromeos_root) if options.gcc_dir: - tp = ToolchainPart('gcc', gcc_dir, chromeos_root, board, - not options.noincremental, build_env, - options.gcc_enable_ccache) - toolchain_parts[tp.tag] = tp - tp.RunSetupBoardIfNecessary() + gcc_dir = misc.CanonicalizePath(options.gcc_dir) + assert gcc_dir and os.path.isdir(gcc_dir), "gcc_dir does not exist!" if options.binutils_dir: - tp = ToolchainPart('binutils', binutils_dir, chromeos_root, board, - not options.noincremental, build_env) - toolchain_parts[tp.tag] = tp - tp.RunSetupBoardIfNecessary() + binutils_dir = misc.CanonicalizePath(options.binutils_dir) + assert os.path.isdir(binutils_dir), "binutils_dir does not exist!" 
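# The --debug path below appends "-g3 -O0" to whatever CFLAGS/CXXFLAGS were
# already accumulated, creating the key when absent; a compact sketch of that
# merge with an example starting value.
build_env = {"CFLAGS": "`portageq envvar CFLAGS` -O2"}
debug_flags = "-g3 -O0"
for key in ("CFLAGS", "CXXFLAGS"):
    build_env[key] = ("%s %s" % (build_env[key], debug_flags)
                      if key in build_env else debug_flags)
assert build_env["CXXFLAGS"] == "-g3 -O0"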
if options.gdb_dir: - tp = ToolchainPart('gdb', gdb_dir, chromeos_root, board, - not options.noincremental, build_env) - toolchain_parts[tp.tag] = tp - tp.RunSetupBoardIfNecessary() - - rv = 0 - try: - for tag in toolchain_parts: - tp = toolchain_parts[tag] - if options.mount_only or options.unmount_only: - tp.MountSources(options.unmount_only) - else: - rv = rv + tp.Build() - finally: - print('Exiting...') - return rv - - -if __name__ == '__main__': - retval = Main(sys.argv[1:]) - sys.exit(retval) + gdb_dir = misc.CanonicalizePath(options.gdb_dir) + assert os.path.isdir(gdb_dir), "gdb_dir does not exist!" + if options.unmount_only: + options.mount_only = False + elif options.mount_only: + options.unmount_only = False + build_env = {} + if options.cflags: + build_env["CFLAGS"] = "`portageq envvar CFLAGS` " + options.cflags + if options.cxxflags: + build_env["CXXFLAGS"] = "`portageq envvar CXXFLAGS` " + options.cxxflags + if options.cflags_for_target: + build_env["CFLAGS_FOR_TARGET"] = options.cflags_for_target + if options.cxxflags_for_target: + build_env["CXXFLAGS_FOR_TARGET"] = options.cxxflags_for_target + if options.ldflags: + build_env["LDFLAGS"] = options.ldflags + if options.debug: + debug_flags = "-g3 -O0" + if "CFLAGS" in build_env: + build_env["CFLAGS"] += " %s" % (debug_flags) + else: + build_env["CFLAGS"] = debug_flags + if "CXXFLAGS" in build_env: + build_env["CXXFLAGS"] += " %s" % (debug_flags) + else: + build_env["CXXFLAGS"] = debug_flags + if options.extra_use_flags: + build_env["USE"] = options.extra_use_flags + + # Create toolchain parts + toolchain_parts = {} + for board in options.board.split(","): + if options.gcc_dir: + tp = ToolchainPart( + "gcc", + gcc_dir, + chromeos_root, + board, + not options.noincremental, + build_env, + options.gcc_enable_ccache, + ) + toolchain_parts[tp.tag] = tp + tp.RunSetupBoardIfNecessary() + if options.binutils_dir: + tp = ToolchainPart( + "binutils", + binutils_dir, + chromeos_root, + board, + not options.noincremental, + build_env, + ) + toolchain_parts[tp.tag] = tp + tp.RunSetupBoardIfNecessary() + if options.gdb_dir: + tp = ToolchainPart( + "gdb", + gdb_dir, + chromeos_root, + board, + not options.noincremental, + build_env, + ) + toolchain_parts[tp.tag] = tp + tp.RunSetupBoardIfNecessary() + + rv = 0 + try: + for tag in toolchain_parts: + tp = toolchain_parts[tag] + if options.mount_only or options.unmount_only: + tp.MountSources(options.unmount_only) + else: + rv = rv + tp.Build() + finally: + print("Exiting...") + return rv + + +if __name__ == "__main__": + retval = Main(sys.argv[1:]) + sys.exit(retval) diff --git a/buildbot_test_llvm.py b/buildbot_test_llvm.py index 7698a07d..25d269f5 100755 --- a/buildbot_test_llvm.py +++ b/buildbot_test_llvm.py @@ -24,19 +24,19 @@ import os import sys import time +from cros_utils import buildbot_utils from cros_utils import command_executer from cros_utils import logger -from cros_utils import buildbot_utils -CROSTC_ROOT = '/usr/local/google/crostc' -ROLE_ACCOUNT = 'mobiletc-prebuild' +CROSTC_ROOT = "/usr/local/google/crostc" +ROLE_ACCOUNT = "mobiletc-prebuild" TOOLCHAIN_DIR = os.path.dirname(os.path.realpath(__file__)) -MAIL_PROGRAM = '~/var/bin/mail-detective' -VALIDATION_RESULT_DIR = os.path.join(CROSTC_ROOT, 'validation_result') +MAIL_PROGRAM = "~/var/bin/mail-detective" +VALIDATION_RESULT_DIR = os.path.join(CROSTC_ROOT, "validation_result") START_DATE = datetime.date(2016, 1, 1) TEST_PER_DAY = 4 -DATA_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-report-data/' +DATA_DIR = 
"/google/data/rw/users/mo/mobiletc-prebuild/waterfall-report-data/" # Information about Rotating Boards # Board Arch Reference Platform Kernel @@ -63,137 +63,157 @@ DATA_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-report-data/' # winky x86_64 rambi baytrail 4.4.* TEST_BOARD = [ - 'atlas', - 'cave', - 'coral', - 'cyan', - 'elm', + "atlas", + "cave", + "coral", + "cyan", + "elm", # 'eve', tested by amd64-llvm-next-toolchain builder. - 'gale', - 'grunt', - 'fizz-moblab', + "gale", + "grunt", + "fizz-moblab", # 'kevin', tested by arm64-llvm-next-toolchain builder. - 'kevin64', - 'lakitu', - 'nyan_kitty', - 'octopus', - 'sentry', - 'tidus', + "kevin64", + "lakitu", + "nyan_kitty", + "octopus", + "sentry", + "tidus", # 'veyron_mighty', tested by arm-llvm-next-toolchain builder. - 'whirlwind', - 'winky', + "whirlwind", + "winky", ] class ToolchainVerifier(object): - """Class for the toolchain verifier.""" - - def __init__(self, board, chromeos_root, weekday, patches, compiler): - self._board = board - self._chromeos_root = chromeos_root - self._base_dir = os.getcwd() - self._ce = command_executer.GetCommandExecuter() - self._l = logger.GetLogger() - self._compiler = compiler - self._build = '%s-%s-toolchain-tryjob' % (board, compiler) - self._patches = patches.split(',') if patches else [] - self._patches_string = '_'.join(str(p) for p in self._patches) - - if not weekday: - self._weekday = time.strftime('%a') - else: - self._weekday = weekday - self._reports = os.path.join(VALIDATION_RESULT_DIR, compiler, board) - - def DoAll(self): - """Main function inside ToolchainComparator class. - - Launch trybot, get image names, create crosperf experiment file, run - crosperf, and copy images into seven-day report directories. - """ - buildbucket_id, _ = buildbot_utils.GetTrybotImage( - self._chromeos_root, - self._build, - self._patches, - tryjob_flags=['--hwtest'], - asynchronous=True) - - return buildbucket_id + """Class for the toolchain verifier.""" + + def __init__(self, board, chromeos_root, weekday, patches, compiler): + self._board = board + self._chromeos_root = chromeos_root + self._base_dir = os.getcwd() + self._ce = command_executer.GetCommandExecuter() + self._l = logger.GetLogger() + self._compiler = compiler + self._build = "%s-%s-toolchain-tryjob" % (board, compiler) + self._patches = patches.split(",") if patches else [] + self._patches_string = "_".join(str(p) for p in self._patches) + + if not weekday: + self._weekday = time.strftime("%a") + else: + self._weekday = weekday + self._reports = os.path.join(VALIDATION_RESULT_DIR, compiler, board) + + def DoAll(self): + """Main function inside ToolchainComparator class. + + Launch trybot, get image names, create crosperf experiment file, run + crosperf, and copy images into seven-day report directories. 
+ """ + buildbucket_id, _ = buildbot_utils.GetTrybotImage( + self._chromeos_root, + self._build, + self._patches, + tryjob_flags=["--hwtest"], + asynchronous=True, + ) + + return buildbucket_id def WriteRotatingReportsData(results_dict, date): - """Write data for waterfall report.""" - fname = '%d-%02d-%02d.builds' % (date.year, date.month, date.day) - filename = os.path.join(DATA_DIR, 'rotating-builders', fname) - with open(filename, 'w', encoding='utf-8') as out_file: - for board in results_dict.keys(): - buildbucket_id = results_dict[board] - out_file.write('%s,%s\n' % (buildbucket_id, board)) + """Write data for waterfall report.""" + fname = "%d-%02d-%02d.builds" % (date.year, date.month, date.day) + filename = os.path.join(DATA_DIR, "rotating-builders", fname) + with open(filename, "w", encoding="utf-8") as out_file: + for board in results_dict.keys(): + buildbucket_id = results_dict[board] + out_file.write("%s,%s\n" % (buildbucket_id, board)) def Main(argv): - """The main function.""" - - # Common initializations - command_executer.InitCommandExecuter() - parser = argparse.ArgumentParser() - parser.add_argument('--chromeos_root', - dest='chromeos_root', - help='The chromeos root from which to run tests.') - parser.add_argument('--weekday', - default='', - dest='weekday', - help='The day of the week for which to run tests.') - parser.add_argument('--board', - default='', - dest='board', - help='The board to test.') - parser.add_argument('--patch', - dest='patches', - default='', - help='The patches to use for the testing, ' - "seprate the patch numbers with ',' " - 'for more than one patches.') - parser.add_argument( - '--compiler', - dest='compiler', - help='Which compiler (llvm, llvm-next or gcc) to use for ' - 'testing.') - - options = parser.parse_args(argv[1:]) - if not options.chromeos_root: - print('Please specify the ChromeOS root directory.') - return 1 - if not options.compiler: - print('Please specify which compiler to test (gcc, llvm, or llvm-next).') - return 1 - - if options.board: - fv = ToolchainVerifier(options.board, options.chromeos_root, - options.weekday, options.patches, options.compiler) - return fv.DoAll() - - today = datetime.date.today() - delta = today - START_DATE - days = delta.days - - start_board = (days * TEST_PER_DAY) % len(TEST_BOARD) - results_dict = dict() - for i in range(TEST_PER_DAY): - try: - board = TEST_BOARD[(start_board + i) % len(TEST_BOARD)] - fv = ToolchainVerifier(board, options.chromeos_root, options.weekday, - options.patches, options.compiler) - buildbucket_id = fv.DoAll() - if buildbucket_id: - results_dict[board] = buildbucket_id - except SystemExit: - logfile = os.path.join(VALIDATION_RESULT_DIR, options.compiler, board) - with open(logfile, 'w', encoding='utf-8') as f: - f.write('Verifier got an exception, please check the log.\n') - WriteRotatingReportsData(results_dict, today) - - -if __name__ == '__main__': - retval = Main(sys.argv) - sys.exit(retval) + """The main function.""" + + # Common initializations + command_executer.InitCommandExecuter() + parser = argparse.ArgumentParser() + parser.add_argument( + "--chromeos_root", + dest="chromeos_root", + help="The chromeos root from which to run tests.", + ) + parser.add_argument( + "--weekday", + default="", + dest="weekday", + help="The day of the week for which to run tests.", + ) + parser.add_argument( + "--board", default="", dest="board", help="The board to test." 
+ ) + parser.add_argument( + "--patch", + dest="patches", + default="", + help="The patches to use for the testing, " + "seprate the patch numbers with ',' " + "for more than one patches.", + ) + parser.add_argument( + "--compiler", + dest="compiler", + help="Which compiler (llvm, llvm-next or gcc) to use for " "testing.", + ) + + options = parser.parse_args(argv[1:]) + if not options.chromeos_root: + print("Please specify the ChromeOS root directory.") + return 1 + if not options.compiler: + print( + "Please specify which compiler to test (gcc, llvm, or llvm-next)." + ) + return 1 + + if options.board: + fv = ToolchainVerifier( + options.board, + options.chromeos_root, + options.weekday, + options.patches, + options.compiler, + ) + return fv.DoAll() + + today = datetime.date.today() + delta = today - START_DATE + days = delta.days + + start_board = (days * TEST_PER_DAY) % len(TEST_BOARD) + results_dict = dict() + for i in range(TEST_PER_DAY): + try: + board = TEST_BOARD[(start_board + i) % len(TEST_BOARD)] + fv = ToolchainVerifier( + board, + options.chromeos_root, + options.weekday, + options.patches, + options.compiler, + ) + buildbucket_id = fv.DoAll() + if buildbucket_id: + results_dict[board] = buildbucket_id + except SystemExit: + logfile = os.path.join( + VALIDATION_RESULT_DIR, options.compiler, board + ) + with open(logfile, "w", encoding="utf-8") as f: + f.write("Verifier got an exception, please check the log.\n") + WriteRotatingReportsData(results_dict, today) + + +if __name__ == "__main__": + retval = Main(sys.argv) + sys.exit(retval) diff --git a/buildbot_test_toolchains.py b/buildbot_test_toolchains.py index f957066a..9f82fe8f 100755 --- a/buildbot_test_toolchains.py +++ b/buildbot_test_toolchains.py @@ -31,136 +31,143 @@ from cros_utils import command_executer from cros_utils import logger -CROSTC_ROOT = '/usr/local/google/crostc' -NIGHTLY_TESTS_DIR = os.path.join(CROSTC_ROOT, 'nightly-tests') -ROLE_ACCOUNT = 'mobiletc-prebuild' +CROSTC_ROOT = "/usr/local/google/crostc" +NIGHTLY_TESTS_DIR = os.path.join(CROSTC_ROOT, "nightly-tests") +ROLE_ACCOUNT = "mobiletc-prebuild" TOOLCHAIN_DIR = os.path.dirname(os.path.realpath(__file__)) -TMP_TOOLCHAIN_TEST = '/tmp/toolchain-tests' -MAIL_PROGRAM = '~/var/bin/mail-detective' -PENDING_ARCHIVES_DIR = os.path.join(CROSTC_ROOT, 'pending_archives') -NIGHTLY_TESTS_RESULTS = os.path.join(CROSTC_ROOT, 'nightly_test_reports') - -IMAGE_DIR = '{board}-{image_type}' -IMAGE_VERSION_STR = r'{chrome_version}-{tip}\.{branch}\.{branch_branch}' -IMAGE_FS = IMAGE_DIR + '/' + IMAGE_VERSION_STR -TRYBOT_IMAGE_FS = IMAGE_FS + '-{build_id}' +TMP_TOOLCHAIN_TEST = "/tmp/toolchain-tests" +MAIL_PROGRAM = "~/var/bin/mail-detective" +PENDING_ARCHIVES_DIR = os.path.join(CROSTC_ROOT, "pending_archives") +NIGHTLY_TESTS_RESULTS = os.path.join(CROSTC_ROOT, "nightly_test_reports") + +IMAGE_DIR = "{board}-{image_type}" +IMAGE_VERSION_STR = r"{chrome_version}-{tip}\.{branch}\.{branch_branch}" +IMAGE_FS = IMAGE_DIR + "/" + IMAGE_VERSION_STR +TRYBOT_IMAGE_FS = IMAGE_FS + "-{build_id}" IMAGE_RE_GROUPS = { - 'board': r'(?P<board>\S+)', - 'image_type': r'(?P<image_type>\S+)', - 'chrome_version': r'(?P<chrome_version>R\d+)', - 'tip': r'(?P<tip>\d+)', - 'branch': r'(?P<branch>\d+)', - 'branch_branch': r'(?P<branch_branch>\d+)', - 'build_id': r'(?P<build_id>b\d+)' + "board": r"(?P<board>\S+)", + "image_type": r"(?P<image_type>\S+)", + "chrome_version": r"(?P<chrome_version>R\d+)", + "tip": r"(?P<tip>\d+)", + "branch": r"(?P<branch>\d+)", + "branch_branch": 
r"(?P<branch_branch>\d+)", + "build_id": r"(?P<build_id>b\d+)", } TRYBOT_IMAGE_RE = TRYBOT_IMAGE_FS.format(**IMAGE_RE_GROUPS) -RECIPE_IMAGE_FS = IMAGE_FS + '-{build_id}-{buildbucket_id}' +RECIPE_IMAGE_FS = IMAGE_FS + "-{build_id}-{buildbucket_id}" RECIPE_IMAGE_RE_GROUPS = { - 'board': r'(?P<board>\S+)', - 'image_type': r'(?P<image_type>\S+)', - 'chrome_version': r'(?P<chrome_version>R\d+)', - 'tip': r'(?P<tip>\d+)', - 'branch': r'(?P<branch>\d+)', - 'branch_branch': r'(?P<branch_branch>\d+)', - 'build_id': r'(?P<build_id>\d+)', - 'buildbucket_id': r'(?P<buildbucket_id>\d+)' + "board": r"(?P<board>\S+)", + "image_type": r"(?P<image_type>\S+)", + "chrome_version": r"(?P<chrome_version>R\d+)", + "tip": r"(?P<tip>\d+)", + "branch": r"(?P<branch>\d+)", + "branch_branch": r"(?P<branch_branch>\d+)", + "build_id": r"(?P<build_id>\d+)", + "buildbucket_id": r"(?P<buildbucket_id>\d+)", } RECIPE_IMAGE_RE = RECIPE_IMAGE_FS.format(**RECIPE_IMAGE_RE_GROUPS) # CL that uses LLVM-Next to build the images (includes chrome). -USE_LLVM_NEXT_PATCH = '513590' +USE_LLVM_NEXT_PATCH = "513590" class ToolchainComparator(object): - """Class for doing the nightly tests work.""" - - def __init__(self, - board, - remotes, - chromeos_root, - weekday, - patches, - recipe=False, - test=False, - noschedv2=False): - self._board = board - self._remotes = remotes - self._chromeos_root = chromeos_root - self._base_dir = os.getcwd() - self._ce = command_executer.GetCommandExecuter() - self._l = logger.GetLogger() - self._build = '%s-release-tryjob' % board - self._patches = patches.split(',') if patches else [] - self._patches_string = '_'.join(str(p) for p in self._patches) - self._recipe = recipe - self._test = test - self._noschedv2 = noschedv2 - - if not weekday: - self._weekday = time.strftime('%a') - else: - self._weekday = weekday - self._date = datetime.date.today().strftime('%Y/%m/%d') - timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S') - self._reports_dir = os.path.join( - TMP_TOOLCHAIN_TEST if self._test else NIGHTLY_TESTS_RESULTS, - '%s.%s' % (timestamp, board), - ) - - def _GetVanillaImageName(self, trybot_image): - """Given a trybot artifact name, get latest vanilla image name. - - Args: - trybot_image: artifact name such as - 'daisy-release-tryjob/R40-6394.0.0-b1389' - for recipe images, name is in this format: - 'lulu-llvm-next-nightly/R84-13037.0.0-31011-8883172717979984032/' - - Returns: - Latest official image name, e.g. 'daisy-release/R57-9089.0.0'. - """ - # For board names with underscores, we need to fix the trybot image name - # to replace the hyphen (for the recipe builder) with the underscore. - # Currently the only such board we use is 'veyron_tiger'. - if trybot_image.find('veyron-tiger') != -1: - trybot_image = trybot_image.replace('veyron-tiger', 'veyron_tiger') - # We need to filter out -tryjob in the trybot_image. - if self._recipe: - trybot = re.sub('-llvm-next-nightly', '-release', trybot_image) - mo = re.search(RECIPE_IMAGE_RE, trybot) - else: - trybot = re.sub('-tryjob', '', trybot_image) - mo = re.search(TRYBOT_IMAGE_RE, trybot) - assert mo - dirname = IMAGE_DIR.replace('\\', '').format(**mo.groupdict()) - return buildbot_utils.GetLatestImage(self._chromeos_root, dirname) - - def _TestImages(self, trybot_image, vanilla_image): - """Create crosperf experiment file. - - Given the names of the trybot, vanilla and non-AFDO images, create the - appropriate crosperf experiment file and launch crosperf on it. 
- """ - if self._test: - experiment_file_dir = TMP_TOOLCHAIN_TEST - else: - experiment_file_dir = os.path.join(NIGHTLY_TESTS_DIR, self._weekday) - experiment_file_name = '%s_toolchain_experiment.txt' % self._board - - compiler_string = 'llvm' - if USE_LLVM_NEXT_PATCH in self._patches_string: - experiment_file_name = '%s_llvm_next_experiment.txt' % self._board - compiler_string = 'llvm_next' - - experiment_file = os.path.join(experiment_file_dir, experiment_file_name) - experiment_header = """ + """Class for doing the nightly tests work.""" + + def __init__( + self, + board, + remotes, + chromeos_root, + weekday, + patches, + recipe=False, + test=False, + noschedv2=False, + ): + self._board = board + self._remotes = remotes + self._chromeos_root = chromeos_root + self._base_dir = os.getcwd() + self._ce = command_executer.GetCommandExecuter() + self._l = logger.GetLogger() + self._build = "%s-release-tryjob" % board + self._patches = patches.split(",") if patches else [] + self._patches_string = "_".join(str(p) for p in self._patches) + self._recipe = recipe + self._test = test + self._noschedv2 = noschedv2 + + if not weekday: + self._weekday = time.strftime("%a") + else: + self._weekday = weekday + self._date = datetime.date.today().strftime("%Y/%m/%d") + timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S") + self._reports_dir = os.path.join( + TMP_TOOLCHAIN_TEST if self._test else NIGHTLY_TESTS_RESULTS, + "%s.%s" % (timestamp, board), + ) + + def _GetVanillaImageName(self, trybot_image): + """Given a trybot artifact name, get latest vanilla image name. + + Args: + trybot_image: artifact name such as + 'daisy-release-tryjob/R40-6394.0.0-b1389' + for recipe images, name is in this format: + 'lulu-llvm-next-nightly/R84-13037.0.0-31011-8883172717979984032/' + + Returns: + Latest official image name, e.g. 'daisy-release/R57-9089.0.0'. + """ + # For board names with underscores, we need to fix the trybot image name + # to replace the hyphen (for the recipe builder) with the underscore. + # Currently the only such board we use is 'veyron_tiger'. + if trybot_image.find("veyron-tiger") != -1: + trybot_image = trybot_image.replace("veyron-tiger", "veyron_tiger") + # We need to filter out -tryjob in the trybot_image. + if self._recipe: + trybot = re.sub("-llvm-next-nightly", "-release", trybot_image) + mo = re.search(RECIPE_IMAGE_RE, trybot) + else: + trybot = re.sub("-tryjob", "", trybot_image) + mo = re.search(TRYBOT_IMAGE_RE, trybot) + assert mo + dirname = IMAGE_DIR.replace("\\", "").format(**mo.groupdict()) + return buildbot_utils.GetLatestImage(self._chromeos_root, dirname) + + def _TestImages(self, trybot_image, vanilla_image): + """Create crosperf experiment file. + + Given the names of the trybot, vanilla and non-AFDO images, create the + appropriate crosperf experiment file and launch crosperf on it. + """ + if self._test: + experiment_file_dir = TMP_TOOLCHAIN_TEST + else: + experiment_file_dir = os.path.join(NIGHTLY_TESTS_DIR, self._weekday) + experiment_file_name = "%s_toolchain_experiment.txt" % self._board + + compiler_string = "llvm" + if USE_LLVM_NEXT_PATCH in self._patches_string: + experiment_file_name = "%s_llvm_next_experiment.txt" % self._board + compiler_string = "llvm_next" + + experiment_file = os.path.join( + experiment_file_dir, experiment_file_name + ) + experiment_header = """ board: %s remote: %s retries: 1 - """ % (self._board, self._remotes) - # TODO(b/244607231): Add graphic benchmarks removed in crrev.com/c/3869851. 
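
As a rough, self-contained sketch of how _GetVanillaImageName recovers the official image directory from a trybot artifact name (the sample name is assumed; the real pattern comes from TRYBOT_IMAGE_FS formatted with IMAGE_RE_GROUPS above):

    import re

    # Same shape as TRYBOT_IMAGE_FS after formatting with IMAGE_RE_GROUPS.
    TRYBOT_IMAGE_RE = (
        r"(?P<board>\S+)-(?P<image_type>\S+)/"
        r"(?P<chrome_version>R\d+)-(?P<tip>\d+)\.(?P<branch>\d+)"
        r"\.(?P<branch_branch>\d+)-(?P<build_id>b\d+)"
    )

    trybot = "daisy-release/R40-6394.0.0-b1389"  # '-tryjob' already stripped
    mo = re.search(TRYBOT_IMAGE_RE, trybot)
    assert mo is not None
    # groupdict() feeds the named groups straight back into IMAGE_DIR.
    print("{board}-{image_type}".format(**mo.groupdict()))  # daisy-release
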
- experiment_tests = """ + """ % ( + self._board, + self._remotes, + ) + # TODO(b/244607231): Add graphic benchmarks removed in crrev.com/c/3869851. + experiment_tests = """ benchmark: all_toolchain_perf { suite: telemetry_Crosperf iterations: 5 @@ -176,184 +183,226 @@ class ToolchainComparator(object): } """ - with open(experiment_file, 'w', encoding='utf-8') as f: - f.write(experiment_header) - f.write(experiment_tests) + with open(experiment_file, "w", encoding="utf-8") as f: + f.write(experiment_header) + f.write(experiment_tests) - # Now add vanilla to test file. - official_image = """ + # Now add vanilla to test file. + official_image = """ vanilla_image { chromeos_root: %s build: %s compiler: llvm } - """ % (self._chromeos_root, vanilla_image) - f.write(official_image) - - label_string = '%s_trybot_image' % compiler_string - - # Reuse autotest files from vanilla image for trybot images - autotest_files = os.path.join('/tmp', vanilla_image, 'autotest_files') - experiment_image = """ + """ % ( + self._chromeos_root, + vanilla_image, + ) + f.write(official_image) + + label_string = "%s_trybot_image" % compiler_string + + # Reuse autotest files from vanilla image for trybot images + autotest_files = os.path.join( + "/tmp", vanilla_image, "autotest_files" + ) + experiment_image = """ %s { chromeos_root: %s build: %s autotest_path: %s compiler: %s } - """ % (label_string, self._chromeos_root, trybot_image, autotest_files, - compiler_string) - f.write(experiment_image) - - crosperf = os.path.join(TOOLCHAIN_DIR, 'crosperf', 'crosperf') - noschedv2_opts = '--noschedv2' if self._noschedv2 else '' - no_email = not self._test - command = (f'{crosperf} --no_email={no_email} ' - f'--results_dir={self._reports_dir} --logging_level=verbose ' - f'--json_report=True {noschedv2_opts} {experiment_file}') - - return self._ce.RunCommand(command) - - def _SendEmail(self): - """Find email message generated by crosperf and send it.""" - filename = os.path.join(self._reports_dir, 'msg_body.html') - if (os.path.exists(filename) - and os.path.exists(os.path.expanduser(MAIL_PROGRAM))): - email_title = 'buildbot llvm test results' - if USE_LLVM_NEXT_PATCH in self._patches_string: - email_title = 'buildbot llvm_next test results' - command = ( - 'cat %s | %s -s "%s, %s %s" -team -html' % - (filename, MAIL_PROGRAM, email_title, self._board, self._date)) - self._ce.RunCommand(command) - - def _CopyJson(self): - # Make sure a destination directory exists. - os.makedirs(PENDING_ARCHIVES_DIR, exist_ok=True) - # Copy json report to pending archives directory. - command = 'cp %s/*.json %s/.' % (self._reports_dir, PENDING_ARCHIVES_DIR) - ret = self._ce.RunCommand(command) - # Failing to access json report means that crosperf terminated or all tests - # failed, raise an error. - if ret != 0: - raise RuntimeError( - 'Crosperf failed to run tests, cannot copy json report!') - - def DoAll(self): - """Main function inside ToolchainComparator class. - - Launch trybot, get image names, create crosperf experiment file, run - crosperf, and copy images into seven-day report directories. - """ - if self._recipe: - print('Using recipe buckets to get latest image.') - # crbug.com/1077313: Some boards are not consistently - # spelled, having underscores in some places and dashes in others. - # The image directories consistenly use dashes, so convert underscores - # to dashes to work around this. 
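
The underscore/dash workaround above is small but easy to get backwards; a minimal sketch of both directions (builder names use dashes per crbug.com/1077313, while some board names use underscores):

    def builder_for(board):
        # Builder names consistently use dashes, so normalize board names
        # like 'veyron_tiger' before querying the builder.
        return "%s-llvm-next-nightly" % board.replace("_", "-")

    assert builder_for("veyron_tiger") == "veyron-tiger-llvm-next-nightly"
    # _GetVanillaImageName() applies the reverse fix-up to the artifact name:
    assert "veyron-tiger-release".replace(
        "veyron-tiger", "veyron_tiger"
    ) == "veyron_tiger-release"
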
- trybot_image = buildbot_utils.GetLatestRecipeImage( - self._chromeos_root, - '%s-llvm-next-nightly' % self._board.replace('_', '-')) - else: - # Launch tryjob and wait to get image location. - buildbucket_id, trybot_image = buildbot_utils.GetTrybotImage( - self._chromeos_root, - self._build, - self._patches, - tryjob_flags=['--notests'], - build_toolchain=True) - print('trybot_url: \ - http://cros-goldeneye/chromeos/healthmonitoring/buildDetails?buildbucketId=%s' - % buildbucket_id) - - if not trybot_image: - self._l.LogError('Unable to find trybot_image!') - return 2 - - vanilla_image = self._GetVanillaImageName(trybot_image) - - print('trybot_image: %s' % trybot_image) - print('vanilla_image: %s' % vanilla_image) - - ret = self._TestImages(trybot_image, vanilla_image) - # Always try to send report email as crosperf will generate report when - # tests partially succeeded. - if not self._test: - self._SendEmail() - self._CopyJson() - # Non-zero ret here means crosperf tests partially failed, raise error here - # so that toolchain summary report can catch it. - if ret != 0: - raise RuntimeError('Crosperf tests partially failed!') - - return 0 + """ % ( + label_string, + self._chromeos_root, + trybot_image, + autotest_files, + compiler_string, + ) + f.write(experiment_image) + + crosperf = os.path.join(TOOLCHAIN_DIR, "crosperf", "crosperf") + noschedv2_opts = "--noschedv2" if self._noschedv2 else "" + no_email = not self._test + command = ( + f"{crosperf} --no_email={no_email} " + f"--results_dir={self._reports_dir} --logging_level=verbose " + f"--json_report=True {noschedv2_opts} {experiment_file}" + ) + + return self._ce.RunCommand(command) + + def _SendEmail(self): + """Find email message generated by crosperf and send it.""" + filename = os.path.join(self._reports_dir, "msg_body.html") + if os.path.exists(filename) and os.path.exists( + os.path.expanduser(MAIL_PROGRAM) + ): + email_title = "buildbot llvm test results" + if USE_LLVM_NEXT_PATCH in self._patches_string: + email_title = "buildbot llvm_next test results" + command = 'cat %s | %s -s "%s, %s %s" -team -html' % ( + filename, + MAIL_PROGRAM, + email_title, + self._board, + self._date, + ) + self._ce.RunCommand(command) + + def _CopyJson(self): + # Make sure a destination directory exists. + os.makedirs(PENDING_ARCHIVES_DIR, exist_ok=True) + # Copy json report to pending archives directory. + command = "cp %s/*.json %s/." % ( + self._reports_dir, + PENDING_ARCHIVES_DIR, + ) + ret = self._ce.RunCommand(command) + # Failing to access json report means that crosperf terminated or all tests + # failed, raise an error. + if ret != 0: + raise RuntimeError( + "Crosperf failed to run tests, cannot copy json report!" + ) + + def DoAll(self): + """Main function inside ToolchainComparator class. + + Launch trybot, get image names, create crosperf experiment file, run + crosperf, and copy images into seven-day report directories. + """ + if self._recipe: + print("Using recipe buckets to get latest image.") + # crbug.com/1077313: Some boards are not consistently + # spelled, having underscores in some places and dashes in others. + # The image directories consistenly use dashes, so convert underscores + # to dashes to work around this. + trybot_image = buildbot_utils.GetLatestRecipeImage( + self._chromeos_root, + "%s-llvm-next-nightly" % self._board.replace("_", "-"), + ) + else: + # Launch tryjob and wait to get image location. 
+ buildbucket_id, trybot_image = buildbot_utils.GetTrybotImage( + self._chromeos_root, + self._build, + self._patches, + tryjob_flags=["--notests"], + build_toolchain=True, + ) + print( + "trybot_url: \ + http://cros-goldeneye/chromeos/healthmonitoring/buildDetails?buildbucketId=%s" + % buildbucket_id + ) + + if not trybot_image: + self._l.LogError("Unable to find trybot_image!") + return 2 + + vanilla_image = self._GetVanillaImageName(trybot_image) + + print("trybot_image: %s" % trybot_image) + print("vanilla_image: %s" % vanilla_image) + + ret = self._TestImages(trybot_image, vanilla_image) + # Always try to send report email as crosperf will generate report when + # tests partially succeeded. + if not self._test: + self._SendEmail() + self._CopyJson() + # Non-zero ret here means crosperf tests partially failed, raise error here + # so that toolchain summary report can catch it. + if ret != 0: + raise RuntimeError("Crosperf tests partially failed!") + + return 0 def Main(argv): - """The main function.""" - - # Common initializations - command_executer.InitCommandExecuter() - parser = argparse.ArgumentParser() - parser.add_argument('--remote', - dest='remote', - help='Remote machines to run tests on.') - parser.add_argument('--board', - dest='board', - default='x86-zgb', - help='The target board.') - parser.add_argument('--chromeos_root', - dest='chromeos_root', - help='The chromeos root from which to run tests.') - parser.add_argument('--weekday', - default='', - dest='weekday', - help='The day of the week for which to run tests.') - parser.add_argument('--patch', - dest='patches', - help='The patches to use for the testing, ' - "seprate the patch numbers with ',' " - 'for more than one patches.') - parser.add_argument('--noschedv2', - dest='noschedv2', - action='store_true', - default=False, - help='Pass --noschedv2 to crosperf.') - parser.add_argument('--recipe', - dest='recipe', - default=True, - help='Use images generated from recipe rather than' - 'launching tryjob to get images.') - parser.add_argument('--test', - dest='test', - default=False, - help='Test this script on local desktop, ' - 'disabling mobiletc checking and email sending.' - 'Artifacts stored in /tmp/toolchain-tests') - - options = parser.parse_args(argv[1:]) - if not options.board: - print('Please give a board.') - return 1 - if not options.remote: - print('Please give at least one remote machine.') - return 1 - if not options.chromeos_root: - print('Please specify the ChromeOS root directory.') - return 1 - if options.test: - print('Cleaning local test directory for this script.') - if os.path.exists(TMP_TOOLCHAIN_TEST): - shutil.rmtree(TMP_TOOLCHAIN_TEST) - os.mkdir(TMP_TOOLCHAIN_TEST) - - fc = ToolchainComparator(options.board, options.remote, - options.chromeos_root, options.weekday, - options.patches, options.recipe, options.test, - options.noschedv2) - return fc.DoAll() - - -if __name__ == '__main__': - retval = Main(sys.argv) - sys.exit(retval) + """The main function.""" + + # Common initializations + command_executer.InitCommandExecuter() + parser = argparse.ArgumentParser() + parser.add_argument( + "--remote", dest="remote", help="Remote machines to run tests on." + ) + parser.add_argument( + "--board", dest="board", default="x86-zgb", help="The target board." 
+    )
+    parser.add_argument(
+        "--chromeos_root",
+        dest="chromeos_root",
+        help="The chromeos root from which to run tests.",
+    )
+    parser.add_argument(
+        "--weekday",
+        default="",
+        dest="weekday",
+        help="The day of the week for which to run tests.",
+    )
+    parser.add_argument(
+        "--patch",
+        dest="patches",
+        help="The patches to use for the testing, "
+        "separate the patch numbers with ',' "
+        "for more than one patch.",
+    )
+    parser.add_argument(
+        "--noschedv2",
+        dest="noschedv2",
+        action="store_true",
+        default=False,
+        help="Pass --noschedv2 to crosperf.",
+    )
+    parser.add_argument(
+        "--recipe",
+        dest="recipe",
+        default=True,
+        help="Use images generated from recipe rather than "
+        "launching tryjob to get images.",
+    )
+    parser.add_argument(
+        "--test",
+        dest="test",
+        default=False,
+        help="Test this script on local desktop, "
+        "disabling mobiletc checking and email sending. "
+        "Artifacts stored in /tmp/toolchain-tests",
+    )
+
+    options = parser.parse_args(argv[1:])
+    if not options.board:
+        print("Please give a board.")
+        return 1
+    if not options.remote:
+        print("Please give at least one remote machine.")
+        return 1
+    if not options.chromeos_root:
+        print("Please specify the ChromeOS root directory.")
+        return 1
+    if options.test:
+        print("Cleaning local test directory for this script.")
+        if os.path.exists(TMP_TOOLCHAIN_TEST):
+            shutil.rmtree(TMP_TOOLCHAIN_TEST)
+        os.mkdir(TMP_TOOLCHAIN_TEST)
+
+    fc = ToolchainComparator(
+        options.board,
+        options.remote,
+        options.chromeos_root,
+        options.weekday,
+        options.patches,
+        options.recipe,
+        options.test,
+        options.noschedv2,
+    )
+    return fc.DoAll()
+
+
+if __name__ == "__main__":
+    retval = Main(sys.argv)
+    sys.exit(retval)
diff --git a/chromiumos_image_diff.py b/chromiumos_image_diff.py
index 0f22ff35..5943f3aa 100755
--- a/chromiumos_image_diff.py
+++ b/chromiumos_image_diff.py
@@ -22,7 +22,8 @@
 from __future__ import print_function
 
-__author__ = 'shenhan@google.com (Han Shen)'
+
+__author__ = "shenhan@google.com (Han Shen)"
 
 import argparse
 import os
@@ -30,338 +31,390 @@
 import re
 import sys
 import tempfile
 
-import image_chromeos
 from cros_utils import command_executer
 from cros_utils import logger
 from cros_utils import misc
+import image_chromeos
 
 
 class CrosImage(object):
-  """A cros image object."""
-
-  def __init__(self, image, chromeos_root, no_unmount):
-    self.image = image
-    self.chromeos_root = chromeos_root
-    self.mounted = False
-    self._ce = command_executer.GetCommandExecuter()
-    self.logger = logger.GetLogger()
-    self.elf_files = []
-    self.no_unmount = no_unmount
-    self.unmount_script = ''
-    self.stateful = ''
-    self.rootfs = ''
-
-  def MountImage(self, mount_basename):
-    """Mount/unpack the image."""
-
-    if mount_basename:
-      self.rootfs = '/tmp/{0}.rootfs'.format(mount_basename)
-      self.stateful = '/tmp/{0}.stateful'.format(mount_basename)
-      self.unmount_script = '/tmp/{0}.unmount.sh'.format(mount_basename)
-    else:
-      self.rootfs = tempfile.mkdtemp(
-          suffix='.rootfs', prefix='chromiumos_image_diff')
-      ## rootfs is like /tmp/tmpxyz012.rootfs. 
- match = re.match(r'^(.*)\.rootfs$', self.rootfs) - basename = match.group(1) - self.stateful = basename + '.stateful' - os.mkdir(self.stateful) - self.unmount_script = '{0}.unmount.sh'.format(basename) - - self.logger.LogOutput('Mounting "{0}" onto "{1}" and "{2}"'.format( - self.image, self.rootfs, self.stateful)) - ## First of all creating an unmount image - self.CreateUnmountScript() - command = image_chromeos.GetImageMountCommand(self.image, self.rootfs, - self.stateful) - rv = self._ce.RunCommand(command, print_to_console=True) - self.mounted = (rv == 0) - if not self.mounted: - self.logger.LogError('Failed to mount "{0}" onto "{1}" and "{2}".'.format( - self.image, self.rootfs, self.stateful)) - return self.mounted - - def CreateUnmountScript(self): - command = ('sudo umount {r}/usr/local {r}/usr/share/oem ' - '{r}/var {r}/mnt/stateful_partition {r}; sudo umount {s} ; ' - 'rmdir {r} ; rmdir {s}\n').format( - r=self.rootfs, s=self.stateful) - f = open(self.unmount_script, 'w', encoding='utf-8') - f.write(command) - f.close() - self._ce.RunCommand( - 'chmod +x {}'.format(self.unmount_script), print_to_console=False) - self.logger.LogOutput('Created an unmount script - "{0}"'.format( - self.unmount_script)) - - def UnmountImage(self): - """Unmount the image and delete mount point.""" - - self.logger.LogOutput('Unmounting image "{0}" from "{1}" and "{2}"'.format( - self.image, self.rootfs, self.stateful)) - if self.mounted: - command = 'bash "{0}"'.format(self.unmount_script) - if self.no_unmount: - self.logger.LogOutput(('Please unmount manually - \n' - '\t bash "{0}"'.format(self.unmount_script))) - else: - if self._ce.RunCommand(command, print_to_console=True) == 0: - self._ce.RunCommand('rm {0}'.format(self.unmount_script)) - self.mounted = False - self.rootfs = None - self.stateful = None - self.unmount_script = None - - return not self.mounted - - def FindElfFiles(self): - """Find all elf files for the image. - - Returns: - Always true - """ - - self.logger.LogOutput('Finding all elf files in "{0}" ...'.format( - self.rootfs)) - # Note '\;' must be prefixed by 'r'. - command = ('find "{0}" -type f -exec ' - 'bash -c \'file -b "{{}}" | grep -q "ELF"\'' - r' \; ' - r'-exec echo "{{}}" \;').format(self.rootfs) - self.logger.LogCmd(command) - _, out, _ = self._ce.RunCommandWOutput(command, print_to_console=False) - self.elf_files = out.splitlines() - self.logger.LogOutput('Total {0} elf files found.'.format( - len(self.elf_files))) - return True + """A cros image object.""" + + def __init__(self, image, chromeos_root, no_unmount): + self.image = image + self.chromeos_root = chromeos_root + self.mounted = False + self._ce = command_executer.GetCommandExecuter() + self.logger = logger.GetLogger() + self.elf_files = [] + self.no_unmount = no_unmount + self.unmount_script = "" + self.stateful = "" + self.rootfs = "" + + def MountImage(self, mount_basename): + """Mount/unpack the image.""" + + if mount_basename: + self.rootfs = "/tmp/{0}.rootfs".format(mount_basename) + self.stateful = "/tmp/{0}.stateful".format(mount_basename) + self.unmount_script = "/tmp/{0}.unmount.sh".format(mount_basename) + else: + self.rootfs = tempfile.mkdtemp( + suffix=".rootfs", prefix="chromiumos_image_diff" + ) + ## rootfs is like /tmp/tmpxyz012.rootfs. 
+ match = re.match(r"^(.*)\.rootfs$", self.rootfs) + basename = match.group(1) + self.stateful = basename + ".stateful" + os.mkdir(self.stateful) + self.unmount_script = "{0}.unmount.sh".format(basename) + + self.logger.LogOutput( + 'Mounting "{0}" onto "{1}" and "{2}"'.format( + self.image, self.rootfs, self.stateful + ) + ) + ## First of all creating an unmount image + self.CreateUnmountScript() + command = image_chromeos.GetImageMountCommand( + self.image, self.rootfs, self.stateful + ) + rv = self._ce.RunCommand(command, print_to_console=True) + self.mounted = rv == 0 + if not self.mounted: + self.logger.LogError( + 'Failed to mount "{0}" onto "{1}" and "{2}".'.format( + self.image, self.rootfs, self.stateful + ) + ) + return self.mounted + + def CreateUnmountScript(self): + command = ( + "sudo umount {r}/usr/local {r}/usr/share/oem " + "{r}/var {r}/mnt/stateful_partition {r}; sudo umount {s} ; " + "rmdir {r} ; rmdir {s}\n" + ).format(r=self.rootfs, s=self.stateful) + f = open(self.unmount_script, "w", encoding="utf-8") + f.write(command) + f.close() + self._ce.RunCommand( + "chmod +x {}".format(self.unmount_script), print_to_console=False + ) + self.logger.LogOutput( + 'Created an unmount script - "{0}"'.format(self.unmount_script) + ) + + def UnmountImage(self): + """Unmount the image and delete mount point.""" + + self.logger.LogOutput( + 'Unmounting image "{0}" from "{1}" and "{2}"'.format( + self.image, self.rootfs, self.stateful + ) + ) + if self.mounted: + command = 'bash "{0}"'.format(self.unmount_script) + if self.no_unmount: + self.logger.LogOutput( + ( + "Please unmount manually - \n" + '\t bash "{0}"'.format(self.unmount_script) + ) + ) + else: + if self._ce.RunCommand(command, print_to_console=True) == 0: + self._ce.RunCommand("rm {0}".format(self.unmount_script)) + self.mounted = False + self.rootfs = None + self.stateful = None + self.unmount_script = None + + return not self.mounted + + def FindElfFiles(self): + """Find all elf files for the image. + + Returns: + Always true + """ + + self.logger.LogOutput( + 'Finding all elf files in "{0}" ...'.format(self.rootfs) + ) + # Note '\;' must be prefixed by 'r'. 
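
The shell pipeline below leans on file(1) to classify binaries; an equivalent pure-Python check (a sketch, not what this script does) reads the four ELF magic bytes directly:

    import os

    def find_elf_files(rootfs):
        # Every ELF object starts with the magic bytes b'\x7fELF'.
        found = []
        for dirpath, _, filenames in os.walk(rootfs):
            for name in filenames:
                path = os.path.join(dirpath, name)
                try:
                    with open(path, "rb") as f:
                        if f.read(4) == b"\x7fELF":
                            found.append(path)
                except OSError:
                    pass  # sockets, fifos, unreadable files: skip
        return found
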
+ command = ( + 'find "{0}" -type f -exec ' + 'bash -c \'file -b "{{}}" | grep -q "ELF"\'' + r" \; " + r'-exec echo "{{}}" \;' + ).format(self.rootfs) + self.logger.LogCmd(command) + _, out, _ = self._ce.RunCommandWOutput(command, print_to_console=False) + self.elf_files = out.splitlines() + self.logger.LogOutput( + "Total {0} elf files found.".format(len(self.elf_files)) + ) + return True class ImageComparator(object): - """A class that wraps comparsion actions.""" - - def __init__(self, images, diff_file): - self.images = images - self.logger = logger.GetLogger() - self.diff_file = diff_file - self.tempf1 = None - self.tempf2 = None - - def Cleanup(self): - if self.tempf1 and self.tempf2: - command_executer.GetCommandExecuter().RunCommand('rm {0} {1}'.format( - self.tempf1, self.tempf2)) - logger.GetLogger('Removed "{0}" and "{1}".'.format( - self.tempf1, self.tempf2)) - - def CheckElfFileSetEquality(self): - """Checking whether images have exactly number of elf files.""" - - self.logger.LogOutput('Checking elf file equality ...') - i1 = self.images[0] - i2 = self.images[1] - t1 = i1.rootfs + '/' - elfset1 = {e.replace(t1, '') for e in i1.elf_files} - t2 = i2.rootfs + '/' - elfset2 = {e.replace(t2, '') for e in i2.elf_files} - dif1 = elfset1.difference(elfset2) - msg = None - if dif1: - msg = 'The following files are not in "{image}" - "{rootfs}":\n'.format( - image=i2.image, rootfs=i2.rootfs) - for d in dif1: - msg += '\t' + d + '\n' - dif2 = elfset2.difference(elfset1) - if dif2: - msg = 'The following files are not in "{image}" - "{rootfs}":\n'.format( - image=i1.image, rootfs=i1.rootfs) - for d in dif2: - msg += '\t' + d + '\n' - if msg: - self.logger.LogError(msg) - return False - return True - - def CompareImages(self): - """Do the comparsion work.""" - - if not self.CheckElfFileSetEquality(): - return False - - mismatch_list = [] - match_count = 0 - i1 = self.images[0] - i2 = self.images[1] - self.logger.LogOutput('Start comparing {0} elf file by file ...'.format( - len(i1.elf_files))) - ## Note - i1.elf_files and i2.elf_files have exactly the same entries here. - - ## Create 2 temp files to be used for all disassembed files. 
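
The equality check above reduces to plain set arithmetic once each image's mount prefix is stripped; a tiny sketch with made-up paths:

    rootfs1, rootfs2 = "/tmp/a.rootfs/", "/tmp/b.rootfs/"
    elfs1 = {"/tmp/a.rootfs/bin/ls", "/tmp/a.rootfs/bin/cp"}
    elfs2 = {"/tmp/b.rootfs/bin/ls"}

    set1 = {e.replace(rootfs1, "") for e in elfs1}
    set2 = {e.replace(rootfs2, "") for e in elfs2}
    print(set1 - set2)  # {'bin/cp'}: present only in the first image
    print(set2 - set1)  # set(): nothing unique to the second image
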
-    handle, self.tempf1 = tempfile.mkstemp()
-    os.close(handle)  # We do not need the handle
-    handle, self.tempf2 = tempfile.mkstemp()
-    os.close(handle)
-
-    cmde = command_executer.GetCommandExecuter()
-    for elf1 in i1.elf_files:
-      tmp_rootfs = i1.rootfs + '/'
-      f1 = elf1.replace(tmp_rootfs, '')
-      full_path1 = elf1
-      full_path2 = elf1.replace(i1.rootfs, i2.rootfs)
-
-      if full_path1 == full_path2:
-        self.logger.LogError(
-            "Error: We're comparing the SAME file - {0}".format(f1))
-        continue
-
-      command = (
-          'objdump -d "{f1}" > {tempf1} ; '
-          'objdump -d "{f2}" > {tempf2} ; '
-          # Remove path string inside the dissemble
-          "sed -i 's!{rootfs1}!!g' {tempf1} ; "
-          "sed -i 's!{rootfs2}!!g' {tempf2} ; "
-          'diff {tempf1} {tempf2} 1>/dev/null 2>&1').format(
-              f1=full_path1,
-              f2=full_path2,
-              rootfs1=i1.rootfs,
-              rootfs2=i2.rootfs,
-              tempf1=self.tempf1,
-              tempf2=self.tempf2)
-      ret = cmde.RunCommand(command, print_to_console=False)
-      if ret != 0:
-        self.logger.LogOutput('*** Not match - "{0}" "{1}"'.format(
-            full_path1, full_path2))
-        mismatch_list.append(f1)
-        if self.diff_file:
-          command = ('echo "Diffs of disassemble of \"{f1}\" and \"{f2}\"" '
-                     '>> {diff_file} ; diff {tempf1} {tempf2} '
-                     '>> {diff_file}').format(
-                         f1=full_path1,
-                         f2=full_path2,
-                         diff_file=self.diff_file,
-                         tempf1=self.tempf1,
-                         tempf2=self.tempf2)
-          cmde.RunCommand(command, print_to_console=False)
-      else:
-        match_count += 1
-    ## End of comparing every elf files.
-
-    if not mismatch_list:
-      self.logger.LogOutput(
-          '** COOL, ALL {0} BINARIES MATCHED!! **'.format(match_count))
-      return True
-
-    mismatch_str = 'Found {0} mismatch:\n'.format(len(mismatch_list))
-    for b in mismatch_list:
-      mismatch_str += '\t' + b + '\n'
-
-    self.logger.LogOutput(mismatch_str)
-    return False
+    """A class that wraps comparison actions."""
+
+    def __init__(self, images, diff_file):
+        self.images = images
+        self.logger = logger.GetLogger()
+        self.diff_file = diff_file
+        self.tempf1 = None
+        self.tempf2 = None
+
+    def Cleanup(self):
+        if self.tempf1 and self.tempf2:
+            command_executer.GetCommandExecuter().RunCommand(
+                "rm {0} {1}".format(self.tempf1, self.tempf2)
+            )
+            logger.GetLogger().LogOutput(
+                'Removed "{0}" and "{1}".'.format(self.tempf1, self.tempf2)
+            )
+
+    def CheckElfFileSetEquality(self):
+        """Check whether the images have exactly the same set of elf files."""
+
+        self.logger.LogOutput("Checking elf file equality ...")
+        i1 = self.images[0]
+        i2 = self.images[1]
+        t1 = i1.rootfs + "/"
+        elfset1 = {e.replace(t1, "") for e in i1.elf_files}
+        t2 = i2.rootfs + "/"
+        elfset2 = {e.replace(t2, "") for e in i2.elf_files}
+        dif1 = elfset1.difference(elfset2)
+        msg = None
+        if dif1:
+            msg = 'The following files are not in "{image}" - "{rootfs}":\n'.format(
+                image=i2.image, rootfs=i2.rootfs
+            )
+            for d in dif1:
+                msg += "\t" + d + "\n"
+        dif2 = elfset2.difference(elfset1)
+        if dif2:
+            msg = 'The following files are not in "{image}" - "{rootfs}":\n'.format(
+                image=i1.image, rootfs=i1.rootfs
+            )
+            for d in dif2:
+                msg += "\t" + d + "\n"
+        if msg:
+            self.logger.LogError(msg)
+            return False
+        return True
+
+    def CompareImages(self):
+        """Do the comparison work."""
+
+        if not self.CheckElfFileSetEquality():
+            return False
+
+        mismatch_list = []
+        match_count = 0
+        i1 = self.images[0]
+        i2 = self.images[1]
+        self.logger.LogOutput(
+            "Start comparing {0} elf file by file ...".format(len(i1.elf_files))
+        )
+        ## Note - i1.elf_files and i2.elf_files have exactly the same entries here.
+
+        ## Create 2 temp files to be used for all disassembled files. 
+ handle, self.tempf1 = tempfile.mkstemp() + os.close(handle) # We do not need the handle + handle, self.tempf2 = tempfile.mkstemp() + os.close(handle) + + cmde = command_executer.GetCommandExecuter() + for elf1 in i1.elf_files: + tmp_rootfs = i1.rootfs + "/" + f1 = elf1.replace(tmp_rootfs, "") + full_path1 = elf1 + full_path2 = elf1.replace(i1.rootfs, i2.rootfs) + + if full_path1 == full_path2: + self.logger.LogError( + "Error: We're comparing the SAME file - {0}".format(f1) + ) + continue + + command = ( + 'objdump -d "{f1}" > {tempf1} ; ' + 'objdump -d "{f2}" > {tempf2} ; ' + # Remove path string inside the dissemble + "sed -i 's!{rootfs1}!!g' {tempf1} ; " + "sed -i 's!{rootfs2}!!g' {tempf2} ; " + "diff {tempf1} {tempf2} 1>/dev/null 2>&1" + ).format( + f1=full_path1, + f2=full_path2, + rootfs1=i1.rootfs, + rootfs2=i2.rootfs, + tempf1=self.tempf1, + tempf2=self.tempf2, + ) + ret = cmde.RunCommand(command, print_to_console=False) + if ret != 0: + self.logger.LogOutput( + '*** Not match - "{0}" "{1}"'.format(full_path1, full_path2) + ) + mismatch_list.append(f1) + if self.diff_file: + command = ( + 'echo "Diffs of disassemble of "{f1}" and "{f2}"" ' + ">> {diff_file} ; diff {tempf1} {tempf2} " + ">> {diff_file}" + ).format( + f1=full_path1, + f2=full_path2, + diff_file=self.diff_file, + tempf1=self.tempf1, + tempf2=self.tempf2, + ) + cmde.RunCommand(command, print_to_console=False) + else: + match_count += 1 + ## End of comparing every elf files. + + if not mismatch_list: + self.logger.LogOutput( + "** COOL, ALL {0} BINARIES MATCHED!! **".format(match_count) + ) + return True + + mismatch_str = "Found {0} mismatch:\n".format(len(mismatch_list)) + for b in mismatch_list: + mismatch_str += "\t" + b + "\n" + + self.logger.LogOutput(mismatch_str) + return False def Main(argv): - """The main function.""" - - command_executer.InitCommandExecuter() - images = [] - - parser = argparse.ArgumentParser() - parser.add_argument( - '--no_unmount', - action='store_true', - dest='no_unmount', - default=False, - help='Do not unmount after finish, this is useful for debugging.') - parser.add_argument( - '--chromeos_root', - dest='chromeos_root', - default=None, - action='store', - help=('[Optional] Specify a chromeos tree instead of ' - 'deducing it from image path so that we can compare ' - '2 images that are downloaded.')) - parser.add_argument( - '--mount_basename', - dest='mount_basename', - default=None, - action='store', - help=('Specify a meaningful name for the mount point. With this being ' + """The main function.""" + + command_executer.InitCommandExecuter() + images = [] + + parser = argparse.ArgumentParser() + parser.add_argument( + "--no_unmount", + action="store_true", + dest="no_unmount", + default=False, + help="Do not unmount after finish, this is useful for debugging.", + ) + parser.add_argument( + "--chromeos_root", + dest="chromeos_root", + default=None, + action="store", + help=( + "[Optional] Specify a chromeos tree instead of " + "deducing it from image path so that we can compare " + "2 images that are downloaded." + ), + ) + parser.add_argument( + "--mount_basename", + dest="mount_basename", + default=None, + action="store", + help=( + "Specify a meaningful name for the mount point. With this being " 'set, the mount points would be "/tmp/mount_basename.x.rootfs" ' - ' and "/tmp/mount_basename.x.stateful". 
(x is 1 or 2).')) - parser.add_argument( - '--diff_file', - dest='diff_file', - default=None, - help='Dumping all the diffs (if any) to the diff file') - parser.add_argument( - '--image1', - dest='image1', - default=None, - required=True, - help=('Image 1 file name.')) - parser.add_argument( - '--image2', - dest='image2', - default=None, - required=True, - help=('Image 2 file name.')) - options = parser.parse_args(argv[1:]) - - if options.mount_basename and options.mount_basename.find('/') >= 0: - logger.GetLogger().LogError( - '"--mount_basename" must be a name, not a path.') - parser.print_help() - return 1 - - result = False - image_comparator = None - try: - for i, image_path in enumerate([options.image1, options.image2], start=1): - image_path = os.path.realpath(image_path) - if not os.path.isfile(image_path): - logger.GetLogger().LogError('"{0}" is not a file.'.format(image_path)) - return 1 - - chromeos_root = None - if options.chromeos_root: - chromeos_root = options.chromeos_root - else: - ## Deduce chromeos root from image - t = image_path - while t != '/': - if misc.IsChromeOsTree(t): - break - t = os.path.dirname(t) - if misc.IsChromeOsTree(t): - chromeos_root = t - - if not chromeos_root: + ' and "/tmp/mount_basename.x.stateful". (x is 1 or 2).' + ), + ) + parser.add_argument( + "--diff_file", + dest="diff_file", + default=None, + help="Dumping all the diffs (if any) to the diff file", + ) + parser.add_argument( + "--image1", + dest="image1", + default=None, + required=True, + help=("Image 1 file name."), + ) + parser.add_argument( + "--image2", + dest="image2", + default=None, + required=True, + help=("Image 2 file name."), + ) + options = parser.parse_args(argv[1:]) + + if options.mount_basename and options.mount_basename.find("/") >= 0: logger.GetLogger().LogError( - 'Please provide a valid chromeos root via --chromeos_root') + '"--mount_basename" must be a name, not a path.' 
+ ) + parser.print_help() return 1 - image = CrosImage(image_path, chromeos_root, options.no_unmount) - - if options.mount_basename: - mount_basename = '{basename}.{index}'.format( - basename=options.mount_basename, index=i) - else: - mount_basename = None - - if image.MountImage(mount_basename): - images.append(image) - image.FindElfFiles() - - if len(images) == 2: - image_comparator = ImageComparator(images, options.diff_file) - result = image_comparator.CompareImages() - finally: - for image in images: - image.UnmountImage() - if image_comparator: - image_comparator.Cleanup() - - return 0 if result else 1 - - -if __name__ == '__main__': - Main(sys.argv) + result = False + image_comparator = None + try: + for i, image_path in enumerate( + [options.image1, options.image2], start=1 + ): + image_path = os.path.realpath(image_path) + if not os.path.isfile(image_path): + logger.GetLogger().LogError( + '"{0}" is not a file.'.format(image_path) + ) + return 1 + + chromeos_root = None + if options.chromeos_root: + chromeos_root = options.chromeos_root + else: + ## Deduce chromeos root from image + t = image_path + while t != "/": + if misc.IsChromeOsTree(t): + break + t = os.path.dirname(t) + if misc.IsChromeOsTree(t): + chromeos_root = t + + if not chromeos_root: + logger.GetLogger().LogError( + "Please provide a valid chromeos root via --chromeos_root" + ) + return 1 + + image = CrosImage(image_path, chromeos_root, options.no_unmount) + + if options.mount_basename: + mount_basename = "{basename}.{index}".format( + basename=options.mount_basename, index=i + ) + else: + mount_basename = None + + if image.MountImage(mount_basename): + images.append(image) + image.FindElfFiles() + + if len(images) == 2: + image_comparator = ImageComparator(images, options.diff_file) + result = image_comparator.CompareImages() + finally: + for image in images: + image.UnmountImage() + if image_comparator: + image_comparator.Cleanup() + + return 0 if result else 1 + + +if __name__ == "__main__": + Main(sys.argv) diff --git a/compiler_wrapper/build.py b/compiler_wrapper/build.py index d004b553..e7486c54 100755 --- a/compiler_wrapper/build.py +++ b/compiler_wrapper/build.py @@ -16,100 +16,114 @@ import sys def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument( - '--config', - required=True, - choices=['cros.hardened', 'cros.nonhardened', 'cros.host', 'android']) - parser.add_argument('--use_ccache', required=True, choices=['true', 'false']) - parser.add_argument( - '--version_suffix', - help='A string appended to the computed version of the wrapper. This ' - 'is appeneded directly without any delimiter.') - parser.add_argument('--use_llvm_next', - required=True, - choices=['true', 'false']) - parser.add_argument('--output_file', required=True, type=str) - parser.add_argument( - '--static', - choices=['true', 'false'], - help='If true, produce a static wrapper. Autodetects a good value if ' - 'unspecified.') - args = parser.parse_args() - - if args.static is None: - args.static = 'cros' not in args.config - else: - args.static = args.static == 'true' - - return args + parser = argparse.ArgumentParser() + parser.add_argument( + "--config", + required=True, + choices=["cros.hardened", "cros.nonhardened", "cros.host", "android"], + ) + parser.add_argument( + "--use_ccache", required=True, choices=["true", "false"] + ) + parser.add_argument( + "--version_suffix", + help="A string appended to the computed version of the wrapper. 
This "
+        "is appended directly without any delimiter.",
+    )
+    parser.add_argument(
+        "--use_llvm_next", required=True, choices=["true", "false"]
+    )
+    parser.add_argument("--output_file", required=True, type=str)
+    parser.add_argument(
+        "--static",
+        choices=["true", "false"],
+        help="If true, produce a static wrapper. Autodetects a good value if "
+        "unspecified.",
+    )
+    args = parser.parse_args()
+
+    if args.static is None:
+        args.static = "cros" not in args.config
+    else:
+        args.static = args.static == "true"
+
+    return args
 
 
 def calc_go_args(args, version, build_dir):
-  # These seem unnecessary, and might lead to breakages with Go's ldflag
-  # parsing. Don't allow them.
-  if "'" in version:
-    raise ValueError('`version` should not contain single quotes')
-
-  ldFlags = [
-      '-X',
-      'main.ConfigName=' + args.config,
-      '-X',
-      'main.UseCCache=' + args.use_ccache,
-      '-X',
-      'main.UseLlvmNext=' + args.use_llvm_next,
-      '-X',
-      # Quote this, as `version` may have spaces in it.
-      "'main.Version=" + version + "'",
-  ]
-
-  # If the wrapper is intended for ChromeOS, we need to use libc's exec.
-  extra_args = []
-  if not args.static:
-    extra_args += ['-tags', 'libc_exec']
-
-  if args.config == 'android':
-    # If android_llvm_next_flags.go DNE, we'll get an obscure "no
-    # llvmNextFlags" build error; complaining here is clearer.
-    if not os.path.exists(os.path.join(build_dir,
-                                       'android_llvm_next_flags.go')):
-      sys.exit('In order to build the Android wrapper, you must have a local '
-               'android_llvm_next_flags.go file; please see '
-               'cros_llvm_next_flags.go.')
-    extra_args += ['-tags', 'android_llvm_next_flags']
-
-  return [
-      'go', 'build', '-o',
-      os.path.abspath(args.output_file), '-ldflags', ' '.join(ldFlags)
-  ] + extra_args
+    # These seem unnecessary, and might lead to breakages with Go's ldflag
+    # parsing. Don't allow them.
+    if "'" in version:
+        raise ValueError("`version` should not contain single quotes")
+
+    ldFlags = [
+        "-X",
+        "main.ConfigName=" + args.config,
+        "-X",
+        "main.UseCCache=" + args.use_ccache,
+        "-X",
+        "main.UseLlvmNext=" + args.use_llvm_next,
+        "-X",
+        # Quote this, as `version` may have spaces in it.
+        "'main.Version=" + version + "'",
+    ]
+
+    # If the wrapper is intended for ChromeOS, we need to use libc's exec.
+    extra_args = []
+    if not args.static:
+        extra_args += ["-tags", "libc_exec"]
+
+    if args.config == "android":
+        # If android_llvm_next_flags.go DNE, we'll get an obscure "no
+        # llvmNextFlags" build error; complaining here is clearer.
+        if not os.path.exists(
+            os.path.join(build_dir, "android_llvm_next_flags.go")
+        ):
+            sys.exit(
+                "In order to build the Android wrapper, you must have a local "
+                "android_llvm_next_flags.go file; please see "
+                "cros_llvm_next_flags.go."
+            )
+        extra_args += ["-tags", "android_llvm_next_flags"]
+
+    return [
+        "go",
+        "build",
+        "-o",
+        os.path.abspath(args.output_file),
+        "-ldflags",
+        " ".join(ldFlags),
+    ] + extra_args
 
 
 def read_version(build_dir):
-  version_path = os.path.join(build_dir, 'VERSION')
-  if os.path.exists(version_path):
-    with open(version_path, 'r') as r:
-      return r.read()
+    version_path = os.path.join(build_dir, "VERSION")
+    if os.path.exists(version_path):
+        with open(version_path, "r") as r:
+            return r.read()
 
-  last_commit_msg = subprocess.check_output(
-      ['git', '-C', build_dir, 'log', '-1', '--pretty=%B'], encoding='utf-8')
-  # Use last found change id to support reverts as well. 
-  change_ids = re.findall(r'Change-Id: (\w+)', last_commit_msg)
-  if not change_ids:
-    sys.exit("Couldn't find Change-Id in last commit message.")
-  return change_ids[-1]
+    last_commit_msg = subprocess.check_output(
+        ["git", "-C", build_dir, "log", "-1", "--pretty=%B"], encoding="utf-8"
+    )
+    # Use last found change id to support reverts as well.
+    change_ids = re.findall(r"Change-Id: (\w+)", last_commit_msg)
+    if not change_ids:
+        sys.exit("Couldn't find Change-Id in last commit message.")
+    return change_ids[-1]
 
 
 def main():
-  args = parse_args()
-  build_dir = os.path.dirname(__file__)
-  version = read_version(build_dir)
-  if args.version_suffix:
-    version += args.version_suffix
-  # Note: Go does not support using absolute package names.
-  # So we run go inside the directory of the the build file.
-  sys.exit(
-      subprocess.call(calc_go_args(args, version, build_dir), cwd=build_dir))
-
-
-if __name__ == '__main__':
-  main()
+    args = parse_args()
+    build_dir = os.path.dirname(__file__)
+    version = read_version(build_dir)
+    if args.version_suffix:
+        version += args.version_suffix
+    # Note: Go does not support using absolute package names.
+    # So we run go inside the directory of the build file.
+    sys.exit(
+        subprocess.call(calc_go_args(args, version, build_dir), cwd=build_dir)
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/compiler_wrapper/bundle.py b/compiler_wrapper/bundle.py
index 4d5f5010..2624376b 100755
--- a/compiler_wrapper/bundle.py
+++ b/compiler_wrapper/bundle.py
@@ -17,67 +17,78 @@
 import sys
 
 
 def parse_args():
-  parser = argparse.ArgumentParser()
-  default_output_dir = os.path.normpath(
-      os.path.join(
-          os.path.dirname(os.path.realpath(__file__)),
-          '../../chromiumos-overlay/sys-devel/llvm/files/compiler_wrapper'))
-  parser.add_argument(
-      '--output_dir',
-      default=default_output_dir,
-      help='Output directory to place bundled files (default: %(default)s)')
-  parser.add_argument(
-      '--create',
-      action='store_true',
-      help='Create output_dir if it does not already exist')
-  return parser.parse_args()
+    parser = argparse.ArgumentParser()
+    default_output_dir = os.path.normpath(
+        os.path.join(
+            os.path.dirname(os.path.realpath(__file__)),
+            "../../chromiumos-overlay/sys-devel/llvm/files/compiler_wrapper",
+        )
+    )
+    parser.add_argument(
+        "--output_dir",
+        default=default_output_dir,
+        help="Output directory to place bundled files (default: %(default)s)",
+    )
+    parser.add_argument(
+        "--create",
+        action="store_true",
+        help="Create output_dir if it does not already exist",
+    )
+    return parser.parse_args()
 
 
 def copy_files(input_dir, output_dir):
-  for filename in os.listdir(input_dir):
-    if ((filename.endswith('.go') and not filename.endswith('_test.go')) or
-        filename in ('build.py', 'go.mod')):
-      shutil.copy(
-          os.path.join(input_dir, filename), os.path.join(output_dir, filename))
+    for filename in os.listdir(input_dir):
+        if (
+            filename.endswith(".go") and not filename.endswith("_test.go")
+        ) or filename in ("build.py", "go.mod"):
+            shutil.copy(
+                os.path.join(input_dir, filename),
+                os.path.join(output_dir, filename),
+            )
 
 
 def read_change_id(input_dir):
-  last_commit_msg = subprocess.check_output(
-      ['git', '-C', input_dir, 'log', '-1', '--pretty=%B'], encoding='utf-8')
-  # Use last found change id to support reverts as well. 
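
The file filter in copy_files above is compact; a sketch of its behavior on representative names:

    def wanted(filename):
        # Bundle non-test Go sources plus the build entry points.
        if filename.endswith(".go") and not filename.endswith("_test.go"):
            return True
        return filename in ("build.py", "go.mod")

    assert wanted("main.go")
    assert not wanted("main_test.go")
    assert wanted("go.mod") and wanted("build.py")
    assert not wanted("bundle.py")  # the bundler itself is not copied
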
- change_ids = re.findall(r'Change-Id: (\w+)', last_commit_msg) - if not change_ids: - sys.exit("Couldn't find Change-Id in last commit message.") - return change_ids[-1] + last_commit_msg = subprocess.check_output( + ["git", "-C", input_dir, "log", "-1", "--pretty=%B"], encoding="utf-8" + ) + # Use last found change id to support reverts as well. + change_ids = re.findall(r"Change-Id: (\w+)", last_commit_msg) + if not change_ids: + sys.exit("Couldn't find Change-Id in last commit message.") + return change_ids[-1] def write_readme(input_dir, output_dir, change_id): - with open( - os.path.join(input_dir, 'bundle.README'), 'r', encoding='utf-8') as r: - with open(os.path.join(output_dir, 'README'), 'w', encoding='utf-8') as w: - content = r.read() - w.write(content.format(change_id=change_id)) + with open( + os.path.join(input_dir, "bundle.README"), "r", encoding="utf-8" + ) as r: + with open( + os.path.join(output_dir, "README"), "w", encoding="utf-8" + ) as w: + content = r.read() + w.write(content.format(change_id=change_id)) def write_version(output_dir, change_id): - with open(os.path.join(output_dir, 'VERSION'), 'w', encoding='utf-8') as w: - w.write(change_id) + with open(os.path.join(output_dir, "VERSION"), "w", encoding="utf-8") as w: + w.write(change_id) def main(): - args = parse_args() - input_dir = os.path.dirname(__file__) - change_id = read_change_id(input_dir) - if not args.create: - assert os.path.exists( - args.output_dir - ), f'Specified output directory ({args.output_dir}) does not exist' - shutil.rmtree(args.output_dir, ignore_errors=True) - os.makedirs(args.output_dir) - copy_files(input_dir, args.output_dir) - write_readme(input_dir, args.output_dir, change_id) - write_version(args.output_dir, change_id) - - -if __name__ == '__main__': - main() + args = parse_args() + input_dir = os.path.dirname(__file__) + change_id = read_change_id(input_dir) + if not args.create: + assert os.path.exists( + args.output_dir + ), f"Specified output directory ({args.output_dir}) does not exist" + shutil.rmtree(args.output_dir, ignore_errors=True) + os.makedirs(args.output_dir) + copy_files(input_dir, args.output_dir) + write_readme(input_dir, args.output_dir, change_id) + write_version(args.output_dir, change_id) + + +if __name__ == "__main__": + main() diff --git a/crate_ebuild_help.py b/crate_ebuild_help.py index e8ad48b7..0a0b6dcb 100755 --- a/crate_ebuild_help.py +++ b/crate_ebuild_help.py @@ -58,107 +58,120 @@ import toml def run(args: List[str]) -> bool: - result = subprocess.run(args, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - check=False) - return result.returncode == 0 + result = subprocess.run( + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False + ) + return result.returncode == 0 def run_check(args: List[str]): - subprocess.run(args, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - check=True) + subprocess.run( + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True + ) def gs_address_exists(address: str) -> bool: - # returns False if the file isn't there - return run(['gsutil.py', 'ls', address]) + # returns False if the file isn't there + return run(["gsutil.py", "ls", address]) def crate_already_uploaded(crate_name: str, crate_version: str) -> bool: - filename = f'{crate_name}-{crate_version}.crate' - return gs_address_exists( - f'gs://chromeos-localmirror/distfiles/{filename}') or gs_address_exists( - f'gs://chromeos-mirror/gentoo/distfiles/{filename}') + filename = f"{crate_name}-{crate_version}.crate" + return 
gs_address_exists( + f"gs://chromeos-localmirror/distfiles/{filename}" + ) or gs_address_exists(f"gs://chromeos-mirror/gentoo/distfiles/{filename}") def download_crate(crate_name: str, crate_version: str, localpath: Path): - urllib.request.urlretrieve( - f'https://crates.io/api/v1/crates/{crate_name}/{crate_version}/download', - localpath) + urllib.request.urlretrieve( + f"https://crates.io/api/v1/crates/{crate_name}/{crate_version}/download", + localpath, + ) def upload_crate(crate_name: str, crate_version: str, localpath: Path): - run_check([ - 'gsutil.py', 'cp', '-n', '-a', 'public-read', - str(localpath), - f'gs://chromeos-localmirror/distfiles/{crate_name}-{crate_version}.crate' - ]) + run_check( + [ + "gsutil.py", + "cp", + "-n", + "-a", + "public-read", + str(localpath), + f"gs://chromeos-localmirror/distfiles/{crate_name}-{crate_version}.crate", + ] + ) def main(): - parser = argparse.ArgumentParser( - description='Help prepare a Rust crate for an ebuild.') - parser.add_argument('--lockfile', - type=str, - required=True, - help='Path to the lockfile of the crate in question.') - parser.add_argument( - '--ignore', - type=str, - action='append', - required=False, - default=[], - help='Ignore the crate by this name (may be used multiple times).') - parser.add_argument( - '--dry-run', - action='store_true', - help="Don't actually download/upload crates, just print their names.") - ns = parser.parse_args() - - to_ignore = set(ns.ignore) - - toml_contents = toml.load(ns.lockfile) - packages = toml_contents['package'] - - crates = [(pkg['name'], pkg['version']) for pkg in packages - if pkg['name'] not in to_ignore] - crates.sort() - - print('Dependent crates:') - for name, version in crates: - print(f'{name}-{version}') - print() - - if ns.dry_run: - print('Crates that would be uploaded (skipping ones already uploaded):') - else: - print('Uploading crates (skipping ones already uploaded):') - - def maybe_upload(crate: Tuple[str, str]) -> str: - name, version = crate - if crate_already_uploaded(name, version): - return '' - if not ns.dry_run: - with tempfile.TemporaryDirectory() as temp_dir: - path = Path(temp_dir.name, f'{name}-{version}.crate') - download_crate(name, version, path) - upload_crate(name, version, path) - return f'{name}-{version}' - - # Simple benchmarking on my machine with rust-analyzer's Cargo.lock, using - # the --dry-run option, gives a wall time of 277 seconds with max_workers=1 - # and 70 seconds with max_workers=4. - with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor: - crates_len = len(crates) - for i, s in enumerate(executor.map(maybe_upload, crates)): - if s: - j = i + 1 - print(f'[{j}/{crates_len}] {s}') - print() - - -if __name__ == '__main__': - main() + parser = argparse.ArgumentParser( + description="Help prepare a Rust crate for an ebuild." 
+    )
+    parser.add_argument(
+        "--lockfile",
+        type=str,
+        required=True,
+        help="Path to the lockfile of the crate in question.",
+    )
+    parser.add_argument(
+        "--ignore",
+        type=str,
+        action="append",
+        required=False,
+        default=[],
+        help="Ignore the crate by this name (may be used multiple times).",
+    )
+    parser.add_argument(
+        "--dry-run",
+        action="store_true",
+        help="Don't actually download/upload crates, just print their names.",
+    )
+    ns = parser.parse_args()
+
+    to_ignore = set(ns.ignore)
+
+    toml_contents = toml.load(ns.lockfile)
+    packages = toml_contents["package"]
+
+    crates = [
+        (pkg["name"], pkg["version"])
+        for pkg in packages
+        if pkg["name"] not in to_ignore
+    ]
+    crates.sort()
+
+    print("Dependent crates:")
+    for name, version in crates:
+        print(f"{name}-{version}")
+    print()
+
+    if ns.dry_run:
+        print("Crates that would be uploaded (skipping ones already uploaded):")
+    else:
+        print("Uploading crates (skipping ones already uploaded):")
+
+    def maybe_upload(crate: Tuple[str, str]) -> str:
+        name, version = crate
+        if crate_already_uploaded(name, version):
+            return ""
+        if not ns.dry_run:
+            with tempfile.TemporaryDirectory() as temp_dir:
+                # TemporaryDirectory() yields the directory path as a str.
+                path = Path(temp_dir, f"{name}-{version}.crate")
+                download_crate(name, version, path)
+                upload_crate(name, version, path)
+        return f"{name}-{version}"
+
+    # Simple benchmarking on my machine with rust-analyzer's Cargo.lock, using
+    # the --dry-run option, gives a wall time of 277 seconds with max_workers=1
+    # and 70 seconds with max_workers=4.
+    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
+        crates_len = len(crates)
+        for i, s in enumerate(executor.map(maybe_upload, crates)):
+            if s:
+                j = i + 1
+                print(f"[{j}/{crates_len}] {s}")
+    print()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/cros_utils/bugs.py b/cros_utils/bugs.py
index 8c32d84e..5c620739 100755
--- a/cros_utils/bugs.py
+++ b/cros_utils/bugs.py
@@ -12,93 +12,103 @@
 import json
 import os
 from typing import Any, Dict, List, Optional
 
-X20_PATH = '/google/data/rw/teams/c-compiler-chrome/prod_bugs'
+
+X20_PATH = "/google/data/rw/teams/c-compiler-chrome/prod_bugs"
 
 
 class WellKnownComponents(enum.IntEnum):
-  """A listing of "well-known" components recognized by our infra."""
-  CrOSToolchainPublic = -1
-  CrOSToolchainPrivate = -2
+    """A listing of "well-known" components recognized by our infra."""
+
+    CrOSToolchainPublic = -1
+    CrOSToolchainPrivate = -2
 
 
 def _WriteBugJSONFile(object_type: str, json_object: Dict[str, Any]):
-  """Writes a JSON file to X20_PATH with the given bug-ish object."""
-  final_object = {
-      'type': object_type,
-      'value': json_object,
-  }
-
-  # The name of this has two parts:
-  # - An easily sortable time, to provide uniqueness and let our service send
-  #   things in the order they were put into the outbox.
-  # - 64 bits of entropy, so two racing bug writes don't clobber the same file. 
- now = datetime.datetime.utcnow().isoformat('T', 'seconds') + 'Z' - entropy = base64.urlsafe_b64encode(os.getrandom(8)) - entropy_str = entropy.rstrip(b'=').decode('utf-8') - file_path = os.path.join(X20_PATH, f'{now}_{entropy_str}.json') - - temp_path = file_path + '.in_progress' - try: - with open(temp_path, 'w') as f: - json.dump(final_object, f) - os.rename(temp_path, file_path) - except: - os.remove(temp_path) - raise - return file_path + """Writes a JSON file to X20_PATH with the given bug-ish object.""" + final_object = { + "type": object_type, + "value": json_object, + } + + # The name of this has two parts: + # - An easily sortable time, to provide uniqueness and let our service send + # things in the order they were put into the outbox. + # - 64 bits of entropy, so two racing bug writes don't clobber the same file. + now = datetime.datetime.utcnow().isoformat("T", "seconds") + "Z" + entropy = base64.urlsafe_b64encode(os.getrandom(8)) + entropy_str = entropy.rstrip(b"=").decode("utf-8") + file_path = os.path.join(X20_PATH, f"{now}_{entropy_str}.json") + + temp_path = file_path + ".in_progress" + try: + with open(temp_path, "w") as f: + json.dump(final_object, f) + os.rename(temp_path, file_path) + except: + os.remove(temp_path) + raise + return file_path def AppendToExistingBug(bug_id: int, body: str): - """Sends a reply to an existing bug.""" - _WriteBugJSONFile('AppendToExistingBugRequest', { - 'body': body, - 'bug_id': bug_id, - }) - - -def CreateNewBug(component_id: int, - title: str, - body: str, - assignee: Optional[str] = None, - cc: Optional[List[str]] = None): - """Sends a request to create a new bug. - - Args: - component_id: The component ID to add. Anything from WellKnownComponents - also works. - title: Title of the bug. Must be nonempty. - body: Body of the bug. Must be nonempty. - assignee: Assignee of the bug. Must be either an email address, or a - "well-known" assignee (detective, mage). - cc: A list of emails to add to the CC list. Must either be an email - address, or a "well-known" individual (detective, mage). - """ - obj = { - 'component_id': component_id, - 'subject': title, - 'body': body, - } - - if assignee: - obj['assignee'] = assignee - - if cc: - obj['cc'] = cc - - _WriteBugJSONFile('FileNewBugRequest', obj) + """Sends a reply to an existing bug.""" + _WriteBugJSONFile( + "AppendToExistingBugRequest", + { + "body": body, + "bug_id": bug_id, + }, + ) + + +def CreateNewBug( + component_id: int, + title: str, + body: str, + assignee: Optional[str] = None, + cc: Optional[List[str]] = None, +): + """Sends a request to create a new bug. + + Args: + component_id: The component ID to add. Anything from WellKnownComponents + also works. + title: Title of the bug. Must be nonempty. + body: Body of the bug. Must be nonempty. + assignee: Assignee of the bug. Must be either an email address, or a + "well-known" assignee (detective, mage). + cc: A list of emails to add to the CC list. Must either be an email + address, or a "well-known" individual (detective, mage). + """ + obj = { + "component_id": component_id, + "subject": title, + "body": body, + } + + if assignee: + obj["assignee"] = assignee + + if cc: + obj["cc"] = cc + + _WriteBugJSONFile("FileNewBugRequest", obj) def SendCronjobLog(cronjob_name: str, failed: bool, message: str): - """Sends the record of a cronjob to our bug infra. - - cronjob_name: The name of the cronjob. Expected to remain consistent over - time. - failed: Whether the job failed or not. - message: Any seemingly relevant context. 
This is pasted verbatim in a bug, if - the cronjob infra deems it worthy. - """ - _WriteBugJSONFile('ChrotomationCronjobUpdate', { - 'name': cronjob_name, - 'message': message, - 'failed': failed, - }) + """Sends the record of a cronjob to our bug infra. + + cronjob_name: The name of the cronjob. Expected to remain consistent over + time. + failed: Whether the job failed or not. + message: Any seemingly relevant context. This is pasted verbatim in a bug, if + the cronjob infra deems it worthy. + """ + _WriteBugJSONFile( + "ChrotomationCronjobUpdate", + { + "name": cronjob_name, + "message": message, + "failed": failed, + }, + ) diff --git a/cros_utils/bugs_test.py b/cros_utils/bugs_test.py index daab4af5..b7a2d09a 100755 --- a/cros_utils/bugs_test.py +++ b/cros_utils/bugs_test.py @@ -17,108 +17,115 @@ import bugs class Tests(unittest.TestCase): - """Tests for the bugs module.""" - def testWritingJSONFileSeemsToWork(self): - """Tests JSON file writing.""" - old_x20_path = bugs.X20_PATH - - def restore_x20_path(): - bugs.X20_PATH = old_x20_path - - self.addCleanup(restore_x20_path) - - with tempfile.TemporaryDirectory() as tempdir: - bugs.X20_PATH = tempdir - file_path = bugs._WriteBugJSONFile( - 'ObjectType', { - 'foo': 'bar', - 'baz': bugs.WellKnownComponents.CrOSToolchainPublic, - }) - - self.assertTrue(file_path.startswith(tempdir), - f'Expected {file_path} to start with {tempdir}') - - with open(file_path) as f: - self.assertEqual( - json.load(f), - { - 'type': 'ObjectType', - 'value': { - 'foo': 'bar', - 'baz': int(bugs.WellKnownComponents.CrOSToolchainPublic), + """Tests for the bugs module.""" + + def testWritingJSONFileSeemsToWork(self): + """Tests JSON file writing.""" + old_x20_path = bugs.X20_PATH + + def restore_x20_path(): + bugs.X20_PATH = old_x20_path + + self.addCleanup(restore_x20_path) + + with tempfile.TemporaryDirectory() as tempdir: + bugs.X20_PATH = tempdir + file_path = bugs._WriteBugJSONFile( + "ObjectType", + { + "foo": "bar", + "baz": bugs.WellKnownComponents.CrOSToolchainPublic, }, + ) + + self.assertTrue( + file_path.startswith(tempdir), + f"Expected {file_path} to start with {tempdir}", + ) + + with open(file_path) as f: + self.assertEqual( + json.load(f), + { + "type": "ObjectType", + "value": { + "foo": "bar", + "baz": int( + bugs.WellKnownComponents.CrOSToolchainPublic + ), + }, + }, + ) + + @patch("bugs._WriteBugJSONFile") + def testAppendingToBugsSeemsToWork(self, mock_write_json_file): + """Tests AppendToExistingBug.""" + bugs.AppendToExistingBug(1234, "hello, world!") + mock_write_json_file.assert_called_once_with( + "AppendToExistingBugRequest", + { + "body": "hello, world!", + "bug_id": 1234, }, ) - @patch('bugs._WriteBugJSONFile') - def testAppendingToBugsSeemsToWork(self, mock_write_json_file): - """Tests AppendToExistingBug.""" - bugs.AppendToExistingBug(1234, 'hello, world!') - mock_write_json_file.assert_called_once_with( - 'AppendToExistingBugRequest', - { - 'body': 'hello, world!', - 'bug_id': 1234, - }, - ) - - @patch('bugs._WriteBugJSONFile') - def testBugCreationSeemsToWork(self, mock_write_json_file): - """Tests CreateNewBug.""" - test_case_additions = ( - {}, - { - 'component_id': bugs.WellKnownComponents.CrOSToolchainPublic, - }, - { - 'assignee': 'foo@gbiv.com', - 'cc': ['bar@baz.com'], - }, - ) - - for additions in test_case_additions: - test_case = { - 'component_id': 123, - 'title': 'foo', - 'body': 'bar', - **additions, - } - - bugs.CreateNewBug(**test_case) - - expected_output = { - 'component_id': test_case['component_id'], - 
'subject': test_case['title'], - 'body': test_case['body'], - } - - assignee = test_case.get('assignee') - if assignee: - expected_output['assignee'] = assignee - - cc = test_case.get('cc') - if cc: - expected_output['cc'] = cc - - mock_write_json_file.assert_called_once_with( - 'FileNewBugRequest', - expected_output, - ) - mock_write_json_file.reset_mock() - - @patch('bugs._WriteBugJSONFile') - def testCronjobLogSendingSeemsToWork(self, mock_write_json_file): - """Tests SendCronjobLog.""" - bugs.SendCronjobLog('my_name', False, 'hello, world!') - mock_write_json_file.assert_called_once_with( - 'ChrotomationCronjobUpdate', - { - 'name': 'my_name', - 'message': 'hello, world!', - 'failed': False, - }, - ) - - -if __name__ == '__main__': - unittest.main() + @patch("bugs._WriteBugJSONFile") + def testBugCreationSeemsToWork(self, mock_write_json_file): + """Tests CreateNewBug.""" + test_case_additions = ( + {}, + { + "component_id": bugs.WellKnownComponents.CrOSToolchainPublic, + }, + { + "assignee": "foo@gbiv.com", + "cc": ["bar@baz.com"], + }, + ) + + for additions in test_case_additions: + test_case = { + "component_id": 123, + "title": "foo", + "body": "bar", + **additions, + } + + bugs.CreateNewBug(**test_case) + + expected_output = { + "component_id": test_case["component_id"], + "subject": test_case["title"], + "body": test_case["body"], + } + + assignee = test_case.get("assignee") + if assignee: + expected_output["assignee"] = assignee + + cc = test_case.get("cc") + if cc: + expected_output["cc"] = cc + + mock_write_json_file.assert_called_once_with( + "FileNewBugRequest", + expected_output, + ) + mock_write_json_file.reset_mock() + + @patch("bugs._WriteBugJSONFile") + def testCronjobLogSendingSeemsToWork(self, mock_write_json_file): + """Tests SendCronjobLog.""" + bugs.SendCronjobLog("my_name", False, "hello, world!") + mock_write_json_file.assert_called_once_with( + "ChrotomationCronjobUpdate", + { + "name": "my_name", + "message": "hello, world!", + "failed": False, + }, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/cros_utils/buildbot_utils.py b/cros_utils/buildbot_utils.py index 3cff9d93..6cf159d8 100644 --- a/cros_utils/buildbot_utils.py +++ b/cros_utils/buildbot_utils.py @@ -27,262 +27,283 @@ TIME_OUT = 15 * 60 * 60 # Decide the build is dead or will never finish class BuildbotTimeout(Exception): - """Exception to throw when a buildbot operation timesout.""" + """Exception to throw when a buildbot operation timesout.""" def RunCommandInPath(path, cmd): - ce = command_executer.GetCommandExecuter() - cwd = os.getcwd() - os.chdir(path) - status, stdout, stderr = ce.RunCommandWOutput(cmd, print_to_console=False) - os.chdir(cwd) - return status, stdout, stderr + ce = command_executer.GetCommandExecuter() + cwd = os.getcwd() + os.chdir(path) + status, stdout, stderr = ce.RunCommandWOutput(cmd, print_to_console=False) + os.chdir(cwd) + return status, stdout, stderr def PeekTrybotImage(chromeos_root, buildbucket_id): - """Get the artifact URL of a given tryjob. + """Get the artifact URL of a given tryjob. 
- Args: - buildbucket_id: buildbucket-id - chromeos_root: root dir of chrome os checkout + Args: + buildbucket_id: buildbucket-id + chromeos_root: root dir of chrome os checkout - Returns: - (status, url) where status can be 'pass', 'fail', 'running', - and url looks like: - gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789 - """ - command = ('cros buildresult --report json --buildbucket-id %s' % - buildbucket_id) - rc, out, _ = RunCommandInPath(chromeos_root, command) + Returns: + (status, url) where status can be 'pass', 'fail', 'running', + and url looks like: + gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789 + """ + command = ( + "cros buildresult --report json --buildbucket-id %s" % buildbucket_id + ) + rc, out, _ = RunCommandInPath(chromeos_root, command) - # Current implementation of cros buildresult returns fail when a job is still - # running. - if rc != 0: - return ('running', None) + # Current implementation of cros buildresult returns fail when a job is still + # running. + if rc != 0: + return ("running", None) - results = json.loads(out)[buildbucket_id] + results = json.loads(out)[buildbucket_id] - # Handle the case where the tryjob failed to launch correctly. - if results['artifacts_url'] is None: - return (results['status'], '') + # Handle the case where the tryjob failed to launch correctly. + if results["artifacts_url"] is None: + return (results["status"], "") - return (results['status'], results['artifacts_url'].rstrip('/')) + return (results["status"], results["artifacts_url"].rstrip("/")) def ParseTryjobBuildbucketId(msg): - """Find the buildbucket-id in the messages from `cros tryjob`. - - Args: - msg: messages from `cros tryjob` - - Returns: - buildbucket-id, which will be passed to `cros buildresult` - """ - output_list = ast.literal_eval(msg) - output_dict = output_list[0] - if 'buildbucket_id' in output_dict: - return output_dict['buildbucket_id'] - return None - - -def SubmitTryjob(chromeos_root, - buildbot_name, - patch_list, - tryjob_flags=None, - build_toolchain=False): - """Calls `cros tryjob ...` - - Args: - chromeos_root: the path to the ChromeOS root, needed for finding chromite - and launching the buildbot. - buildbot_name: the name of the buildbot queue, such as lumpy-release or - daisy-paladin. - patch_list: a python list of the patches, if any, for the buildbot to use. - tryjob_flags: See cros tryjob --help for available options. - build_toolchain: builds and uses the latest toolchain, rather than the - prebuilt one in SDK. - - Returns: - buildbucket id - """ - patch_arg = '' - if patch_list: - for p in patch_list: - patch_arg = patch_arg + ' -g ' + repr(p) - if not tryjob_flags: - tryjob_flags = [] - if build_toolchain: - tryjob_flags.append('--latest-toolchain') - tryjob_flags = ' '.join(tryjob_flags) - - # Launch buildbot with appropriate flags. 
- build = buildbot_name - command = ('cros_sdk -- cros tryjob --yes --json --nochromesdk %s %s %s' % - (tryjob_flags, patch_arg, build)) - print('CMD: %s' % command) - _, out, _ = RunCommandInPath(chromeos_root, command) - buildbucket_id = ParseTryjobBuildbucketId(out) - print('buildbucket_id: %s' % repr(buildbucket_id)) - if not buildbucket_id: - logger.GetLogger().LogFatal('Error occurred while launching trybot job: ' - '%s' % command) - return buildbucket_id - - -def GetTrybotImage(chromeos_root, - buildbot_name, - patch_list, - tryjob_flags=None, - build_toolchain=False, - asynchronous=False): - """Launch buildbot and get resulting trybot artifact name. - - This function launches a buildbot with the appropriate flags to - build the test ChromeOS image, with the current ToT mobile compiler. It - checks every 10 minutes to see if the trybot has finished. When the trybot - has finished, it parses the resulting report logs to find the trybot - artifact (if one was created), and returns that artifact name. - - Args: - chromeos_root: the path to the ChromeOS root, needed for finding chromite - and launching the buildbot. - buildbot_name: the name of the buildbot queue, such as lumpy-release or - daisy-paladin. - patch_list: a python list of the patches, if any, for the buildbot to use. - tryjob_flags: See cros tryjob --help for available options. - build_toolchain: builds and uses the latest toolchain, rather than the - prebuilt one in SDK. - asynchronous: don't wait for artifacts; just return the buildbucket id - - Returns: - (buildbucket id, partial image url) e.g. - (8952271933586980528, trybot-elm-release-tryjob/R67-10480.0.0-b2373596) - """ - buildbucket_id = SubmitTryjob(chromeos_root, buildbot_name, patch_list, - tryjob_flags, build_toolchain) - if asynchronous: - return buildbucket_id, ' ' - - # The trybot generally takes more than 2 hours to finish. - # Wait two hours before polling the status. - time.sleep(INITIAL_SLEEP_TIME) - elapsed = INITIAL_SLEEP_TIME - status = 'running' - image = '' - while True: - status, image = PeekTrybotImage(chromeos_root, buildbucket_id) - if status == 'running': - if elapsed > TIME_OUT: + """Find the buildbucket-id in the messages from `cros tryjob`. + + Args: + msg: messages from `cros tryjob` + + Returns: + buildbucket-id, which will be passed to `cros buildresult` + """ + output_list = ast.literal_eval(msg) + output_dict = output_list[0] + if "buildbucket_id" in output_dict: + return output_dict["buildbucket_id"] + return None + + +def SubmitTryjob( + chromeos_root, + buildbot_name, + patch_list, + tryjob_flags=None, + build_toolchain=False, +): + """Calls `cros tryjob ...` + + Args: + chromeos_root: the path to the ChromeOS root, needed for finding chromite + and launching the buildbot. + buildbot_name: the name of the buildbot queue, such as lumpy-release or + daisy-paladin. + patch_list: a python list of the patches, if any, for the buildbot to use. + tryjob_flags: See cros tryjob --help for available options. + build_toolchain: builds and uses the latest toolchain, rather than the + prebuilt one in SDK. + + Returns: + buildbucket id + """ + patch_arg = "" + if patch_list: + for p in patch_list: + patch_arg = patch_arg + " -g " + repr(p) + if not tryjob_flags: + tryjob_flags = [] + if build_toolchain: + tryjob_flags.append("--latest-toolchain") + tryjob_flags = " ".join(tryjob_flags) + + # Launch buildbot with appropriate flags. 
+ build = buildbot_name + command = "cros_sdk -- cros tryjob --yes --json --nochromesdk %s %s %s" % ( + tryjob_flags, + patch_arg, + build, + ) + print("CMD: %s" % command) + _, out, _ = RunCommandInPath(chromeos_root, command) + buildbucket_id = ParseTryjobBuildbucketId(out) + print("buildbucket_id: %s" % repr(buildbucket_id)) + if not buildbucket_id: logger.GetLogger().LogFatal( - 'Unable to get build result for target %s.' % buildbot_name) - else: - wait_msg = 'Unable to find build result; job may be running.' - logger.GetLogger().LogOutput(wait_msg) - logger.GetLogger().LogOutput(f'{elapsed / 60} minutes elapsed.') - logger.GetLogger().LogOutput(f'Sleeping {SLEEP_TIME} seconds.') - time.sleep(SLEEP_TIME) - elapsed += SLEEP_TIME + "Error occurred while launching trybot job: " "%s" % command + ) + return buildbucket_id + + +def GetTrybotImage( + chromeos_root, + buildbot_name, + patch_list, + tryjob_flags=None, + build_toolchain=False, + asynchronous=False, +): + """Launch buildbot and get resulting trybot artifact name. + + This function launches a buildbot with the appropriate flags to + build the test ChromeOS image, with the current ToT mobile compiler. It + checks every 10 minutes to see if the trybot has finished. When the trybot + has finished, it parses the resulting report logs to find the trybot + artifact (if one was created), and returns that artifact name. + + Args: + chromeos_root: the path to the ChromeOS root, needed for finding chromite + and launching the buildbot. + buildbot_name: the name of the buildbot queue, such as lumpy-release or + daisy-paladin. + patch_list: a python list of the patches, if any, for the buildbot to use. + tryjob_flags: See cros tryjob --help for available options. + build_toolchain: builds and uses the latest toolchain, rather than the + prebuilt one in SDK. + asynchronous: don't wait for artifacts; just return the buildbucket id + + Returns: + (buildbucket id, partial image url) e.g. + (8952271933586980528, trybot-elm-release-tryjob/R67-10480.0.0-b2373596) + """ + buildbucket_id = SubmitTryjob( + chromeos_root, buildbot_name, patch_list, tryjob_flags, build_toolchain + ) + if asynchronous: + return buildbucket_id, " " + + # The trybot generally takes more than 2 hours to finish. + # Wait two hours before polling the status. + time.sleep(INITIAL_SLEEP_TIME) + elapsed = INITIAL_SLEEP_TIME + status = "running" + image = "" + while True: + status, image = PeekTrybotImage(chromeos_root, buildbucket_id) + if status == "running": + if elapsed > TIME_OUT: + logger.GetLogger().LogFatal( + "Unable to get build result for target %s." % buildbot_name + ) + else: + wait_msg = "Unable to find build result; job may be running." + logger.GetLogger().LogOutput(wait_msg) + logger.GetLogger().LogOutput(f"{elapsed / 60} minutes elapsed.") + logger.GetLogger().LogOutput(f"Sleeping {SLEEP_TIME} seconds.") + time.sleep(SLEEP_TIME) + elapsed += SLEEP_TIME + else: + break + + if not buildbot_name.endswith("-toolchain") and status == "fail": + # For rotating testers, we don't care about their status + # result, because if any HWTest failed it will be non-zero. + # + # The nightly performance tests do not run HWTests, so if + # their status is non-zero, we do care. In this case + # non-zero means the image itself probably did not build. + image = "" + + if not image: + logger.GetLogger().LogError( + "Trybot job (buildbucket id: %s) failed with" + "status %s; no trybot image generated. 
" % (buildbucket_id, status) + ) else: - break - - if not buildbot_name.endswith('-toolchain') and status == 'fail': - # For rotating testers, we don't care about their status - # result, because if any HWTest failed it will be non-zero. - # - # The nightly performance tests do not run HWTests, so if - # their status is non-zero, we do care. In this case - # non-zero means the image itself probably did not build. - image = '' - - if not image: - logger.GetLogger().LogError('Trybot job (buildbucket id: %s) failed with' - 'status %s; no trybot image generated. ' % - (buildbucket_id, status)) - else: - # Convert full gs path to what crosperf expects. For example, convert - # gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789 - # to - # trybot-elm-release-tryjob/R67-10468.0.0-b20789 - image = '/'.join(image.split('/')[-2:]) - - logger.GetLogger().LogOutput("image is '%s'" % image) - logger.GetLogger().LogOutput('status is %s' % status) - return buildbucket_id, image + # Convert full gs path to what crosperf expects. For example, convert + # gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789 + # to + # trybot-elm-release-tryjob/R67-10468.0.0-b20789 + image = "/".join(image.split("/")[-2:]) + + logger.GetLogger().LogOutput("image is '%s'" % image) + logger.GetLogger().LogOutput("status is %s" % status) + return buildbucket_id, image def DoesImageExist(chromeos_root, build): - """Check if the image for the given build exists.""" + """Check if the image for the given build exists.""" - ce = command_executer.GetCommandExecuter() - command = ('gsutil ls gs://chromeos-image-archive/%s' - '/chromiumos_test_image.tar.xz' % (build)) - ret = ce.ChrootRunCommand(chromeos_root, command, print_to_console=False) - return not ret + ce = command_executer.GetCommandExecuter() + command = ( + "gsutil ls gs://chromeos-image-archive/%s" + "/chromiumos_test_image.tar.xz" % (build) + ) + ret = ce.ChrootRunCommand(chromeos_root, command, print_to_console=False) + return not ret def WaitForImage(chromeos_root, build): - """Wait for an image to be ready.""" + """Wait for an image to be ready.""" - elapsed_time = 0 - while elapsed_time < TIME_OUT: - if DoesImageExist(chromeos_root, build): - return - logger.GetLogger().LogOutput('Image %s not ready, waiting for 10 minutes' % - build) - time.sleep(SLEEP_TIME) - elapsed_time += SLEEP_TIME + elapsed_time = 0 + while elapsed_time < TIME_OUT: + if DoesImageExist(chromeos_root, build): + return + logger.GetLogger().LogOutput( + "Image %s not ready, waiting for 10 minutes" % build + ) + time.sleep(SLEEP_TIME) + elapsed_time += SLEEP_TIME - logger.GetLogger().LogOutput('Image %s not found, waited for %d hours' % - (build, (TIME_OUT / 3600))) - raise BuildbotTimeout('Timeout while waiting for image %s' % build) + logger.GetLogger().LogOutput( + "Image %s not found, waited for %d hours" % (build, (TIME_OUT / 3600)) + ) + raise BuildbotTimeout("Timeout while waiting for image %s" % build) def GetLatestImage(chromeos_root, path): - """Get latest image""" - - fmt = re.compile(r'R([0-9]+)-([0-9]+).([0-9]+).([0-9]+)') - - ce = command_executer.GetCommandExecuter() - command = ('gsutil ls gs://chromeos-image-archive/%s' % path) - ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, - command, - print_to_console=False) - if ret != 0: - raise RuntimeError('Failed to list buckets with command: %s.' 
% command)
-  candidates = [l.split('/')[-2] for l in out.split()]
-  candidates = [fmt.match(c) for c in candidates]
-  candidates = [[int(r) for r in m.group(1, 2, 3, 4)] for m in candidates if m]
-  candidates.sort(reverse=True)
-  for c in candidates:
-    build = '%s/R%d-%d.%d.%d' % (path, c[0], c[1], c[2], c[3])
-    if DoesImageExist(chromeos_root, build):
-      return build
+    """Get latest image"""
+
+    fmt = re.compile(r"R([0-9]+)-([0-9]+).([0-9]+).([0-9]+)")
+
+    ce = command_executer.GetCommandExecuter()
+    command = "gsutil ls gs://chromeos-image-archive/%s" % path
+    ret, out, _ = ce.ChrootRunCommandWOutput(
+        chromeos_root, command, print_to_console=False
+    )
+    if ret != 0:
+        raise RuntimeError("Failed to list buckets with command: %s." % command)
+    candidates = [l.split("/")[-2] for l in out.split()]
+    candidates = [fmt.match(c) for c in candidates]
+    candidates = [
+        [int(r) for r in m.group(1, 2, 3, 4)] for m in candidates if m
+    ]
+    candidates.sort(reverse=True)
+    for c in candidates:
+        build = "%s/R%d-%d.%d.%d" % (path, c[0], c[1], c[2], c[3])
+        if DoesImageExist(chromeos_root, build):
+            return build
 
 
 def GetLatestRecipeImage(chromeos_root, path):
-  """Get latest nightly test image from recipe bucket.
-
-  Image location example:
-  $ARCHIVE/lulu-llvm-next-nightly/R84-13037.0.0-31011-8883172717979984032
-  """
-
-  fmt = re.compile(r'R([0-9]+)-([0-9]+).([0-9]+).([0-9]+)-([0-9]+)')
-
-  ce = command_executer.GetCommandExecuter()
-  command = ('gsutil ls gs://chromeos-image-archive/%s' % path)
-  ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root,
-                                           command,
-                                           print_to_console=False)
-  if ret != 0:
-    raise RuntimeError('Failed to list buckets with command: %s.' % command)
-  candidates = [l.split('/')[-2] for l in out.split()]
-  candidates = [(fmt.match(c), c) for c in candidates]
-  candidates = [([int(r) for r in m[0].group(1, 2, 3, 4, 5)], m[1])
-                for m in candidates if m]
-  candidates.sort(key=lambda x: x[0], reverse=True)
-  # Try to get only the last two days of images, since nightly tests run
-  # once every other day.
-  for c in candidates[:2]:
-    build = '%s/%s' % (path, c[1])
-    if DoesImageExist(chromeos_root, build):
-      return build
+    """Get latest nightly test image from recipe bucket.
+
+    Image location example:
+    $ARCHIVE/lulu-llvm-next-nightly/R84-13037.0.0-31011-8883172717979984032
+    """
+
+    fmt = re.compile(r"R([0-9]+)-([0-9]+).([0-9]+).([0-9]+)-([0-9]+)")
+
+    ce = command_executer.GetCommandExecuter()
+    command = "gsutil ls gs://chromeos-image-archive/%s" % path
+    ret, out, _ = ce.ChrootRunCommandWOutput(
+        chromeos_root, command, print_to_console=False
+    )
+    if ret != 0:
+        raise RuntimeError("Failed to list buckets with command: %s." % command)
+    candidates = [l.split("/")[-2] for l in out.split()]
+    candidates = [(fmt.match(c), c) for c in candidates]
+    candidates = [
+        ([int(r) for r in m[0].group(1, 2, 3, 4, 5)], m[1])
+        for m in candidates
+        if m
+    ]
+    candidates.sort(key=lambda x: x[0], reverse=True)
+    # Try to get only the last two days of images, since nightly tests run
+    # once every other day.
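The reason both functions parse the version components into lists of ints before sorting the candidates, rather than sorting the raw names, is that strings compare lexicographically ('R100' sorts before 'R83' as text). A quick sketch with one real fixture name and one hypothetical future one:

    import re

    fmt = re.compile(r"R([0-9]+)-([0-9]+).([0-9]+).([0-9]+)-([0-9]+)")
    names = [
        "R83-13003.0.0-30218-8884712858556419",   # from the test fixtures
        "R100-14000.0.0-31000-8880000000000000",  # hypothetical future image
    ]
    parsed = [[int(g) for g in fmt.match(n).group(1, 2, 3, 4, 5)] for n in names]
    parsed.sort(reverse=True)
    print(parsed[0])  # the R100 image sorts first, as intended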
+ for c in candidates[:2]: + build = "%s/%s" % (path, c[1]) + if DoesImageExist(chromeos_root, build): + return build diff --git a/cros_utils/buildbot_utils_unittest.py b/cros_utils/buildbot_utils_unittest.py index 42b5a8e3..e12ea19e 100755 --- a/cros_utils/buildbot_utils_unittest.py +++ b/cros_utils/buildbot_utils_unittest.py @@ -10,7 +10,6 @@ from __future__ import print_function import time - import unittest from unittest.mock import patch @@ -19,160 +18,226 @@ from cros_utils import command_executer class TrybotTest(unittest.TestCase): - """Test for CommandExecuter class.""" - - tryjob_out = ( - '[{"buildbucket_id": "8952721143823688176", "build_config": ' - '"cave-llvm-toolchain-tryjob", "url": ' - # pylint: disable=line-too-long - '"http://cros-goldeneye/chromeos/healthmonitoring/buildDetails?buildbucketId=8952721143823688176"}]' - ) - - GSUTILS_LS = '\n'.join([ - 'gs://chromeos-image-archive/{0}/R78-12421.0.0/', - 'gs://chromeos-image-archive/{0}/R78-12422.0.0/', - 'gs://chromeos-image-archive/{0}/R78-12423.0.0/', - ]) - - GSUTILS_LS_RECIPE = '\n'.join([ - 'gs://chromeos-image-archive/{0}/R83-12995.0.0-30031-8885075268947031/', - 'gs://chromeos-image-archive/{0}/R83-13003.0.0-30196-8884755532184725/', - 'gs://chromeos-image-archive/{0}/R83-13003.0.0-30218-8884712858556419/', - ]) - - buildresult_out = ( - '{"8952721143823688176": {"status": "pass", "artifacts_url":' - '"gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-' - 'b20789"}}') - - buildbucket_id = '8952721143823688176' - counter_1 = 10 - - def testGetTrybotImage(self): - with patch.object(buildbot_utils, 'SubmitTryjob') as mock_submit: - with patch.object(buildbot_utils, 'PeekTrybotImage') as mock_peek: - with patch.object(time, 'sleep', return_value=None): - - def peek(_chromeos_root, _buildbucket_id): - self.counter_1 -= 1 - if self.counter_1 >= 0: - return ('running', '') - return ('pass', - 'gs://chromeos-image-archive/trybot-elm-release-tryjob/' - 'R67-10468.0.0-b20789') - - mock_peek.side_effect = peek - mock_submit.return_value = self.buildbucket_id - - # sync - buildbucket_id, image = buildbot_utils.GetTrybotImage( - '/tmp', 'falco-release-tryjob', []) - self.assertEqual(buildbucket_id, self.buildbucket_id) - self.assertEqual('trybot-elm-release-tryjob/' - 'R67-10468.0.0-b20789', image) - - # async - buildbucket_id, image = buildbot_utils.GetTrybotImage( - '/tmp', 'falco-release-tryjob', [], asynchronous=True) - self.assertEqual(buildbucket_id, self.buildbucket_id) - self.assertEqual(' ', image) - - def testSubmitTryjob(self): - with patch.object(command_executer.CommandExecuter, - 'RunCommandWOutput') as mocked_run: - mocked_run.return_value = (0, self.tryjob_out, '') - buildbucket_id = buildbot_utils.SubmitTryjob('/', 'falco-release-tryjob', - [], []) - self.assertEqual(buildbucket_id, self.buildbucket_id) - - def testPeekTrybotImage(self): - with patch.object(command_executer.CommandExecuter, - 'RunCommandWOutput') as mocked_run: - # pass - mocked_run.return_value = (0, self.buildresult_out, '') - status, image = buildbot_utils.PeekTrybotImage('/', self.buildbucket_id) - self.assertEqual('pass', status) - self.assertEqual( - 'gs://chromeos-image-archive/trybot-elm-release-tryjob/' - 'R67-10468.0.0-b20789', image) - - # running - mocked_run.return_value = (1, '', '') - status, image = buildbot_utils.PeekTrybotImage('/', self.buildbucket_id) - self.assertEqual('running', status) - self.assertEqual(None, image) - - # fail - buildresult_fail = self.buildresult_out.replace('\"pass\"', '\"fail\"') - 
mocked_run.return_value = (0, buildresult_fail, '') - status, image = buildbot_utils.PeekTrybotImage('/', self.buildbucket_id) - self.assertEqual('fail', status) - self.assertEqual( - 'gs://chromeos-image-archive/trybot-elm-release-tryjob/' - 'R67-10468.0.0-b20789', image) - - def testParseTryjobBuildbucketId(self): - buildbucket_id = buildbot_utils.ParseTryjobBuildbucketId(self.tryjob_out) - self.assertEqual(buildbucket_id, self.buildbucket_id) - - def testGetLatestImageValid(self): - with patch.object(command_executer.CommandExecuter, - 'ChrootRunCommandWOutput') as mocked_run: - with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist: - IMAGE_DIR = 'lulu-release' - mocked_run.return_value = (0, self.GSUTILS_LS.format(IMAGE_DIR), '') - mocked_imageexist.return_value = True - image = buildbot_utils.GetLatestImage('', IMAGE_DIR) - self.assertEqual(image, '{0}/R78-12423.0.0'.format(IMAGE_DIR)) - - def testGetLatestImageInvalid(self): - with patch.object(command_executer.CommandExecuter, - 'ChrootRunCommandWOutput') as mocked_run: - with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist: - IMAGE_DIR = 'kefka-release' - mocked_run.return_value = (0, self.GSUTILS_LS.format(IMAGE_DIR), '') - mocked_imageexist.return_value = False - image = buildbot_utils.GetLatestImage('', IMAGE_DIR) - self.assertIsNone(image) - - def testGetLatestRecipeImageValid(self): - with patch.object(command_executer.CommandExecuter, - 'ChrootRunCommandWOutput') as mocked_run: - with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist: - IMAGE_DIR = 'lulu-llvm-next-nightly' - mocked_run.return_value = (0, self.GSUTILS_LS_RECIPE.format(IMAGE_DIR), - '') - mocked_imageexist.return_value = True - image = buildbot_utils.GetLatestRecipeImage('', IMAGE_DIR) - self.assertEqual( - image, '{0}/R83-13003.0.0-30218-8884712858556419'.format(IMAGE_DIR)) - - def testGetLatestRecipeImageInvalid(self): - with patch.object(command_executer.CommandExecuter, - 'ChrootRunCommandWOutput') as mocked_run: - with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist: - IMAGE_DIR = 'kefka-llvm-next-nightly' - mocked_run.return_value = (0, self.GSUTILS_LS_RECIPE.format(IMAGE_DIR), - '') - mocked_imageexist.return_value = False - image = buildbot_utils.GetLatestRecipeImage('', IMAGE_DIR) - self.assertIsNone(image) - - def testGetLatestRecipeImageTwodays(self): - with patch.object(command_executer.CommandExecuter, - 'ChrootRunCommandWOutput') as mocked_run: - with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist: - IMAGE_DIR = 'lulu-llvm-next-nightly' - mocked_run.return_value = (0, self.GSUTILS_LS_RECIPE.format(IMAGE_DIR), - '') - mocked_imageexist.side_effect = [False, False, True] - image = buildbot_utils.GetLatestRecipeImage('', IMAGE_DIR) - self.assertIsNone(image) - mocked_imageexist.side_effect = [False, True, True] - image = buildbot_utils.GetLatestRecipeImage('', IMAGE_DIR) - self.assertEqual( - image, '{0}/R83-13003.0.0-30196-8884755532184725'.format(IMAGE_DIR)) - - -if __name__ == '__main__': - unittest.main() + """Test for CommandExecuter class.""" + + tryjob_out = ( + '[{"buildbucket_id": "8952721143823688176", "build_config": ' + '"cave-llvm-toolchain-tryjob", "url": ' + # pylint: disable=line-too-long + '"http://cros-goldeneye/chromeos/healthmonitoring/buildDetails?buildbucketId=8952721143823688176"}]' + ) + + GSUTILS_LS = "\n".join( + [ + "gs://chromeos-image-archive/{0}/R78-12421.0.0/", + "gs://chromeos-image-archive/{0}/R78-12422.0.0/", + 
"gs://chromeos-image-archive/{0}/R78-12423.0.0/", + ] + ) + + GSUTILS_LS_RECIPE = "\n".join( + [ + "gs://chromeos-image-archive/{0}/R83-12995.0.0-30031-8885075268947031/", + "gs://chromeos-image-archive/{0}/R83-13003.0.0-30196-8884755532184725/", + "gs://chromeos-image-archive/{0}/R83-13003.0.0-30218-8884712858556419/", + ] + ) + + buildresult_out = ( + '{"8952721143823688176": {"status": "pass", "artifacts_url":' + '"gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-' + 'b20789"}}' + ) + + buildbucket_id = "8952721143823688176" + counter_1 = 10 + + def testGetTrybotImage(self): + with patch.object(buildbot_utils, "SubmitTryjob") as mock_submit: + with patch.object(buildbot_utils, "PeekTrybotImage") as mock_peek: + with patch.object(time, "sleep", return_value=None): + + def peek(_chromeos_root, _buildbucket_id): + self.counter_1 -= 1 + if self.counter_1 >= 0: + return ("running", "") + return ( + "pass", + "gs://chromeos-image-archive/trybot-elm-release-tryjob/" + "R67-10468.0.0-b20789", + ) + + mock_peek.side_effect = peek + mock_submit.return_value = self.buildbucket_id + + # sync + buildbucket_id, image = buildbot_utils.GetTrybotImage( + "/tmp", "falco-release-tryjob", [] + ) + self.assertEqual(buildbucket_id, self.buildbucket_id) + self.assertEqual( + "trybot-elm-release-tryjob/" "R67-10468.0.0-b20789", + image, + ) + + # async + buildbucket_id, image = buildbot_utils.GetTrybotImage( + "/tmp", "falco-release-tryjob", [], asynchronous=True + ) + self.assertEqual(buildbucket_id, self.buildbucket_id) + self.assertEqual(" ", image) + + def testSubmitTryjob(self): + with patch.object( + command_executer.CommandExecuter, "RunCommandWOutput" + ) as mocked_run: + mocked_run.return_value = (0, self.tryjob_out, "") + buildbucket_id = buildbot_utils.SubmitTryjob( + "/", "falco-release-tryjob", [], [] + ) + self.assertEqual(buildbucket_id, self.buildbucket_id) + + def testPeekTrybotImage(self): + with patch.object( + command_executer.CommandExecuter, "RunCommandWOutput" + ) as mocked_run: + # pass + mocked_run.return_value = (0, self.buildresult_out, "") + status, image = buildbot_utils.PeekTrybotImage( + "/", self.buildbucket_id + ) + self.assertEqual("pass", status) + self.assertEqual( + "gs://chromeos-image-archive/trybot-elm-release-tryjob/" + "R67-10468.0.0-b20789", + image, + ) + + # running + mocked_run.return_value = (1, "", "") + status, image = buildbot_utils.PeekTrybotImage( + "/", self.buildbucket_id + ) + self.assertEqual("running", status) + self.assertEqual(None, image) + + # fail + buildresult_fail = self.buildresult_out.replace('"pass"', '"fail"') + mocked_run.return_value = (0, buildresult_fail, "") + status, image = buildbot_utils.PeekTrybotImage( + "/", self.buildbucket_id + ) + self.assertEqual("fail", status) + self.assertEqual( + "gs://chromeos-image-archive/trybot-elm-release-tryjob/" + "R67-10468.0.0-b20789", + image, + ) + + def testParseTryjobBuildbucketId(self): + buildbucket_id = buildbot_utils.ParseTryjobBuildbucketId( + self.tryjob_out + ) + self.assertEqual(buildbucket_id, self.buildbucket_id) + + def testGetLatestImageValid(self): + with patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" + ) as mocked_run: + with patch.object( + buildbot_utils, "DoesImageExist" + ) as mocked_imageexist: + IMAGE_DIR = "lulu-release" + mocked_run.return_value = ( + 0, + self.GSUTILS_LS.format(IMAGE_DIR), + "", + ) + mocked_imageexist.return_value = True + image = buildbot_utils.GetLatestImage("", IMAGE_DIR) + self.assertEqual(image, 
"{0}/R78-12423.0.0".format(IMAGE_DIR)) + + def testGetLatestImageInvalid(self): + with patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" + ) as mocked_run: + with patch.object( + buildbot_utils, "DoesImageExist" + ) as mocked_imageexist: + IMAGE_DIR = "kefka-release" + mocked_run.return_value = ( + 0, + self.GSUTILS_LS.format(IMAGE_DIR), + "", + ) + mocked_imageexist.return_value = False + image = buildbot_utils.GetLatestImage("", IMAGE_DIR) + self.assertIsNone(image) + + def testGetLatestRecipeImageValid(self): + with patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" + ) as mocked_run: + with patch.object( + buildbot_utils, "DoesImageExist" + ) as mocked_imageexist: + IMAGE_DIR = "lulu-llvm-next-nightly" + mocked_run.return_value = ( + 0, + self.GSUTILS_LS_RECIPE.format(IMAGE_DIR), + "", + ) + mocked_imageexist.return_value = True + image = buildbot_utils.GetLatestRecipeImage("", IMAGE_DIR) + self.assertEqual( + image, + "{0}/R83-13003.0.0-30218-8884712858556419".format( + IMAGE_DIR + ), + ) + + def testGetLatestRecipeImageInvalid(self): + with patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" + ) as mocked_run: + with patch.object( + buildbot_utils, "DoesImageExist" + ) as mocked_imageexist: + IMAGE_DIR = "kefka-llvm-next-nightly" + mocked_run.return_value = ( + 0, + self.GSUTILS_LS_RECIPE.format(IMAGE_DIR), + "", + ) + mocked_imageexist.return_value = False + image = buildbot_utils.GetLatestRecipeImage("", IMAGE_DIR) + self.assertIsNone(image) + + def testGetLatestRecipeImageTwodays(self): + with patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" + ) as mocked_run: + with patch.object( + buildbot_utils, "DoesImageExist" + ) as mocked_imageexist: + IMAGE_DIR = "lulu-llvm-next-nightly" + mocked_run.return_value = ( + 0, + self.GSUTILS_LS_RECIPE.format(IMAGE_DIR), + "", + ) + mocked_imageexist.side_effect = [False, False, True] + image = buildbot_utils.GetLatestRecipeImage("", IMAGE_DIR) + self.assertIsNone(image) + mocked_imageexist.side_effect = [False, True, True] + image = buildbot_utils.GetLatestRecipeImage("", IMAGE_DIR) + self.assertEqual( + image, + "{0}/R83-13003.0.0-30196-8884755532184725".format( + IMAGE_DIR + ), + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/cros_utils/command_executer.py b/cros_utils/command_executer.py index fe21f625..a30ba752 100755 --- a/cros_utils/command_executer.py +++ b/cros_utils/command_executer.py @@ -20,699 +20,775 @@ import time from cros_utils import logger + mock_default = False -CHROMEOS_SCRIPTS_DIR = '/mnt/host/source/src/scripts' -LOG_LEVEL = ('none', 'quiet', 'average', 'verbose') +CHROMEOS_SCRIPTS_DIR = "/mnt/host/source/src/scripts" +LOG_LEVEL = ("none", "quiet", "average", "verbose") def InitCommandExecuter(mock=False): - # pylint: disable=global-statement - global mock_default - # Whether to default to a mock command executer or not - mock_default = mock + # pylint: disable=global-statement + global mock_default + # Whether to default to a mock command executer or not + mock_default = mock -def GetCommandExecuter(logger_to_set=None, mock=False, log_level='verbose'): - # If the default is a mock executer, always return one. - if mock_default or mock: - return MockCommandExecuter(log_level, logger_to_set) - else: - return CommandExecuter(log_level, logger_to_set) +def GetCommandExecuter(logger_to_set=None, mock=False, log_level="verbose"): + # If the default is a mock executer, always return one. 
+ if mock_default or mock: + return MockCommandExecuter(log_level, logger_to_set) + else: + return CommandExecuter(log_level, logger_to_set) class CommandExecuter(object): - """Provides several methods to execute commands on several environments.""" - - def __init__(self, log_level, logger_to_set=None): - self.log_level = log_level - if log_level == 'none': - self.logger = None - else: - if logger_to_set is not None: - self.logger = logger_to_set - else: - self.logger = logger.GetLogger() - - def GetLogLevel(self): - return self.log_level - - def SetLogLevel(self, log_level): - self.log_level = log_level - - def RunCommandGeneric(self, - cmd, - return_output=False, - machine=None, - username=None, - command_terminator=None, - command_timeout=None, - terminated_timeout=10, - print_to_console=True, - env=None, - except_handler=lambda p, e: None): - """Run a command. - - Returns triplet (returncode, stdout, stderr). - """ - - cmd = str(cmd) - - if self.log_level == 'quiet': - print_to_console = False - - if self.log_level == 'verbose': - self.logger.LogCmd(cmd, machine, username, print_to_console) - elif self.logger: - self.logger.LogCmdToFileOnly(cmd, machine, username) - if command_terminator and command_terminator.IsTerminated(): - if self.logger: - self.logger.LogError('Command was terminated!', print_to_console) - return (1, '', '') - - if machine is not None: - user = '' - if username is not None: - user = username + '@' - cmd = "ssh -t -t %s%s -- '%s'" % (user, machine, cmd) - - # We use setsid so that the child will have a different session id - # and we can easily kill the process group. This is also important - # because the child will be disassociated from the parent terminal. - # In this way the child cannot mess the parent's terminal. - p = None - try: - # pylint: disable=bad-option-value, subprocess-popen-preexec-fn - p = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True, - preexec_fn=os.setsid, - executable='/bin/bash', - env=env) - - full_stdout = '' - full_stderr = '' - - # Pull output from pipes, send it to file/stdout/string - out = err = None - pipes = [p.stdout, p.stderr] - - my_poll = select.poll() - my_poll.register(p.stdout, select.POLLIN) - my_poll.register(p.stderr, select.POLLIN) - - terminated_time = None - started_time = time.time() - - while pipes: + """Provides several methods to execute commands on several environments.""" + + def __init__(self, log_level, logger_to_set=None): + self.log_level = log_level + if log_level == "none": + self.logger = None + else: + if logger_to_set is not None: + self.logger = logger_to_set + else: + self.logger = logger.GetLogger() + + def GetLogLevel(self): + return self.log_level + + def SetLogLevel(self, log_level): + self.log_level = log_level + + def RunCommandGeneric( + self, + cmd, + return_output=False, + machine=None, + username=None, + command_terminator=None, + command_timeout=None, + terminated_timeout=10, + print_to_console=True, + env=None, + except_handler=lambda p, e: None, + ): + """Run a command. + + Returns triplet (returncode, stdout, stderr). 
+ """ + + cmd = str(cmd) + + if self.log_level == "quiet": + print_to_console = False + + if self.log_level == "verbose": + self.logger.LogCmd(cmd, machine, username, print_to_console) + elif self.logger: + self.logger.LogCmdToFileOnly(cmd, machine, username) if command_terminator and command_terminator.IsTerminated(): - os.killpg(os.getpgid(p.pid), signal.SIGTERM) - if self.logger: - self.logger.LogError( - 'Command received termination request. ' - 'Killed child process group.', print_to_console) - break - - l = my_poll.poll(100) - for (fd, _) in l: - if fd == p.stdout.fileno(): - out = os.read(p.stdout.fileno(), 16384).decode('utf8') - if return_output: - full_stdout += out if self.logger: - self.logger.LogCommandOutput(out, print_to_console) - if out == '': - pipes.remove(p.stdout) - my_poll.unregister(p.stdout) - if fd == p.stderr.fileno(): - err = os.read(p.stderr.fileno(), 16384).decode('utf8') + self.logger.LogError( + "Command was terminated!", print_to_console + ) + return (1, "", "") + + if machine is not None: + user = "" + if username is not None: + user = username + "@" + cmd = "ssh -t -t %s%s -- '%s'" % (user, machine, cmd) + + # We use setsid so that the child will have a different session id + # and we can easily kill the process group. This is also important + # because the child will be disassociated from the parent terminal. + # In this way the child cannot mess the parent's terminal. + p = None + try: + # pylint: disable=bad-option-value, subprocess-popen-preexec-fn + p = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + preexec_fn=os.setsid, + executable="/bin/bash", + env=env, + ) + + full_stdout = "" + full_stderr = "" + + # Pull output from pipes, send it to file/stdout/string + out = err = None + pipes = [p.stdout, p.stderr] + + my_poll = select.poll() + my_poll.register(p.stdout, select.POLLIN) + my_poll.register(p.stderr, select.POLLIN) + + terminated_time = None + started_time = time.time() + + while pipes: + if command_terminator and command_terminator.IsTerminated(): + os.killpg(os.getpgid(p.pid), signal.SIGTERM) + if self.logger: + self.logger.LogError( + "Command received termination request. " + "Killed child process group.", + print_to_console, + ) + break + + l = my_poll.poll(100) + for (fd, _) in l: + if fd == p.stdout.fileno(): + out = os.read(p.stdout.fileno(), 16384).decode("utf8") + if return_output: + full_stdout += out + if self.logger: + self.logger.LogCommandOutput(out, print_to_console) + if out == "": + pipes.remove(p.stdout) + my_poll.unregister(p.stdout) + if fd == p.stderr.fileno(): + err = os.read(p.stderr.fileno(), 16384).decode("utf8") + if return_output: + full_stderr += err + if self.logger: + self.logger.LogCommandError(err, print_to_console) + if err == "": + pipes.remove(p.stderr) + my_poll.unregister(p.stderr) + + if p.poll() is not None: + if terminated_time is None: + terminated_time = time.time() + elif ( + terminated_timeout is not None + and time.time() - terminated_time > terminated_timeout + ): + if self.logger: + self.logger.LogWarning( + "Timeout of %s seconds reached since " + "process termination." % terminated_timeout, + print_to_console, + ) + break + + if ( + command_timeout is not None + and time.time() - started_time > command_timeout + ): + os.killpg(os.getpgid(p.pid), signal.SIGTERM) + if self.logger: + self.logger.LogWarning( + "Timeout of %s seconds reached since process" + "started. Killed child process group." 
+ % command_timeout, + print_to_console, + ) + break + + if out == err == "": + break + + p.wait() if return_output: - full_stderr += err + return (p.returncode, full_stdout, full_stderr) + return (p.returncode, "", "") + except BaseException as err: + except_handler(p, err) + raise + + def RunCommand(self, *args, **kwargs): + """Run a command. + + Takes the same arguments as RunCommandGeneric except for return_output. + Returns a single value returncode. + """ + # Make sure that args does not overwrite 'return_output' + assert len(args) <= 1 + assert "return_output" not in kwargs + kwargs["return_output"] = False + return self.RunCommandGeneric(*args, **kwargs)[0] + + def RunCommandWExceptionCleanup(self, *args, **kwargs): + """Run a command and kill process if exception is thrown. + + Takes the same arguments as RunCommandGeneric except for except_handler. + Returns same as RunCommandGeneric. + """ + + def KillProc(proc, _): + if proc: + os.killpg(os.getpgid(proc.pid), signal.SIGTERM) + + # Make sure that args does not overwrite 'except_handler' + assert len(args) <= 8 + assert "except_handler" not in kwargs + kwargs["except_handler"] = KillProc + return self.RunCommandGeneric(*args, **kwargs) + + def RunCommandWOutput(self, *args, **kwargs): + """Run a command. + + Takes the same arguments as RunCommandGeneric except for return_output. + Returns a triplet (returncode, stdout, stderr). + """ + # Make sure that args does not overwrite 'return_output' + assert len(args) <= 1 + assert "return_output" not in kwargs + kwargs["return_output"] = True + return self.RunCommandGeneric(*args, **kwargs) + + def RemoteAccessInitCommand(self, chromeos_root, machine, port=None): + command = "" + command += "\nset -- --remote=" + machine + if port: + command += " --ssh_port=" + port + command += "\n. " + chromeos_root + "/src/scripts/common.sh" + command += "\n. " + chromeos_root + "/src/scripts/remote_access.sh" + command += "\nTMP=$(mktemp -d)" + command += '\nFLAGS "$@" || exit 1' + command += "\nremote_access_init" + return command + + def WriteToTempShFile(self, contents): + with tempfile.NamedTemporaryFile( + "w", + encoding="utf-8", + delete=False, + prefix=os.uname()[1], + suffix=".sh", + ) as f: + f.write("#!/bin/bash\n") + f.write(contents) + f.flush() + return f.name + + def CrosLearnBoard(self, chromeos_root, machine): + command = self.RemoteAccessInitCommand(chromeos_root, machine) + command += "\nlearn_board" + command += "\necho ${FLAGS_board}" + retval, output, _ = self.RunCommandWOutput(command) + if self.logger: + self.logger.LogFatalIf(retval, "learn_board command failed") + elif retval: + sys.exit(1) + return output.split()[-1] + + def CrosRunCommandGeneric( + self, + cmd, + return_output=False, + machine=None, + command_terminator=None, + chromeos_root=None, + command_timeout=None, + terminated_timeout=10, + print_to_console=True, + ): + """Run a command on a ChromeOS box. + + Returns triplet (returncode, stdout, stderr). + """ + + if self.log_level != "verbose": + print_to_console = False + + if self.logger: + self.logger.LogCmd(cmd, print_to_console=print_to_console) + self.logger.LogFatalIf(not machine, "No machine provided!") + self.logger.LogFatalIf( + not chromeos_root, "chromeos_root not given!" + ) + else: + if not chromeos_root or not machine: + sys.exit(1) + chromeos_root = os.path.expanduser(chromeos_root) + + port = None + if ":" in machine: + machine, port = machine.split(":") + # Write all commands to a file. 
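The block below bundles the whole remote invocation: the command is written to a throwaway shell script, copied to the device, and run there. Note that `machine` may carry an ssh port after a colon, which was split off just above. A usage sketch of the output-returning wrapper (host and checkout path are made up):

    from cros_utils import command_executer

    ce = command_executer.GetCommandExecuter()
    retval, out, err = ce.CrosRunCommandWOutput(
        "uname -a",
        machine="dut-hostname:2222",   # hypothetical DUT, port 2222
        chromeos_root="~/chromiumos",  # hypothetical checkout
    )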
+ command_file = self.WriteToTempShFile(cmd) + retval = self.CopyFiles( + command_file, + command_file, + dest_machine=machine, + dest_port=port, + command_terminator=command_terminator, + chromeos_root=chromeos_root, + dest_cros=True, + recursive=False, + print_to_console=print_to_console, + ) + if retval: if self.logger: - self.logger.LogCommandError(err, print_to_console) - if err == '': - pipes.remove(p.stderr) - my_poll.unregister(p.stderr) - - if p.poll() is not None: - if terminated_time is None: - terminated_time = time.time() - elif (terminated_timeout is not None - and time.time() - terminated_time > terminated_timeout): + self.logger.LogError( + "Could not run remote command on machine." + " Is the machine up?" + ) + return (retval, "", "") + + command = self.RemoteAccessInitCommand(chromeos_root, machine, port) + command += "\nremote_sh bash %s" % command_file + command += '\nl_retval=$?; echo "$REMOTE_OUT"; exit $l_retval' + retval = self.RunCommandGeneric( + command, + return_output, + command_terminator=command_terminator, + command_timeout=command_timeout, + terminated_timeout=terminated_timeout, + print_to_console=print_to_console, + ) + if return_output: + connect_signature = ( + "Initiating first contact with remote host\n" + + "Connection OK\n" + ) + connect_signature_re = re.compile(connect_signature) + modded_retval = list(retval) + modded_retval[1] = connect_signature_re.sub("", retval[1]) + return modded_retval + return retval + + def CrosRunCommand(self, *args, **kwargs): + """Run a command on a ChromeOS box. + + Takes the same arguments as CrosRunCommandGeneric except for return_output. + Returns a single value returncode. + """ + # Make sure that args does not overwrite 'return_output' + assert len(args) <= 1 + assert "return_output" not in kwargs + kwargs["return_output"] = False + return self.CrosRunCommandGeneric(*args, **kwargs)[0] + + def CrosRunCommandWOutput(self, *args, **kwargs): + """Run a command on a ChromeOS box. + + Takes the same arguments as CrosRunCommandGeneric except for return_output. + Returns a triplet (returncode, stdout, stderr). + """ + # Make sure that args does not overwrite 'return_output' + assert len(args) <= 1 + assert "return_output" not in kwargs + kwargs["return_output"] = True + return self.CrosRunCommandGeneric(*args, **kwargs) + + def ChrootRunCommandGeneric( + self, + chromeos_root, + command, + return_output=False, + command_terminator=None, + command_timeout=None, + terminated_timeout=10, + print_to_console=True, + cros_sdk_options="", + env=None, + ): + """Runs a command within the chroot. + + Returns triplet (returncode, stdout, stderr). + """ + + if self.log_level != "verbose": + print_to_console = False + + if self.logger: + self.logger.LogCmd(command, print_to_console=print_to_console) + + with tempfile.NamedTemporaryFile( + "w", + encoding="utf-8", + delete=False, + dir=os.path.join(chromeos_root, "src/scripts"), + suffix=".sh", + prefix="in_chroot_cmd", + ) as f: + f.write("#!/bin/bash\n") + f.write(command) + f.write("\n") + f.flush() + + command_file = f.name + os.chmod(command_file, 0o777) + + # if return_output is set, run a test command first to make sure that + # the chroot already exists. We want the final returned output to skip + # the output from chroot creation steps. 
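That warm-up (the `if return_output:` block just below) matters for callers such as buildbot_utils.GetLatestImage earlier in this patch, which parse the command's stdout and must not see chroot-setup chatter mixed in. A minimal usage sketch of the output-returning wrapper (the chromeos_root path is made up):

    from cros_utils import command_executer

    ce = command_executer.GetCommandExecuter()
    ret, out, _ = ce.ChrootRunCommandWOutput(
        "~/chromiumos",  # hypothetical chromeos_root
        "gsutil ls gs://chromeos-image-archive/lulu-release",
        print_to_console=False,
    )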
+ if return_output: + ret = self.RunCommand( + "cd %s; cros_sdk %s -- true" + % (chromeos_root, cros_sdk_options), + env=env, + # Give this command a long time to execute; it might involve setting + # the chroot up, or running fstrim on its image file. Both of these + # operations can take well over the timeout default of 10 seconds. + terminated_timeout=5 * 60, + ) + if ret: + return (ret, "", "") + + # Run command_file inside the chroot, making sure that any "~" is expanded + # by the shell inside the chroot, not outside. + command = "cd %s; cros_sdk %s -- bash -c '%s/%s'" % ( + chromeos_root, + cros_sdk_options, + CHROMEOS_SCRIPTS_DIR, + os.path.basename(command_file), + ) + ret = self.RunCommandGeneric( + command, + return_output, + command_terminator=command_terminator, + command_timeout=command_timeout, + terminated_timeout=terminated_timeout, + print_to_console=print_to_console, + env=env, + ) + os.remove(command_file) + return ret + + def ChrootRunCommand(self, *args, **kwargs): + """Runs a command within the chroot. + + Takes the same arguments as ChrootRunCommandGeneric except for + return_output. + Returns a single value returncode. + """ + # Make sure that args does not overwrite 'return_output' + assert len(args) <= 2 + assert "return_output" not in kwargs + kwargs["return_output"] = False + return self.ChrootRunCommandGeneric(*args, **kwargs)[0] + + def ChrootRunCommandWOutput(self, *args, **kwargs): + """Runs a command within the chroot. + + Takes the same arguments as ChrootRunCommandGeneric except for + return_output. + Returns a triplet (returncode, stdout, stderr). + """ + # Make sure that args does not overwrite 'return_output' + assert len(args) <= 2 + assert "return_output" not in kwargs + kwargs["return_output"] = True + return self.ChrootRunCommandGeneric(*args, **kwargs) + + def RunCommands( + self, cmdlist, machine=None, username=None, command_terminator=None + ): + cmd = " ;\n".join(cmdlist) + return self.RunCommand( + cmd, + machine=machine, + username=username, + command_terminator=command_terminator, + ) + + def CopyFiles( + self, + src, + dest, + src_machine=None, + src_port=None, + dest_machine=None, + dest_port=None, + src_user=None, + dest_user=None, + recursive=True, + command_terminator=None, + chromeos_root=None, + src_cros=False, + dest_cros=False, + print_to_console=True, + ): + src = os.path.expanduser(src) + dest = os.path.expanduser(dest) + + if recursive: + src = src + "/" + dest = dest + "/" + + if src_cros or dest_cros: if self.logger: - self.logger.LogWarning( - 'Timeout of %s seconds reached since ' - 'process termination.' % terminated_timeout, - print_to_console) - break - - if (command_timeout is not None - and time.time() - started_time > command_timeout): - os.killpg(os.getpgid(p.pid), signal.SIGTERM) - if self.logger: - self.logger.LogWarning( - 'Timeout of %s seconds reached since process' - 'started. Killed child process group.' % command_timeout, - print_to_console) - break - - if out == err == '': - break - - p.wait() - if return_output: - return (p.returncode, full_stdout, full_stderr) - return (p.returncode, '', '') - except BaseException as err: - except_handler(p, err) - raise - - def RunCommand(self, *args, **kwargs): - """Run a command. - - Takes the same arguments as RunCommandGeneric except for return_output. - Returns a single value returncode. 
- """ - # Make sure that args does not overwrite 'return_output' - assert len(args) <= 1 - assert 'return_output' not in kwargs - kwargs['return_output'] = False - return self.RunCommandGeneric(*args, **kwargs)[0] - - def RunCommandWExceptionCleanup(self, *args, **kwargs): - """Run a command and kill process if exception is thrown. - - Takes the same arguments as RunCommandGeneric except for except_handler. - Returns same as RunCommandGeneric. - """ - - def KillProc(proc, _): - if proc: - os.killpg(os.getpgid(proc.pid), signal.SIGTERM) - - # Make sure that args does not overwrite 'except_handler' - assert len(args) <= 8 - assert 'except_handler' not in kwargs - kwargs['except_handler'] = KillProc - return self.RunCommandGeneric(*args, **kwargs) - - def RunCommandWOutput(self, *args, **kwargs): - """Run a command. - - Takes the same arguments as RunCommandGeneric except for return_output. - Returns a triplet (returncode, stdout, stderr). - """ - # Make sure that args does not overwrite 'return_output' - assert len(args) <= 1 - assert 'return_output' not in kwargs - kwargs['return_output'] = True - return self.RunCommandGeneric(*args, **kwargs) - - def RemoteAccessInitCommand(self, chromeos_root, machine, port=None): - command = '' - command += '\nset -- --remote=' + machine - if port: - command += ' --ssh_port=' + port - command += '\n. ' + chromeos_root + '/src/scripts/common.sh' - command += '\n. ' + chromeos_root + '/src/scripts/remote_access.sh' - command += '\nTMP=$(mktemp -d)' - command += '\nFLAGS "$@" || exit 1' - command += '\nremote_access_init' - return command - - def WriteToTempShFile(self, contents): - with tempfile.NamedTemporaryFile('w', - encoding='utf-8', - delete=False, - prefix=os.uname()[1], - suffix='.sh') as f: - f.write('#!/bin/bash\n') - f.write(contents) - f.flush() - return f.name - - def CrosLearnBoard(self, chromeos_root, machine): - command = self.RemoteAccessInitCommand(chromeos_root, machine) - command += '\nlearn_board' - command += '\necho ${FLAGS_board}' - retval, output, _ = self.RunCommandWOutput(command) - if self.logger: - self.logger.LogFatalIf(retval, 'learn_board command failed') - elif retval: - sys.exit(1) - return output.split()[-1] - - def CrosRunCommandGeneric(self, - cmd, - return_output=False, - machine=None, - command_terminator=None, - chromeos_root=None, - command_timeout=None, - terminated_timeout=10, - print_to_console=True): - """Run a command on a ChromeOS box. - - Returns triplet (returncode, stdout, stderr). - """ - - if self.log_level != 'verbose': - print_to_console = False - - if self.logger: - self.logger.LogCmd(cmd, print_to_console=print_to_console) - self.logger.LogFatalIf(not machine, 'No machine provided!') - self.logger.LogFatalIf(not chromeos_root, 'chromeos_root not given!') - else: - if not chromeos_root or not machine: - sys.exit(1) - chromeos_root = os.path.expanduser(chromeos_root) - - port = None - if ':' in machine: - machine, port = machine.split(':') - # Write all commands to a file. - command_file = self.WriteToTempShFile(cmd) - retval = self.CopyFiles(command_file, - command_file, - dest_machine=machine, - dest_port=port, - command_terminator=command_terminator, - chromeos_root=chromeos_root, - dest_cros=True, - recursive=False, - print_to_console=print_to_console) - if retval: - if self.logger: - self.logger.LogError('Could not run remote command on machine.' 
- ' Is the machine up?') - return (retval, '', '') - - command = self.RemoteAccessInitCommand(chromeos_root, machine, port) - command += '\nremote_sh bash %s' % command_file - command += '\nl_retval=$?; echo "$REMOTE_OUT"; exit $l_retval' - retval = self.RunCommandGeneric(command, - return_output, - command_terminator=command_terminator, - command_timeout=command_timeout, - terminated_timeout=terminated_timeout, - print_to_console=print_to_console) - if return_output: - connect_signature = ('Initiating first contact with remote host\n' + - 'Connection OK\n') - connect_signature_re = re.compile(connect_signature) - modded_retval = list(retval) - modded_retval[1] = connect_signature_re.sub('', retval[1]) - return modded_retval - return retval - - def CrosRunCommand(self, *args, **kwargs): - """Run a command on a ChromeOS box. - - Takes the same arguments as CrosRunCommandGeneric except for return_output. - Returns a single value returncode. - """ - # Make sure that args does not overwrite 'return_output' - assert len(args) <= 1 - assert 'return_output' not in kwargs - kwargs['return_output'] = False - return self.CrosRunCommandGeneric(*args, **kwargs)[0] - - def CrosRunCommandWOutput(self, *args, **kwargs): - """Run a command on a ChromeOS box. - - Takes the same arguments as CrosRunCommandGeneric except for return_output. - Returns a triplet (returncode, stdout, stderr). - """ - # Make sure that args does not overwrite 'return_output' - assert len(args) <= 1 - assert 'return_output' not in kwargs - kwargs['return_output'] = True - return self.CrosRunCommandGeneric(*args, **kwargs) - - def ChrootRunCommandGeneric(self, - chromeos_root, - command, - return_output=False, - command_terminator=None, - command_timeout=None, - terminated_timeout=10, - print_to_console=True, - cros_sdk_options='', - env=None): - """Runs a command within the chroot. - - Returns triplet (returncode, stdout, stderr). - """ - - if self.log_level != 'verbose': - print_to_console = False - - if self.logger: - self.logger.LogCmd(command, print_to_console=print_to_console) - - with tempfile.NamedTemporaryFile('w', - encoding='utf-8', - delete=False, - dir=os.path.join(chromeos_root, - 'src/scripts'), - suffix='.sh', - prefix='in_chroot_cmd') as f: - f.write('#!/bin/bash\n') - f.write(command) - f.write('\n') - f.flush() - - command_file = f.name - os.chmod(command_file, 0o777) - - # if return_output is set, run a test command first to make sure that - # the chroot already exists. We want the final returned output to skip - # the output from chroot creation steps. - if return_output: - ret = self.RunCommand( - 'cd %s; cros_sdk %s -- true' % (chromeos_root, cros_sdk_options), - env=env, - # Give this command a long time to execute; it might involve setting - # the chroot up, or running fstrim on its image file. Both of these - # operations can take well over the timeout default of 10 seconds. - terminated_timeout=5 * 60) - if ret: - return (ret, '', '') - - # Run command_file inside the chroot, making sure that any "~" is expanded - # by the shell inside the chroot, not outside. 
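
A minimal sketch of the pattern ChrootRunCommandGeneric implements, assuming a script dropped under src/scripts is visible inside the chroot (the chroot-side path and the run_in_chroot_sketch helper are illustrative, not part of this module):

    import os
    import subprocess
    import tempfile

    def run_in_chroot_sketch(chromeos_root, command, cros_sdk_options=""):
        # The script lives under src/scripts, which cros_sdk makes visible
        # inside the chroot, so the chroot-side shell can find and run it.
        with tempfile.NamedTemporaryFile(
            "w",
            dir=os.path.join(chromeos_root, "src/scripts"),
            prefix="in_chroot_cmd",
            suffix=".sh",
            delete=False,
        ) as f:
            f.write("#!/bin/bash\n%s\n" % command)
            script = f.name
        os.chmod(script, 0o777)
        try:
            # Single quotes keep any '~' unexpanded until the shell inside
            # the chroot sees it.
            return subprocess.call(
                "cd %s; cros_sdk %s -- bash -c '~/trunk/src/scripts/%s'"
                % (chromeos_root, cros_sdk_options, os.path.basename(script)),
                shell=True,
            )
        finally:
            os.remove(script)
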
- command = ("cd %s; cros_sdk %s -- bash -c '%s/%s'" % - (chromeos_root, cros_sdk_options, CHROMEOS_SCRIPTS_DIR, - os.path.basename(command_file))) - ret = self.RunCommandGeneric(command, - return_output, - command_terminator=command_terminator, - command_timeout=command_timeout, - terminated_timeout=terminated_timeout, - print_to_console=print_to_console, - env=env) - os.remove(command_file) - return ret - - def ChrootRunCommand(self, *args, **kwargs): - """Runs a command within the chroot. - - Takes the same arguments as ChrootRunCommandGeneric except for - return_output. - Returns a single value returncode. - """ - # Make sure that args does not overwrite 'return_output' - assert len(args) <= 2 - assert 'return_output' not in kwargs - kwargs['return_output'] = False - return self.ChrootRunCommandGeneric(*args, **kwargs)[0] - - def ChrootRunCommandWOutput(self, *args, **kwargs): - """Runs a command within the chroot. - - Takes the same arguments as ChrootRunCommandGeneric except for - return_output. - Returns a triplet (returncode, stdout, stderr). - """ - # Make sure that args does not overwrite 'return_output' - assert len(args) <= 2 - assert 'return_output' not in kwargs - kwargs['return_output'] = True - return self.ChrootRunCommandGeneric(*args, **kwargs) - - def RunCommands(self, - cmdlist, - machine=None, - username=None, - command_terminator=None): - cmd = ' ;\n'.join(cmdlist) - return self.RunCommand(cmd, - machine=machine, - username=username, - command_terminator=command_terminator) - - def CopyFiles(self, + self.logger.LogFatalIf( + src_cros == dest_cros, + "Only one of src_cros and desc_cros can " "be True.", + ) + self.logger.LogFatalIf( + not chromeos_root, "chromeos_root not given!" + ) + elif src_cros == dest_cros or not chromeos_root: + sys.exit(1) + if src_cros: + cros_machine = src_machine + cros_port = src_port + host_machine = dest_machine + host_user = dest_user + else: + cros_machine = dest_machine + cros_port = dest_port + host_machine = src_machine + host_user = src_user + + command = self.RemoteAccessInitCommand( + chromeos_root, cros_machine, cros_port + ) + ssh_command = ( + "ssh -o StrictHostKeyChecking=no" + + " -o UserKnownHostsFile=$(mktemp)" + + " -i $TMP_PRIVATE_KEY" + ) + if cros_port: + ssh_command += " -p %s" % cros_port + rsync_prefix = '\nrsync -r -e "%s" ' % ssh_command + if dest_cros: + command += rsync_prefix + "%s root@%s:%s" % ( + src, + cros_machine, + dest, + ) + else: + command += rsync_prefix + "root@%s:%s %s" % ( + cros_machine, + src, + dest, + ) + + return self.RunCommand( + command, + machine=host_machine, + username=host_user, + command_terminator=command_terminator, + print_to_console=print_to_console, + ) + + if dest_machine == src_machine: + command = "rsync -a %s %s" % (src, dest) + else: + if src_machine is None: + src_machine = os.uname()[1] + src_user = getpass.getuser() + command = "rsync -a %s@%s:%s %s" % ( + src_user, + src_machine, src, dest, - src_machine=None, - src_port=None, - dest_machine=None, - dest_port=None, - src_user=None, - dest_user=None, - recursive=True, - command_terminator=None, - chromeos_root=None, - src_cros=False, - dest_cros=False, - print_to_console=True): - src = os.path.expanduser(src) - dest = os.path.expanduser(dest) - - if recursive: - src = src + '/' - dest = dest + '/' - - if src_cros or dest_cros: - if self.logger: - self.logger.LogFatalIf( - src_cros == dest_cros, 'Only one of src_cros and desc_cros can ' - 'be True.') - self.logger.LogFatalIf(not chromeos_root, 'chromeos_root not given!') - 
elif src_cros == dest_cros or not chromeos_root: - sys.exit(1) - if src_cros: - cros_machine = src_machine - cros_port = src_port - host_machine = dest_machine - host_user = dest_user - else: - cros_machine = dest_machine - cros_port = dest_port - host_machine = src_machine - host_user = src_user - - command = self.RemoteAccessInitCommand(chromeos_root, cros_machine, - cros_port) - ssh_command = ('ssh -o StrictHostKeyChecking=no' + - ' -o UserKnownHostsFile=$(mktemp)' + - ' -i $TMP_PRIVATE_KEY') - if cros_port: - ssh_command += ' -p %s' % cros_port - rsync_prefix = '\nrsync -r -e "%s" ' % ssh_command - if dest_cros: - command += rsync_prefix + '%s root@%s:%s' % (src, cros_machine, dest) - else: - command += rsync_prefix + 'root@%s:%s %s' % (cros_machine, src, dest) - - return self.RunCommand(command, - machine=host_machine, - username=host_user, - command_terminator=command_terminator, - print_to_console=print_to_console) - - if dest_machine == src_machine: - command = 'rsync -a %s %s' % (src, dest) - else: - if src_machine is None: - src_machine = os.uname()[1] - src_user = getpass.getuser() - command = 'rsync -a %s@%s:%s %s' % (src_user, src_machine, src, dest) - return self.RunCommand(command, - machine=dest_machine, - username=dest_user, - command_terminator=command_terminator, - print_to_console=print_to_console) - - def RunCommand2(self, - cmd, - cwd=None, - line_consumer=None, - timeout=None, - shell=True, - join_stderr=True, - env=None, - except_handler=lambda p, e: None): - """Run the command with an extra feature line_consumer. - - This version allow developers to provide a line_consumer which will be - fed execution output lines. - - A line_consumer is a callback, which is given a chance to run for each - line the execution outputs (either to stdout or stderr). The - line_consumer must accept one and exactly one dict argument, the dict - argument has these items - - 'line' - The line output by the binary. Notice, this string includes - the trailing '\n'. - 'output' - Whether this is a stdout or stderr output, values are either - 'stdout' or 'stderr'. When join_stderr is True, this value - will always be 'output'. - 'pobject' - The object used to control execution, for example, call - pobject.kill(). - - Note: As this is written, the stdin for the process executed is - not associated with the stdin of the caller of this routine. - - Args: - cmd: Command in a single string. - cwd: Working directory for execution. - line_consumer: A function that will ba called by this function. See above - for details. - timeout: terminate command after this timeout. - shell: Whether to use a shell for execution. - join_stderr: Whether join stderr to stdout stream. - env: Execution environment. - except_handler: Callback for when exception is thrown during command - execution. Passed process object and exception. - - Returns: - Execution return code. 
-
-    Raises:
-      child_exception: if fails to start the command process (missing
-        permission, no such file, etc)
-    """
-
-    class StreamHandler(object):
-      """Internal utility class."""
-
-      def __init__(self, pobject, fd, name, line_consumer):
-        self._pobject = pobject
-        self._fd = fd
-        self._name = name
-        self._buf = ''
-        self._line_consumer = line_consumer
-
-      def read_and_notify_line(self):
-        t = os.read(fd, 1024)
-        self._buf = self._buf + t
-        self.notify_line()
-
-      def notify_line(self):
-        p = self._buf.find('\n')
-        while p >= 0:
-          self._line_consumer(line=self._buf[:p + 1],
-                              output=self._name,
-                              pobject=self._pobject)
-          if p < len(self._buf) - 1:
-            self._buf = self._buf[p + 1:]
-            p = self._buf.find('\n')
-          else:
-            self._buf = ''
-            p = -1
-            break
-
-      def notify_eos(self):
-        # Notify end of stream. The last line may not end with a '\n'.
-        if self._buf != '':
-          self._line_consumer(line=self._buf,
-                              output=self._name,
-                              pobject=self._pobject)
-          self._buf = ''
-
-    if self.log_level == 'verbose':
-      self.logger.LogCmd(cmd)
-    elif self.logger:
-      self.logger.LogCmdToFileOnly(cmd)
-
-    # We use setsid so that the child will have a different session id
-    # and we can easily kill the process group. This is also important
-    # because the child will be disassociated from the parent terminal.
-    # In this way the child cannot mess the parent's terminal.
-    pobject = None
-    try:
-      # pylint: disable=bad-option-value, subprocess-popen-preexec-fn
-      pobject = subprocess.Popen(
-          cmd,
-          cwd=cwd,
-          bufsize=1024,
-          env=env,
-          shell=shell,
-          universal_newlines=True,
-          stdout=subprocess.PIPE,
-          stderr=subprocess.STDOUT if join_stderr else subprocess.PIPE,
-          preexec_fn=os.setsid)
-
-      # We provide a default line_consumer
-      if line_consumer is None:
-        line_consumer = lambda **d: None
-      start_time = time.time()
-      poll = select.poll()
-      outfd = pobject.stdout.fileno()
-      poll.register(outfd, select.POLLIN | select.POLLPRI)
-      handlermap = {
-          outfd: StreamHandler(pobject, outfd, 'stdout', line_consumer)
-      }
-      if not join_stderr:
-        errfd = pobject.stderr.fileno()
-        poll.register(errfd, select.POLLIN | select.POLLPRI)
-        handlermap[errfd] = StreamHandler(pobject, errfd, 'stderr',
-                                          line_consumer)
-      while handlermap:
-        readables = poll.poll(300)
-        for (fd, evt) in readables:
-          handler = handlermap[fd]
-          if evt & (select.POLLPRI | select.POLLIN):
-            handler.read_and_notify_line()
-          elif evt & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
-            handler.notify_eos()
-            poll.unregister(fd)
-            del handlermap[fd]
-
-        if timeout is not None and (time.time() - start_time > timeout):
-          os.killpg(os.getpgid(pobject.pid), signal.SIGTERM)
-
-      return pobject.wait()
-    except BaseException as err:
-      except_handler(pobject, err)
-      raise
+            )
+        return self.RunCommand(
+            command,
+            machine=dest_machine,
+            username=dest_user,
+            command_terminator=command_terminator,
+            print_to_console=print_to_console,
+        )
+
+    def RunCommand2(
+        self,
+        cmd,
+        cwd=None,
+        line_consumer=None,
+        timeout=None,
+        shell=True,
+        join_stderr=True,
+        env=None,
+        except_handler=lambda p, e: None,
+    ):
+        """Run the command with an extra feature line_consumer.
+
+        This version allows developers to provide a line_consumer which will be
+        fed execution output lines.
+
+        A line_consumer is a callback, which is given a chance to run for each
+        line the execution outputs (either to stdout or stderr). The
+        line_consumer must accept one and exactly one dict argument, the dict
+        argument has these items -
+          'line' - The line output by the binary. Note that this string includes
+            the trailing '\n'.
+          'output' - Whether this is a stdout or stderr output, values are either
+            'stdout' or 'stderr'. When join_stderr is True, this value
+            will always be 'output'.
+          'pobject' - The object used to control execution, for example, call
+            pobject.kill().
+
+        Note: As this is written, the stdin for the process executed is
+        not associated with the stdin of the caller of this routine.
+
+        Args:
+          cmd: Command in a single string.
+          cwd: Working directory for execution.
+          line_consumer: A function that will be called by this function. See
+            above for details.
+          timeout: terminate command after this timeout.
+          shell: Whether to use a shell for execution.
+          join_stderr: Whether to join stderr into the stdout stream.
+          env: Execution environment.
+          except_handler: Callback for when an exception is thrown during
+            command execution. Passed the process object and the exception.
+
+        Returns:
+          Execution return code.
+
+        Raises:
+          child_exception: if it fails to start the command process (missing
+            permission, no such file, etc)
+        """
+
+        class StreamHandler(object):
+            """Internal utility class."""
+
+            def __init__(self, pobject, fd, name, line_consumer):
+                self._pobject = pobject
+                self._fd = fd
+                self._name = name
+                self._buf = ""
+                self._line_consumer = line_consumer
+
+            def read_and_notify_line(self):
+                # os.read() on the raw fd returns bytes; decode before
+                # appending to the str buffer.
+                t = os.read(self._fd, 1024).decode("utf-8", errors="replace")
+                self._buf = self._buf + t
+                self.notify_line()
+
+            def notify_line(self):
+                p = self._buf.find("\n")
+                while p >= 0:
+                    self._line_consumer(
+                        line=self._buf[: p + 1],
+                        output=self._name,
+                        pobject=self._pobject,
+                    )
+                    if p < len(self._buf) - 1:
+                        self._buf = self._buf[p + 1 :]
+                        p = self._buf.find("\n")
+                    else:
+                        self._buf = ""
+                        p = -1
+                        break
+
+            def notify_eos(self):
+                # Notify end of stream. The last line may not end with a '\n'.
+                if self._buf != "":
+                    self._line_consumer(
+                        line=self._buf, output=self._name, pobject=self._pobject
+                    )
+                    self._buf = ""
+
+        if self.log_level == "verbose":
+            self.logger.LogCmd(cmd)
+        elif self.logger:
+            self.logger.LogCmdToFileOnly(cmd)
+
+        # We use setsid so that the child will have a different session id
+        # and we can easily kill the process group. This is also important
+        # because the child will be disassociated from the parent terminal.
+        # In this way the child cannot mess the parent's terminal.
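
The setsid comment above is the heart of RunCommand2's cleanup story. A self-contained sketch of the same technique (the sleep commands are only stand-ins for a real workload):

    import os
    import signal
    import subprocess
    import time

    # Start the child in its own session so the whole process group can be
    # signalled without touching the caller's group or terminal.
    p = subprocess.Popen(
        "sleep 100 & sleep 200; wait", shell=True, preexec_fn=os.setsid
    )
    time.sleep(1)
    # One signal tears down the shell and everything it spawned.
    os.killpg(os.getpgid(p.pid), signal.SIGTERM)
    p.wait()
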
+ pobject = None + try: + # pylint: disable=bad-option-value, subprocess-popen-preexec-fn + pobject = subprocess.Popen( + cmd, + cwd=cwd, + bufsize=1024, + env=env, + shell=shell, + universal_newlines=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT if join_stderr else subprocess.PIPE, + preexec_fn=os.setsid, + ) + + # We provide a default line_consumer + if line_consumer is None: + line_consumer = lambda **d: None + start_time = time.time() + poll = select.poll() + outfd = pobject.stdout.fileno() + poll.register(outfd, select.POLLIN | select.POLLPRI) + handlermap = { + outfd: StreamHandler(pobject, outfd, "stdout", line_consumer) + } + if not join_stderr: + errfd = pobject.stderr.fileno() + poll.register(errfd, select.POLLIN | select.POLLPRI) + handlermap[errfd] = StreamHandler( + pobject, errfd, "stderr", line_consumer + ) + while handlermap: + readables = poll.poll(300) + for (fd, evt) in readables: + handler = handlermap[fd] + if evt & (select.POLLPRI | select.POLLIN): + handler.read_and_notify_line() + elif evt & ( + select.POLLHUP | select.POLLERR | select.POLLNVAL + ): + handler.notify_eos() + poll.unregister(fd) + del handlermap[fd] + + if timeout is not None and (time.time() - start_time > timeout): + os.killpg(os.getpgid(pobject.pid), signal.SIGTERM) + + return pobject.wait() + except BaseException as err: + except_handler(pobject, err) + raise class MockCommandExecuter(CommandExecuter): - """Mock class for class CommandExecuter.""" - - def RunCommandGeneric(self, - cmd, - return_output=False, - machine=None, - username=None, - command_terminator=None, - command_timeout=None, - terminated_timeout=10, - print_to_console=True, - env=None, - except_handler=lambda p, e: None): - assert not command_timeout - cmd = str(cmd) - if machine is None: - machine = 'localhost' - if username is None: - username = 'current' - logger.GetLogger().LogCmd('(Mock) ' + cmd, machine, username, - print_to_console) - return (0, '', '') - - def RunCommand(self, *args, **kwargs): - assert 'return_output' not in kwargs - kwargs['return_output'] = False - return self.RunCommandGeneric(*args, **kwargs)[0] - - def RunCommandWOutput(self, *args, **kwargs): - assert 'return_output' not in kwargs - kwargs['return_output'] = True - return self.RunCommandGeneric(*args, **kwargs) + """Mock class for class CommandExecuter.""" + + def RunCommandGeneric( + self, + cmd, + return_output=False, + machine=None, + username=None, + command_terminator=None, + command_timeout=None, + terminated_timeout=10, + print_to_console=True, + env=None, + except_handler=lambda p, e: None, + ): + assert not command_timeout + cmd = str(cmd) + if machine is None: + machine = "localhost" + if username is None: + username = "current" + logger.GetLogger().LogCmd( + "(Mock) " + cmd, machine, username, print_to_console + ) + return (0, "", "") + + def RunCommand(self, *args, **kwargs): + assert "return_output" not in kwargs + kwargs["return_output"] = False + return self.RunCommandGeneric(*args, **kwargs)[0] + + def RunCommandWOutput(self, *args, **kwargs): + assert "return_output" not in kwargs + kwargs["return_output"] = True + return self.RunCommandGeneric(*args, **kwargs) class CommandTerminator(object): - """Object to request termination of a command in execution.""" + """Object to request termination of a command in execution.""" - def __init__(self): - self.terminated = False + def __init__(self): + self.terminated = False - def Terminate(self): - self.terminated = True + def Terminate(self): + self.terminated = True - def 
IsTerminated(self): - return self.terminated + def IsTerminated(self): + return self.terminated diff --git a/cros_utils/command_executer_timeout_test.py b/cros_utils/command_executer_timeout_test.py index 6efbee74..2798d227 100755 --- a/cros_utils/command_executer_timeout_test.py +++ b/cros_utils/command_executer_timeout_test.py @@ -9,7 +9,8 @@ from __future__ import print_function -__author__ = 'asharif@google.com (Ahmad Sharif)' + +__author__ = "asharif@google.com (Ahmad Sharif)" import argparse import sys @@ -18,20 +19,20 @@ from cros_utils import command_executer def Usage(parser, message): - print('ERROR: %s' % message) - parser.print_help() - sys.exit(0) + print("ERROR: %s" % message) + parser.print_help() + sys.exit(0) def Main(argv): - parser = argparse.ArgumentParser() - _ = parser.parse_args(argv) + parser = argparse.ArgumentParser() + _ = parser.parse_args(argv) - command = 'sleep 1000' - ce = command_executer.GetCommandExecuter() - ce.RunCommand(command, command_timeout=1) - return 0 + command = "sleep 1000" + ce = command_executer.GetCommandExecuter() + ce.RunCommand(command, command_timeout=1) + return 0 -if __name__ == '__main__': - Main(sys.argv[1:]) +if __name__ == "__main__": + Main(sys.argv[1:]) diff --git a/cros_utils/command_executer_unittest.py b/cros_utils/command_executer_unittest.py index 7b023534..aade4eb0 100755 --- a/cros_utils/command_executer_unittest.py +++ b/cros_utils/command_executer_unittest.py @@ -15,18 +15,20 @@ from cros_utils import command_executer class CommandExecuterTest(unittest.TestCase): - """Test for CommandExecuter class.""" - - def testTimeout(self): - timeout = 1 - logging_level = 'average' - ce = command_executer.CommandExecuter(logging_level) - start = time.time() - command = 'sleep 20' - ce.RunCommand(command, command_timeout=timeout, terminated_timeout=timeout) - end = time.time() - self.assertTrue(round(end - start) == timeout) - - -if __name__ == '__main__': - unittest.main() + """Test for CommandExecuter class.""" + + def testTimeout(self): + timeout = 1 + logging_level = "average" + ce = command_executer.CommandExecuter(logging_level) + start = time.time() + command = "sleep 20" + ce.RunCommand( + command, command_timeout=timeout, terminated_timeout=timeout + ) + end = time.time() + self.assertTrue(round(end - start) == timeout) + + +if __name__ == "__main__": + unittest.main() diff --git a/cros_utils/constants.py b/cros_utils/constants.py index 58c0688e..caee1947 100644 --- a/cros_utils/constants.py +++ b/cros_utils/constants.py @@ -6,9 +6,9 @@ """Generic constants used accross modules. """ -__author__ = 'shenhan@google.com (Han Shen)' +__author__ = "shenhan@google.com (Han Shen)" -MOUNTED_TOOLCHAIN_ROOT = '/usr/local/toolchain_root' +MOUNTED_TOOLCHAIN_ROOT = "/usr/local/toolchain_root" # Root directory for night testing run. -CROSTC_WORKSPACE = '/usr/local/google/crostc' +CROSTC_WORKSPACE = "/usr/local/google/crostc" diff --git a/cros_utils/device_setup_utils.py b/cros_utils/device_setup_utils.py index 1a2e6cb3..33f934c9 100644 --- a/cros_utils/device_setup_utils.py +++ b/cros_utils/device_setup_utils.py @@ -12,526 +12,607 @@ This script provides utils to set device specs. 
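
The DutWrapper introduced below is typically driven end to end through SetupDevice(). A hypothetical invocation (the host name and dut_config values are made up; the keys mirror the ones the methods read):

    from cros_utils import logger
    from cros_utils.device_setup_utils import DutWrapper

    dw = DutWrapper(
        "/tmp/chromeos",
        "lumpy.cros2",
        log_level="verbose",
        logger=logger.GetLogger(),
        dut_config={
            "intel_pstate": "",
            "enable_aslr": False,
            "cpu_usage": "all",
            "cpu_freq_pct": 100,
            "cooldown_time": 0,
            "cooldown_temp": 40,
            "governor": "performance",
        },
    )
    cooldown_wait = dw.SetupDevice()
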
from __future__ import division from __future__ import print_function -__author__ = 'zhizhouy@google.com (Zhizhou Yang)' -import re -import time +__author__ = "zhizhouy@google.com (Zhizhou Yang)" from contextlib import contextmanager +import re +import time from cros_utils import command_executer class DutWrapper(object): - """Wrap DUT parameters inside.""" - - def __init__(self, - chromeos_root, - remote, - log_level='verbose', - logger=None, - ce=None, - dut_config=None): - self.chromeos_root = chromeos_root - self.remote = remote - self.log_level = log_level - self.logger = logger - self.ce = ce or command_executer.GetCommandExecuter(log_level=log_level) - self.dut_config = dut_config - - def RunCommandOnDut(self, command, ignore_status=False): - """Helper function to run command on DUT.""" - ret, msg, err_msg = self.ce.CrosRunCommandWOutput( - command, machine=self.remote, chromeos_root=self.chromeos_root) - - if ret: - err_msg = ('Command execution on DUT %s failed.\n' - 'Failing command: %s\n' - 'returned %d\n' - 'Error message: %s' % (self.remote, command, ret, err_msg)) - if ignore_status: - self.logger.LogError(err_msg + - '\n(Failure is considered non-fatal. Continue.)') - else: - self.logger.LogFatal(err_msg) - - return ret, msg, err_msg - - def DisableASLR(self): - """Disable ASLR on DUT.""" - disable_aslr = ('set -e; ' - 'if [[ -e /proc/sys/kernel/randomize_va_space ]]; then ' - ' echo 0 > /proc/sys/kernel/randomize_va_space; ' - 'fi') - if self.log_level == 'average': - self.logger.LogOutput('Disable ASLR.') - self.RunCommandOnDut(disable_aslr) - - def SetCpuGovernor(self, governor, ignore_status=False): - """Setup CPU Governor on DUT.""" - set_gov_cmd = ( - 'for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do ' - # Skip writing scaling_governor if cpu is offline. - ' [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} ' - ' && continue; ' - ' cd $f; ' - ' if [[ -e scaling_governor ]]; then ' - ' echo %s > scaling_governor; fi; ' - 'done; ') - if self.log_level == 'average': - self.logger.LogOutput('Setup CPU Governor: %s.' % governor) - ret, _, _ = self.RunCommandOnDut( - set_gov_cmd % governor, ignore_status=ignore_status) - return ret - - def DisableTurbo(self): - """Disable Turbo on DUT.""" - dis_turbo_cmd = ( - 'if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then ' - ' if grep -q 0 /sys/devices/system/cpu/intel_pstate/no_turbo; then ' - ' echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; ' - ' fi; ' - 'fi; ') - if self.log_level == 'average': - self.logger.LogOutput('Disable Turbo.') - self.RunCommandOnDut(dis_turbo_cmd) - - def SetupCpuUsage(self): - """Setup CPU usage. - - Based on self.dut_config['cpu_usage'] configure CPU cores - utilization. - """ - - if (self.dut_config['cpu_usage'] == 'big_only' or - self.dut_config['cpu_usage'] == 'little_only'): - _, arch, _ = self.RunCommandOnDut('uname -m') - - if arch.lower().startswith('arm') or arch.lower().startswith('aarch64'): - self.SetupArmCores() - - def SetupArmCores(self): - """Setup ARM big/little cores.""" - - # CPU implemeters/part numbers of big/LITTLE CPU. 
- # Format: dict(CPU implementer: set(CPU part numbers)) - LITTLE_CORES = { - '0x41': { - '0xd01', # Cortex A32 - '0xd03', # Cortex A53 - '0xd04', # Cortex A35 - '0xd05', # Cortex A55 - }, - } - BIG_CORES = { - '0x41': { - '0xd07', # Cortex A57 - '0xd08', # Cortex A72 - '0xd09', # Cortex A73 - '0xd0a', # Cortex A75 - '0xd0b', # Cortex A76 - }, - } - - # Values of CPU Implementer and CPU part number are exposed by cpuinfo. - # Format: - # ================= - # processor : 0 - # model name : ARMv8 Processor rev 4 (v8l) - # BogoMIPS : 48.00 - # Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 - # CPU implementer : 0x41 - # CPU architecture: 8 - # CPU variant : 0x0 - # CPU part : 0xd03 - # CPU revision : 4 - - _, cpuinfo, _ = self.RunCommandOnDut('cat /proc/cpuinfo') - - # List of all CPU cores: 0, 1, .. - proc_matches = re.findall(r'^processor\s*: (\d+)$', cpuinfo, re.MULTILINE) - # List of all corresponding CPU implementers - impl_matches = re.findall(r'^CPU implementer\s*: (0x[\da-f]+)$', cpuinfo, - re.MULTILINE) - # List of all corresponding CPU part numbers - part_matches = re.findall(r'^CPU part\s*: (0x[\da-f]+)$', cpuinfo, - re.MULTILINE) - assert len(proc_matches) == len(impl_matches) - assert len(part_matches) == len(impl_matches) - - all_cores = set(proc_matches) - dut_big_cores = { - core - for core, impl, part in zip(proc_matches, impl_matches, part_matches) - if impl in BIG_CORES and part in BIG_CORES[impl] - } - dut_lit_cores = { - core - for core, impl, part in zip(proc_matches, impl_matches, part_matches) - if impl in LITTLE_CORES and part in LITTLE_CORES[impl] - } - - if self.dut_config['cpu_usage'] == 'big_only': - cores_to_enable = dut_big_cores - cores_to_disable = all_cores - dut_big_cores - elif self.dut_config['cpu_usage'] == 'little_only': - cores_to_enable = dut_lit_cores - cores_to_disable = all_cores - dut_lit_cores - else: - self.logger.LogError( - 'cpu_usage=%s is not supported on ARM.\n' - 'Ignore ARM CPU setup and continue.' % self.dut_config['cpu_usage']) - return - - if cores_to_enable: - cmd_enable_cores = ('echo 1 | tee /sys/devices/system/cpu/cpu{%s}/online' - % ','.join(sorted(cores_to_enable))) - - cmd_disable_cores = '' - if cores_to_disable: - cmd_disable_cores = ( - 'echo 0 | tee /sys/devices/system/cpu/cpu{%s}/online' % ','.join( - sorted(cores_to_disable))) - - self.RunCommandOnDut('; '.join([cmd_enable_cores, cmd_disable_cores])) - else: - # If there are no cores enabled by dut_config then configuration - # is invalid for current platform and should be ignored. - self.logger.LogError( - '"cpu_usage" is invalid for targeted platform.\n' - 'dut_config[cpu_usage]=%s\n' - 'dut big cores: %s\n' - 'dut little cores: %s\n' - 'Ignore ARM CPU setup and continue.' % (self.dut_config['cpu_usage'], - dut_big_cores, dut_lit_cores)) - - def GetCpuOnline(self): - """Get online status of CPU cores. - - Return dict of {int(cpu_num): <0|1>}. 
- """ - get_cpu_online_cmd = ('paste -d" "' - ' <(ls /sys/devices/system/cpu/cpu*/online)' - ' <(cat /sys/devices/system/cpu/cpu*/online)') - _, online_output_str, _ = self.RunCommandOnDut(get_cpu_online_cmd) - - # Here is the output we expect to see: - # ----------------- - # /sys/devices/system/cpu/cpu0/online 0 - # /sys/devices/system/cpu/cpu1/online 1 - - cpu_online = {} - cpu_online_match = re.compile(r'^[/\S]+/cpu(\d+)/[/\S]+\s+(\d+)$') - for line in online_output_str.splitlines(): - match = cpu_online_match.match(line) - if match: - cpu = int(match.group(1)) - status = int(match.group(2)) - cpu_online[cpu] = status - # At least one CPU has to be online. - assert cpu_online - - return cpu_online - - def SetupCpuFreq(self, online_cores): - """Setup CPU frequency. - - Based on self.dut_config['cpu_freq_pct'] setup frequency of online CPU cores - to a supported value which is less or equal to (freq_pct * max_freq / 100) - limited by min_freq. - - NOTE: scaling_available_frequencies support is required. - Otherwise the function has no effect. - """ - freq_percent = self.dut_config['cpu_freq_pct'] - list_all_avail_freq_cmd = ('ls /sys/devices/system/cpu/cpu{%s}/cpufreq/' - 'scaling_available_frequencies') - # Ignore error to support general usage of frequency setup. - # Not all platforms support scaling_available_frequencies. - ret, all_avail_freq_str, _ = self.RunCommandOnDut( - list_all_avail_freq_cmd % ','.join(str(core) for core in online_cores), - ignore_status=True) - if ret or not all_avail_freq_str: - # No scalable frequencies available for the core. - return ret - for avail_freq_path in all_avail_freq_str.split(): - # Get available freq from every scaling_available_frequency path. - # Error is considered fatal in self.RunCommandOnDut(). - _, avail_freq_str, _ = self.RunCommandOnDut('cat ' + avail_freq_path) - assert avail_freq_str - - all_avail_freq = sorted( - int(freq_str) for freq_str in avail_freq_str.split()) - min_freq = all_avail_freq[0] - max_freq = all_avail_freq[-1] - # Calculate the frequency we are targeting. - target_freq = round(max_freq * freq_percent / 100) - # More likely it's not in the list of supported frequencies - # and our goal is to find the one which is less or equal. - # Default is min and we will try to maximize it. - avail_ngt_target = min_freq - # Find the largest not greater than the target. - for next_largest in reversed(all_avail_freq): - if next_largest <= target_freq: - avail_ngt_target = next_largest - break - - max_freq_path = avail_freq_path.replace('scaling_available_frequencies', - 'scaling_max_freq') - min_freq_path = avail_freq_path.replace('scaling_available_frequencies', - 'scaling_min_freq') - # With default ignore_status=False we expect 0 status or Fatal error. - self.RunCommandOnDut('echo %s | tee %s %s' % - (avail_ngt_target, max_freq_path, min_freq_path)) - - def WaitCooldown(self): - """Wait for DUT to cool down to certain temperature.""" - waittime = 0 - timeout_in_sec = int(self.dut_config['cooldown_time']) * 60 - # Temperature from sensors come in uCelsius units. - temp_in_ucels = int(self.dut_config['cooldown_temp']) * 1000 - sleep_interval = 30 - - # Wait until any of two events occurs: - # 1. CPU cools down to a specified temperature. - # 2. Timeout cooldown_time expires. - # For the case when targeted temperature is not reached within specified - # timeout the benchmark is going to start with higher initial CPU temp. 
- # In the worst case it may affect test results but at the same time we - # guarantee the upper bound of waiting time. - # TODO(denik): Report (or highlight) "high" CPU temperature in test results. - # "high" should be calculated based on empirical data per platform. - # Based on such reports we can adjust CPU configuration or - # cooldown limits accordingly. - while waittime < timeout_in_sec: - _, temp_output, _ = self.RunCommandOnDut( - 'cat /sys/class/thermal/thermal_zone*/temp', ignore_status=True) - if any(int(temp) > temp_in_ucels for temp in temp_output.split()): - time.sleep(sleep_interval) - waittime += sleep_interval - else: - # Exit the loop when: - # 1. Reported temp numbers from all thermal sensors do not exceed - # 'cooldown_temp' or - # 2. No data from the sensors. - break - - self.logger.LogOutput('Cooldown wait time: %.1f min' % (waittime / 60)) - return waittime - - def DecreaseWaitTime(self): - """Change the ten seconds wait time for pagecycler to two seconds.""" - FILE = '/usr/local/telemetry/src/tools/perf/page_sets/page_cycler_story.py' - ret = self.RunCommandOnDut('ls ' + FILE) - - if not ret: - sed_command = 'sed -i "s/_TTI_WAIT_TIME = 10/_TTI_WAIT_TIME = 2/g" ' - self.RunCommandOnDut(sed_command + FILE) - - def StopUI(self): - """Stop UI on DUT.""" - # Added "ignore_status" for the case when crosperf stops ui service which - # was already stopped. Command is going to fail with 1. - self.RunCommandOnDut('stop ui', ignore_status=True) - - def StartUI(self): - """Start UI on DUT.""" - # Similar to StopUI, `start ui` fails if the service is already started. - self.RunCommandOnDut('start ui', ignore_status=True) - - def KerncmdUpdateNeeded(self, intel_pstate): - """Check whether kernel cmdline update is needed. - - Args: - intel_pstate: kernel command line argument (active, passive, no_hwp) - - Returns: - True if update is needed. - """ - - good = 0 - - # Check that dut platform supports hwp - cmd = "grep -q '^flags.*hwp' /proc/cpuinfo" - ret_code, _, _ = self.RunCommandOnDut(cmd, ignore_status=True) - if ret_code != good: - # Intel hwp is not supported, update is not needed. - return False - - kern_cmdline_cmd = 'grep -q "intel_pstate=%s" /proc/cmdline' % intel_pstate - ret_code, _, _ = self.RunCommandOnDut(kern_cmdline_cmd, ignore_status=True) - self.logger.LogOutput('grep /proc/cmdline returned %d' % ret_code) - if (intel_pstate and ret_code == good or - not intel_pstate and ret_code != good): - # No need to updated cmdline if: - # 1. We are setting intel_pstate and we found it is already set. - # 2. Not using intel_pstate and it is not in cmdline. - return False - - # Otherwise we need to update intel_pstate. - return True - - def UpdateKerncmdIntelPstate(self, intel_pstate): - """Update kernel command line. - - Args: - intel_pstate: kernel command line argument (active, passive, no_hwp) - """ - - good = 0 - - # First phase is to remove rootfs verification to allow cmdline change. - remove_verif_cmd = ' '.join([ - '/usr/share/vboot/bin/make_dev_ssd.sh', - '--remove_rootfs_verification', - '--partition %d', - ]) - # Command for partition 2. - verif_part2_failed, _, _ = self.RunCommandOnDut( - remove_verif_cmd % 2, ignore_status=True) - # Command for partition 4 - # Some machines in the lab use partition 4 to boot from, - # so cmdline should be update for both partitions. - verif_part4_failed, _, _ = self.RunCommandOnDut( - remove_verif_cmd % 4, ignore_status=True) - if verif_part2_failed or verif_part4_failed: - self.logger.LogFatal( - 'ERROR. 
Failed to update kernel cmdline on partition %d.\n' - 'Remove verification failed with status %d' % - (2 if verif_part2_failed else 4, verif_part2_failed or - verif_part4_failed)) - - self.RunCommandOnDut('reboot && exit') - # Give enough time for dut to complete reboot - # TODO(denik): Replace with the function checking machine availability. - time.sleep(30) - - # Second phase to update intel_pstate in kernel cmdline. - kern_cmdline = '\n'.join([ - 'tmpfile=$(mktemp)', - 'partnumb=%d', - 'pstate=%s', - # Store kernel cmdline in a temp file. - '/usr/share/vboot/bin/make_dev_ssd.sh --partition ${partnumb}' - ' --save_config ${tmpfile}', - # Remove intel_pstate argument if present. - "sed -i -r 's/ intel_pstate=[A-Za-z_]+//g' ${tmpfile}.${partnumb}", - # Insert intel_pstate with a new value if it is set. - '[[ -n ${pstate} ]] &&' - ' sed -i -e \"s/ *$/ intel_pstate=${pstate}/\" ${tmpfile}.${partnumb}', - # Save the change in kernel cmdline. - # After completion we have to reboot. - '/usr/share/vboot/bin/make_dev_ssd.sh --partition ${partnumb}' - ' --set_config ${tmpfile}' - ]) - kern_part2_cmdline_cmd = kern_cmdline % (2, intel_pstate) - self.logger.LogOutput( - 'Command to change kernel command line: %s' % kern_part2_cmdline_cmd) - upd_part2_failed, _, _ = self.RunCommandOnDut( - kern_part2_cmdline_cmd, ignore_status=True) - # Again here we are updating cmdline for partition 4 - # in addition to partition 2. Without this some machines - # in the lab might fail. - kern_part4_cmdline_cmd = kern_cmdline % (4, intel_pstate) - self.logger.LogOutput( - 'Command to change kernel command line: %s' % kern_part4_cmdline_cmd) - upd_part4_failed, _, _ = self.RunCommandOnDut( - kern_part4_cmdline_cmd, ignore_status=True) - if upd_part2_failed or upd_part4_failed: - self.logger.LogFatal( - 'ERROR. Failed to update kernel cmdline on partition %d.\n' - 'intel_pstate update failed with status %d' % - (2 if upd_part2_failed else 4, upd_part2_failed or upd_part4_failed)) - - self.RunCommandOnDut('reboot && exit') - # Wait 30s after reboot. - time.sleep(30) - - # Verification phase. - # Check that cmdline was updated. - # Throw an exception if not. - kern_cmdline_cmd = 'grep -q "intel_pstate=%s" /proc/cmdline' % intel_pstate - ret_code, _, _ = self.RunCommandOnDut(kern_cmdline_cmd, ignore_status=True) - if (intel_pstate and ret_code != good or - not intel_pstate and ret_code == good): - # Kernel cmdline doesn't match input intel_pstate. - self.logger.LogFatal( - 'ERROR. Failed to update kernel cmdline. ' - 'Final verification failed with status %d' % ret_code) - - self.logger.LogOutput('Kernel cmdline updated successfully.') - - @contextmanager - def PauseUI(self): - """Stop UI before and Start UI after the context block. - - Context manager will make sure UI is always resumed at the end. - """ - self.StopUI() - try: - yield - - finally: - self.StartUI() - - def SetupDevice(self): - """Setup device to get it ready for testing. - - @Returns Wait time of cool down for this benchmark run. - """ - self.logger.LogOutput('Update kernel cmdline if necessary and reboot') - intel_pstate = self.dut_config['intel_pstate'] - if intel_pstate and self.KerncmdUpdateNeeded(intel_pstate): - self.UpdateKerncmdIntelPstate(intel_pstate) - - wait_time = 0 - # Pause UI while configuring the DUT. - # This will accelerate setup (waiting for cooldown has x10 drop) - # and help to reset a Chrome state left after the previous test. 
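
PauseUI above is the standard @contextmanager try/finally shape; reduced to its essence (print stand-ins for the real stop/start commands), the finally clause is what guarantees the UI is restarted even when the body raises:

    from contextlib import contextmanager

    @contextmanager
    def paused_ui(stop, start):
        stop()
        try:
            yield
        finally:
            start()  # Runs on normal exit and on exception alike.

    with paused_ui(lambda: print("stop ui"), lambda: print("start ui")):
        print("device setup runs while the UI is down")
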
- with self.PauseUI(): - # Unless the user turns on ASLR in the flag, we first disable ASLR - # before running the benchmarks - if not self.dut_config['enable_aslr']: - self.DisableASLR() - - # CPU usage setup comes first where we enable/disable cores. - self.SetupCpuUsage() - cpu_online_status = self.GetCpuOnline() - # List of online cores of type int (core number). - online_cores = [ - core for core, status in cpu_online_status.items() if status - ] - if self.dut_config['cooldown_time']: - # Setup power conservative mode for effective cool down. - # Set ignore status since powersave may no be available - # on all platforms and we are going to handle it. - ret = self.SetCpuGovernor('powersave', ignore_status=True) + """Wrap DUT parameters inside.""" + + def __init__( + self, + chromeos_root, + remote, + log_level="verbose", + logger=None, + ce=None, + dut_config=None, + ): + self.chromeos_root = chromeos_root + self.remote = remote + self.log_level = log_level + self.logger = logger + self.ce = ce or command_executer.GetCommandExecuter(log_level=log_level) + self.dut_config = dut_config + + def RunCommandOnDut(self, command, ignore_status=False): + """Helper function to run command on DUT.""" + ret, msg, err_msg = self.ce.CrosRunCommandWOutput( + command, machine=self.remote, chromeos_root=self.chromeos_root + ) + if ret: - # "powersave" is not available, use "ondemand". - # Still not a fatal error if it fails. - ret = self.SetCpuGovernor('ondemand', ignore_status=True) - # TODO(denik): Run comparison test for 'powersave' and 'ondemand' - # on scarlet and kevin64. - # We might have to consider reducing freq manually to the min - # if it helps to reduce waiting time. - wait_time = self.WaitCooldown() - - # Setup CPU governor for the benchmark run. - # It overwrites the previous governor settings. - governor = self.dut_config['governor'] - # FIXME(denik): Pass online cores to governor setup. - self.SetCpuGovernor(governor) - - # Disable Turbo and Setup CPU freq should ALWAYS proceed governor setup - # since governor may change: - # - frequency; - # - turbo/boost. - self.DisableTurbo() - self.SetupCpuFreq(online_cores) - - self.DecreaseWaitTime() - # FIXME(denik): Currently we are not recovering the previous cpufreq - # settings since we do reboot/setup every time anyway. - # But it may change in the future and then we have to recover the - # settings. - return wait_time + err_msg = ( + "Command execution on DUT %s failed.\n" + "Failing command: %s\n" + "returned %d\n" + "Error message: %s" % (self.remote, command, ret, err_msg) + ) + if ignore_status: + self.logger.LogError( + err_msg + "\n(Failure is considered non-fatal. Continue.)" + ) + else: + self.logger.LogFatal(err_msg) + + return ret, msg, err_msg + + def DisableASLR(self): + """Disable ASLR on DUT.""" + disable_aslr = ( + "set -e; " + "if [[ -e /proc/sys/kernel/randomize_va_space ]]; then " + " echo 0 > /proc/sys/kernel/randomize_va_space; " + "fi" + ) + if self.log_level == "average": + self.logger.LogOutput("Disable ASLR.") + self.RunCommandOnDut(disable_aslr) + + def SetCpuGovernor(self, governor, ignore_status=False): + """Setup CPU Governor on DUT.""" + set_gov_cmd = ( + "for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do " + # Skip writing scaling_governor if cpu is offline. 
+            " [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} "
+            "   && continue; "
+            " cd $f; "
+            " if [[ -e scaling_governor ]]; then "
+            "  echo %s > scaling_governor; fi; "
+            "done; "
+        )
+        if self.log_level == "average":
+            self.logger.LogOutput("Setup CPU Governor: %s." % governor)
+        ret, _, _ = self.RunCommandOnDut(
+            set_gov_cmd % governor, ignore_status=ignore_status
+        )
+        return ret
+
+    def DisableTurbo(self):
+        """Disable Turbo on DUT."""
+        dis_turbo_cmd = (
+            "if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then "
+            "  if grep -q 0 /sys/devices/system/cpu/intel_pstate/no_turbo; then "
+            "    echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; "
+            "  fi; "
+            "fi; "
+        )
+        if self.log_level == "average":
+            self.logger.LogOutput("Disable Turbo.")
+        self.RunCommandOnDut(dis_turbo_cmd)
+
+    def SetupCpuUsage(self):
+        """Setup CPU usage.
+
+        Based on self.dut_config['cpu_usage'], configure CPU core
+        utilization.
+        """
+
+        if (
+            self.dut_config["cpu_usage"] == "big_only"
+            or self.dut_config["cpu_usage"] == "little_only"
+        ):
+            _, arch, _ = self.RunCommandOnDut("uname -m")
+
+            if arch.lower().startswith("arm") or arch.lower().startswith(
+                "aarch64"
+            ):
+                self.SetupArmCores()
+
+    def SetupArmCores(self):
+        """Setup ARM big/little cores."""
+
+        # CPU implementers/part numbers of big/LITTLE CPUs.
+        # Format: dict(CPU implementer: set(CPU part numbers))
+        LITTLE_CORES = {
+            "0x41": {
+                "0xd01",  # Cortex A32
+                "0xd03",  # Cortex A53
+                "0xd04",  # Cortex A35
+                "0xd05",  # Cortex A55
+            },
+        }
+        BIG_CORES = {
+            "0x41": {
+                "0xd07",  # Cortex A57
+                "0xd08",  # Cortex A72
+                "0xd09",  # Cortex A73
+                "0xd0a",  # Cortex A75
+                "0xd0b",  # Cortex A76
+            },
+        }
+
+        # Values of CPU Implementer and CPU part number are exposed by cpuinfo.
+        # Format:
+        # =================
+        # processor       : 0
+        # model name      : ARMv8 Processor rev 4 (v8l)
+        # BogoMIPS        : 48.00
+        # Features        : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4
+        # CPU implementer : 0x41
+        # CPU architecture: 8
+        # CPU variant     : 0x0
+        # CPU part        : 0xd03
+        # CPU revision    : 4
+
+        _, cpuinfo, _ = self.RunCommandOnDut("cat /proc/cpuinfo")
+
+        # List of all CPU cores: 0, 1, ..
+        proc_matches = re.findall(
+            r"^processor\s*: (\d+)$", cpuinfo, re.MULTILINE
+        )
+        # List of all corresponding CPU implementers
+        impl_matches = re.findall(
+            r"^CPU implementer\s*: (0x[\da-f]+)$", cpuinfo, re.MULTILINE
+        )
+        # List of all corresponding CPU part numbers
+        part_matches = re.findall(
+            r"^CPU part\s*: (0x[\da-f]+)$", cpuinfo, re.MULTILINE
+        )
+        assert len(proc_matches) == len(impl_matches)
+        assert len(part_matches) == len(impl_matches)
+
+        all_cores = set(proc_matches)
+        dut_big_cores = {
+            core
+            for core, impl, part in zip(
+                proc_matches, impl_matches, part_matches
+            )
+            if impl in BIG_CORES and part in BIG_CORES[impl]
+        }
+        dut_lit_cores = {
+            core
+            for core, impl, part in zip(
+                proc_matches, impl_matches, part_matches
+            )
+            if impl in LITTLE_CORES and part in LITTLE_CORES[impl]
+        }
+
+        if self.dut_config["cpu_usage"] == "big_only":
+            cores_to_enable = dut_big_cores
+            cores_to_disable = all_cores - dut_big_cores
+        elif self.dut_config["cpu_usage"] == "little_only":
+            cores_to_enable = dut_lit_cores
+            cores_to_disable = all_cores - dut_lit_cores
+        else:
+            self.logger.LogError(
+                "cpu_usage=%s is not supported on ARM.\n"
+                "Ignore ARM CPU setup and continue."
+                % self.dut_config["cpu_usage"]
+            )
+            return
+
+        if cores_to_enable:
+            cmd_enable_cores = (
+                "echo 1 | tee /sys/devices/system/cpu/cpu{%s}/online"
+                % ",".join(sorted(cores_to_enable))
+            )
+
+            cmd_disable_cores = ""
+            if cores_to_disable:
+                cmd_disable_cores = (
+                    "echo 0 | tee /sys/devices/system/cpu/cpu{%s}/online"
+                    % ",".join(sorted(cores_to_disable))
+                )
+
+            self.RunCommandOnDut(
+                "; ".join([cmd_enable_cores, cmd_disable_cores])
+            )
+        else:
+            # If there are no cores enabled by dut_config then the
+            # configuration is invalid for the current platform and should
+            # be ignored.
+            self.logger.LogError(
+                '"cpu_usage" is invalid for targeted platform.\n'
+                "dut_config[cpu_usage]=%s\n"
+                "dut big cores: %s\n"
+                "dut little cores: %s\n"
+                "Ignore ARM CPU setup and continue."
+                % (self.dut_config["cpu_usage"], dut_big_cores, dut_lit_cores)
+            )
+
+    def GetCpuOnline(self):
+        """Get online status of CPU cores.
+
+        Return dict of {int(cpu_num): <0|1>}.
+        """
+        get_cpu_online_cmd = (
+            'paste -d" "'
+            " <(ls /sys/devices/system/cpu/cpu*/online)"
+            " <(cat /sys/devices/system/cpu/cpu*/online)"
+        )
+        _, online_output_str, _ = self.RunCommandOnDut(get_cpu_online_cmd)
+
+        # Here is the output we expect to see:
+        # -----------------
+        # /sys/devices/system/cpu/cpu0/online 0
+        # /sys/devices/system/cpu/cpu1/online 1
+
+        cpu_online = {}
+        cpu_online_match = re.compile(r"^[/\S]+/cpu(\d+)/[/\S]+\s+(\d+)$")
+        for line in online_output_str.splitlines():
+            match = cpu_online_match.match(line)
+            if match:
+                cpu = int(match.group(1))
+                status = int(match.group(2))
+                cpu_online[cpu] = status
+        # At least one CPU has to be online.
+        assert cpu_online
+
+        return cpu_online
+
+    def SetupCpuFreq(self, online_cores):
+        """Setup CPU frequency.
+
+        Based on self.dut_config['cpu_freq_pct'], set the frequency of online
+        CPU cores to a supported value which is less than or equal to
+        (freq_pct * max_freq / 100), bounded below by min_freq.
+
+        NOTE: scaling_available_frequencies support is required.
+        Otherwise the function has no effect.
+        """
+        freq_percent = self.dut_config["cpu_freq_pct"]
+        list_all_avail_freq_cmd = (
+            "ls /sys/devices/system/cpu/cpu{%s}/cpufreq/"
+            "scaling_available_frequencies"
+        )
+        # Ignore error to support general usage of frequency setup.
+        # Not all platforms support scaling_available_frequencies.
+        ret, all_avail_freq_str, _ = self.RunCommandOnDut(
+            list_all_avail_freq_cmd
+            % ",".join(str(core) for core in online_cores),
+            ignore_status=True,
+        )
+        if ret or not all_avail_freq_str:
+            # No scalable frequencies available for the cores.
+            return ret
+        for avail_freq_path in all_avail_freq_str.split():
+            # Get available freq from every scaling_available_frequencies path.
+            # Error is considered fatal in self.RunCommandOnDut().
+            _, avail_freq_str, _ = self.RunCommandOnDut(
+                "cat " + avail_freq_path
+            )
+            assert avail_freq_str
+
+            all_avail_freq = sorted(
+                int(freq_str) for freq_str in avail_freq_str.split()
+            )
+            min_freq = all_avail_freq[0]
+            max_freq = all_avail_freq[-1]
+            # Calculate the frequency we are targeting.
+            target_freq = round(max_freq * freq_percent / 100)
+            # Most likely it's not in the list of supported frequencies,
+            # and our goal is to find the one which is less than or equal.
+            # Default is min and we will try to maximize it.
+            avail_ngt_target = min_freq
+            # Find the largest not greater than the target.
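
The loop that follows scans for the largest available frequency not greater than the target. Under the same assumption that all_avail_freq is sorted ascending, the selection can also be expressed with bisect (the numbers below are illustrative):

    import bisect

    all_avail_freq = [500000, 1000000, 1500000, 2000000]
    target_freq = 1700000
    min_freq = all_avail_freq[0]
    # Index of the first entry above the target; the entry before it, if
    # any, is the largest frequency <= target. Otherwise fall back to min_freq.
    i = bisect.bisect_right(all_avail_freq, target_freq)
    avail_ngt_target = all_avail_freq[i - 1] if i else min_freq
    assert avail_ngt_target == 1500000
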
+            for next_largest in reversed(all_avail_freq):
+                if next_largest <= target_freq:
+                    avail_ngt_target = next_largest
+                    break
+
+            max_freq_path = avail_freq_path.replace(
+                "scaling_available_frequencies", "scaling_max_freq"
+            )
+            min_freq_path = avail_freq_path.replace(
+                "scaling_available_frequencies", "scaling_min_freq"
+            )
+            # With default ignore_status=False we expect 0 status or Fatal error.
+            self.RunCommandOnDut(
+                "echo %s | tee %s %s"
+                % (avail_ngt_target, max_freq_path, min_freq_path)
+            )
+
+    def WaitCooldown(self):
+        """Wait for DUT to cool down to a certain temperature."""
+        waittime = 0
+        timeout_in_sec = int(self.dut_config["cooldown_time"]) * 60
+        # Temperatures from the sensors come in uCelsius units.
+        temp_in_ucels = int(self.dut_config["cooldown_temp"]) * 1000
+        sleep_interval = 30
+
+        # Wait until either of two events occurs:
+        # 1. CPU cools down to a specified temperature.
+        # 2. Timeout cooldown_time expires.
+        # If the targeted temperature is not reached within the specified
+        # timeout, the benchmark is going to start with a higher initial CPU
+        # temp. In the worst case it may affect test results, but at the same
+        # time we guarantee the upper bound of waiting time.
+        # TODO(denik): Report (or highlight) "high" CPU temperature in test results.
+        # "high" should be calculated based on empirical data per platform.
+        # Based on such reports we can adjust CPU configuration or
+        # cooldown limits accordingly.
+        while waittime < timeout_in_sec:
+            _, temp_output, _ = self.RunCommandOnDut(
+                "cat /sys/class/thermal/thermal_zone*/temp", ignore_status=True
+            )
+            if any(int(temp) > temp_in_ucels for temp in temp_output.split()):
+                time.sleep(sleep_interval)
+                waittime += sleep_interval
+            else:
+                # Exit the loop when:
+                # 1. Reported temp numbers from all thermal sensors do not exceed
+                # 'cooldown_temp' or
+                # 2. No data from the sensors.
+                break
+
+        self.logger.LogOutput("Cooldown wait time: %.1f min" % (waittime / 60))
+        return waittime
+
+    def DecreaseWaitTime(self):
+        """Change the ten-second wait time for pagecycler to two seconds."""
+        FILE = (
+            "/usr/local/telemetry/src/tools/perf/page_sets/page_cycler_story.py"
+        )
+        ret = self.RunCommandOnDut("ls " + FILE)
+
+        if not ret:
+            sed_command = 'sed -i "s/_TTI_WAIT_TIME = 10/_TTI_WAIT_TIME = 2/g" '
+            self.RunCommandOnDut(sed_command + FILE)
+
+    def StopUI(self):
+        """Stop UI on DUT."""
+        # Added "ignore_status" for the case when crosperf stops the ui service
+        # which was already stopped. The command is going to fail with 1.
+        self.RunCommandOnDut("stop ui", ignore_status=True)
+
+    def StartUI(self):
+        """Start UI on DUT."""
+        # Similar to StopUI, `start ui` fails if the service is already started.
+        self.RunCommandOnDut("start ui", ignore_status=True)
+
+    def KerncmdUpdateNeeded(self, intel_pstate):
+        """Check whether kernel cmdline update is needed.
+
+        Args:
+          intel_pstate: kernel command line argument (active, passive, no_hwp)
+
+        Returns:
+          True if update is needed.
+        """
+
+        good = 0
+
+        # Check that the DUT platform supports hwp.
+        cmd = "grep -q '^flags.*hwp' /proc/cpuinfo"
+        ret_code, _, _ = self.RunCommandOnDut(cmd, ignore_status=True)
+        if ret_code != good:
+            # Intel hwp is not supported, update is not needed.
+            return False
+
+        kern_cmdline_cmd = (
+            'grep -q "intel_pstate=%s" /proc/cmdline' % intel_pstate
+        )
+        ret_code, _, _ = self.RunCommandOnDut(
+            kern_cmdline_cmd, ignore_status=True
+        )
+        self.logger.LogOutput("grep /proc/cmdline returned %d" % ret_code)
+        if (
+            intel_pstate
+            and ret_code == good
+            or not intel_pstate
+            and ret_code != good
+        ):
+            # No need to update cmdline if:
+            # 1. We are setting intel_pstate and we found it is already set.
+            # 2. Not using intel_pstate and it is not in cmdline.
+            return False
+
+        # Otherwise we need to update intel_pstate.
+        return True
+
+    def UpdateKerncmdIntelPstate(self, intel_pstate):
+        """Update kernel command line.
+
+        Args:
+          intel_pstate: kernel command line argument (active, passive, no_hwp)
+        """
+
+        good = 0
+
+        # First phase is to remove rootfs verification to allow cmdline change.
+        remove_verif_cmd = " ".join(
+            [
+                "/usr/share/vboot/bin/make_dev_ssd.sh",
+                "--remove_rootfs_verification",
+                "--partition %d",
+            ]
+        )
+        # Command for partition 2.
+        verif_part2_failed, _, _ = self.RunCommandOnDut(
+            remove_verif_cmd % 2, ignore_status=True
+        )
+        # Command for partition 4.
+        # Some machines in the lab use partition 4 to boot from,
+        # so cmdline should be updated for both partitions.
+        verif_part4_failed, _, _ = self.RunCommandOnDut(
+            remove_verif_cmd % 4, ignore_status=True
+        )
+        if verif_part2_failed or verif_part4_failed:
+            self.logger.LogFatal(
+                "ERROR. Failed to update kernel cmdline on partition %d.\n"
+                "Remove verification failed with status %d"
+                % (
+                    2 if verif_part2_failed else 4,
+                    verif_part2_failed or verif_part4_failed,
+                )
+            )
+
+        self.RunCommandOnDut("reboot && exit")
+        # Give enough time for the DUT to complete reboot.
+        # TODO(denik): Replace with the function checking machine availability.
+        time.sleep(30)
+
+        # Second phase to update intel_pstate in kernel cmdline.
+        kern_cmdline = "\n".join(
+            [
+                "tmpfile=$(mktemp)",
+                "partnumb=%d",
+                "pstate=%s",
+                # Store kernel cmdline in a temp file.
+                "/usr/share/vboot/bin/make_dev_ssd.sh --partition ${partnumb}"
+                " --save_config ${tmpfile}",
+                # Remove intel_pstate argument if present.
+                "sed -i -r 's/ intel_pstate=[A-Za-z_]+//g' ${tmpfile}.${partnumb}",
+                # Insert intel_pstate with a new value if it is set.
+                "[[ -n ${pstate} ]] &&"
+                ' sed -i -e "s/ *$/ intel_pstate=${pstate}/" ${tmpfile}.${partnumb}',
+                # Save the change in kernel cmdline.
+                # After completion we have to reboot.
+                "/usr/share/vboot/bin/make_dev_ssd.sh --partition ${partnumb}"
+                " --set_config ${tmpfile}",
+            ]
+        )
+        kern_part2_cmdline_cmd = kern_cmdline % (2, intel_pstate)
+        self.logger.LogOutput(
+            "Command to change kernel command line: %s" % kern_part2_cmdline_cmd
+        )
+        upd_part2_failed, _, _ = self.RunCommandOnDut(
+            kern_part2_cmdline_cmd, ignore_status=True
+        )
+        # Again here we are updating cmdline for partition 4
+        # in addition to partition 2. Without this some machines
+        # in the lab might fail.
+        kern_part4_cmdline_cmd = kern_cmdline % (4, intel_pstate)
+        self.logger.LogOutput(
+            "Command to change kernel command line: %s" % kern_part4_cmdline_cmd
+        )
+        upd_part4_failed, _, _ = self.RunCommandOnDut(
+            kern_part4_cmdline_cmd, ignore_status=True
+        )
+        if upd_part2_failed or upd_part4_failed:
+            self.logger.LogFatal(
+                "ERROR. Failed to update kernel cmdline on partition %d.\n"
+                "intel_pstate update failed with status %d"
+                % (
+                    2 if upd_part2_failed else 4,
+                    upd_part2_failed or upd_part4_failed,
+                )
+            )
+
+        self.RunCommandOnDut("reboot && exit")
+        # Wait 30s after reboot.
+        time.sleep(30)
+
+        # Verification phase.
+        # Check that cmdline was updated.
+        # Throw an exception if not.
+        kern_cmdline_cmd = (
+            'grep -q "intel_pstate=%s" /proc/cmdline' % intel_pstate
+        )
+        ret_code, _, _ = self.RunCommandOnDut(
+            kern_cmdline_cmd, ignore_status=True
+        )
+        if (
+            intel_pstate
+            and ret_code != good
+            or not intel_pstate
+            and ret_code == good
+        ):
+            # Kernel cmdline doesn't match input intel_pstate.
+            self.logger.LogFatal(
+                "ERROR. Failed to update kernel cmdline. "
+                "Final verification failed with status %d" % ret_code
+            )
+
+        self.logger.LogOutput("Kernel cmdline updated successfully.")
+
+    @contextmanager
+    def PauseUI(self):
+        """Stop UI before and Start UI after the context block.
+
+        Context manager will make sure UI is always resumed at the end.
+        """
+        self.StopUI()
+        try:
+            yield
+
+        finally:
+            self.StartUI()
+
+    def SetupDevice(self):
+        """Setup device to get it ready for testing.
+
+        @Returns Wait time of cool down for this benchmark run.
+        """
+        self.logger.LogOutput("Update kernel cmdline if necessary and reboot")
+        intel_pstate = self.dut_config["intel_pstate"]
+        if intel_pstate and self.KerncmdUpdateNeeded(intel_pstate):
+            self.UpdateKerncmdIntelPstate(intel_pstate)
+
+        wait_time = 0
+        # Pause UI while configuring the DUT.
+        # This will accelerate setup (waiting for cooldown has x10 drop)
+        # and help to reset a Chrome state left after the previous test.
+        with self.PauseUI():
+            # Unless the user turns on ASLR in the flag, we first disable ASLR
+            # before running the benchmarks.
+            if not self.dut_config["enable_aslr"]:
+                self.DisableASLR()
+
+            # CPU usage setup comes first where we enable/disable cores.
+            self.SetupCpuUsage()
+            cpu_online_status = self.GetCpuOnline()
+            # List of online cores of type int (core number).
+            online_cores = [
+                core for core, status in cpu_online_status.items() if status
+            ]
+            if self.dut_config["cooldown_time"]:
+                # Setup power conservative mode for effective cool down.
+                # Set ignore status since powersave may not be available
+                # on all platforms and we are going to handle it.
+                ret = self.SetCpuGovernor("powersave", ignore_status=True)
+                if ret:
+                    # "powersave" is not available, use "ondemand".
+                    # Still not a fatal error if it fails.
+                    ret = self.SetCpuGovernor("ondemand", ignore_status=True)
+                # TODO(denik): Run comparison test for 'powersave' and 'ondemand'
+                # on scarlet and kevin64.
+                # We might have to consider reducing freq manually to the min
+                # if it helps to reduce waiting time.
+                wait_time = self.WaitCooldown()
+
+            # Setup CPU governor for the benchmark run.
+            # It overwrites the previous governor settings.
+            governor = self.dut_config["governor"]
+            # FIXME(denik): Pass online cores to governor setup.
+            self.SetCpuGovernor(governor)
+
+            # DisableTurbo and SetupCpuFreq should ALWAYS follow the governor
+            # setup, since the governor may change:
+            # - frequency;
+            # - turbo/boost.
+            self.DisableTurbo()
+            self.SetupCpuFreq(online_cores)
+
+            self.DecreaseWaitTime()
+            # FIXME(denik): Currently we are not recovering the previous cpufreq
+            # settings since we do reboot/setup every time anyway.
+            # But that may change in the future, and then we will have to
+            # restore the settings.
+ return wait_time diff --git a/cros_utils/device_setup_utils_unittest.py b/cros_utils/device_setup_utils_unittest.py index f546e881..76775c4d 100755 --- a/cros_utils/device_setup_utils_unittest.py +++ b/cros_utils/device_setup_utils_unittest.py @@ -10,7 +10,6 @@ from __future__ import print_function import time - import unittest from unittest import mock @@ -18,6 +17,7 @@ from cros_utils import command_executer from cros_utils import logger from cros_utils.device_setup_utils import DutWrapper + BIG_LITTLE_CPUINFO = """processor : 0 model name : ARMv8 Processor rev 4 (v8l) BogoMIPS : 48.00 @@ -94,609 +94,713 @@ Serial : 0000000000000000 class DutWrapperTest(unittest.TestCase): - """Class of DutWrapper test.""" - real_logger = logger.GetLogger() - - mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) - mock_logger = mock.Mock(spec=logger.Logger) - - def __init__(self, *args, **kwargs): - super(DutWrapperTest, self).__init__(*args, **kwargs) - - def setUp(self): - self.dw = DutWrapper( - '/tmp/chromeos', - 'lumpy.cros2', - log_level='verbose', - logger=self.mock_logger, - ce=self.mock_cmd_exec, - dut_config={}) - - @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput') - def test_run_command_on_dut(self, mock_cros_runcmd): - self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd - mock_cros_runcmd.return_value = (0, '', '') - mock_cros_runcmd.assert_not_called() - self.dw.RunCommandOnDut('run command;') - mock_cros_runcmd.assert_called_once_with( - 'run command;', chromeos_root='/tmp/chromeos', machine='lumpy.cros2') - - @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput') - def test_dut_wrapper_fatal_error(self, mock_cros_runcmd): - self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd - # Command returns error 1. - mock_cros_runcmd.return_value = (1, '', 'Error!') - mock_cros_runcmd.assert_not_called() - self.dw.RunCommandOnDut('run command;') - mock_cros_runcmd.assert_called_once_with( - 'run command;', chromeos_root='/tmp/chromeos', machine='lumpy.cros2') - # Error status causes log fatal. - self.assertEqual( - self.mock_logger.method_calls[-1], - mock.call.LogFatal('Command execution on DUT lumpy.cros2 failed.\n' - 'Failing command: run command;\nreturned 1\n' - 'Error message: Error!')) - - @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput') - def test_dut_wrapper_ignore_error(self, mock_cros_runcmd): - self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd - # Command returns error 1. - mock_cros_runcmd.return_value = (1, '', 'Error!') - self.dw.RunCommandOnDut('run command;', ignore_status=True) - mock_cros_runcmd.assert_called_once_with( - 'run command;', chromeos_root='/tmp/chromeos', machine='lumpy.cros2') - # Error status is not fatal. LogError records the error message. - self.assertEqual( - self.mock_logger.method_calls[-1], - mock.call.LogError('Command execution on DUT lumpy.cros2 failed.\n' - 'Failing command: run command;\nreturned 1\n' - 'Error message: Error!\n' - '(Failure is considered non-fatal. 
Continue.)')) - - def test_disable_aslr(self): - self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', '')) - self.dw.DisableASLR() - # pyformat: disable - set_cpu_cmd = ('set -e; ' - 'if [[ -e /proc/sys/kernel/randomize_va_space ]]; then ' - ' echo 0 > /proc/sys/kernel/randomize_va_space; ' - 'fi') - self.dw.RunCommandOnDut.assert_called_once_with(set_cpu_cmd) - - def test_set_cpu_governor(self): - self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', '')) - self.dw.SetCpuGovernor('new_governor', ignore_status=False) - set_cpu_cmd = ( - 'for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do ' - # Skip writing scaling_governor if cpu is offline. - ' [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} ' - ' && continue; ' - ' cd $f; ' - ' if [[ -e scaling_governor ]]; then ' - ' echo %s > scaling_governor; fi; ' - 'done; ') - self.dw.RunCommandOnDut.assert_called_once_with( - set_cpu_cmd % 'new_governor', ignore_status=False) - - def test_set_cpu_governor_propagate_error(self): - self.dw.RunCommandOnDut = mock.Mock(return_value=(1, '', 'Error.')) - self.dw.SetCpuGovernor('non-exist_governor') - set_cpu_cmd = ( - 'for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do ' - # Skip writing scaling_governor if cpu is not online. - ' [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} ' - ' && continue; ' - ' cd $f; ' - ' if [[ -e scaling_governor ]]; then ' - ' echo %s > scaling_governor; fi; ' - 'done; ') - # By default error status is fatal. - self.dw.RunCommandOnDut.assert_called_once_with( - set_cpu_cmd % 'non-exist_governor', ignore_status=False) - - def test_set_cpu_governor_ignore_status(self): - self.dw.RunCommandOnDut = mock.Mock(return_value=(1, '', 'Error.')) - ret_code = self.dw.SetCpuGovernor('non-exist_governor', ignore_status=True) - set_cpu_cmd = ( - 'for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do ' - # Skip writing scaling_governor if cpu is not online. 
- ' [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} ' - ' && continue; ' - ' cd $f; ' - ' if [[ -e scaling_governor ]]; then ' - ' echo %s > scaling_governor; fi; ' - 'done; ') - self.dw.RunCommandOnDut.assert_called_once_with( - set_cpu_cmd % 'non-exist_governor', ignore_status=True) - self.assertEqual(ret_code, 1) - - def test_disable_turbo(self): - self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', '')) - self.dw.DisableTurbo() - set_cpu_cmd = ( - # Disable Turbo in Intel pstate driver - 'if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then ' - ' if grep -q 0 /sys/devices/system/cpu/intel_pstate/no_turbo; then ' - ' echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; ' - ' fi; ' - 'fi; ') - self.dw.RunCommandOnDut.assert_called_once_with(set_cpu_cmd) - - def test_get_cpu_online_two(self): - """Test one digit CPU #.""" - self.dw.RunCommandOnDut = mock.Mock( - return_value=(0, '/sys/devices/system/cpu/cpu0/online 0\n' - '/sys/devices/system/cpu/cpu1/online 1\n', '')) - cpu_online = self.dw.GetCpuOnline() - self.assertEqual(cpu_online, {0: 0, 1: 1}) - - def test_get_cpu_online_twelve(self): - """Test two digit CPU #.""" - self.dw.RunCommandOnDut = mock.Mock( - return_value=(0, '/sys/devices/system/cpu/cpu0/online 1\n' - '/sys/devices/system/cpu/cpu1/online 0\n' - '/sys/devices/system/cpu/cpu10/online 1\n' - '/sys/devices/system/cpu/cpu11/online 1\n' - '/sys/devices/system/cpu/cpu2/online 1\n' - '/sys/devices/system/cpu/cpu3/online 0\n' - '/sys/devices/system/cpu/cpu4/online 1\n' - '/sys/devices/system/cpu/cpu5/online 0\n' - '/sys/devices/system/cpu/cpu6/online 1\n' - '/sys/devices/system/cpu/cpu7/online 0\n' - '/sys/devices/system/cpu/cpu8/online 1\n' - '/sys/devices/system/cpu/cpu9/online 0\n', '')) - cpu_online = self.dw.GetCpuOnline() - self.assertEqual(cpu_online, { - 0: 1, - 1: 0, - 2: 1, - 3: 0, - 4: 1, - 5: 0, - 6: 1, - 7: 0, - 8: 1, - 9: 0, - 10: 1, - 11: 1 - }) - - def test_get_cpu_online_no_output(self): - """Test error case, no output.""" - self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', '')) - with self.assertRaises(AssertionError): - self.dw.GetCpuOnline() - - def test_get_cpu_online_command_error(self): - """Test error case, command error.""" - self.dw.RunCommandOnDut = mock.Mock(side_effect=AssertionError) - with self.assertRaises(AssertionError): - self.dw.GetCpuOnline() - - @mock.patch.object(DutWrapper, 'SetupArmCores') - def test_setup_cpu_usage_little_on_arm(self, mock_setup_arm): - self.dw.SetupArmCores = mock_setup_arm - self.dw.RunCommandOnDut = mock.Mock(return_value=(0, 'armv7l', '')) - self.dw.dut_config['cpu_usage'] = 'little_only' - self.dw.SetupCpuUsage() - self.dw.SetupArmCores.assert_called_once_with() - - @mock.patch.object(DutWrapper, 'SetupArmCores') - def test_setup_cpu_usage_big_on_aarch64(self, mock_setup_arm): - self.dw.SetupArmCores = mock_setup_arm - self.dw.RunCommandOnDut = mock.Mock(return_value=(0, 'aarch64', '')) - self.dw.dut_config['cpu_usage'] = 'big_only' - self.dw.SetupCpuUsage() - self.dw.SetupArmCores.assert_called_once_with() - - @mock.patch.object(DutWrapper, 'SetupArmCores') - def test_setup_cpu_usage_big_on_intel(self, mock_setup_arm): - self.dw.SetupArmCores = mock_setup_arm - self.dw.RunCommandOnDut = mock.Mock(return_value=(0, 'x86_64', '')) - self.dw.dut_config['cpu_usage'] = 'big_only' - self.dw.SetupCpuUsage() - # Check that SetupArmCores not called with invalid setup. 
- self.dw.SetupArmCores.assert_not_called() - - @mock.patch.object(DutWrapper, 'SetupArmCores') - def test_setup_cpu_usage_all_on_intel(self, mock_setup_arm): - self.dw.SetupArmCores = mock_setup_arm - self.dw.RunCommandOnDut = mock.Mock(return_value=(0, 'x86_64', '')) - self.dw.dut_config['cpu_usage'] = 'all' - self.dw.SetupCpuUsage() - # Check that SetupArmCores not called in general case. - self.dw.SetupArmCores.assert_not_called() - - def test_setup_arm_cores_big_on_big_little(self): - self.dw.RunCommandOnDut = mock.Mock(side_effect=[ - (0, BIG_LITTLE_CPUINFO, ''), - (0, '', ''), - ]) - self.dw.dut_config['cpu_usage'] = 'big_only' - self.dw.SetupArmCores() - self.dw.RunCommandOnDut.assert_called_with( - 'echo 1 | tee /sys/devices/system/cpu/cpu{2}/online; ' - 'echo 0 | tee /sys/devices/system/cpu/cpu{0,1}/online') - - def test_setup_arm_cores_little_on_big_little(self): - self.dw.RunCommandOnDut = mock.Mock(side_effect=[ - (0, BIG_LITTLE_CPUINFO, ''), - (0, '', ''), - ]) - self.dw.dut_config['cpu_usage'] = 'little_only' - self.dw.SetupArmCores() - self.dw.RunCommandOnDut.assert_called_with( - 'echo 1 | tee /sys/devices/system/cpu/cpu{0,1}/online; ' - 'echo 0 | tee /sys/devices/system/cpu/cpu{2}/online') - - def test_setup_arm_cores_invalid_config(self): - self.dw.RunCommandOnDut = mock.Mock(side_effect=[ - (0, LITTLE_ONLY_CPUINFO, ''), - (0, '', ''), - ]) - self.dw.dut_config['cpu_usage'] = 'big_only' - self.dw.SetupArmCores() - # Check that setup command is not sent when trying - # to use 'big_only' on a platform with all little cores. - self.dw.RunCommandOnDut.assert_called_once_with('cat /proc/cpuinfo') - - def test_setup_arm_cores_not_big_little(self): - self.dw.RunCommandOnDut = mock.Mock(side_effect=[ - (0, NOT_BIG_LITTLE_CPUINFO, ''), - (0, '', ''), - ]) - self.dw.dut_config['cpu_usage'] = 'big_only' - self.dw.SetupArmCores() - # Check that setup command is not sent when trying - # to use 'big_only' on a platform w/o support of big/little. - self.dw.RunCommandOnDut.assert_called_once_with('cat /proc/cpuinfo') - - def test_setup_arm_cores_unsupported_cpu_usage(self): - self.dw.RunCommandOnDut = mock.Mock(side_effect=[ - (0, BIG_LITTLE_CPUINFO, ''), - (0, '', ''), - ]) - self.dw.dut_config['cpu_usage'] = 'exclusive_cores' - self.dw.SetupArmCores() - # Check that setup command is not sent when trying to use - # 'exclusive_cores' on ARM CPU setup. 
- self.dw.RunCommandOnDut.assert_called_once_with('cat /proc/cpuinfo') - - def test_setup_cpu_freq_single_full(self): - online = [0] - self.dw.RunCommandOnDut = mock.Mock(side_effect=[ - (0, - '/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n', - ''), - (0, '1 2 3 4 5 6 7 8 9 10', ''), - (0, '', ''), - ]) - self.dw.dut_config['cpu_freq_pct'] = 100 - self.dw.SetupCpuFreq(online) - self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 3) - self.assertEqual( - self.dw.RunCommandOnDut.call_args, - mock.call('echo 10 | tee ' - '/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq ' - '/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq')) - - def test_setup_cpu_freq_middle(self): - online = [0] - self.dw.RunCommandOnDut = mock.Mock(side_effect=[ - (0, - '/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n', - ''), - (0, '1 2 3 4 5 6 7 8 9 10', ''), - (0, '', ''), - ]) - self.dw.dut_config['cpu_freq_pct'] = 60 - self.dw.SetupCpuFreq(online) - self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 2) - self.assertEqual( - self.dw.RunCommandOnDut.call_args, - mock.call('echo 6 | tee ' - '/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq ' - '/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq')) - - def test_setup_cpu_freq_lowest(self): - online = [0] - self.dw.RunCommandOnDut = mock.Mock(side_effect=[ - (0, - '/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n', - ''), - (0, '1 2 3 4 5 6 7 8 9 10', ''), - (0, '', ''), - ]) - self.dw.dut_config['cpu_freq_pct'] = 0 - self.dw.SetupCpuFreq(online) - self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 2) - self.assertEqual( - self.dw.RunCommandOnDut.call_args, - mock.call('echo 1 | tee ' - '/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq ' - '/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq')) - - def test_setup_cpu_freq_multiple_middle(self): - online = [0, 1] - self.dw.RunCommandOnDut = mock.Mock(side_effect=[ - (0, - '/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n' - '/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_frequencies\n', - ''), - (0, '1 2 3 4 5 6 7 8 9 10', ''), - (0, '', ''), - (0, '1 4 6 8 10 12 14 16 18 20', ''), - (0, '', ''), - ]) - self.dw.dut_config['cpu_freq_pct'] = 70 - self.dw.SetupCpuFreq(online) - self.assertEqual(self.dw.RunCommandOnDut.call_count, 5) - self.assertEqual( - self.dw.RunCommandOnDut.call_args_list[2], - mock.call('echo 7 | tee ' - '/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq ' - '/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq')) - self.assertEqual( - self.dw.RunCommandOnDut.call_args_list[4], - mock.call('echo 14 | tee ' - '/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq ' - '/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq')) - - def test_setup_cpu_freq_no_scaling_available(self): - online = [0, 1] - self.dw.RunCommandOnDut = mock.Mock( - return_value=(2, '', 'No such file or directory')) - self.dw.dut_config['cpu_freq_pct'] = 50 - self.dw.SetupCpuFreq(online) - self.dw.RunCommandOnDut.assert_called_once() - self.assertNotRegex(self.dw.RunCommandOnDut.call_args_list[0][0][0], - '^echo.*scaling_max_freq$') - - def test_setup_cpu_freq_multiple_no_access(self): - online = [0, 1] - self.dw.RunCommandOnDut = mock.Mock(side_effect=[ - (0, - '/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n' - '/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_frequencies\n', - ''), - (0, '1 4 6 8 10 12 14 16 18 20', ''), - AssertionError(), - ]) - 
self.dw.dut_config['cpu_freq_pct'] = 30 - # Error status causes log fatal. - with self.assertRaises(AssertionError): - self.dw.SetupCpuFreq(online) - - @mock.patch.object(time, 'sleep') - def test_wait_cooldown_nowait(self, mock_sleep): - mock_sleep.return_value = 0 - self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '39000', '')) - self.dw.dut_config['cooldown_time'] = 10 - self.dw.dut_config['cooldown_temp'] = 40 - wait_time = self.dw.WaitCooldown() - # Send command to DUT only once to check temperature - # and make sure it does not exceed the threshold. - self.dw.RunCommandOnDut.assert_called_once() - mock_sleep.assert_not_called() - self.assertEqual(wait_time, 0) - - @mock.patch.object(time, 'sleep') - def test_wait_cooldown_needwait_once(self, mock_sleep): - """Wait one iteration for cooldown. - - Set large enough timeout and changing temperature - output. Make sure it exits when expected value - received. - Expect that WaitCooldown check temp twice. - """ - mock_sleep.return_value = 0 - self.dw.RunCommandOnDut = mock.Mock(side_effect=[(0, '41000', - ''), (0, '39999', '')]) - self.dw.dut_config['cooldown_time'] = 100 - self.dw.dut_config['cooldown_temp'] = 40 - wait_time = self.dw.WaitCooldown() - self.dw.RunCommandOnDut.assert_called() - self.assertEqual(self.dw.RunCommandOnDut.call_count, 2) - mock_sleep.assert_called() - self.assertGreater(wait_time, 0) - - @mock.patch.object(time, 'sleep') - def test_wait_cooldown_needwait(self, mock_sleep): - """Test exit by timeout. - - Send command to DUT checking the temperature and - check repeatedly until timeout goes off. - Output from temperature sensor never changes. - """ - mock_sleep.return_value = 0 - self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '41000', '')) - self.dw.dut_config['cooldown_time'] = 60 - self.dw.dut_config['cooldown_temp'] = 40 - wait_time = self.dw.WaitCooldown() - self.dw.RunCommandOnDut.assert_called() - self.assertGreater(self.dw.RunCommandOnDut.call_count, 2) - mock_sleep.assert_called() - self.assertGreater(wait_time, 0) - - @mock.patch.object(time, 'sleep') - def test_wait_cooldown_needwait_multtemp(self, mock_sleep): - """Wait until all temps go down. - - Set large enough timeout and changing temperature - output. Make sure it exits when expected value - for all temperatures received. - Expect 3 checks. - """ - mock_sleep.return_value = 0 - self.dw.RunCommandOnDut = mock.Mock(side_effect=[ - (0, '41000\n20000\n30000\n45000', ''), - (0, '39000\n20000\n30000\n41000', ''), - (0, '39000\n20000\n30000\n31000', ''), - ]) - self.dw.dut_config['cooldown_time'] = 100 - self.dw.dut_config['cooldown_temp'] = 40 - wait_time = self.dw.WaitCooldown() - self.dw.RunCommandOnDut.assert_called() - self.assertEqual(self.dw.RunCommandOnDut.call_count, 3) - mock_sleep.assert_called() - self.assertGreater(wait_time, 0) - - @mock.patch.object(time, 'sleep') - def test_wait_cooldown_thermal_error(self, mock_sleep): - """Handle error status. - - Any error should be considered non-fatal. - """ - mock_sleep.return_value = 0 - self.dw.RunCommandOnDut = mock.Mock(side_effect=[ - (1, '39000\n20000\n30000\n41000', 'Thermal error'), - (1, '39000\n20000\n30000\n31000', 'Thermal error'), - ]) - self.dw.dut_config['cooldown_time'] = 10 - self.dw.dut_config['cooldown_temp'] = 40 - wait_time = self.dw.WaitCooldown() - # Check that errors are ignored. 
- self.dw.RunCommandOnDut.assert_called_with( - 'cat /sys/class/thermal/thermal_zone*/temp', ignore_status=True) - self.assertEqual(self.dw.RunCommandOnDut.call_count, 2) - # Check that we are waiting even when an error is returned - # as soon as data is coming. - mock_sleep.assert_called() - self.assertGreater(wait_time, 0) - - @mock.patch.object(time, 'sleep') - def test_wait_cooldown_thermal_no_output(self, mock_sleep): - """Handle no output. - - Check handling of empty stdout. - """ - mock_sleep.return_value = 0 - self.dw.RunCommandOnDut = mock.Mock(side_effect=[(1, '', 'Thermal error')]) - self.dw.dut_config['cooldown_time'] = 10 - self.dw.dut_config['cooldown_temp'] = 40 - wait_time = self.dw.WaitCooldown() - # Check that errors are ignored. - self.dw.RunCommandOnDut.assert_called_once_with( - 'cat /sys/class/thermal/thermal_zone*/temp', ignore_status=True) - # No wait. - mock_sleep.assert_not_called() - self.assertEqual(wait_time, 0) - - @mock.patch.object(time, 'sleep') - def test_wait_cooldown_thermal_ws_output(self, mock_sleep): - """Handle whitespace output. - - Check handling of whitespace only. - """ - mock_sleep.return_value = 0 - self.dw.RunCommandOnDut = mock.Mock(side_effect=[(1, '\n', - 'Thermal error')]) - self.dw.dut_config['cooldown_time'] = 10 - self.dw.dut_config['cooldown_temp'] = 40 - wait_time = self.dw.WaitCooldown() - # Check that errors are ignored. - self.dw.RunCommandOnDut.assert_called_once_with( - 'cat /sys/class/thermal/thermal_zone*/temp', ignore_status=True) - # No wait. - mock_sleep.assert_not_called() - self.assertEqual(wait_time, 0) - - def test_stop_ui(self): - self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', '')) - self.dw.StopUI() - self.dw.RunCommandOnDut.assert_called_once_with( - 'stop ui', ignore_status=True) - - def test_start_ui(self): - self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', '')) - self.dw.StartUI() - self.dw.RunCommandOnDut.assert_called_once_with( - 'start ui', ignore_status=True) - - def test_setup_device(self): - - def FakeRunner(command, ignore_status=False): - # pylint fix for unused variable. 
- del command, ignore_status - return 0, '', '' - - def SetupMockFunctions(): - self.dw.RunCommandOnDut = mock.Mock(return_value=FakeRunner) - self.dw.KerncmdUpdateNeeded = mock.Mock(return_value=True) - self.dw.UpdateKerncmdIntelPstate = mock.Mock(return_value=0) - self.dw.DisableASLR = mock.Mock(return_value=0) - self.dw.SetupCpuUsage = mock.Mock(return_value=0) - self.dw.SetupCpuFreq = mock.Mock(return_value=0) - self.dw.GetCpuOnline = mock.Mock(return_value={0: 1, 1: 1, 2: 0}) - self.dw.SetCpuGovernor = mock.Mock(return_value=0) - self.dw.DisableTurbo = mock.Mock(return_value=0) - self.dw.StopUI = mock.Mock(return_value=0) - self.dw.StartUI = mock.Mock(return_value=0) - self.dw.WaitCooldown = mock.Mock(return_value=0) - self.dw.DecreaseWaitTime = mock.Mock(return_value=0) - - self.dw.dut_config['enable_aslr'] = False - self.dw.dut_config['cooldown_time'] = 0 - self.dw.dut_config['governor'] = 'fake_governor' - self.dw.dut_config['cpu_freq_pct'] = 65 - self.dw.dut_config['intel_pstate'] = 'no_hwp' - - SetupMockFunctions() - self.dw.SetupDevice() - - self.dw.KerncmdUpdateNeeded.assert_called_once() - self.dw.UpdateKerncmdIntelPstate.assert_called_once() - self.dw.DisableASLR.assert_called_once() - self.dw.SetupCpuUsage.assert_called_once_with() - self.dw.SetupCpuFreq.assert_called_once_with([0, 1]) - self.dw.GetCpuOnline.assert_called_once_with() - self.dw.SetCpuGovernor.assert_called_once_with('fake_governor') - self.dw.DisableTurbo.assert_called_once_with() - self.dw.DecreaseWaitTime.assert_called_once_with() - self.dw.StopUI.assert_called_once_with() - self.dw.StartUI.assert_called_once_with() - self.dw.WaitCooldown.assert_not_called() - - # Test SetupDevice with cooldown - self.dw.dut_config['cooldown_time'] = 10 - - SetupMockFunctions() - self.dw.GetCpuOnline = mock.Mock(return_value={0: 0, 1: 1}) - - self.dw.SetupDevice() - - self.dw.WaitCooldown.assert_called_once_with() - self.dw.DisableASLR.assert_called_once() - self.dw.DisableTurbo.assert_called_once_with() - self.dw.SetupCpuUsage.assert_called_once_with() - self.dw.SetupCpuFreq.assert_called_once_with([1]) - self.dw.SetCpuGovernor.assert_called() - self.dw.GetCpuOnline.assert_called_once_with() - self.dw.StopUI.assert_called_once_with() - self.dw.StartUI.assert_called_once_with() - self.assertGreater(self.dw.SetCpuGovernor.call_count, 1) - self.assertEqual(self.dw.SetCpuGovernor.call_args, - mock.call('fake_governor')) - - # Test SetupDevice with cooldown - SetupMockFunctions() - self.dw.SetupCpuUsage = mock.Mock(side_effect=RuntimeError()) - - with self.assertRaises(RuntimeError): - self.dw.SetupDevice() - - # This call injected an exception. - self.dw.SetupCpuUsage.assert_called_once_with() - # Calls following the expeption are skipped. - self.dw.WaitCooldown.assert_not_called() - self.dw.DisableTurbo.assert_not_called() - self.dw.SetupCpuFreq.assert_not_called() - self.dw.SetCpuGovernor.assert_not_called() - self.dw.GetCpuOnline.assert_not_called() - # Check that Stop/Start UI are always called. 
- self.dw.StopUI.assert_called_once_with() - self.dw.StartUI.assert_called_once_with() - - -if __name__ == '__main__': - unittest.main() + """Class of DutWrapper test.""" + + real_logger = logger.GetLogger() + + mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) + mock_logger = mock.Mock(spec=logger.Logger) + + def __init__(self, *args, **kwargs): + super(DutWrapperTest, self).__init__(*args, **kwargs) + + def setUp(self): + self.dw = DutWrapper( + "/tmp/chromeos", + "lumpy.cros2", + log_level="verbose", + logger=self.mock_logger, + ce=self.mock_cmd_exec, + dut_config={}, + ) + + @mock.patch.object( + command_executer.CommandExecuter, "CrosRunCommandWOutput" + ) + def test_run_command_on_dut(self, mock_cros_runcmd): + self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd + mock_cros_runcmd.return_value = (0, "", "") + mock_cros_runcmd.assert_not_called() + self.dw.RunCommandOnDut("run command;") + mock_cros_runcmd.assert_called_once_with( + "run command;", chromeos_root="/tmp/chromeos", machine="lumpy.cros2" + ) + + @mock.patch.object( + command_executer.CommandExecuter, "CrosRunCommandWOutput" + ) + def test_dut_wrapper_fatal_error(self, mock_cros_runcmd): + self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd + # Command returns error 1. + mock_cros_runcmd.return_value = (1, "", "Error!") + mock_cros_runcmd.assert_not_called() + self.dw.RunCommandOnDut("run command;") + mock_cros_runcmd.assert_called_once_with( + "run command;", chromeos_root="/tmp/chromeos", machine="lumpy.cros2" + ) + # Error status causes log fatal. + self.assertEqual( + self.mock_logger.method_calls[-1], + mock.call.LogFatal( + "Command execution on DUT lumpy.cros2 failed.\n" + "Failing command: run command;\nreturned 1\n" + "Error message: Error!" + ), + ) + + @mock.patch.object( + command_executer.CommandExecuter, "CrosRunCommandWOutput" + ) + def test_dut_wrapper_ignore_error(self, mock_cros_runcmd): + self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd + # Command returns error 1. + mock_cros_runcmd.return_value = (1, "", "Error!") + self.dw.RunCommandOnDut("run command;", ignore_status=True) + mock_cros_runcmd.assert_called_once_with( + "run command;", chromeos_root="/tmp/chromeos", machine="lumpy.cros2" + ) + # Error status is not fatal. LogError records the error message. + self.assertEqual( + self.mock_logger.method_calls[-1], + mock.call.LogError( + "Command execution on DUT lumpy.cros2 failed.\n" + "Failing command: run command;\nreturned 1\n" + "Error message: Error!\n" + "(Failure is considered non-fatal. Continue.)" + ), + ) + + def test_disable_aslr(self): + self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", "")) + self.dw.DisableASLR() + # pyformat: disable + set_cpu_cmd = ( + "set -e; " + "if [[ -e /proc/sys/kernel/randomize_va_space ]]; then " + " echo 0 > /proc/sys/kernel/randomize_va_space; " + "fi" + ) + self.dw.RunCommandOnDut.assert_called_once_with(set_cpu_cmd) + + def test_set_cpu_governor(self): + self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", "")) + self.dw.SetCpuGovernor("new_governor", ignore_status=False) + set_cpu_cmd = ( + "for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do " + # Skip writing scaling_governor if cpu is offline. 
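+            # (In bash, ${f/cpufreq/online} substitutes 'cpufreq' with
+            # 'online' in $f, giving the cpu's online-status file.)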
+ " [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} " + " && continue; " + " cd $f; " + " if [[ -e scaling_governor ]]; then " + " echo %s > scaling_governor; fi; " + "done; " + ) + self.dw.RunCommandOnDut.assert_called_once_with( + set_cpu_cmd % "new_governor", ignore_status=False + ) + + def test_set_cpu_governor_propagate_error(self): + self.dw.RunCommandOnDut = mock.Mock(return_value=(1, "", "Error.")) + self.dw.SetCpuGovernor("non-exist_governor") + set_cpu_cmd = ( + "for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do " + # Skip writing scaling_governor if cpu is not online. + " [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} " + " && continue; " + " cd $f; " + " if [[ -e scaling_governor ]]; then " + " echo %s > scaling_governor; fi; " + "done; " + ) + # By default error status is fatal. + self.dw.RunCommandOnDut.assert_called_once_with( + set_cpu_cmd % "non-exist_governor", ignore_status=False + ) + + def test_set_cpu_governor_ignore_status(self): + self.dw.RunCommandOnDut = mock.Mock(return_value=(1, "", "Error.")) + ret_code = self.dw.SetCpuGovernor( + "non-exist_governor", ignore_status=True + ) + set_cpu_cmd = ( + "for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do " + # Skip writing scaling_governor if cpu is not online. + " [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} " + " && continue; " + " cd $f; " + " if [[ -e scaling_governor ]]; then " + " echo %s > scaling_governor; fi; " + "done; " + ) + self.dw.RunCommandOnDut.assert_called_once_with( + set_cpu_cmd % "non-exist_governor", ignore_status=True + ) + self.assertEqual(ret_code, 1) + + def test_disable_turbo(self): + self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", "")) + self.dw.DisableTurbo() + set_cpu_cmd = ( + # Disable Turbo in Intel pstate driver + "if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then " + " if grep -q 0 /sys/devices/system/cpu/intel_pstate/no_turbo; then " + " echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; " + " fi; " + "fi; " + ) + self.dw.RunCommandOnDut.assert_called_once_with(set_cpu_cmd) + + def test_get_cpu_online_two(self): + """Test one digit CPU #.""" + self.dw.RunCommandOnDut = mock.Mock( + return_value=( + 0, + "/sys/devices/system/cpu/cpu0/online 0\n" + "/sys/devices/system/cpu/cpu1/online 1\n", + "", + ) + ) + cpu_online = self.dw.GetCpuOnline() + self.assertEqual(cpu_online, {0: 0, 1: 1}) + + def test_get_cpu_online_twelve(self): + """Test two digit CPU #.""" + self.dw.RunCommandOnDut = mock.Mock( + return_value=( + 0, + "/sys/devices/system/cpu/cpu0/online 1\n" + "/sys/devices/system/cpu/cpu1/online 0\n" + "/sys/devices/system/cpu/cpu10/online 1\n" + "/sys/devices/system/cpu/cpu11/online 1\n" + "/sys/devices/system/cpu/cpu2/online 1\n" + "/sys/devices/system/cpu/cpu3/online 0\n" + "/sys/devices/system/cpu/cpu4/online 1\n" + "/sys/devices/system/cpu/cpu5/online 0\n" + "/sys/devices/system/cpu/cpu6/online 1\n" + "/sys/devices/system/cpu/cpu7/online 0\n" + "/sys/devices/system/cpu/cpu8/online 1\n" + "/sys/devices/system/cpu/cpu9/online 0\n", + "", + ) + ) + cpu_online = self.dw.GetCpuOnline() + self.assertEqual( + cpu_online, + { + 0: 1, + 1: 0, + 2: 1, + 3: 0, + 4: 1, + 5: 0, + 6: 1, + 7: 0, + 8: 1, + 9: 0, + 10: 1, + 11: 1, + }, + ) + + def test_get_cpu_online_no_output(self): + """Test error case, no output.""" + self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", "")) + with self.assertRaises(AssertionError): + self.dw.GetCpuOnline() + + def 
test_get_cpu_online_command_error(self): + """Test error case, command error.""" + self.dw.RunCommandOnDut = mock.Mock(side_effect=AssertionError) + with self.assertRaises(AssertionError): + self.dw.GetCpuOnline() + + @mock.patch.object(DutWrapper, "SetupArmCores") + def test_setup_cpu_usage_little_on_arm(self, mock_setup_arm): + self.dw.SetupArmCores = mock_setup_arm + self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "armv7l", "")) + self.dw.dut_config["cpu_usage"] = "little_only" + self.dw.SetupCpuUsage() + self.dw.SetupArmCores.assert_called_once_with() + + @mock.patch.object(DutWrapper, "SetupArmCores") + def test_setup_cpu_usage_big_on_aarch64(self, mock_setup_arm): + self.dw.SetupArmCores = mock_setup_arm + self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "aarch64", "")) + self.dw.dut_config["cpu_usage"] = "big_only" + self.dw.SetupCpuUsage() + self.dw.SetupArmCores.assert_called_once_with() + + @mock.patch.object(DutWrapper, "SetupArmCores") + def test_setup_cpu_usage_big_on_intel(self, mock_setup_arm): + self.dw.SetupArmCores = mock_setup_arm + self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "x86_64", "")) + self.dw.dut_config["cpu_usage"] = "big_only" + self.dw.SetupCpuUsage() + # Check that SetupArmCores not called with invalid setup. + self.dw.SetupArmCores.assert_not_called() + + @mock.patch.object(DutWrapper, "SetupArmCores") + def test_setup_cpu_usage_all_on_intel(self, mock_setup_arm): + self.dw.SetupArmCores = mock_setup_arm + self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "x86_64", "")) + self.dw.dut_config["cpu_usage"] = "all" + self.dw.SetupCpuUsage() + # Check that SetupArmCores not called in general case. + self.dw.SetupArmCores.assert_not_called() + + def test_setup_arm_cores_big_on_big_little(self): + self.dw.RunCommandOnDut = mock.Mock( + side_effect=[ + (0, BIG_LITTLE_CPUINFO, ""), + (0, "", ""), + ] + ) + self.dw.dut_config["cpu_usage"] = "big_only" + self.dw.SetupArmCores() + self.dw.RunCommandOnDut.assert_called_with( + "echo 1 | tee /sys/devices/system/cpu/cpu{2}/online; " + "echo 0 | tee /sys/devices/system/cpu/cpu{0,1}/online" + ) + + def test_setup_arm_cores_little_on_big_little(self): + self.dw.RunCommandOnDut = mock.Mock( + side_effect=[ + (0, BIG_LITTLE_CPUINFO, ""), + (0, "", ""), + ] + ) + self.dw.dut_config["cpu_usage"] = "little_only" + self.dw.SetupArmCores() + self.dw.RunCommandOnDut.assert_called_with( + "echo 1 | tee /sys/devices/system/cpu/cpu{0,1}/online; " + "echo 0 | tee /sys/devices/system/cpu/cpu{2}/online" + ) + + def test_setup_arm_cores_invalid_config(self): + self.dw.RunCommandOnDut = mock.Mock( + side_effect=[ + (0, LITTLE_ONLY_CPUINFO, ""), + (0, "", ""), + ] + ) + self.dw.dut_config["cpu_usage"] = "big_only" + self.dw.SetupArmCores() + # Check that setup command is not sent when trying + # to use 'big_only' on a platform with all little cores. + self.dw.RunCommandOnDut.assert_called_once_with("cat /proc/cpuinfo") + + def test_setup_arm_cores_not_big_little(self): + self.dw.RunCommandOnDut = mock.Mock( + side_effect=[ + (0, NOT_BIG_LITTLE_CPUINFO, ""), + (0, "", ""), + ] + ) + self.dw.dut_config["cpu_usage"] = "big_only" + self.dw.SetupArmCores() + # Check that setup command is not sent when trying + # to use 'big_only' on a platform w/o support of big/little. 
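+        # The initial 'cat /proc/cpuinfo' probe should be the only DUT call.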
+ self.dw.RunCommandOnDut.assert_called_once_with("cat /proc/cpuinfo") + + def test_setup_arm_cores_unsupported_cpu_usage(self): + self.dw.RunCommandOnDut = mock.Mock( + side_effect=[ + (0, BIG_LITTLE_CPUINFO, ""), + (0, "", ""), + ] + ) + self.dw.dut_config["cpu_usage"] = "exclusive_cores" + self.dw.SetupArmCores() + # Check that setup command is not sent when trying to use + # 'exclusive_cores' on ARM CPU setup. + self.dw.RunCommandOnDut.assert_called_once_with("cat /proc/cpuinfo") + + def test_setup_cpu_freq_single_full(self): + online = [0] + self.dw.RunCommandOnDut = mock.Mock( + side_effect=[ + ( + 0, + "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n", + "", + ), + (0, "1 2 3 4 5 6 7 8 9 10", ""), + (0, "", ""), + ] + ) + self.dw.dut_config["cpu_freq_pct"] = 100 + self.dw.SetupCpuFreq(online) + self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 3) + self.assertEqual( + self.dw.RunCommandOnDut.call_args, + mock.call( + "echo 10 | tee " + "/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq " + "/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq" + ), + ) + + def test_setup_cpu_freq_middle(self): + online = [0] + self.dw.RunCommandOnDut = mock.Mock( + side_effect=[ + ( + 0, + "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n", + "", + ), + (0, "1 2 3 4 5 6 7 8 9 10", ""), + (0, "", ""), + ] + ) + self.dw.dut_config["cpu_freq_pct"] = 60 + self.dw.SetupCpuFreq(online) + self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 2) + self.assertEqual( + self.dw.RunCommandOnDut.call_args, + mock.call( + "echo 6 | tee " + "/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq " + "/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq" + ), + ) + + def test_setup_cpu_freq_lowest(self): + online = [0] + self.dw.RunCommandOnDut = mock.Mock( + side_effect=[ + ( + 0, + "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n", + "", + ), + (0, "1 2 3 4 5 6 7 8 9 10", ""), + (0, "", ""), + ] + ) + self.dw.dut_config["cpu_freq_pct"] = 0 + self.dw.SetupCpuFreq(online) + self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 2) + self.assertEqual( + self.dw.RunCommandOnDut.call_args, + mock.call( + "echo 1 | tee " + "/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq " + "/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq" + ), + ) + + def test_setup_cpu_freq_multiple_middle(self): + online = [0, 1] + self.dw.RunCommandOnDut = mock.Mock( + side_effect=[ + ( + 0, + "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n" + "/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_frequencies\n", + "", + ), + (0, "1 2 3 4 5 6 7 8 9 10", ""), + (0, "", ""), + (0, "1 4 6 8 10 12 14 16 18 20", ""), + (0, "", ""), + ] + ) + self.dw.dut_config["cpu_freq_pct"] = 70 + self.dw.SetupCpuFreq(online) + self.assertEqual(self.dw.RunCommandOnDut.call_count, 5) + self.assertEqual( + self.dw.RunCommandOnDut.call_args_list[2], + mock.call( + "echo 7 | tee " + "/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq " + "/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq" + ), + ) + self.assertEqual( + self.dw.RunCommandOnDut.call_args_list[4], + mock.call( + "echo 14 | tee " + "/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq " + "/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq" + ), + ) + + def test_setup_cpu_freq_no_scaling_available(self): + online = [0, 1] + self.dw.RunCommandOnDut = mock.Mock( + return_value=(2, "", "No such file or directory") + ) + self.dw.dut_config["cpu_freq_pct"] = 50 + 
self.dw.SetupCpuFreq(online)
+        self.dw.RunCommandOnDut.assert_called_once()
+        self.assertNotRegex(
+            self.dw.RunCommandOnDut.call_args_list[0][0][0],
+            "^echo.*scaling_max_freq$",
+        )
+
+    def test_setup_cpu_freq_multiple_no_access(self):
+        online = [0, 1]
+        self.dw.RunCommandOnDut = mock.Mock(
+            side_effect=[
+                (
+                    0,
+                    "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n"
+                    "/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_frequencies\n",
+                    "",
+                ),
+                (0, "1 4 6 8 10 12 14 16 18 20", ""),
+                AssertionError(),
+            ]
+        )
+        self.dw.dut_config["cpu_freq_pct"] = 30
+        # Error status causes log fatal.
+        with self.assertRaises(AssertionError):
+            self.dw.SetupCpuFreq(online)
+
+    @mock.patch.object(time, "sleep")
+    def test_wait_cooldown_nowait(self, mock_sleep):
+        mock_sleep.return_value = 0
+        self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "39000", ""))
+        self.dw.dut_config["cooldown_time"] = 10
+        self.dw.dut_config["cooldown_temp"] = 40
+        wait_time = self.dw.WaitCooldown()
+        # Send command to DUT only once to check temperature
+        # and make sure it does not exceed the threshold.
+        self.dw.RunCommandOnDut.assert_called_once()
+        mock_sleep.assert_not_called()
+        self.assertEqual(wait_time, 0)
+
+    @mock.patch.object(time, "sleep")
+    def test_wait_cooldown_needwait_once(self, mock_sleep):
+        """Wait one iteration for cooldown.
+
+        Set a large enough timeout and changing temperature
+        output. Make sure it exits when the expected value
+        is received.
+        Expect that WaitCooldown checks temp twice.
+        """
+        mock_sleep.return_value = 0
+        self.dw.RunCommandOnDut = mock.Mock(
+            side_effect=[(0, "41000", ""), (0, "39999", "")]
+        )
+        self.dw.dut_config["cooldown_time"] = 100
+        self.dw.dut_config["cooldown_temp"] = 40
+        wait_time = self.dw.WaitCooldown()
+        self.dw.RunCommandOnDut.assert_called()
+        self.assertEqual(self.dw.RunCommandOnDut.call_count, 2)
+        mock_sleep.assert_called()
+        self.assertGreater(wait_time, 0)
+
+    @mock.patch.object(time, "sleep")
+    def test_wait_cooldown_needwait(self, mock_sleep):
+        """Test exit by timeout.
+
+        Send command to DUT checking the temperature and
+        check repeatedly until the timeout expires.
+        Output from temperature sensor never changes.
+        """
+        mock_sleep.return_value = 0
+        self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "41000", ""))
+        self.dw.dut_config["cooldown_time"] = 60
+        self.dw.dut_config["cooldown_temp"] = 40
+        wait_time = self.dw.WaitCooldown()
+        self.dw.RunCommandOnDut.assert_called()
+        self.assertGreater(self.dw.RunCommandOnDut.call_count, 2)
+        mock_sleep.assert_called()
+        self.assertGreater(wait_time, 0)
+
+    @mock.patch.object(time, "sleep")
+    def test_wait_cooldown_needwait_multtemp(self, mock_sleep):
+        """Wait until all temps go down.
+
+        Set a large enough timeout and changing temperature
+        output. Make sure it exits when the expected value
+        is received for all temperatures.
+        Expect 3 checks.
+        """
+        mock_sleep.return_value = 0
+        self.dw.RunCommandOnDut = mock.Mock(
+            side_effect=[
+                (0, "41000\n20000\n30000\n45000", ""),
+                (0, "39000\n20000\n30000\n41000", ""),
+                (0, "39000\n20000\n30000\n31000", ""),
+            ]
+        )
+        self.dw.dut_config["cooldown_time"] = 100
+        self.dw.dut_config["cooldown_temp"] = 40
+        wait_time = self.dw.WaitCooldown()
+        self.dw.RunCommandOnDut.assert_called()
+        self.assertEqual(self.dw.RunCommandOnDut.call_count, 3)
+        mock_sleep.assert_called()
+        self.assertGreater(wait_time, 0)
+
+    @mock.patch.object(time, "sleep")
+    def test_wait_cooldown_thermal_error(self, mock_sleep):
+        """Handle error status.
+
+        Any error should be considered non-fatal.
+        """
+        mock_sleep.return_value = 0
+        self.dw.RunCommandOnDut = mock.Mock(
+            side_effect=[
+                (1, "39000\n20000\n30000\n41000", "Thermal error"),
+                (1, "39000\n20000\n30000\n31000", "Thermal error"),
+            ]
+        )
+        self.dw.dut_config["cooldown_time"] = 10
+        self.dw.dut_config["cooldown_temp"] = 40
+        wait_time = self.dw.WaitCooldown()
+        # Check that errors are ignored.
+        self.dw.RunCommandOnDut.assert_called_with(
+            "cat /sys/class/thermal/thermal_zone*/temp", ignore_status=True
+        )
+        self.assertEqual(self.dw.RunCommandOnDut.call_count, 2)
+        # Check that we keep waiting when an error is returned,
+        # as long as temperature data keeps coming.
+        mock_sleep.assert_called()
+        self.assertGreater(wait_time, 0)
+
+    @mock.patch.object(time, "sleep")
+    def test_wait_cooldown_thermal_no_output(self, mock_sleep):
+        """Handle no output.
+
+        Check handling of empty stdout.
+        """
+        mock_sleep.return_value = 0
+        self.dw.RunCommandOnDut = mock.Mock(
+            side_effect=[(1, "", "Thermal error")]
+        )
+        self.dw.dut_config["cooldown_time"] = 10
+        self.dw.dut_config["cooldown_temp"] = 40
+        wait_time = self.dw.WaitCooldown()
+        # Check that errors are ignored.
+        self.dw.RunCommandOnDut.assert_called_once_with(
+            "cat /sys/class/thermal/thermal_zone*/temp", ignore_status=True
+        )
+        # No wait.
+        mock_sleep.assert_not_called()
+        self.assertEqual(wait_time, 0)
+
+    @mock.patch.object(time, "sleep")
+    def test_wait_cooldown_thermal_ws_output(self, mock_sleep):
+        """Handle whitespace output.
+
+        Check handling of whitespace only.
+        """
+        mock_sleep.return_value = 0
+        self.dw.RunCommandOnDut = mock.Mock(
+            side_effect=[(1, "\n", "Thermal error")]
+        )
+        self.dw.dut_config["cooldown_time"] = 10
+        self.dw.dut_config["cooldown_temp"] = 40
+        wait_time = self.dw.WaitCooldown()
+        # Check that errors are ignored.
+        self.dw.RunCommandOnDut.assert_called_once_with(
+            "cat /sys/class/thermal/thermal_zone*/temp", ignore_status=True
+        )
+        # No wait.
+        mock_sleep.assert_not_called()
+        self.assertEqual(wait_time, 0)
+
+    def test_stop_ui(self):
+        self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", ""))
+        self.dw.StopUI()
+        self.dw.RunCommandOnDut.assert_called_once_with(
+            "stop ui", ignore_status=True
+        )
+
+    def test_start_ui(self):
+        self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", ""))
+        self.dw.StartUI()
+        self.dw.RunCommandOnDut.assert_called_once_with(
+            "start ui", ignore_status=True
+        )
+
+    def test_setup_device(self):
+        def FakeRunner(command, ignore_status=False):
+            # pylint fix for unused variable.
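+            # FakeRunner mimics RunCommandOnDut's (ret_code, stdout, stderr)
+            # return shape.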
+            del command, ignore_status
+            return 0, "", ""
+
+        def SetupMockFunctions():
+            self.dw.RunCommandOnDut = mock.Mock(return_value=FakeRunner)
+            self.dw.KerncmdUpdateNeeded = mock.Mock(return_value=True)
+            self.dw.UpdateKerncmdIntelPstate = mock.Mock(return_value=0)
+            self.dw.DisableASLR = mock.Mock(return_value=0)
+            self.dw.SetupCpuUsage = mock.Mock(return_value=0)
+            self.dw.SetupCpuFreq = mock.Mock(return_value=0)
+            self.dw.GetCpuOnline = mock.Mock(return_value={0: 1, 1: 1, 2: 0})
+            self.dw.SetCpuGovernor = mock.Mock(return_value=0)
+            self.dw.DisableTurbo = mock.Mock(return_value=0)
+            self.dw.StopUI = mock.Mock(return_value=0)
+            self.dw.StartUI = mock.Mock(return_value=0)
+            self.dw.WaitCooldown = mock.Mock(return_value=0)
+            self.dw.DecreaseWaitTime = mock.Mock(return_value=0)
+
+        self.dw.dut_config["enable_aslr"] = False
+        self.dw.dut_config["cooldown_time"] = 0
+        self.dw.dut_config["governor"] = "fake_governor"
+        self.dw.dut_config["cpu_freq_pct"] = 65
+        self.dw.dut_config["intel_pstate"] = "no_hwp"
+
+        SetupMockFunctions()
+        self.dw.SetupDevice()
+
+        self.dw.KerncmdUpdateNeeded.assert_called_once()
+        self.dw.UpdateKerncmdIntelPstate.assert_called_once()
+        self.dw.DisableASLR.assert_called_once()
+        self.dw.SetupCpuUsage.assert_called_once_with()
+        self.dw.SetupCpuFreq.assert_called_once_with([0, 1])
+        self.dw.GetCpuOnline.assert_called_once_with()
+        self.dw.SetCpuGovernor.assert_called_once_with("fake_governor")
+        self.dw.DisableTurbo.assert_called_once_with()
+        self.dw.DecreaseWaitTime.assert_called_once_with()
+        self.dw.StopUI.assert_called_once_with()
+        self.dw.StartUI.assert_called_once_with()
+        self.dw.WaitCooldown.assert_not_called()
+
+        # Test SetupDevice with cooldown
+        self.dw.dut_config["cooldown_time"] = 10
+
+        SetupMockFunctions()
+        self.dw.GetCpuOnline = mock.Mock(return_value={0: 0, 1: 1})
+
+        self.dw.SetupDevice()
+
+        self.dw.WaitCooldown.assert_called_once_with()
+        self.dw.DisableASLR.assert_called_once()
+        self.dw.DisableTurbo.assert_called_once_with()
+        self.dw.SetupCpuUsage.assert_called_once_with()
+        self.dw.SetupCpuFreq.assert_called_once_with([1])
+        self.dw.SetCpuGovernor.assert_called()
+        self.dw.GetCpuOnline.assert_called_once_with()
+        self.dw.StopUI.assert_called_once_with()
+        self.dw.StartUI.assert_called_once_with()
+        self.assertGreater(self.dw.SetCpuGovernor.call_count, 1)
+        self.assertEqual(
+            self.dw.SetCpuGovernor.call_args, mock.call("fake_governor")
+        )
+
+        # Test SetupDevice with an exception injected into SetupCpuUsage.
+        SetupMockFunctions()
+        self.dw.SetupCpuUsage = mock.Mock(side_effect=RuntimeError())
+
+        with self.assertRaises(RuntimeError):
+            self.dw.SetupDevice()
+
+        # This call injected an exception.
+        self.dw.SetupCpuUsage.assert_called_once_with()
+        # Calls following the exception are skipped.
+        self.dw.WaitCooldown.assert_not_called()
+        self.dw.DisableTurbo.assert_not_called()
+        self.dw.SetupCpuFreq.assert_not_called()
+        self.dw.SetCpuGovernor.assert_not_called()
+        self.dw.GetCpuOnline.assert_not_called()
+        # Check that Stop/Start UI are always called.
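+        # PauseUI resumes the UI in a 'finally' block, so StartUI runs
+        # even when SetupDevice raises.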
+ self.dw.StopUI.assert_called_once_with() + self.dw.StartUI.assert_called_once_with() + + +if __name__ == "__main__": + unittest.main() diff --git a/cros_utils/email_sender.py b/cros_utils/email_sender.py index a4ddb2b5..0572d2e1 100755 --- a/cros_utils/email_sender.py +++ b/cros_utils/email_sender.py @@ -12,248 +12,304 @@ from __future__ import print_function import base64 import contextlib import datetime +from email import encoders as Encoders +from email.mime.base import MIMEBase +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText import getpass import json import os import smtplib import tempfile -from email import encoders as Encoders -from email.mime.base import MIMEBase -from email.mime.multipart import MIMEMultipart -from email.mime.text import MIMEText from cros_utils import command_executer -X20_PATH = '/google/data/rw/teams/c-compiler-chrome/prod_emails' + +X20_PATH = "/google/data/rw/teams/c-compiler-chrome/prod_emails" @contextlib.contextmanager def AtomicallyWriteFile(file_path): - temp_path = file_path + '.in_progress' - try: - with open(temp_path, 'w') as f: - yield f - os.rename(temp_path, file_path) - except: - os.remove(temp_path) - raise + temp_path = file_path + ".in_progress" + try: + with open(temp_path, "w") as f: + yield f + os.rename(temp_path, file_path) + except: + os.remove(temp_path) + raise class EmailSender(object): - """Utility class to send email through SMTP or SendGMR.""" - - class Attachment(object): - """Small class to keep track of attachment info.""" - - def __init__(self, name, content): - self.name = name - self.content = content - - def SendX20Email(self, - subject, - identifier, - well_known_recipients=(), - direct_recipients=(), - text_body=None, - html_body=None): - """Enqueues an email in our x20 outbox. - - These emails ultimately get sent by the machinery in - //depot/google3/googleclient/chrome/chromeos_toolchain/mailer/mail.go. This - kind of sending is intended for accounts that don't have smtp or gmr access - (e.g., role accounts), but can be used by anyone with x20 access. - - All emails are sent from `mdb.c-compiler-chrome+${identifier}@google.com`. - - Args: - subject: email subject. Must be nonempty. - identifier: email identifier, or the text that lands after the `+` in the - "From" email address. Must be nonempty. - well_known_recipients: a list of well-known recipients for the email. - These are translated into addresses by our mailer. - Current potential values for this are ('detective', - 'cwp-team', 'cros-team', 'mage'). Either this or - direct_recipients must be a nonempty list. - direct_recipients: @google.com emails to send addresses to. Either this - or well_known_recipients must be a nonempty list. - text_body: a 'text/plain' email body to send. Either this or html_body - must be a nonempty string. Both may be specified - html_body: a 'text/html' email body to send. Either this or text_body - must be a nonempty string. Both may be specified - """ - # `str`s act a lot like tuples/lists. Ensure that we're not accidentally - # iterating over one of those (or anything else that's sketchy, for that - # matter). 
- if not isinstance(well_known_recipients, (tuple, list)): - raise ValueError('`well_known_recipients` is unexpectedly a %s' % - type(well_known_recipients)) - - if not isinstance(direct_recipients, (tuple, list)): - raise ValueError('`direct_recipients` is unexpectedly a %s' % - type(direct_recipients)) - - if not subject or not identifier: - raise ValueError('both `subject` and `identifier` must be nonempty') - - if not (well_known_recipients or direct_recipients): - raise ValueError('either `well_known_recipients` or `direct_recipients` ' - 'must be specified') - - for recipient in direct_recipients: - if not recipient.endswith('@google.com'): - raise ValueError('All recipients must end with @google.com') - - if not (text_body or html_body): - raise ValueError('either `text_body` or `html_body` must be specified') - - email_json = { - 'email_identifier': identifier, - 'subject': subject, - } - - if well_known_recipients: - email_json['well_known_recipients'] = well_known_recipients - - if direct_recipients: - email_json['direct_recipients'] = direct_recipients - - if text_body: - email_json['body'] = text_body - - if html_body: - email_json['html_body'] = html_body - - # The name of this has two parts: - # - An easily sortable time, to provide uniqueness and let our emailer - # send things in the order they were put into the outbox. - # - 64 bits of entropy, so two racing email sends don't clobber the same - # file. - now = datetime.datetime.utcnow().isoformat('T', 'seconds') + 'Z' - entropy = base64.urlsafe_b64encode(os.getrandom(8)) - entropy_str = entropy.rstrip(b'=').decode('utf-8') - result_path = os.path.join(X20_PATH, now + '_' + entropy_str + '.json') - - with AtomicallyWriteFile(result_path) as f: - json.dump(email_json, f) - - def SendEmail(self, + """Utility class to send email through SMTP or SendGMR.""" + + class Attachment(object): + """Small class to keep track of attachment info.""" + + def __init__(self, name, content): + self.name = name + self.content = content + + def SendX20Email( + self, + subject, + identifier, + well_known_recipients=(), + direct_recipients=(), + text_body=None, + html_body=None, + ): + """Enqueues an email in our x20 outbox. + + These emails ultimately get sent by the machinery in + //depot/google3/googleclient/chrome/chromeos_toolchain/mailer/mail.go. This + kind of sending is intended for accounts that don't have smtp or gmr access + (e.g., role accounts), but can be used by anyone with x20 access. + + All emails are sent from `mdb.c-compiler-chrome+${identifier}@google.com`. + + Args: + subject: email subject. Must be nonempty. + identifier: email identifier, or the text that lands after the `+` in the + "From" email address. Must be nonempty. + well_known_recipients: a list of well-known recipients for the email. + These are translated into addresses by our mailer. + Current potential values for this are ('detective', + 'cwp-team', 'cros-team', 'mage'). Either this or + direct_recipients must be a nonempty list. + direct_recipients: @google.com emails to send addresses to. Either this + or well_known_recipients must be a nonempty list. + text_body: a 'text/plain' email body to send. Either this or html_body + must be a nonempty string. Both may be specified + html_body: a 'text/html' email body to send. Either this or text_body + must be a nonempty string. Both may be specified + """ + # `str`s act a lot like tuples/lists. 
Ensure that we're not accidentally + # iterating over one of those (or anything else that's sketchy, for that + # matter). + if not isinstance(well_known_recipients, (tuple, list)): + raise ValueError( + "`well_known_recipients` is unexpectedly a %s" + % type(well_known_recipients) + ) + + if not isinstance(direct_recipients, (tuple, list)): + raise ValueError( + "`direct_recipients` is unexpectedly a %s" + % type(direct_recipients) + ) + + if not subject or not identifier: + raise ValueError("both `subject` and `identifier` must be nonempty") + + if not (well_known_recipients or direct_recipients): + raise ValueError( + "either `well_known_recipients` or `direct_recipients` " + "must be specified" + ) + + for recipient in direct_recipients: + if not recipient.endswith("@google.com"): + raise ValueError("All recipients must end with @google.com") + + if not (text_body or html_body): + raise ValueError( + "either `text_body` or `html_body` must be specified" + ) + + email_json = { + "email_identifier": identifier, + "subject": subject, + } + + if well_known_recipients: + email_json["well_known_recipients"] = well_known_recipients + + if direct_recipients: + email_json["direct_recipients"] = direct_recipients + + if text_body: + email_json["body"] = text_body + + if html_body: + email_json["html_body"] = html_body + + # The name of this has two parts: + # - An easily sortable time, to provide uniqueness and let our emailer + # send things in the order they were put into the outbox. + # - 64 bits of entropy, so two racing email sends don't clobber the same + # file. + now = datetime.datetime.utcnow().isoformat("T", "seconds") + "Z" + entropy = base64.urlsafe_b64encode(os.getrandom(8)) + entropy_str = entropy.rstrip(b"=").decode("utf-8") + result_path = os.path.join(X20_PATH, now + "_" + entropy_str + ".json") + + with AtomicallyWriteFile(result_path) as f: + json.dump(email_json, f) + + def SendEmail( + self, + email_to, + subject, + text_to_send, + email_cc=None, + email_bcc=None, + email_from=None, + msg_type="plain", + attachments=None, + ): + """Choose appropriate email method and call it.""" + if os.path.exists("/usr/bin/sendgmr"): + self.SendGMREmail( email_to, subject, text_to_send, - email_cc=None, - email_bcc=None, - email_from=None, - msg_type='plain', - attachments=None): - """Choose appropriate email method and call it.""" - if os.path.exists('/usr/bin/sendgmr'): - self.SendGMREmail(email_to, subject, text_to_send, email_cc, email_bcc, - email_from, msg_type, attachments) - else: - self.SendSMTPEmail(email_to, subject, text_to_send, email_cc, email_bcc, - email_from, msg_type, attachments) - - def SendSMTPEmail(self, email_to, subject, text_to_send, email_cc, email_bcc, - email_from, msg_type, attachments): - """Send email via standard smtp mail.""" - # Email summary to the current user. 
- msg = MIMEMultipart() - - if not email_from: - email_from = os.path.basename(__file__) - - msg['To'] = ','.join(email_to) - msg['Subject'] = subject - - if email_from: - msg['From'] = email_from - if email_cc: - msg['CC'] = ','.join(email_cc) - email_to += email_cc - if email_bcc: - msg['BCC'] = ','.join(email_bcc) - email_to += email_bcc - - msg.attach(MIMEText(text_to_send, msg_type)) - if attachments: - for attachment in attachments: - part = MIMEBase('application', 'octet-stream') - part.set_payload(attachment.content) - Encoders.encode_base64(part) - part.add_header('Content-Disposition', - 'attachment; filename="%s"' % attachment.name) - msg.attach(part) - - # Send the message via our own SMTP server, but don't include the - # envelope header. - s = smtplib.SMTP('localhost') - s.sendmail(email_from, email_to, msg.as_string()) - s.quit() - - def SendGMREmail(self, email_to, subject, text_to_send, email_cc, email_bcc, - email_from, msg_type, attachments): - """Send email via sendgmr program.""" - ce = command_executer.GetCommandExecuter(log_level='none') - - if not email_from: - email_from = getpass.getuser() + '@google.com' - - to_list = ','.join(email_to) - - if not text_to_send: - text_to_send = 'Empty message body.' - - to_be_deleted = [] - try: - with tempfile.NamedTemporaryFile('w', encoding='utf-8', - delete=False) as f: - f.write(text_to_send) - f.flush() - to_be_deleted.append(f.name) - - # Fix single-quotes inside the subject. In bash, to escape a single quote - # (e.g 'don't') you need to replace it with '\'' (e.g. 'don'\''t'). To - # make Python read the backslash as a backslash rather than an escape - # character, you need to double it. So... - subject = subject.replace("'", "'\\''") - - if msg_type == 'html': - command = ("sendgmr --to='%s' --from='%s' --subject='%s' " - "--html_file='%s' --body_file=/dev/null" % - (to_list, email_from, subject, f.name)) - else: - command = ("sendgmr --to='%s' --from='%s' --subject='%s' " - "--body_file='%s'" % (to_list, email_from, subject, f.name)) - - if email_cc: - cc_list = ','.join(email_cc) - command += " --cc='%s'" % cc_list - if email_bcc: - bcc_list = ','.join(email_bcc) - command += " --bcc='%s'" % bcc_list - - if attachments: - attachment_files = [] - for attachment in attachments: - if '<html>' in attachment.content: - report_suffix = '_report.html' - else: - report_suffix = '_report.txt' - with tempfile.NamedTemporaryFile('w', - encoding='utf-8', - delete=False, - suffix=report_suffix) as f: - f.write(attachment.content) - f.flush() - attachment_files.append(f.name) - files = ','.join(attachment_files) - command += " --attachment_files='%s'" % files - to_be_deleted += attachment_files - - # Send the message via our own GMR server. - status = ce.RunCommand(command) - return status - - finally: - for f in to_be_deleted: - os.remove(f) + email_cc, + email_bcc, + email_from, + msg_type, + attachments, + ) + else: + self.SendSMTPEmail( + email_to, + subject, + text_to_send, + email_cc, + email_bcc, + email_from, + msg_type, + attachments, + ) + + def SendSMTPEmail( + self, + email_to, + subject, + text_to_send, + email_cc, + email_bcc, + email_from, + msg_type, + attachments, + ): + """Send email via standard smtp mail.""" + # Email summary to the current user. 
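+        # Note: CC/BCC addresses are appended to 'email_to' below because
+        # smtplib delivers to the recipient list, not to the headers.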
+ msg = MIMEMultipart() + + if not email_from: + email_from = os.path.basename(__file__) + + msg["To"] = ",".join(email_to) + msg["Subject"] = subject + + if email_from: + msg["From"] = email_from + if email_cc: + msg["CC"] = ",".join(email_cc) + email_to += email_cc + if email_bcc: + msg["BCC"] = ",".join(email_bcc) + email_to += email_bcc + + msg.attach(MIMEText(text_to_send, msg_type)) + if attachments: + for attachment in attachments: + part = MIMEBase("application", "octet-stream") + part.set_payload(attachment.content) + Encoders.encode_base64(part) + part.add_header( + "Content-Disposition", + 'attachment; filename="%s"' % attachment.name, + ) + msg.attach(part) + + # Send the message via our own SMTP server, but don't include the + # envelope header. + s = smtplib.SMTP("localhost") + s.sendmail(email_from, email_to, msg.as_string()) + s.quit() + + def SendGMREmail( + self, + email_to, + subject, + text_to_send, + email_cc, + email_bcc, + email_from, + msg_type, + attachments, + ): + """Send email via sendgmr program.""" + ce = command_executer.GetCommandExecuter(log_level="none") + + if not email_from: + email_from = getpass.getuser() + "@google.com" + + to_list = ",".join(email_to) + + if not text_to_send: + text_to_send = "Empty message body." + + to_be_deleted = [] + try: + with tempfile.NamedTemporaryFile( + "w", encoding="utf-8", delete=False + ) as f: + f.write(text_to_send) + f.flush() + to_be_deleted.append(f.name) + + # Fix single-quotes inside the subject. In bash, to escape a single quote + # (e.g 'don't') you need to replace it with '\'' (e.g. 'don'\''t'). To + # make Python read the backslash as a backslash rather than an escape + # character, you need to double it. So... + subject = subject.replace("'", "'\\''") + + if msg_type == "html": + command = ( + "sendgmr --to='%s' --from='%s' --subject='%s' " + "--html_file='%s' --body_file=/dev/null" + % (to_list, email_from, subject, f.name) + ) + else: + command = ( + "sendgmr --to='%s' --from='%s' --subject='%s' " + "--body_file='%s'" % (to_list, email_from, subject, f.name) + ) + + if email_cc: + cc_list = ",".join(email_cc) + command += " --cc='%s'" % cc_list + if email_bcc: + bcc_list = ",".join(email_bcc) + command += " --bcc='%s'" % bcc_list + + if attachments: + attachment_files = [] + for attachment in attachments: + if "<html>" in attachment.content: + report_suffix = "_report.html" + else: + report_suffix = "_report.txt" + with tempfile.NamedTemporaryFile( + "w", + encoding="utf-8", + delete=False, + suffix=report_suffix, + ) as f: + f.write(attachment.content) + f.flush() + attachment_files.append(f.name) + files = ",".join(attachment_files) + command += " --attachment_files='%s'" % files + to_be_deleted += attachment_files + + # Send the message via our own GMR server. 
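+            # `command` was assembled above with shell-quoted fields; the
+            # temporary body/attachment files are removed in `finally`.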
+ status = ce.RunCommand(command) + return status + + finally: + for f in to_be_deleted: + os.remove(f) diff --git a/cros_utils/email_sender_unittest.py b/cros_utils/email_sender_unittest.py index 92519845..26b5b9a0 100755 --- a/cros_utils/email_sender_unittest.py +++ b/cros_utils/email_sender_unittest.py @@ -19,102 +19,105 @@ import cros_utils.email_sender as email_sender class Test(unittest.TestCase): - """Tests for email_sender.""" - - @mock.patch('cros_utils.email_sender.AtomicallyWriteFile') - def test_x20_email_sending_rejects_invalid_inputs(self, write_file): - test_cases = [ - { - # no subject - 'subject': '', - 'identifier': 'foo', - 'direct_recipients': ['gbiv@google.com'], - 'text_body': 'hi', - }, - { - 'subject': 'foo', - # no identifier - 'identifier': '', - 'direct_recipients': ['gbiv@google.com'], - 'text_body': 'hi', - }, - { - 'subject': 'foo', - 'identifier': 'foo', - # no recipients - 'direct_recipients': [], - 'text_body': 'hi', - }, - { - 'subject': 'foo', - 'identifier': 'foo', - 'direct_recipients': ['gbiv@google.com'], - # no body - }, - { - 'subject': 'foo', - 'identifier': 'foo', - # direct recipients lack @google. - 'direct_recipients': ['gbiv'], - 'text_body': 'hi', - }, - { - 'subject': 'foo', - 'identifier': 'foo', - # non-list recipients - 'direct_recipients': 'gbiv@google.com', - 'text_body': 'hi', - }, - { - 'subject': 'foo', - 'identifier': 'foo', - # non-list recipients - 'well_known_recipients': 'detective', - 'text_body': 'hi', - }, - ] - - sender = email_sender.EmailSender() - for case in test_cases: - with self.assertRaises(ValueError): - sender.SendX20Email(**case) - - write_file.assert_not_called() - - @mock.patch('cros_utils.email_sender.AtomicallyWriteFile') - def test_x20_email_sending_translates_to_reasonable_json(self, write_file): - written_obj = None - - @contextlib.contextmanager - def actual_write_file(file_path): - nonlocal written_obj - - self.assertTrue(file_path.startswith(email_sender.X20_PATH + '/'), - file_path) - f = io.StringIO() - yield f - written_obj = json.loads(f.getvalue()) - - write_file.side_effect = actual_write_file - email_sender.EmailSender().SendX20Email( - subject='hello', - identifier='world', - well_known_recipients=['detective'], - direct_recipients=['gbiv@google.com'], - text_body='text', - html_body='html', - ) - - self.assertEqual( - written_obj, { - 'subject': 'hello', - 'email_identifier': 'world', - 'well_known_recipients': ['detective'], - 'direct_recipients': ['gbiv@google.com'], - 'body': 'text', - 'html_body': 'html', - }) - - -if __name__ == '__main__': - unittest.main() + """Tests for email_sender.""" + + @mock.patch("cros_utils.email_sender.AtomicallyWriteFile") + def test_x20_email_sending_rejects_invalid_inputs(self, write_file): + test_cases = [ + { + # no subject + "subject": "", + "identifier": "foo", + "direct_recipients": ["gbiv@google.com"], + "text_body": "hi", + }, + { + "subject": "foo", + # no identifier + "identifier": "", + "direct_recipients": ["gbiv@google.com"], + "text_body": "hi", + }, + { + "subject": "foo", + "identifier": "foo", + # no recipients + "direct_recipients": [], + "text_body": "hi", + }, + { + "subject": "foo", + "identifier": "foo", + "direct_recipients": ["gbiv@google.com"], + # no body + }, + { + "subject": "foo", + "identifier": "foo", + # direct recipients lack @google. 
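+                # (SendX20Email requires each to end with @google.com.)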
+ "direct_recipients": ["gbiv"], + "text_body": "hi", + }, + { + "subject": "foo", + "identifier": "foo", + # non-list recipients + "direct_recipients": "gbiv@google.com", + "text_body": "hi", + }, + { + "subject": "foo", + "identifier": "foo", + # non-list recipients + "well_known_recipients": "detective", + "text_body": "hi", + }, + ] + + sender = email_sender.EmailSender() + for case in test_cases: + with self.assertRaises(ValueError): + sender.SendX20Email(**case) + + write_file.assert_not_called() + + @mock.patch("cros_utils.email_sender.AtomicallyWriteFile") + def test_x20_email_sending_translates_to_reasonable_json(self, write_file): + written_obj = None + + @contextlib.contextmanager + def actual_write_file(file_path): + nonlocal written_obj + + self.assertTrue( + file_path.startswith(email_sender.X20_PATH + "/"), file_path + ) + f = io.StringIO() + yield f + written_obj = json.loads(f.getvalue()) + + write_file.side_effect = actual_write_file + email_sender.EmailSender().SendX20Email( + subject="hello", + identifier="world", + well_known_recipients=["detective"], + direct_recipients=["gbiv@google.com"], + text_body="text", + html_body="html", + ) + + self.assertEqual( + written_obj, + { + "subject": "hello", + "email_identifier": "world", + "well_known_recipients": ["detective"], + "direct_recipients": ["gbiv@google.com"], + "body": "text", + "html_body": "html", + }, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/cros_utils/file_utils.py b/cros_utils/file_utils.py index 437deadb..23b3969b 100644 --- a/cros_utils/file_utils.py +++ b/cros_utils/file_utils.py @@ -15,78 +15,83 @@ from cros_utils import command_executer class FileUtils(object): - """Utilities for operations on files.""" - _instance = None - DRY_RUN = False - - @classmethod - def Configure(cls, dry_run): - cls.DRY_RUN = dry_run - - def __new__(cls, *args, **kwargs): - if not cls._instance: - if cls.DRY_RUN: - cls._instance = super(FileUtils, cls).__new__(MockFileUtils, *args, - **kwargs) - else: - cls._instance = super(FileUtils, cls).__new__(cls, *args, **kwargs) - return cls._instance - - def Md5File(self, filename, log_level='verbose', _block_size=2**10): - command = 'md5sum %s' % filename - ce = command_executer.GetCommandExecuter(log_level=log_level) - ret, out, _ = ce.RunCommandWOutput(command) - if ret: - raise RuntimeError('Could not run md5sum on: %s' % filename) - - return out.strip().split()[0] - - def CanonicalizeChromeOSRoot(self, chromeos_root): - chromeos_root = os.path.expanduser(chromeos_root) - if os.path.isdir(os.path.join(chromeos_root, 'chromite')): - return chromeos_root - else: - return None - - def ChromeOSRootFromImage(self, chromeos_image): - chromeos_root = os.path.join( - os.path.dirname(chromeos_image), '../../../../..') - return self.CanonicalizeChromeOSRoot(chromeos_root) - - def MkDirP(self, path): - try: - os.makedirs(path) - except OSError as exc: - if exc.errno == errno.EEXIST: - pass - else: - raise - - def RmDir(self, path): - shutil.rmtree(path, ignore_errors=True) - - def WriteFile(self, path, contents): - with open(path, 'w', encoding='utf-8') as f: - f.write(contents) + """Utilities for operations on files.""" + + _instance = None + DRY_RUN = False + + @classmethod + def Configure(cls, dry_run): + cls.DRY_RUN = dry_run + + def __new__(cls, *args, **kwargs): + if not cls._instance: + if cls.DRY_RUN: + cls._instance = super(FileUtils, cls).__new__( + MockFileUtils, *args, **kwargs + ) + else: + cls._instance = super(FileUtils, cls).__new__( + cls, *args, 
**kwargs + ) + return cls._instance + + def Md5File(self, filename, log_level="verbose", _block_size=2 ** 10): + command = "md5sum %s" % filename + ce = command_executer.GetCommandExecuter(log_level=log_level) + ret, out, _ = ce.RunCommandWOutput(command) + if ret: + raise RuntimeError("Could not run md5sum on: %s" % filename) + + return out.strip().split()[0] + + def CanonicalizeChromeOSRoot(self, chromeos_root): + chromeos_root = os.path.expanduser(chromeos_root) + if os.path.isdir(os.path.join(chromeos_root, "chromite")): + return chromeos_root + else: + return None + + def ChromeOSRootFromImage(self, chromeos_image): + chromeos_root = os.path.join( + os.path.dirname(chromeos_image), "../../../../.." + ) + return self.CanonicalizeChromeOSRoot(chromeos_root) + + def MkDirP(self, path): + try: + os.makedirs(path) + except OSError as exc: + if exc.errno == errno.EEXIST: + pass + else: + raise + + def RmDir(self, path): + shutil.rmtree(path, ignore_errors=True) + + def WriteFile(self, path, contents): + with open(path, "w", encoding="utf-8") as f: + f.write(contents) class MockFileUtils(FileUtils): - """Mock class for file utilities.""" + """Mock class for file utilities.""" - def Md5File(self, filename, log_level='verbose', _block_size=2**10): - return 'd41d8cd98f00b204e9800998ecf8427e' + def Md5File(self, filename, log_level="verbose", _block_size=2 ** 10): + return "d41d8cd98f00b204e9800998ecf8427e" - def CanonicalizeChromeOSRoot(self, chromeos_root): - return '/tmp/chromeos_root' + def CanonicalizeChromeOSRoot(self, chromeos_root): + return "/tmp/chromeos_root" - def ChromeOSRootFromImage(self, chromeos_image): - return '/tmp/chromeos_root' + def ChromeOSRootFromImage(self, chromeos_image): + return "/tmp/chromeos_root" - def RmDir(self, path): - pass + def RmDir(self, path): + pass - def MkDirP(self, path): - pass + def MkDirP(self, path): + pass - def WriteFile(self, path, contents): - pass + def WriteFile(self, path, contents): + pass diff --git a/cros_utils/html_tools.py b/cros_utils/html_tools.py index c23995b4..04ea93a1 100644 --- a/cros_utils/html_tools.py +++ b/cros_utils/html_tools.py @@ -7,7 +7,8 @@ def GetPageHeader(page_title): - return """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + return ( + """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html> <head> @@ -32,64 +33,68 @@ function displayRow(id){ </head> <body> -""" % page_title +""" + % page_title + ) def GetListHeader(): - return '<ul>' + return "<ul>" def GetListItem(text): - return '<li>%s</li>' % text + return "<li>%s</li>" % text def GetListFooter(): - return '</ul>' + return "</ul>" def GetList(items): - return '<ul>%s</ul>' % ''.join(['<li>%s</li>' % item for item in items]) + return "<ul>%s</ul>" % "".join(["<li>%s</li>" % item for item in items]) def GetParagraph(text): - return '<p>%s</p>' % text + return "<p>%s</p>" % text def GetFooter(): - return '</body>\n</html>' + return "</body>\n</html>" def GetHeader(text, h=1): - return '<h%s>%s</h%s>' % (h, text, h) + return "<h%s>%s</h%s>" % (h, text, h) def GetTableHeader(headers): - row = ''.join(['<th>%s</th>' % header for header in headers]) - return '<table><tr>%s</tr>' % row + row = "".join(["<th>%s</th>" % header for header in headers]) + return "<table><tr>%s</tr>" % row def GetTableFooter(): - return '</table>' + return "</table>" def FormatLineBreaks(text): - return text.replace('\n', '<br/>') + return text.replace("\n", "<br/>") def GetTableCell(text): - 
return '<td>%s</td>' % FormatLineBreaks(str(text)) + return "<td>%s</td>" % FormatLineBreaks(str(text)) def GetTableRow(columns): - return '<tr>%s</tr>' % '\n'.join([GetTableCell(column) for column in columns]) + return "<tr>%s</tr>" % "\n".join( + [GetTableCell(column) for column in columns] + ) def GetTable(headers, rows): - table = [GetTableHeader(headers)] - table.extend([GetTableRow(row) for row in rows]) - table.append(GetTableFooter()) - return '\n'.join(table) + table = [GetTableHeader(headers)] + table.extend([GetTableRow(row) for row in rows]) + table.append(GetTableFooter()) + return "\n".join(table) def GetLink(link, text): - return "<a href='%s'>%s</a>" % (link, text) + return "<a href='%s'>%s</a>" % (link, text) diff --git a/cros_utils/locks.py b/cros_utils/locks.py index 365fe044..b7eacd39 100644 --- a/cros_utils/locks.py +++ b/cros_utils/locks.py @@ -10,40 +10,44 @@ from __future__ import print_function import time -import lock_machine - from cros_utils import logger +import lock_machine def AcquireLock(machines, chromeos_root, timeout=1200): - """Acquire lock for machine(s) with timeout.""" - start_time = time.time() - locked = True - sleep_time = min(10, timeout / 10.0) - while True: - try: - lock_machine.LockManager(machines, False, - chromeos_root).UpdateMachines(True) - break - except Exception as e: - if time.time() - start_time > timeout: - locked = False - logger.GetLogger().LogWarning( - 'Could not acquire lock on {0} within {1} seconds: {2}'.format( - repr(machines), timeout, str(e))) - break - time.sleep(sleep_time) - return locked + """Acquire lock for machine(s) with timeout.""" + start_time = time.time() + locked = True + sleep_time = min(10, timeout / 10.0) + while True: + try: + lock_machine.LockManager( + machines, False, chromeos_root + ).UpdateMachines(True) + break + except Exception as e: + if time.time() - start_time > timeout: + locked = False + logger.GetLogger().LogWarning( + "Could not acquire lock on {0} within {1} seconds: {2}".format( + repr(machines), timeout, str(e) + ) + ) + break + time.sleep(sleep_time) + return locked def ReleaseLock(machines, chromeos_root): - """Release locked machine(s).""" - unlocked = True - try: - lock_machine.LockManager(machines, False, - chromeos_root).UpdateMachines(False) - except Exception as e: - unlocked = False - logger.GetLogger().LogWarning( - 'Could not unlock %s. %s' % (repr(machines), str(e))) - return unlocked + """Release locked machine(s).""" + unlocked = True + try: + lock_machine.LockManager(machines, False, chromeos_root).UpdateMachines( + False + ) + except Exception as e: + unlocked = False + logger.GetLogger().LogWarning( + "Could not unlock %s. 
%s" % (repr(machines), str(e)) + ) + return unlocked diff --git a/cros_utils/logger.py b/cros_utils/logger.py index 16ba8971..bf50e01e 100644 --- a/cros_utils/logger.py +++ b/cros_utils/logger.py @@ -15,350 +15,380 @@ import traceback # TODO(yunlian@google.com): Use GetRoot from misc def GetRoot(scr_name): - """Break up pathname into (dir+name).""" - abs_path = os.path.abspath(scr_name) - return (os.path.dirname(abs_path), os.path.basename(abs_path)) + """Break up pathname into (dir+name).""" + abs_path = os.path.abspath(scr_name) + return (os.path.dirname(abs_path), os.path.basename(abs_path)) class Logger(object): - """Logging helper class.""" - - MAX_LOG_FILES = 10 - - def __init__(self, rootdir, basefilename, print_console, subdir='logs'): - logdir = os.path.join(rootdir, subdir) - basename = os.path.join(logdir, basefilename) - - try: - os.makedirs(logdir) - except OSError: - pass - # print("Warning: Logs directory '%s' already exists." % logdir) - - self.print_console = print_console - - self._CreateLogFileHandles(basename) - - self._WriteTo(self.cmdfd, ' '.join(sys.argv), True) - - def _AddSuffix(self, basename, suffix): - return '%s%s' % (basename, suffix) - - def _FindSuffix(self, basename): - timestamps = [] - found_suffix = None - for i in range(self.MAX_LOG_FILES): - suffix = str(i) - suffixed_basename = self._AddSuffix(basename, suffix) - cmd_file = '%s.cmd' % suffixed_basename - if not os.path.exists(cmd_file): - found_suffix = suffix - break - timestamps.append(os.stat(cmd_file).st_mtime) - - if found_suffix: - return found_suffix - - # Try to pick the oldest file with the suffix and return that one. - suffix = str(timestamps.index(min(timestamps))) - # print ("Warning: Overwriting log file: %s" % - # self._AddSuffix(basename, suffix)) - return suffix - - def _CreateLogFileHandle(self, name): - fd = None - try: - fd = open(name, 'w') - except IOError: - print('Warning: could not open %s for writing.' % name) - return fd - - def _CreateLogFileHandles(self, basename): - suffix = self._FindSuffix(basename) - suffixed_basename = self._AddSuffix(basename, suffix) - - self.cmdfd = self._CreateLogFileHandle('%s.cmd' % suffixed_basename) - self.stdout = self._CreateLogFileHandle('%s.out' % suffixed_basename) - self.stderr = self._CreateLogFileHandle('%s.err' % suffixed_basename) - - self._CreateLogFileSymlinks(basename, suffixed_basename) - - # Symlink unsuffixed basename to currently suffixed one. - def _CreateLogFileSymlinks(self, basename, suffixed_basename): - try: - for extension in ['cmd', 'out', 'err']: - src_file = '%s.%s' % (os.path.basename(suffixed_basename), extension) - dest_file = '%s.%s' % (basename, extension) - if os.path.exists(dest_file): - os.remove(dest_file) - os.symlink(src_file, dest_file) - except Exception as ex: - print('Exception while creating symlinks: %s' % str(ex)) - - def _WriteTo(self, fd, msg, flush): - if fd: - fd.write(msg) - if flush: - fd.flush() - - def LogStartDots(self, print_to_console=True): - term_fd = self._GetStdout(print_to_console) - if term_fd: - term_fd.flush() - term_fd.write('. ') - term_fd.flush() - - def LogAppendDot(self, print_to_console=True): - term_fd = self._GetStdout(print_to_console) - if term_fd: - term_fd.write('. 
') - term_fd.flush() - - def LogEndDots(self, print_to_console=True): - term_fd = self._GetStdout(print_to_console) - if term_fd: - term_fd.write('\n') - term_fd.flush() - - def LogMsg(self, file_fd, term_fd, msg, flush=True): - if file_fd: - self._WriteTo(file_fd, msg, flush) - if self.print_console: - self._WriteTo(term_fd, msg, flush) - - def _GetStdout(self, print_to_console): - if print_to_console: - return sys.stdout - return None - - def _GetStderr(self, print_to_console): - if print_to_console: - return sys.stderr - return None - - def LogCmdToFileOnly(self, cmd, machine='', user=None): - if not self.cmdfd: - return - - host = ('%s@%s' % (user, machine)) if user else machine - flush = True - cmd_string = 'CMD (%s): %s\n' % (host, cmd) - self._WriteTo(self.cmdfd, cmd_string, flush) - - def LogCmd(self, cmd, machine='', user=None, print_to_console=True): - if user: - host = '%s@%s' % (user, machine) - else: - host = machine - - self.LogMsg(self.cmdfd, self._GetStdout(print_to_console), - 'CMD (%s): %s\n' % (host, cmd)) - - def LogFatal(self, msg, print_to_console=True): - self.LogMsg(self.stderr, self._GetStderr(print_to_console), - 'FATAL: %s\n' % msg) - self.LogMsg(self.stderr, self._GetStderr(print_to_console), - '\n'.join(traceback.format_stack())) - sys.exit(1) - - def LogError(self, msg, print_to_console=True): - self.LogMsg(self.stderr, self._GetStderr(print_to_console), - 'ERROR: %s\n' % msg) - - def LogWarning(self, msg, print_to_console=True): - self.LogMsg(self.stderr, self._GetStderr(print_to_console), - 'WARNING: %s\n' % msg) - - def LogOutput(self, msg, print_to_console=True): - self.LogMsg(self.stdout, self._GetStdout(print_to_console), - 'OUTPUT: %s\n' % msg) - - def LogFatalIf(self, condition, msg): - if condition: - self.LogFatal(msg) - - def LogErrorIf(self, condition, msg): - if condition: - self.LogError(msg) - - def LogWarningIf(self, condition, msg): - if condition: - self.LogWarning(msg) - - def LogCommandOutput(self, msg, print_to_console=True): - self.LogMsg( - self.stdout, self._GetStdout(print_to_console), msg, flush=False) - - def LogCommandError(self, msg, print_to_console=True): - self.LogMsg( - self.stderr, self._GetStderr(print_to_console), msg, flush=False) - - def Flush(self): - self.cmdfd.flush() - self.stdout.flush() - self.stderr.flush() + """Logging helper class.""" + + MAX_LOG_FILES = 10 + + def __init__(self, rootdir, basefilename, print_console, subdir="logs"): + logdir = os.path.join(rootdir, subdir) + basename = os.path.join(logdir, basefilename) + + try: + os.makedirs(logdir) + except OSError: + pass + # print("Warning: Logs directory '%s' already exists." % logdir) + + self.print_console = print_console + + self._CreateLogFileHandles(basename) + + self._WriteTo(self.cmdfd, " ".join(sys.argv), True) + + def _AddSuffix(self, basename, suffix): + return "%s%s" % (basename, suffix) + + def _FindSuffix(self, basename): + timestamps = [] + found_suffix = None + for i in range(self.MAX_LOG_FILES): + suffix = str(i) + suffixed_basename = self._AddSuffix(basename, suffix) + cmd_file = "%s.cmd" % suffixed_basename + if not os.path.exists(cmd_file): + found_suffix = suffix + break + timestamps.append(os.stat(cmd_file).st_mtime) + + if found_suffix: + return found_suffix + + # Try to pick the oldest file with the suffix and return that one. 
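+        # All MAX_LOG_FILES slots are taken, so reuse the slot whose .cmd
+        # file has the smallest mtime (the least recently written logs).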
+ suffix = str(timestamps.index(min(timestamps))) + # print ("Warning: Overwriting log file: %s" % + # self._AddSuffix(basename, suffix)) + return suffix + + def _CreateLogFileHandle(self, name): + fd = None + try: + fd = open(name, "w") + except IOError: + print("Warning: could not open %s for writing." % name) + return fd + + def _CreateLogFileHandles(self, basename): + suffix = self._FindSuffix(basename) + suffixed_basename = self._AddSuffix(basename, suffix) + + self.cmdfd = self._CreateLogFileHandle("%s.cmd" % suffixed_basename) + self.stdout = self._CreateLogFileHandle("%s.out" % suffixed_basename) + self.stderr = self._CreateLogFileHandle("%s.err" % suffixed_basename) + + self._CreateLogFileSymlinks(basename, suffixed_basename) + + # Symlink unsuffixed basename to currently suffixed one. + def _CreateLogFileSymlinks(self, basename, suffixed_basename): + try: + for extension in ["cmd", "out", "err"]: + src_file = "%s.%s" % ( + os.path.basename(suffixed_basename), + extension, + ) + dest_file = "%s.%s" % (basename, extension) + if os.path.exists(dest_file): + os.remove(dest_file) + os.symlink(src_file, dest_file) + except Exception as ex: + print("Exception while creating symlinks: %s" % str(ex)) + + def _WriteTo(self, fd, msg, flush): + if fd: + fd.write(msg) + if flush: + fd.flush() + + def LogStartDots(self, print_to_console=True): + term_fd = self._GetStdout(print_to_console) + if term_fd: + term_fd.flush() + term_fd.write(". ") + term_fd.flush() + + def LogAppendDot(self, print_to_console=True): + term_fd = self._GetStdout(print_to_console) + if term_fd: + term_fd.write(". ") + term_fd.flush() + + def LogEndDots(self, print_to_console=True): + term_fd = self._GetStdout(print_to_console) + if term_fd: + term_fd.write("\n") + term_fd.flush() + + def LogMsg(self, file_fd, term_fd, msg, flush=True): + if file_fd: + self._WriteTo(file_fd, msg, flush) + if self.print_console: + self._WriteTo(term_fd, msg, flush) + + def _GetStdout(self, print_to_console): + if print_to_console: + return sys.stdout + return None + + def _GetStderr(self, print_to_console): + if print_to_console: + return sys.stderr + return None + + def LogCmdToFileOnly(self, cmd, machine="", user=None): + if not self.cmdfd: + return + + host = ("%s@%s" % (user, machine)) if user else machine + flush = True + cmd_string = "CMD (%s): %s\n" % (host, cmd) + self._WriteTo(self.cmdfd, cmd_string, flush) + + def LogCmd(self, cmd, machine="", user=None, print_to_console=True): + if user: + host = "%s@%s" % (user, machine) + else: + host = machine + + self.LogMsg( + self.cmdfd, + self._GetStdout(print_to_console), + "CMD (%s): %s\n" % (host, cmd), + ) + + def LogFatal(self, msg, print_to_console=True): + self.LogMsg( + self.stderr, self._GetStderr(print_to_console), "FATAL: %s\n" % msg + ) + self.LogMsg( + self.stderr, + self._GetStderr(print_to_console), + "\n".join(traceback.format_stack()), + ) + sys.exit(1) + + def LogError(self, msg, print_to_console=True): + self.LogMsg( + self.stderr, self._GetStderr(print_to_console), "ERROR: %s\n" % msg + ) + + def LogWarning(self, msg, print_to_console=True): + self.LogMsg( + self.stderr, + self._GetStderr(print_to_console), + "WARNING: %s\n" % msg, + ) + + def LogOutput(self, msg, print_to_console=True): + self.LogMsg( + self.stdout, self._GetStdout(print_to_console), "OUTPUT: %s\n" % msg + ) + + def LogFatalIf(self, condition, msg): + if condition: + self.LogFatal(msg) + + def LogErrorIf(self, condition, msg): + if condition: + self.LogError(msg) + + def LogWarningIf(self, condition, 
msg): + if condition: + self.LogWarning(msg) + + def LogCommandOutput(self, msg, print_to_console=True): + self.LogMsg( + self.stdout, self._GetStdout(print_to_console), msg, flush=False + ) + + def LogCommandError(self, msg, print_to_console=True): + self.LogMsg( + self.stderr, self._GetStderr(print_to_console), msg, flush=False + ) + + def Flush(self): + self.cmdfd.flush() + self.stdout.flush() + self.stderr.flush() class MockLogger(object): - """Logging helper class.""" - - MAX_LOG_FILES = 10 - - def __init__(self, *_args, **_kwargs): - self.stdout = sys.stdout - self.stderr = sys.stderr - - def _AddSuffix(self, basename, suffix): - return '%s%s' % (basename, suffix) - - def _FindSuffix(self, basename): - timestamps = [] - found_suffix = None - for i in range(self.MAX_LOG_FILES): - suffix = str(i) - suffixed_basename = self._AddSuffix(basename, suffix) - cmd_file = '%s.cmd' % suffixed_basename - if not os.path.exists(cmd_file): - found_suffix = suffix - break - timestamps.append(os.stat(cmd_file).st_mtime) - - if found_suffix: - return found_suffix - - # Try to pick the oldest file with the suffix and return that one. - suffix = str(timestamps.index(min(timestamps))) - # print ("Warning: Overwriting log file: %s" % - # self._AddSuffix(basename, suffix)) - return suffix - - def _CreateLogFileHandle(self, name): - print('MockLogger: creating open file handle for %s (writing)' % name) - - def _CreateLogFileHandles(self, basename): - suffix = self._FindSuffix(basename) - suffixed_basename = self._AddSuffix(basename, suffix) - - print('MockLogger: opening file %s.cmd' % suffixed_basename) - print('MockLogger: opening file %s.out' % suffixed_basename) - print('MockLogger: opening file %s.err' % suffixed_basename) + """Logging helper class.""" + + MAX_LOG_FILES = 10 + + def __init__(self, *_args, **_kwargs): + self.stdout = sys.stdout + self.stderr = sys.stderr + + def _AddSuffix(self, basename, suffix): + return "%s%s" % (basename, suffix) + + def _FindSuffix(self, basename): + timestamps = [] + found_suffix = None + for i in range(self.MAX_LOG_FILES): + suffix = str(i) + suffixed_basename = self._AddSuffix(basename, suffix) + cmd_file = "%s.cmd" % suffixed_basename + if not os.path.exists(cmd_file): + found_suffix = suffix + break + timestamps.append(os.stat(cmd_file).st_mtime) + + if found_suffix: + return found_suffix + + # Try to pick the oldest file with the suffix and return that one. + suffix = str(timestamps.index(min(timestamps))) + # print ("Warning: Overwriting log file: %s" % + # self._AddSuffix(basename, suffix)) + return suffix + + def _CreateLogFileHandle(self, name): + print("MockLogger: creating open file handle for %s (writing)" % name) + + def _CreateLogFileHandles(self, basename): + suffix = self._FindSuffix(basename) + suffixed_basename = self._AddSuffix(basename, suffix) + + print("MockLogger: opening file %s.cmd" % suffixed_basename) + print("MockLogger: opening file %s.out" % suffixed_basename) + print("MockLogger: opening file %s.err" % suffixed_basename) + + self._CreateLogFileSymlinks(basename, suffixed_basename) + + # Symlink unsuffixed basename to currently suffixed one. 
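+    # Unlike Logger, this mock only prints the would-be os.symlink() calls
+    # rather than touching the filesystem.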
+ def _CreateLogFileSymlinks(self, basename, suffixed_basename): + for extension in ["cmd", "out", "err"]: + src_file = "%s.%s" % ( + os.path.basename(suffixed_basename), + extension, + ) + dest_file = "%s.%s" % (basename, extension) + print( + "MockLogger: Calling os.symlink(%s, %s)" % (src_file, dest_file) + ) + + def _WriteTo(self, _fd, msg, _flush): + print("MockLogger: %s" % msg) + + def LogStartDots(self, _print_to_console=True): + print(". ") + + def LogAppendDot(self, _print_to_console=True): + print(". ") + + def LogEndDots(self, _print_to_console=True): + print("\n") + + def LogMsg(self, _file_fd, _term_fd, msg, **_kwargs): + print("MockLogger: %s" % msg) + + def _GetStdout(self, _print_to_console): + return None + + def _GetStderr(self, _print_to_console): + return None + + def LogCmdToFileOnly(self, *_args, **_kwargs): + return + + # def LogCmdToFileOnly(self, cmd, machine='', user=None): + # host = ('%s@%s' % (user, machine)) if user else machine + # cmd_string = 'CMD (%s): %s\n' % (host, cmd) + # print('MockLogger: Writing to file ONLY: %s' % cmd_string) + + def LogCmd(self, cmd, machine="", user=None, print_to_console=True): + if user: + host = "%s@%s" % (user, machine) + else: + host = machine + + self.LogMsg( + 0, self._GetStdout(print_to_console), "CMD (%s): %s\n" % (host, cmd) + ) + + def LogFatal(self, msg, print_to_console=True): + self.LogMsg(0, self._GetStderr(print_to_console), "FATAL: %s\n" % msg) + self.LogMsg( + 0, + self._GetStderr(print_to_console), + "\n".join(traceback.format_stack()), + ) + print("MockLogger: Calling sysexit(1)") + + def LogError(self, msg, print_to_console=True): + self.LogMsg(0, self._GetStderr(print_to_console), "ERROR: %s\n" % msg) + + def LogWarning(self, msg, print_to_console=True): + self.LogMsg(0, self._GetStderr(print_to_console), "WARNING: %s\n" % msg) + + def LogOutput(self, msg, print_to_console=True): + self.LogMsg(0, self._GetStdout(print_to_console), "OUTPUT: %s\n" % msg) + + def LogFatalIf(self, condition, msg): + if condition: + self.LogFatal(msg) - self._CreateLogFileSymlinks(basename, suffixed_basename) + def LogErrorIf(self, condition, msg): + if condition: + self.LogError(msg) - # Symlink unsuffixed basename to currently suffixed one. - def _CreateLogFileSymlinks(self, basename, suffixed_basename): - for extension in ['cmd', 'out', 'err']: - src_file = '%s.%s' % (os.path.basename(suffixed_basename), extension) - dest_file = '%s.%s' % (basename, extension) - print('MockLogger: Calling os.symlink(%s, %s)' % (src_file, dest_file)) + def LogWarningIf(self, condition, msg): + if condition: + self.LogWarning(msg) - def _WriteTo(self, _fd, msg, _flush): - print('MockLogger: %s' % msg) + def LogCommandOutput(self, msg, print_to_console=True): + self.LogMsg( + self.stdout, self._GetStdout(print_to_console), msg, flush=False + ) - def LogStartDots(self, _print_to_console=True): - print('. ') + def LogCommandError(self, msg, print_to_console=True): + self.LogMsg( + self.stderr, self._GetStderr(print_to_console), msg, flush=False + ) - def LogAppendDot(self, _print_to_console=True): - print('. 
') - - def LogEndDots(self, _print_to_console=True): - print('\n') - - def LogMsg(self, _file_fd, _term_fd, msg, **_kwargs): - print('MockLogger: %s' % msg) - - def _GetStdout(self, _print_to_console): - return None - - def _GetStderr(self, _print_to_console): - return None - - def LogCmdToFileOnly(self, *_args, **_kwargs): - return - - # def LogCmdToFileOnly(self, cmd, machine='', user=None): - # host = ('%s@%s' % (user, machine)) if user else machine - # cmd_string = 'CMD (%s): %s\n' % (host, cmd) - # print('MockLogger: Writing to file ONLY: %s' % cmd_string) - - def LogCmd(self, cmd, machine='', user=None, print_to_console=True): - if user: - host = '%s@%s' % (user, machine) - else: - host = machine - - self.LogMsg(0, self._GetStdout(print_to_console), - 'CMD (%s): %s\n' % (host, cmd)) - - def LogFatal(self, msg, print_to_console=True): - self.LogMsg(0, self._GetStderr(print_to_console), 'FATAL: %s\n' % msg) - self.LogMsg(0, self._GetStderr(print_to_console), - '\n'.join(traceback.format_stack())) - print('MockLogger: Calling sysexit(1)') - - def LogError(self, msg, print_to_console=True): - self.LogMsg(0, self._GetStderr(print_to_console), 'ERROR: %s\n' % msg) - - def LogWarning(self, msg, print_to_console=True): - self.LogMsg(0, self._GetStderr(print_to_console), 'WARNING: %s\n' % msg) - - def LogOutput(self, msg, print_to_console=True): - self.LogMsg(0, self._GetStdout(print_to_console), 'OUTPUT: %s\n' % msg) - - def LogFatalIf(self, condition, msg): - if condition: - self.LogFatal(msg) - - def LogErrorIf(self, condition, msg): - if condition: - self.LogError(msg) - - def LogWarningIf(self, condition, msg): - if condition: - self.LogWarning(msg) - - def LogCommandOutput(self, msg, print_to_console=True): - self.LogMsg( - self.stdout, self._GetStdout(print_to_console), msg, flush=False) - - def LogCommandError(self, msg, print_to_console=True): - self.LogMsg( - self.stderr, self._GetStderr(print_to_console), msg, flush=False) - - def Flush(self): - print('MockLogger: Flushing cmdfd, stdout, stderr') + def Flush(self): + print("MockLogger: Flushing cmdfd, stdout, stderr") main_logger = None def InitLogger(script_name, log_dir, print_console=True, mock=False): - """Initialize a global logger. To be called only once.""" - # pylint: disable=global-statement - global main_logger - assert not main_logger, 'The logger has already been initialized' - rootdir, basefilename = GetRoot(script_name) - if not log_dir: - log_dir = rootdir - if not mock: - main_logger = Logger(log_dir, basefilename, print_console) - else: - main_logger = MockLogger(log_dir, basefilename, print_console) + """Initialize a global logger. 
To be called only once.""" + # pylint: disable=global-statement + global main_logger + assert not main_logger, "The logger has already been initialized" + rootdir, basefilename = GetRoot(script_name) + if not log_dir: + log_dir = rootdir + if not mock: + main_logger = Logger(log_dir, basefilename, print_console) + else: + main_logger = MockLogger(log_dir, basefilename, print_console) -def GetLogger(log_dir='', mock=False): - if not main_logger: - InitLogger(sys.argv[0], log_dir, mock=mock) - return main_logger +def GetLogger(log_dir="", mock=False): + if not main_logger: + InitLogger(sys.argv[0], log_dir, mock=mock) + return main_logger def HandleUncaughtExceptions(fun): - """Catches all exceptions that would go outside decorated fun scope.""" + """Catches all exceptions that would go outside decorated fun scope.""" - def _Interceptor(*args, **kwargs): - try: - return fun(*args, **kwargs) - except Exception: - GetLogger().LogFatal('Uncaught exception:\n%s' % traceback.format_exc()) + def _Interceptor(*args, **kwargs): + try: + return fun(*args, **kwargs) + except Exception: + GetLogger().LogFatal( + "Uncaught exception:\n%s" % traceback.format_exc() + ) - return _Interceptor + return _Interceptor diff --git a/cros_utils/machines.py b/cros_utils/machines.py index 0eb6d378..bdd1f322 100644 --- a/cros_utils/machines.py +++ b/cros_utils/machines.py @@ -10,18 +10,18 @@ from __future__ import print_function from cros_utils import command_executer -def MachineIsPingable(machine, logging_level='average'): - """Checks to see if a machine is responding to 'ping'. +def MachineIsPingable(machine, logging_level="average"): + """Checks to see if a machine is responding to 'ping'. - Args: - machine: String containing the name or ip address of the machine to check. - logging_level: The logging level with which to initialize the - command_executer (from command_executor.LOG_LEVEL enum list). + Args: + machine: String containing the name or ip address of the machine to check. + logging_level: The logging level with which to initialize the + command_executer (from command_executor.LOG_LEVEL enum list). - Returns: - Boolean indicating whether machine is responding to ping or not. - """ - ce = command_executer.GetCommandExecuter(log_level=logging_level) - cmd = 'ping -c 1 -w 3 %s' % machine - status = ce.RunCommand(cmd) - return status == 0 + Returns: + Boolean indicating whether machine is responding to ping or not. 
+ """ + ce = command_executer.GetCommandExecuter(log_level=logging_level) + cmd = "ping -c 1 -w 3 %s" % machine + status = ce.RunCommand(cmd) + return status == 0 diff --git a/cros_utils/misc.py b/cros_utils/misc.py index 44935e9e..0b44b994 100644 --- a/cros_utils/misc.py +++ b/cros_utils/misc.py @@ -8,7 +8,8 @@ from __future__ import division from __future__ import print_function -__author__ = 'asharif@google.com (Ahmad Sharif)' + +__author__ = "asharif@google.com (Ahmad Sharif)" from contextlib import contextmanager import os @@ -19,495 +20,545 @@ import sys from cros_utils import command_executer from cros_utils import logger -CHROMEOS_SCRIPTS_DIR = '/mnt/host/source/src/scripts' -TOOLCHAIN_UTILS_PATH = ('/mnt/host/source/src/third_party/toolchain-utils/' - 'cros_utils/toolchain_utils.sh') + +CHROMEOS_SCRIPTS_DIR = "/mnt/host/source/src/scripts" +TOOLCHAIN_UTILS_PATH = ( + "/mnt/host/source/src/third_party/toolchain-utils/" + "cros_utils/toolchain_utils.sh" +) def GetChromeOSVersionFromLSBVersion(lsb_version): - """Get Chromeos version from Lsb version.""" - ce = command_executer.GetCommandExecuter() - command = ('git ls-remote ' - 'https://chromium.googlesource.com/chromiumos/manifest.git ' - 'refs/heads/release-R*') - ret, out, _ = ce.RunCommandWOutput(command, print_to_console=False) - assert ret == 0, 'Command %s failed' % command - lower = [] - for line in out.splitlines(): - mo = re.search(r'refs/heads/release-R(\d+)-(\d+)\.B', line) - if mo: - revision = int(mo.group(1)) - build = int(mo.group(2)) - lsb_build = int(lsb_version.split('.')[0]) - if lsb_build > build: - lower.append(revision) - lower = sorted(lower) - if lower: - return 'R%d-%s' % (lower[-1] + 1, lsb_version) - else: - return 'Unknown' + """Get Chromeos version from Lsb version.""" + ce = command_executer.GetCommandExecuter() + command = ( + "git ls-remote " + "https://chromium.googlesource.com/chromiumos/manifest.git " + "refs/heads/release-R*" + ) + ret, out, _ = ce.RunCommandWOutput(command, print_to_console=False) + assert ret == 0, "Command %s failed" % command + lower = [] + for line in out.splitlines(): + mo = re.search(r"refs/heads/release-R(\d+)-(\d+)\.B", line) + if mo: + revision = int(mo.group(1)) + build = int(mo.group(2)) + lsb_build = int(lsb_version.split(".")[0]) + if lsb_build > build: + lower.append(revision) + lower = sorted(lower) + if lower: + return "R%d-%s" % (lower[-1] + 1, lsb_version) + else: + return "Unknown" def ApplySubs(string, *substitutions): - for pattern, replacement in substitutions: - string = re.sub(pattern, replacement, string) - return string + for pattern, replacement in substitutions: + string = re.sub(pattern, replacement, string) + return string def UnitToNumber(unit_num, base=1000): - """Convert a number with unit to float.""" - unit_dict = {'kilo': base, 'mega': base**2, 'giga': base**3} - unit_num = unit_num.lower() - mo = re.search(r'(\d*)(.+)?', unit_num) - number = mo.group(1) - unit = mo.group(2) - if not unit: - return float(number) - for k, v in unit_dict.items(): - if k.startswith(unit): - return float(number) * v - raise RuntimeError('Unit: %s not found in byte: %s!' 
% (unit, unit_num)) + """Convert a number with unit to float.""" + unit_dict = {"kilo": base, "mega": base ** 2, "giga": base ** 3} + unit_num = unit_num.lower() + mo = re.search(r"(\d*)(.+)?", unit_num) + number = mo.group(1) + unit = mo.group(2) + if not unit: + return float(number) + for k, v in unit_dict.items(): + if k.startswith(unit): + return float(number) * v + raise RuntimeError("Unit: %s not found in byte: %s!" % (unit, unit_num)) def GetFilenameFromString(string): - return ApplySubs( - string, - (r'/', '__'), - (r'\s', '_'), - (r'[\\$="?^]', ''), - ) + return ApplySubs( + string, + (r"/", "__"), + (r"\s", "_"), + (r'[\\$="?^]', ""), + ) def GetRoot(scr_name): - """Break up pathname into (dir+name).""" - abs_path = os.path.abspath(scr_name) - return (os.path.dirname(abs_path), os.path.basename(abs_path)) + """Break up pathname into (dir+name).""" + abs_path = os.path.abspath(scr_name) + return (os.path.dirname(abs_path), os.path.basename(abs_path)) def GetChromeOSKeyFile(chromeos_root): - return os.path.join(chromeos_root, 'src', 'scripts', 'mod_for_test_scripts', - 'ssh_keys', 'testing_rsa') + return os.path.join( + chromeos_root, + "src", + "scripts", + "mod_for_test_scripts", + "ssh_keys", + "testing_rsa", + ) def GetChrootPath(chromeos_root): - return os.path.join(chromeos_root, 'chroot') + return os.path.join(chromeos_root, "chroot") def GetInsideChrootPath(chromeos_root, file_path): - if not file_path.startswith(GetChrootPath(chromeos_root)): - raise RuntimeError("File: %s doesn't seem to be in the chroot: %s" % - (file_path, chromeos_root)) - return file_path[len(GetChrootPath(chromeos_root)):] + if not file_path.startswith(GetChrootPath(chromeos_root)): + raise RuntimeError( + "File: %s doesn't seem to be in the chroot: %s" + % (file_path, chromeos_root) + ) + return file_path[len(GetChrootPath(chromeos_root)) :] def GetOutsideChrootPath(chromeos_root, file_path): - return os.path.join(GetChrootPath(chromeos_root), file_path.lstrip('/')) + return os.path.join(GetChrootPath(chromeos_root), file_path.lstrip("/")) def FormatQuotedCommand(command): - return ApplySubs(command, ('"', r'\"')) + return ApplySubs(command, ('"', r"\"")) def FormatCommands(commands): - return ApplySubs(str(commands), ('&&', '&&\n'), (';', ';\n'), - (r'\n+\s*', '\n')) + return ApplySubs( + str(commands), ("&&", "&&\n"), (";", ";\n"), (r"\n+\s*", "\n") + ) def GetImageDir(chromeos_root, board): - return os.path.join(chromeos_root, 'src', 'build', 'images', board) + return os.path.join(chromeos_root, "src", "build", "images", board) def LabelLatestImage(chromeos_root, board, label, vanilla_path=None): - image_dir = GetImageDir(chromeos_root, board) - latest_image_dir = os.path.join(image_dir, 'latest') - latest_image_dir = os.path.realpath(latest_image_dir) - latest_image_dir = os.path.basename(latest_image_dir) - retval = 0 - with WorkingDirectory(image_dir): - command = 'ln -sf -T %s %s' % (latest_image_dir, label) - ce = command_executer.GetCommandExecuter() - retval = ce.RunCommand(command) - if retval: - return retval - if vanilla_path: - command = 'ln -sf -T %s %s' % (vanilla_path, 'vanilla') - retval2 = ce.RunCommand(command) - return retval2 - return retval + image_dir = GetImageDir(chromeos_root, board) + latest_image_dir = os.path.join(image_dir, "latest") + latest_image_dir = os.path.realpath(latest_image_dir) + latest_image_dir = os.path.basename(latest_image_dir) + retval = 0 + with WorkingDirectory(image_dir): + command = "ln -sf -T %s %s" % (latest_image_dir, label) + ce = 
command_executer.GetCommandExecuter() + retval = ce.RunCommand(command) + if retval: + return retval + if vanilla_path: + command = "ln -sf -T %s %s" % (vanilla_path, "vanilla") + retval2 = ce.RunCommand(command) + return retval2 + return retval def DoesLabelExist(chromeos_root, board, label): - image_label = os.path.join(GetImageDir(chromeos_root, board), label) - return os.path.exists(image_label) + image_label = os.path.join(GetImageDir(chromeos_root, board), label) + return os.path.exists(image_label) def GetBuildPackagesCommand(board, usepkg=False, debug=False): - if usepkg: - usepkg_flag = '--usepkg' - else: - usepkg_flag = '--nousepkg' - if debug: - withdebug_flag = '--withdebug' - else: - withdebug_flag = '--nowithdebug' - return ('%s/build_packages %s --withdev --withtest --withautotest ' - '--skip_toolchain_update %s --board=%s ' - '--accept_licenses=@CHROMEOS' % - (CHROMEOS_SCRIPTS_DIR, usepkg_flag, withdebug_flag, board)) + if usepkg: + usepkg_flag = "--usepkg" + else: + usepkg_flag = "--nousepkg" + if debug: + withdebug_flag = "--withdebug" + else: + withdebug_flag = "--nowithdebug" + return ( + "%s/build_packages %s --withdev --withtest --withautotest " + "--skip_toolchain_update %s --board=%s " + "--accept_licenses=@CHROMEOS" + % (CHROMEOS_SCRIPTS_DIR, usepkg_flag, withdebug_flag, board) + ) def GetBuildImageCommand(board, dev=False): - dev_args = '' - if dev: - dev_args = '--noenable_rootfs_verification --disk_layout=2gb-rootfs' - return ('%s/build_image --board=%s %s test' % - (CHROMEOS_SCRIPTS_DIR, board, dev_args)) + dev_args = "" + if dev: + dev_args = "--noenable_rootfs_verification --disk_layout=2gb-rootfs" + return "%s/build_image --board=%s %s test" % ( + CHROMEOS_SCRIPTS_DIR, + board, + dev_args, + ) def GetSetupBoardCommand(board, usepkg=None, force=None): - """Get setup_board command.""" - options = [] + """Get setup_board command.""" + options = [] - if usepkg: - options.append('--usepkg') - else: - options.append('--nousepkg') + if usepkg: + options.append("--usepkg") + else: + options.append("--nousepkg") - if force: - options.append('--force') + if force: + options.append("--force") - options.append('--accept-licenses=@CHROMEOS') + options.append("--accept-licenses=@CHROMEOS") - return 'setup_board --board=%s %s' % (board, ' '.join(options)) + return "setup_board --board=%s %s" % (board, " ".join(options)) def CanonicalizePath(path): - path = os.path.expanduser(path) - path = os.path.realpath(path) - return path + path = os.path.expanduser(path) + path = os.path.realpath(path) + return path def GetCtargetFromBoard(board, chromeos_root): - """Get Ctarget from board.""" - base_board = board.split('_')[0] - command = ('source %s; get_ctarget_from_board %s' % - (TOOLCHAIN_UTILS_PATH, base_board)) - ce = command_executer.GetCommandExecuter() - ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command) - if ret != 0: - raise ValueError('Board %s is invalid!' % board) - # Remove ANSI escape sequences. - out = StripANSIEscapeSequences(out) - return out.strip() + """Get Ctarget from board.""" + base_board = board.split("_")[0] + command = "source %s; get_ctarget_from_board %s" % ( + TOOLCHAIN_UTILS_PATH, + base_board, + ) + ce = command_executer.GetCommandExecuter() + ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command) + if ret != 0: + raise ValueError("Board %s is invalid!" % board) + # Remove ANSI escape sequences. 
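+    # (Command output from inside the chroot may include terminal color
+    # codes.)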
+ out = StripANSIEscapeSequences(out) + return out.strip() def GetArchFromBoard(board, chromeos_root): - """Get Arch from board.""" - base_board = board.split('_')[0] - command = ('source %s; get_board_arch %s' % - (TOOLCHAIN_UTILS_PATH, base_board)) - ce = command_executer.GetCommandExecuter() - ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command) - if ret != 0: - raise ValueError('Board %s is invalid!' % board) - # Remove ANSI escape sequences. - out = StripANSIEscapeSequences(out) - return out.strip() + """Get Arch from board.""" + base_board = board.split("_")[0] + command = "source %s; get_board_arch %s" % ( + TOOLCHAIN_UTILS_PATH, + base_board, + ) + ce = command_executer.GetCommandExecuter() + ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command) + if ret != 0: + raise ValueError("Board %s is invalid!" % board) + # Remove ANSI escape sequences. + out = StripANSIEscapeSequences(out) + return out.strip() def GetGccLibsDestForBoard(board, chromeos_root): - """Get gcc libs destination from board.""" - arch = GetArchFromBoard(board, chromeos_root) - if arch == 'x86': - return '/build/%s/usr/lib/gcc/' % board - if arch == 'amd64': - return '/build/%s/usr/lib64/gcc/' % board - if arch == 'arm': - return '/build/%s/usr/lib/gcc/' % board - if arch == 'arm64': - return '/build/%s/usr/lib/gcc/' % board - raise ValueError('Arch %s is invalid!' % arch) + """Get gcc libs destination from board.""" + arch = GetArchFromBoard(board, chromeos_root) + if arch == "x86": + return "/build/%s/usr/lib/gcc/" % board + if arch == "amd64": + return "/build/%s/usr/lib64/gcc/" % board + if arch == "arm": + return "/build/%s/usr/lib/gcc/" % board + if arch == "arm64": + return "/build/%s/usr/lib/gcc/" % board + raise ValueError("Arch %s is invalid!" 
% arch) def StripANSIEscapeSequences(string): - string = re.sub(r'\x1b\[[0-9]*[a-zA-Z]', '', string) - return string + string = re.sub(r"\x1b\[[0-9]*[a-zA-Z]", "", string) + return string def GetChromeSrcDir(): - return 'var/cache/distfiles/target/chrome-src/src' + return "var/cache/distfiles/target/chrome-src/src" def GetEnvStringFromDict(env_dict): - return ' '.join(['%s="%s"' % var for var in env_dict.items()]) + return " ".join(['%s="%s"' % var for var in env_dict.items()]) def MergeEnvStringWithDict(env_string, env_dict, prepend=True): - """Merge env string with dict.""" - if not env_string.strip(): - return GetEnvStringFromDict(env_dict) - override_env_list = [] - ce = command_executer.GetCommandExecuter() - for k, v in env_dict.items(): - v = v.strip('"\'') - if prepend: - new_env = '%s="%s $%s"' % (k, v, k) - else: - new_env = '%s="$%s %s"' % (k, k, v) - command = '; '.join([env_string, new_env, 'echo $%s' % k]) - ret, out, _ = ce.RunCommandWOutput(command) - override_env_list.append('%s=%r' % (k, out.strip())) - ret = env_string + ' ' + ' '.join(override_env_list) - return ret.strip() + """Merge env string with dict.""" + if not env_string.strip(): + return GetEnvStringFromDict(env_dict) + override_env_list = [] + ce = command_executer.GetCommandExecuter() + for k, v in env_dict.items(): + v = v.strip("\"'") + if prepend: + new_env = '%s="%s $%s"' % (k, v, k) + else: + new_env = '%s="$%s %s"' % (k, k, v) + command = "; ".join([env_string, new_env, "echo $%s" % k]) + ret, out, _ = ce.RunCommandWOutput(command) + override_env_list.append("%s=%r" % (k, out.strip())) + ret = env_string + " " + " ".join(override_env_list) + return ret.strip() def GetAllImages(chromeos_root, board): - ce = command_executer.GetCommandExecuter() - command = ('find %s/src/build/images/%s -name chromiumos_test_image.bin' % - (chromeos_root, board)) - ret, out, _ = ce.RunCommandWOutput(command) - assert ret == 0, 'Could not run command: %s' % command - return out.splitlines() + ce = command_executer.GetCommandExecuter() + command = "find %s/src/build/images/%s -name chromiumos_test_image.bin" % ( + chromeos_root, + board, + ) + ret, out, _ = ce.RunCommandWOutput(command) + assert ret == 0, "Could not run command: %s" % command + return out.splitlines() def IsFloat(text): - if text is None: - return False - try: - float(text) - return True - except ValueError: - return False + if text is None: + return False + try: + float(text) + return True + except ValueError: + return False def RemoveChromeBrowserObjectFiles(chromeos_root, board): - """Remove any object files from all the posible locations.""" - out_dir = os.path.join( - GetChrootPath(chromeos_root), - 'var/cache/chromeos-chrome/chrome-src/src/out_%s' % board) - if os.path.exists(out_dir): - shutil.rmtree(out_dir) - logger.GetLogger().LogCmd('rm -rf %s' % out_dir) - out_dir = os.path.join( - GetChrootPath(chromeos_root), - 'var/cache/chromeos-chrome/chrome-src-internal/src/out_%s' % board) - if os.path.exists(out_dir): - shutil.rmtree(out_dir) - logger.GetLogger().LogCmd('rm -rf %s' % out_dir) + """Remove any object files from all the posible locations.""" + out_dir = os.path.join( + GetChrootPath(chromeos_root), + "var/cache/chromeos-chrome/chrome-src/src/out_%s" % board, + ) + if os.path.exists(out_dir): + shutil.rmtree(out_dir) + logger.GetLogger().LogCmd("rm -rf %s" % out_dir) + out_dir = os.path.join( + GetChrootPath(chromeos_root), + "var/cache/chromeos-chrome/chrome-src-internal/src/out_%s" % board, + ) + if os.path.exists(out_dir): + 
shutil.rmtree(out_dir) + logger.GetLogger().LogCmd("rm -rf %s" % out_dir) @contextmanager def WorkingDirectory(new_dir): - """Get the working directory.""" - old_dir = os.getcwd() - if old_dir != new_dir: - msg = 'cd %s' % new_dir - logger.GetLogger().LogCmd(msg) - os.chdir(new_dir) - yield new_dir - if old_dir != new_dir: - msg = 'cd %s' % old_dir - logger.GetLogger().LogCmd(msg) - os.chdir(old_dir) + """Get the working directory.""" + old_dir = os.getcwd() + if old_dir != new_dir: + msg = "cd %s" % new_dir + logger.GetLogger().LogCmd(msg) + os.chdir(new_dir) + yield new_dir + if old_dir != new_dir: + msg = "cd %s" % old_dir + logger.GetLogger().LogCmd(msg) + os.chdir(old_dir) def HasGitStagedChanges(git_dir): - """Return True if git repository has staged changes.""" - command = f'cd {git_dir} && git diff --quiet --cached --exit-code HEAD' - return command_executer.GetCommandExecuter().RunCommand( - command, print_to_console=False) + """Return True if git repository has staged changes.""" + command = f"cd {git_dir} && git diff --quiet --cached --exit-code HEAD" + return command_executer.GetCommandExecuter().RunCommand( + command, print_to_console=False + ) def HasGitUnstagedChanges(git_dir): - """Return True if git repository has un-staged changes.""" - command = f'cd {git_dir} && git diff --quiet --exit-code HEAD' - return command_executer.GetCommandExecuter().RunCommand( - command, print_to_console=False) + """Return True if git repository has un-staged changes.""" + command = f"cd {git_dir} && git diff --quiet --exit-code HEAD" + return command_executer.GetCommandExecuter().RunCommand( + command, print_to_console=False + ) def HasGitUntrackedChanges(git_dir): - """Return True if git repository has un-tracked changes.""" - command = (f'cd {git_dir} && test -z ' - '$(git ls-files --exclude-standard --others)') - return command_executer.GetCommandExecuter().RunCommand( - command, print_to_console=False) + """Return True if git repository has un-tracked changes.""" + command = ( + f"cd {git_dir} && test -z " + "$(git ls-files --exclude-standard --others)" + ) + return command_executer.GetCommandExecuter().RunCommand( + command, print_to_console=False + ) def GitGetCommitHash(git_dir, commit_symbolic_name): - """Return githash for the symbolic git commit. + """Return githash for the symbolic git commit. - For example, commit_symbolic_name could be - "cros/gcc.gnu.org/branches/gcc/gcc-4_8-mobile, this function returns the git - hash for this symbolic name. + For example, commit_symbolic_name could be + "cros/gcc.gnu.org/branches/gcc/gcc-4_8-mobile, this function returns the git + hash for this symbolic name. - Args: - git_dir: a git working tree. - commit_symbolic_name: a symbolic name for a particular git commit. + Args: + git_dir: a git working tree. + commit_symbolic_name: a symbolic name for a particular git commit. - Returns: - The git hash for the symbolic name or None if fails. - """ + Returns: + The git hash for the symbolic name or None if fails. 
+ """ - command = (f'cd {git_dir} && git log -n 1' - f' --pretty="format:%H" {commit_symbolic_name}') - rv, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput( - command, print_to_console=False) - if rv == 0: - return out.strip() - return None + command = ( + f"cd {git_dir} && git log -n 1" + f' --pretty="format:%H" {commit_symbolic_name}' + ) + rv, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput( + command, print_to_console=False + ) + if rv == 0: + return out.strip() + return None def IsGitTreeClean(git_dir): - """Test if git tree has no local changes. - - Args: - git_dir: git tree directory. - - Returns: - True if git dir is clean. - """ - if HasGitStagedChanges(git_dir): - logger.GetLogger().LogWarning('Git tree has staged changes.') - return False - if HasGitUnstagedChanges(git_dir): - logger.GetLogger().LogWarning('Git tree has unstaged changes.') - return False - if HasGitUntrackedChanges(git_dir): - logger.GetLogger().LogWarning('Git tree has un-tracked changes.') - return False - return True + """Test if git tree has no local changes. + + Args: + git_dir: git tree directory. + + Returns: + True if git dir is clean. + """ + if HasGitStagedChanges(git_dir): + logger.GetLogger().LogWarning("Git tree has staged changes.") + return False + if HasGitUnstagedChanges(git_dir): + logger.GetLogger().LogWarning("Git tree has unstaged changes.") + return False + if HasGitUntrackedChanges(git_dir): + logger.GetLogger().LogWarning("Git tree has un-tracked changes.") + return False + return True def GetGitChangesAsList(git_dir, path=None, staged=False): - """Get changed files as a list. - - Args: - git_dir: git tree directory. - path: a relative path that is part of the tree directory, could be null. - staged: whether to include staged files as well. - - Returns: - A list containing all the changed files. - """ - command = f'cd {git_dir} && git diff --name-only' - if staged: - command += ' --cached' - if path: - command += ' -- ' + path - _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput( - command, print_to_console=False) - rv = [] - for line in out.splitlines(): - rv.append(line) - return rv + """Get changed files as a list. + + Args: + git_dir: git tree directory. + path: a relative path that is part of the tree directory, could be null. + staged: whether to include staged files as well. + + Returns: + A list containing all the changed files. + """ + command = f"cd {git_dir} && git diff --name-only" + if staged: + command += " --cached" + if path: + command += " -- " + path + _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput( + command, print_to_console=False + ) + rv = [] + for line in out.splitlines(): + rv.append(line) + return rv def IsChromeOsTree(chromeos_root): - return (os.path.isdir( - os.path.join(chromeos_root, 'src/third_party/chromiumos-overlay')) - and os.path.isdir(os.path.join(chromeos_root, 'manifest'))) + return os.path.isdir( + os.path.join(chromeos_root, "src/third_party/chromiumos-overlay") + ) and os.path.isdir(os.path.join(chromeos_root, "manifest")) def DeleteChromeOsTree(chromeos_root, dry_run=False): - """Delete a ChromeOs tree *safely*. - - Args: - chromeos_root: dir of the tree, could be a relative one (but be careful) - dry_run: only prints out the command if True - - Returns: - True if everything is ok. 
- """ - if not IsChromeOsTree(chromeos_root): - logger.GetLogger().LogWarning(f'"{chromeos_root}" does not seem to be a' - ' valid chromeos tree, do nothing.') - return False - cmd0 = f'cd {chromeos_root} && cros_sdk --delete' - if dry_run: - print(cmd0) - else: - if command_executer.GetCommandExecuter().RunCommand( - cmd0, print_to_console=True) != 0: - return False - - cmd1 = ( - f'export CHROMEOSDIRNAME="$(dirname $(cd {chromeos_root} && pwd))" && ' - f'export CHROMEOSBASENAME="$(basename $(cd {chromeos_root} && pwd))" && ' - 'cd $CHROMEOSDIRNAME && sudo rm -fr $CHROMEOSBASENAME') - if dry_run: - print(cmd1) - return True - - return command_executer.GetCommandExecuter().RunCommand( - cmd1, print_to_console=True) == 0 - - -def BooleanPrompt(prompt='Do you want to continue?', - default=True, - true_value='yes', - false_value='no', - prolog=None): - """Helper function for processing boolean choice prompts. - - Args: - prompt: The question to present to the user. - default: Boolean to return if the user just presses enter. - true_value: The text to display that represents a True returned. - false_value: The text to display that represents a False returned. - prolog: The text to display before prompt. - - Returns: - True or False. - """ - true_value, false_value = true_value.lower(), false_value.lower() - true_text, false_text = true_value, false_value - if true_value == false_value: - raise ValueError('true_value and false_value must differ: got %r' % - true_value) - - if default: - true_text = true_text[0].upper() + true_text[1:] - else: - false_text = false_text[0].upper() + false_text[1:] - - prompt = ('\n%s (%s/%s)? ' % (prompt, true_text, false_text)) - - if prolog: - prompt = ('\n%s\n%s' % (prolog, prompt)) - - while True: - try: - # pylint: disable=input-builtin, bad-builtin - response = input(prompt).lower() - except EOFError: - # If the user hits CTRL+D, or stdin is disabled, use the default. - print() - response = None - except KeyboardInterrupt: - # If the user hits CTRL+C, just exit the process. - print() - print('CTRL+C detected; exiting') - sys.exit() - - if not response: - return default - if true_value.startswith(response): - if not false_value.startswith(response): + """Delete a ChromeOs tree *safely*. + + Args: + chromeos_root: dir of the tree, could be a relative one (but be careful) + dry_run: only prints out the command if True + + Returns: + True if everything is ok. + """ + if not IsChromeOsTree(chromeos_root): + logger.GetLogger().LogWarning( + f'"{chromeos_root}" does not seem to be a' + " valid chromeos tree, do nothing." + ) + return False + cmd0 = f"cd {chromeos_root} && cros_sdk --delete" + if dry_run: + print(cmd0) + else: + if ( + command_executer.GetCommandExecuter().RunCommand( + cmd0, print_to_console=True + ) + != 0 + ): + return False + + cmd1 = ( + f'export CHROMEOSDIRNAME="$(dirname $(cd {chromeos_root} && pwd))" && ' + f'export CHROMEOSBASENAME="$(basename $(cd {chromeos_root} && pwd))" && ' + "cd $CHROMEOSDIRNAME && sudo rm -fr $CHROMEOSBASENAME" + ) + if dry_run: + print(cmd1) return True - # common prefix between the two... - elif false_value.startswith(response): - return False + + return ( + command_executer.GetCommandExecuter().RunCommand( + cmd1, print_to_console=True + ) + == 0 + ) + + +def BooleanPrompt( + prompt="Do you want to continue?", + default=True, + true_value="yes", + false_value="no", + prolog=None, +): + """Helper function for processing boolean choice prompts. + + Args: + prompt: The question to present to the user. 
+ default: Boolean to return if the user just presses enter. + true_value: The text to display that represents a True returned. + false_value: The text to display that represents a False returned. + prolog: The text to display before prompt. + + Returns: + True or False. + """ + true_value, false_value = true_value.lower(), false_value.lower() + true_text, false_text = true_value, false_value + if true_value == false_value: + raise ValueError( + "true_value and false_value must differ: got %r" % true_value + ) + + if default: + true_text = true_text[0].upper() + true_text[1:] + else: + false_text = false_text[0].upper() + false_text[1:] + + prompt = "\n%s (%s/%s)? " % (prompt, true_text, false_text) + + if prolog: + prompt = "\n%s\n%s" % (prolog, prompt) + + while True: + try: + # pylint: disable=input-builtin, bad-builtin + response = input(prompt).lower() + except EOFError: + # If the user hits CTRL+D, or stdin is disabled, use the default. + print() + response = None + except KeyboardInterrupt: + # If the user hits CTRL+C, just exit the process. + print() + print("CTRL+C detected; exiting") + sys.exit() + + if not response: + return default + if true_value.startswith(response): + if not false_value.startswith(response): + return True + # common prefix between the two... + elif false_value.startswith(response): + return False # pylint: disable=unused-argument def rgb2short(r, g, b): - """Converts RGB values to xterm-256 color.""" + """Converts RGB values to xterm-256 color.""" - redcolor = [255, 124, 160, 196, 9] - greencolor = [255, 118, 82, 46, 10] + redcolor = [255, 124, 160, 196, 9] + greencolor = [255, 118, 82, 46, 10] - if g == 0: - return redcolor[r // 52] - if r == 0: - return greencolor[g // 52] - return 4 + if g == 0: + return redcolor[r // 52] + if r == 0: + return greencolor[g // 52] + return 4 diff --git a/cros_utils/misc_test.py b/cros_utils/misc_test.py index b47644cf..9713ae10 100755 --- a/cros_utils/misc_test.py +++ b/cros_utils/misc_test.py @@ -8,7 +8,8 @@ from __future__ import print_function -__author__ = 'asharif@google.com (Ahmad Sharif)' + +__author__ = "asharif@google.com (Ahmad Sharif)" # System modules import unittest @@ -18,39 +19,51 @@ from cros_utils import misc class UtilsTest(unittest.TestCase): - """Tests for misc.""" - - def testGetFilenameFromString(self): - string = 'a /b=c"d^$?\\' - filename = misc.GetFilenameFromString(string) - self.assertEqual(filename, 'a___bcd') - - def testPrependMergeEnv(self): - var = 'USE' - use_flags = 'hello 123' - added_use_flags = 'bla bla' - env_string = '%s=%r' % (var, use_flags) - new_env_string = misc.MergeEnvStringWithDict(env_string, - {var: added_use_flags}) - expected_new_env = '%s=%r' % (var, ' '.join([added_use_flags, use_flags])) - self.assertEqual(new_env_string, ' '.join([env_string, expected_new_env])) - - def testGetChromeOSVersionFromLSBVersion(self): - versions_dict = {'2630.0.0': '22', '2030.0.0': '19'} - f = misc.GetChromeOSVersionFromLSBVersion - for k, v in versions_dict.items(): - self.assertEqual(f(k), 'R%s-%s' % (v, k)) - - def testPostpendMergeEnv(self): - var = 'USE' - use_flags = 'hello 123' - added_use_flags = 'bla bla' - env_string = '%s=%r' % (var, use_flags) - new_env_string = misc.MergeEnvStringWithDict(env_string, - {var: added_use_flags}, False) - expected_new_env = '%s=%r' % (var, ' '.join([use_flags, added_use_flags])) - self.assertEqual(new_env_string, ' '.join([env_string, expected_new_env])) - - -if __name__ == '__main__': - unittest.main() + """Tests for misc.""" + + def 
testGetFilenameFromString(self): + string = 'a /b=c"d^$?\\' + filename = misc.GetFilenameFromString(string) + self.assertEqual(filename, "a___bcd") + + def testPrependMergeEnv(self): + var = "USE" + use_flags = "hello 123" + added_use_flags = "bla bla" + env_string = "%s=%r" % (var, use_flags) + new_env_string = misc.MergeEnvStringWithDict( + env_string, {var: added_use_flags} + ) + expected_new_env = "%s=%r" % ( + var, + " ".join([added_use_flags, use_flags]), + ) + self.assertEqual( + new_env_string, " ".join([env_string, expected_new_env]) + ) + + def testGetChromeOSVersionFromLSBVersion(self): + versions_dict = {"2630.0.0": "22", "2030.0.0": "19"} + f = misc.GetChromeOSVersionFromLSBVersion + for k, v in versions_dict.items(): + self.assertEqual(f(k), "R%s-%s" % (v, k)) + + def testPostpendMergeEnv(self): + var = "USE" + use_flags = "hello 123" + added_use_flags = "bla bla" + env_string = "%s=%r" % (var, use_flags) + new_env_string = misc.MergeEnvStringWithDict( + env_string, {var: added_use_flags}, False + ) + expected_new_env = "%s=%r" % ( + var, + " ".join([use_flags, added_use_flags]), + ) + self.assertEqual( + new_env_string, " ".join([env_string, expected_new_env]) + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/cros_utils/no_pseudo_terminal_test.py b/cros_utils/no_pseudo_terminal_test.py index 9d6a294c..1980d401 100755 --- a/cros_utils/no_pseudo_terminal_test.py +++ b/cros_utils/no_pseudo_terminal_test.py @@ -14,51 +14,52 @@ import subprocess import tempfile import time import unittest + from cros_utils import command_executer class NoPsuedoTerminalTest(unittest.TestCase): - """Test to ensure we're not touching /dev/ptmx when running commands.""" + """Test to ensure we're not touching /dev/ptmx when running commands.""" - _strace_process = None - STRACE_TIMEOUT = 10 + _strace_process = None + STRACE_TIMEOUT = 10 - def _AttachStraceToSelf(self, output_file): - """Attaches strace to the current process.""" - args = ['sudo', 'strace', '-o', output_file, '-p', str(os.getpid())] - print(args) - # pylint: disable=bad-option-value, subprocess-popen-preexec-fn - self._strace_process = subprocess.Popen(args, preexec_fn=os.setpgrp) - # Wait until we see some activity. - start_time = time.time() - while time.time() - start_time < self.STRACE_TIMEOUT: - if os.path.isfile(output_file) and open(output_file).read(1): - return True - time.sleep(1) - return False + def _AttachStraceToSelf(self, output_file): + """Attaches strace to the current process.""" + args = ["sudo", "strace", "-o", output_file, "-p", str(os.getpid())] + print(args) + # pylint: disable=bad-option-value, subprocess-popen-preexec-fn + self._strace_process = subprocess.Popen(args, preexec_fn=os.setpgrp) + # Wait until we see some activity. 
+ start_time = time.time() + while time.time() - start_time < self.STRACE_TIMEOUT: + if os.path.isfile(output_file) and open(output_file).read(1): + return True + time.sleep(1) + return False - def _KillStraceProcess(self): - """Kills strace that was started by _AttachStraceToSelf().""" - pgid = os.getpgid(self._strace_process.pid) - args = ['sudo', 'kill', str(pgid)] - if subprocess.call(args) == 0: - os.waitpid(pgid, 0) - return True - return False + def _KillStraceProcess(self): + """Kills strace that was started by _AttachStraceToSelf().""" + pgid = os.getpgid(self._strace_process.pid) + args = ["sudo", "kill", str(pgid)] + if subprocess.call(args) == 0: + os.waitpid(pgid, 0) + return True + return False - def testNoPseudoTerminalWhenRunningCommand(self): - """Test to make sure we're not touching /dev/ptmx when running commands.""" - temp_file = tempfile.mktemp() - self.assertTrue(self._AttachStraceToSelf(temp_file)) + def testNoPseudoTerminalWhenRunningCommand(self): + """Test to make sure we're not touching /dev/ptmx when running commands.""" + temp_file = tempfile.mktemp() + self.assertTrue(self._AttachStraceToSelf(temp_file)) - ce = command_executer.GetCommandExecuter() - ce.RunCommand('echo') + ce = command_executer.GetCommandExecuter() + ce.RunCommand("echo") - self.assertTrue(self._KillStraceProcess()) + self.assertTrue(self._KillStraceProcess()) - strace_contents = open(temp_file).read() - self.assertFalse('/dev/ptmx' in strace_contents) + strace_contents = open(temp_file).read() + self.assertFalse("/dev/ptmx" in strace_contents) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/cros_utils/perf_diff.py b/cros_utils/perf_diff.py index d2bb7221..191a6ee0 100755 --- a/cros_utils/perf_diff.py +++ b/cros_utils/perf_diff.py @@ -11,7 +11,8 @@ A detailed description of perf_diff. 
from __future__ import print_function -__author__ = 'asharif@google.com (Ahmad Sharif)' + +__author__ = "asharif@google.com (Ahmad Sharif)" import argparse import functools @@ -21,319 +22,338 @@ import sys from cros_utils import misc from cros_utils import tabulator -ROWS_TO_SHOW = 'Rows_to_show_in_the_perf_table' -TOTAL_EVENTS = 'Total_events_of_this_profile' + +ROWS_TO_SHOW = "Rows_to_show_in_the_perf_table" +TOTAL_EVENTS = "Total_events_of_this_profile" def GetPerfDictFromReport(report_file): - output = {} - perf_report = PerfReport(report_file) - for k, v in perf_report.sections.items(): - if k not in output: - output[k] = {} - output[k][ROWS_TO_SHOW] = 0 - output[k][TOTAL_EVENTS] = 0 - for function in v.functions: - out_key = '%s' % (function.name) - output[k][out_key] = function.count - output[k][TOTAL_EVENTS] += function.count - if function.percent > 1: - output[k][ROWS_TO_SHOW] += 1 - return output + output = {} + perf_report = PerfReport(report_file) + for k, v in perf_report.sections.items(): + if k not in output: + output[k] = {} + output[k][ROWS_TO_SHOW] = 0 + output[k][TOTAL_EVENTS] = 0 + for function in v.functions: + out_key = "%s" % (function.name) + output[k][out_key] = function.count + output[k][TOTAL_EVENTS] += function.count + if function.percent > 1: + output[k][ROWS_TO_SHOW] += 1 + return output def _SortDictionaryByValue(d): - l = d.items() + l = d.items() - def GetFloat(x): - if misc.IsFloat(x): - return float(x) - else: - return x + def GetFloat(x): + if misc.IsFloat(x): + return float(x) + else: + return x - sorted_l = sorted(l, key=lambda x: GetFloat(x[1])) - sorted_l.reverse() - return [f[0] for f in sorted_l] + sorted_l = sorted(l, key=lambda x: GetFloat(x[1])) + sorted_l.reverse() + return [f[0] for f in sorted_l] class Tabulator(object): - """Make tables.""" - - def __init__(self, all_dicts): - self._all_dicts = all_dicts - - def PrintTable(self): - for dicts in self._all_dicts: - self.PrintTableHelper(dicts) - - def PrintTableHelper(self, dicts): - """Transfrom dicts to tables.""" - fields = {} - for d in dicts: - for f in d.keys(): - if f not in fields: - fields[f] = d[f] - else: - fields[f] = max(fields[f], d[f]) - table = [] - header = ['name'] - for i in range(len(dicts)): - header.append(i) + """Make tables.""" - table.append(header) + def __init__(self, all_dicts): + self._all_dicts = all_dicts - sorted_fields = _SortDictionaryByValue(fields) + def PrintTable(self): + for dicts in self._all_dicts: + self.PrintTableHelper(dicts) - for f in sorted_fields: - row = [f] - for d in dicts: - if f in d: - row.append(d[f]) - else: - row.append('0') - table.append(row) + def PrintTableHelper(self, dicts): + """Transfrom dicts to tables.""" + fields = {} + for d in dicts: + for f in d.keys(): + if f not in fields: + fields[f] = d[f] + else: + fields[f] = max(fields[f], d[f]) + table = [] + header = ["name"] + for i in range(len(dicts)): + header.append(i) + + table.append(header) - print(tabulator.GetSimpleTable(table)) + sorted_fields = _SortDictionaryByValue(fields) + + for f in sorted_fields: + row = [f] + for d in dicts: + if f in d: + row.append(d[f]) + else: + row.append("0") + table.append(row) + + print(tabulator.GetSimpleTable(table)) class Function(object): - """Function for formatting.""" + """Function for formatting.""" - def __init__(self): - self.count = 0 - self.name = '' - self.percent = 0 + def __init__(self): + self.count = 0 + self.name = "" + self.percent = 0 class Section(object): - """Section formatting.""" - - def __init__(self, contents): 
- self.name = '' - self.raw_contents = contents - self._ParseSection() - - def _ParseSection(self): - matches = re.findall(r'Events: (\w+)\s+(.*)', self.raw_contents) - assert len(matches) <= 1, 'More than one event found in 1 section' - if not matches: - return - match = matches[0] - self.name = match[1] - self.count = misc.UnitToNumber(match[0]) - - self.functions = [] - for line in self.raw_contents.splitlines(): - if not line.strip(): - continue - if '%' not in line: - continue - if not line.startswith('#'): - fields = [f for f in line.split(' ') if f] - function = Function() - function.percent = float(fields[0].strip('%')) - function.count = int(fields[1]) - function.name = ' '.join(fields[2:]) - self.functions.append(function) + """Section formatting.""" + + def __init__(self, contents): + self.name = "" + self.raw_contents = contents + self._ParseSection() + + def _ParseSection(self): + matches = re.findall(r"Events: (\w+)\s+(.*)", self.raw_contents) + assert len(matches) <= 1, "More than one event found in 1 section" + if not matches: + return + match = matches[0] + self.name = match[1] + self.count = misc.UnitToNumber(match[0]) + + self.functions = [] + for line in self.raw_contents.splitlines(): + if not line.strip(): + continue + if "%" not in line: + continue + if not line.startswith("#"): + fields = [f for f in line.split(" ") if f] + function = Function() + function.percent = float(fields[0].strip("%")) + function.count = int(fields[1]) + function.name = " ".join(fields[2:]) + self.functions.append(function) class PerfReport(object): - """Get report from raw report.""" - - def __init__(self, perf_file): - self.perf_file = perf_file - self._ReadFile() - self.sections = {} - self.metadata = {} - self._section_contents = [] - self._section_header = '' - self._SplitSections() - self._ParseSections() - self._ParseSectionHeader() - - def _ParseSectionHeader(self): - """Parse a header of a perf report file.""" - # The "captured on" field is inaccurate - this actually refers to when the - # report was generated, not when the data was captured. - for line in self._section_header.splitlines(): - line = line[2:] - if ':' in line: - key, val = line.strip().split(':', 1) - key = key.strip() - val = val.strip() - self.metadata[key] = val - - def _ReadFile(self): - self._perf_contents = open(self.perf_file).read() - - def _ParseSections(self): - self.event_counts = {} - self.sections = {} - for section_content in self._section_contents: - section = Section(section_content) - section.name = self._GetHumanReadableName(section.name) - self.sections[section.name] = section - - # TODO(asharif): Do this better. 
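
For reference, a tiny worked example of the "Events:" regex that Section._ParseSection applies above; the sample line is invented:

import re

sample = "Events: 10K cycles"
matches = re.findall(r"Events: (\w+)\s+(.*)", sample)
# matches == [("10K", "cycles")]: the first group is the unit-suffixed
# count (later converted via misc.UnitToNumber), the second the event name.
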
- def _GetHumanReadableName(self, section_name): - if not 'raw' in section_name: - return section_name - raw_number = section_name.strip().split(' ')[-1] - for line in self._section_header.splitlines(): - if raw_number in line: - name = line.strip().split(' ')[5] - return name - - def _SplitSections(self): - self._section_contents = [] - indices = [m.start() for m in re.finditer('# Events:', self._perf_contents)] - indices.append(len(self._perf_contents)) - for i in range(len(indices) - 1): - section_content = self._perf_contents[indices[i]:indices[i + 1]] - self._section_contents.append(section_content) - self._section_header = '' - if indices: - self._section_header = self._perf_contents[0:indices[0]] + """Get report from raw report.""" + + def __init__(self, perf_file): + self.perf_file = perf_file + self._ReadFile() + self.sections = {} + self.metadata = {} + self._section_contents = [] + self._section_header = "" + self._SplitSections() + self._ParseSections() + self._ParseSectionHeader() + + def _ParseSectionHeader(self): + """Parse a header of a perf report file.""" + # The "captured on" field is inaccurate - this actually refers to when the + # report was generated, not when the data was captured. + for line in self._section_header.splitlines(): + line = line[2:] + if ":" in line: + key, val = line.strip().split(":", 1) + key = key.strip() + val = val.strip() + self.metadata[key] = val + + def _ReadFile(self): + self._perf_contents = open(self.perf_file).read() + + def _ParseSections(self): + self.event_counts = {} + self.sections = {} + for section_content in self._section_contents: + section = Section(section_content) + section.name = self._GetHumanReadableName(section.name) + self.sections[section.name] = section + + # TODO(asharif): Do this better. 
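
A small worked example, with an invented header line, of the key/value split that _ParseSectionHeader performs: the leading '# ' is dropped and everything before the first ':' becomes the metadata key:

line = "# captured on : Fri Mar  4 18:41:25 2022"
line = line[2:]  # drop the leading "# "
key, val = line.strip().split(":", 1)
key, val = key.strip(), val.strip()
# key == "captured on", val == "Fri Mar  4 18:41:25 2022"
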
+ def _GetHumanReadableName(self, section_name): + if not "raw" in section_name: + return section_name + raw_number = section_name.strip().split(" ")[-1] + for line in self._section_header.splitlines(): + if raw_number in line: + name = line.strip().split(" ")[5] + return name + + def _SplitSections(self): + self._section_contents = [] + indices = [ + m.start() for m in re.finditer("# Events:", self._perf_contents) + ] + indices.append(len(self._perf_contents)) + for i in range(len(indices) - 1): + section_content = self._perf_contents[indices[i] : indices[i + 1]] + self._section_contents.append(section_content) + self._section_header = "" + if indices: + self._section_header = self._perf_contents[0 : indices[0]] class PerfDiffer(object): - """Perf differ class.""" - - def __init__(self, reports, num_symbols, common_only): - self._reports = reports - self._num_symbols = num_symbols - self._common_only = common_only - self._common_function_names = {} - - def DoDiff(self): - """The function that does the diff.""" - section_names = self._FindAllSections() - - filename_dicts = [] - summary_dicts = [] - for report in self._reports: - d = {} - filename_dicts.append({'file': report.perf_file}) - for section_name in section_names: - if section_name in report.sections: - d[section_name] = report.sections[section_name].count - summary_dicts.append(d) - - all_dicts = [filename_dicts, summary_dicts] - - for section_name in section_names: - function_names = self._GetTopFunctions(section_name, self._num_symbols) - self._FindCommonFunctions(section_name) - dicts = [] - for report in self._reports: + """Perf differ class.""" + + def __init__(self, reports, num_symbols, common_only): + self._reports = reports + self._num_symbols = num_symbols + self._common_only = common_only + self._common_function_names = {} + + def DoDiff(self): + """The function that does the diff.""" + section_names = self._FindAllSections() + + filename_dicts = [] + summary_dicts = [] + for report in self._reports: + d = {} + filename_dicts.append({"file": report.perf_file}) + for section_name in section_names: + if section_name in report.sections: + d[section_name] = report.sections[section_name].count + summary_dicts.append(d) + + all_dicts = [filename_dicts, summary_dicts] + + for section_name in section_names: + function_names = self._GetTopFunctions( + section_name, self._num_symbols + ) + self._FindCommonFunctions(section_name) + dicts = [] + for report in self._reports: + d = {} + if section_name in report.sections: + section = report.sections[section_name] + + # Get a common scaling factor for this report. + common_scaling_factor = self._GetCommonScalingFactor( + section + ) + + for function in section.functions: + if function.name in function_names: + key = "%s %s" % (section.name, function.name) + d[key] = function.count + # Compute a factor to scale the function count by in common_only + # mode. 
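
A numeric sketch, with toy counts, of the common scaling factor fetched here: _GetCommonScalingFactor (defined further down) rescales each report so that the functions shared by all reports sum to 100:

common = {"main", "memcpy"}
functions = {"main": 300, "memcpy": 100, "foo": 600}
unique_count = sum(c for name, c in functions.items() if name in common)
factor = 100.0 / unique_count
# factor == 0.25, so "main" contributes 75.0 and "memcpy" 25.0 of this
# report's 100 "common" samples; "foo" is not part of the intersection.
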
+ if self._common_only and ( + function.name + in self._common_function_names[section.name] + ): + d[key + " scaled"] = ( + common_scaling_factor * function.count + ) + dicts.append(d) + + all_dicts.append(dicts) + + mytabulator = Tabulator(all_dicts) + mytabulator.PrintTable() + + def _FindAllSections(self): + sections = {} + for report in self._reports: + for section in report.sections.values(): + if section.name not in sections: + sections[section.name] = section.count + else: + sections[section.name] = max( + sections[section.name], section.count + ) + return _SortDictionaryByValue(sections) + + def _GetCommonScalingFactor(self, section): + unique_count = self._GetCount( + section, lambda x: x in self._common_function_names[section.name] + ) + return 100.0 / unique_count + + def _GetCount(self, section, filter_fun=None): + total_count = 0 + for function in section.functions: + if not filter_fun or filter_fun(function.name): + total_count += int(function.count) + return total_count + + def _FindCommonFunctions(self, section_name): + function_names_list = [] + for report in self._reports: + if section_name in report.sections: + section = report.sections[section_name] + function_names = {f.name for f in section.functions} + function_names_list.append(function_names) + + self._common_function_names[section_name] = functools.reduce( + set.intersection, function_names_list + ) + + def _GetTopFunctions(self, section_name, num_functions): + all_functions = {} + for report in self._reports: + if section_name in report.sections: + section = report.sections[section_name] + for f in section.functions[:num_functions]: + if f.name in all_functions: + all_functions[f.name] = max( + all_functions[f.name], f.count + ) + else: + all_functions[f.name] = f.count + # FIXME(asharif): Don't really need to sort these... + return _SortDictionaryByValue(all_functions) + + def _GetFunctionsDict(self, section, function_names): d = {} - if section_name in report.sections: - section = report.sections[section_name] - - # Get a common scaling factor for this report. - common_scaling_factor = self._GetCommonScalingFactor(section) - - for function in section.functions: + for function in section.functions: if function.name in function_names: - key = '%s %s' % (section.name, function.name) - d[key] = function.count - # Compute a factor to scale the function count by in common_only - # mode. 
- if self._common_only and ( - function.name in self._common_function_names[section.name]): - d[key + ' scaled'] = common_scaling_factor * function.count - dicts.append(d) - - all_dicts.append(dicts) - - mytabulator = Tabulator(all_dicts) - mytabulator.PrintTable() - - def _FindAllSections(self): - sections = {} - for report in self._reports: - for section in report.sections.values(): - if section.name not in sections: - sections[section.name] = section.count - else: - sections[section.name] = max(sections[section.name], section.count) - return _SortDictionaryByValue(sections) - - def _GetCommonScalingFactor(self, section): - unique_count = self._GetCount( - section, lambda x: x in self._common_function_names[section.name]) - return 100.0 / unique_count - - def _GetCount(self, section, filter_fun=None): - total_count = 0 - for function in section.functions: - if not filter_fun or filter_fun(function.name): - total_count += int(function.count) - return total_count - - def _FindCommonFunctions(self, section_name): - function_names_list = [] - for report in self._reports: - if section_name in report.sections: - section = report.sections[section_name] - function_names = {f.name for f in section.functions} - function_names_list.append(function_names) - - self._common_function_names[section_name] = ( - functools.reduce(set.intersection, function_names_list)) - - def _GetTopFunctions(self, section_name, num_functions): - all_functions = {} - for report in self._reports: - if section_name in report.sections: - section = report.sections[section_name] - for f in section.functions[:num_functions]: - if f.name in all_functions: - all_functions[f.name] = max(all_functions[f.name], f.count) - else: - all_functions[f.name] = f.count - # FIXME(asharif): Don't really need to sort these... 
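
A minimal sketch, using hand-made sets, of the functools.reduce/set.intersection idiom that _FindCommonFunctions uses to keep only the symbols present in every report:

import functools

function_names_list = [
    {"main", "memcpy", "foo"},
    {"main", "memcpy"},
    {"memcpy", "main", "bar"},
]
common = functools.reduce(set.intersection, function_names_list)
# common == {"main", "memcpy"}
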
- return _SortDictionaryByValue(all_functions) - - def _GetFunctionsDict(self, section, function_names): - d = {} - for function in section.functions: - if function.name in function_names: - d[function.name] = function.count - return d + d[function.name] = function.count + return d def Main(argv): - """The entry of the main.""" - parser = argparse.ArgumentParser() - parser.add_argument( - '-n', - '--num_symbols', - dest='num_symbols', - default='5', - help='The number of symbols to show.') - parser.add_argument( - '-c', - '--common_only', - dest='common_only', - action='store_true', - default=False, - help='Diff common symbols only.') - - options, args = parser.parse_known_args(argv) - - try: - reports = [] - for report in args[1:]: - report = PerfReport(report) - reports.append(report) - pd = PerfDiffer(reports, int(options.num_symbols), options.common_only) - pd.DoDiff() - finally: - pass - - return 0 - - -if __name__ == '__main__': - sys.exit(Main(sys.argv)) + """The entry of the main.""" + parser = argparse.ArgumentParser() + parser.add_argument( + "-n", + "--num_symbols", + dest="num_symbols", + default="5", + help="The number of symbols to show.", + ) + parser.add_argument( + "-c", + "--common_only", + dest="common_only", + action="store_true", + default=False, + help="Diff common symbols only.", + ) + + options, args = parser.parse_known_args(argv) + + try: + reports = [] + for report in args[1:]: + report = PerfReport(report) + reports.append(report) + pd = PerfDiffer(reports, int(options.num_symbols), options.common_only) + pd.DoDiff() + finally: + pass + + return 0 + + +if __name__ == "__main__": + sys.exit(Main(sys.argv)) diff --git a/cros_utils/tabulator.py b/cros_utils/tabulator.py index 1a46cca2..7b51bf86 100644 --- a/cros_utils/tabulator.py +++ b/cros_utils/tabulator.py @@ -69,958 +69,1018 @@ import getpass import math import statistics import sys + +from cros_utils import misc +from cros_utils.email_sender import EmailSender + # TODO(crbug.com/980719): Drop scipy in the future. # pylint: disable=import-error import scipy -from cros_utils.email_sender import EmailSender -from cros_utils import misc - def _AllFloat(values): - return all([misc.IsFloat(v) for v in values]) + return all([misc.IsFloat(v) for v in values]) def _GetFloats(values): - return [float(v) for v in values] + return [float(v) for v in values] def _StripNone(results): - res = [] - for result in results: - if result is not None: - res.append(result) - return res + res = [] + for result in results: + if result is not None: + res.append(result) + return res def _RemoveMinMax(cell, values): - if len(values) < 3: - print('WARNING: Values count is less than 3, not ignoring min/max values') - print('WARNING: Cell name:', cell.name, 'Values:', values) - return values + if len(values) < 3: + print( + "WARNING: Values count is less than 3, not ignoring min/max values" + ) + print("WARNING: Cell name:", cell.name, "Values:", values) + return values - values.remove(min(values)) - values.remove(max(values)) - return values + values.remove(min(values)) + values.remove(max(values)) + return values class TableGenerator(object): - """Creates a table from a list of list of dicts. - - The main public function is called GetTable(). 
- """ - SORT_BY_KEYS = 0 - SORT_BY_KEYS_DESC = 1 - SORT_BY_VALUES = 2 - SORT_BY_VALUES_DESC = 3 - NO_SORT = 4 - - MISSING_VALUE = 'x' - - def __init__(self, d, l, sort=NO_SORT, key_name='keys'): - self._runs = d - self._labels = l - self._sort = sort - self._key_name = key_name - - def _AggregateKeys(self): - keys = collections.OrderedDict() - for run_list in self._runs: - for run in run_list: - keys.update(dict.fromkeys(run.keys())) - return list(keys.keys()) - - def _GetHighestValue(self, key): - values = [] - for run_list in self._runs: - for run in run_list: - if key in run: - values.append(run[key]) - values = _StripNone(values) - if _AllFloat(values): - values = _GetFloats(values) - return max(values) - - def _GetLowestValue(self, key): - values = [] - for run_list in self._runs: - for run in run_list: - if key in run: - values.append(run[key]) - values = _StripNone(values) - if _AllFloat(values): - values = _GetFloats(values) - return min(values) - - def _SortKeys(self, keys): - if self._sort == self.SORT_BY_KEYS: - return sorted(keys) - elif self._sort == self.SORT_BY_VALUES: - # pylint: disable=unnecessary-lambda - return sorted(keys, key=lambda x: self._GetLowestValue(x)) - elif self._sort == self.SORT_BY_VALUES_DESC: - # pylint: disable=unnecessary-lambda - return sorted(keys, key=lambda x: self._GetHighestValue(x), reverse=True) - elif self._sort == self.NO_SORT: - return keys - else: - assert 0, 'Unimplemented sort %s' % self._sort - - def _GetKeys(self): - keys = self._AggregateKeys() - return self._SortKeys(keys) - - def GetTable(self, number_of_rows=sys.maxsize): - """Returns a table from a list of list of dicts. + """Creates a table from a list of list of dicts. - Examples: - We have the following runs: - [[{"k1": "v1", "k2": "v2"}, {"k1": "v3"}], - [{"k1": "v4", "k4": "v5"}]] - and the following labels: - ["vanilla", "modified"] - it will return: - [["Key", "vanilla", "modified"] - ["k1", ["v1", "v3"], ["v4"]] - ["k2", ["v2"], []] - ["k4", [], ["v5"]]] - The returned table can then be processed further by other classes in this - module. - - The list of list of dicts is passed into the constructor of TableGenerator. - This method converts that into a canonical list of lists which represents a - table of values. - - Args: - number_of_rows: Maximum number of rows to return from the table. - - Returns: - A list of lists which is the table. + The main public function is called GetTable(). """ - keys = self._GetKeys() - header = [self._key_name] + self._labels - table = [header] - rows = 0 - for k in keys: - row = [k] - unit = None - for run_list in self._runs: - v = [] - for run in run_list: - if k in run: - if isinstance(run[k], list): - val = run[k][0] - unit = run[k][1] - else: - val = run[k] - v.append(val) - else: - v.append(None) - row.append(v) - # If we got a 'unit' value, append the units name to the key name. - if unit: - keyname = row[0] + ' (%s) ' % unit - row[0] = keyname - table.append(row) - rows += 1 - if rows == number_of_rows: - break - return table - - -class SamplesTableGenerator(TableGenerator): - """Creates a table with only samples from the results - - The main public function is called GetTable(). - - Different than TableGenerator, self._runs is now a dict of {benchmark: runs} - We are expecting there is 'samples' in `runs`. 
- """ - - def __init__(self, run_keyvals, label_list, iter_counts, weights): - TableGenerator.__init__( - self, run_keyvals, label_list, key_name='Benchmarks') - self._iter_counts = iter_counts - self._weights = weights - - def _GetKeys(self): - keys = self._runs.keys() - return self._SortKeys(keys) - def GetTable(self, number_of_rows=sys.maxsize): - """Returns a tuple, which contains three args: - - 1) a table from a list of list of dicts. - 2) updated benchmark_results run_keyvals with composite benchmark - 3) updated benchmark_results iter_count with composite benchmark + SORT_BY_KEYS = 0 + SORT_BY_KEYS_DESC = 1 + SORT_BY_VALUES = 2 + SORT_BY_VALUES_DESC = 3 + NO_SORT = 4 + + MISSING_VALUE = "x" + + def __init__(self, d, l, sort=NO_SORT, key_name="keys"): + self._runs = d + self._labels = l + self._sort = sort + self._key_name = key_name + + def _AggregateKeys(self): + keys = collections.OrderedDict() + for run_list in self._runs: + for run in run_list: + keys.update(dict.fromkeys(run.keys())) + return list(keys.keys()) + + def _GetHighestValue(self, key): + values = [] + for run_list in self._runs: + for run in run_list: + if key in run: + values.append(run[key]) + values = _StripNone(values) + if _AllFloat(values): + values = _GetFloats(values) + return max(values) + + def _GetLowestValue(self, key): + values = [] + for run_list in self._runs: + for run in run_list: + if key in run: + values.append(run[key]) + values = _StripNone(values) + if _AllFloat(values): + values = _GetFloats(values) + return min(values) + + def _SortKeys(self, keys): + if self._sort == self.SORT_BY_KEYS: + return sorted(keys) + elif self._sort == self.SORT_BY_VALUES: + # pylint: disable=unnecessary-lambda + return sorted(keys, key=lambda x: self._GetLowestValue(x)) + elif self._sort == self.SORT_BY_VALUES_DESC: + # pylint: disable=unnecessary-lambda + return sorted( + keys, key=lambda x: self._GetHighestValue(x), reverse=True + ) + elif self._sort == self.NO_SORT: + return keys + else: + assert 0, "Unimplemented sort %s" % self._sort + + def _GetKeys(self): + keys = self._AggregateKeys() + return self._SortKeys(keys) + + def GetTable(self, number_of_rows=sys.maxsize): + """Returns a table from a list of list of dicts. + + Examples: + We have the following runs: + [[{"k1": "v1", "k2": "v2"}, {"k1": "v3"}], + [{"k1": "v4", "k4": "v5"}]] + and the following labels: + ["vanilla", "modified"] + it will return: + [["Key", "vanilla", "modified"] + ["k1", ["v1", "v3"], ["v4"]] + ["k2", ["v2"], []] + ["k4", [], ["v5"]]] + The returned table can then be processed further by other classes in this + module. + + The list of list of dicts is passed into the constructor of TableGenerator. + This method converts that into a canonical list of lists which represents a + table of values. + + Args: + number_of_rows: Maximum number of rows to return from the table. + + Returns: + A list of lists which is the table. + """ + keys = self._GetKeys() + header = [self._key_name] + self._labels + table = [header] + rows = 0 + for k in keys: + row = [k] + unit = None + for run_list in self._runs: + v = [] + for run in run_list: + if k in run: + if isinstance(run[k], list): + val = run[k][0] + unit = run[k][1] + else: + val = run[k] + v.append(val) + else: + v.append(None) + row.append(v) + # If we got a 'unit' value, append the units name to the key name. 
+ if unit: + keyname = row[0] + " (%s) " % unit + row[0] = keyname + table.append(row) + rows += 1 + if rows == number_of_rows: + break + return table - The dict of list of list of dicts is passed into the constructor of - SamplesTableGenerator. - This method converts that into a canonical list of lists which - represents a table of values. - Examples: - We have the following runs: - {bench1: [[{"samples": "v1"}, {"samples": "v2"}], - [{"samples": "v3"}, {"samples": "v4"}]] - bench2: [[{"samples": "v21"}, None], - [{"samples": "v22"}, {"samples": "v23"}]]} - and weights of benchmarks: - {bench1: w1, bench2: w2} - and the following labels: - ["vanilla", "modified"] - it will return: - [["Benchmark", "Weights", "vanilla", "modified"] - ["bench1", w1, - ((2, 0), ["v1*w1", "v2*w1"]), ((2, 0), ["v3*w1", "v4*w1"])] - ["bench2", w2, - ((1, 1), ["v21*w2", None]), ((2, 0), ["v22*w2", "v23*w2"])] - ["Composite Benchmark", N/A, - ((1, 1), ["v1*w1+v21*w2", None]), - ((2, 0), ["v3*w1+v22*w2", "v4*w1+ v23*w2"])]] - The returned table can then be processed further by other classes in this - module. +class SamplesTableGenerator(TableGenerator): + """Creates a table with only samples from the results - Args: - number_of_rows: Maximum number of rows to return from the table. + The main public function is called GetTable(). - Returns: - A list of lists which is the table. + Different than TableGenerator, self._runs is now a dict of {benchmark: runs} + We are expecting there is 'samples' in `runs`. """ - keys = self._GetKeys() - header = [self._key_name, 'Weights'] + self._labels - table = [header] - rows = 0 - iterations = 0 - - for k in keys: - bench_runs = self._runs[k] - unit = None - all_runs_empty = all(not dict for label in bench_runs for dict in label) - if all_runs_empty: - cell = Cell() - cell.string_value = ('Benchmark %s contains no result.' - ' Is the benchmark name valid?' % k) - table.append([cell]) - else: - row = [k] - row.append(self._weights[k]) - for run_list in bench_runs: - run_pass = 0 - run_fail = 0 - v = [] - for run in run_list: - if 'samples' in run: - if isinstance(run['samples'], list): - val = run['samples'][0] * self._weights[k] - unit = run['samples'][1] - else: - val = run['samples'] * self._weights[k] - v.append(val) - run_pass += 1 + + def __init__(self, run_keyvals, label_list, iter_counts, weights): + TableGenerator.__init__( + self, run_keyvals, label_list, key_name="Benchmarks" + ) + self._iter_counts = iter_counts + self._weights = weights + + def _GetKeys(self): + keys = self._runs.keys() + return self._SortKeys(keys) + + def GetTable(self, number_of_rows=sys.maxsize): + """Returns a tuple, which contains three args: + + 1) a table from a list of list of dicts. + 2) updated benchmark_results run_keyvals with composite benchmark + 3) updated benchmark_results iter_count with composite benchmark + + The dict of list of list of dicts is passed into the constructor of + SamplesTableGenerator. + This method converts that into a canonical list of lists which + represents a table of values. 
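
Stepping back to TableGenerator for a moment, a short usage sketch that reuses the toy data from its GetTable docstring (default key_name, so the header row starts with "keys"):

runs = [[{"k1": "v1", "k2": "v2"}, {"k1": "v3"}], [{"k1": "v4", "k4": "v5"}]]
labels = ["vanilla", "modified"]
table = TableGenerator(runs, labels).GetTable()
# table[0] == ["keys", "vanilla", "modified"], and each later row gathers
# one key's values per label, e.g. ["k1", ["v1", "v3"], ["v4"]].
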
+ + Examples: + We have the following runs: + {bench1: [[{"samples": "v1"}, {"samples": "v2"}], + [{"samples": "v3"}, {"samples": "v4"}]] + bench2: [[{"samples": "v21"}, None], + [{"samples": "v22"}, {"samples": "v23"}]]} + and weights of benchmarks: + {bench1: w1, bench2: w2} + and the following labels: + ["vanilla", "modified"] + it will return: + [["Benchmark", "Weights", "vanilla", "modified"] + ["bench1", w1, + ((2, 0), ["v1*w1", "v2*w1"]), ((2, 0), ["v3*w1", "v4*w1"])] + ["bench2", w2, + ((1, 1), ["v21*w2", None]), ((2, 0), ["v22*w2", "v23*w2"])] + ["Composite Benchmark", N/A, + ((1, 1), ["v1*w1+v21*w2", None]), + ((2, 0), ["v3*w1+v22*w2", "v4*w1+ v23*w2"])]] + The returned table can then be processed further by other classes in this + module. + + Args: + number_of_rows: Maximum number of rows to return from the table. + + Returns: + A list of lists which is the table. + """ + keys = self._GetKeys() + header = [self._key_name, "Weights"] + self._labels + table = [header] + rows = 0 + iterations = 0 + + for k in keys: + bench_runs = self._runs[k] + unit = None + all_runs_empty = all( + not dict for label in bench_runs for dict in label + ) + if all_runs_empty: + cell = Cell() + cell.string_value = ( + "Benchmark %s contains no result." + " Is the benchmark name valid?" % k + ) + table.append([cell]) else: - v.append(None) - run_fail += 1 - one_tuple = ((run_pass, run_fail), v) - if iterations not in (0, run_pass + run_fail): - raise ValueError('Iterations of each benchmark run ' \ - 'are not the same') - iterations = run_pass + run_fail - row.append(one_tuple) - if unit: - keyname = row[0] + ' (%s) ' % unit - row[0] = keyname - table.append(row) - rows += 1 - if rows == number_of_rows: - break - - k = 'Composite Benchmark' - if k in keys: - raise RuntimeError('Composite benchmark already exists in results') - - # Create a new composite benchmark row at the bottom of the summary table - # The new row will be like the format in example: - # ["Composite Benchmark", N/A, - # ((1, 1), ["v1*w1+v21*w2", None]), - # ((2, 0), ["v3*w1+v22*w2", "v4*w1+ v23*w2"])]] - # First we will create a row of [key, weight, [[0] * iterations] * labels] - row = [None] * len(header) - row[0] = '%s (samples)' % k - row[1] = 'N/A' - for label_index in range(2, len(row)): - row[label_index] = [0] * iterations - - for cur_row in table[1:]: - # Iterate through each benchmark - if len(cur_row) > 1: - for label_index in range(2, len(cur_row)): - # Iterate through each run in a single benchmark - # each result should look like ((pass, fail), [values_list]) - bench_runs = cur_row[label_index][1] - for index in range(iterations): - # Accumulate each run result to composite benchmark run - # If any run fails, then we set this run for composite benchmark - # to None so that we know it fails. 
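
A toy sketch of the accumulation that follows, assuming two benchmarks and two iterations under a single label: weighted samples are summed per iteration, and a failed constituent run marks that iteration of the composite as None:

bench_runs_by_benchmark = [[10.0, None], [12.0, 11.0]]
composite = [0, 0]
for bench_runs in bench_runs_by_benchmark:
    for i, sample in enumerate(bench_runs):
        if sample and composite[i] is not None:
            composite[i] += sample
        else:
            composite[i] = None
# composite == [22.0, None]: the second iteration is poisoned by the
# failure in the first benchmark, even though the second one succeeded.
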
- if bench_runs[index] and row[label_index][index] is not None: - row[label_index][index] += bench_runs[index] + row = [k] + row.append(self._weights[k]) + for run_list in bench_runs: + run_pass = 0 + run_fail = 0 + v = [] + for run in run_list: + if "samples" in run: + if isinstance(run["samples"], list): + val = run["samples"][0] * self._weights[k] + unit = run["samples"][1] + else: + val = run["samples"] * self._weights[k] + v.append(val) + run_pass += 1 + else: + v.append(None) + run_fail += 1 + one_tuple = ((run_pass, run_fail), v) + if iterations not in (0, run_pass + run_fail): + raise ValueError( + "Iterations of each benchmark run " + "are not the same" + ) + iterations = run_pass + run_fail + row.append(one_tuple) + if unit: + keyname = row[0] + " (%s) " % unit + row[0] = keyname + table.append(row) + rows += 1 + if rows == number_of_rows: + break + + k = "Composite Benchmark" + if k in keys: + raise RuntimeError("Composite benchmark already exists in results") + + # Create a new composite benchmark row at the bottom of the summary table + # The new row will be like the format in example: + # ["Composite Benchmark", N/A, + # ((1, 1), ["v1*w1+v21*w2", None]), + # ((2, 0), ["v3*w1+v22*w2", "v4*w1+ v23*w2"])]] + # First we will create a row of [key, weight, [[0] * iterations] * labels] + row = [None] * len(header) + row[0] = "%s (samples)" % k + row[1] = "N/A" + for label_index in range(2, len(row)): + row[label_index] = [0] * iterations + + for cur_row in table[1:]: + # Iterate through each benchmark + if len(cur_row) > 1: + for label_index in range(2, len(cur_row)): + # Iterate through each run in a single benchmark + # each result should look like ((pass, fail), [values_list]) + bench_runs = cur_row[label_index][1] + for index in range(iterations): + # Accumulate each run result to composite benchmark run + # If any run fails, then we set this run for composite benchmark + # to None so that we know it fails. + if ( + bench_runs[index] + and row[label_index][index] is not None + ): + row[label_index][index] += bench_runs[index] + else: + row[label_index][index] = None else: - row[label_index][index] = None - else: - # One benchmark totally fails, no valid data will be in final result + # One benchmark totally fails, no valid data will be in final result + for label_index in range(2, len(row)): + row[label_index] = [None] * iterations + break + # Calculate pass and fail count for composite benchmark for label_index in range(2, len(row)): - row[label_index] = [None] * iterations - break - # Calculate pass and fail count for composite benchmark - for label_index in range(2, len(row)): - run_pass = 0 - run_fail = 0 - for run in row[label_index]: - if run: - run_pass += 1 - else: - run_fail += 1 - row[label_index] = ((run_pass, run_fail), row[label_index]) - table.append(row) - - # Now that we have the table genearted, we want to store this new composite - # benchmark into the benchmark_result in ResultReport object. - # This will be used to generate a full table which contains our composite - # benchmark. - # We need to create composite benchmark result and add it to keyvals in - # benchmark_results. - v = [] - for label in row[2:]: - # each label's result looks like ((pass, fail), [values]) - benchmark_runs = label[1] - # List of values of each label - single_run_list = [] - for run in benchmark_runs: - # Result of each run under the same label is a dict of keys. - # Here the only key we will add for composite benchmark is the - # weighted_samples we added up. 
- one_dict = {} - if run: - one_dict[u'weighted_samples'] = [run, u'samples'] - one_dict['retval'] = 0 - else: - one_dict['retval'] = 1 - single_run_list.append(one_dict) - v.append(single_run_list) - - self._runs[k] = v - self._iter_counts[k] = iterations + run_pass = 0 + run_fail = 0 + for run in row[label_index]: + if run: + run_pass += 1 + else: + run_fail += 1 + row[label_index] = ((run_pass, run_fail), row[label_index]) + table.append(row) - return (table, self._runs, self._iter_counts) + # Now that we have the table genearted, we want to store this new composite + # benchmark into the benchmark_result in ResultReport object. + # This will be used to generate a full table which contains our composite + # benchmark. + # We need to create composite benchmark result and add it to keyvals in + # benchmark_results. + v = [] + for label in row[2:]: + # each label's result looks like ((pass, fail), [values]) + benchmark_runs = label[1] + # List of values of each label + single_run_list = [] + for run in benchmark_runs: + # Result of each run under the same label is a dict of keys. + # Here the only key we will add for composite benchmark is the + # weighted_samples we added up. + one_dict = {} + if run: + one_dict[u"weighted_samples"] = [run, u"samples"] + one_dict["retval"] = 0 + else: + one_dict["retval"] = 1 + single_run_list.append(one_dict) + v.append(single_run_list) + + self._runs[k] = v + self._iter_counts[k] = iterations + + return (table, self._runs, self._iter_counts) class Result(object): - """A class that respresents a single result. - - This single result is obtained by condensing the information from a list of - runs and a list of baseline runs. - """ - - def __init__(self): - pass - - def _AllStringsSame(self, values): - values_set = set(values) - return len(values_set) == 1 - - def NeedsBaseline(self): - return False - - # pylint: disable=unused-argument - def _Literal(self, cell, values, baseline_values): - cell.value = ' '.join([str(v) for v in values]) - - def _ComputeFloat(self, cell, values, baseline_values): - self._Literal(cell, values, baseline_values) - - def _ComputeString(self, cell, values, baseline_values): - self._Literal(cell, values, baseline_values) - - def _InvertIfLowerIsBetter(self, cell): - pass + """A class that respresents a single result. - def _GetGmean(self, values): - if not values: - return float('nan') - if any([v < 0 for v in values]): - return float('nan') - if any([v == 0 for v in values]): - return 0.0 - log_list = [math.log(v) for v in values] - gmean_log = sum(log_list) / len(log_list) - return math.exp(gmean_log) - - def Compute(self, cell, values, baseline_values): - """Compute the result given a list of values and baseline values. - - Args: - cell: A cell data structure to populate. - values: List of values. - baseline_values: List of baseline values. Can be none if this is the - baseline itself. + This single result is obtained by condensing the information from a list of + runs and a list of baseline runs. 
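
Result._GetGmean works in log space to avoid overflow on large values; assuming strictly positive inputs, it is equivalent to this one-liner:

import math

values = [2.0, 8.0]
gmean = math.exp(sum(math.log(v) for v in values) / len(values))
# gmean == 4.0, matching (2.0 * 8.0) ** 0.5
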
""" - all_floats = True - values = _StripNone(values) - if not values: - cell.value = '' - return - if _AllFloat(values): - float_values = _GetFloats(values) - else: - all_floats = False - if baseline_values: - baseline_values = _StripNone(baseline_values) - if baseline_values: - if _AllFloat(baseline_values): - float_baseline_values = _GetFloats(baseline_values) - else: - all_floats = False - else: - if self.NeedsBaseline(): - cell.value = '' - return - float_baseline_values = None - if all_floats: - self._ComputeFloat(cell, float_values, float_baseline_values) - self._InvertIfLowerIsBetter(cell) - else: - self._ComputeString(cell, values, baseline_values) + + def __init__(self): + pass + + def _AllStringsSame(self, values): + values_set = set(values) + return len(values_set) == 1 + + def NeedsBaseline(self): + return False + + # pylint: disable=unused-argument + def _Literal(self, cell, values, baseline_values): + cell.value = " ".join([str(v) for v in values]) + + def _ComputeFloat(self, cell, values, baseline_values): + self._Literal(cell, values, baseline_values) + + def _ComputeString(self, cell, values, baseline_values): + self._Literal(cell, values, baseline_values) + + def _InvertIfLowerIsBetter(self, cell): + pass + + def _GetGmean(self, values): + if not values: + return float("nan") + if any([v < 0 for v in values]): + return float("nan") + if any([v == 0 for v in values]): + return 0.0 + log_list = [math.log(v) for v in values] + gmean_log = sum(log_list) / len(log_list) + return math.exp(gmean_log) + + def Compute(self, cell, values, baseline_values): + """Compute the result given a list of values and baseline values. + + Args: + cell: A cell data structure to populate. + values: List of values. + baseline_values: List of baseline values. Can be none if this is the + baseline itself. + """ + all_floats = True + values = _StripNone(values) + if not values: + cell.value = "" + return + if _AllFloat(values): + float_values = _GetFloats(values) + else: + all_floats = False + if baseline_values: + baseline_values = _StripNone(baseline_values) + if baseline_values: + if _AllFloat(baseline_values): + float_baseline_values = _GetFloats(baseline_values) + else: + all_floats = False + else: + if self.NeedsBaseline(): + cell.value = "" + return + float_baseline_values = None + if all_floats: + self._ComputeFloat(cell, float_values, float_baseline_values) + self._InvertIfLowerIsBetter(cell) + else: + self._ComputeString(cell, values, baseline_values) class LiteralResult(Result): - """A literal result.""" + """A literal result.""" - def __init__(self, iteration=0): - super(LiteralResult, self).__init__() - self.iteration = iteration + def __init__(self, iteration=0): + super(LiteralResult, self).__init__() + self.iteration = iteration - def Compute(self, cell, values, baseline_values): - try: - cell.value = values[self.iteration] - except IndexError: - cell.value = '-' + def Compute(self, cell, values, baseline_values): + try: + cell.value = values[self.iteration] + except IndexError: + cell.value = "-" class NonEmptyCountResult(Result): - """A class that counts the number of non-empty results. - - The number of non-empty values will be stored in the cell. - """ + """A class that counts the number of non-empty results. - def Compute(self, cell, values, baseline_values): - """Put the number of non-empty values in the cell result. - - Args: - cell: Put the result in cell.value. - values: A list of values for the row. - baseline_values: A list of baseline values for the row. 
+ The number of non-empty values will be stored in the cell. """ - cell.value = len(_StripNone(values)) - if not baseline_values: - return - base_value = len(_StripNone(baseline_values)) - if cell.value == base_value: - return - f = ColorBoxFormat() - len_values = len(values) - len_baseline_values = len(baseline_values) - tmp_cell = Cell() - tmp_cell.value = 1.0 + ( - float(cell.value - base_value) / (max(len_values, len_baseline_values))) - f.Compute(tmp_cell) - cell.bgcolor = tmp_cell.bgcolor + + def Compute(self, cell, values, baseline_values): + """Put the number of non-empty values in the cell result. + + Args: + cell: Put the result in cell.value. + values: A list of values for the row. + baseline_values: A list of baseline values for the row. + """ + cell.value = len(_StripNone(values)) + if not baseline_values: + return + base_value = len(_StripNone(baseline_values)) + if cell.value == base_value: + return + f = ColorBoxFormat() + len_values = len(values) + len_baseline_values = len(baseline_values) + tmp_cell = Cell() + tmp_cell.value = 1.0 + ( + float(cell.value - base_value) + / (max(len_values, len_baseline_values)) + ) + f.Compute(tmp_cell) + cell.bgcolor = tmp_cell.bgcolor class StringMeanResult(Result): - """Mean of string values.""" + """Mean of string values.""" - def _ComputeString(self, cell, values, baseline_values): - if self._AllStringsSame(values): - cell.value = str(values[0]) - else: - cell.value = '?' + def _ComputeString(self, cell, values, baseline_values): + if self._AllStringsSame(values): + cell.value = str(values[0]) + else: + cell.value = "?" class AmeanResult(StringMeanResult): - """Arithmetic mean.""" + """Arithmetic mean.""" - def __init__(self, ignore_min_max=False): - super(AmeanResult, self).__init__() - self.ignore_min_max = ignore_min_max + def __init__(self, ignore_min_max=False): + super(AmeanResult, self).__init__() + self.ignore_min_max = ignore_min_max - def _ComputeFloat(self, cell, values, baseline_values): - if self.ignore_min_max: - values = _RemoveMinMax(cell, values) - cell.value = statistics.mean(values) + def _ComputeFloat(self, cell, values, baseline_values): + if self.ignore_min_max: + values = _RemoveMinMax(cell, values) + cell.value = statistics.mean(values) class RawResult(Result): - """Raw result.""" + """Raw result.""" class IterationResult(Result): - """Iteration result.""" + """Iteration result.""" class MinResult(Result): - """Minimum.""" + """Minimum.""" - def _ComputeFloat(self, cell, values, baseline_values): - cell.value = min(values) + def _ComputeFloat(self, cell, values, baseline_values): + cell.value = min(values) - def _ComputeString(self, cell, values, baseline_values): - if values: - cell.value = min(values) - else: - cell.value = '' + def _ComputeString(self, cell, values, baseline_values): + if values: + cell.value = min(values) + else: + cell.value = "" class MaxResult(Result): - """Maximum.""" + """Maximum.""" - def _ComputeFloat(self, cell, values, baseline_values): - cell.value = max(values) + def _ComputeFloat(self, cell, values, baseline_values): + cell.value = max(values) - def _ComputeString(self, cell, values, baseline_values): - if values: - cell.value = max(values) - else: - cell.value = '' + def _ComputeString(self, cell, values, baseline_values): + if values: + cell.value = max(values) + else: + cell.value = "" class NumericalResult(Result): - """Numerical result.""" + """Numerical result.""" - def _ComputeString(self, cell, values, baseline_values): - cell.value = '?' 
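
A short worked example, with made-up samples, of AmeanResult when ignore_min_max is set: _RemoveMinMax first discards one smallest and one largest value, then the arithmetic mean is taken over the rest:

import statistics

values = [3.0, 9.0, 5.0, 4.0]
values.remove(min(values))  # drops 3.0
values.remove(max(values))  # drops 9.0
assert statistics.mean(values) == 4.5
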
+ def _ComputeString(self, cell, values, baseline_values): + cell.value = "?" class StdResult(NumericalResult): - """Standard deviation.""" + """Standard deviation.""" - def __init__(self, ignore_min_max=False): - super(StdResult, self).__init__() - self.ignore_min_max = ignore_min_max + def __init__(self, ignore_min_max=False): + super(StdResult, self).__init__() + self.ignore_min_max = ignore_min_max - def _ComputeFloat(self, cell, values, baseline_values): - if self.ignore_min_max: - values = _RemoveMinMax(cell, values) - cell.value = statistics.pstdev(values) + def _ComputeFloat(self, cell, values, baseline_values): + if self.ignore_min_max: + values = _RemoveMinMax(cell, values) + cell.value = statistics.pstdev(values) class CoeffVarResult(NumericalResult): - """Standard deviation / Mean""" + """Standard deviation / Mean""" - def __init__(self, ignore_min_max=False): - super(CoeffVarResult, self).__init__() - self.ignore_min_max = ignore_min_max + def __init__(self, ignore_min_max=False): + super(CoeffVarResult, self).__init__() + self.ignore_min_max = ignore_min_max - def _ComputeFloat(self, cell, values, baseline_values): - if self.ignore_min_max: - values = _RemoveMinMax(cell, values) - if statistics.mean(values) != 0.0: - noise = abs(statistics.pstdev(values) / statistics.mean(values)) - else: - noise = 0.0 - cell.value = noise + def _ComputeFloat(self, cell, values, baseline_values): + if self.ignore_min_max: + values = _RemoveMinMax(cell, values) + if statistics.mean(values) != 0.0: + noise = abs(statistics.pstdev(values) / statistics.mean(values)) + else: + noise = 0.0 + cell.value = noise class ComparisonResult(Result): - """Same or Different.""" - - def NeedsBaseline(self): - return True - - def _ComputeString(self, cell, values, baseline_values): - value = None - baseline_value = None - if self._AllStringsSame(values): - value = values[0] - if self._AllStringsSame(baseline_values): - baseline_value = baseline_values[0] - if value is not None and baseline_value is not None: - if value == baseline_value: - cell.value = 'SAME' - else: - cell.value = 'DIFFERENT' - else: - cell.value = '?' + """Same or Different.""" + + def NeedsBaseline(self): + return True + + def _ComputeString(self, cell, values, baseline_values): + value = None + baseline_value = None + if self._AllStringsSame(values): + value = values[0] + if self._AllStringsSame(baseline_values): + baseline_value = baseline_values[0] + if value is not None and baseline_value is not None: + if value == baseline_value: + cell.value = "SAME" + else: + cell.value = "DIFFERENT" + else: + cell.value = "?" 
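
For context before PValueResult below, a sketch with toy samples of the two-sample t-test call its _ComputeFloat delegates to; only the p-value (the second element of the returned pair) is kept:

import scipy.stats

values = [1.0, 1.1, 0.9]
baseline_values = [2.0, 2.2, 1.9]
_, p_value = scipy.stats.ttest_ind(values, baseline_values)
# A small p_value (e.g. < 0.05) suggests the runs differ from the
# baseline by more than noise.
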
class PValueResult(ComparisonResult): - """P-value.""" + """P-value.""" - def __init__(self, ignore_min_max=False): - super(PValueResult, self).__init__() - self.ignore_min_max = ignore_min_max + def __init__(self, ignore_min_max=False): + super(PValueResult, self).__init__() + self.ignore_min_max = ignore_min_max - def _ComputeFloat(self, cell, values, baseline_values): - if self.ignore_min_max: - values = _RemoveMinMax(cell, values) - baseline_values = _RemoveMinMax(cell, baseline_values) - if len(values) < 2 or len(baseline_values) < 2: - cell.value = float('nan') - return - _, cell.value = scipy.stats.ttest_ind(values, baseline_values) + def _ComputeFloat(self, cell, values, baseline_values): + if self.ignore_min_max: + values = _RemoveMinMax(cell, values) + baseline_values = _RemoveMinMax(cell, baseline_values) + if len(values) < 2 or len(baseline_values) < 2: + cell.value = float("nan") + return + _, cell.value = scipy.stats.ttest_ind(values, baseline_values) - def _ComputeString(self, cell, values, baseline_values): - return float('nan') + def _ComputeString(self, cell, values, baseline_values): + return float("nan") class KeyAwareComparisonResult(ComparisonResult): - """Automatic key aware comparison.""" - - def _IsLowerBetter(self, key): - # Units in histograms should include directions - if 'smallerIsBetter' in key: - return True - if 'biggerIsBetter' in key: - return False - - # For units in chartjson: - # TODO(llozano): Trying to guess direction by looking at the name of the - # test does not seem like a good idea. Test frameworks should provide this - # info explicitly. I believe Telemetry has this info. Need to find it out. - # - # Below are some test names for which we are not sure what the - # direction is. - # - # For these we dont know what the direction is. But, since we dont - # specify anything, crosperf will assume higher is better: - # --percent_impl_scrolled--percent_impl_scrolled--percent - # --solid_color_tiles_analyzed--solid_color_tiles_analyzed--count - # --total_image_cache_hit_count--total_image_cache_hit_count--count - # --total_texture_upload_time_by_url - # - # About these we are doubtful but we made a guess: - # --average_num_missing_tiles_by_url--*--units (low is good) - # --experimental_mean_frame_time_by_url--*--units (low is good) - # --experimental_median_frame_time_by_url--*--units (low is good) - # --texture_upload_count--texture_upload_count--count (high is good) - # --total_deferred_image_decode_count--count (low is good) - # --total_tiles_analyzed--total_tiles_analyzed--count (high is good) - lower_is_better_keys = [ - 'milliseconds', 'ms_', 'seconds_', 'KB', 'rdbytes', 'wrbytes', - 'dropped_percent', '(ms)', '(seconds)', '--ms', - '--average_num_missing_tiles', '--experimental_jank', - '--experimental_mean_frame', '--experimental_median_frame_time', - '--total_deferred_image_decode_count', '--seconds', 'samples', 'bytes' - ] - - return any([l in key for l in lower_is_better_keys]) - - def _InvertIfLowerIsBetter(self, cell): - if self._IsLowerBetter(cell.name): - if cell.value: - cell.value = 1.0 / cell.value + """Automatic key aware comparison.""" + + def _IsLowerBetter(self, key): + # Units in histograms should include directions + if "smallerIsBetter" in key: + return True + if "biggerIsBetter" in key: + return False + + # For units in chartjson: + # TODO(llozano): Trying to guess direction by looking at the name of the + # test does not seem like a good idea. Test frameworks should provide this + # info explicitly. 
I believe Telemetry has this info. Need to find it out. + # + # Below are some test names for which we are not sure what the + # direction is. + # + # For these we dont know what the direction is. But, since we dont + # specify anything, crosperf will assume higher is better: + # --percent_impl_scrolled--percent_impl_scrolled--percent + # --solid_color_tiles_analyzed--solid_color_tiles_analyzed--count + # --total_image_cache_hit_count--total_image_cache_hit_count--count + # --total_texture_upload_time_by_url + # + # About these we are doubtful but we made a guess: + # --average_num_missing_tiles_by_url--*--units (low is good) + # --experimental_mean_frame_time_by_url--*--units (low is good) + # --experimental_median_frame_time_by_url--*--units (low is good) + # --texture_upload_count--texture_upload_count--count (high is good) + # --total_deferred_image_decode_count--count (low is good) + # --total_tiles_analyzed--total_tiles_analyzed--count (high is good) + lower_is_better_keys = [ + "milliseconds", + "ms_", + "seconds_", + "KB", + "rdbytes", + "wrbytes", + "dropped_percent", + "(ms)", + "(seconds)", + "--ms", + "--average_num_missing_tiles", + "--experimental_jank", + "--experimental_mean_frame", + "--experimental_median_frame_time", + "--total_deferred_image_decode_count", + "--seconds", + "samples", + "bytes", + ] + + return any([l in key for l in lower_is_better_keys]) + + def _InvertIfLowerIsBetter(self, cell): + if self._IsLowerBetter(cell.name): + if cell.value: + cell.value = 1.0 / cell.value class AmeanRatioResult(KeyAwareComparisonResult): - """Ratio of arithmetic means of values vs. baseline values.""" - - def __init__(self, ignore_min_max=False): - super(AmeanRatioResult, self).__init__() - self.ignore_min_max = ignore_min_max - - def _ComputeFloat(self, cell, values, baseline_values): - if self.ignore_min_max: - values = _RemoveMinMax(cell, values) - baseline_values = _RemoveMinMax(cell, baseline_values) - - baseline_mean = statistics.mean(baseline_values) - values_mean = statistics.mean(values) - if baseline_mean != 0: - cell.value = values_mean / baseline_mean - elif values_mean != 0: - cell.value = 0.00 - # cell.value = 0 means the values and baseline_values have big difference - else: - cell.value = 1.00 - # no difference if both values and baseline_values are 0 + """Ratio of arithmetic means of values vs. baseline values.""" + + def __init__(self, ignore_min_max=False): + super(AmeanRatioResult, self).__init__() + self.ignore_min_max = ignore_min_max + + def _ComputeFloat(self, cell, values, baseline_values): + if self.ignore_min_max: + values = _RemoveMinMax(cell, values) + baseline_values = _RemoveMinMax(cell, baseline_values) + + baseline_mean = statistics.mean(baseline_values) + values_mean = statistics.mean(values) + if baseline_mean != 0: + cell.value = values_mean / baseline_mean + elif values_mean != 0: + cell.value = 0.00 + # cell.value = 0 means the values and baseline_values have big difference + else: + cell.value = 1.00 + # no difference if both values and baseline_values are 0 class GmeanRatioResult(KeyAwareComparisonResult): - """Ratio of geometric means of values vs. 
baseline values.""" - - def __init__(self, ignore_min_max=False): - super(GmeanRatioResult, self).__init__() - self.ignore_min_max = ignore_min_max - - def _ComputeFloat(self, cell, values, baseline_values): - if self.ignore_min_max: - values = _RemoveMinMax(cell, values) - baseline_values = _RemoveMinMax(cell, baseline_values) - if self._GetGmean(baseline_values) != 0: - cell.value = self._GetGmean(values) / self._GetGmean(baseline_values) - elif self._GetGmean(values) != 0: - cell.value = 0.00 - else: - cell.value = 1.00 + """Ratio of geometric means of values vs. baseline values.""" + + def __init__(self, ignore_min_max=False): + super(GmeanRatioResult, self).__init__() + self.ignore_min_max = ignore_min_max + + def _ComputeFloat(self, cell, values, baseline_values): + if self.ignore_min_max: + values = _RemoveMinMax(cell, values) + baseline_values = _RemoveMinMax(cell, baseline_values) + if self._GetGmean(baseline_values) != 0: + cell.value = self._GetGmean(values) / self._GetGmean( + baseline_values + ) + elif self._GetGmean(values) != 0: + cell.value = 0.00 + else: + cell.value = 1.00 class Color(object): - """Class that represents color in RGBA format.""" - - def __init__(self, r=0, g=0, b=0, a=0): - self.r = r - self.g = g - self.b = b - self.a = a - - def __str__(self): - return 'r: %s g: %s: b: %s: a: %s' % (self.r, self.g, self.b, self.a) + """Class that represents color in RGBA format.""" + + def __init__(self, r=0, g=0, b=0, a=0): + self.r = r + self.g = g + self.b = b + self.a = a + + def __str__(self): + return "r: %s g: %s: b: %s: a: %s" % (self.r, self.g, self.b, self.a) + + def Round(self): + """Round RGBA values to the nearest integer.""" + self.r = int(self.r) + self.g = int(self.g) + self.b = int(self.b) + self.a = int(self.a) + + def GetRGB(self): + """Get a hex representation of the color.""" + return "%02x%02x%02x" % (self.r, self.g, self.b) + + @classmethod + def Lerp(cls, ratio, a, b): + """Perform linear interpolation between two colors. + + Args: + ratio: The ratio to use for linear polation. + a: The first color object (used when ratio is 0). + b: The second color object (used when ratio is 1). + + Returns: + Linearly interpolated color. + """ + ret = cls() + ret.r = (b.r - a.r) * ratio + a.r + ret.g = (b.g - a.g) * ratio + a.g + ret.b = (b.b - a.b) * ratio + a.b + ret.a = (b.a - a.a) * ratio + a.a + return ret - def Round(self): - """Round RGBA values to the nearest integer.""" - self.r = int(self.r) - self.g = int(self.g) - self.b = int(self.b) - self.a = int(self.a) - def GetRGB(self): - """Get a hex representation of the color.""" - return '%02x%02x%02x' % (self.r, self.g, self.b) +class Format(object): + """A class that represents the format of a column.""" - @classmethod - def Lerp(cls, ratio, a, b): - """Perform linear interpolation between two colors. + def __init__(self): + pass - Args: - ratio: The ratio to use for linear polation. - a: The first color object (used when ratio is 0). - b: The second color object (used when ratio is 1). + def Compute(self, cell): + """Computes the attributes of a cell based on its value. - Returns: - Linearly interpolated color. - """ - ret = cls() - ret.r = (b.r - a.r) * ratio + a.r - ret.g = (b.g - a.g) * ratio + a.g - ret.b = (b.b - a.b) * ratio + a.b - ret.a = (b.a - a.a) * ratio + a.a - return ret + Attributes typically are color, width, etc. + Args: + cell: The cell whose attributes are to be populated. 
+ """ + if cell.value is None: + cell.string_value = "" + if isinstance(cell.value, float): + self._ComputeFloat(cell) + else: + self._ComputeString(cell) -class Format(object): - """A class that represents the format of a column.""" + def _ComputeFloat(self, cell): + cell.string_value = "{0:.2f}".format(cell.value) - def __init__(self): - pass + def _ComputeString(self, cell): + cell.string_value = str(cell.value) - def Compute(self, cell): - """Computes the attributes of a cell based on its value. + def _GetColor(self, value, low, mid, high, power=6, mid_value=1.0): + min_value = 0.0 + max_value = 2.0 + if math.isnan(value): + return mid + if value > mid_value: + value = max_value - mid_value / value - Attributes typically are color, width, etc. + return self._GetColorBetweenRange( + value, min_value, mid_value, max_value, low, mid, high, power + ) - Args: - cell: The cell whose attributes are to be populated. - """ - if cell.value is None: - cell.string_value = '' - if isinstance(cell.value, float): - self._ComputeFloat(cell) - else: - self._ComputeString(cell) - - def _ComputeFloat(self, cell): - cell.string_value = '{0:.2f}'.format(cell.value) - - def _ComputeString(self, cell): - cell.string_value = str(cell.value) - - def _GetColor(self, value, low, mid, high, power=6, mid_value=1.0): - min_value = 0.0 - max_value = 2.0 - if math.isnan(value): - return mid - if value > mid_value: - value = max_value - mid_value / value - - return self._GetColorBetweenRange(value, min_value, mid_value, max_value, - low, mid, high, power) - - def _GetColorBetweenRange(self, value, min_value, mid_value, max_value, - low_color, mid_color, high_color, power): - assert value <= max_value - assert value >= min_value - if value > mid_value: - value = (max_value - value) / (max_value - mid_value) - value **= power - ret = Color.Lerp(value, high_color, mid_color) - else: - value = (value - min_value) / (mid_value - min_value) - value **= power - ret = Color.Lerp(value, low_color, mid_color) - ret.Round() - return ret + def _GetColorBetweenRange( + self, + value, + min_value, + mid_value, + max_value, + low_color, + mid_color, + high_color, + power, + ): + assert value <= max_value + assert value >= min_value + if value > mid_value: + value = (max_value - value) / (max_value - mid_value) + value **= power + ret = Color.Lerp(value, high_color, mid_color) + else: + value = (value - min_value) / (mid_value - min_value) + value **= power + ret = Color.Lerp(value, low_color, mid_color) + ret.Round() + return ret class PValueFormat(Format): - """Formatting for p-value.""" + """Formatting for p-value.""" - def _ComputeFloat(self, cell): - cell.string_value = '%0.2f' % float(cell.value) - if float(cell.value) < 0.05: - cell.bgcolor = self._GetColor( - cell.value, - Color(255, 255, 0, 0), - Color(255, 255, 255, 0), - Color(255, 255, 255, 0), - mid_value=0.05, - power=1) + def _ComputeFloat(self, cell): + cell.string_value = "%0.2f" % float(cell.value) + if float(cell.value) < 0.05: + cell.bgcolor = self._GetColor( + cell.value, + Color(255, 255, 0, 0), + Color(255, 255, 255, 0), + Color(255, 255, 255, 0), + mid_value=0.05, + power=1, + ) class WeightFormat(Format): - """Formatting for weight in cwp mode.""" + """Formatting for weight in cwp mode.""" - def _ComputeFloat(self, cell): - cell.string_value = '%0.4f' % float(cell.value) + def _ComputeFloat(self, cell): + cell.string_value = "%0.4f" % float(cell.value) class StorageFormat(Format): - """Format the cell as a storage number. 
+ """Format the cell as a storage number. - Examples: - If the cell contains a value of 1024, the string_value will be 1.0K. - """ - - def _ComputeFloat(self, cell): - base = 1024 - suffices = ['K', 'M', 'G'] - v = float(cell.value) - current = 0 - while v >= base**(current + 1) and current < len(suffices): - current += 1 + Examples: + If the cell contains a value of 1024, the string_value will be 1.0K. + """ - if current: - divisor = base**current - cell.string_value = '%1.1f%s' % ((v / divisor), suffices[current - 1]) - else: - cell.string_value = str(cell.value) + def _ComputeFloat(self, cell): + base = 1024 + suffices = ["K", "M", "G"] + v = float(cell.value) + current = 0 + while v >= base ** (current + 1) and current < len(suffices): + current += 1 + + if current: + divisor = base ** current + cell.string_value = "%1.1f%s" % ( + (v / divisor), + suffices[current - 1], + ) + else: + cell.string_value = str(cell.value) class CoeffVarFormat(Format): - """Format the cell as a percent. + """Format the cell as a percent. - Examples: - If the cell contains a value of 1.5, the string_value will be +150%. - """ + Examples: + If the cell contains a value of 1.5, the string_value will be +150%. + """ - def _ComputeFloat(self, cell): - cell.string_value = '%1.1f%%' % (float(cell.value) * 100) - cell.color = self._GetColor( - cell.value, - Color(0, 255, 0, 0), - Color(0, 0, 0, 0), - Color(255, 0, 0, 0), - mid_value=0.02, - power=1) + def _ComputeFloat(self, cell): + cell.string_value = "%1.1f%%" % (float(cell.value) * 100) + cell.color = self._GetColor( + cell.value, + Color(0, 255, 0, 0), + Color(0, 0, 0, 0), + Color(255, 0, 0, 0), + mid_value=0.02, + power=1, + ) class PercentFormat(Format): - """Format the cell as a percent. + """Format the cell as a percent. - Examples: - If the cell contains a value of 1.5, the string_value will be +50%. - """ + Examples: + If the cell contains a value of 1.5, the string_value will be +50%. + """ - def _ComputeFloat(self, cell): - cell.string_value = '%+1.1f%%' % ((float(cell.value) - 1) * 100) - cell.color = self._GetColor(cell.value, Color(255, 0, 0, 0), - Color(0, 0, 0, 0), Color(0, 255, 0, 0)) + def _ComputeFloat(self, cell): + cell.string_value = "%+1.1f%%" % ((float(cell.value) - 1) * 100) + cell.color = self._GetColor( + cell.value, + Color(255, 0, 0, 0), + Color(0, 0, 0, 0), + Color(0, 255, 0, 0), + ) class RatioFormat(Format): - """Format the cell as a ratio. + """Format the cell as a ratio. - Examples: - If the cell contains a value of 1.5642, the string_value will be 1.56. - """ + Examples: + If the cell contains a value of 1.5642, the string_value will be 1.56. + """ - def _ComputeFloat(self, cell): - cell.string_value = '%+1.1f%%' % ((cell.value - 1) * 100) - cell.color = self._GetColor(cell.value, Color(255, 0, 0, 0), - Color(0, 0, 0, 0), Color(0, 255, 0, 0)) + def _ComputeFloat(self, cell): + cell.string_value = "%+1.1f%%" % ((cell.value - 1) * 100) + cell.color = self._GetColor( + cell.value, + Color(255, 0, 0, 0), + Color(0, 0, 0, 0), + Color(0, 255, 0, 0), + ) class ColorBoxFormat(Format): - """Format the cell as a color box. + """Format the cell as a color box. - Examples: - If the cell contains a value of 1.5, it will get a green color. - If the cell contains a value of 0.5, it will get a red color. - The intensity of the green/red will be determined by how much above or below - 1.0 the value is. - """ + Examples: + If the cell contains a value of 1.5, it will get a green color. 
+ If the cell contains a value of 0.5, it will get a red color. + The intensity of the green/red will be determined by how much above or below + 1.0 the value is. + """ - def _ComputeFloat(self, cell): - cell.string_value = '--' - bgcolor = self._GetColor(cell.value, Color(255, 0, 0, 0), - Color(255, 255, 255, 0), Color(0, 255, 0, 0)) - cell.bgcolor = bgcolor - cell.color = bgcolor + def _ComputeFloat(self, cell): + cell.string_value = "--" + bgcolor = self._GetColor( + cell.value, + Color(255, 0, 0, 0), + Color(255, 255, 255, 0), + Color(0, 255, 0, 0), + ) + cell.bgcolor = bgcolor + cell.color = bgcolor class Cell(object): - """A class to represent a cell in a table. - - Attributes: - value: The raw value of the cell. - color: The color of the cell. - bgcolor: The background color of the cell. - string_value: The string value of the cell. - suffix: A string suffix to be attached to the value when displaying. - prefix: A string prefix to be attached to the value when displaying. - color_row: Indicates whether the whole row is to inherit this cell's color. - bgcolor_row: Indicates whether the whole row is to inherit this cell's - bgcolor. - width: Optional specifier to make a column narrower than the usual width. - The usual width of a column is the max of all its cells widths. - colspan: Set the colspan of the cell in the HTML table, this is used for - table headers. Default value is 1. - name: the test name of the cell. - header: Whether this is a header in html. - """ - - def __init__(self): - self.value = None - self.color = None - self.bgcolor = None - self.string_value = None - self.suffix = None - self.prefix = None - # Entire row inherits this color. - self.color_row = False - self.bgcolor_row = False - self.width = 0 - self.colspan = 1 - self.name = None - self.header = False - - def __str__(self): - l = [] - l.append('value: %s' % self.value) - l.append('string_value: %s' % self.string_value) - return ' '.join(l) + """A class to represent a cell in a table. + + Attributes: + value: The raw value of the cell. + color: The color of the cell. + bgcolor: The background color of the cell. + string_value: The string value of the cell. + suffix: A string suffix to be attached to the value when displaying. + prefix: A string prefix to be attached to the value when displaying. + color_row: Indicates whether the whole row is to inherit this cell's color. + bgcolor_row: Indicates whether the whole row is to inherit this cell's + bgcolor. + width: Optional specifier to make a column narrower than the usual width. + The usual width of a column is the max of all its cells widths. + colspan: Set the colspan of the cell in the HTML table, this is used for + table headers. Default value is 1. + name: the test name of the cell. + header: Whether this is a header in html. + """ + + def __init__(self): + self.value = None + self.color = None + self.bgcolor = None + self.string_value = None + self.suffix = None + self.prefix = None + # Entire row inherits this color. + self.color_row = False + self.bgcolor_row = False + self.width = 0 + self.colspan = 1 + self.name = None + self.header = False + + def __str__(self): + l = [] + l.append("value: %s" % self.value) + l.append("string_value: %s" % self.string_value) + return " ".join(l) class Column(object): - """Class representing a column in a table. + """Class representing a column in a table. - Attributes: - result: an object of the Result class. - fmt: an object of the Format class. - """ + Attributes: + result: an object of the Result class. 
+ fmt: an object of the Format class. + """ - def __init__(self, result, fmt, name=''): - self.result = result - self.fmt = fmt - self.name = name + def __init__(self, result, fmt, name=""): + self.result = result + self.fmt = fmt + self.name = name # Takes in: @@ -1033,536 +1093,561 @@ class Column(object): # ["k", avg("v", "v2"), stddev("v", "v2"), etc.]] # according to format string class TableFormatter(object): - """Class to convert a plain table into a cell-table. + """Class to convert a plain table into a cell-table. - This class takes in a table generated by TableGenerator and a list of column - formats to apply to the table and returns a table of cells. - """ + This class takes in a table generated by TableGenerator and a list of column + formats to apply to the table and returns a table of cells. + """ - def __init__(self, table, columns, samples_table=False): - """The constructor takes in a table and a list of columns. + def __init__(self, table, columns, samples_table=False): + """The constructor takes in a table and a list of columns. + + Args: + table: A list of lists of values. + columns: A list of column containing what to produce and how to format + it. + samples_table: A flag to check whether we are generating a table of + samples in CWP approximation mode. + """ + self._table = table + self._columns = columns + self._samples_table = samples_table + self._table_columns = [] + self._out_table = [] + + def GenerateCellTable(self, table_type): + row_index = 0 + all_failed = False + + for row in self._table[1:]: + # If we are generating samples_table, the second value will be weight + # rather than values. + start_col = 2 if self._samples_table else 1 + # It does not make sense to put retval in the summary table. + if str(row[0]) == "retval" and table_type == "summary": + # Check to see if any runs passed, and update all_failed. + all_failed = True + for values in row[start_col:]: + if 0 in values: + all_failed = False + continue + key = Cell() + key.string_value = str(row[0]) + out_row = [key] + if self._samples_table: + # Add one column for weight if in samples_table mode + weight = Cell() + weight.value = row[1] + f = WeightFormat() + f.Compute(weight) + out_row.append(weight) + baseline = None + for results in row[start_col:]: + column_start = 0 + values = None + # If generating sample table, we will split a tuple of iterations info + # from the results + if isinstance(results, tuple): + it, values = results + column_start = 1 + cell = Cell() + cell.string_value = "[%d: %d]" % (it[0], it[1]) + out_row.append(cell) + if not row_index: + self._table_columns.append(self._columns[0]) + else: + values = results + # Parse each column + for column in self._columns[column_start:]: + cell = Cell() + cell.name = key.string_value + if ( + not column.result.NeedsBaseline() + or baseline is not None + ): + column.result.Compute(cell, values, baseline) + column.fmt.Compute(cell) + out_row.append(cell) + if not row_index: + self._table_columns.append(column) + + if baseline is None: + baseline = values + self._out_table.append(out_row) + row_index += 1 + + # If this is a summary table, and the only row in it is 'retval', and + # all the test runs failed, we need to add a 'Results' row to the output + # table.
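That retval bookkeeping reduces to a small check; a standalone sketch (all_runs_failed is a hypothetical name, not part of tabulator.py):

    def all_runs_failed(retval_lists):
        # A retval of 0 means a run passed; the 'retval' row is skipped in
        # summary tables, and all_failed stays True only if no list has a 0.
        return all(0 not in values for values in retval_lists)

    print(all_runs_failed([[1, 1], [2]]))  # True
    print(all_runs_failed([[1, 0], [2]]))  # False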
+ if table_type == "summary" and all_failed and len(self._table) == 2: + labels_row = self._table[0] + key = Cell() + key.string_value = "Results" + out_row = [key] + baseline = None + for _ in labels_row[1:]: + for column in self._columns: + cell = Cell() + cell.name = key.string_value + column.result.Compute(cell, ["Fail"], baseline) + column.fmt.Compute(cell) + out_row.append(cell) + if not row_index: + self._table_columns.append(column) + self._out_table.append(out_row) + + def AddColumnName(self): + """Generate Column name at the top of table.""" + key = Cell() + key.header = True + key.string_value = "Keys" if not self._samples_table else "Benchmarks" + header = [key] + if self._samples_table: + weight = Cell() + weight.header = True + weight.string_value = "Weights" + header.append(weight) + for column in self._table_columns: + cell = Cell() + cell.header = True + if column.name: + cell.string_value = column.name + else: + result_name = column.result.__class__.__name__ + format_name = column.fmt.__class__.__name__ - Args: - table: A list of lists of values. - columns: A list of column containing what to produce and how to format - it. - samples_table: A flag to check whether we are generating a table of - samples in CWP apporximation mode. - """ - self._table = table - self._columns = columns - self._samples_table = samples_table - self._table_columns = [] - self._out_table = [] - - def GenerateCellTable(self, table_type): - row_index = 0 - all_failed = False - - for row in self._table[1:]: - # If we are generating samples_table, the second value will be weight - # rather than values. - start_col = 2 if self._samples_table else 1 - # It does not make sense to put retval in the summary table. - if str(row[0]) == 'retval' and table_type == 'summary': - # Check to see if any runs passed, and update all_failed. - all_failed = True - for values in row[start_col:]: - if 0 in values: - all_failed = False - continue - key = Cell() - key.string_value = str(row[0]) - out_row = [key] - if self._samples_table: - # Add one column for weight if in samples_table mode - weight = Cell() - weight.value = row[1] - f = WeightFormat() - f.Compute(weight) - out_row.append(weight) - baseline = None - for results in row[start_col:]: - column_start = 0 - values = None - # If generating sample table, we will split a tuple of iterations info - # from the results - if isinstance(results, tuple): - it, values = results - column_start = 1 - cell = Cell() - cell.string_value = '[%d: %d]' % (it[0], it[1]) - out_row.append(cell) - if not row_index: - self._table_columns.append(self._columns[0]) - else: - values = results - # Parse each column - for column in self._columns[column_start:]: - cell = Cell() - cell.name = key.string_value - if not column.result.NeedsBaseline() or baseline is not None: - column.result.Compute(cell, values, baseline) - column.fmt.Compute(cell) - out_row.append(cell) - if not row_index: - self._table_columns.append(column) - - if baseline is None: - baseline = values - self._out_table.append(out_row) - row_index += 1 - - # If this is a summary table, and the only row in it is 'retval', and - # all the test runs failed, we need to a 'Results' row to the output - # table. 
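As an aside, the default column headers that AddColumnName builds come straight from the class names; a sketch (column_header is a hypothetical helper):

    def column_header(result_cls_name, fmt_cls_name):
        # "AmeanResult" + "PercentFormat" -> "Amean Percent": the default
        # header is the class names with the Result/Format suffixes stripped.
        return "%s %s" % (
            result_cls_name.replace("Result", ""),
            fmt_cls_name.replace("Format", ""),
        )

    print(column_header("AmeanResult", "PercentFormat"))     # Amean Percent
    print(column_header("GmeanRatioResult", "RatioFormat"))  # GmeanRatio Ratio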
- if table_type == 'summary' and all_failed and len(self._table) == 2: - labels_row = self._table[0] - key = Cell() - key.string_value = 'Results' - out_row = [key] - baseline = None - for _ in labels_row[1:]: - for column in self._columns: - cell = Cell() - cell.name = key.string_value - column.result.Compute(cell, ['Fail'], baseline) - column.fmt.Compute(cell) - out_row.append(cell) - if not row_index: - self._table_columns.append(column) - self._out_table.append(out_row) - - def AddColumnName(self): - """Generate Column name at the top of table.""" - key = Cell() - key.header = True - key.string_value = 'Keys' if not self._samples_table else 'Benchmarks' - header = [key] - if self._samples_table: - weight = Cell() - weight.header = True - weight.string_value = 'Weights' - header.append(weight) - for column in self._table_columns: - cell = Cell() - cell.header = True - if column.name: - cell.string_value = column.name - else: - result_name = column.result.__class__.__name__ - format_name = column.fmt.__class__.__name__ - - cell.string_value = '%s %s' % ( - result_name.replace('Result', ''), - format_name.replace('Format', ''), - ) + cell.string_value = "%s %s" % ( + result_name.replace("Result", ""), + format_name.replace("Format", ""), + ) - header.append(cell) - - self._out_table = [header] + self._out_table - - def AddHeader(self, s): - """Put additional string on the top of the table.""" - cell = Cell() - cell.header = True - cell.string_value = str(s) - header = [cell] - colspan = max(1, max(len(row) for row in self._table)) - cell.colspan = colspan - self._out_table = [header] + self._out_table - - def GetPassesAndFails(self, values): - passes = 0 - fails = 0 - for val in values: - if val == 0: - passes = passes + 1 - else: - fails = fails + 1 - return passes, fails - - def AddLabelName(self): - """Put label on the top of the table.""" - top_header = [] - base_colspan = len( - [c for c in self._columns if not c.result.NeedsBaseline()]) - compare_colspan = len(self._columns) - # Find the row with the key 'retval', if it exists. This - # will be used to calculate the number of iterations that passed and - # failed for each image label. - retval_row = None - for row in self._table: - if row[0] == 'retval': - retval_row = row - # The label is organized as follows - # "keys" label_base, label_comparison1, label_comparison2 - # The first cell has colspan 1, the second is base_colspan - # The others are compare_colspan - column_position = 0 - for label in self._table[0]: - cell = Cell() - cell.header = True - # Put the number of pass/fail iterations in the image label header. 
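The pass/fail annotation that AddLabelName attaches to each image label can be reproduced in a few lines (passes_and_fails is a hypothetical name, not part of tabulator.py):

    def passes_and_fails(retvals):
        # Mirrors GetPassesAndFails: a retval of 0 counts as a pass,
        # anything else as a fail.
        passes = sum(1 for v in retvals if v == 0)
        return passes, len(retvals) - passes

    label = "my-image (pass:%d fail:%d)" % passes_and_fails([0, 0, 1])
    print(label)  # my-image (pass:2 fail:1)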
- if column_position > 0 and retval_row: - retval_values = retval_row[column_position] - if isinstance(retval_values, list): - passes, fails = self.GetPassesAndFails(retval_values) - cell.string_value = str(label) + ' (pass:%d fail:%d)' % (passes, - fails) - else: - cell.string_value = str(label) - else: - cell.string_value = str(label) - if top_header: - if not self._samples_table or (self._samples_table and - len(top_header) == 2): - cell.colspan = base_colspan - if len(top_header) > 1: - if not self._samples_table or (self._samples_table and - len(top_header) > 2): - cell.colspan = compare_colspan - top_header.append(cell) - column_position = column_position + 1 - self._out_table = [top_header] + self._out_table - - def _PrintOutTable(self): - o = '' - for row in self._out_table: - for cell in row: - o += str(cell) + ' ' - o += '\n' - print(o) - - def GetCellTable(self, table_type='full', headers=True): - """Function to return a table of cells. - - The table (list of lists) is converted into a table of cells by this - function. + header.append(cell) - Args: - table_type: Can be 'full' or 'summary' - headers: A boolean saying whether we want default headers + self._out_table = [header] + self._out_table - Returns: - A table of cells with each cell having the properties and string values as - requiested by the columns passed in the constructor. - """ - # Generate the cell table, creating a list of dynamic columns on the fly. - if not self._out_table: - self.GenerateCellTable(table_type) - if headers: - self.AddColumnName() - self.AddLabelName() - return self._out_table + def AddHeader(self, s): + """Put additional string on the top of the table.""" + cell = Cell() + cell.header = True + cell.string_value = str(s) + header = [cell] + colspan = max(1, max(len(row) for row in self._table)) + cell.colspan = colspan + self._out_table = [header] + self._out_table + + def GetPassesAndFails(self, values): + passes = 0 + fails = 0 + for val in values: + if val == 0: + passes = passes + 1 + else: + fails = fails + 1 + return passes, fails + + def AddLabelName(self): + """Put label on the top of the table.""" + top_header = [] + base_colspan = len( + [c for c in self._columns if not c.result.NeedsBaseline()] + ) + compare_colspan = len(self._columns) + # Find the row with the key 'retval', if it exists. This + # will be used to calculate the number of iterations that passed and + # failed for each image label. + retval_row = None + for row in self._table: + if row[0] == "retval": + retval_row = row + # The label is organized as follows + # "keys" label_base, label_comparison1, label_comparison2 + # The first cell has colspan 1, the second is base_colspan + # The others are compare_colspan + column_position = 0 + for label in self._table[0]: + cell = Cell() + cell.header = True + # Put the number of pass/fail iterations in the image label header. 
+ if column_position > 0 and retval_row: + retval_values = retval_row[column_position] + if isinstance(retval_values, list): + passes, fails = self.GetPassesAndFails(retval_values) + cell.string_value = str(label) + " (pass:%d fail:%d)" % ( + passes, + fails, + ) + else: + cell.string_value = str(label) + else: + cell.string_value = str(label) + if top_header: + if not self._samples_table or ( + self._samples_table and len(top_header) == 2 + ): + cell.colspan = base_colspan + if len(top_header) > 1: + if not self._samples_table or ( + self._samples_table and len(top_header) > 2 + ): + cell.colspan = compare_colspan + top_header.append(cell) + column_position = column_position + 1 + self._out_table = [top_header] + self._out_table + + def _PrintOutTable(self): + o = "" + for row in self._out_table: + for cell in row: + o += str(cell) + " " + o += "\n" + print(o) + + def GetCellTable(self, table_type="full", headers=True): + """Function to return a table of cells. + + The table (list of lists) is converted into a table of cells by this + function. + + Args: + table_type: Can be 'full' or 'summary' + headers: A boolean saying whether we want default headers + + Returns: + A table of cells with each cell having the properties and string values as + requested by the columns passed in the constructor. + """ + # Generate the cell table, creating a list of dynamic columns on the fly. + if not self._out_table: + self.GenerateCellTable(table_type) + if headers: + self.AddColumnName() + self.AddLabelName() + return self._out_table class TablePrinter(object): - """Class to print a cell table to the console, file or html.""" - PLAIN = 0 - CONSOLE = 1 - HTML = 2 - TSV = 3 - EMAIL = 4 - - def __init__(self, table, output_type): - """Constructor that stores the cell table and output type.""" - self._table = table - self._output_type = output_type - self._row_styles = [] - self._column_styles = [] - - # Compute whole-table properties like max-size, etc. - def _ComputeStyle(self): - self._row_styles = [] - for row in self._table: - row_style = Cell() - for cell in row: - if cell.color_row: - assert cell.color, 'Cell color not set but color_row set!' - assert not row_style.color, 'Multiple row_style.colors found!' - row_style.color = cell.color - if cell.bgcolor_row: - assert cell.bgcolor, 'Cell bgcolor not set but bgcolor_row set!' - assert not row_style.bgcolor, 'Multiple row_style.bgcolors found!'
- row_style.bgcolor = cell.bgcolor - self._row_styles.append(row_style) - - self._column_styles = [] - if len(self._table) < 2: - return - - for i in range(max(len(row) for row in self._table)): - column_style = Cell() - for row in self._table: - if not any([cell.colspan != 1 for cell in row]): - column_style.width = max(column_style.width, len(row[i].string_value)) - self._column_styles.append(column_style) - - def _GetBGColorFix(self, color): - if self._output_type == self.CONSOLE: - prefix = misc.rgb2short(color.r, color.g, color.b) - # pylint: disable=anomalous-backslash-in-string - prefix = '\033[48;5;%sm' % prefix - suffix = '\033[0m' - elif self._output_type in [self.EMAIL, self.HTML]: - rgb = color.GetRGB() - prefix = ('<FONT style="BACKGROUND-COLOR:#{0}">'.format(rgb)) - suffix = '</FONT>' - elif self._output_type in [self.PLAIN, self.TSV]: - prefix = '' - suffix = '' - return prefix, suffix - - def _GetColorFix(self, color): - if self._output_type == self.CONSOLE: - prefix = misc.rgb2short(color.r, color.g, color.b) - # pylint: disable=anomalous-backslash-in-string - prefix = '\033[38;5;%sm' % prefix - suffix = '\033[0m' - elif self._output_type in [self.EMAIL, self.HTML]: - rgb = color.GetRGB() - prefix = '<FONT COLOR=#{0}>'.format(rgb) - suffix = '</FONT>' - elif self._output_type in [self.PLAIN, self.TSV]: - prefix = '' - suffix = '' - return prefix, suffix - - def Print(self): - """Print the table to a console, html, etc. - - Returns: - A string that contains the desired representation of the table. - """ - self._ComputeStyle() - return self._GetStringValue() - - def _GetCellValue(self, i, j): - cell = self._table[i][j] - out = cell.string_value - raw_width = len(out) - - if cell.color: - p, s = self._GetColorFix(cell.color) - out = '%s%s%s' % (p, out, s) - - if cell.bgcolor: - p, s = self._GetBGColorFix(cell.bgcolor) - out = '%s%s%s' % (p, out, s) - - if self._output_type in [self.PLAIN, self.CONSOLE, self.EMAIL]: - if cell.width: - width = cell.width - else: - if self._column_styles: - width = self._column_styles[j].width - else: - width = len(cell.string_value) - if cell.colspan > 1: - width = 0 - start = 0 - for k in range(j): - start += self._table[i][k].colspan - for k in range(cell.colspan): - width += self._column_styles[start + k].width - if width > raw_width: - padding = ('%' + str(width - raw_width) + 's') % '' - out = padding + out - - if self._output_type == self.HTML: - if cell.header: - tag = 'th' - else: - tag = 'td' - out = '<{0} colspan = "{2}"> {1} </{0}>'.format(tag, out, cell.colspan) - - return out - - def _GetHorizontalSeparator(self): - if self._output_type in [self.CONSOLE, self.PLAIN, self.EMAIL]: - return ' ' - if self._output_type == self.HTML: - return '' - if self._output_type == self.TSV: - return '\t' - - def _GetVerticalSeparator(self): - if self._output_type in [self.PLAIN, self.CONSOLE, self.TSV, self.EMAIL]: - return '\n' - if self._output_type == self.HTML: - return '</tr>\n<tr>' - - def _GetPrefix(self): - if self._output_type in [self.PLAIN, self.CONSOLE, self.TSV, self.EMAIL]: - return '' - if self._output_type == self.HTML: - return '<p></p><table id="box-table-a">\n<tr>' - - def _GetSuffix(self): - if self._output_type in [self.PLAIN, self.CONSOLE, self.TSV, self.EMAIL]: - return '' - if self._output_type == self.HTML: - return '</tr>\n</table>' - - def _GetStringValue(self): - o = '' - o += self._GetPrefix() - for i in range(len(self._table)): - row = self._table[i] - # Apply row color and bgcolor. 
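For the CONSOLE output type, _GetBGColorFix and _GetColorFix wrap a cell in xterm-256 escape sequences, using misc.rgb2short to map an RGB triple to a 256-color index; a sketch with a hard-coded index instead (ansi_bg is a hypothetical helper):

    def ansi_bg(color_index, text):
        # Background escape used for CONSOLE output: \033[48;5;<n>m ... \033[0m.
        # Foreground color uses 38;5 in place of 48;5.
        return "\033[48;5;%dm%s\033[0m" % (color_index, text)

    print(ansi_bg(196, " 1.56 "))  # red background on 256-color terminals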
- p = s = bgp = bgs = '' - if self._row_styles[i].bgcolor: - bgp, bgs = self._GetBGColorFix(self._row_styles[i].bgcolor) - if self._row_styles[i].color: - p, s = self._GetColorFix(self._row_styles[i].color) - o += p + bgp - for j in range(len(row)): - out = self._GetCellValue(i, j) - o += out + self._GetHorizontalSeparator() - o += s + bgs - o += self._GetVerticalSeparator() - o += self._GetSuffix() - return o + """Class to print a cell table to the console, file or html.""" + + PLAIN = 0 + CONSOLE = 1 + HTML = 2 + TSV = 3 + EMAIL = 4 + + def __init__(self, table, output_type): + """Constructor that stores the cell table and output type.""" + self._table = table + self._output_type = output_type + self._row_styles = [] + self._column_styles = [] + + # Compute whole-table properties like max-size, etc. + def _ComputeStyle(self): + self._row_styles = [] + for row in self._table: + row_style = Cell() + for cell in row: + if cell.color_row: + assert cell.color, "Cell color not set but color_row set!" + assert ( + not row_style.color + ), "Multiple row_style.colors found!" + row_style.color = cell.color + if cell.bgcolor_row: + assert ( + cell.bgcolor + ), "Cell bgcolor not set but bgcolor_row set!" + assert ( + not row_style.bgcolor + ), "Multiple row_style.bgcolors found!" + row_style.bgcolor = cell.bgcolor + self._row_styles.append(row_style) + + self._column_styles = [] + if len(self._table) < 2: + return + + for i in range(max(len(row) for row in self._table)): + column_style = Cell() + for row in self._table: + if not any([cell.colspan != 1 for cell in row]): + column_style.width = max( + column_style.width, len(row[i].string_value) + ) + self._column_styles.append(column_style) + + def _GetBGColorFix(self, color): + if self._output_type == self.CONSOLE: + prefix = misc.rgb2short(color.r, color.g, color.b) + # pylint: disable=anomalous-backslash-in-string + prefix = "\033[48;5;%sm" % prefix + suffix = "\033[0m" + elif self._output_type in [self.EMAIL, self.HTML]: + rgb = color.GetRGB() + prefix = '<FONT style="BACKGROUND-COLOR:#{0}">'.format(rgb) + suffix = "</FONT>" + elif self._output_type in [self.PLAIN, self.TSV]: + prefix = "" + suffix = "" + return prefix, suffix + + def _GetColorFix(self, color): + if self._output_type == self.CONSOLE: + prefix = misc.rgb2short(color.r, color.g, color.b) + # pylint: disable=anomalous-backslash-in-string + prefix = "\033[38;5;%sm" % prefix + suffix = "\033[0m" + elif self._output_type in [self.EMAIL, self.HTML]: + rgb = color.GetRGB() + prefix = "<FONT COLOR=#{0}>".format(rgb) + suffix = "</FONT>" + elif self._output_type in [self.PLAIN, self.TSV]: + prefix = "" + suffix = "" + return prefix, suffix + + def Print(self): + """Print the table to a console, html, etc. + + Returns: + A string that contains the desired representation of the table. 
+ """ + self._ComputeStyle() + return self._GetStringValue() + + def _GetCellValue(self, i, j): + cell = self._table[i][j] + out = cell.string_value + raw_width = len(out) + + if cell.color: + p, s = self._GetColorFix(cell.color) + out = "%s%s%s" % (p, out, s) + + if cell.bgcolor: + p, s = self._GetBGColorFix(cell.bgcolor) + out = "%s%s%s" % (p, out, s) + + if self._output_type in [self.PLAIN, self.CONSOLE, self.EMAIL]: + if cell.width: + width = cell.width + else: + if self._column_styles: + width = self._column_styles[j].width + else: + width = len(cell.string_value) + if cell.colspan > 1: + width = 0 + start = 0 + for k in range(j): + start += self._table[i][k].colspan + for k in range(cell.colspan): + width += self._column_styles[start + k].width + if width > raw_width: + padding = ("%" + str(width - raw_width) + "s") % "" + out = padding + out + + if self._output_type == self.HTML: + if cell.header: + tag = "th" + else: + tag = "td" + out = '<{0} colspan = "{2}"> {1} </{0}>'.format( + tag, out, cell.colspan + ) + + return out + + def _GetHorizontalSeparator(self): + if self._output_type in [self.CONSOLE, self.PLAIN, self.EMAIL]: + return " " + if self._output_type == self.HTML: + return "" + if self._output_type == self.TSV: + return "\t" + + def _GetVerticalSeparator(self): + if self._output_type in [ + self.PLAIN, + self.CONSOLE, + self.TSV, + self.EMAIL, + ]: + return "\n" + if self._output_type == self.HTML: + return "</tr>\n<tr>" + + def _GetPrefix(self): + if self._output_type in [ + self.PLAIN, + self.CONSOLE, + self.TSV, + self.EMAIL, + ]: + return "" + if self._output_type == self.HTML: + return '<p></p><table id="box-table-a">\n<tr>' + + def _GetSuffix(self): + if self._output_type in [ + self.PLAIN, + self.CONSOLE, + self.TSV, + self.EMAIL, + ]: + return "" + if self._output_type == self.HTML: + return "</tr>\n</table>" + + def _GetStringValue(self): + o = "" + o += self._GetPrefix() + for i in range(len(self._table)): + row = self._table[i] + # Apply row color and bgcolor. + p = s = bgp = bgs = "" + if self._row_styles[i].bgcolor: + bgp, bgs = self._GetBGColorFix(self._row_styles[i].bgcolor) + if self._row_styles[i].color: + p, s = self._GetColorFix(self._row_styles[i].color) + o += p + bgp + for j in range(len(row)): + out = self._GetCellValue(i, j) + o += out + self._GetHorizontalSeparator() + o += s + bgs + o += self._GetVerticalSeparator() + o += self._GetSuffix() + return o # Some common drivers def GetSimpleTable(table, out_to=TablePrinter.CONSOLE): - """Prints a simple table. - - This is used by code that has a very simple list-of-lists and wants to - produce a table with ameans, a percentage ratio of ameans and a colorbox. - - Examples: - GetSimpleConsoleTable([["binary", "b1", "b2"],["size", "300", "400"]]) - will produce a colored table that can be printed to the console. - - Args: - table: a list of lists. - out_to: specify the fomat of output. Currently it supports HTML and CONSOLE. - - Returns: - A string version of the table that can be printed to the console. - """ - columns = [ - Column(AmeanResult(), Format()), - Column(AmeanRatioResult(), PercentFormat()), - Column(AmeanRatioResult(), ColorBoxFormat()), - ] - our_table = [table[0]] - for row in table[1:]: - our_row = [row[0]] - for v in row[1:]: - our_row.append([v]) - our_table.append(our_row) - - tf = TableFormatter(our_table, columns) - cell_table = tf.GetCellTable() - tp = TablePrinter(cell_table, out_to) - return tp.Print() + """Prints a simple table. 
+ + This is used by code that has a very simple list-of-lists and wants to + produce a table with ameans, a percentage ratio of ameans and a colorbox. + + Examples: + GetSimpleTable([["binary", "b1", "b2"],["size", "300", "400"]]) + will produce a colored table that can be printed to the console. + + Args: + table: a list of lists. + out_to: specify the format of output. Currently it supports HTML and CONSOLE. + + Returns: + A string version of the table that can be printed to the console. + """ + columns = [ + Column(AmeanResult(), Format()), + Column(AmeanRatioResult(), PercentFormat()), + Column(AmeanRatioResult(), ColorBoxFormat()), + ] + our_table = [table[0]] + for row in table[1:]: + our_row = [row[0]] + for v in row[1:]: + our_row.append([v]) + our_table.append(our_row) + + tf = TableFormatter(our_table, columns) + cell_table = tf.GetCellTable() + tp = TablePrinter(cell_table, out_to) + return tp.Print() # pylint: disable=redefined-outer-name def GetComplexTable(runs, labels, out_to=TablePrinter.CONSOLE): - """Prints a complex table. + """Prints a complex table. - This can be used to generate a table with arithmetic mean, standard deviation, - coefficient of variation, p-values, etc. + This can be used to generate a table with arithmetic mean, standard deviation, + coefficient of variation, p-values, etc. - Args: - runs: A list of lists with data to tabulate. - labels: A list of labels that correspond to the runs. - out_to: specifies the format of the table (example CONSOLE or HTML). + Args: + runs: A list of lists with data to tabulate. + labels: A list of labels that correspond to the runs. + out_to: specifies the format of the table (example CONSOLE or HTML). - Returns: - A string table that can be printed to the console or put in an HTML file. - """ - tg = TableGenerator(runs, labels, TableGenerator.SORT_BY_VALUES_DESC) - table = tg.GetTable() - columns = [ - Column(LiteralResult(), Format(), 'Literal'), - Column(AmeanResult(), Format()), - Column(StdResult(), Format()), - Column(CoeffVarResult(), CoeffVarFormat()), - Column(NonEmptyCountResult(), Format()), - Column(AmeanRatioResult(), PercentFormat()), - Column(AmeanRatioResult(), RatioFormat()), - Column(GmeanRatioResult(), RatioFormat()), - Column(PValueResult(), PValueFormat()) - ] - tf = TableFormatter(table, columns) - cell_table = tf.GetCellTable() - tp = TablePrinter(cell_table, out_to) - return tp.Print() - - -if __name__ == '__main__': - # Run a few small tests here.
- run1 = { - 'k1': '10', - 'k2': '12', - 'k5': '40', - 'k6': '40', - 'ms_1': '20', - 'k7': 'FAIL', - 'k8': 'PASS', - 'k9': 'PASS', - 'k10': '0' - } - run2 = { - 'k1': '13', - 'k2': '14', - 'k3': '15', - 'ms_1': '10', - 'k8': 'PASS', - 'k9': 'FAIL', - 'k10': '0' - } - run3 = { - 'k1': '50', - 'k2': '51', - 'k3': '52', - 'k4': '53', - 'k5': '35', - 'k6': '45', - 'ms_1': '200', - 'ms_2': '20', - 'k7': 'FAIL', - 'k8': 'PASS', - 'k9': 'PASS' - } - runs = [[run1, run2], [run3]] - labels = ['vanilla', 'modified'] - t = GetComplexTable(runs, labels, TablePrinter.CONSOLE) - print(t) - email = GetComplexTable(runs, labels, TablePrinter.EMAIL) - - runs = [[{ - 'k1': '1' - }, { - 'k1': '1.1' - }, { - 'k1': '1.2' - }], [{ - 'k1': '5' - }, { - 'k1': '5.1' - }, { - 'k1': '5.2' - }]] - t = GetComplexTable(runs, labels, TablePrinter.CONSOLE) - print(t) - - simple_table = [ - ['binary', 'b1', 'b2', 'b3'], - ['size', 100, 105, 108], - ['rodata', 100, 80, 70], - ['data', 100, 100, 100], - ['debug', 100, 140, 60], - ] - t = GetSimpleTable(simple_table) - print(t) - email += GetSimpleTable(simple_table, TablePrinter.HTML) - email_to = [getpass.getuser()] - email = "<pre style='font-size: 13px'>%s</pre>" % email - EmailSender().SendEmail(email_to, 'SimpleTableTest', email, msg_type='html') + Returns: + A string table that can be printed to the console or put in an HTML file. + """ + tg = TableGenerator(runs, labels, TableGenerator.SORT_BY_VALUES_DESC) + table = tg.GetTable() + columns = [ + Column(LiteralResult(), Format(), "Literal"), + Column(AmeanResult(), Format()), + Column(StdResult(), Format()), + Column(CoeffVarResult(), CoeffVarFormat()), + Column(NonEmptyCountResult(), Format()), + Column(AmeanRatioResult(), PercentFormat()), + Column(AmeanRatioResult(), RatioFormat()), + Column(GmeanRatioResult(), RatioFormat()), + Column(PValueResult(), PValueFormat()), + ] + tf = TableFormatter(table, columns) + cell_table = tf.GetCellTable() + tp = TablePrinter(cell_table, out_to) + return tp.Print() + + +if __name__ == "__main__": + # Run a few small tests here. 
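Before the built-in smoke tests below, here is a hedged end-to-end sketch of the TableGenerator -> TableFormatter -> TablePrinter pipeline that GetComplexTable wires up (it assumes cros_utils.tabulator is importable and uses only the constructor shapes shown above):

    from cros_utils.tabulator import (
        AmeanResult,
        Column,
        Format,
        TableFormatter,
        TableGenerator,
        TablePrinter,
    )

    runs = [[{"k1": "10"}, {"k1": "12"}], [{"k1": "15"}]]
    labels = ["base", "new"]
    # Stage 1: raw runs -> plain table; stage 2: plain table -> cell table;
    # stage 3: cell table -> console string.
    tg = TableGenerator(runs, labels, TableGenerator.SORT_BY_VALUES_DESC)
    tf = TableFormatter(tg.GetTable(), [Column(AmeanResult(), Format())])
    print(TablePrinter(tf.GetCellTable(), TablePrinter.CONSOLE).Print())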
+ run1 = { + "k1": "10", + "k2": "12", + "k5": "40", + "k6": "40", + "ms_1": "20", + "k7": "FAIL", + "k8": "PASS", + "k9": "PASS", + "k10": "0", + } + run2 = { + "k1": "13", + "k2": "14", + "k3": "15", + "ms_1": "10", + "k8": "PASS", + "k9": "FAIL", + "k10": "0", + } + run3 = { + "k1": "50", + "k2": "51", + "k3": "52", + "k4": "53", + "k5": "35", + "k6": "45", + "ms_1": "200", + "ms_2": "20", + "k7": "FAIL", + "k8": "PASS", + "k9": "PASS", + } + runs = [[run1, run2], [run3]] + labels = ["vanilla", "modified"] + t = GetComplexTable(runs, labels, TablePrinter.CONSOLE) + print(t) + email = GetComplexTable(runs, labels, TablePrinter.EMAIL) + + runs = [ + [{"k1": "1"}, {"k1": "1.1"}, {"k1": "1.2"}], + [{"k1": "5"}, {"k1": "5.1"}, {"k1": "5.2"}], + ] + t = GetComplexTable(runs, labels, TablePrinter.CONSOLE) + print(t) + + simple_table = [ + ["binary", "b1", "b2", "b3"], + ["size", 100, 105, 108], + ["rodata", 100, 80, 70], + ["data", 100, 100, 100], + ["debug", 100, 140, 60], + ] + t = GetSimpleTable(simple_table) + print(t) + email += GetSimpleTable(simple_table, TablePrinter.HTML) + email_to = [getpass.getuser()] + email = "<pre style='font-size: 13px'>%s</pre>" % email + EmailSender().SendEmail(email_to, "SimpleTableTest", email, msg_type="html") diff --git a/cros_utils/timeline.py b/cros_utils/timeline.py index af844c0a..d6d4cc0b 100644 --- a/cros_utils/timeline.py +++ b/cros_utils/timeline.py @@ -7,49 +7,51 @@ from __future__ import print_function -__author__ = 'yunlian@google.com (Yunlian Jiang)' + +__author__ = "yunlian@google.com (Yunlian Jiang)" import time class Event(object): - """One event on the timeline.""" + """One event on the timeline.""" - def __init__(self, name='', cur_time=0): - self.name = name - self.timestamp = cur_time + def __init__(self, name="", cur_time=0): + self.name = name + self.timestamp = cur_time class Timeline(object): - """Use a dict to store the timeline.""" - - def __init__(self): - self.events = [] - - def Record(self, event): - for e in self.events: - assert e.name != event, ( - 'The event {0} is already recorded.'.format(event)) - cur_event = Event(name=event, cur_time=time.time()) - self.events.append(cur_event) - - def GetEvents(self): - return ([e.name for e in self.events]) - - def GetEventDict(self): - tl = {} - for e in self.events: - tl[e.name] = e.timestamp - return tl - - def GetEventTime(self, event): - for e in self.events: - if e.name == event: - return e.timestamp - raise IndexError('The event {0} is not recorded'.format(event)) - - def GetLastEventTime(self): - return self.events[-1].timestamp - - def GetLastEvent(self): - return self.events[-1].name + """Use a dict to store the timeline.""" + + def __init__(self): + self.events = [] + + def Record(self, event): + for e in self.events: + assert e.name != event, "The event {0} is already recorded.".format( + event + ) + cur_event = Event(name=event, cur_time=time.time()) + self.events.append(cur_event) + + def GetEvents(self): + return [e.name for e in self.events] + + def GetEventDict(self): + tl = {} + for e in self.events: + tl[e.name] = e.timestamp + return tl + + def GetEventTime(self, event): + for e in self.events: + if e.name == event: + return e.timestamp + raise IndexError("The event {0} is not recorded".format(event)) + + def GetLastEventTime(self): + return self.events[-1].timestamp + + def GetLastEvent(self): + return self.events[-1].name diff --git a/cros_utils/timeline_test.py b/cros_utils/timeline_test.py index 337a6676..6743b986 100755 --- a/cros_utils/timeline_test.py +++ 
b/cros_utils/timeline_test.py @@ -8,7 +8,8 @@ from __future__ import print_function -__author__ = 'yunlian@google.com (Yunlian Jiang)' + +__author__ = "yunlian@google.com (Yunlian Jiang)" import time import unittest @@ -17,46 +18,46 @@ from cros_utils import timeline class TimeLineTest(unittest.TestCase): - """Tests for the Timeline class.""" - - def testRecord(self): - tl = timeline.Timeline() - tl.Record('A') - t = time.time() - t1 = tl.events[0].timestamp - self.assertEqual(int(t1 - t), 0) - self.assertRaises(AssertionError, tl.Record, 'A') - - def testGetEvents(self): - tl = timeline.Timeline() - tl.Record('A') - e = tl.GetEvents() - self.assertEqual(e, ['A']) - tl.Record('B') - e = tl.GetEvents() - self.assertEqual(e, ['A', 'B']) - - def testGetEventTime(self): - tl = timeline.Timeline() - tl.Record('A') - t = time.time() - t1 = tl.GetEventTime('A') - self.assertEqual(int(t1 - t), 0) - self.assertRaises(IndexError, tl.GetEventTime, 'B') - - def testGetLastEventTime(self): - tl = timeline.Timeline() - self.assertRaises(IndexError, tl.GetLastEventTime) - tl.Record('A') - t = time.time() - t1 = tl.GetLastEventTime() - self.assertEqual(int(t1 - t), 0) - time.sleep(2) - tl.Record('B') - t = time.time() - t1 = tl.GetLastEventTime() - self.assertEqual(int(t1 - t), 0) - - -if __name__ == '__main__': - unittest.main() + """Tests for the Timeline class.""" + + def testRecord(self): + tl = timeline.Timeline() + tl.Record("A") + t = time.time() + t1 = tl.events[0].timestamp + self.assertEqual(int(t1 - t), 0) + self.assertRaises(AssertionError, tl.Record, "A") + + def testGetEvents(self): + tl = timeline.Timeline() + tl.Record("A") + e = tl.GetEvents() + self.assertEqual(e, ["A"]) + tl.Record("B") + e = tl.GetEvents() + self.assertEqual(e, ["A", "B"]) + + def testGetEventTime(self): + tl = timeline.Timeline() + tl.Record("A") + t = time.time() + t1 = tl.GetEventTime("A") + self.assertEqual(int(t1 - t), 0) + self.assertRaises(IndexError, tl.GetEventTime, "B") + + def testGetLastEventTime(self): + tl = timeline.Timeline() + self.assertRaises(IndexError, tl.GetLastEventTime) + tl.Record("A") + t = time.time() + t1 = tl.GetLastEventTime() + self.assertEqual(int(t1 - t), 0) + time.sleep(2) + tl.Record("B") + t = time.time() + t1 = tl.GetLastEventTime() + self.assertEqual(int(t1 - t), 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/cros_utils/tiny_render.py b/cros_utils/tiny_render.py index 13463d10..978bf05c 100644 --- a/cros_utils/tiny_render.py +++ b/cros_utils/tiny_render.py @@ -57,12 +57,13 @@ import collections import html import typing as t -Bold = collections.namedtuple('Bold', ['inner']) -LineBreak = collections.namedtuple('LineBreak', []) -Link = collections.namedtuple('Link', ['href', 'inner']) -UnorderedList = collections.namedtuple('UnorderedList', ['items']) + +Bold = collections.namedtuple("Bold", ["inner"]) +LineBreak = collections.namedtuple("LineBreak", []) +Link = collections.namedtuple("Link", ["href", "inner"]) +UnorderedList = collections.namedtuple("UnorderedList", ["items"]) # Outputs different data depending on whether we're emitting text or HTML. -Switch = collections.namedtuple('Switch', ['text', 'html']) +Switch = collections.namedtuple("Switch", ["text", "html"]) line_break = LineBreak() @@ -85,97 +86,98 @@ line_break = LineBreak() Piece = t.Any # pylint: disable=invalid-name -def _render_text_pieces(piece: Piece, indent_level: int, - into: t.List[str]) -> None: - """Helper for |render_text_pieces|. 
Accumulates strs into |into|.""" - if isinstance(piece, LineBreak): - into.append('\n' + indent_level * ' ') - return +def _render_text_pieces( + piece: Piece, indent_level: int, into: t.List[str] +) -> None: + """Helper for |render_text_pieces|. Accumulates strs into |into|.""" + if isinstance(piece, LineBreak): + into.append("\n" + indent_level * " ") + return - if isinstance(piece, str): - into.append(piece) - return + if isinstance(piece, str): + into.append(piece) + return - if isinstance(piece, Bold): - into.append('**') - _render_text_pieces(piece.inner, indent_level, into) - into.append('**') - return + if isinstance(piece, Bold): + into.append("**") + _render_text_pieces(piece.inner, indent_level, into) + into.append("**") + return - if isinstance(piece, Link): - # Don't even try; it's ugly more often than not. - _render_text_pieces(piece.inner, indent_level, into) - return + if isinstance(piece, Link): + # Don't even try; it's ugly more often than not. + _render_text_pieces(piece.inner, indent_level, into) + return - if isinstance(piece, UnorderedList): - for p in piece.items: - _render_text_pieces([line_break, '- ', p], indent_level + 2, into) - return + if isinstance(piece, UnorderedList): + for p in piece.items: + _render_text_pieces([line_break, "- ", p], indent_level + 2, into) + return - if isinstance(piece, Switch): - _render_text_pieces(piece.text, indent_level, into) - return + if isinstance(piece, Switch): + _render_text_pieces(piece.text, indent_level, into) + return - if isinstance(piece, (list, tuple)): - for p in piece: - _render_text_pieces(p, indent_level, into) - return + if isinstance(piece, (list, tuple)): + for p in piece: + _render_text_pieces(p, indent_level, into) + return - raise ValueError('Unknown piece type: %s' % type(piece)) + raise ValueError("Unknown piece type: %s" % type(piece)) def render_text_pieces(piece: Piece) -> str: - """Renders the given Pieces into text.""" - into = [] - _render_text_pieces(piece, 0, into) - return ''.join(into) + """Renders the given Pieces into text.""" + into = [] + _render_text_pieces(piece, 0, into) + return "".join(into) def _render_html_pieces(piece: Piece, into: t.List[str]) -> None: - """Helper for |render_html_pieces|. Accumulates strs into |into|.""" - if piece is line_break: - into.append('<br />\n') - return - - if isinstance(piece, str): - into.append(html.escape(piece)) - return - - if isinstance(piece, Bold): - into.append('<b>') - _render_html_pieces(piece.inner, into) - into.append('</b>') - return - - if isinstance(piece, Link): - into.append('<a href="' + piece.href + '">') - _render_html_pieces(piece.inner, into) - into.append('</a>') - return - - if isinstance(piece, UnorderedList): - into.append('<ul>\n') - for p in piece.items: - into.append('<li>') - _render_html_pieces(p, into) - into.append('</li>\n') - into.append('</ul>\n') - return - - if isinstance(piece, Switch): - _render_html_pieces(piece.html, into) - return - - if isinstance(piece, (list, tuple)): - for p in piece: - _render_html_pieces(p, into) - return - - raise ValueError('Unknown piece type: %s' % type(piece)) + """Helper for |render_html_pieces|. 
Accumulates strs into |into|.""" + if piece is line_break: + into.append("<br />\n") + return + + if isinstance(piece, str): + into.append(html.escape(piece)) + return + + if isinstance(piece, Bold): + into.append("<b>") + _render_html_pieces(piece.inner, into) + into.append("</b>") + return + + if isinstance(piece, Link): + into.append('<a href="' + piece.href + '">') + _render_html_pieces(piece.inner, into) + into.append("</a>") + return + + if isinstance(piece, UnorderedList): + into.append("<ul>\n") + for p in piece.items: + into.append("<li>") + _render_html_pieces(p, into) + into.append("</li>\n") + into.append("</ul>\n") + return + + if isinstance(piece, Switch): + _render_html_pieces(piece.html, into) + return + + if isinstance(piece, (list, tuple)): + for p in piece: + _render_html_pieces(p, into) + return + + raise ValueError("Unknown piece type: %s" % type(piece)) def render_html_pieces(piece: Piece) -> str: - """Renders the given Pieces into HTML.""" - into = [] - _render_html_pieces(piece, into) - return ''.join(into) + """Renders the given Pieces into HTML.""" + into = [] + _render_html_pieces(piece, into) + return "".join(into) diff --git a/cros_utils/tiny_render_test.py b/cros_utils/tiny_render_test.py index 93ad00e7..6534a9d7 100755 --- a/cros_utils/tiny_render_test.py +++ b/cros_utils/tiny_render_test.py @@ -18,160 +18,184 @@ import tiny_render # shipped alongside the plain-text, the hope is that people won't have to # subject themselves to reading the HTML often. :) class Test(unittest.TestCase): - """Tests for tiny_render.""" - - def test_bold(self): - pieces = [ - tiny_render.Bold('hello'), - ', ', - tiny_render.Bold(['world', '!']), - ] - - self.assertEqual( - tiny_render.render_text_pieces(pieces), - '**hello**, **world!**', - ) - - self.assertEqual( - tiny_render.render_html_pieces(pieces), - '<b>hello</b>, <b>world!</b>', - ) - - def test_line_break(self): - pieces = [ - 'hello', - tiny_render.line_break, - ['world', '!'], - ] - - self.assertEqual( - tiny_render.render_text_pieces(pieces), - 'hello\nworld!', - ) - - self.assertEqual( - tiny_render.render_html_pieces(pieces), - 'hello<br />\nworld!', - ) - - def test_linkification(self): - pieces = [ - 'hello ', - tiny_render.Link(href='https://google.com', inner='world!'), - ] - - self.assertEqual( - tiny_render.render_text_pieces(pieces), - 'hello world!', - ) - - self.assertEqual( - tiny_render.render_html_pieces(pieces), - 'hello <a href="https://google.com">world!</a>', - ) - - def test_unordered_list(self): - pieces = [ - 'hello:', - tiny_render.UnorderedList([ - 'world', - 'w o r l d', - ]), - ] - - self.assertEqual( - tiny_render.render_text_pieces(pieces), - '\n'.join(( - 'hello:', - ' - world', - ' - w o r l d', - )), - ) - - self.assertEqual( - tiny_render.render_html_pieces(pieces), - '\n'.join(( - 'hello:<ul>', - '<li>world</li>', - '<li>w o r l d</li>', - '</ul>', - '', - )), - ) - - def test_nested_unordered_list(self): - pieces = [ - 'hello:', - tiny_render.UnorderedList([ - 'world', - ['and more:', tiny_render.UnorderedList(['w o r l d'])], - 'world2', - ]) - ] - - self.assertEqual( - tiny_render.render_text_pieces(pieces), - '\n'.join(( - 'hello:', - ' - world', - ' - and more:', - ' - w o r l d', - ' - world2', - )), - ) - - self.assertEqual( - tiny_render.render_html_pieces(pieces), - '\n'.join(( - 'hello:<ul>', - '<li>world</li>', - '<li>and more:<ul>', - '<li>w o r l d</li>', - '</ul>', - '</li>', - '<li>world2</li>', - '</ul>', - '', - )), - ) - - def test_switch(self): - pieces = ['hello ', 
tiny_render.Switch(text='text', html='html')] - self.assertEqual(tiny_render.render_text_pieces(pieces), 'hello text') - self.assertEqual(tiny_render.render_html_pieces(pieces), 'hello html') - - def test_golden(self): - pieces = [ - 'hello', - tiny_render.UnorderedList([ - tiny_render.Switch(text='text', html=tiny_render.Bold('html')), - 'the', - tiny_render.Bold('sun'), - ]), - tiny_render.line_break, - ['is', ' out!'], - ] - - self.assertEqual( - tiny_render.render_text_pieces(pieces), '\n'.join(( - 'hello', - ' - text', - ' - the', - ' - **sun**', - 'is out!', - ))) - - self.assertEqual( - tiny_render.render_html_pieces(pieces), '\n'.join(( - 'hello<ul>', - '<li><b>html</b></li>', - '<li>the</li>', - '<li><b>sun</b></li>', - '</ul>', - '<br />', - 'is out!', - ))) - - -if __name__ == '__main__': - unittest.main() + """Tests for tiny_render.""" + + def test_bold(self): + pieces = [ + tiny_render.Bold("hello"), + ", ", + tiny_render.Bold(["world", "!"]), + ] + + self.assertEqual( + tiny_render.render_text_pieces(pieces), + "**hello**, **world!**", + ) + + self.assertEqual( + tiny_render.render_html_pieces(pieces), + "<b>hello</b>, <b>world!</b>", + ) + + def test_line_break(self): + pieces = [ + "hello", + tiny_render.line_break, + ["world", "!"], + ] + + self.assertEqual( + tiny_render.render_text_pieces(pieces), + "hello\nworld!", + ) + + self.assertEqual( + tiny_render.render_html_pieces(pieces), + "hello<br />\nworld!", + ) + + def test_linkification(self): + pieces = [ + "hello ", + tiny_render.Link(href="https://google.com", inner="world!"), + ] + + self.assertEqual( + tiny_render.render_text_pieces(pieces), + "hello world!", + ) + + self.assertEqual( + tiny_render.render_html_pieces(pieces), + 'hello <a href="https://google.com">world!</a>', + ) + + def test_unordered_list(self): + pieces = [ + "hello:", + tiny_render.UnorderedList( + [ + "world", + "w o r l d", + ] + ), + ] + + self.assertEqual( + tiny_render.render_text_pieces(pieces), + "\n".join( + ( + "hello:", + " - world", + " - w o r l d", + ) + ), + ) + + self.assertEqual( + tiny_render.render_html_pieces(pieces), + "\n".join( + ( + "hello:<ul>", + "<li>world</li>", + "<li>w o r l d</li>", + "</ul>", + "", + ) + ), + ) + + def test_nested_unordered_list(self): + pieces = [ + "hello:", + tiny_render.UnorderedList( + [ + "world", + ["and more:", tiny_render.UnorderedList(["w o r l d"])], + "world2", + ] + ), + ] + + self.assertEqual( + tiny_render.render_text_pieces(pieces), + "\n".join( + ( + "hello:", + " - world", + " - and more:", + " - w o r l d", + " - world2", + ) + ), + ) + + self.assertEqual( + tiny_render.render_html_pieces(pieces), + "\n".join( + ( + "hello:<ul>", + "<li>world</li>", + "<li>and more:<ul>", + "<li>w o r l d</li>", + "</ul>", + "</li>", + "<li>world2</li>", + "</ul>", + "", + ) + ), + ) + + def test_switch(self): + pieces = ["hello ", tiny_render.Switch(text="text", html="html")] + self.assertEqual(tiny_render.render_text_pieces(pieces), "hello text") + self.assertEqual(tiny_render.render_html_pieces(pieces), "hello html") + + def test_golden(self): + pieces = [ + "hello", + tiny_render.UnorderedList( + [ + tiny_render.Switch( + text="text", html=tiny_render.Bold("html") + ), + "the", + tiny_render.Bold("sun"), + ] + ), + tiny_render.line_break, + ["is", " out!"], + ] + + self.assertEqual( + tiny_render.render_text_pieces(pieces), + "\n".join( + ( + "hello", + " - text", + " - the", + " - **sun**", + "is out!", + ) + ), + ) + + self.assertEqual( + tiny_render.render_html_pieces(pieces), + 
"\n".join( + ( + "hello<ul>", + "<li><b>html</b></li>", + "<li>the</li>", + "<li><b>sun</b></li>", + "</ul>", + "<br />", + "is out!", + ) + ), + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py index 8b918934..473ab547 100644 --- a/crosperf/benchmark.py +++ b/crosperf/benchmark.py @@ -9,79 +9,85 @@ from __future__ import division from __future__ import print_function import math + # FIXME(denik): Fix the import in chroot. # pylint: disable=import-error from scipy import stats + # See crbug.com/673558 for how these are estimated. _estimated_stddev = { - 'octane': 0.015, - 'kraken': 0.019, - 'speedometer': 0.007, - 'speedometer2': 0.006, - 'dromaeo.domcoreattr': 0.023, - 'dromaeo.domcoremodify': 0.011, - 'graphics_WebGLAquarium': 0.008, - 'page_cycler_v2.typical_25': 0.021, - 'loading.desktop': 0.021, # Copied from page_cycler initially + "octane": 0.015, + "kraken": 0.019, + "speedometer": 0.007, + "speedometer2": 0.006, + "dromaeo.domcoreattr": 0.023, + "dromaeo.domcoremodify": 0.011, + "graphics_WebGLAquarium": 0.008, + "page_cycler_v2.typical_25": 0.021, + "loading.desktop": 0.021, # Copied from page_cycler initially } # Get #samples needed to guarantee a given confidence interval, assuming the # samples follow normal distribution. def _samples(b): - # TODO: Make this an option - # CI = (0.9, 0.02), i.e., 90% chance that |sample mean - true mean| < 2%. - p = 0.9 - e = 0.02 - if b not in _estimated_stddev: - return 1 - d = _estimated_stddev[b] - # Get at least 2 samples so as to calculate standard deviation, which is - # needed in T-test for p-value. - n = int(math.ceil((stats.norm.isf((1 - p) / 2) * d / e)**2)) - return n if n > 1 else 2 + # TODO: Make this an option + # CI = (0.9, 0.02), i.e., 90% chance that |sample mean - true mean| < 2%. + p = 0.9 + e = 0.02 + if b not in _estimated_stddev: + return 1 + d = _estimated_stddev[b] + # Get at least 2 samples so as to calculate standard deviation, which is + # needed in T-test for p-value. + n = int(math.ceil((stats.norm.isf((1 - p) / 2) * d / e) ** 2)) + return n if n > 1 else 2 class Benchmark(object): - """Class representing a benchmark to be run. + """Class representing a benchmark to be run. - Contains details of the benchmark suite, arguments to pass to the suite, - iterations to run the benchmark suite and so on. Note that the benchmark name - can be different to the test suite name. For example, you may want to have - two different benchmarks which run the same test_name with different - arguments. - """ + Contains details of the benchmark suite, arguments to pass to the suite, + iterations to run the benchmark suite and so on. Note that the benchmark name + can be different to the test suite name. For example, you may want to have + two different benchmarks which run the same test_name with different + arguments. + """ - def __init__(self, - name, - test_name, - test_args, - iterations, - rm_chroot_tmp, - perf_args, - suite='', - show_all_results=False, - retries=0, - run_local=False, - cwp_dso='', - weight=0): - self.name = name - # For telemetry, this is the benchmark name. - self.test_name = test_name - # For telemetry, this is the data. 
- self.test_args = test_args - self.iterations = iterations if iterations > 0 else _samples(name) - self.perf_args = perf_args - self.rm_chroot_tmp = rm_chroot_tmp - self.iteration_adjusted = False - self.suite = suite - self.show_all_results = show_all_results - self.retries = retries - if self.suite == 'telemetry': - self.show_all_results = True - if run_local and self.suite != 'telemetry_Crosperf': - raise RuntimeError('run_local is only supported by telemetry_Crosperf.') - self.run_local = run_local - self.cwp_dso = cwp_dso - self.weight = weight + def __init__( + self, + name, + test_name, + test_args, + iterations, + rm_chroot_tmp, + perf_args, + suite="", + show_all_results=False, + retries=0, + run_local=False, + cwp_dso="", + weight=0, + ): + self.name = name + # For telemetry, this is the benchmark name. + self.test_name = test_name + # For telemetry, this is the data. + self.test_args = test_args + self.iterations = iterations if iterations > 0 else _samples(name) + self.perf_args = perf_args + self.rm_chroot_tmp = rm_chroot_tmp + self.iteration_adjusted = False + self.suite = suite + self.show_all_results = show_all_results + self.retries = retries + if self.suite == "telemetry": + self.show_all_results = True + if run_local and self.suite != "telemetry_Crosperf": + raise RuntimeError( + "run_local is only supported by telemetry_Crosperf." + ) + self.run_local = run_local + self.cwp_dso = cwp_dso + self.weight = weight diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py index 16bef78b..79cfdd1c 100644 --- a/crosperf/benchmark_run.py +++ b/crosperf/benchmark_run.py @@ -13,254 +13,339 @@ import traceback from cros_utils import command_executer from cros_utils import timeline - -from suite_runner import SuiteRunner from results_cache import MockResult from results_cache import MockResultsCache from results_cache import Result from results_cache import ResultsCache +from suite_runner import SuiteRunner + -STATUS_FAILED = 'FAILED' -STATUS_SUCCEEDED = 'SUCCEEDED' -STATUS_IMAGING = 'IMAGING' -STATUS_RUNNING = 'RUNNING' -STATUS_WAITING = 'WAITING' -STATUS_PENDING = 'PENDING' +STATUS_FAILED = "FAILED" +STATUS_SUCCEEDED = "SUCCEEDED" +STATUS_IMAGING = "IMAGING" +STATUS_RUNNING = "RUNNING" +STATUS_WAITING = "WAITING" +STATUS_PENDING = "PENDING" class BenchmarkRun(threading.Thread): - """The benchmarkrun class.""" - - def __init__(self, name, benchmark, label, iteration, cache_conditions, - machine_manager, logger_to_use, log_level, share_cache, - dut_config): - threading.Thread.__init__(self) - self.name = name - self._logger = logger_to_use - self.log_level = log_level - self.benchmark = benchmark - self.iteration = iteration - self.label = label - self.result = None - self.terminated = False - self.retval = None - self.run_completed = False - self.machine_manager = machine_manager - self.suite_runner = SuiteRunner(dut_config, self._logger, self.log_level) - self.machine = None - self.cache_conditions = cache_conditions - self.runs_complete = 0 - self.cache_hit = False - self.failure_reason = '' - self.test_args = benchmark.test_args - self.cache = None - self.profiler_args = self.GetExtraAutotestArgs() - self._ce = command_executer.GetCommandExecuter( - self._logger, log_level=self.log_level) - self.timeline = timeline.Timeline() - self.timeline.Record(STATUS_PENDING) - self.share_cache = share_cache - self.cache_has_been_read = False - - # This is used by schedv2. 
- self.owner_thread = None - - def ReadCache(self): - # Just use the first machine for running the cached version, - # without locking it. - self.cache = ResultsCache() - self.cache.Init(self.label.chromeos_image, self.label.chromeos_root, - self.benchmark.test_name, self.iteration, self.test_args, - self.profiler_args, self.machine_manager, self.machine, - self.label.board, self.cache_conditions, self._logger, - self.log_level, self.label, self.share_cache, - self.benchmark.suite, self.benchmark.show_all_results, - self.benchmark.run_local, self.benchmark.cwp_dso) - - self.result = self.cache.ReadResult() - self.cache_hit = (self.result is not None) - self.cache_has_been_read = True - - def run(self): - try: - if not self.cache_has_been_read: - self.ReadCache() - - if self.result: - self._logger.LogOutput('%s: Cache hit.' % self.name) - self._logger.LogOutput(self.result.out, print_to_console=False) - self._logger.LogError(self.result.err, print_to_console=False) - - elif self.label.cache_only: - self._logger.LogOutput('%s: No cache hit.' % self.name) - output = '%s: No Cache hit.' % self.name - retval = 1 - err = 'No cache hit.' - self.result = Result.CreateFromRun( - self._logger, self.log_level, self.label, self.machine, output, err, - retval, self.benchmark.test_name, self.benchmark.suite, - self.benchmark.cwp_dso) - - else: - self._logger.LogOutput('%s: No cache hit.' % self.name) - self.timeline.Record(STATUS_WAITING) - # Try to acquire a machine now. - self.machine = self.AcquireMachine() - self.cache.machine = self.machine - self.result = self.RunTest(self.machine) - - self.cache.remote = self.machine.name - self.label.chrome_version = self.machine_manager.GetChromeVersion( - self.machine) - self.cache.StoreResult(self.result) - - if not self.label.chrome_version: - if self.machine: - self.label.chrome_version = self.machine_manager.GetChromeVersion( - self.machine) - elif self.result.chrome_version: - self.label.chrome_version = self.result.chrome_version - - if self.terminated: - return - - if not self.result.retval: - self.timeline.Record(STATUS_SUCCEEDED) - else: + """The benchmarkrun class.""" + + def __init__( + self, + name, + benchmark, + label, + iteration, + cache_conditions, + machine_manager, + logger_to_use, + log_level, + share_cache, + dut_config, + ): + threading.Thread.__init__(self) + self.name = name + self._logger = logger_to_use + self.log_level = log_level + self.benchmark = benchmark + self.iteration = iteration + self.label = label + self.result = None + self.terminated = False + self.retval = None + self.run_completed = False + self.machine_manager = machine_manager + self.suite_runner = SuiteRunner( + dut_config, self._logger, self.log_level + ) + self.machine = None + self.cache_conditions = cache_conditions + self.runs_complete = 0 + self.cache_hit = False + self.failure_reason = "" + self.test_args = benchmark.test_args + self.cache = None + self.profiler_args = self.GetExtraAutotestArgs() + self._ce = command_executer.GetCommandExecuter( + self._logger, log_level=self.log_level + ) + self.timeline = timeline.Timeline() + self.timeline.Record(STATUS_PENDING) + self.share_cache = share_cache + self.cache_has_been_read = False + + # This is used by schedv2. + self.owner_thread = None + + def ReadCache(self): + # Just use the first machine for running the cached version, + # without locking it. 
+ self.cache = ResultsCache() + self.cache.Init( + self.label.chromeos_image, + self.label.chromeos_root, + self.benchmark.test_name, + self.iteration, + self.test_args, + self.profiler_args, + self.machine_manager, + self.machine, + self.label.board, + self.cache_conditions, + self._logger, + self.log_level, + self.label, + self.share_cache, + self.benchmark.suite, + self.benchmark.show_all_results, + self.benchmark.run_local, + self.benchmark.cwp_dso, + ) + + self.result = self.cache.ReadResult() + self.cache_hit = self.result is not None + self.cache_has_been_read = True + + def run(self): + try: + if not self.cache_has_been_read: + self.ReadCache() + + if self.result: + self._logger.LogOutput("%s: Cache hit." % self.name) + self._logger.LogOutput(self.result.out, print_to_console=False) + self._logger.LogError(self.result.err, print_to_console=False) + + elif self.label.cache_only: + self._logger.LogOutput("%s: No cache hit." % self.name) + output = "%s: No Cache hit." % self.name + retval = 1 + err = "No cache hit." + self.result = Result.CreateFromRun( + self._logger, + self.log_level, + self.label, + self.machine, + output, + err, + retval, + self.benchmark.test_name, + self.benchmark.suite, + self.benchmark.cwp_dso, + ) + + else: + self._logger.LogOutput("%s: No cache hit." % self.name) + self.timeline.Record(STATUS_WAITING) + # Try to acquire a machine now. + self.machine = self.AcquireMachine() + self.cache.machine = self.machine + self.result = self.RunTest(self.machine) + + self.cache.remote = self.machine.name + self.label.chrome_version = ( + self.machine_manager.GetChromeVersion(self.machine) + ) + self.cache.StoreResult(self.result) + + if not self.label.chrome_version: + if self.machine: + self.label.chrome_version = ( + self.machine_manager.GetChromeVersion(self.machine) + ) + elif self.result.chrome_version: + self.label.chrome_version = self.result.chrome_version + + if self.terminated: + return + + if not self.result.retval: + self.timeline.Record(STATUS_SUCCEEDED) + else: + if self.timeline.GetLastEvent() != STATUS_FAILED: + self.failure_reason = ( + "Return value of test suite was non-zero." + ) + self.timeline.Record(STATUS_FAILED) + + except Exception as e: + self._logger.LogError( + "Benchmark run: '%s' failed: %s" % (self.name, e) + ) + traceback.print_exc() + if self.timeline.GetLastEvent() != STATUS_FAILED: + self.timeline.Record(STATUS_FAILED) + self.failure_reason = str(e) + finally: + if self.owner_thread is not None: + # In schedv2 mode, we do not lock machine locally. So noop here. + pass + elif self.machine: + if not self.machine.IsReachable(): + self._logger.LogOutput( + "Machine %s is not reachable, removing it." + % self.machine.name + ) + self.machine_manager.RemoveMachine(self.machine.name) + self._logger.LogOutput( + "Releasing machine: %s" % self.machine.name + ) + self.machine_manager.ReleaseMachine(self.machine) + self._logger.LogOutput( + "Released machine: %s" % self.machine.name + ) + + def Terminate(self): + self.terminated = True + self.suite_runner.Terminate() if self.timeline.GetLastEvent() != STATUS_FAILED: - self.failure_reason = 'Return value of test suite was non-zero.' 
- self.timeline.Record(STATUS_FAILED) - - except Exception as e: - self._logger.LogError("Benchmark run: '%s' failed: %s" % (self.name, e)) - traceback.print_exc() - if self.timeline.GetLastEvent() != STATUS_FAILED: - self.timeline.Record(STATUS_FAILED) - self.failure_reason = str(e) - finally: - if self.owner_thread is not None: - # In schedv2 mode, we do not lock machine locally. So noop here. - pass - elif self.machine: - if not self.machine.IsReachable(): - self._logger.LogOutput( - 'Machine %s is not reachable, removing it.' % self.machine.name) - self.machine_manager.RemoveMachine(self.machine.name) - self._logger.LogOutput('Releasing machine: %s' % self.machine.name) - self.machine_manager.ReleaseMachine(self.machine) - self._logger.LogOutput('Released machine: %s' % self.machine.name) - - def Terminate(self): - self.terminated = True - self.suite_runner.Terminate() - if self.timeline.GetLastEvent() != STATUS_FAILED: - self.timeline.Record(STATUS_FAILED) - self.failure_reason = 'Thread terminated.' - - def AcquireMachine(self): - if self.owner_thread is not None: - # No need to lock machine locally, DutWorker, which is a thread, is - # responsible for running br. - return self.owner_thread.dut() - while True: - machine = None - if self.terminated: - raise RuntimeError('Thread terminated while trying to acquire machine.') - - machine = self.machine_manager.AcquireMachine(self.label) - - if machine: - self._logger.LogOutput( - '%s: Machine %s acquired at %s' % (self.name, machine.name, - datetime.datetime.now())) - break - time.sleep(10) - return machine - - def GetExtraAutotestArgs(self): - if (self.benchmark.perf_args and - self.benchmark.suite != 'telemetry_Crosperf'): - self._logger.LogError( - 'Non-telemetry benchmark does not support profiler.') - self.benchmark.perf_args = '' - - if self.benchmark.perf_args: - perf_args_list = self.benchmark.perf_args.split(' ') - perf_args_list = [perf_args_list[0]] + ['-a'] + perf_args_list[1:] - perf_args = ' '.join(perf_args_list) - if not perf_args_list[0] in ['record', 'stat']: - raise SyntaxError('perf_args must start with either record or stat') - extra_test_args = [ - '--profiler=custom_perf', - ('--profiler_args=\'perf_options="%s"\'' % perf_args) - ] - return ' '.join(extra_test_args) - else: - return '' - - def RunTest(self, machine): - self.timeline.Record(STATUS_IMAGING) - if self.owner_thread is not None: - # In schedv2 mode, do not even call ImageMachine. Machine image is - # guarenteed. - pass - else: - self.machine_manager.ImageMachine(machine, self.label) - self.timeline.Record(STATUS_RUNNING) - retval, out, err = self.suite_runner.Run( - machine, self.label, self.benchmark, self.test_args, self.profiler_args) - self.run_completed = True - return Result.CreateFromRun(self._logger, self.log_level, self.label, - self.machine, out, err, retval, - self.benchmark.test_name, self.benchmark.suite, - self.benchmark.cwp_dso) - - def SetCacheConditions(self, cache_conditions): - self.cache_conditions = cache_conditions - - def logger(self): - """Return the logger, only used by unittest. - - Returns: - self._logger - """ - - return self._logger - - def __str__(self): - """For better debugging.""" - - return 'BenchmarkRun[name="{}"]'.format(self.name) + self.timeline.Record(STATUS_FAILED) + self.failure_reason = "Thread terminated." + + def AcquireMachine(self): + if self.owner_thread is not None: + # No need to lock machine locally, DutWorker, which is a thread, is + # responsible for running br. 
+            return self.owner_thread.dut()
+        while True:
+            machine = None
+            if self.terminated:
+                raise RuntimeError(
+                    "Thread terminated while trying to acquire machine."
+                )
+
+            machine = self.machine_manager.AcquireMachine(self.label)
+
+            if machine:
+                self._logger.LogOutput(
+                    "%s: Machine %s acquired at %s"
+                    % (self.name, machine.name, datetime.datetime.now())
+                )
+                break
+            time.sleep(10)
+        return machine
+
+    def GetExtraAutotestArgs(self):
+        if (
+            self.benchmark.perf_args
+            and self.benchmark.suite != "telemetry_Crosperf"
+        ):
+            self._logger.LogError(
+                "Non-telemetry benchmark does not support profiler."
+            )
+            self.benchmark.perf_args = ""
+
+        if self.benchmark.perf_args:
+            perf_args_list = self.benchmark.perf_args.split(" ")
+            perf_args_list = [perf_args_list[0]] + ["-a"] + perf_args_list[1:]
+            perf_args = " ".join(perf_args_list)
+            if not perf_args_list[0] in ["record", "stat"]:
+                raise SyntaxError(
+                    "perf_args must start with either record or stat"
+                )
+            extra_test_args = [
+                "--profiler=custom_perf",
+                ("--profiler_args='perf_options=\"%s\"'" % perf_args),
+            ]
+            return " ".join(extra_test_args)
+        else:
+            return ""
+
+    def RunTest(self, machine):
+        self.timeline.Record(STATUS_IMAGING)
+        if self.owner_thread is not None:
+            # In schedv2 mode, do not even call ImageMachine. Machine image is
+            # guaranteed.
+            pass
+        else:
+            self.machine_manager.ImageMachine(machine, self.label)
+        self.timeline.Record(STATUS_RUNNING)
+        retval, out, err = self.suite_runner.Run(
+            machine,
+            self.label,
+            self.benchmark,
+            self.test_args,
+            self.profiler_args,
+        )
+        self.run_completed = True
+        return Result.CreateFromRun(
+            self._logger,
+            self.log_level,
+            self.label,
+            self.machine,
+            out,
+            err,
+            retval,
+            self.benchmark.test_name,
+            self.benchmark.suite,
+            self.benchmark.cwp_dso,
+        )
+
+    def SetCacheConditions(self, cache_conditions):
+        self.cache_conditions = cache_conditions
+
+    def logger(self):
+        """Return the logger, only used by unittest.
+
+        Returns:
+            self._logger
+        """
+
+        return self._logger
+
+    def __str__(self):
+        """For better debugging."""
+
+        return 'BenchmarkRun[name="{}"]'.format(self.name)


 class MockBenchmarkRun(BenchmarkRun):
-  """Inherited from BenchmarkRun."""
-
-  def ReadCache(self):
-    # Just use the first machine for running the cached version,
-    # without locking it.
-    self.cache = MockResultsCache()
-    self.cache.Init(self.label.chromeos_image, self.label.chromeos_root,
-                    self.benchmark.test_name, self.iteration, self.test_args,
-                    self.profiler_args, self.machine_manager, self.machine,
-                    self.label.board, self.cache_conditions, self._logger,
-                    self.log_level, self.label, self.share_cache,
-                    self.benchmark.suite, self.benchmark.show_all_results,
-                    self.benchmark.run_local, self.benchmark.cwp_dso)
-
-    self.result = self.cache.ReadResult()
-    self.cache_hit = (self.result is not None)
-
-  def RunTest(self, machine):
-    """Remove Result.CreateFromRun for testing."""
-    self.timeline.Record(STATUS_IMAGING)
-    self.machine_manager.ImageMachine(machine, self.label)
-    self.timeline.Record(STATUS_RUNNING)
-    [retval, out, err] = self.suite_runner.Run(
-        machine, self.label, self.benchmark, self.test_args, self.profiler_args)
-    self.run_completed = True
-    rr = MockResult('logger', self.label, self.log_level, machine)
-    rr.out = out
-    rr.err = err
-    rr.retval = retval
-    return rr
+    """Inherited from BenchmarkRun."""
+
+    def ReadCache(self):
+        # Just use the first machine for running the cached version,
+        # without locking it.
+ self.cache = MockResultsCache() + self.cache.Init( + self.label.chromeos_image, + self.label.chromeos_root, + self.benchmark.test_name, + self.iteration, + self.test_args, + self.profiler_args, + self.machine_manager, + self.machine, + self.label.board, + self.cache_conditions, + self._logger, + self.log_level, + self.label, + self.share_cache, + self.benchmark.suite, + self.benchmark.show_all_results, + self.benchmark.run_local, + self.benchmark.cwp_dso, + ) + + self.result = self.cache.ReadResult() + self.cache_hit = self.result is not None + + def RunTest(self, machine): + """Remove Result.CreateFromRun for testing.""" + self.timeline.Record(STATUS_IMAGING) + self.machine_manager.ImageMachine(machine, self.label) + self.timeline.Record(STATUS_RUNNING) + [retval, out, err] = self.suite_runner.Run( + machine, + self.label, + self.benchmark, + self.test_args, + self.profiler_args, + ) + self.run_completed = True + rr = MockResult("logger", self.label, self.log_level, machine) + rr.out = out + rr.err = err + rr.retval = retval + return rr diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py index d35a53bc..e59b275c 100755 --- a/crosperf/benchmark_run_unittest.py +++ b/crosperf/benchmark_run_unittest.py @@ -13,430 +13,534 @@ import inspect import unittest import unittest.mock as mock +from benchmark import Benchmark import benchmark_run - from cros_utils import logger -from suite_runner import MockSuiteRunner -from suite_runner import SuiteRunner from label import MockLabel -from benchmark import Benchmark -from machine_manager import MockMachineManager from machine_manager import MachineManager from machine_manager import MockCrosMachine -from results_cache import MockResultsCache +from machine_manager import MockMachineManager from results_cache import CacheConditions +from results_cache import MockResultsCache from results_cache import Result from results_cache import ResultsCache +from suite_runner import MockSuiteRunner +from suite_runner import SuiteRunner class BenchmarkRunTest(unittest.TestCase): - """Unit tests for the BenchmarkRun class and all of its methods.""" - - def setUp(self): - self.status = [] - self.called_ReadCache = None - self.log_error = [] - self.log_output = [] - self.err_msg = None - self.test_benchmark = Benchmark( - 'page_cycler.netsim.top_10', # name - 'page_cycler.netsim.top_10', # test_name - '', # test_args - 1, # iterations - False, # rm_chroot_tmp - '', # perf_args - suite='telemetry_Crosperf') # suite - - self.test_label = MockLabel( - 'test1', - 'build', - 'image1', - 'autotest_dir', - 'debug_dir', - '/tmp/test_benchmark_run', - 'x86-alex', - 'chromeos2-row1-rack4-host9.cros', - image_args='', - cache_dir='', - cache_only=False, - log_level='average', - compiler='gcc', - crosfleet=False) - - self.test_cache_conditions = [ - CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH - ] - - self.mock_logger = logger.GetLogger(log_dir='', mock=True) - - self.mock_machine_manager = mock.Mock(spec=MachineManager) - - def testDryRun(self): - my_label = MockLabel( - 'test1', - 'build', - 'image1', - 'autotest_dir', - 'debug_dir', - '/tmp/test_benchmark_run', - 'x86-alex', - 'chromeos2-row1-rack4-host9.cros', - image_args='', - cache_dir='', - cache_only=False, - log_level='average', - compiler='gcc', - crosfleet=False) - - logging_level = 'average' - m = MockMachineManager('/tmp/chromeos_root', 0, logging_level, '') - m.AddMachine('chromeos2-row1-rack4-host9.cros') - bench = Benchmark( - 'page_cycler.netsim.top_10', # 
name - 'page_cycler.netsim.top_10', # test_name - '', # test_args - 1, # iterations - False, # rm_chroot_tmp - '', # perf_args - suite='telemetry_Crosperf') # suite - dut_conf = { - 'cooldown_time': 5, - 'cooldown_temp': 45, - 'governor': 'powersave', - 'cpu_usage': 'big_only', - 'cpu_freq_pct': 80, - } - b = benchmark_run.MockBenchmarkRun('test run', bench, my_label, 1, [], m, - logger.GetLogger(), logging_level, '', - dut_conf) - b.cache = MockResultsCache() - b.suite_runner = MockSuiteRunner() - b.start() - - # Make sure the arguments to BenchmarkRun.__init__ have not changed - # since the last time this test was updated: - args_list = [ - 'self', 'name', 'benchmark', 'label', 'iteration', 'cache_conditions', - 'machine_manager', 'logger_to_use', 'log_level', 'share_cache', - 'dut_config' - ] - arg_spec = inspect.getfullargspec(benchmark_run.BenchmarkRun.__init__) - self.assertEqual(len(arg_spec.args), len(args_list)) - self.assertEqual(arg_spec.args, args_list) - - def test_init(self): - # Nothing really worth testing here; just field assignments. - pass - - def test_read_cache(self): - # Nothing really worth testing here, either. - pass - - def test_run(self): - br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark, - self.test_label, 1, - self.test_cache_conditions, - self.mock_machine_manager, self.mock_logger, - 'average', '', {}) - - def MockLogOutput(msg, print_to_console=False): - """Helper function for test_run.""" - del print_to_console - self.log_output.append(msg) - - def MockLogError(msg, print_to_console=False): - """Helper function for test_run.""" - del print_to_console - self.log_error.append(msg) - - def MockRecordStatus(msg): - """Helper function for test_run.""" - self.status.append(msg) - - def FakeReadCache(): - """Helper function for test_run.""" - br.cache = mock.Mock(spec=ResultsCache) - self.called_ReadCache = True - return 0 - - def FakeReadCacheSucceed(): - """Helper function for test_run.""" - br.cache = mock.Mock(spec=ResultsCache) - br.result = mock.Mock(spec=Result) - br.result.out = 'result.out stuff' - br.result.err = 'result.err stuff' - br.result.retval = 0 - self.called_ReadCache = True - return 0 - - def FakeReadCacheException(): - """Helper function for test_run.""" - raise RuntimeError('This is an exception test; it is supposed to happen') - - def FakeAcquireMachine(): - """Helper function for test_run.""" - mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros', - 'chromeos', 'average') - return mock_machine - - def FakeRunTest(_machine): - """Helper function for test_run.""" - mock_result = mock.Mock(spec=Result) - mock_result.retval = 0 - return mock_result - - def FakeRunTestFail(_machine): - """Helper function for test_run.""" - mock_result = mock.Mock(spec=Result) - mock_result.retval = 1 - return mock_result - - def ResetTestValues(): - """Helper function for test_run.""" - self.log_output = [] - self.log_error = [] - self.status = [] - br.result = None - self.called_ReadCache = False - - # Assign all the fake functions to the appropriate objects. - br.logger().LogOutput = MockLogOutput - br.logger().LogError = MockLogError - br.timeline.Record = MockRecordStatus - br.ReadCache = FakeReadCache - br.RunTest = FakeRunTest - br.AcquireMachine = FakeAcquireMachine - - # First test: No cache hit, all goes well. 
- ResetTestValues() - br.run() - self.assertTrue(self.called_ReadCache) - self.assertEqual(self.log_output, [ - 'test_run: No cache hit.', - 'Releasing machine: chromeos1-row3-rack5-host7.cros', - 'Released machine: chromeos1-row3-rack5-host7.cros' - ]) - self.assertEqual(len(self.log_error), 0) - self.assertEqual(self.status, ['WAITING', 'SUCCEEDED']) - - # Second test: No cached result found; test run was "terminated" for some - # reason. - ResetTestValues() - br.terminated = True - br.run() - self.assertTrue(self.called_ReadCache) - self.assertEqual(self.log_output, [ - 'test_run: No cache hit.', - 'Releasing machine: chromeos1-row3-rack5-host7.cros', - 'Released machine: chromeos1-row3-rack5-host7.cros' - ]) - self.assertEqual(len(self.log_error), 0) - self.assertEqual(self.status, ['WAITING']) - - # Third test. No cached result found; RunTest failed for some reason. - ResetTestValues() - br.terminated = False - br.RunTest = FakeRunTestFail - br.run() - self.assertTrue(self.called_ReadCache) - self.assertEqual(self.log_output, [ - 'test_run: No cache hit.', - 'Releasing machine: chromeos1-row3-rack5-host7.cros', - 'Released machine: chromeos1-row3-rack5-host7.cros' - ]) - self.assertEqual(len(self.log_error), 0) - self.assertEqual(self.status, ['WAITING', 'FAILED']) - - # Fourth test: ReadCache found a cached result. - ResetTestValues() - br.RunTest = FakeRunTest - br.ReadCache = FakeReadCacheSucceed - br.run() - self.assertTrue(self.called_ReadCache) - self.assertEqual(self.log_output, [ - 'test_run: Cache hit.', 'result.out stuff', - 'Releasing machine: chromeos1-row3-rack5-host7.cros', - 'Released machine: chromeos1-row3-rack5-host7.cros' - ]) - self.assertEqual(self.log_error, ['result.err stuff']) - self.assertEqual(self.status, ['SUCCEEDED']) - - # Fifth test: ReadCache generates an exception; does the try/finally block - # work? 
- ResetTestValues() - br.ReadCache = FakeReadCacheException - br.machine = FakeAcquireMachine() - br.run() - self.assertEqual(self.log_error, [ - "Benchmark run: 'test_run' failed: This is an exception test; it is " - 'supposed to happen' - ]) - self.assertEqual(self.status, ['FAILED']) - - def test_terminate_pass(self): - br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark, - self.test_label, 1, - self.test_cache_conditions, - self.mock_machine_manager, self.mock_logger, - 'average', '', {}) - - def GetLastEventPassed(): - """Helper function for test_terminate_pass""" - return benchmark_run.STATUS_SUCCEEDED - - def RecordStub(status): - """Helper function for test_terminate_pass""" - self.status = status - - self.status = benchmark_run.STATUS_SUCCEEDED - self.assertFalse(br.terminated) - self.assertFalse(br.suite_runner.CommandTerminator().IsTerminated()) - - br.timeline.GetLastEvent = GetLastEventPassed - br.timeline.Record = RecordStub - - br.Terminate() - - self.assertTrue(br.terminated) - self.assertTrue(br.suite_runner.CommandTerminator().IsTerminated()) - self.assertEqual(self.status, benchmark_run.STATUS_FAILED) - - def test_terminate_fail(self): - br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark, - self.test_label, 1, - self.test_cache_conditions, - self.mock_machine_manager, self.mock_logger, - 'average', '', {}) - - def GetLastEventFailed(): - """Helper function for test_terminate_fail""" - return benchmark_run.STATUS_FAILED - - def RecordStub(status): - """Helper function for test_terminate_fail""" - self.status = status - - self.status = benchmark_run.STATUS_SUCCEEDED - self.assertFalse(br.terminated) - self.assertFalse(br.suite_runner.CommandTerminator().IsTerminated()) - - br.timeline.GetLastEvent = GetLastEventFailed - br.timeline.Record = RecordStub - - br.Terminate() - - self.assertTrue(br.terminated) - self.assertTrue(br.suite_runner.CommandTerminator().IsTerminated()) - self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED) - - def test_acquire_machine(self): - br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark, - self.test_label, 1, - self.test_cache_conditions, - self.mock_machine_manager, self.mock_logger, - 'average', '', {}) - - br.terminated = True - self.assertRaises(Exception, br.AcquireMachine) - - br.terminated = False - mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros', - 'chromeos', 'average') - self.mock_machine_manager.AcquireMachine.return_value = mock_machine - - machine = br.AcquireMachine() - self.assertEqual(machine.name, 'chromeos1-row3-rack5-host7.cros') - - def test_get_extra_autotest_args(self): - br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark, - self.test_label, 1, - self.test_cache_conditions, - self.mock_machine_manager, self.mock_logger, - 'average', '', {}) - - def MockLogError(err_msg): - """Helper function for test_get_extra_autotest_args""" - self.err_msg = err_msg - - self.mock_logger.LogError = MockLogError - - result = br.GetExtraAutotestArgs() - self.assertEqual(result, '') - - self.test_benchmark.perf_args = 'record -e cycles' - result = br.GetExtraAutotestArgs() - self.assertEqual( - result, - '--profiler=custom_perf --profiler_args=\'perf_options="record -a -e ' - 'cycles"\'') - - self.test_benchmark.perf_args = 'record -e cycles' - self.test_benchmark.suite = 'test_that' - result = br.GetExtraAutotestArgs() - self.assertEqual(result, '') - self.assertEqual(self.err_msg, - 'Non-telemetry benchmark does not support profiler.') - - 
self.test_benchmark.perf_args = 'junk args' - self.test_benchmark.suite = 'telemetry_Crosperf' - self.assertRaises(Exception, br.GetExtraAutotestArgs) - - @mock.patch.object(SuiteRunner, 'Run') - @mock.patch.object(Result, 'CreateFromRun') - def test_run_test(self, mock_result, mock_runner): - br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark, - self.test_label, 1, - self.test_cache_conditions, - self.mock_machine_manager, self.mock_logger, - 'average', '', {}) - - self.status = [] - - def MockRecord(status): - self.status.append(status) - - br.timeline.Record = MockRecord - mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros', - 'chromeos', 'average') - mock_runner.return_value = [0, "{'Score':100}", ''] - - br.RunTest(mock_machine) - - self.assertTrue(br.run_completed) - self.assertEqual( - self.status, - [benchmark_run.STATUS_IMAGING, benchmark_run.STATUS_RUNNING]) - - self.assertEqual(br.machine_manager.ImageMachine.call_count, 1) - br.machine_manager.ImageMachine.assert_called_with(mock_machine, - self.test_label) - self.assertEqual(mock_runner.call_count, 1) - mock_runner.assert_called_with(mock_machine, br.label, br.benchmark, '', - br.profiler_args) - - self.assertEqual(mock_result.call_count, 1) - mock_result.assert_called_with(self.mock_logger, 'average', self.test_label, - None, "{'Score':100}", '', 0, - 'page_cycler.netsim.top_10', - 'telemetry_Crosperf', '') - - def test_set_cache_conditions(self): - br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark, - self.test_label, 1, - self.test_cache_conditions, - self.mock_machine_manager, self.mock_logger, - 'average', '', {}) - - phony_cache_conditions = [123, 456, True, False] - - self.assertEqual(br.cache_conditions, self.test_cache_conditions) - - br.SetCacheConditions(phony_cache_conditions) - self.assertEqual(br.cache_conditions, phony_cache_conditions) - - br.SetCacheConditions(self.test_cache_conditions) - self.assertEqual(br.cache_conditions, self.test_cache_conditions) - - -if __name__ == '__main__': - unittest.main() + """Unit tests for the BenchmarkRun class and all of its methods.""" + + def setUp(self): + self.status = [] + self.called_ReadCache = None + self.log_error = [] + self.log_output = [] + self.err_msg = None + self.test_benchmark = Benchmark( + "page_cycler.netsim.top_10", # name + "page_cycler.netsim.top_10", # test_name + "", # test_args + 1, # iterations + False, # rm_chroot_tmp + "", # perf_args + suite="telemetry_Crosperf", + ) # suite + + self.test_label = MockLabel( + "test1", + "build", + "image1", + "autotest_dir", + "debug_dir", + "/tmp/test_benchmark_run", + "x86-alex", + "chromeos2-row1-rack4-host9.cros", + image_args="", + cache_dir="", + cache_only=False, + log_level="average", + compiler="gcc", + crosfleet=False, + ) + + self.test_cache_conditions = [ + CacheConditions.CACHE_FILE_EXISTS, + CacheConditions.CHECKSUMS_MATCH, + ] + + self.mock_logger = logger.GetLogger(log_dir="", mock=True) + + self.mock_machine_manager = mock.Mock(spec=MachineManager) + + def testDryRun(self): + my_label = MockLabel( + "test1", + "build", + "image1", + "autotest_dir", + "debug_dir", + "/tmp/test_benchmark_run", + "x86-alex", + "chromeos2-row1-rack4-host9.cros", + image_args="", + cache_dir="", + cache_only=False, + log_level="average", + compiler="gcc", + crosfleet=False, + ) + + logging_level = "average" + m = MockMachineManager("/tmp/chromeos_root", 0, logging_level, "") + m.AddMachine("chromeos2-row1-rack4-host9.cros") + bench = Benchmark( + "page_cycler.netsim.top_10", # 
name + "page_cycler.netsim.top_10", # test_name + "", # test_args + 1, # iterations + False, # rm_chroot_tmp + "", # perf_args + suite="telemetry_Crosperf", + ) # suite + dut_conf = { + "cooldown_time": 5, + "cooldown_temp": 45, + "governor": "powersave", + "cpu_usage": "big_only", + "cpu_freq_pct": 80, + } + b = benchmark_run.MockBenchmarkRun( + "test run", + bench, + my_label, + 1, + [], + m, + logger.GetLogger(), + logging_level, + "", + dut_conf, + ) + b.cache = MockResultsCache() + b.suite_runner = MockSuiteRunner() + b.start() + + # Make sure the arguments to BenchmarkRun.__init__ have not changed + # since the last time this test was updated: + args_list = [ + "self", + "name", + "benchmark", + "label", + "iteration", + "cache_conditions", + "machine_manager", + "logger_to_use", + "log_level", + "share_cache", + "dut_config", + ] + arg_spec = inspect.getfullargspec(benchmark_run.BenchmarkRun.__init__) + self.assertEqual(len(arg_spec.args), len(args_list)) + self.assertEqual(arg_spec.args, args_list) + + def test_init(self): + # Nothing really worth testing here; just field assignments. + pass + + def test_read_cache(self): + # Nothing really worth testing here, either. + pass + + def test_run(self): + br = benchmark_run.BenchmarkRun( + "test_run", + self.test_benchmark, + self.test_label, + 1, + self.test_cache_conditions, + self.mock_machine_manager, + self.mock_logger, + "average", + "", + {}, + ) + + def MockLogOutput(msg, print_to_console=False): + """Helper function for test_run.""" + del print_to_console + self.log_output.append(msg) + + def MockLogError(msg, print_to_console=False): + """Helper function for test_run.""" + del print_to_console + self.log_error.append(msg) + + def MockRecordStatus(msg): + """Helper function for test_run.""" + self.status.append(msg) + + def FakeReadCache(): + """Helper function for test_run.""" + br.cache = mock.Mock(spec=ResultsCache) + self.called_ReadCache = True + return 0 + + def FakeReadCacheSucceed(): + """Helper function for test_run.""" + br.cache = mock.Mock(spec=ResultsCache) + br.result = mock.Mock(spec=Result) + br.result.out = "result.out stuff" + br.result.err = "result.err stuff" + br.result.retval = 0 + self.called_ReadCache = True + return 0 + + def FakeReadCacheException(): + """Helper function for test_run.""" + raise RuntimeError( + "This is an exception test; it is supposed to happen" + ) + + def FakeAcquireMachine(): + """Helper function for test_run.""" + mock_machine = MockCrosMachine( + "chromeos1-row3-rack5-host7.cros", "chromeos", "average" + ) + return mock_machine + + def FakeRunTest(_machine): + """Helper function for test_run.""" + mock_result = mock.Mock(spec=Result) + mock_result.retval = 0 + return mock_result + + def FakeRunTestFail(_machine): + """Helper function for test_run.""" + mock_result = mock.Mock(spec=Result) + mock_result.retval = 1 + return mock_result + + def ResetTestValues(): + """Helper function for test_run.""" + self.log_output = [] + self.log_error = [] + self.status = [] + br.result = None + self.called_ReadCache = False + + # Assign all the fake functions to the appropriate objects. + br.logger().LogOutput = MockLogOutput + br.logger().LogError = MockLogError + br.timeline.Record = MockRecordStatus + br.ReadCache = FakeReadCache + br.RunTest = FakeRunTest + br.AcquireMachine = FakeAcquireMachine + + # First test: No cache hit, all goes well. 
+ ResetTestValues() + br.run() + self.assertTrue(self.called_ReadCache) + self.assertEqual( + self.log_output, + [ + "test_run: No cache hit.", + "Releasing machine: chromeos1-row3-rack5-host7.cros", + "Released machine: chromeos1-row3-rack5-host7.cros", + ], + ) + self.assertEqual(len(self.log_error), 0) + self.assertEqual(self.status, ["WAITING", "SUCCEEDED"]) + + # Second test: No cached result found; test run was "terminated" for some + # reason. + ResetTestValues() + br.terminated = True + br.run() + self.assertTrue(self.called_ReadCache) + self.assertEqual( + self.log_output, + [ + "test_run: No cache hit.", + "Releasing machine: chromeos1-row3-rack5-host7.cros", + "Released machine: chromeos1-row3-rack5-host7.cros", + ], + ) + self.assertEqual(len(self.log_error), 0) + self.assertEqual(self.status, ["WAITING"]) + + # Third test. No cached result found; RunTest failed for some reason. + ResetTestValues() + br.terminated = False + br.RunTest = FakeRunTestFail + br.run() + self.assertTrue(self.called_ReadCache) + self.assertEqual( + self.log_output, + [ + "test_run: No cache hit.", + "Releasing machine: chromeos1-row3-rack5-host7.cros", + "Released machine: chromeos1-row3-rack5-host7.cros", + ], + ) + self.assertEqual(len(self.log_error), 0) + self.assertEqual(self.status, ["WAITING", "FAILED"]) + + # Fourth test: ReadCache found a cached result. + ResetTestValues() + br.RunTest = FakeRunTest + br.ReadCache = FakeReadCacheSucceed + br.run() + self.assertTrue(self.called_ReadCache) + self.assertEqual( + self.log_output, + [ + "test_run: Cache hit.", + "result.out stuff", + "Releasing machine: chromeos1-row3-rack5-host7.cros", + "Released machine: chromeos1-row3-rack5-host7.cros", + ], + ) + self.assertEqual(self.log_error, ["result.err stuff"]) + self.assertEqual(self.status, ["SUCCEEDED"]) + + # Fifth test: ReadCache generates an exception; does the try/finally block + # work? 
+ ResetTestValues() + br.ReadCache = FakeReadCacheException + br.machine = FakeAcquireMachine() + br.run() + self.assertEqual( + self.log_error, + [ + "Benchmark run: 'test_run' failed: This is an exception test; it is " + "supposed to happen" + ], + ) + self.assertEqual(self.status, ["FAILED"]) + + def test_terminate_pass(self): + br = benchmark_run.BenchmarkRun( + "test_run", + self.test_benchmark, + self.test_label, + 1, + self.test_cache_conditions, + self.mock_machine_manager, + self.mock_logger, + "average", + "", + {}, + ) + + def GetLastEventPassed(): + """Helper function for test_terminate_pass""" + return benchmark_run.STATUS_SUCCEEDED + + def RecordStub(status): + """Helper function for test_terminate_pass""" + self.status = status + + self.status = benchmark_run.STATUS_SUCCEEDED + self.assertFalse(br.terminated) + self.assertFalse(br.suite_runner.CommandTerminator().IsTerminated()) + + br.timeline.GetLastEvent = GetLastEventPassed + br.timeline.Record = RecordStub + + br.Terminate() + + self.assertTrue(br.terminated) + self.assertTrue(br.suite_runner.CommandTerminator().IsTerminated()) + self.assertEqual(self.status, benchmark_run.STATUS_FAILED) + + def test_terminate_fail(self): + br = benchmark_run.BenchmarkRun( + "test_run", + self.test_benchmark, + self.test_label, + 1, + self.test_cache_conditions, + self.mock_machine_manager, + self.mock_logger, + "average", + "", + {}, + ) + + def GetLastEventFailed(): + """Helper function for test_terminate_fail""" + return benchmark_run.STATUS_FAILED + + def RecordStub(status): + """Helper function for test_terminate_fail""" + self.status = status + + self.status = benchmark_run.STATUS_SUCCEEDED + self.assertFalse(br.terminated) + self.assertFalse(br.suite_runner.CommandTerminator().IsTerminated()) + + br.timeline.GetLastEvent = GetLastEventFailed + br.timeline.Record = RecordStub + + br.Terminate() + + self.assertTrue(br.terminated) + self.assertTrue(br.suite_runner.CommandTerminator().IsTerminated()) + self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED) + + def test_acquire_machine(self): + br = benchmark_run.BenchmarkRun( + "test_run", + self.test_benchmark, + self.test_label, + 1, + self.test_cache_conditions, + self.mock_machine_manager, + self.mock_logger, + "average", + "", + {}, + ) + + br.terminated = True + self.assertRaises(Exception, br.AcquireMachine) + + br.terminated = False + mock_machine = MockCrosMachine( + "chromeos1-row3-rack5-host7.cros", "chromeos", "average" + ) + self.mock_machine_manager.AcquireMachine.return_value = mock_machine + + machine = br.AcquireMachine() + self.assertEqual(machine.name, "chromeos1-row3-rack5-host7.cros") + + def test_get_extra_autotest_args(self): + br = benchmark_run.BenchmarkRun( + "test_run", + self.test_benchmark, + self.test_label, + 1, + self.test_cache_conditions, + self.mock_machine_manager, + self.mock_logger, + "average", + "", + {}, + ) + + def MockLogError(err_msg): + """Helper function for test_get_extra_autotest_args""" + self.err_msg = err_msg + + self.mock_logger.LogError = MockLogError + + result = br.GetExtraAutotestArgs() + self.assertEqual(result, "") + + self.test_benchmark.perf_args = "record -e cycles" + result = br.GetExtraAutotestArgs() + self.assertEqual( + result, + "--profiler=custom_perf --profiler_args='perf_options=\"record -a -e " + "cycles\"'", + ) + + self.test_benchmark.perf_args = "record -e cycles" + self.test_benchmark.suite = "test_that" + result = br.GetExtraAutotestArgs() + self.assertEqual(result, "") + self.assertEqual( + 
self.err_msg, "Non-telemetry benchmark does not support profiler." + ) + + self.test_benchmark.perf_args = "junk args" + self.test_benchmark.suite = "telemetry_Crosperf" + self.assertRaises(Exception, br.GetExtraAutotestArgs) + + @mock.patch.object(SuiteRunner, "Run") + @mock.patch.object(Result, "CreateFromRun") + def test_run_test(self, mock_result, mock_runner): + br = benchmark_run.BenchmarkRun( + "test_run", + self.test_benchmark, + self.test_label, + 1, + self.test_cache_conditions, + self.mock_machine_manager, + self.mock_logger, + "average", + "", + {}, + ) + + self.status = [] + + def MockRecord(status): + self.status.append(status) + + br.timeline.Record = MockRecord + mock_machine = MockCrosMachine( + "chromeos1-row3-rack5-host7.cros", "chromeos", "average" + ) + mock_runner.return_value = [0, "{'Score':100}", ""] + + br.RunTest(mock_machine) + + self.assertTrue(br.run_completed) + self.assertEqual( + self.status, + [benchmark_run.STATUS_IMAGING, benchmark_run.STATUS_RUNNING], + ) + + self.assertEqual(br.machine_manager.ImageMachine.call_count, 1) + br.machine_manager.ImageMachine.assert_called_with( + mock_machine, self.test_label + ) + self.assertEqual(mock_runner.call_count, 1) + mock_runner.assert_called_with( + mock_machine, br.label, br.benchmark, "", br.profiler_args + ) + + self.assertEqual(mock_result.call_count, 1) + mock_result.assert_called_with( + self.mock_logger, + "average", + self.test_label, + None, + "{'Score':100}", + "", + 0, + "page_cycler.netsim.top_10", + "telemetry_Crosperf", + "", + ) + + def test_set_cache_conditions(self): + br = benchmark_run.BenchmarkRun( + "test_run", + self.test_benchmark, + self.test_label, + 1, + self.test_cache_conditions, + self.mock_machine_manager, + self.mock_logger, + "average", + "", + {}, + ) + + phony_cache_conditions = [123, 456, True, False] + + self.assertEqual(br.cache_conditions, self.test_cache_conditions) + + br.SetCacheConditions(phony_cache_conditions) + self.assertEqual(br.cache_conditions, phony_cache_conditions) + + br.SetCacheConditions(self.test_cache_conditions) + self.assertEqual(br.cache_conditions, self.test_cache_conditions) + + +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/benchmark_unittest.py b/crosperf/benchmark_unittest.py index 0f5d1980..31a95f25 100755 --- a/crosperf/benchmark_unittest.py +++ b/crosperf/benchmark_unittest.py @@ -16,57 +16,70 @@ from benchmark import Benchmark class BenchmarkTestCase(unittest.TestCase): - """Individual tests for the Benchmark class.""" + """Individual tests for the Benchmark class.""" - def test_benchmark(self): - # Test creating a benchmark with all the fields filled out. - b1 = Benchmark( - 'b1_test', # name - 'octane', # test_name - '', # test_args - 3, # iterations - False, # rm_chroot_tmp - 'record -e cycles', # perf_args - 'telemetry_Crosperf', # suite - True) # show_all_results - self.assertTrue(b1.suite, 'telemetry_Crosperf') + def test_benchmark(self): + # Test creating a benchmark with all the fields filled out. + b1 = Benchmark( + "b1_test", # name + "octane", # test_name + "", # test_args + 3, # iterations + False, # rm_chroot_tmp + "record -e cycles", # perf_args + "telemetry_Crosperf", # suite + True, + ) # show_all_results + self.assertTrue(b1.suite, "telemetry_Crosperf") - # Test creating a benchmark field with default fields left out. 
- b2 = Benchmark( - 'b2_test', # name - 'octane', # test_name - '', # test_args - 3, # iterations - False, # rm_chroot_tmp - 'record -e cycles') # perf_args - self.assertEqual(b2.suite, '') - self.assertFalse(b2.show_all_results) + # Test creating a benchmark field with default fields left out. + b2 = Benchmark( + "b2_test", # name + "octane", # test_name + "", # test_args + 3, # iterations + False, # rm_chroot_tmp + "record -e cycles", + ) # perf_args + self.assertEqual(b2.suite, "") + self.assertFalse(b2.show_all_results) - # Test explicitly creating 'suite=Telemetry' and 'show_all_results=False" - # and see what happens. - b3 = Benchmark( - 'b3_test', # name - 'octane', # test_name - '', # test_args - 3, # iterations - False, # rm_chroot_tmp - 'record -e cycles', # perf_args - 'telemetry', # suite - False) # show_all_results - self.assertTrue(b3.show_all_results) + # Test explicitly creating 'suite=Telemetry' and 'show_all_results=False" + # and see what happens. + b3 = Benchmark( + "b3_test", # name + "octane", # test_name + "", # test_args + 3, # iterations + False, # rm_chroot_tmp + "record -e cycles", # perf_args + "telemetry", # suite + False, + ) # show_all_results + self.assertTrue(b3.show_all_results) - # Check to see if the args to Benchmark have changed since the last time - # this test was updated. - args_list = [ - 'self', 'name', 'test_name', 'test_args', 'iterations', 'rm_chroot_tmp', - 'perf_args', 'suite', 'show_all_results', 'retries', 'run_local', - 'cwp_dso', 'weight' - ] - arg_spec = inspect.getfullargspec(Benchmark.__init__) - self.assertEqual(len(arg_spec.args), len(args_list)) - for arg in args_list: - self.assertIn(arg, arg_spec.args) + # Check to see if the args to Benchmark have changed since the last time + # this test was updated. 
+ args_list = [ + "self", + "name", + "test_name", + "test_args", + "iterations", + "rm_chroot_tmp", + "perf_args", + "suite", + "show_all_results", + "retries", + "run_local", + "cwp_dso", + "weight", + ] + arg_spec = inspect.getfullargspec(Benchmark.__init__) + self.assertEqual(len(arg_spec.args), len(args_list)) + for arg in args_list: + self.assertIn(arg, arg_spec.args) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/column_chart.py b/crosperf/column_chart.py index 6063421d..7d6bd0dd 100644 --- a/crosperf/column_chart.py +++ b/crosperf/column_chart.py @@ -7,46 +7,46 @@ class ColumnChart(object): - """class to draw column chart.""" - - def __init__(self, title, width, height): - self.title = title - self.chart_div = ''.join(t for t in title if t.isalnum()) - self.width = width - self.height = height - self.columns = [] - self.rows = [] - self.series = [] - - def AddSeries(self, column_name, series_type, color): - for i in range(len(self.columns)): - if column_name == self.columns[i][1]: - self.series.append((i - 1, series_type, color)) - break - - def AddColumn(self, name, column_type): - self.columns.append((column_type, name)) - - def AddRow(self, row): - self.rows.append(row) - - def GetJavascript(self): - res = 'var data = new google.visualization.DataTable();\n' - for column in self.columns: - res += "data.addColumn('%s', '%s');\n" % column - res += 'data.addRows(%s);\n' % len(self.rows) - for row in range(len(self.rows)): - for column in range(len(self.columns)): - val = self.rows[row][column] - if isinstance(val, str): - val = "'%s'" % val - res += 'data.setValue(%s, %s, %s);\n' % (row, column, val) - - series_javascript = '' - for series in self.series: - series_javascript += "%s: {type: '%s', color: '%s'}, " % series - - chart_add_javascript = """ + """class to draw column chart.""" + + def __init__(self, title, width, height): + self.title = title + self.chart_div = "".join(t for t in title if t.isalnum()) + self.width = width + self.height = height + self.columns = [] + self.rows = [] + self.series = [] + + def AddSeries(self, column_name, series_type, color): + for i in range(len(self.columns)): + if column_name == self.columns[i][1]: + self.series.append((i - 1, series_type, color)) + break + + def AddColumn(self, name, column_type): + self.columns.append((column_type, name)) + + def AddRow(self, row): + self.rows.append(row) + + def GetJavascript(self): + res = "var data = new google.visualization.DataTable();\n" + for column in self.columns: + res += "data.addColumn('%s', '%s');\n" % column + res += "data.addRows(%s);\n" % len(self.rows) + for row in range(len(self.rows)): + for column in range(len(self.columns)): + val = self.rows[row][column] + if isinstance(val, str): + val = "'%s'" % val + res += "data.setValue(%s, %s, %s);\n" % (row, column, val) + + series_javascript = "" + for series in self.series: + series_javascript += "%s: {type: '%s', color: '%s'}, " % series + + chart_add_javascript = """ var chart_%s = new google.visualization.ComboChart( document.getElementById('%s')); chart_%s.draw(data, {width: %s, height: %s, title: '%s', legend: 'none', @@ -54,10 +54,16 @@ chart_%s.draw(data, {width: %s, height: %s, title: '%s', legend: 'none', vAxis: {minValue: 0}}) """ - res += chart_add_javascript % (self.chart_div, self.chart_div, - self.chart_div, self.width, self.height, - self.title, series_javascript) - return res + res += chart_add_javascript % ( + self.chart_div, + self.chart_div, + 
self.chart_div, + self.width, + self.height, + self.title, + series_javascript, + ) + return res - def GetDiv(self): - return "<div id='%s' class='chart'></div>" % self.chart_div + def GetDiv(self): + return "<div id='%s' class='chart'></div>" % self.chart_div diff --git a/crosperf/compare_machines.py b/crosperf/compare_machines.py index c25fd5ab..338c039b 100644 --- a/crosperf/compare_machines.py +++ b/crosperf/compare_machines.py @@ -7,61 +7,66 @@ from __future__ import print_function +import argparse import os.path import sys -import argparse from machine_manager import CrosMachine def PrintUsage(msg): - print(msg) - print('Usage: ') - print('\n compare_machines.py --chromeos_root=/path/to/chroot/ ' - 'machine1 machine2 ...') + print(msg) + print("Usage: ") + print( + "\n compare_machines.py --chromeos_root=/path/to/chroot/ " + "machine1 machine2 ..." + ) def Main(argv): - parser = argparse.ArgumentParser() - parser.add_argument( - '--chromeos_root', - default='/path/to/chromeos', - dest='chromeos_root', - help='ChromeOS root checkout directory') - parser.add_argument('remotes', nargs=argparse.REMAINDER) - - options = parser.parse_args(argv) - - machine_list = options.remotes - if len(machine_list) < 2: - PrintUsage('ERROR: Must specify at least two machines.') - return 1 - elif not os.path.exists(options.chromeos_root): - PrintUsage('Error: chromeos_root does not exist %s' % options.chromeos_root) - return 1 - - chroot = options.chromeos_root - cros_machines = [] - test_machine_checksum = None - for m in machine_list: - cm = CrosMachine(m, chroot, 'average') - cros_machines = cros_machines + [cm] - test_machine_checksum = cm.machine_checksum - - ret = 0 - for cm in cros_machines: - print('checksum for %s : %s' % (cm.name, cm.machine_checksum)) - if cm.machine_checksum != test_machine_checksum: - ret = 1 - print('Machine checksums do not all match') - - if ret == 0: - print('Machines all match.') - - return ret - - -if __name__ == '__main__': - retval = Main(sys.argv[1:]) - sys.exit(retval) + parser = argparse.ArgumentParser() + parser.add_argument( + "--chromeos_root", + default="/path/to/chromeos", + dest="chromeos_root", + help="ChromeOS root checkout directory", + ) + parser.add_argument("remotes", nargs=argparse.REMAINDER) + + options = parser.parse_args(argv) + + machine_list = options.remotes + if len(machine_list) < 2: + PrintUsage("ERROR: Must specify at least two machines.") + return 1 + elif not os.path.exists(options.chromeos_root): + PrintUsage( + "Error: chromeos_root does not exist %s" % options.chromeos_root + ) + return 1 + + chroot = options.chromeos_root + cros_machines = [] + test_machine_checksum = None + for m in machine_list: + cm = CrosMachine(m, chroot, "average") + cros_machines = cros_machines + [cm] + test_machine_checksum = cm.machine_checksum + + ret = 0 + for cm in cros_machines: + print("checksum for %s : %s" % (cm.name, cm.machine_checksum)) + if cm.machine_checksum != test_machine_checksum: + ret = 1 + print("Machine checksums do not all match") + + if ret == 0: + print("Machines all match.") + + return ret + + +if __name__ == "__main__": + retval = Main(sys.argv[1:]) + sys.exit(retval) diff --git a/crosperf/config.py b/crosperf/config.py index 171f98af..82a5ee38 100644 --- a/crosperf/config.py +++ b/crosperf/config.py @@ -8,8 +8,8 @@ config = {} def GetConfig(key): - return config.get(key) + return config.get(key) def AddConfig(key, value): - config[key] = value + config[key] = value diff --git a/crosperf/config_unittest.py 
b/crosperf/config_unittest.py index 05592d0b..47387c71 100755 --- a/crosperf/config_unittest.py +++ b/crosperf/config_unittest.py @@ -14,40 +14,40 @@ import config class ConfigTestCase(unittest.TestCase): - """Class for the config unit tests.""" + """Class for the config unit tests.""" - def test_config(self): - # Verify that config exists, that it's a dictionary, and that it's - # empty. - self.assertTrue(isinstance(config.config, dict)) - self.assertEqual(len(config.config), 0) + def test_config(self): + # Verify that config exists, that it's a dictionary, and that it's + # empty. + self.assertTrue(isinstance(config.config, dict)) + self.assertEqual(len(config.config), 0) - # Verify that attempting to get a non-existant key out of the - # dictionary returns None. - self.assertIsNone(config.GetConfig('rabbit')) - self.assertIsNone(config.GetConfig('key1')) + # Verify that attempting to get a non-existant key out of the + # dictionary returns None. + self.assertIsNone(config.GetConfig("rabbit")) + self.assertIsNone(config.GetConfig("key1")) - config.AddConfig('key1', 16) - config.AddConfig('key2', 32) - config.AddConfig('key3', 'third value') + config.AddConfig("key1", 16) + config.AddConfig("key2", 32) + config.AddConfig("key3", "third value") - # Verify that after 3 calls to AddConfig we have 3 values in the - # dictionary. - self.assertEqual(len(config.config), 3) + # Verify that after 3 calls to AddConfig we have 3 values in the + # dictionary. + self.assertEqual(len(config.config), 3) - # Verify that GetConfig works and gets the expected values. - self.assertIs(config.GetConfig('key2'), 32) - self.assertIs(config.GetConfig('key3'), 'third value') - self.assertIs(config.GetConfig('key1'), 16) + # Verify that GetConfig works and gets the expected values. + self.assertIs(config.GetConfig("key2"), 32) + self.assertIs(config.GetConfig("key3"), "third value") + self.assertIs(config.GetConfig("key1"), 16) - # Re-set config. - config.config.clear() + # Re-set config. + config.config.clear() - # Verify that config exists, that it's a dictionary, and that it's - # empty. - self.assertTrue(isinstance(config.config, dict)) - self.assertEqual(len(config.config), 0) + # Verify that config exists, that it's a dictionary, and that it's + # empty. + self.assertTrue(isinstance(config.config, dict)) + self.assertEqual(len(config.config), 0) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/crosperf.py b/crosperf/crosperf.py index eaeceae8..7ce78afd 100755 --- a/crosperf/crosperf.py +++ b/crosperf/crosperf.py @@ -14,139 +14,145 @@ import os import signal import sys -from experiment_runner import ExperimentRunner -from experiment_runner import MockExperimentRunner -from experiment_factory import ExperimentFactory -from experiment_file import ExperimentFile -from settings_factory import GlobalSettings - # This import causes pylint to warn about "No name 'logger' in module # 'cros_utils'". I do not understand why. The import works fine in python. 
# pylint: disable=no-name-in-module from cros_utils import logger - +from experiment_factory import ExperimentFactory +from experiment_file import ExperimentFile +from experiment_runner import ExperimentRunner +from experiment_runner import MockExperimentRunner +from settings_factory import GlobalSettings import test_flag + HAS_FAILURE = 1 ALL_FAILED = 2 def SetupParserOptions(parser): - """Add all options to the parser.""" - parser.add_argument( - '--dry_run', - dest='dry_run', - help=('Parse the experiment file and ' - 'show what will be done'), - action='store_true', - default=False) - # Allow each of the global fields to be overridden by passing in - # options. Add each global field as an option. - option_settings = GlobalSettings('') - for field_name in option_settings.fields: - field = option_settings.fields[field_name] + """Add all options to the parser.""" parser.add_argument( - '--%s' % field.name, - dest=field.name, - help=field.description, - action='store') + "--dry_run", + dest="dry_run", + help=("Parse the experiment file and " "show what will be done"), + action="store_true", + default=False, + ) + # Allow each of the global fields to be overridden by passing in + # options. Add each global field as an option. + option_settings = GlobalSettings("") + for field_name in option_settings.fields: + field = option_settings.fields[field_name] + parser.add_argument( + "--%s" % field.name, + dest=field.name, + help=field.description, + action="store", + ) def ConvertOptionsToSettings(options): - """Convert options passed in into global settings.""" - option_settings = GlobalSettings('option_settings') - for option_name in options.__dict__: - if (options.__dict__[option_name] is not None and - option_name in option_settings.fields): - option_settings.SetField(option_name, options.__dict__[option_name]) - return option_settings + """Convert options passed in into global settings.""" + option_settings = GlobalSettings("option_settings") + for option_name in options.__dict__: + if ( + options.__dict__[option_name] is not None + and option_name in option_settings.fields + ): + option_settings.SetField(option_name, options.__dict__[option_name]) + return option_settings def Cleanup(experiment): - """Handler function which is registered to the atexit handler.""" - experiment.Cleanup() + """Handler function which is registered to the atexit handler.""" + experiment.Cleanup() def CallExitHandler(signum, _): - """Signal handler that transforms a signal into a call to exit. + """Signal handler that transforms a signal into a call to exit. - This is useful because functionality registered by "atexit" will - be called. It also means you can "catch" the signal by catching - the SystemExit exception. - """ - sys.exit(128 + signum) + This is useful because functionality registered by "atexit" will + be called. It also means you can "catch" the signal by catching + the SystemExit exception. + """ + sys.exit(128 + signum) def RunCrosperf(argv): - parser = argparse.ArgumentParser() - - parser.add_argument( - '--noschedv2', - dest='noschedv2', - default=False, - action='store_true', - help=('Do not use new scheduler. ' - 'Use original scheduler instead.')) - parser.add_argument( - '-l', - '--log_dir', - dest='log_dir', - default='', - help='The log_dir, default is under <crosperf_logs>/logs') - - SetupParserOptions(parser) - options, args = parser.parse_known_args(argv) - - # Convert the relevant options that are passed in into a settings - # object which will override settings in the experiment file. 
- option_settings = ConvertOptionsToSettings(options) - log_dir = os.path.abspath(os.path.expanduser(options.log_dir)) - logger.GetLogger(log_dir) - - if len(args) == 2: - experiment_filename = args[1] - else: - parser.error('Invalid number arguments.') - - working_directory = os.getcwd() - if options.dry_run: - test_flag.SetTestMode(True) - - experiment_file = ExperimentFile( - open(experiment_filename, encoding='utf-8'), option_settings) - if not experiment_file.GetGlobalSettings().GetField('name'): - experiment_name = os.path.basename(experiment_filename) - experiment_file.GetGlobalSettings().SetField('name', experiment_name) - experiment = ExperimentFactory().GetExperiment(experiment_file, - working_directory, log_dir) - - json_report = experiment_file.GetGlobalSettings().GetField('json_report') - - signal.signal(signal.SIGTERM, CallExitHandler) - atexit.register(Cleanup, experiment) - - if options.dry_run: - runner = MockExperimentRunner(experiment, json_report) - else: - runner = ExperimentRunner( - experiment, json_report, using_schedv2=(not options.noschedv2)) - - ret = runner.Run() - if ret == HAS_FAILURE: - raise RuntimeError('One or more benchmarks failed.') - if ret == ALL_FAILED: - raise RuntimeError('All benchmarks failed to run.') + parser = argparse.ArgumentParser() + + parser.add_argument( + "--noschedv2", + dest="noschedv2", + default=False, + action="store_true", + help=("Do not use new scheduler. " "Use original scheduler instead."), + ) + parser.add_argument( + "-l", + "--log_dir", + dest="log_dir", + default="", + help="The log_dir, default is under <crosperf_logs>/logs", + ) + + SetupParserOptions(parser) + options, args = parser.parse_known_args(argv) + + # Convert the relevant options that are passed in into a settings + # object which will override settings in the experiment file. 
+ option_settings = ConvertOptionsToSettings(options) + log_dir = os.path.abspath(os.path.expanduser(options.log_dir)) + logger.GetLogger(log_dir) + + if len(args) == 2: + experiment_filename = args[1] + else: + parser.error("Invalid number arguments.") + + working_directory = os.getcwd() + if options.dry_run: + test_flag.SetTestMode(True) + + experiment_file = ExperimentFile( + open(experiment_filename, encoding="utf-8"), option_settings + ) + if not experiment_file.GetGlobalSettings().GetField("name"): + experiment_name = os.path.basename(experiment_filename) + experiment_file.GetGlobalSettings().SetField("name", experiment_name) + experiment = ExperimentFactory().GetExperiment( + experiment_file, working_directory, log_dir + ) + + json_report = experiment_file.GetGlobalSettings().GetField("json_report") + + signal.signal(signal.SIGTERM, CallExitHandler) + atexit.register(Cleanup, experiment) + + if options.dry_run: + runner = MockExperimentRunner(experiment, json_report) + else: + runner = ExperimentRunner( + experiment, json_report, using_schedv2=(not options.noschedv2) + ) + + ret = runner.Run() + if ret == HAS_FAILURE: + raise RuntimeError("One or more benchmarks failed.") + if ret == ALL_FAILED: + raise RuntimeError("All benchmarks failed to run.") def Main(argv): - try: - RunCrosperf(argv) - except Exception: - # Flush buffers before exiting to avoid out of order printing - sys.stdout.flush() - # Raise exception prints out traceback - raise + try: + RunCrosperf(argv) + except Exception: + # Flush buffers before exiting to avoid out of order printing + sys.stdout.flush() + # Raise exception prints out traceback + raise -if __name__ == '__main__': - Main(sys.argv) +if __name__ == "__main__": + Main(sys.argv) diff --git a/crosperf/crosperf_autolock.py b/crosperf/crosperf_autolock.py index 92168599..9a44936f 100755 --- a/crosperf/crosperf_autolock.py +++ b/crosperf/crosperf_autolock.py @@ -23,260 +23,291 @@ sys.path.append(PARENT_DIR) def main(sys_args: List[str]) -> Optional[str]: - """Run crosperf_autolock. Returns error msg or None""" - args, leftover_args = parse_args(sys_args) - fleet_params = [ - CrosfleetParams(board=args.board, - pool=args.pool, - lease_time=args.lease_time) - for _ in range(args.num_leases) - ] - if not fleet_params: - return ('No board names identified. If you want to use' - ' a known host, just use crosperf directly.') - try: - _run_crosperf(fleet_params, args.dut_lock_timeout, leftover_args) - except BoardLockError as e: - _eprint('ERROR:', e) - _eprint('May need to login to crosfleet? Run "crosfleet login"') - _eprint('The leases may also be successful later on. ' - 'Check with "crosfleet dut leases"') - return 'crosperf_autolock failed' - except BoardReleaseError as e: - _eprint('ERROR:', e) - _eprint('May need to re-run "crosfleet dut abandon"') - return 'crosperf_autolock failed' - return None + """Run crosperf_autolock. Returns error msg or None""" + args, leftover_args = parse_args(sys_args) + fleet_params = [ + CrosfleetParams( + board=args.board, pool=args.pool, lease_time=args.lease_time + ) + for _ in range(args.num_leases) + ] + if not fleet_params: + return ( + "No board names identified. If you want to use" + " a known host, just use crosperf directly." + ) + try: + _run_crosperf(fleet_params, args.dut_lock_timeout, leftover_args) + except BoardLockError as e: + _eprint("ERROR:", e) + _eprint('May need to login to crosfleet? Run "crosfleet login"') + _eprint( + "The leases may also be successful later on. 
" + 'Check with "crosfleet dut leases"' + ) + return "crosperf_autolock failed" + except BoardReleaseError as e: + _eprint("ERROR:", e) + _eprint('May need to re-run "crosfleet dut abandon"') + return "crosperf_autolock failed" + return None def parse_args(args: List[str]) -> Tuple[Any, List]: - """Parse the CLI arguments.""" - parser = argparse.ArgumentParser( - 'crosperf_autolock', - description='Wrapper around crosperf' - ' to autolock DUTs from crosfleet.', - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('--board', - type=str, - help='Space or comma separated list of boards to lock', - required=True, - default=argparse.SUPPRESS) - parser.add_argument('--num-leases', - type=int, - help='Number of boards to lock.', - metavar='NUM', - default=1) - parser.add_argument('--pool', - type=str, - help='Pool to pull from.', - default='DUT_POOL_QUOTA') - parser.add_argument('--dut-lock-timeout', - type=float, - metavar='SEC', - help='Number of seconds we want to try to lease a board' - ' from crosfleet. This option does NOT change the' - ' lease length.', - default=600) - parser.add_argument('--lease-time', - type=int, - metavar='MIN', - help='Number of minutes to lock the board. Max is 1440.', - default=1440) - parser.epilog = ( - 'For more detailed flags, you have to read the args taken by the' - ' crosperf executable. Args are passed transparently to crosperf.') - return parser.parse_known_args(args) + """Parse the CLI arguments.""" + parser = argparse.ArgumentParser( + "crosperf_autolock", + description="Wrapper around crosperf" + " to autolock DUTs from crosfleet.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "--board", + type=str, + help="Space or comma separated list of boards to lock", + required=True, + default=argparse.SUPPRESS, + ) + parser.add_argument( + "--num-leases", + type=int, + help="Number of boards to lock.", + metavar="NUM", + default=1, + ) + parser.add_argument( + "--pool", type=str, help="Pool to pull from.", default="DUT_POOL_QUOTA" + ) + parser.add_argument( + "--dut-lock-timeout", + type=float, + metavar="SEC", + help="Number of seconds we want to try to lease a board" + " from crosfleet. This option does NOT change the" + " lease length.", + default=600, + ) + parser.add_argument( + "--lease-time", + type=int, + metavar="MIN", + help="Number of minutes to lock the board. Max is 1440.", + default=1440, + ) + parser.epilog = ( + "For more detailed flags, you have to read the args taken by the" + " crosperf executable. Args are passed transparently to crosperf." 
+ ) + return parser.parse_known_args(args) class BoardLockError(Exception): - """Error to indicate failure to lock a board.""" + """Error to indicate failure to lock a board.""" - def __init__(self, msg: str): - self.msg = 'BoardLockError: ' + msg - super().__init__(self.msg) + def __init__(self, msg: str): + self.msg = "BoardLockError: " + msg + super().__init__(self.msg) class BoardReleaseError(Exception): - """Error to indicate failure to release a board.""" + """Error to indicate failure to release a board.""" - def __init__(self, msg: str): - self.msg = 'BoardReleaseError: ' + msg - super().__init__(self.msg) + def __init__(self, msg: str): + self.msg = "BoardReleaseError: " + msg + super().__init__(self.msg) @dataclasses.dataclass(frozen=True) class CrosfleetParams: - """Dataclass to hold all crosfleet parameterizations.""" - board: str - pool: str - lease_time: int + """Dataclass to hold all crosfleet parameterizations.""" + + board: str + pool: str + lease_time: int def _eprint(*msg, **kwargs): - print(*msg, file=sys.stderr, **kwargs) - - -def _run_crosperf(crosfleet_params: List[CrosfleetParams], lock_timeout: float, - leftover_args: List[str]): - """Autolock devices and run crosperf with leftover arguments. - - Raises: - BoardLockError: When board was unable to be locked. - BoardReleaseError: When board was unable to be released. - """ - if not crosfleet_params: - raise ValueError('No crosfleet params given; cannot call crosfleet.') - - # We'll assume all the boards are the same type, which seems to be the case - # in experiments that actually get used. - passed_board_arg = crosfleet_params[0].board - with contextlib.ExitStack() as stack: - dut_hostnames = [] - for param in crosfleet_params: - print( - f'Sent lock request for {param.board} for {param.lease_time} minutes' - '\nIf this fails, you may need to run "crosfleet dut abandon <...>"') - # May raise BoardLockError, abandoning previous DUTs. - dut_hostname = stack.enter_context( - crosfleet_machine_ctx( - param.board, - param.lease_time, - lock_timeout, - {'label-pool': param.pool}, - )) - if dut_hostname: - print(f'Locked {param.board} machine: {dut_hostname}') - dut_hostnames.append(dut_hostname) - - # We import crosperf late, because this import is extremely slow. - # We don't want the user to wait several seconds just to get - # help info. - import crosperf - for dut_hostname in dut_hostnames: - crosperf.Main([ - sys.argv[0], - '--no_lock', - 'True', - '--remote', - dut_hostname, - '--board', - passed_board_arg, - ] + leftover_args) + print(*msg, file=sys.stderr, **kwargs) + + +def _run_crosperf( + crosfleet_params: List[CrosfleetParams], + lock_timeout: float, + leftover_args: List[str], +): + """Autolock devices and run crosperf with leftover arguments. + + Raises: + BoardLockError: When board was unable to be locked. + BoardReleaseError: When board was unable to be released. + """ + if not crosfleet_params: + raise ValueError("No crosfleet params given; cannot call crosfleet.") + + # We'll assume all the boards are the same type, which seems to be the case + # in experiments that actually get used. + passed_board_arg = crosfleet_params[0].board + with contextlib.ExitStack() as stack: + dut_hostnames = [] + for param in crosfleet_params: + print( + f"Sent lock request for {param.board} for {param.lease_time} minutes" + '\nIf this fails, you may need to run "crosfleet dut abandon <...>"' + ) + # May raise BoardLockError, abandoning previous DUTs. 
+ dut_hostname = stack.enter_context( + crosfleet_machine_ctx( + param.board, + param.lease_time, + lock_timeout, + {"label-pool": param.pool}, + ) + ) + if dut_hostname: + print(f"Locked {param.board} machine: {dut_hostname}") + dut_hostnames.append(dut_hostname) + + # We import crosperf late, because this import is extremely slow. + # We don't want the user to wait several seconds just to get + # help info. + import crosperf + + for dut_hostname in dut_hostnames: + crosperf.Main( + [ + sys.argv[0], + "--no_lock", + "True", + "--remote", + dut_hostname, + "--board", + passed_board_arg, + ] + + leftover_args + ) @contextlib.contextmanager -def crosfleet_machine_ctx(board: str, - lease_minutes: int, - lock_timeout: float, - dims: Dict[str, Any], - abandon_timeout: float = 120.0) -> Any: - """Acquire dut from crosfleet, and release once it leaves the context. - - Args: - board: Board type to lease. - lease_minutes: Length of lease, in minutes. - lock_timeout: How long to wait for a lock until quitting. - dims: Dictionary of dimension arguments to pass to crosfleet's '-dims' - abandon_timeout: How long to wait for releasing until quitting. - - Yields: - A string representing the crosfleet DUT hostname. - - Raises: - BoardLockError: When board was unable to be locked. - BoardReleaseError: When board was unable to be released. - """ - # This lock may raise an exception, but if it does, we can't release - # the DUT anyways as we won't have the dut_hostname. - dut_hostname = crosfleet_autolock(board, lease_minutes, dims, lock_timeout) - try: - yield dut_hostname - finally: - if dut_hostname: - crosfleet_release(dut_hostname, abandon_timeout) - - -def crosfleet_autolock(board: str, lease_minutes: int, dims: Dict[str, Any], - timeout_sec: float) -> str: - """Lock a device using crosfleet, paramaterized by the board type. - - Args: - board: Board of the DUT we want to lock. - lease_minutes: Number of minutes we're trying to lease the DUT for. - dims: Dictionary of dimension arguments to pass to crosfleet's '-dims' - timeout_sec: Number of seconds to try to lease the DUT. Default 120s. - - Returns: - The hostname of the board, or empty string if it couldn't be parsed. - - Raises: - BoardLockError: When board was unable to be locked. 
- """ - crosfleet_cmd_args = [ - 'crosfleet', - 'dut', - 'lease', - '-json', - '-reason="crosperf autolock"', - f'-board={board}', - f'-minutes={lease_minutes}', - ] - if dims: - dims_arg = ','.join(f'{k}={v}' for k, v in dims.items()) - crosfleet_cmd_args.extend(['-dims', f'{dims_arg}']) - - try: - output = subprocess.check_output(crosfleet_cmd_args, - timeout=timeout_sec, - encoding='utf-8') - except subprocess.CalledProcessError as e: - raise BoardLockError( - f'crosfleet dut lease failed with exit code: {e.returncode}') - except subprocess.TimeoutExpired as e: - raise BoardLockError(f'crosfleet dut lease timed out after {timeout_sec}s;' - ' please abandon the dut manually.') - - try: - json_obj = json.loads(output) - dut_hostname = json_obj['DUT']['Hostname'] - if not isinstance(dut_hostname, str): - raise TypeError('dut_hostname was not a string') - except (json.JSONDecodeError, IndexError, KeyError, TypeError) as e: - raise BoardLockError( - f'crosfleet dut lease output was parsed incorrectly: {e!r};' - f' observed output was {output}') - return _maybe_append_suffix(dut_hostname) +def crosfleet_machine_ctx( + board: str, + lease_minutes: int, + lock_timeout: float, + dims: Dict[str, Any], + abandon_timeout: float = 120.0, +) -> Any: + """Acquire dut from crosfleet, and release once it leaves the context. + + Args: + board: Board type to lease. + lease_minutes: Length of lease, in minutes. + lock_timeout: How long to wait for a lock until quitting. + dims: Dictionary of dimension arguments to pass to crosfleet's '-dims' + abandon_timeout: How long to wait for releasing until quitting. + + Yields: + A string representing the crosfleet DUT hostname. + + Raises: + BoardLockError: When board was unable to be locked. + BoardReleaseError: When board was unable to be released. + """ + # This lock may raise an exception, but if it does, we can't release + # the DUT anyways as we won't have the dut_hostname. + dut_hostname = crosfleet_autolock(board, lease_minutes, dims, lock_timeout) + try: + yield dut_hostname + finally: + if dut_hostname: + crosfleet_release(dut_hostname, abandon_timeout) + + +def crosfleet_autolock( + board: str, lease_minutes: int, dims: Dict[str, Any], timeout_sec: float +) -> str: + """Lock a device using crosfleet, paramaterized by the board type. + + Args: + board: Board of the DUT we want to lock. + lease_minutes: Number of minutes we're trying to lease the DUT for. + dims: Dictionary of dimension arguments to pass to crosfleet's '-dims' + timeout_sec: Number of seconds to try to lease the DUT. Default 120s. + + Returns: + The hostname of the board, or empty string if it couldn't be parsed. + + Raises: + BoardLockError: When board was unable to be locked. + """ + crosfleet_cmd_args = [ + "crosfleet", + "dut", + "lease", + "-json", + '-reason="crosperf autolock"', + f"-board={board}", + f"-minutes={lease_minutes}", + ] + if dims: + dims_arg = ",".join(f"{k}={v}" for k, v in dims.items()) + crosfleet_cmd_args.extend(["-dims", f"{dims_arg}"]) + + try: + output = subprocess.check_output( + crosfleet_cmd_args, timeout=timeout_sec, encoding="utf-8" + ) + except subprocess.CalledProcessError as e: + raise BoardLockError( + f"crosfleet dut lease failed with exit code: {e.returncode}" + ) + except subprocess.TimeoutExpired as e: + raise BoardLockError( + f"crosfleet dut lease timed out after {timeout_sec}s;" + " please abandon the dut manually." 
+ ) + + try: + json_obj = json.loads(output) + dut_hostname = json_obj["DUT"]["Hostname"] + if not isinstance(dut_hostname, str): + raise TypeError("dut_hostname was not a string") + except (json.JSONDecodeError, IndexError, KeyError, TypeError) as e: + raise BoardLockError( + f"crosfleet dut lease output was parsed incorrectly: {e!r};" + f" observed output was {output}" + ) + return _maybe_append_suffix(dut_hostname) def crosfleet_release(dut_hostname: str, timeout_sec: float = 120.0): - """Release a crosfleet device. - - Consider using the context managed crosfleet_machine_context - - Args: - dut_hostname: Name of the device we want to release. - timeout_sec: Number of seconds to try to release the DUT. Default is 120s. - - Raises: - BoardReleaseError: Potentially failed to abandon the lease. - """ - crosfleet_cmd_args = [ - 'crosfleet', - 'dut', - 'abandon', - dut_hostname, - ] - exit_code = subprocess.call(crosfleet_cmd_args, timeout=timeout_sec) - if exit_code != 0: - raise BoardReleaseError( - f'"crosfleet dut abandon" had exit code {exit_code}') + """Release a crosfleet device. + + Consider using the context managed crosfleet_machine_context + + Args: + dut_hostname: Name of the device we want to release. + timeout_sec: Number of seconds to try to release the DUT. Default is 120s. + + Raises: + BoardReleaseError: Potentially failed to abandon the lease. + """ + crosfleet_cmd_args = [ + "crosfleet", + "dut", + "abandon", + dut_hostname, + ] + exit_code = subprocess.call(crosfleet_cmd_args, timeout=timeout_sec) + if exit_code != 0: + raise BoardReleaseError( + f'"crosfleet dut abandon" had exit code {exit_code}' + ) def _maybe_append_suffix(hostname: str) -> str: - if hostname.endswith('.cros') or '.cros.' in hostname: - return hostname - return hostname + '.cros' + if hostname.endswith(".cros") or ".cros." 
in hostname: + return hostname + return hostname + ".cros" -if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py index 26f6b6a9..4cbf71d8 100755 --- a/crosperf/crosperf_unittest.py +++ b/crosperf/crosperf_unittest.py @@ -17,8 +17,9 @@ import unittest import unittest.mock as mock import crosperf -import settings_factory import experiment_file +import settings_factory + EXPERIMENT_FILE_1 = """ board: x86-alex @@ -41,50 +42,51 @@ EXPERIMENT_FILE_1 = """ class CrosperfTest(unittest.TestCase): - """Crosperf test class.""" - - def setUp(self): - input_file = io.StringIO(EXPERIMENT_FILE_1) - self.exp_file = experiment_file.ExperimentFile(input_file) - - def testDryRun(self): - with tempfile.NamedTemporaryFile('w', encoding='utf-8') as f: - f.write(EXPERIMENT_FILE_1) - f.flush() - crosperf.Main(['', f.name, '--dry_run']) - - def testConvertOptionsToSettings(self): - parser = argparse.ArgumentParser() - parser.add_argument('-l', - '--log_dir', - dest='log_dir', - default='', - help='The log_dir, default is under ' - '<crosperf_logs>/logs') - crosperf.SetupParserOptions(parser) - argv = ['crosperf/crosperf.py', 'temp.exp', '--rerun=True'] - options, _ = parser.parse_known_args(argv) - settings = crosperf.ConvertOptionsToSettings(options) - self.assertIsNotNone(settings) - self.assertIsInstance(settings, settings_factory.GlobalSettings) - self.assertEqual(len(settings.fields), 40) - self.assertTrue(settings.GetField('rerun')) - argv = ['crosperf/crosperf.py', 'temp.exp'] - options, _ = parser.parse_known_args(argv) - settings = crosperf.ConvertOptionsToSettings(options) - self.assertFalse(settings.GetField('rerun')) - - def testExceptionPrintTraceback(self): - """Test the main function can print traceback in exception.""" - - def mock_RunCrosperf(*_args, **_kwargs): - return 10 / 0 - - with mock.patch('crosperf.RunCrosperf', new=mock_RunCrosperf): - with self.assertRaises(ZeroDivisionError) as context: - crosperf.Main([]) - self.assertEqual('division by zero', str(context.exception)) - - -if __name__ == '__main__': - unittest.main() + """Crosperf test class.""" + + def setUp(self): + input_file = io.StringIO(EXPERIMENT_FILE_1) + self.exp_file = experiment_file.ExperimentFile(input_file) + + def testDryRun(self): + with tempfile.NamedTemporaryFile("w", encoding="utf-8") as f: + f.write(EXPERIMENT_FILE_1) + f.flush() + crosperf.Main(["", f.name, "--dry_run"]) + + def testConvertOptionsToSettings(self): + parser = argparse.ArgumentParser() + parser.add_argument( + "-l", + "--log_dir", + dest="log_dir", + default="", + help="The log_dir, default is under " "<crosperf_logs>/logs", + ) + crosperf.SetupParserOptions(parser) + argv = ["crosperf/crosperf.py", "temp.exp", "--rerun=True"] + options, _ = parser.parse_known_args(argv) + settings = crosperf.ConvertOptionsToSettings(options) + self.assertIsNotNone(settings) + self.assertIsInstance(settings, settings_factory.GlobalSettings) + self.assertEqual(len(settings.fields), 40) + self.assertTrue(settings.GetField("rerun")) + argv = ["crosperf/crosperf.py", "temp.exp"] + options, _ = parser.parse_known_args(argv) + settings = crosperf.ConvertOptionsToSettings(options) + self.assertFalse(settings.GetField("rerun")) + + def testExceptionPrintTraceback(self): + """Test the main function can print traceback in exception.""" + + def mock_RunCrosperf(*_args, **_kwargs): + return 10 / 0 + + with mock.patch("crosperf.RunCrosperf", 
new=mock_RunCrosperf): + with self.assertRaises(ZeroDivisionError) as context: + crosperf.Main([]) + self.assertEqual("division by zero", str(context.exception)) + + +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/download_images.py b/crosperf/download_images.py index 51c8325e..da73d941 100644 --- a/crosperf/download_images.py +++ b/crosperf/download_images.py @@ -10,318 +10,391 @@ from __future__ import print_function import ast import os +from cros_utils import command_executer import test_flag -from cros_utils import command_executer -GS_UTIL = 'src/chromium/depot_tools/gsutil.py' +GS_UTIL = "src/chromium/depot_tools/gsutil.py" class MissingImage(Exception): - """Raised when the requested image does not exist in gs://""" + """Raised when the requested image does not exist in gs://""" class MissingFile(Exception): - """Raised when the requested file does not exist in gs://""" + """Raised when the requested file does not exist in gs://""" class RunCommandExceptionHandler(object): - """Handle Exceptions from calls to RunCommand""" + """Handle Exceptions from calls to RunCommand""" - def __init__(self, logger_to_use, log_level, cmd_exec, command): - self.logger = logger_to_use - self.log_level = log_level - self.ce = cmd_exec - self.cleanup_command = command + def __init__(self, logger_to_use, log_level, cmd_exec, command): + self.logger = logger_to_use + self.log_level = log_level + self.ce = cmd_exec + self.cleanup_command = command - def HandleException(self, _, e): - # Exception handler, Run specified command - if self.log_level != 'verbose' and self.cleanup_command is not None: - self.logger.LogOutput('CMD: %s' % self.cleanup_command) - if self.cleanup_command is not None: - _ = self.ce.RunCommand(self.cleanup_command) - # Raise exception again - raise e + def HandleException(self, _, e): + # Exception handler, Run specified command + if self.log_level != "verbose" and self.cleanup_command is not None: + self.logger.LogOutput("CMD: %s" % self.cleanup_command) + if self.cleanup_command is not None: + _ = self.ce.RunCommand(self.cleanup_command) + # Raise exception again + raise e class ImageDownloader(object): - """Download images from Cloud Storage.""" - - def __init__(self, logger_to_use=None, log_level='verbose', cmd_exec=None): - self._logger = logger_to_use - self.log_level = log_level - self._ce = cmd_exec or command_executer.GetCommandExecuter( - self._logger, log_level=self.log_level) - - def GetBuildID(self, chromeos_root, xbuddy_label): - # Get the translation of the xbuddy_label into the real Google Storage - # image name. - command = ('cd /mnt/host/source/src/third_party/toolchain-utils/crosperf; ' - "./translate_xbuddy.py '%s'" % xbuddy_label) - _, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput( - chromeos_root, command) - if not build_id_tuple_str: - raise MissingImage("Unable to find image for '%s'" % xbuddy_label) - - build_id_tuple = ast.literal_eval(build_id_tuple_str) - build_id = build_id_tuple[0] - - return build_id - - def DownloadImage(self, chromeos_root, build_id, image_name): - if self.log_level == 'average': - self._logger.LogOutput('Preparing to download %s image to local ' - 'directory.' % build_id) - - # Make sure the directory for downloading the image exists. - download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id) - image_path = os.path.join(download_path, 'chromiumos_test_image.bin') - if not os.path.exists(download_path): - os.makedirs(download_path) - - # Check to see if the image has already been downloaded. 
If not, - # download the image. - if not os.path.exists(image_path): - gsutil_cmd = os.path.join(chromeos_root, GS_UTIL) - command = '%s cp %s %s' % (gsutil_cmd, image_name, download_path) - - if self.log_level != 'verbose': - self._logger.LogOutput('CMD: %s' % command) - status = self._ce.RunCommand(command) - downloaded_image_name = os.path.join(download_path, - 'chromiumos_test_image.tar.xz') - if status != 0 or not os.path.exists(downloaded_image_name): - raise MissingImage('Cannot download image: %s.' % downloaded_image_name) - - return image_path - - def UncompressImage(self, chromeos_root, build_id): - # Check to see if the file has already been uncompresssed, etc. - if os.path.exists( - os.path.join(chromeos_root, 'chroot/tmp', build_id, - 'chromiumos_test_image.bin')): - return - - # Uncompress and untar the downloaded image. - download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id) - command = ('cd %s ; tar -Jxf chromiumos_test_image.tar.xz ' % download_path) - # Cleanup command for exception handler - clean_cmd = ('cd %s ; rm -f chromiumos_test_image.bin ' % download_path) - exception_handler = RunCommandExceptionHandler(self._logger, self.log_level, - self._ce, clean_cmd) - if self.log_level != 'verbose': - self._logger.LogOutput('CMD: %s' % command) - print('(Uncompressing and un-tarring may take a couple of minutes...' - 'please be patient.)') - retval = self._ce.RunCommand( - command, except_handler=exception_handler.HandleException) - if retval != 0: - if self.log_level != 'verbose': - self._logger.LogOutput('CMD: %s' % clean_cmd) - print('(Removing file chromiumos_test_image.bin.)') - # Remove partially uncompressed file - _ = self._ce.RunCommand(clean_cmd) - # Raise exception for failure to uncompress - raise MissingImage('Cannot uncompress image: %s.' % build_id) - - # Remove compressed image - command = ('cd %s ; rm -f chromiumos_test_image.tar.xz; ' % download_path) - if self.log_level != 'verbose': - self._logger.LogOutput('CMD: %s' % command) - print('(Removing file chromiumos_test_image.tar.xz.)') - # try removing file, its ok to have an error, print if encountered - retval = self._ce.RunCommand(command) - if retval != 0: - print('(Warning: Could not remove file chromiumos_test_image.tar.xz .)') - - def DownloadSingleFile(self, chromeos_root, build_id, package_file_name): - # Verify if package files exist - status = 0 - gs_package_name = ('gs://chromeos-image-archive/%s/%s' % - (build_id, package_file_name)) - gsutil_cmd = os.path.join(chromeos_root, GS_UTIL) - if not test_flag.GetTestMode(): - cmd = '%s ls %s' % (gsutil_cmd, gs_package_name) - status = self._ce.RunCommand(cmd) - if status != 0: - raise MissingFile('Cannot find package file: %s.' % package_file_name) - - if self.log_level == 'average': - self._logger.LogOutput('Preparing to download %s package to local ' - 'directory.' % package_file_name) - - # Make sure the directory for downloading the package exists. - download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id) - package_path = os.path.join(download_path, package_file_name) - if not os.path.exists(download_path): - os.makedirs(download_path) - - # Check to see if the package file has already been downloaded. If not, - # download it. 
- if not os.path.exists(package_path): - command = '%s cp %s %s' % (gsutil_cmd, gs_package_name, download_path) - - if self.log_level != 'verbose': - self._logger.LogOutput('CMD: %s' % command) - status = self._ce.RunCommand(command) - if status != 0 or not os.path.exists(package_path): - raise MissingFile('Cannot download package: %s .' % package_path) - - def UncompressSingleFile(self, chromeos_root, build_id, package_file_name, - uncompress_cmd): - # Uncompress file - download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id) - command = ('cd %s ; %s %s' % - (download_path, uncompress_cmd, package_file_name)) - - if self.log_level != 'verbose': - self._logger.LogOutput('CMD: %s' % command) - print('(Uncompressing file %s .)' % package_file_name) - retval = self._ce.RunCommand(command) - if retval != 0: - raise MissingFile('Cannot uncompress file: %s.' % package_file_name) - # Remove uncompressed downloaded file - command = ('cd %s ; rm -f %s' % (download_path, package_file_name)) - if self.log_level != 'verbose': - self._logger.LogOutput('CMD: %s' % command) - print('(Removing processed file %s .)' % package_file_name) - # try removing file, its ok to have an error, print if encountered - retval = self._ce.RunCommand(command) - if retval != 0: - print('(Warning: Could not remove file %s .)' % package_file_name) - - def VerifyFileExists(self, chromeos_root, build_id, package_file): - # Quickly verify if the files are there - status = 0 - gs_package_name = ('gs://chromeos-image-archive/%s/%s' % - (build_id, package_file)) - gsutil_cmd = os.path.join(chromeos_root, GS_UTIL) - if not test_flag.GetTestMode(): - cmd = '%s ls %s' % (gsutil_cmd, gs_package_name) - if self.log_level != 'verbose': - self._logger.LogOutput('CMD: %s' % cmd) - status = self._ce.RunCommand(cmd) - if status != 0: - print('(Warning: Could not find file %s )' % gs_package_name) - return 1 - # Package exists on server - return 0 - - def DownloadAutotestFiles(self, chromeos_root, build_id): - # Download autest package files (3 files) - autotest_packages_name = ('autotest_packages.tar') - autotest_server_package_name = ('autotest_server_package.tar.bz2') - autotest_control_files_name = ('control_files.tar') - - download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id) - # Autotest directory relative path wrt chroot - autotest_rel_path = os.path.join('/tmp', build_id, 'autotest_files') - # Absolute Path to download files - autotest_path = os.path.join(chromeos_root, 'chroot/tmp', build_id, - 'autotest_files') - - if not os.path.exists(autotest_path): - # Quickly verify if the files are present on server - # If not, just exit with warning - status = self.VerifyFileExists(chromeos_root, build_id, - autotest_packages_name) - if status != 0: - default_autotest_dir = '/mnt/host/source/src/third_party/autotest/files' - print('(Warning: Could not find autotest packages .)\n' - '(Warning: Defaulting autotest path to %s .' 
% - default_autotest_dir) - return default_autotest_dir - - # Files exist on server, download and uncompress them - self.DownloadSingleFile(chromeos_root, build_id, autotest_packages_name) - self.DownloadSingleFile(chromeos_root, build_id, - autotest_server_package_name) - self.DownloadSingleFile(chromeos_root, build_id, - autotest_control_files_name) - - self.UncompressSingleFile(chromeos_root, build_id, autotest_packages_name, - 'tar -xf ') - self.UncompressSingleFile(chromeos_root, build_id, - autotest_server_package_name, 'tar -jxf ') - self.UncompressSingleFile(chromeos_root, build_id, - autotest_control_files_name, 'tar -xf ') - # Rename created autotest directory to autotest_files - command = ('cd %s ; mv autotest autotest_files' % download_path) - if self.log_level != 'verbose': - self._logger.LogOutput('CMD: %s' % command) - print('(Moving downloaded autotest files to autotest_files)') - retval = self._ce.RunCommand(command) - if retval != 0: - raise MissingFile('Could not create directory autotest_files') - - return autotest_rel_path - - def DownloadDebugFile(self, chromeos_root, build_id): - # Download autest package files (3 files) - debug_archive_name = 'debug.tgz' - - download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id) - # Debug directory relative path wrt chroot - debug_rel_path = os.path.join('/tmp', build_id, 'debug_files') - # Debug path to download files - debug_path = os.path.join(chromeos_root, 'chroot/tmp', build_id, - 'debug_files') - - if not os.path.exists(debug_path): - # Quickly verify if the file is present on server - # If not, just exit with warning - status = self.VerifyFileExists(chromeos_root, build_id, - debug_archive_name) - if status != 0: - self._logger.LogOutput('WARNING: Could not find debug archive on gs') - return '' - - # File exists on server, download and uncompress it - self.DownloadSingleFile(chromeos_root, build_id, debug_archive_name) - - self.UncompressSingleFile(chromeos_root, build_id, debug_archive_name, - 'tar -xf ') - # Extract and move debug files into the proper location. - debug_dir = 'debug_files/usr/lib' - command = ('cd %s ; mkdir -p %s; mv debug %s' % - (download_path, debug_dir, debug_dir)) - if self.log_level != 'verbose': - self._logger.LogOutput('CMD: %s' % command) - print('Moving downloaded debug files to %s' % debug_dir) - retval = self._ce.RunCommand(command) - if retval != 0: - raise MissingFile('Could not create directory %s' % - os.path.join(debug_dir, 'debug')) - - return debug_rel_path - - def Run(self, chromeos_root, xbuddy_label, autotest_path, debug_path, - download_debug): - build_id = self.GetBuildID(chromeos_root, xbuddy_label) - image_name = ( - 'gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz' % - build_id) - - # Verify that image exists for build_id, before attempting to - # download it. - status = 0 - if not test_flag.GetTestMode(): - gsutil_cmd = os.path.join(chromeos_root, GS_UTIL) - cmd = '%s ls %s' % (gsutil_cmd, image_name) - status = self._ce.RunCommand(cmd) - if status != 0: - raise MissingImage('Cannot find official image: %s.' % image_name) - - image_path = self.DownloadImage(chromeos_root, build_id, image_name) - self.UncompressImage(chromeos_root, build_id) - - if self.log_level != 'quiet': - self._logger.LogOutput('Using image from %s.' 
% image_path) - - if autotest_path == '': - autotest_path = self.DownloadAutotestFiles(chromeos_root, build_id) - - if debug_path == '' and download_debug: - debug_path = self.DownloadDebugFile(chromeos_root, build_id) - - return image_path, autotest_path, debug_path + """Download images from Cloud Storage.""" + + def __init__(self, logger_to_use=None, log_level="verbose", cmd_exec=None): + self._logger = logger_to_use + self.log_level = log_level + self._ce = cmd_exec or command_executer.GetCommandExecuter( + self._logger, log_level=self.log_level + ) + + def GetBuildID(self, chromeos_root, xbuddy_label): + # Get the translation of the xbuddy_label into the real Google Storage + # image name. + command = ( + "cd /mnt/host/source/src/third_party/toolchain-utils/crosperf; " + "./translate_xbuddy.py '%s'" % xbuddy_label + ) + _, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput( + chromeos_root, command + ) + if not build_id_tuple_str: + raise MissingImage("Unable to find image for '%s'" % xbuddy_label) + + build_id_tuple = ast.literal_eval(build_id_tuple_str) + build_id = build_id_tuple[0] + + return build_id + + def DownloadImage(self, chromeos_root, build_id, image_name): + if self.log_level == "average": + self._logger.LogOutput( + "Preparing to download %s image to local " + "directory." % build_id + ) + + # Make sure the directory for downloading the image exists. + download_path = os.path.join(chromeos_root, "chroot/tmp", build_id) + image_path = os.path.join(download_path, "chromiumos_test_image.bin") + if not os.path.exists(download_path): + os.makedirs(download_path) + + # Check to see if the image has already been downloaded. If not, + # download the image. + if not os.path.exists(image_path): + gsutil_cmd = os.path.join(chromeos_root, GS_UTIL) + command = "%s cp %s %s" % (gsutil_cmd, image_name, download_path) + + if self.log_level != "verbose": + self._logger.LogOutput("CMD: %s" % command) + status = self._ce.RunCommand(command) + downloaded_image_name = os.path.join( + download_path, "chromiumos_test_image.tar.xz" + ) + if status != 0 or not os.path.exists(downloaded_image_name): + raise MissingImage( + "Cannot download image: %s." % downloaded_image_name + ) + + return image_path + + def UncompressImage(self, chromeos_root, build_id): + # Check to see if the file has already been uncompresssed, etc. + if os.path.exists( + os.path.join( + chromeos_root, + "chroot/tmp", + build_id, + "chromiumos_test_image.bin", + ) + ): + return + + # Uncompress and untar the downloaded image. + download_path = os.path.join(chromeos_root, "chroot/tmp", build_id) + command = ( + "cd %s ; tar -Jxf chromiumos_test_image.tar.xz " % download_path + ) + # Cleanup command for exception handler + clean_cmd = "cd %s ; rm -f chromiumos_test_image.bin " % download_path + exception_handler = RunCommandExceptionHandler( + self._logger, self.log_level, self._ce, clean_cmd + ) + if self.log_level != "verbose": + self._logger.LogOutput("CMD: %s" % command) + print( + "(Uncompressing and un-tarring may take a couple of minutes..." + "please be patient.)" + ) + retval = self._ce.RunCommand( + command, except_handler=exception_handler.HandleException + ) + if retval != 0: + if self.log_level != "verbose": + self._logger.LogOutput("CMD: %s" % clean_cmd) + print("(Removing file chromiumos_test_image.bin.)") + # Remove partially uncompressed file + _ = self._ce.RunCommand(clean_cmd) + # Raise exception for failure to uncompress + raise MissingImage("Cannot uncompress image: %s." 
% build_id) + + # Remove compressed image + command = "cd %s ; rm -f chromiumos_test_image.tar.xz; " % download_path + if self.log_level != "verbose": + self._logger.LogOutput("CMD: %s" % command) + print("(Removing file chromiumos_test_image.tar.xz.)") + # try removing file, its ok to have an error, print if encountered + retval = self._ce.RunCommand(command) + if retval != 0: + print( + "(Warning: Could not remove file chromiumos_test_image.tar.xz .)" + ) + + def DownloadSingleFile(self, chromeos_root, build_id, package_file_name): + # Verify if package files exist + status = 0 + gs_package_name = "gs://chromeos-image-archive/%s/%s" % ( + build_id, + package_file_name, + ) + gsutil_cmd = os.path.join(chromeos_root, GS_UTIL) + if not test_flag.GetTestMode(): + cmd = "%s ls %s" % (gsutil_cmd, gs_package_name) + status = self._ce.RunCommand(cmd) + if status != 0: + raise MissingFile( + "Cannot find package file: %s." % package_file_name + ) + + if self.log_level == "average": + self._logger.LogOutput( + "Preparing to download %s package to local " + "directory." % package_file_name + ) + + # Make sure the directory for downloading the package exists. + download_path = os.path.join(chromeos_root, "chroot/tmp", build_id) + package_path = os.path.join(download_path, package_file_name) + if not os.path.exists(download_path): + os.makedirs(download_path) + + # Check to see if the package file has already been downloaded. If not, + # download it. + if not os.path.exists(package_path): + command = "%s cp %s %s" % ( + gsutil_cmd, + gs_package_name, + download_path, + ) + + if self.log_level != "verbose": + self._logger.LogOutput("CMD: %s" % command) + status = self._ce.RunCommand(command) + if status != 0 or not os.path.exists(package_path): + raise MissingFile( + "Cannot download package: %s ." % package_path + ) + + def UncompressSingleFile( + self, chromeos_root, build_id, package_file_name, uncompress_cmd + ): + # Uncompress file + download_path = os.path.join(chromeos_root, "chroot/tmp", build_id) + command = "cd %s ; %s %s" % ( + download_path, + uncompress_cmd, + package_file_name, + ) + + if self.log_level != "verbose": + self._logger.LogOutput("CMD: %s" % command) + print("(Uncompressing file %s .)" % package_file_name) + retval = self._ce.RunCommand(command) + if retval != 0: + raise MissingFile("Cannot uncompress file: %s." 
% package_file_name) + # Remove uncompressed downloaded file + command = "cd %s ; rm -f %s" % (download_path, package_file_name) + if self.log_level != "verbose": + self._logger.LogOutput("CMD: %s" % command) + print("(Removing processed file %s .)" % package_file_name) + # try removing file, its ok to have an error, print if encountered + retval = self._ce.RunCommand(command) + if retval != 0: + print("(Warning: Could not remove file %s .)" % package_file_name) + + def VerifyFileExists(self, chromeos_root, build_id, package_file): + # Quickly verify if the files are there + status = 0 + gs_package_name = "gs://chromeos-image-archive/%s/%s" % ( + build_id, + package_file, + ) + gsutil_cmd = os.path.join(chromeos_root, GS_UTIL) + if not test_flag.GetTestMode(): + cmd = "%s ls %s" % (gsutil_cmd, gs_package_name) + if self.log_level != "verbose": + self._logger.LogOutput("CMD: %s" % cmd) + status = self._ce.RunCommand(cmd) + if status != 0: + print("(Warning: Could not find file %s )" % gs_package_name) + return 1 + # Package exists on server + return 0 + + def DownloadAutotestFiles(self, chromeos_root, build_id): + # Download autest package files (3 files) + autotest_packages_name = "autotest_packages.tar" + autotest_server_package_name = "autotest_server_package.tar.bz2" + autotest_control_files_name = "control_files.tar" + + download_path = os.path.join(chromeos_root, "chroot/tmp", build_id) + # Autotest directory relative path wrt chroot + autotest_rel_path = os.path.join("/tmp", build_id, "autotest_files") + # Absolute Path to download files + autotest_path = os.path.join( + chromeos_root, "chroot/tmp", build_id, "autotest_files" + ) + + if not os.path.exists(autotest_path): + # Quickly verify if the files are present on server + # If not, just exit with warning + status = self.VerifyFileExists( + chromeos_root, build_id, autotest_packages_name + ) + if status != 0: + default_autotest_dir = ( + "/mnt/host/source/src/third_party/autotest/files" + ) + print( + "(Warning: Could not find autotest packages .)\n" + "(Warning: Defaulting autotest path to %s ." 
+                    % default_autotest_dir
+                )
+                return default_autotest_dir
+
+            # Files exist on server, download and uncompress them
+            self.DownloadSingleFile(
+                chromeos_root, build_id, autotest_packages_name
+            )
+            self.DownloadSingleFile(
+                chromeos_root, build_id, autotest_server_package_name
+            )
+            self.DownloadSingleFile(
+                chromeos_root, build_id, autotest_control_files_name
+            )
+
+            self.UncompressSingleFile(
+                chromeos_root, build_id, autotest_packages_name, "tar -xf "
+            )
+            self.UncompressSingleFile(
+                chromeos_root,
+                build_id,
+                autotest_server_package_name,
+                "tar -jxf ",
+            )
+            self.UncompressSingleFile(
+                chromeos_root, build_id, autotest_control_files_name, "tar -xf "
+            )
+            # Rename created autotest directory to autotest_files
+            command = "cd %s ; mv autotest autotest_files" % download_path
+            if self.log_level != "verbose":
+                self._logger.LogOutput("CMD: %s" % command)
+                print("(Moving downloaded autotest files to autotest_files)")
+            retval = self._ce.RunCommand(command)
+            if retval != 0:
+                raise MissingFile("Could not create directory autotest_files")
+
+        return autotest_rel_path
+
+    def DownloadDebugFile(self, chromeos_root, build_id):
+        # Download the debug archive (1 file)
+        debug_archive_name = "debug.tgz"
+
+        download_path = os.path.join(chromeos_root, "chroot/tmp", build_id)
+        # Debug directory relative path wrt chroot
+        debug_rel_path = os.path.join("/tmp", build_id, "debug_files")
+        # Debug path to download files
+        debug_path = os.path.join(
+            chromeos_root, "chroot/tmp", build_id, "debug_files"
+        )
+
+        if not os.path.exists(debug_path):
+            # Quickly verify if the file is present on the server.
+            # If not, just exit with a warning.
+            status = self.VerifyFileExists(
+                chromeos_root, build_id, debug_archive_name
+            )
+            if status != 0:
+                self._logger.LogOutput(
+                    "WARNING: Could not find debug archive on gs"
+                )
+                return ""
+
+            # File exists on server, download and uncompress it
+            self.DownloadSingleFile(chromeos_root, build_id, debug_archive_name)
+
+            self.UncompressSingleFile(
+                chromeos_root, build_id, debug_archive_name, "tar -xf "
+            )
+            # Extract and move debug files into the proper location.
+            debug_dir = "debug_files/usr/lib"
+            command = "cd %s ; mkdir -p %s; mv debug %s" % (
+                download_path,
+                debug_dir,
+                debug_dir,
+            )
+            if self.log_level != "verbose":
+                self._logger.LogOutput("CMD: %s" % command)
+                print("Moving downloaded debug files to %s" % debug_dir)
+            retval = self._ce.RunCommand(command)
+            if retval != 0:
+                raise MissingFile(
+                    "Could not create directory %s"
+                    % os.path.join(debug_dir, "debug")
+                )
+
+        return debug_rel_path
+
+    def Run(
+        self,
+        chromeos_root,
+        xbuddy_label,
+        autotest_path,
+        debug_path,
+        download_debug,
+    ):
+        build_id = self.GetBuildID(chromeos_root, xbuddy_label)
+        image_name = (
+            "gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz"
+            % build_id
+        )
+
+        # Verify that image exists for build_id, before attempting to
+        # download it.
+        status = 0
+        if not test_flag.GetTestMode():
+            gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
+            cmd = "%s ls %s" % (gsutil_cmd, image_name)
+            status = self._ce.RunCommand(cmd)
+        if status != 0:
+            raise MissingImage("Cannot find official image: %s." % image_name)
+
+        image_path = self.DownloadImage(chromeos_root, build_id, image_name)
+        self.UncompressImage(chromeos_root, build_id)
+
+        if self.log_level != "quiet":
+            self._logger.LogOutput("Using image from %s."
% image_path) + + if autotest_path == "": + autotest_path = self.DownloadAutotestFiles(chromeos_root, build_id) + + if debug_path == "" and download_debug: + debug_path = self.DownloadDebugFile(chromeos_root, build_id) + + return image_path, autotest_path, debug_path diff --git a/crosperf/download_images_buildid_test.py b/crosperf/download_images_buildid_test.py index 036f1442..b5063ed9 100755 --- a/crosperf/download_images_buildid_test.py +++ b/crosperf/download_images_buildid_test.py @@ -13,6 +13,7 @@ import sys import download_images + # On May 1, 2014: # latest : lumpy-release/R34-5500.132.0 # latest-beta : lumpy-release/R35-5712.43.0 @@ -22,93 +23,111 @@ import download_images class ImageDownloaderBuildIDTest(object): - """Test translation of xbuddy names.""" - - def __init__(self): - parser = argparse.ArgumentParser() - parser.add_argument( - '-c', - '--chromeos_root', - dest='chromeos_root', - help='Directory containing ChromeOS root.') - - options = parser.parse_known_args(sys.argv[1:])[0] - if options.chromeos_root is None: - self._usage(parser, '--chromeos_root must be set') - self.chromeos_root = options.chromeos_root - self.tests_passed = 0 - self.tests_run = 0 - self.tests_failed = 0 - - def _usage(self, parser, message): - print('ERROR: ' + message) - parser.print_help() - sys.exit(0) - - def print_test_status(self): - print('----------------------------------------\n') - print('Tests attempted: %d' % self.tests_run) - print('Tests passed: %d' % self.tests_passed) - print('Tests failed: %d' % self.tests_failed) - print('\n----------------------------------------') - - def assert_failure(self, msg): - print('Assert failure: %s' % msg) - self.print_test_status() - sys.exit(1) - - def assertIsNotNone(self, arg, arg_name): - if arg is None: - self.tests_failed = self.tests_failed + 1 - self.assert_failure('%s is not None' % arg_name) - - def assertNotEqual(self, arg1, arg2, arg1_name, arg2_name): - if arg1 == arg2: - self.tests_failed = self.tests_failed + 1 - self.assert_failure('%s is not NotEqual to %s' % (arg1_name, arg2_name)) - - def assertEqual(self, arg1, arg2, arg1_name, arg2_name): - if arg1 != arg2: - self.tests_failed = self.tests_failed + 1 - self.assert_failure('%s is not Equal to %s' % (arg1_name, arg2_name)) - - def test_one_id(self, downloader, test_id, result_string, exact_match): - print("Translating '%s'" % test_id) - self.tests_run = self.tests_run + 1 - - result = downloader.GetBuildID(self.chromeos_root, test_id) - # Verify that we got a build id back. - self.assertIsNotNone(result, 'result') - - # Verify that the result either contains or exactly matches the - # result_string, depending on the exact_match argument. 
- if exact_match: - self.assertEqual(result, result_string, 'result', result_string) - else: - self.assertNotEqual(result.find(result_string), -1, 'result.find', '-1') - self.tests_passed = self.tests_passed + 1 - - def test_get_build_id(self): - """Test that the actual translating of xbuddy names is working properly.""" - downloader = download_images.ImageDownloader(log_level='quiet') - - self.test_one_id(downloader, 'remote/lumpy/latest-dev', 'lumpy-release/R', - False) - self.test_one_id(downloader, - 'remote/trybot-lumpy-release-afdo-use/R35-5672.0.0-b86', - 'trybot-lumpy-release-afdo-use/R35-5672.0.0-b86', True) - self.test_one_id(downloader, 'remote/lumpy-release/R35-5672.0.0', - 'lumpy-release/R35-5672.0.0', True) - self.test_one_id(downloader, 'remote/lumpy/latest-dev', 'lumpy-release/R', - False) - self.test_one_id(downloader, 'remote/lumpy/latest-official', - 'lumpy-release/R', False) - self.test_one_id(downloader, 'remote/lumpy/latest-beta', 'lumpy-release/R', - False) - - self.print_test_status() - - -if __name__ == '__main__': - tester = ImageDownloaderBuildIDTest() - tester.test_get_build_id() + """Test translation of xbuddy names.""" + + def __init__(self): + parser = argparse.ArgumentParser() + parser.add_argument( + "-c", + "--chromeos_root", + dest="chromeos_root", + help="Directory containing ChromeOS root.", + ) + + options = parser.parse_known_args(sys.argv[1:])[0] + if options.chromeos_root is None: + self._usage(parser, "--chromeos_root must be set") + self.chromeos_root = options.chromeos_root + self.tests_passed = 0 + self.tests_run = 0 + self.tests_failed = 0 + + def _usage(self, parser, message): + print("ERROR: " + message) + parser.print_help() + sys.exit(0) + + def print_test_status(self): + print("----------------------------------------\n") + print("Tests attempted: %d" % self.tests_run) + print("Tests passed: %d" % self.tests_passed) + print("Tests failed: %d" % self.tests_failed) + print("\n----------------------------------------") + + def assert_failure(self, msg): + print("Assert failure: %s" % msg) + self.print_test_status() + sys.exit(1) + + def assertIsNotNone(self, arg, arg_name): + if arg is None: + self.tests_failed = self.tests_failed + 1 + self.assert_failure("%s is not None" % arg_name) + + def assertNotEqual(self, arg1, arg2, arg1_name, arg2_name): + if arg1 == arg2: + self.tests_failed = self.tests_failed + 1 + self.assert_failure( + "%s is not NotEqual to %s" % (arg1_name, arg2_name) + ) + + def assertEqual(self, arg1, arg2, arg1_name, arg2_name): + if arg1 != arg2: + self.tests_failed = self.tests_failed + 1 + self.assert_failure( + "%s is not Equal to %s" % (arg1_name, arg2_name) + ) + + def test_one_id(self, downloader, test_id, result_string, exact_match): + print("Translating '%s'" % test_id) + self.tests_run = self.tests_run + 1 + + result = downloader.GetBuildID(self.chromeos_root, test_id) + # Verify that we got a build id back. + self.assertIsNotNone(result, "result") + + # Verify that the result either contains or exactly matches the + # result_string, depending on the exact_match argument. 
+ if exact_match: + self.assertEqual(result, result_string, "result", result_string) + else: + self.assertNotEqual( + result.find(result_string), -1, "result.find", "-1" + ) + self.tests_passed = self.tests_passed + 1 + + def test_get_build_id(self): + """Test that the actual translating of xbuddy names is working properly.""" + downloader = download_images.ImageDownloader(log_level="quiet") + + self.test_one_id( + downloader, "remote/lumpy/latest-dev", "lumpy-release/R", False + ) + self.test_one_id( + downloader, + "remote/trybot-lumpy-release-afdo-use/R35-5672.0.0-b86", + "trybot-lumpy-release-afdo-use/R35-5672.0.0-b86", + True, + ) + self.test_one_id( + downloader, + "remote/lumpy-release/R35-5672.0.0", + "lumpy-release/R35-5672.0.0", + True, + ) + self.test_one_id( + downloader, "remote/lumpy/latest-dev", "lumpy-release/R", False + ) + self.test_one_id( + downloader, "remote/lumpy/latest-official", "lumpy-release/R", False + ) + self.test_one_id( + downloader, "remote/lumpy/latest-beta", "lumpy-release/R", False + ) + + self.print_test_status() + + +if __name__ == "__main__": + tester = ImageDownloaderBuildIDTest() + tester.test_get_build_id() diff --git a/crosperf/download_images_unittest.py b/crosperf/download_images_unittest.py index 73ac8d67..5206bd3d 100755 --- a/crosperf/download_images_unittest.py +++ b/crosperf/download_images_unittest.py @@ -12,266 +12,306 @@ import os import unittest import unittest.mock as mock -import download_images from cros_utils import command_executer from cros_utils import logger - +import download_images import test_flag -MOCK_LOGGER = logger.GetLogger(log_dir='', mock=True) + +MOCK_LOGGER = logger.GetLogger(log_dir="", mock=True) class ImageDownloaderTestcast(unittest.TestCase): - """The image downloader test class.""" - - def __init__(self, *args, **kwargs): - super(ImageDownloaderTestcast, self).__init__(*args, **kwargs) - self.called_download_image = False - self.called_uncompress_image = False - self.called_get_build_id = False - self.called_download_autotest_files = False - self.called_download_debug_file = False - - @mock.patch.object(os, 'makedirs') - @mock.patch.object(os.path, 'exists') - def test_download_image(self, mock_path_exists, mock_mkdirs): - - # Set mock and test values. - mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) - test_chroot = '/usr/local/home/chromeos' - test_build_id = 'lumpy-release/R36-5814.0.0' - image_path = ('gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz' - % test_build_id) - - downloader = download_images.ImageDownloader( - logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec) - - # Set os.path.exists to always return False and run downloader - mock_path_exists.return_value = False - test_flag.SetTestMode(True) - self.assertRaises(download_images.MissingImage, downloader.DownloadImage, - test_chroot, test_build_id, image_path) - - # Verify os.path.exists was called twice, with proper arguments. - self.assertEqual(mock_path_exists.call_count, 2) - mock_path_exists.assert_called_with( - '/usr/local/home/chromeos/chroot/tmp/lumpy-release/' - 'R36-5814.0.0/chromiumos_test_image.bin') - mock_path_exists.assert_any_call( - '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0') - - # Verify we called os.mkdirs - self.assertEqual(mock_mkdirs.call_count, 1) - mock_mkdirs.assert_called_with( - '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0') - - # Verify we called RunCommand once, with proper arguments. 
- self.assertEqual(mock_cmd_exec.RunCommand.call_count, 1) - expected_args = ( - '/usr/local/home/chromeos/src/chromium/depot_tools/gsutil.py ' - 'cp gs://chromeos-image-archive/lumpy-release/R36-5814.0.0/' - 'chromiumos_test_image.tar.xz ' - '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0') - - mock_cmd_exec.RunCommand.assert_called_with(expected_args) - - # Reset the velues in the mocks; set os.path.exists to always return True. - mock_path_exists.reset_mock() - mock_cmd_exec.reset_mock() - mock_path_exists.return_value = True - - # Run downloader - downloader.DownloadImage(test_chroot, test_build_id, image_path) - - # Verify os.path.exists was called twice, with proper arguments. - self.assertEqual(mock_path_exists.call_count, 2) - mock_path_exists.assert_called_with( - '/usr/local/home/chromeos/chroot/tmp/lumpy-release/' - 'R36-5814.0.0/chromiumos_test_image.bin') - mock_path_exists.assert_any_call( - '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0') - - # Verify we made no RunCommand or ChrootRunCommand calls (since - # os.path.exists returned True, there was no work do be done). - self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0) - self.assertEqual(mock_cmd_exec.ChrootRunCommand.call_count, 0) - - @mock.patch.object(os.path, 'exists') - def test_uncompress_image(self, mock_path_exists): - - # set mock and test values. - mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) - test_chroot = '/usr/local/home/chromeos' - test_build_id = 'lumpy-release/R36-5814.0.0' - - downloader = download_images.ImageDownloader( - logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec) - - # Set os.path.exists to always return False and run uncompress. - mock_path_exists.return_value = False - self.assertRaises(download_images.MissingImage, downloader.UncompressImage, - test_chroot, test_build_id) - - # Verify os.path.exists was called once, with correct arguments. - self.assertEqual(mock_path_exists.call_count, 1) - mock_path_exists.assert_called_with( - '/usr/local/home/chromeos/chroot/tmp/lumpy-release/' - 'R36-5814.0.0/chromiumos_test_image.bin') - - # Verify RunCommand was called twice with correct arguments. - self.assertEqual(mock_cmd_exec.RunCommand.call_count, 2) - # Call 1, should have 2 arguments - self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[0]), 2) - actual_arg = mock_cmd_exec.RunCommand.call_args_list[0][0] - expected_arg = ( - 'cd /usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0 ; ' - 'tar -Jxf chromiumos_test_image.tar.xz ',) - self.assertEqual(expected_arg, actual_arg) - # 2nd arg must be exception handler - except_handler_string = 'RunCommandExceptionHandler.HandleException' - self.assertTrue( - except_handler_string in repr(mock_cmd_exec.RunCommand.call_args_list[0] - [1])) - - # Call 2, should have 2 arguments - self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[1]), 2) - actual_arg = mock_cmd_exec.RunCommand.call_args_list[1][0] - expected_arg = ( - 'cd /usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0 ; ' - 'rm -f chromiumos_test_image.bin ',) - self.assertEqual(expected_arg, actual_arg) - # 2nd arg must be empty - self.assertTrue('{}' in repr(mock_cmd_exec.RunCommand.call_args_list[1][1])) - - # Set os.path.exists to always return True and run uncompress. - mock_path_exists.reset_mock() - mock_cmd_exec.reset_mock() - mock_path_exists.return_value = True - downloader.UncompressImage(test_chroot, test_build_id) - - # Verify os.path.exists was called once, with correct arguments. 
- self.assertEqual(mock_path_exists.call_count, 1) - mock_path_exists.assert_called_with( - '/usr/local/home/chromeos/chroot/tmp/lumpy-release/' - 'R36-5814.0.0/chromiumos_test_image.bin') - - # Verify RunCommand was not called. - self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0) - - def test_run(self): - - # Set test arguments - test_chroot = '/usr/local/home/chromeos' - test_build_id = 'remote/lumpy/latest-dev' - test_empty_autotest_path = '' - test_empty_debug_path = '' - test_autotest_path = '/tmp/autotest' - test_debug_path = '/tmp/debug' - download_debug = True - - # Set values to test/check. - self.called_download_image = False - self.called_uncompress_image = False - self.called_get_build_id = False - self.called_download_autotest_files = False - self.called_download_debug_file = False - - # Define fake stub functions for Run to call - def FakeGetBuildID(unused_root, unused_xbuddy_label): - self.called_get_build_id = True - return 'lumpy-release/R36-5814.0.0' - - def GoodDownloadImage(root, build_id, image_path): - if root or build_id or image_path: - pass - self.called_download_image = True - return 'chromiumos_test_image.bin' - - def BadDownloadImage(root, build_id, image_path): - if root or build_id or image_path: - pass - self.called_download_image = True - raise download_images.MissingImage('Could not download image') - - def FakeUncompressImage(root, build_id): - if root or build_id: - pass - self.called_uncompress_image = True - return 0 - - def FakeDownloadAutotestFiles(root, build_id): - if root or build_id: - pass - self.called_download_autotest_files = True - return 'autotest' - - def FakeDownloadDebugFile(root, build_id): - if root or build_id: - pass - self.called_download_debug_file = True - return 'debug' - - # Initialize downloader - downloader = download_images.ImageDownloader(logger_to_use=MOCK_LOGGER) - - # Set downloader to call fake stubs. - downloader.GetBuildID = FakeGetBuildID - downloader.UncompressImage = FakeUncompressImage - downloader.DownloadImage = GoodDownloadImage - downloader.DownloadAutotestFiles = FakeDownloadAutotestFiles - downloader.DownloadDebugFile = FakeDownloadDebugFile - - # Call Run. 
- image_path, autotest_path, debug_path = downloader.Run( - test_chroot, test_build_id, test_empty_autotest_path, - test_empty_debug_path, download_debug) - - # Make sure it called both _DownloadImage and _UncompressImage - self.assertTrue(self.called_download_image) - self.assertTrue(self.called_uncompress_image) - # Make sure it called DownloadAutotestFiles - self.assertTrue(self.called_download_autotest_files) - # Make sure it called DownloadDebugFile - self.assertTrue(self.called_download_debug_file) - # Make sure it returned an image and autotest path returned from this call - self.assertTrue(image_path == 'chromiumos_test_image.bin') - self.assertTrue(autotest_path == 'autotest') - self.assertTrue(debug_path == 'debug') - - # Call Run with a non-empty autotest and debug path - self.called_download_autotest_files = False - self.called_download_debug_file = False - - image_path, autotest_path, debug_path = downloader.Run( - test_chroot, test_build_id, test_autotest_path, test_debug_path, - download_debug) - - # Verify that downloadAutotestFiles was not called - self.assertFalse(self.called_download_autotest_files) - # Make sure it returned the specified autotest path returned from this call - self.assertTrue(autotest_path == test_autotest_path) - # Make sure it returned the specified debug path returned from this call - self.assertTrue(debug_path == test_debug_path) - - # Reset values; Now use fake stub that simulates DownloadImage failing. - self.called_download_image = False - self.called_uncompress_image = False - self.called_download_autotest_files = False - self.called_download_debug_file = False - downloader.DownloadImage = BadDownloadImage - - # Call Run again. - self.assertRaises(download_images.MissingImage, downloader.Run, test_chroot, - test_autotest_path, test_debug_path, test_build_id, - download_debug) - - # Verify that UncompressImage and downloadAutotestFiles were not called, - # since _DownloadImage "failed" - self.assertTrue(self.called_download_image) - self.assertFalse(self.called_uncompress_image) - self.assertFalse(self.called_download_autotest_files) - self.assertFalse(self.called_download_debug_file) - - -if __name__ == '__main__': - unittest.main() + """The image downloader test class.""" + + def __init__(self, *args, **kwargs): + super(ImageDownloaderTestcast, self).__init__(*args, **kwargs) + self.called_download_image = False + self.called_uncompress_image = False + self.called_get_build_id = False + self.called_download_autotest_files = False + self.called_download_debug_file = False + + @mock.patch.object(os, "makedirs") + @mock.patch.object(os.path, "exists") + def test_download_image(self, mock_path_exists, mock_mkdirs): + + # Set mock and test values. + mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) + test_chroot = "/usr/local/home/chromeos" + test_build_id = "lumpy-release/R36-5814.0.0" + image_path = ( + "gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz" + % test_build_id + ) + + downloader = download_images.ImageDownloader( + logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec + ) + + # Set os.path.exists to always return False and run downloader + mock_path_exists.return_value = False + test_flag.SetTestMode(True) + self.assertRaises( + download_images.MissingImage, + downloader.DownloadImage, + test_chroot, + test_build_id, + image_path, + ) + + # Verify os.path.exists was called twice, with proper arguments. 
+        self.assertEqual(mock_path_exists.call_count, 2)
+        mock_path_exists.assert_called_with(
+            "/usr/local/home/chromeos/chroot/tmp/lumpy-release/"
+            "R36-5814.0.0/chromiumos_test_image.bin"
+        )
+        mock_path_exists.assert_any_call(
+            "/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0"
+        )
+
+        # Verify we called os.makedirs
+        self.assertEqual(mock_mkdirs.call_count, 1)
+        mock_mkdirs.assert_called_with(
+            "/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0"
+        )
+
+        # Verify we called RunCommand once, with proper arguments.
+        self.assertEqual(mock_cmd_exec.RunCommand.call_count, 1)
+        expected_args = (
+            "/usr/local/home/chromeos/src/chromium/depot_tools/gsutil.py "
+            "cp gs://chromeos-image-archive/lumpy-release/R36-5814.0.0/"
+            "chromiumos_test_image.tar.xz "
+            "/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0"
+        )
+
+        mock_cmd_exec.RunCommand.assert_called_with(expected_args)
+
+        # Reset the values in the mocks; set os.path.exists to always return True.
+        mock_path_exists.reset_mock()
+        mock_cmd_exec.reset_mock()
+        mock_path_exists.return_value = True
+
+        # Run downloader
+        downloader.DownloadImage(test_chroot, test_build_id, image_path)
+
+        # Verify os.path.exists was called twice, with proper arguments.
+        self.assertEqual(mock_path_exists.call_count, 2)
+        mock_path_exists.assert_called_with(
+            "/usr/local/home/chromeos/chroot/tmp/lumpy-release/"
+            "R36-5814.0.0/chromiumos_test_image.bin"
+        )
+        mock_path_exists.assert_any_call(
+            "/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0"
+        )
+
+        # Verify we made no RunCommand or ChrootRunCommand calls (since
+        # os.path.exists returned True, there was no work to be done).
+        self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0)
+        self.assertEqual(mock_cmd_exec.ChrootRunCommand.call_count, 0)
+
+    @mock.patch.object(os.path, "exists")
+    def test_uncompress_image(self, mock_path_exists):
+
+        # Set mock and test values.
+        mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+        test_chroot = "/usr/local/home/chromeos"
+        test_build_id = "lumpy-release/R36-5814.0.0"
+
+        downloader = download_images.ImageDownloader(
+            logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec
+        )
+
+        # Set os.path.exists to always return False and run uncompress.
+        mock_path_exists.return_value = False
+        self.assertRaises(
+            download_images.MissingImage,
+            downloader.UncompressImage,
+            test_chroot,
+            test_build_id,
+        )
+
+        # Verify os.path.exists was called once, with correct arguments.
+        self.assertEqual(mock_path_exists.call_count, 1)
+        mock_path_exists.assert_called_with(
+            "/usr/local/home/chromeos/chroot/tmp/lumpy-release/"
+            "R36-5814.0.0/chromiumos_test_image.bin"
+        )
+
+        # Verify RunCommand was called twice with correct arguments.
+ self.assertEqual(mock_cmd_exec.RunCommand.call_count, 2) + # Call 1, should have 2 arguments + self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[0]), 2) + actual_arg = mock_cmd_exec.RunCommand.call_args_list[0][0] + expected_arg = ( + "cd /usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0 ; " + "tar -Jxf chromiumos_test_image.tar.xz ", + ) + self.assertEqual(expected_arg, actual_arg) + # 2nd arg must be exception handler + except_handler_string = "RunCommandExceptionHandler.HandleException" + self.assertTrue( + except_handler_string + in repr(mock_cmd_exec.RunCommand.call_args_list[0][1]) + ) + + # Call 2, should have 2 arguments + self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[1]), 2) + actual_arg = mock_cmd_exec.RunCommand.call_args_list[1][0] + expected_arg = ( + "cd /usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0 ; " + "rm -f chromiumos_test_image.bin ", + ) + self.assertEqual(expected_arg, actual_arg) + # 2nd arg must be empty + self.assertTrue( + "{}" in repr(mock_cmd_exec.RunCommand.call_args_list[1][1]) + ) + + # Set os.path.exists to always return True and run uncompress. + mock_path_exists.reset_mock() + mock_cmd_exec.reset_mock() + mock_path_exists.return_value = True + downloader.UncompressImage(test_chroot, test_build_id) + + # Verify os.path.exists was called once, with correct arguments. + self.assertEqual(mock_path_exists.call_count, 1) + mock_path_exists.assert_called_with( + "/usr/local/home/chromeos/chroot/tmp/lumpy-release/" + "R36-5814.0.0/chromiumos_test_image.bin" + ) + + # Verify RunCommand was not called. + self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0) + + def test_run(self): + + # Set test arguments + test_chroot = "/usr/local/home/chromeos" + test_build_id = "remote/lumpy/latest-dev" + test_empty_autotest_path = "" + test_empty_debug_path = "" + test_autotest_path = "/tmp/autotest" + test_debug_path = "/tmp/debug" + download_debug = True + + # Set values to test/check. + self.called_download_image = False + self.called_uncompress_image = False + self.called_get_build_id = False + self.called_download_autotest_files = False + self.called_download_debug_file = False + + # Define fake stub functions for Run to call + def FakeGetBuildID(unused_root, unused_xbuddy_label): + self.called_get_build_id = True + return "lumpy-release/R36-5814.0.0" + + def GoodDownloadImage(root, build_id, image_path): + if root or build_id or image_path: + pass + self.called_download_image = True + return "chromiumos_test_image.bin" + + def BadDownloadImage(root, build_id, image_path): + if root or build_id or image_path: + pass + self.called_download_image = True + raise download_images.MissingImage("Could not download image") + + def FakeUncompressImage(root, build_id): + if root or build_id: + pass + self.called_uncompress_image = True + return 0 + + def FakeDownloadAutotestFiles(root, build_id): + if root or build_id: + pass + self.called_download_autotest_files = True + return "autotest" + + def FakeDownloadDebugFile(root, build_id): + if root or build_id: + pass + self.called_download_debug_file = True + return "debug" + + # Initialize downloader + downloader = download_images.ImageDownloader(logger_to_use=MOCK_LOGGER) + + # Set downloader to call fake stubs. 
+ downloader.GetBuildID = FakeGetBuildID + downloader.UncompressImage = FakeUncompressImage + downloader.DownloadImage = GoodDownloadImage + downloader.DownloadAutotestFiles = FakeDownloadAutotestFiles + downloader.DownloadDebugFile = FakeDownloadDebugFile + + # Call Run. + image_path, autotest_path, debug_path = downloader.Run( + test_chroot, + test_build_id, + test_empty_autotest_path, + test_empty_debug_path, + download_debug, + ) + + # Make sure it called both _DownloadImage and _UncompressImage + self.assertTrue(self.called_download_image) + self.assertTrue(self.called_uncompress_image) + # Make sure it called DownloadAutotestFiles + self.assertTrue(self.called_download_autotest_files) + # Make sure it called DownloadDebugFile + self.assertTrue(self.called_download_debug_file) + # Make sure it returned an image and autotest path returned from this call + self.assertTrue(image_path == "chromiumos_test_image.bin") + self.assertTrue(autotest_path == "autotest") + self.assertTrue(debug_path == "debug") + + # Call Run with a non-empty autotest and debug path + self.called_download_autotest_files = False + self.called_download_debug_file = False + + image_path, autotest_path, debug_path = downloader.Run( + test_chroot, + test_build_id, + test_autotest_path, + test_debug_path, + download_debug, + ) + + # Verify that downloadAutotestFiles was not called + self.assertFalse(self.called_download_autotest_files) + # Make sure it returned the specified autotest path returned from this call + self.assertTrue(autotest_path == test_autotest_path) + # Make sure it returned the specified debug path returned from this call + self.assertTrue(debug_path == test_debug_path) + + # Reset values; Now use fake stub that simulates DownloadImage failing. + self.called_download_image = False + self.called_uncompress_image = False + self.called_download_autotest_files = False + self.called_download_debug_file = False + downloader.DownloadImage = BadDownloadImage + + # Call Run again. 
+ self.assertRaises( + download_images.MissingImage, + downloader.Run, + test_chroot, + test_autotest_path, + test_debug_path, + test_build_id, + download_debug, + ) + + # Verify that UncompressImage and downloadAutotestFiles were not called, + # since _DownloadImage "failed" + self.assertTrue(self.called_download_image) + self.assertFalse(self.called_uncompress_image) + self.assertFalse(self.called_download_autotest_files) + self.assertFalse(self.called_download_debug_file) + + +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/experiment.py b/crosperf/experiment.py index 0cf01db7..cfd56b8f 100644 --- a/crosperf/experiment.py +++ b/crosperf/experiment.py @@ -8,14 +8,12 @@ from __future__ import print_function import os -import time - from threading import Lock +import time +import benchmark_run from cros_utils import logger from cros_utils import misc - -import benchmark_run from machine_manager import BadChecksum from machine_manager import MachineManager from machine_manager import MockMachineManager @@ -23,208 +21,249 @@ import test_flag class Experiment(object): - """Class representing an Experiment to be run.""" - - def __init__(self, name, remote, working_directory, chromeos_root, - cache_conditions, labels, benchmarks, experiment_file, email_to, - acquire_timeout, log_dir, log_level, share_cache, - results_directory, compress_results, locks_directory, cwp_dso, - ignore_min_max, crosfleet, dut_config, no_lock: bool): - self.name = name - self.working_directory = working_directory - self.remote = remote - self.chromeos_root = chromeos_root - self.cache_conditions = cache_conditions - self.experiment_file = experiment_file - self.email_to = email_to - if not results_directory: - self.results_directory = os.path.join(self.working_directory, - self.name + '_results') - else: - self.results_directory = misc.CanonicalizePath(results_directory) - self.compress_results = compress_results - self.log_dir = log_dir - self.log_level = log_level - self.labels = labels - self.benchmarks = benchmarks - self.num_complete = 0 - self.num_run_complete = 0 - self.share_cache = share_cache - self.active_threads = [] - self.locks_dir = locks_directory - self.locked_machines = [] - self.lock_mgr = None - self.cwp_dso = cwp_dso - self.ignore_min_max = ignore_min_max - self.crosfleet = crosfleet - self.no_lock = no_lock - self.l = logger.GetLogger(log_dir) - - if not self.benchmarks: - raise RuntimeError('No benchmarks specified') - if not self.labels: - raise RuntimeError('No labels specified') - if not remote and not self.crosfleet: - raise RuntimeError('No remote hosts specified') - - # We need one chromeos_root to run the benchmarks in, but it doesn't - # matter where it is, unless the ABIs are different. - if not chromeos_root: - for label in self.labels: - if label.chromeos_root: - chromeos_root = label.chromeos_root - break - if not chromeos_root: - raise RuntimeError('No chromeos_root given and could not determine ' - 'one from the image path.') - - machine_manager_fn = MachineManager - if test_flag.GetTestMode(): - machine_manager_fn = MockMachineManager - self.machine_manager = machine_manager_fn(chromeos_root, acquire_timeout, - log_level, locks_directory) - self.l = logger.GetLogger(log_dir) - - for machine in self.remote: - # machine_manager.AddMachine only adds reachable machines. - self.machine_manager.AddMachine(machine) - # Now machine_manager._all_machines contains a list of reachable - # machines. This is a subset of self.remote. We make both lists the same. 
- self.remote = [m.name for m in self.machine_manager.GetAllMachines()] - if not self.remote: - raise RuntimeError('No machine available for running experiment.') - - # Initialize checksums for all machines, ignore errors at this time. - # The checksum will be double checked, and image will be flashed after - # duts are locked/leased. - self.SetCheckSums() - - self.start_time = None - self.benchmark_runs = self._GenerateBenchmarkRuns(dut_config) - - self._schedv2 = None - self._internal_counter_lock = Lock() - - def set_schedv2(self, schedv2): - self._schedv2 = schedv2 - - def schedv2(self): - return self._schedv2 - - def _GenerateBenchmarkRuns(self, dut_config): - """Generate benchmark runs from labels and benchmark defintions.""" - benchmark_runs = [] - for label in self.labels: - for benchmark in self.benchmarks: - for iteration in range(1, benchmark.iterations + 1): - - benchmark_run_name = '%s: %s (%s)' % (label.name, benchmark.name, - iteration) - full_name = '%s_%s_%s' % (label.name, benchmark.name, iteration) - logger_to_use = logger.Logger(self.log_dir, 'run.%s' % (full_name), - True) - benchmark_runs.append( - benchmark_run.BenchmarkRun(benchmark_run_name, benchmark, label, - iteration, self.cache_conditions, - self.machine_manager, logger_to_use, - self.log_level, self.share_cache, - dut_config)) - - return benchmark_runs - - def SetCheckSums(self, forceSameImage=False): - for label in self.labels: - # We filter out label remotes that are not reachable (not in - # self.remote). So each label.remote is a sublist of experiment.remote. - label.remote = [r for r in label.remote if r in self.remote] - try: - self.machine_manager.ComputeCommonCheckSum(label) - except BadChecksum: - # Force same image on all machines, then we do checksum again. No - # bailout if checksums still do not match. - # TODO (zhizhouy): Need to figure out how flashing image will influence - # the new checksum. - if forceSameImage: - self.machine_manager.ForceSameImageToAllMachines(label) - self.machine_manager.ComputeCommonCheckSum(label) - - self.machine_manager.ComputeCommonCheckSumString(label) - - def Build(self): - pass - - def Terminate(self): - if self._schedv2 is not None: - self._schedv2.terminate() - else: - for t in self.benchmark_runs: - if t.isAlive(): - self.l.LogError("Terminating run: '%s'." % t.name) - t.Terminate() - - def IsComplete(self): - if self._schedv2: - return self._schedv2.is_complete() - if self.active_threads: - for t in self.active_threads: - if t.isAlive(): - t.join(0) - if not t.isAlive(): - self.num_complete += 1 - if not t.cache_hit: - self.num_run_complete += 1 - self.active_threads.remove(t) - return False - return True - - def BenchmarkRunFinished(self, br): - """Update internal counters after br finishes. - - Note this is only used by schedv2 and is called by multiple threads. - Never throw any exception here. - """ - - assert self._schedv2 is not None - with self._internal_counter_lock: - self.num_complete += 1 - if not br.cache_hit: - self.num_run_complete += 1 - - def Run(self): - self.start_time = time.time() - if self._schedv2 is not None: - self._schedv2.run_sched() - else: - self.active_threads = [] - for run in self.benchmark_runs: - # Set threads to daemon so program exits when ctrl-c is pressed. 
- run.daemon = True - run.start() - self.active_threads.append(run) - - def SetCacheConditions(self, cache_conditions): - for run in self.benchmark_runs: - run.SetCacheConditions(cache_conditions) - - def Cleanup(self): - """Make sure all machines are unlocked.""" - if self.locks_dir: - # We are using the file locks mechanism, so call machine_manager.Cleanup - # to unlock everything. - self.machine_manager.Cleanup() - - if test_flag.GetTestMode() or not self.locked_machines: - return - - # If we locked any machines earlier, make sure we unlock them now. - if self.lock_mgr: - machine_states = self.lock_mgr.GetMachineStates('unlock') - self.lock_mgr.CheckMachineLocks(machine_states, 'unlock') - unlocked_machines = self.lock_mgr.UpdateMachines(False) - failed_machines = [ - m for m in self.locked_machines if m not in unlocked_machines - ] - if failed_machines: - raise RuntimeError('These machines are not unlocked correctly: %s' % - failed_machines) - self.lock_mgr = None + """Class representing an Experiment to be run.""" + + def __init__( + self, + name, + remote, + working_directory, + chromeos_root, + cache_conditions, + labels, + benchmarks, + experiment_file, + email_to, + acquire_timeout, + log_dir, + log_level, + share_cache, + results_directory, + compress_results, + locks_directory, + cwp_dso, + ignore_min_max, + crosfleet, + dut_config, + no_lock: bool, + ): + self.name = name + self.working_directory = working_directory + self.remote = remote + self.chromeos_root = chromeos_root + self.cache_conditions = cache_conditions + self.experiment_file = experiment_file + self.email_to = email_to + if not results_directory: + self.results_directory = os.path.join( + self.working_directory, self.name + "_results" + ) + else: + self.results_directory = misc.CanonicalizePath(results_directory) + self.compress_results = compress_results + self.log_dir = log_dir + self.log_level = log_level + self.labels = labels + self.benchmarks = benchmarks + self.num_complete = 0 + self.num_run_complete = 0 + self.share_cache = share_cache + self.active_threads = [] + self.locks_dir = locks_directory + self.locked_machines = [] + self.lock_mgr = None + self.cwp_dso = cwp_dso + self.ignore_min_max = ignore_min_max + self.crosfleet = crosfleet + self.no_lock = no_lock + self.l = logger.GetLogger(log_dir) + + if not self.benchmarks: + raise RuntimeError("No benchmarks specified") + if not self.labels: + raise RuntimeError("No labels specified") + if not remote and not self.crosfleet: + raise RuntimeError("No remote hosts specified") + + # We need one chromeos_root to run the benchmarks in, but it doesn't + # matter where it is, unless the ABIs are different. + if not chromeos_root: + for label in self.labels: + if label.chromeos_root: + chromeos_root = label.chromeos_root + break + if not chromeos_root: + raise RuntimeError( + "No chromeos_root given and could not determine " + "one from the image path." + ) + + machine_manager_fn = MachineManager + if test_flag.GetTestMode(): + machine_manager_fn = MockMachineManager + self.machine_manager = machine_manager_fn( + chromeos_root, acquire_timeout, log_level, locks_directory + ) + self.l = logger.GetLogger(log_dir) + + for machine in self.remote: + # machine_manager.AddMachine only adds reachable machines. + self.machine_manager.AddMachine(machine) + # Now machine_manager._all_machines contains a list of reachable + # machines. This is a subset of self.remote. We make both lists the same. 
+        self.remote = [m.name for m in self.machine_manager.GetAllMachines()]
+        if not self.remote:
+            raise RuntimeError("No machine available for running experiment.")
+
+        # Initialize checksums for all machines, ignore errors at this time.
+        # The checksum will be double checked, and image will be flashed after
+        # duts are locked/leased.
+        self.SetCheckSums()
+
+        self.start_time = None
+        self.benchmark_runs = self._GenerateBenchmarkRuns(dut_config)
+
+        self._schedv2 = None
+        self._internal_counter_lock = Lock()
+
+    def set_schedv2(self, schedv2):
+        self._schedv2 = schedv2
+
+    def schedv2(self):
+        return self._schedv2
+
+    def _GenerateBenchmarkRuns(self, dut_config):
+        """Generate benchmark runs from labels and benchmark definitions."""
+        benchmark_runs = []
+        for label in self.labels:
+            for benchmark in self.benchmarks:
+                for iteration in range(1, benchmark.iterations + 1):
+
+                    benchmark_run_name = "%s: %s (%s)" % (
+                        label.name,
+                        benchmark.name,
+                        iteration,
+                    )
+                    full_name = "%s_%s_%s" % (
+                        label.name,
+                        benchmark.name,
+                        iteration,
+                    )
+                    logger_to_use = logger.Logger(
+                        self.log_dir, "run.%s" % (full_name), True
+                    )
+                    benchmark_runs.append(
+                        benchmark_run.BenchmarkRun(
+                            benchmark_run_name,
+                            benchmark,
+                            label,
+                            iteration,
+                            self.cache_conditions,
+                            self.machine_manager,
+                            logger_to_use,
+                            self.log_level,
+                            self.share_cache,
+                            dut_config,
+                        )
+                    )
+
+        return benchmark_runs
+
+    def SetCheckSums(self, forceSameImage=False):
+        for label in self.labels:
+            # We filter out label remotes that are not reachable (not in
+            # self.remote). So each label.remote is a sublist of experiment.remote.
+            label.remote = [r for r in label.remote if r in self.remote]
+            try:
+                self.machine_manager.ComputeCommonCheckSum(label)
+            except BadChecksum:
+                # Force same image on all machines, then we do checksum again. No
+                # bailout if checksums still do not match.
+                # TODO (zhizhouy): Need to figure out how flashing image will influence
+                # the new checksum.
+                if forceSameImage:
+                    self.machine_manager.ForceSameImageToAllMachines(label)
+                    self.machine_manager.ComputeCommonCheckSum(label)
+
+            self.machine_manager.ComputeCommonCheckSumString(label)
+
+    def Build(self):
+        pass
+
+    def Terminate(self):
+        if self._schedv2 is not None:
+            self._schedv2.terminate()
+        else:
+            for t in self.benchmark_runs:
+                if t.isAlive():
+                    self.l.LogError("Terminating run: '%s'." % t.name)
+                    t.Terminate()
+
+    def IsComplete(self):
+        if self._schedv2:
+            return self._schedv2.is_complete()
+        if self.active_threads:
+            for t in self.active_threads:
+                if t.isAlive():
+                    t.join(0)
+                if not t.isAlive():
+                    self.num_complete += 1
+                    if not t.cache_hit:
+                        self.num_run_complete += 1
+                    self.active_threads.remove(t)
+            return False
+        return True
+
+    def BenchmarkRunFinished(self, br):
+        """Update internal counters after br finishes.
+
+        Note this is only used by schedv2 and is called by multiple threads.
+        Never throw any exception here.
+        """
+
+        assert self._schedv2 is not None
+        with self._internal_counter_lock:
+            self.num_complete += 1
+            if not br.cache_hit:
+                self.num_run_complete += 1
+
+    def Run(self):
+        self.start_time = time.time()
+        if self._schedv2 is not None:
+            self._schedv2.run_sched()
+        else:
+            self.active_threads = []
+            for run in self.benchmark_runs:
+                # Set threads to daemon so program exits when ctrl-c is pressed.
+ run.daemon = True + run.start() + self.active_threads.append(run) + + def SetCacheConditions(self, cache_conditions): + for run in self.benchmark_runs: + run.SetCacheConditions(cache_conditions) + + def Cleanup(self): + """Make sure all machines are unlocked.""" + if self.locks_dir: + # We are using the file locks mechanism, so call machine_manager.Cleanup + # to unlock everything. + self.machine_manager.Cleanup() + + if test_flag.GetTestMode() or not self.locked_machines: + return + + # If we locked any machines earlier, make sure we unlock them now. + if self.lock_mgr: + machine_states = self.lock_mgr.GetMachineStates("unlock") + self.lock_mgr.CheckMachineLocks(machine_states, "unlock") + unlocked_machines = self.lock_mgr.UpdateMachines(False) + failed_machines = [ + m for m in self.locked_machines if m not in unlocked_machines + ] + if failed_machines: + raise RuntimeError( + "These machines are not unlocked correctly: %s" + % failed_machines + ) + self.lock_mgr = None diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py index 882f652f..9a89cb9c 100644 --- a/crosperf/experiment_factory.py +++ b/crosperf/experiment_factory.py @@ -6,76 +6,83 @@ """A module to generate experiments.""" from __future__ import print_function + import os import re import socket import sys from benchmark import Benchmark -import config -from cros_utils import logger from cros_utils import command_executer +from cros_utils import logger from experiment import Experiment +import file_lock_machine from label import Label from label import MockLabel from results_cache import CacheConditions import test_flag -import file_lock_machine + +import config + # Users may want to run Telemetry tests either individually, or in # specified sets. Here we define sets of tests that users may want # to run together. 
telemetry_perfv2_tests = [ - 'kraken', - 'octane', + "kraken", + "octane", ] telemetry_pagecycler_tests = [ - 'page_cycler_v2.intl_ar_fa_he', - 'page_cycler_v2.intl_es_fr_pt-BR', - 'page_cycler_v2.intl_hi_ru', - 'page_cycler_v2.intl_ja_zh', - 'page_cycler_v2.intl_ko_th_vi', - 'page_cycler_v2.typical_25', + "page_cycler_v2.intl_ar_fa_he", + "page_cycler_v2.intl_es_fr_pt-BR", + "page_cycler_v2.intl_hi_ru", + "page_cycler_v2.intl_ja_zh", + "page_cycler_v2.intl_ko_th_vi", + "page_cycler_v2.typical_25", ] telemetry_toolchain_old_perf_tests = [ - 'page_cycler_v2.intl_es_fr_pt-BR', - 'page_cycler_v2.intl_hi_ru', - 'page_cycler_v2.intl_ja_zh', - 'page_cycler_v2.intl_ko_th_vi', - 'page_cycler_v2.netsim.top_10', - 'page_cycler_v2.typical_25', - 'spaceport', - 'tab_switching.top_10', + "page_cycler_v2.intl_es_fr_pt-BR", + "page_cycler_v2.intl_hi_ru", + "page_cycler_v2.intl_ja_zh", + "page_cycler_v2.intl_ko_th_vi", + "page_cycler_v2.netsim.top_10", + "page_cycler_v2.typical_25", + "spaceport", + "tab_switching.top_10", ] telemetry_toolchain_perf_tests = [ - 'octane', 'kraken', 'speedometer', 'speedometer2', 'jetstream2' + "octane", + "kraken", + "speedometer", + "speedometer2", + "jetstream2", ] graphics_perf_tests = [ - 'graphics_GLBench', - 'graphics_GLMark2', - 'graphics_SanAngeles', - 'graphics_WebGLAquarium', - 'graphics_WebGLPerformance', + "graphics_GLBench", + "graphics_GLMark2", + "graphics_SanAngeles", + "graphics_WebGLAquarium", + "graphics_WebGLPerformance", ] # TODO: disable rendering.desktop by default as the benchmark is # currently in a bad state # page_cycler_v2.typical_25 is deprecated and the recommend replacement is # loading.desktop@@typical (crbug.com/916340) telemetry_crosbolt_perf_tests = [ - 'octane', - 'kraken', - 'speedometer2', - 'jetstream', - 'loading.desktop', + "octane", + "kraken", + "speedometer2", + "jetstream", + "loading.desktop", # 'rendering.desktop', ] crosbolt_perf_tests = [ - 'graphics_WebGLAquarium', - 'tast.video.PlaybackPerfVP91080P30FPS', + "graphics_WebGLAquarium", + "tast.video.PlaybackPerfVP91080P30FPS", ] # 'cheets_AntutuTest', @@ -85,424 +92,582 @@ crosbolt_perf_tests = [ # ] dso_list = [ - 'all', - 'chrome', - 'kallsyms', + "all", + "chrome", + "kallsyms", ] class ExperimentFactory(object): - """Factory class for building an Experiment, given an ExperimentFile as input. - - This factory is currently hardcoded to produce an experiment for running - ChromeOS benchmarks, but the idea is that in the future, other types - of experiments could be produced. 
- """ - - def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args, - iterations, rm_chroot_tmp, perf_args, suite, - show_all_results, retries, run_local, cwp_dso, - weight): - """Add all the tests in a set to the benchmarks list.""" - for test_name in benchmark_list: - telemetry_benchmark = Benchmark(test_name, test_name, test_args, - iterations, rm_chroot_tmp, perf_args, - suite, show_all_results, retries, - run_local, cwp_dso, weight) - benchmarks.append(telemetry_benchmark) - - def GetExperiment(self, experiment_file, working_directory, log_dir): - """Construct an experiment from an experiment file.""" - global_settings = experiment_file.GetGlobalSettings() - experiment_name = global_settings.GetField('name') - board = global_settings.GetField('board') - chromeos_root = global_settings.GetField('chromeos_root') - log_level = global_settings.GetField('logging_level') - if log_level not in ('quiet', 'average', 'verbose'): - log_level = 'verbose' - - crosfleet = global_settings.GetField('crosfleet') - no_lock = bool(global_settings.GetField('no_lock')) - # Check whether crosfleet tool is installed correctly for crosfleet mode. - if crosfleet and not self.CheckCrosfleetTool(chromeos_root, log_level): - sys.exit(0) - - remote = global_settings.GetField('remote') - # This is used to remove the ",' from the remote if user - # add them to the remote string. - new_remote = [] - if remote: - for i in remote: - c = re.sub('["\']', '', i) - new_remote.append(c) - remote = new_remote - rm_chroot_tmp = global_settings.GetField('rm_chroot_tmp') - perf_args = global_settings.GetField('perf_args') - download_debug = global_settings.GetField('download_debug') - # Do not download debug symbols when perf_args is not specified. - if not perf_args and download_debug: - download_debug = False - acquire_timeout = global_settings.GetField('acquire_timeout') - cache_dir = global_settings.GetField('cache_dir') - cache_only = global_settings.GetField('cache_only') - config.AddConfig('no_email', global_settings.GetField('no_email')) - share_cache = global_settings.GetField('share_cache') - results_dir = global_settings.GetField('results_dir') - compress_results = global_settings.GetField('compress_results') - # Warn user that option use_file_locks is deprecated. - use_file_locks = global_settings.GetField('use_file_locks') - if use_file_locks: - l = logger.GetLogger() - l.LogWarning('Option use_file_locks is deprecated, please remove it ' - 'from your experiment settings.') - locks_dir = global_settings.GetField('locks_dir') - # If not specified, set the locks dir to the default locks dir in - # file_lock_machine. - if not locks_dir: - locks_dir = file_lock_machine.Machine.LOCKS_DIR - if not os.path.exists(locks_dir): - raise RuntimeError('Cannot access default lock directory. 
' - 'Please run prodaccess or specify a local directory') - chrome_src = global_settings.GetField('chrome_src') - show_all_results = global_settings.GetField('show_all_results') - cwp_dso = global_settings.GetField('cwp_dso') - if cwp_dso and not cwp_dso in dso_list: - raise RuntimeError('The DSO specified is not supported') - ignore_min_max = global_settings.GetField('ignore_min_max') - dut_config = { - 'enable_aslr': global_settings.GetField('enable_aslr'), - 'intel_pstate': global_settings.GetField('intel_pstate'), - 'cooldown_time': global_settings.GetField('cooldown_time'), - 'cooldown_temp': global_settings.GetField('cooldown_temp'), - 'governor': global_settings.GetField('governor'), - 'cpu_usage': global_settings.GetField('cpu_usage'), - 'cpu_freq_pct': global_settings.GetField('cpu_freq_pct'), - 'turbostat': global_settings.GetField('turbostat'), - 'top_interval': global_settings.GetField('top_interval'), - } - - # Default cache hit conditions. The image checksum in the cache and the - # computed checksum of the image must match. Also a cache file must exist. - cache_conditions = [ - CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH - ] - if global_settings.GetField('rerun_if_failed'): - cache_conditions.append(CacheConditions.RUN_SUCCEEDED) - if global_settings.GetField('rerun'): - cache_conditions.append(CacheConditions.FALSE) - if global_settings.GetField('same_machine'): - cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH) - if global_settings.GetField('same_specs'): - cache_conditions.append(CacheConditions.MACHINES_MATCH) - - # Construct benchmarks. - # Some fields are common with global settings. The values are - # inherited and/or merged with the global settings values. - benchmarks = [] - all_benchmark_settings = experiment_file.GetSettings('benchmark') - - # Check if there is duplicated benchmark name - benchmark_names = {} - # Check if in cwp_dso mode, all benchmarks should have same iterations - cwp_dso_iterations = 0 - - for benchmark_settings in all_benchmark_settings: - benchmark_name = benchmark_settings.name - test_name = benchmark_settings.GetField('test_name') - if not test_name: - test_name = benchmark_name - test_args = benchmark_settings.GetField('test_args') - - # Rename benchmark name if 'story-filter' or 'story-tag-filter' specified - # in test_args. Make sure these two tags only appear once. - story_count = 0 - for arg in test_args.split(): - if '--story-filter=' in arg or '--story-tag-filter=' in arg: - story_count += 1 - if story_count > 1: - raise RuntimeError('Only one story or story-tag filter allowed in ' - 'a single benchmark run') - # Rename benchmark name with an extension of 'story'-option - benchmark_name = '%s@@%s' % (benchmark_name, arg.split('=')[-1]) - - # Check for duplicated benchmark name after renaming - if not benchmark_name in benchmark_names: - benchmark_names[benchmark_name] = True - else: - raise SyntaxError("Duplicate benchmark name: '%s'." 
% benchmark_name) - - iterations = benchmark_settings.GetField('iterations') - if cwp_dso: - if cwp_dso_iterations not in (0, iterations): - raise RuntimeError('Iterations of each benchmark run are not the ' - 'same') - cwp_dso_iterations = iterations - - suite = benchmark_settings.GetField('suite') - retries = benchmark_settings.GetField('retries') - run_local = benchmark_settings.GetField('run_local') - weight = benchmark_settings.GetField('weight') - if weight: - if not cwp_dso: - raise RuntimeError('Weight can only be set when DSO specified') - if suite != 'telemetry_Crosperf': - raise RuntimeError('CWP approximation weight only works with ' - 'telemetry_Crosperf suite') - if run_local: - raise RuntimeError('run_local must be set to False to use CWP ' - 'approximation') - if weight < 0: - raise RuntimeError('Weight should be a float >=0') - elif cwp_dso: - raise RuntimeError('With DSO specified, each benchmark should have a ' - 'weight') - - if suite == 'telemetry_Crosperf': - if test_name == 'all_perfv2': - self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, - test_args, iterations, rm_chroot_tmp, - perf_args, suite, show_all_results, retries, - run_local, cwp_dso, weight) - elif test_name == 'all_pagecyclers': - self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests, - test_args, iterations, rm_chroot_tmp, - perf_args, suite, show_all_results, retries, - run_local, cwp_dso, weight) - elif test_name == 'all_crosbolt_perf': - self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests, - test_args, iterations, rm_chroot_tmp, - perf_args, 'telemetry_Crosperf', - show_all_results, retries, run_local, - cwp_dso, weight) - self.AppendBenchmarkSet(benchmarks, - crosbolt_perf_tests, - '', - iterations, - rm_chroot_tmp, - perf_args, - '', - show_all_results, - retries, - run_local=False, - cwp_dso=cwp_dso, - weight=weight) - elif test_name == 'all_toolchain_perf': - self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests, - test_args, iterations, rm_chroot_tmp, - perf_args, suite, show_all_results, retries, - run_local, cwp_dso, weight) - # Add non-telemetry toolchain-perf benchmarks: - - # Tast test platform.ReportDiskUsage for image size. - benchmarks.append( - Benchmark( - 'platform.ReportDiskUsage', - 'platform.ReportDiskUsage', - '', - 1, # This is not a performance benchmark, only run once. - rm_chroot_tmp, - '', - 'tast', # Specify the suite to be 'tast' - show_all_results, - retries)) - - # TODO: crbug.com/1057755 Do not enable graphics_WebGLAquarium until - # it gets fixed. 
- # - # benchmarks.append( - # Benchmark( - # 'graphics_WebGLAquarium', - # 'graphics_WebGLAquarium', - # '', - # iterations, - # rm_chroot_tmp, - # perf_args, - # 'crosperf_Wrapper', # Use client wrapper in Autotest - # show_all_results, - # retries, - # run_local=False, - # cwp_dso=cwp_dso, - # weight=weight)) - elif test_name == 'all_toolchain_perf_old': - self.AppendBenchmarkSet(benchmarks, - telemetry_toolchain_old_perf_tests, - test_args, iterations, rm_chroot_tmp, - perf_args, suite, show_all_results, retries, - run_local, cwp_dso, weight) - else: - benchmark = Benchmark(benchmark_name, test_name, test_args, - iterations, rm_chroot_tmp, perf_args, suite, - show_all_results, retries, run_local, cwp_dso, - weight) - benchmarks.append(benchmark) - else: - if test_name == 'all_graphics_perf': - self.AppendBenchmarkSet(benchmarks, - graphics_perf_tests, - '', - iterations, - rm_chroot_tmp, - perf_args, - '', - show_all_results, - retries, - run_local=False, - cwp_dso=cwp_dso, - weight=weight) - else: - # Add the single benchmark. - benchmark = Benchmark(benchmark_name, - test_name, - test_args, - iterations, - rm_chroot_tmp, - perf_args, - suite, - show_all_results, - retries, - run_local=False, - cwp_dso=cwp_dso, - weight=weight) - benchmarks.append(benchmark) - - if not benchmarks: - raise RuntimeError('No benchmarks specified') - - # Construct labels. - # Some fields are common with global settings. The values are - # inherited and/or merged with the global settings values. - labels = [] - all_label_settings = experiment_file.GetSettings('label') - all_remote = list(remote) - for label_settings in all_label_settings: - label_name = label_settings.name - image = label_settings.GetField('chromeos_image') - build = label_settings.GetField('build') - autotest_path = label_settings.GetField('autotest_path') - debug_path = label_settings.GetField('debug_path') - chromeos_root = label_settings.GetField('chromeos_root') - my_remote = label_settings.GetField('remote') - compiler = label_settings.GetField('compiler') - new_remote = [] - if my_remote: - for i in my_remote: - c = re.sub('["\']', '', i) - new_remote.append(c) - my_remote = new_remote - - if image: - if crosfleet: - raise RuntimeError( - 'In crosfleet mode, local image should not be used.') - if build: - raise RuntimeError('Image path and build are provided at the same ' - 'time, please use only one of them.') - else: - if not build: - raise RuntimeError("Can not have empty 'build' field!") - image, autotest_path, debug_path = label_settings.GetXbuddyPath( - build, autotest_path, debug_path, board, chromeos_root, log_level, - download_debug) - - cache_dir = label_settings.GetField('cache_dir') - chrome_src = label_settings.GetField('chrome_src') - - # TODO(yunlian): We should consolidate code in machine_manager.py - # to derermine whether we are running from within google or not - if ('corp.google.com' in socket.gethostname() and not my_remote - and not crosfleet): - my_remote = self.GetDefaultRemotes(board) - if global_settings.GetField('same_machine') and len(my_remote) > 1: - raise RuntimeError('Only one remote is allowed when same_machine ' - 'is turned on') - all_remote += my_remote - image_args = label_settings.GetField('image_args') - if test_flag.GetTestMode(): - # pylint: disable=too-many-function-args - label = MockLabel(label_name, build, image, autotest_path, debug_path, - chromeos_root, board, my_remote, image_args, - cache_dir, cache_only, log_level, compiler, - crosfleet, chrome_src) - else: - label = 
Label(label_name, build, image, autotest_path, debug_path,
-                      chromeos_root, board, my_remote, image_args, cache_dir,
-                      cache_only, log_level, compiler, crosfleet, chrome_src)
-      labels.append(label)
-
-    if not labels:
-      raise RuntimeError('No labels specified')
-
-    email = global_settings.GetField('email')
-    all_remote += list(set(my_remote))
-    all_remote = list(set(all_remote))
-    if crosfleet:
-      for remote in all_remote:
-        self.CheckRemotesInCrosfleet(remote)
-    experiment = Experiment(experiment_name,
-                            all_remote,
-                            working_directory,
-                            chromeos_root,
-                            cache_conditions,
-                            labels,
-                            benchmarks,
-                            experiment_file.Canonicalize(),
-                            email,
-                            acquire_timeout,
-                            log_dir,
-                            log_level,
-                            share_cache,
-                            results_dir,
-                            compress_results,
-                            locks_dir,
-                            cwp_dso,
-                            ignore_min_max,
-                            crosfleet,
-                            dut_config,
-                            no_lock=no_lock)
-
-    return experiment
-
-  def GetDefaultRemotes(self, board):
-    default_remotes_file = os.path.join(os.path.dirname(__file__),
-                                        'default_remotes')
-    try:
-      with open(default_remotes_file) as f:
-        for line in f:
-          key, v = line.split(':')
-          if key.strip() == board:
-            remotes = v.strip().split()
-            if remotes:
-              return remotes
+    """Factory class for building an Experiment, given an ExperimentFile as input.
+
+    This factory is currently hardcoded to produce an experiment for running
+    ChromeOS benchmarks, but the idea is that in the future, other types
+    of experiments could be produced.
+    """
+
+    def AppendBenchmarkSet(
+        self,
+        benchmarks,
+        benchmark_list,
+        test_args,
+        iterations,
+        rm_chroot_tmp,
+        perf_args,
+        suite,
+        show_all_results,
+        retries,
+        run_local,
+        cwp_dso,
+        weight,
+    ):
+        """Add all the tests in a set to the benchmarks list."""
+        for test_name in benchmark_list:
+            telemetry_benchmark = Benchmark(
+                test_name,
+                test_name,
+                test_args,
+                iterations,
+                rm_chroot_tmp,
+                perf_args,
+                suite,
+                show_all_results,
+                retries,
+                run_local,
+                cwp_dso,
+                weight,
+            )
+            benchmarks.append(telemetry_benchmark)
+
+    def GetExperiment(self, experiment_file, working_directory, log_dir):
+        """Construct an experiment from an experiment file."""
+        global_settings = experiment_file.GetGlobalSettings()
+        experiment_name = global_settings.GetField("name")
+        board = global_settings.GetField("board")
+        chromeos_root = global_settings.GetField("chromeos_root")
+        log_level = global_settings.GetField("logging_level")
+        if log_level not in ("quiet", "average", "verbose"):
+            log_level = "verbose"
+
+        crosfleet = global_settings.GetField("crosfleet")
+        no_lock = bool(global_settings.GetField("no_lock"))
+        # Check whether crosfleet tool is installed correctly for crosfleet mode.
+        if crosfleet and not self.CheckCrosfleetTool(chromeos_root, log_level):
+            sys.exit(0)
+
+        remote = global_settings.GetField("remote")
+        # This is used to remove the " and ' characters from the remote
+        # entries if the user added them to the remote string.
+        new_remote = []
+        if remote:
+            for i in remote:
+                c = re.sub("[\"']", "", i)
+                new_remote.append(c)
+        remote = new_remote
+        rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
+        perf_args = global_settings.GetField("perf_args")
+        download_debug = global_settings.GetField("download_debug")
+        # Do not download debug symbols when perf_args is not specified.
+        if not perf_args and download_debug:
+            download_debug = False
+        acquire_timeout = global_settings.GetField("acquire_timeout")
+        cache_dir = global_settings.GetField("cache_dir")
+        cache_only = global_settings.GetField("cache_only")
+        config.AddConfig("no_email", global_settings.GetField("no_email"))
+        share_cache = global_settings.GetField("share_cache")
+        results_dir = global_settings.GetField("results_dir")
+        compress_results = global_settings.GetField("compress_results")
+        # Warn user that option use_file_locks is deprecated.
+        use_file_locks = global_settings.GetField("use_file_locks")
+        if use_file_locks:
+            l = logger.GetLogger()
+            l.LogWarning(
+                "Option use_file_locks is deprecated, please remove it "
+                "from your experiment settings."
+            )
+        locks_dir = global_settings.GetField("locks_dir")
+        # If not specified, set the locks dir to the default locks dir in
+        # file_lock_machine.
+        if not locks_dir:
+            locks_dir = file_lock_machine.Machine.LOCKS_DIR
+        if not os.path.exists(locks_dir):
+            raise RuntimeError(
+                "Cannot access default lock directory. "
+                "Please run prodaccess or specify a local directory"
+            )
+        chrome_src = global_settings.GetField("chrome_src")
+        show_all_results = global_settings.GetField("show_all_results")
+        cwp_dso = global_settings.GetField("cwp_dso")
+        if cwp_dso and not cwp_dso in dso_list:
+            raise RuntimeError("The DSO specified is not supported")
+        ignore_min_max = global_settings.GetField("ignore_min_max")
+        dut_config = {
+            "enable_aslr": global_settings.GetField("enable_aslr"),
+            "intel_pstate": global_settings.GetField("intel_pstate"),
+            "cooldown_time": global_settings.GetField("cooldown_time"),
+            "cooldown_temp": global_settings.GetField("cooldown_temp"),
+            "governor": global_settings.GetField("governor"),
+            "cpu_usage": global_settings.GetField("cpu_usage"),
+            "cpu_freq_pct": global_settings.GetField("cpu_freq_pct"),
+            "turbostat": global_settings.GetField("turbostat"),
+            "top_interval": global_settings.GetField("top_interval"),
+        }
+
+        # Default cache hit conditions. The image checksum in the cache and the
+        # computed checksum of the image must match. Also a cache file must exist.
+        cache_conditions = [
+            CacheConditions.CACHE_FILE_EXISTS,
+            CacheConditions.CHECKSUMS_MATCH,
+        ]
+        if global_settings.GetField("rerun_if_failed"):
+            cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
+        if global_settings.GetField("rerun"):
+            cache_conditions.append(CacheConditions.FALSE)
+        if global_settings.GetField("same_machine"):
+            cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
+        if global_settings.GetField("same_specs"):
+            cache_conditions.append(CacheConditions.MACHINES_MATCH)
+
+        # Construct benchmarks.
+        # Some fields are common with global settings. The values are
+        # inherited and/or merged with the global settings values.
+        benchmarks = []
+        all_benchmark_settings = experiment_file.GetSettings("benchmark")
+
+        # Check for duplicated benchmark names.
+        benchmark_names = {}
+        # In cwp_dso mode, check that all benchmarks have the same number of
+        # iterations.
+        cwp_dso_iterations = 0
+
+        for benchmark_settings in all_benchmark_settings:
+            benchmark_name = benchmark_settings.name
+            test_name = benchmark_settings.GetField("test_name")
+            if not test_name:
+                test_name = benchmark_name
+            test_args = benchmark_settings.GetField("test_args")
+
+            # Rename the benchmark if 'story-filter' or 'story-tag-filter' is
+            # specified in test_args. Make sure these two tags only appear once.
+ story_count = 0 + for arg in test_args.split(): + if "--story-filter=" in arg or "--story-tag-filter=" in arg: + story_count += 1 + if story_count > 1: + raise RuntimeError( + "Only one story or story-tag filter allowed in " + "a single benchmark run" + ) + # Rename benchmark name with an extension of 'story'-option + benchmark_name = "%s@@%s" % ( + benchmark_name, + arg.split("=")[-1], + ) + + # Check for duplicated benchmark name after renaming + if not benchmark_name in benchmark_names: + benchmark_names[benchmark_name] = True + else: + raise SyntaxError( + "Duplicate benchmark name: '%s'." % benchmark_name + ) + + iterations = benchmark_settings.GetField("iterations") + if cwp_dso: + if cwp_dso_iterations not in (0, iterations): + raise RuntimeError( + "Iterations of each benchmark run are not the " "same" + ) + cwp_dso_iterations = iterations + + suite = benchmark_settings.GetField("suite") + retries = benchmark_settings.GetField("retries") + run_local = benchmark_settings.GetField("run_local") + weight = benchmark_settings.GetField("weight") + if weight: + if not cwp_dso: + raise RuntimeError( + "Weight can only be set when DSO specified" + ) + if suite != "telemetry_Crosperf": + raise RuntimeError( + "CWP approximation weight only works with " + "telemetry_Crosperf suite" + ) + if run_local: + raise RuntimeError( + "run_local must be set to False to use CWP " + "approximation" + ) + if weight < 0: + raise RuntimeError("Weight should be a float >=0") + elif cwp_dso: + raise RuntimeError( + "With DSO specified, each benchmark should have a " "weight" + ) + + if suite == "telemetry_Crosperf": + if test_name == "all_perfv2": + self.AppendBenchmarkSet( + benchmarks, + telemetry_perfv2_tests, + test_args, + iterations, + rm_chroot_tmp, + perf_args, + suite, + show_all_results, + retries, + run_local, + cwp_dso, + weight, + ) + elif test_name == "all_pagecyclers": + self.AppendBenchmarkSet( + benchmarks, + telemetry_pagecycler_tests, + test_args, + iterations, + rm_chroot_tmp, + perf_args, + suite, + show_all_results, + retries, + run_local, + cwp_dso, + weight, + ) + elif test_name == "all_crosbolt_perf": + self.AppendBenchmarkSet( + benchmarks, + telemetry_crosbolt_perf_tests, + test_args, + iterations, + rm_chroot_tmp, + perf_args, + "telemetry_Crosperf", + show_all_results, + retries, + run_local, + cwp_dso, + weight, + ) + self.AppendBenchmarkSet( + benchmarks, + crosbolt_perf_tests, + "", + iterations, + rm_chroot_tmp, + perf_args, + "", + show_all_results, + retries, + run_local=False, + cwp_dso=cwp_dso, + weight=weight, + ) + elif test_name == "all_toolchain_perf": + self.AppendBenchmarkSet( + benchmarks, + telemetry_toolchain_perf_tests, + test_args, + iterations, + rm_chroot_tmp, + perf_args, + suite, + show_all_results, + retries, + run_local, + cwp_dso, + weight, + ) + # Add non-telemetry toolchain-perf benchmarks: + + # Tast test platform.ReportDiskUsage for image size. + benchmarks.append( + Benchmark( + "platform.ReportDiskUsage", + "platform.ReportDiskUsage", + "", + 1, # This is not a performance benchmark, only run once. + rm_chroot_tmp, + "", + "tast", # Specify the suite to be 'tast' + show_all_results, + retries, + ) + ) + + # TODO: crbug.com/1057755 Do not enable graphics_WebGLAquarium until + # it gets fixed. 
+ # + # benchmarks.append( + # Benchmark( + # 'graphics_WebGLAquarium', + # 'graphics_WebGLAquarium', + # '', + # iterations, + # rm_chroot_tmp, + # perf_args, + # 'crosperf_Wrapper', # Use client wrapper in Autotest + # show_all_results, + # retries, + # run_local=False, + # cwp_dso=cwp_dso, + # weight=weight)) + elif test_name == "all_toolchain_perf_old": + self.AppendBenchmarkSet( + benchmarks, + telemetry_toolchain_old_perf_tests, + test_args, + iterations, + rm_chroot_tmp, + perf_args, + suite, + show_all_results, + retries, + run_local, + cwp_dso, + weight, + ) + else: + benchmark = Benchmark( + benchmark_name, + test_name, + test_args, + iterations, + rm_chroot_tmp, + perf_args, + suite, + show_all_results, + retries, + run_local, + cwp_dso, + weight, + ) + benchmarks.append(benchmark) else: - raise RuntimeError('There is no remote for {0}'.format(board)) - except IOError: - # TODO: rethrow instead of throwing different exception. - raise RuntimeError( - 'IOError while reading file {0}'.format(default_remotes_file)) - else: - raise RuntimeError('There is no remote for {0}'.format(board)) - - def CheckRemotesInCrosfleet(self, remote): - # TODO: (AI:zhizhouy) need to check whether a remote is a local or lab - # machine. If not lab machine, raise an error. - pass - - def CheckCrosfleetTool(self, chromeos_root, log_level): - CROSFLEET_PATH = 'crosfleet' - if os.path.exists(CROSFLEET_PATH): - return True - l = logger.GetLogger() - l.LogOutput('Crosfleet tool not installed, trying to install it.') - ce = command_executer.GetCommandExecuter(l, log_level=log_level) - setup_lab_tools = os.path.join(chromeos_root, 'chromeos-admin', - 'lab-tools', 'setup_lab_tools') - cmd = '%s' % setup_lab_tools - status = ce.RunCommand(cmd) - if status != 0: - raise RuntimeError( - 'Crosfleet tool not installed correctly, please try to ' - 'manually install it from %s' % setup_lab_tools) - l.LogOutput('Crosfleet is installed at %s, please login before first use. ' - 'Login by running "crosfleet login" and follow instructions.' % - CROSFLEET_PATH) - return False + if test_name == "all_graphics_perf": + self.AppendBenchmarkSet( + benchmarks, + graphics_perf_tests, + "", + iterations, + rm_chroot_tmp, + perf_args, + "", + show_all_results, + retries, + run_local=False, + cwp_dso=cwp_dso, + weight=weight, + ) + else: + # Add the single benchmark. + benchmark = Benchmark( + benchmark_name, + test_name, + test_args, + iterations, + rm_chroot_tmp, + perf_args, + suite, + show_all_results, + retries, + run_local=False, + cwp_dso=cwp_dso, + weight=weight, + ) + benchmarks.append(benchmark) + + if not benchmarks: + raise RuntimeError("No benchmarks specified") + + # Construct labels. + # Some fields are common with global settings. The values are + # inherited and/or merged with the global settings values. 
+        labels = []
+        all_label_settings = experiment_file.GetSettings("label")
+        all_remote = list(remote)
+        for label_settings in all_label_settings:
+            label_name = label_settings.name
+            image = label_settings.GetField("chromeos_image")
+            build = label_settings.GetField("build")
+            autotest_path = label_settings.GetField("autotest_path")
+            debug_path = label_settings.GetField("debug_path")
+            chromeos_root = label_settings.GetField("chromeos_root")
+            my_remote = label_settings.GetField("remote")
+            compiler = label_settings.GetField("compiler")
+            new_remote = []
+            if my_remote:
+                for i in my_remote:
+                    c = re.sub("[\"']", "", i)
+                    new_remote.append(c)
+            my_remote = new_remote
+
+            if image:
+                if crosfleet:
+                    raise RuntimeError(
+                        "In crosfleet mode, local image should not be used."
+                    )
+                if build:
+                    raise RuntimeError(
+                        "Image path and build are provided at the same "
+                        "time, please use only one of them."
+                    )
+            else:
+                if not build:
+                    raise RuntimeError("Can not have empty 'build' field!")
+                image, autotest_path, debug_path = label_settings.GetXbuddyPath(
+                    build,
+                    autotest_path,
+                    debug_path,
+                    board,
+                    chromeos_root,
+                    log_level,
+                    download_debug,
+                )
+
+            cache_dir = label_settings.GetField("cache_dir")
+            chrome_src = label_settings.GetField("chrome_src")
+
+            # TODO(yunlian): We should consolidate code in machine_manager.py
+            # to determine whether we are running from within Google or not.
+            if (
+                "corp.google.com" in socket.gethostname()
+                and not my_remote
+                and not crosfleet
+            ):
+                my_remote = self.GetDefaultRemotes(board)
+            if global_settings.GetField("same_machine") and len(my_remote) > 1:
+                raise RuntimeError(
+                    "Only one remote is allowed when same_machine "
+                    "is turned on"
+                )
+            all_remote += my_remote
+            image_args = label_settings.GetField("image_args")
+            if test_flag.GetTestMode():
+                # pylint: disable=too-many-function-args
+                label = MockLabel(
+                    label_name,
+                    build,
+                    image,
+                    autotest_path,
+                    debug_path,
+                    chromeos_root,
+                    board,
+                    my_remote,
+                    image_args,
+                    cache_dir,
+                    cache_only,
+                    log_level,
+                    compiler,
+                    crosfleet,
+                    chrome_src,
+                )
+            else:
+                label = Label(
+                    label_name,
+                    build,
+                    image,
+                    autotest_path,
+                    debug_path,
+                    chromeos_root,
+                    board,
+                    my_remote,
+                    image_args,
+                    cache_dir,
+                    cache_only,
+                    log_level,
+                    compiler,
+                    crosfleet,
+                    chrome_src,
+                )
+            labels.append(label)
+
+        if not labels:
+            raise RuntimeError("No labels specified")
+
+        email = global_settings.GetField("email")
+        all_remote += list(set(my_remote))
+        all_remote = list(set(all_remote))
+        if crosfleet:
+            for remote in all_remote:
+                self.CheckRemotesInCrosfleet(remote)
+        experiment = Experiment(
+            experiment_name,
+            all_remote,
+            working_directory,
+            chromeos_root,
+            cache_conditions,
+            labels,
+            benchmarks,
+            experiment_file.Canonicalize(),
+            email,
+            acquire_timeout,
+            log_dir,
+            log_level,
+            share_cache,
+            results_dir,
+            compress_results,
+            locks_dir,
+            cwp_dso,
+            ignore_min_max,
+            crosfleet,
+            dut_config,
+            no_lock=no_lock,
+        )
+
+        return experiment
+
+    def GetDefaultRemotes(self, board):
+        default_remotes_file = os.path.join(
+            os.path.dirname(__file__), "default_remotes"
+        )
+        try:
+            with open(default_remotes_file) as f:
+                for line in f:
+                    key, v = line.split(":")
+                    if key.strip() == board:
+                        remotes = v.strip().split()
+                        if remotes:
+                            return remotes
+                        else:
+                            raise RuntimeError(
+                                "There is no remote for {0}".format(board)
+                            )
+        except IOError:
+            # TODO: rethrow instead of throwing different exception.
+ raise RuntimeError( + "IOError while reading file {0}".format(default_remotes_file) + ) + else: + raise RuntimeError("There is no remote for {0}".format(board)) + + def CheckRemotesInCrosfleet(self, remote): + # TODO: (AI:zhizhouy) need to check whether a remote is a local or lab + # machine. If not lab machine, raise an error. + pass + + def CheckCrosfleetTool(self, chromeos_root, log_level): + CROSFLEET_PATH = "crosfleet" + if os.path.exists(CROSFLEET_PATH): + return True + l = logger.GetLogger() + l.LogOutput("Crosfleet tool not installed, trying to install it.") + ce = command_executer.GetCommandExecuter(l, log_level=log_level) + setup_lab_tools = os.path.join( + chromeos_root, "chromeos-admin", "lab-tools", "setup_lab_tools" + ) + cmd = "%s" % setup_lab_tools + status = ce.RunCommand(cmd) + if status != 0: + raise RuntimeError( + "Crosfleet tool not installed correctly, please try to " + "manually install it from %s" % setup_lab_tools + ) + l.LogOutput( + "Crosfleet is installed at %s, please login before first use. " + 'Login by running "crosfleet login" and follow instructions.' + % CROSFLEET_PATH + ) + return False diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py index 139e69ab..115061e6 100755 --- a/crosperf/experiment_factory_unittest.py +++ b/crosperf/experiment_factory_unittest.py @@ -15,15 +15,15 @@ import socket import unittest import unittest.mock as mock +import benchmark from cros_utils import command_executer from cros_utils.file_utils import FileUtils - -from experiment_file import ExperimentFile -import test_flag -import benchmark import experiment_factory from experiment_factory import ExperimentFactory +from experiment_file import ExperimentFile import settings_factory +import test_flag + EXPERIMENT_FILE_1 = """ board: x86-alex @@ -78,377 +78,455 @@ EXPERIMENT_FILE_2 = """ class ExperimentFactoryTest(unittest.TestCase): - """Class for running experiment factory unittests.""" - - def setUp(self): - self.append_benchmark_call_args = [] - - def testLoadExperimentFile1(self): - experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1)) - exp = ExperimentFactory().GetExperiment(experiment_file, - working_directory='', - log_dir='') - self.assertEqual(exp.remote, ['chromeos-alex3']) - - self.assertEqual(len(exp.benchmarks), 2) - self.assertEqual(exp.benchmarks[0].name, 'PageCycler') - self.assertEqual(exp.benchmarks[0].test_name, 'PageCycler') - self.assertEqual(exp.benchmarks[0].iterations, 3) - self.assertEqual(exp.benchmarks[1].name, 'webrtc@@datachannel') - self.assertEqual(exp.benchmarks[1].test_name, 'webrtc') - self.assertEqual(exp.benchmarks[1].iterations, 1) - - self.assertEqual(len(exp.labels), 2) - self.assertEqual(exp.labels[0].chromeos_image, - '/usr/local/google/cros_image1.bin') - self.assertEqual(exp.labels[0].board, 'x86-alex') - - def testLoadExperimentFile2CWP(self): - experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_2)) - exp = ExperimentFactory().GetExperiment(experiment_file, - working_directory='', - log_dir='') - self.assertEqual(exp.cwp_dso, 'kallsyms') - self.assertEqual(len(exp.benchmarks), 2) - self.assertEqual(exp.benchmarks[0].weight, 0.8) - self.assertEqual(exp.benchmarks[1].weight, 0.2) - - def testDuplecateBenchmark(self): - mock_experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1)) - mock_experiment_file.all_settings = [] - benchmark_settings1 = settings_factory.BenchmarkSettings('name') - mock_experiment_file.all_settings.append(benchmark_settings1) - 
benchmark_settings2 = settings_factory.BenchmarkSettings('name') - mock_experiment_file.all_settings.append(benchmark_settings2) - - with self.assertRaises(SyntaxError): - ef = ExperimentFactory() - ef.GetExperiment(mock_experiment_file, '', '') - - def testCWPExceptions(self): - mock_experiment_file = ExperimentFile(io.StringIO('')) - mock_experiment_file.all_settings = [] - global_settings = settings_factory.GlobalSettings('test_name') - global_settings.SetField('locks_dir', '/tmp') - - # Test 1: DSO type not supported - global_settings.SetField('cwp_dso', 'test') - self.assertEqual(global_settings.GetField('cwp_dso'), 'test') - mock_experiment_file.global_settings = global_settings - with self.assertRaises(RuntimeError) as msg: - ef = ExperimentFactory() - ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual('The DSO specified is not supported', str(msg.exception)) - - # Test 2: No weight after DSO specified - global_settings.SetField('cwp_dso', 'kallsyms') - mock_experiment_file.global_settings = global_settings - benchmark_settings = settings_factory.BenchmarkSettings('name') - mock_experiment_file.all_settings.append(benchmark_settings) - with self.assertRaises(RuntimeError) as msg: - ef = ExperimentFactory() - ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual('With DSO specified, each benchmark should have a weight', - str(msg.exception)) - - # Test 3: Weight is set, but no dso specified - global_settings.SetField('cwp_dso', '') - mock_experiment_file.global_settings = global_settings - benchmark_settings = settings_factory.BenchmarkSettings('name') - benchmark_settings.SetField('weight', '0.8') - mock_experiment_file.all_settings = [] - mock_experiment_file.all_settings.append(benchmark_settings) - with self.assertRaises(RuntimeError) as msg: - ef = ExperimentFactory() - ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual('Weight can only be set when DSO specified', - str(msg.exception)) - - # Test 4: cwp_dso only works for telemetry_Crosperf benchmarks - global_settings.SetField('cwp_dso', 'kallsyms') - mock_experiment_file.global_settings = global_settings - benchmark_settings = settings_factory.BenchmarkSettings('name') - benchmark_settings.SetField('weight', '0.8') - mock_experiment_file.all_settings = [] - mock_experiment_file.all_settings.append(benchmark_settings) - with self.assertRaises(RuntimeError) as msg: - ef = ExperimentFactory() - ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual( - 'CWP approximation weight only works with ' - 'telemetry_Crosperf suite', str(msg.exception)) - - # Test 5: cwp_dso does not work for local run - benchmark_settings = settings_factory.BenchmarkSettings('name') - benchmark_settings.SetField('weight', '0.8') - benchmark_settings.SetField('suite', 'telemetry_Crosperf') - benchmark_settings.SetField('run_local', 'True') - mock_experiment_file.all_settings = [] - mock_experiment_file.all_settings.append(benchmark_settings) - with self.assertRaises(RuntimeError) as msg: - ef = ExperimentFactory() - ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual('run_local must be set to False to use CWP approximation', - str(msg.exception)) - - # Test 6: weight should be float >=0 - benchmark_settings = settings_factory.BenchmarkSettings('name') - benchmark_settings.SetField('weight', '-1.2') - benchmark_settings.SetField('suite', 'telemetry_Crosperf') - benchmark_settings.SetField('run_local', 'False') - mock_experiment_file.all_settings = [] - 
mock_experiment_file.all_settings.append(benchmark_settings) - with self.assertRaises(RuntimeError) as msg: - ef = ExperimentFactory() - ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual('Weight should be a float >=0', str(msg.exception)) - - # Test 7: more than one story tag in test_args - benchmark_settings = settings_factory.BenchmarkSettings('name') - benchmark_settings.SetField('test_args', - '--story-filter=a --story-tag-filter=b') - benchmark_settings.SetField('weight', '1.2') - benchmark_settings.SetField('suite', 'telemetry_Crosperf') - mock_experiment_file.all_settings = [] - mock_experiment_file.all_settings.append(benchmark_settings) - with self.assertRaises(RuntimeError) as msg: - ef = ExperimentFactory() - ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual( - 'Only one story or story-tag filter allowed in a single ' - 'benchmark run', str(msg.exception)) - - # Test 8: Iterations of each benchmark run are not same in cwp mode - mock_experiment_file.all_settings = [] - benchmark_settings = settings_factory.BenchmarkSettings('name1') - benchmark_settings.SetField('iterations', '4') - benchmark_settings.SetField('weight', '1.2') - benchmark_settings.SetField('suite', 'telemetry_Crosperf') - benchmark_settings.SetField('run_local', 'False') - mock_experiment_file.all_settings.append(benchmark_settings) - benchmark_settings = settings_factory.BenchmarkSettings('name2') - benchmark_settings.SetField('iterations', '3') - benchmark_settings.SetField('weight', '1.2') - benchmark_settings.SetField('suite', 'telemetry_Crosperf') - benchmark_settings.SetField('run_local', 'False') - mock_experiment_file.all_settings.append(benchmark_settings) - with self.assertRaises(RuntimeError) as msg: - ef = ExperimentFactory() - ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual('Iterations of each benchmark run are not the same', - str(msg.exception)) - - def test_append_benchmark_set(self): - ef = ExperimentFactory() - - bench_list = [] - ef.AppendBenchmarkSet(bench_list, - experiment_factory.telemetry_perfv2_tests, '', 1, - False, '', 'telemetry_Crosperf', False, 0, False, '', - 0) - self.assertEqual(len(bench_list), - len(experiment_factory.telemetry_perfv2_tests)) - self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark)) - - bench_list = [] - ef.AppendBenchmarkSet(bench_list, - experiment_factory.telemetry_pagecycler_tests, '', 1, - False, '', 'telemetry_Crosperf', False, 0, False, '', - 0) - self.assertEqual(len(bench_list), - len(experiment_factory.telemetry_pagecycler_tests)) - self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark)) - - bench_list = [] - ef.AppendBenchmarkSet(bench_list, - experiment_factory.telemetry_toolchain_perf_tests, - '', 1, False, '', 'telemetry_Crosperf', False, 0, - False, '', 0) - self.assertEqual(len(bench_list), - len(experiment_factory.telemetry_toolchain_perf_tests)) - self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark)) - - @mock.patch.object(socket, 'gethostname') - def test_get_experiment(self, mock_socket): - - test_flag.SetTestMode(False) - self.append_benchmark_call_args = [] - - def FakeAppendBenchmarkSet(bench_list, set_list, args, iters, rm_ch, - perf_args, suite, show_all): - 'Helper function for test_get_experiment' - arg_list = [ - bench_list, set_list, args, iters, rm_ch, perf_args, suite, show_all - ] - self.append_benchmark_call_args.append(arg_list) - - def FakeGetDefaultRemotes(board): - if not board: - return [] - return ['fake_chromeos_machine1.cros', 
'fake_chromeos_machine2.cros'] - - def FakeGetXbuddyPath(build, autotest_dir, debug_dir, board, chroot, - log_level, perf_args): - autotest_path = autotest_dir - if not autotest_path: - autotest_path = 'fake_autotest_path' - debug_path = debug_dir - if not debug_path and perf_args: - debug_path = 'fake_debug_path' - if not build or not board or not chroot or not log_level: - return '', autotest_path, debug_path - return 'fake_image_path', autotest_path, debug_path - - ef = ExperimentFactory() - ef.AppendBenchmarkSet = FakeAppendBenchmarkSet - ef.GetDefaultRemotes = FakeGetDefaultRemotes - - label_settings = settings_factory.LabelSettings('image_label') - benchmark_settings = settings_factory.BenchmarkSettings('bench_test') - global_settings = settings_factory.GlobalSettings('test_name') - - label_settings.GetXbuddyPath = FakeGetXbuddyPath - - mock_experiment_file = ExperimentFile(io.StringIO('')) - mock_experiment_file.all_settings = [] - - test_flag.SetTestMode(True) - # Basic test. - global_settings.SetField('name', 'unittest_test') - global_settings.SetField('board', 'lumpy') - global_settings.SetField('locks_dir', '/tmp') - global_settings.SetField('remote', '123.45.67.89 123.45.76.80') - benchmark_settings.SetField('test_name', 'kraken') - benchmark_settings.SetField('suite', 'telemetry_Crosperf') - benchmark_settings.SetField('iterations', 1) - label_settings.SetField( - 'chromeos_image', - 'chromeos/src/build/images/lumpy/latest/chromiumos_test_image.bin') - label_settings.SetField('chrome_src', '/usr/local/google/home/chrome-top') - label_settings.SetField('autotest_path', '/tmp/autotest') - - mock_experiment_file.global_settings = global_settings - mock_experiment_file.all_settings.append(label_settings) - mock_experiment_file.all_settings.append(benchmark_settings) - mock_experiment_file.all_settings.append(global_settings) - - mock_socket.return_value = '' - - # First test. General test. - exp = ef.GetExperiment(mock_experiment_file, '', '') - self.assertCountEqual(exp.remote, ['123.45.67.89', '123.45.76.80']) - self.assertEqual(exp.cache_conditions, [0, 2, 1]) - self.assertEqual(exp.log_level, 'average') - - self.assertEqual(len(exp.benchmarks), 1) - self.assertEqual(exp.benchmarks[0].name, 'bench_test') - self.assertEqual(exp.benchmarks[0].test_name, 'kraken') - self.assertEqual(exp.benchmarks[0].iterations, 1) - self.assertEqual(exp.benchmarks[0].suite, 'telemetry_Crosperf') - self.assertFalse(exp.benchmarks[0].show_all_results) - - self.assertEqual(len(exp.labels), 1) - self.assertEqual( - exp.labels[0].chromeos_image, 'chromeos/src/build/images/lumpy/latest/' - 'chromiumos_test_image.bin') - self.assertEqual(exp.labels[0].autotest_path, '/tmp/autotest') - self.assertEqual(exp.labels[0].board, 'lumpy') - - # Second test: Remotes listed in labels. 
+ """Class for running experiment factory unittests.""" + + def setUp(self): + self.append_benchmark_call_args = [] + + def testLoadExperimentFile1(self): + experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1)) + exp = ExperimentFactory().GetExperiment( + experiment_file, working_directory="", log_dir="" + ) + self.assertEqual(exp.remote, ["chromeos-alex3"]) + + self.assertEqual(len(exp.benchmarks), 2) + self.assertEqual(exp.benchmarks[0].name, "PageCycler") + self.assertEqual(exp.benchmarks[0].test_name, "PageCycler") + self.assertEqual(exp.benchmarks[0].iterations, 3) + self.assertEqual(exp.benchmarks[1].name, "webrtc@@datachannel") + self.assertEqual(exp.benchmarks[1].test_name, "webrtc") + self.assertEqual(exp.benchmarks[1].iterations, 1) + + self.assertEqual(len(exp.labels), 2) + self.assertEqual( + exp.labels[0].chromeos_image, "/usr/local/google/cros_image1.bin" + ) + self.assertEqual(exp.labels[0].board, "x86-alex") + + def testLoadExperimentFile2CWP(self): + experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_2)) + exp = ExperimentFactory().GetExperiment( + experiment_file, working_directory="", log_dir="" + ) + self.assertEqual(exp.cwp_dso, "kallsyms") + self.assertEqual(len(exp.benchmarks), 2) + self.assertEqual(exp.benchmarks[0].weight, 0.8) + self.assertEqual(exp.benchmarks[1].weight, 0.2) + + def testDuplecateBenchmark(self): + mock_experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1)) + mock_experiment_file.all_settings = [] + benchmark_settings1 = settings_factory.BenchmarkSettings("name") + mock_experiment_file.all_settings.append(benchmark_settings1) + benchmark_settings2 = settings_factory.BenchmarkSettings("name") + mock_experiment_file.all_settings.append(benchmark_settings2) + + with self.assertRaises(SyntaxError): + ef = ExperimentFactory() + ef.GetExperiment(mock_experiment_file, "", "") + + def testCWPExceptions(self): + mock_experiment_file = ExperimentFile(io.StringIO("")) + mock_experiment_file.all_settings = [] + global_settings = settings_factory.GlobalSettings("test_name") + global_settings.SetField("locks_dir", "/tmp") + + # Test 1: DSO type not supported + global_settings.SetField("cwp_dso", "test") + self.assertEqual(global_settings.GetField("cwp_dso"), "test") + mock_experiment_file.global_settings = global_settings + with self.assertRaises(RuntimeError) as msg: + ef = ExperimentFactory() + ef.GetExperiment(mock_experiment_file, "", "") + self.assertEqual( + "The DSO specified is not supported", str(msg.exception) + ) + + # Test 2: No weight after DSO specified + global_settings.SetField("cwp_dso", "kallsyms") + mock_experiment_file.global_settings = global_settings + benchmark_settings = settings_factory.BenchmarkSettings("name") + mock_experiment_file.all_settings.append(benchmark_settings) + with self.assertRaises(RuntimeError) as msg: + ef = ExperimentFactory() + ef.GetExperiment(mock_experiment_file, "", "") + self.assertEqual( + "With DSO specified, each benchmark should have a weight", + str(msg.exception), + ) + + # Test 3: Weight is set, but no dso specified + global_settings.SetField("cwp_dso", "") + mock_experiment_file.global_settings = global_settings + benchmark_settings = settings_factory.BenchmarkSettings("name") + benchmark_settings.SetField("weight", "0.8") + mock_experiment_file.all_settings = [] + mock_experiment_file.all_settings.append(benchmark_settings) + with self.assertRaises(RuntimeError) as msg: + ef = ExperimentFactory() + ef.GetExperiment(mock_experiment_file, "", "") + self.assertEqual( + 
"Weight can only be set when DSO specified", str(msg.exception) + ) + + # Test 4: cwp_dso only works for telemetry_Crosperf benchmarks + global_settings.SetField("cwp_dso", "kallsyms") + mock_experiment_file.global_settings = global_settings + benchmark_settings = settings_factory.BenchmarkSettings("name") + benchmark_settings.SetField("weight", "0.8") + mock_experiment_file.all_settings = [] + mock_experiment_file.all_settings.append(benchmark_settings) + with self.assertRaises(RuntimeError) as msg: + ef = ExperimentFactory() + ef.GetExperiment(mock_experiment_file, "", "") + self.assertEqual( + "CWP approximation weight only works with " + "telemetry_Crosperf suite", + str(msg.exception), + ) + + # Test 5: cwp_dso does not work for local run + benchmark_settings = settings_factory.BenchmarkSettings("name") + benchmark_settings.SetField("weight", "0.8") + benchmark_settings.SetField("suite", "telemetry_Crosperf") + benchmark_settings.SetField("run_local", "True") + mock_experiment_file.all_settings = [] + mock_experiment_file.all_settings.append(benchmark_settings) + with self.assertRaises(RuntimeError) as msg: + ef = ExperimentFactory() + ef.GetExperiment(mock_experiment_file, "", "") + self.assertEqual( + "run_local must be set to False to use CWP approximation", + str(msg.exception), + ) + + # Test 6: weight should be float >=0 + benchmark_settings = settings_factory.BenchmarkSettings("name") + benchmark_settings.SetField("weight", "-1.2") + benchmark_settings.SetField("suite", "telemetry_Crosperf") + benchmark_settings.SetField("run_local", "False") + mock_experiment_file.all_settings = [] + mock_experiment_file.all_settings.append(benchmark_settings) + with self.assertRaises(RuntimeError) as msg: + ef = ExperimentFactory() + ef.GetExperiment(mock_experiment_file, "", "") + self.assertEqual("Weight should be a float >=0", str(msg.exception)) + + # Test 7: more than one story tag in test_args + benchmark_settings = settings_factory.BenchmarkSettings("name") + benchmark_settings.SetField( + "test_args", "--story-filter=a --story-tag-filter=b" + ) + benchmark_settings.SetField("weight", "1.2") + benchmark_settings.SetField("suite", "telemetry_Crosperf") + mock_experiment_file.all_settings = [] + mock_experiment_file.all_settings.append(benchmark_settings) + with self.assertRaises(RuntimeError) as msg: + ef = ExperimentFactory() + ef.GetExperiment(mock_experiment_file, "", "") + self.assertEqual( + "Only one story or story-tag filter allowed in a single " + "benchmark run", + str(msg.exception), + ) + + # Test 8: Iterations of each benchmark run are not same in cwp mode + mock_experiment_file.all_settings = [] + benchmark_settings = settings_factory.BenchmarkSettings("name1") + benchmark_settings.SetField("iterations", "4") + benchmark_settings.SetField("weight", "1.2") + benchmark_settings.SetField("suite", "telemetry_Crosperf") + benchmark_settings.SetField("run_local", "False") + mock_experiment_file.all_settings.append(benchmark_settings) + benchmark_settings = settings_factory.BenchmarkSettings("name2") + benchmark_settings.SetField("iterations", "3") + benchmark_settings.SetField("weight", "1.2") + benchmark_settings.SetField("suite", "telemetry_Crosperf") + benchmark_settings.SetField("run_local", "False") + mock_experiment_file.all_settings.append(benchmark_settings) + with self.assertRaises(RuntimeError) as msg: + ef = ExperimentFactory() + ef.GetExperiment(mock_experiment_file, "", "") + self.assertEqual( + "Iterations of each benchmark run are not the same", + 
str(msg.exception), + ) + + def test_append_benchmark_set(self): + ef = ExperimentFactory() + + bench_list = [] + ef.AppendBenchmarkSet( + bench_list, + experiment_factory.telemetry_perfv2_tests, + "", + 1, + False, + "", + "telemetry_Crosperf", + False, + 0, + False, + "", + 0, + ) + self.assertEqual( + len(bench_list), len(experiment_factory.telemetry_perfv2_tests) + ) + self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark)) + + bench_list = [] + ef.AppendBenchmarkSet( + bench_list, + experiment_factory.telemetry_pagecycler_tests, + "", + 1, + False, + "", + "telemetry_Crosperf", + False, + 0, + False, + "", + 0, + ) + self.assertEqual( + len(bench_list), len(experiment_factory.telemetry_pagecycler_tests) + ) + self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark)) + + bench_list = [] + ef.AppendBenchmarkSet( + bench_list, + experiment_factory.telemetry_toolchain_perf_tests, + "", + 1, + False, + "", + "telemetry_Crosperf", + False, + 0, + False, + "", + 0, + ) + self.assertEqual( + len(bench_list), + len(experiment_factory.telemetry_toolchain_perf_tests), + ) + self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark)) + + @mock.patch.object(socket, "gethostname") + def test_get_experiment(self, mock_socket): + + test_flag.SetTestMode(False) + self.append_benchmark_call_args = [] + + def FakeAppendBenchmarkSet( + bench_list, set_list, args, iters, rm_ch, perf_args, suite, show_all + ): + "Helper function for test_get_experiment" + arg_list = [ + bench_list, + set_list, + args, + iters, + rm_ch, + perf_args, + suite, + show_all, + ] + self.append_benchmark_call_args.append(arg_list) + + def FakeGetDefaultRemotes(board): + if not board: + return [] + return [ + "fake_chromeos_machine1.cros", + "fake_chromeos_machine2.cros", + ] + + def FakeGetXbuddyPath( + build, autotest_dir, debug_dir, board, chroot, log_level, perf_args + ): + autotest_path = autotest_dir + if not autotest_path: + autotest_path = "fake_autotest_path" + debug_path = debug_dir + if not debug_path and perf_args: + debug_path = "fake_debug_path" + if not build or not board or not chroot or not log_level: + return "", autotest_path, debug_path + return "fake_image_path", autotest_path, debug_path + + ef = ExperimentFactory() + ef.AppendBenchmarkSet = FakeAppendBenchmarkSet + ef.GetDefaultRemotes = FakeGetDefaultRemotes + + label_settings = settings_factory.LabelSettings("image_label") + benchmark_settings = settings_factory.BenchmarkSettings("bench_test") + global_settings = settings_factory.GlobalSettings("test_name") + + label_settings.GetXbuddyPath = FakeGetXbuddyPath + + mock_experiment_file = ExperimentFile(io.StringIO("")) + mock_experiment_file.all_settings = [] + + test_flag.SetTestMode(True) + # Basic test. 
+ global_settings.SetField("name", "unittest_test") + global_settings.SetField("board", "lumpy") + global_settings.SetField("locks_dir", "/tmp") + global_settings.SetField("remote", "123.45.67.89 123.45.76.80") + benchmark_settings.SetField("test_name", "kraken") + benchmark_settings.SetField("suite", "telemetry_Crosperf") + benchmark_settings.SetField("iterations", 1) + label_settings.SetField( + "chromeos_image", + "chromeos/src/build/images/lumpy/latest/chromiumos_test_image.bin", + ) + label_settings.SetField( + "chrome_src", "/usr/local/google/home/chrome-top" + ) + label_settings.SetField("autotest_path", "/tmp/autotest") + + mock_experiment_file.global_settings = global_settings + mock_experiment_file.all_settings.append(label_settings) + mock_experiment_file.all_settings.append(benchmark_settings) + mock_experiment_file.all_settings.append(global_settings) + + mock_socket.return_value = "" + + # First test. General test. + exp = ef.GetExperiment(mock_experiment_file, "", "") + self.assertCountEqual(exp.remote, ["123.45.67.89", "123.45.76.80"]) + self.assertEqual(exp.cache_conditions, [0, 2, 1]) + self.assertEqual(exp.log_level, "average") + + self.assertEqual(len(exp.benchmarks), 1) + self.assertEqual(exp.benchmarks[0].name, "bench_test") + self.assertEqual(exp.benchmarks[0].test_name, "kraken") + self.assertEqual(exp.benchmarks[0].iterations, 1) + self.assertEqual(exp.benchmarks[0].suite, "telemetry_Crosperf") + self.assertFalse(exp.benchmarks[0].show_all_results) + + self.assertEqual(len(exp.labels), 1) + self.assertEqual( + exp.labels[0].chromeos_image, + "chromeos/src/build/images/lumpy/latest/" + "chromiumos_test_image.bin", + ) + self.assertEqual(exp.labels[0].autotest_path, "/tmp/autotest") + self.assertEqual(exp.labels[0].board, "lumpy") + + # Second test: Remotes listed in labels. + test_flag.SetTestMode(True) + label_settings.SetField("remote", "chromeos1.cros chromeos2.cros") + exp = ef.GetExperiment(mock_experiment_file, "", "") + self.assertCountEqual( + exp.remote, + [ + "123.45.67.89", + "123.45.76.80", + "chromeos1.cros", + "chromeos2.cros", + ], + ) + + # Third test: Automatic fixing of bad logging_level param: + global_settings.SetField("logging_level", "really loud!") + exp = ef.GetExperiment(mock_experiment_file, "", "") + self.assertEqual(exp.log_level, "verbose") + + # Fourth test: Setting cache conditions; only 1 remote with "same_machine" + global_settings.SetField("rerun_if_failed", "true") + global_settings.SetField("rerun", "true") + global_settings.SetField("same_machine", "true") + global_settings.SetField("same_specs", "true") + + self.assertRaises( + Exception, ef.GetExperiment, mock_experiment_file, "", "" + ) + label_settings.SetField("remote", "") + global_settings.SetField("remote", "123.45.67.89") + exp = ef.GetExperiment(mock_experiment_file, "", "") + self.assertEqual(exp.cache_conditions, [0, 2, 3, 4, 6, 1]) + + # Fifth Test: Adding a second label; calling GetXbuddyPath; omitting all + # remotes (Call GetDefaultRemotes). 
+ mock_socket.return_value = "test.corp.google.com" + global_settings.SetField("remote", "") + global_settings.SetField("same_machine", "false") + + label_settings_2 = settings_factory.LabelSettings( + "official_image_label" + ) + label_settings_2.SetField("chromeos_root", "chromeos") + label_settings_2.SetField("build", "official-dev") + label_settings_2.SetField("autotest_path", "") + label_settings_2.GetXbuddyPath = FakeGetXbuddyPath + + mock_experiment_file.all_settings.append(label_settings_2) + exp = ef.GetExperiment(mock_experiment_file, "", "") + self.assertEqual(len(exp.labels), 2) + self.assertEqual(exp.labels[1].chromeos_image, "fake_image_path") + self.assertEqual(exp.labels[1].autotest_path, "fake_autotest_path") + self.assertCountEqual( + exp.remote, + ["fake_chromeos_machine1.cros", "fake_chromeos_machine2.cros"], + ) + + def test_get_default_remotes(self): + board_list = [ + "bob", + "chell", + "coral", + "elm", + "kefka", + "nautilus", + "snappy", + ] + + ef = ExperimentFactory() + self.assertRaises(Exception, ef.GetDefaultRemotes, "bad-board") + + # Verify that we have entries for every board + for b in board_list: + remotes = ef.GetDefaultRemotes(b) + self.assertGreaterEqual(len(remotes), 1) + + @mock.patch.object(command_executer.CommandExecuter, "RunCommand") + @mock.patch.object(os.path, "exists") + def test_check_crosfleet_tool(self, mock_exists, mock_runcmd): + ef = ExperimentFactory() + chromeos_root = "/tmp/chromeos" + log_level = "average" + + mock_exists.return_value = True + ret = ef.CheckCrosfleetTool(chromeos_root, log_level) + self.assertTrue(ret) + + mock_exists.return_value = False + mock_runcmd.return_value = 1 + with self.assertRaises(RuntimeError) as err: + ef.CheckCrosfleetTool(chromeos_root, log_level) + self.assertEqual(mock_runcmd.call_count, 1) + self.assertEqual( + str(err.exception), + "Crosfleet tool not installed " + "correctly, please try to manually install it from " + "/tmp/chromeos/chromeos-admin/lab-tools/setup_lab_tools", + ) + + mock_runcmd.return_value = 0 + mock_runcmd.call_count = 0 + ret = ef.CheckCrosfleetTool(chromeos_root, log_level) + self.assertEqual(mock_runcmd.call_count, 1) + self.assertFalse(ret) + + +if __name__ == "__main__": + FileUtils.Configure(True) test_flag.SetTestMode(True) - label_settings.SetField('remote', 'chromeos1.cros chromeos2.cros') - exp = ef.GetExperiment(mock_experiment_file, '', '') - self.assertCountEqual( - exp.remote, - ['123.45.67.89', '123.45.76.80', 'chromeos1.cros', 'chromeos2.cros']) - - # Third test: Automatic fixing of bad logging_level param: - global_settings.SetField('logging_level', 'really loud!') - exp = ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual(exp.log_level, 'verbose') - - # Fourth test: Setting cache conditions; only 1 remote with "same_machine" - global_settings.SetField('rerun_if_failed', 'true') - global_settings.SetField('rerun', 'true') - global_settings.SetField('same_machine', 'true') - global_settings.SetField('same_specs', 'true') - - self.assertRaises(Exception, ef.GetExperiment, mock_experiment_file, '', - '') - label_settings.SetField('remote', '') - global_settings.SetField('remote', '123.45.67.89') - exp = ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual(exp.cache_conditions, [0, 2, 3, 4, 6, 1]) - - # Fifth Test: Adding a second label; calling GetXbuddyPath; omitting all - # remotes (Call GetDefaultRemotes). 
- mock_socket.return_value = 'test.corp.google.com' - global_settings.SetField('remote', '') - global_settings.SetField('same_machine', 'false') - - label_settings_2 = settings_factory.LabelSettings('official_image_label') - label_settings_2.SetField('chromeos_root', 'chromeos') - label_settings_2.SetField('build', 'official-dev') - label_settings_2.SetField('autotest_path', '') - label_settings_2.GetXbuddyPath = FakeGetXbuddyPath - - mock_experiment_file.all_settings.append(label_settings_2) - exp = ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual(len(exp.labels), 2) - self.assertEqual(exp.labels[1].chromeos_image, 'fake_image_path') - self.assertEqual(exp.labels[1].autotest_path, 'fake_autotest_path') - self.assertCountEqual( - exp.remote, - ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros']) - - def test_get_default_remotes(self): - board_list = [ - 'bob', - 'chell', - 'coral', - 'elm', - 'kefka', - 'nautilus', - 'snappy', - ] - - ef = ExperimentFactory() - self.assertRaises(Exception, ef.GetDefaultRemotes, 'bad-board') - - # Verify that we have entries for every board - for b in board_list: - remotes = ef.GetDefaultRemotes(b) - self.assertGreaterEqual(len(remotes), 1) - - @mock.patch.object(command_executer.CommandExecuter, 'RunCommand') - @mock.patch.object(os.path, 'exists') - def test_check_crosfleet_tool(self, mock_exists, mock_runcmd): - ef = ExperimentFactory() - chromeos_root = '/tmp/chromeos' - log_level = 'average' - - mock_exists.return_value = True - ret = ef.CheckCrosfleetTool(chromeos_root, log_level) - self.assertTrue(ret) - - mock_exists.return_value = False - mock_runcmd.return_value = 1 - with self.assertRaises(RuntimeError) as err: - ef.CheckCrosfleetTool(chromeos_root, log_level) - self.assertEqual(mock_runcmd.call_count, 1) - self.assertEqual( - str(err.exception), 'Crosfleet tool not installed ' - 'correctly, please try to manually install it from ' - '/tmp/chromeos/chromeos-admin/lab-tools/setup_lab_tools') - - mock_runcmd.return_value = 0 - mock_runcmd.call_count = 0 - ret = ef.CheckCrosfleetTool(chromeos_root, log_level) - self.assertEqual(mock_runcmd.call_count, 1) - self.assertFalse(ret) - - -if __name__ == '__main__': - FileUtils.Configure(True) - test_flag.SetTestMode(True) - unittest.main() + unittest.main() diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py index 18eced64..fbf16fe9 100644 --- a/crosperf/experiment_file.py +++ b/crosperf/experiment_file.py @@ -6,215 +6,237 @@ """The experiment file module. It manages the input file of crosperf.""" from __future__ import print_function + import os.path import re + from settings_factory import SettingsFactory class ExperimentFile(object): - """Class for parsing the experiment file format. + """Class for parsing the experiment file format. - The grammar for this format is: + The grammar for this format is: - experiment = { _FIELD_VALUE_RE | settings } - settings = _OPEN_SETTINGS_RE - { _FIELD_VALUE_RE } - _CLOSE_SETTINGS_RE + experiment = { _FIELD_VALUE_RE | settings } + settings = _OPEN_SETTINGS_RE + { _FIELD_VALUE_RE } + _CLOSE_SETTINGS_RE - Where the regexes are terminals defined below. This results in an format - which looks something like: + Where the regexes are terminals defined below. This results in an format + which looks something like: - field_name: value - settings_type: settings_name { - field_name: value field_name: value - } - """ - - # Field regex, e.g. 
"iterations: 3" - _FIELD_VALUE_RE = re.compile(r'(\+)?\s*(\w+?)(?:\.(\S+))?\s*:\s*(.*)') - # Open settings regex, e.g. "label {" - _OPEN_SETTINGS_RE = re.compile(r'(?:([\w.-]+):)?\s*([\w.-]+)\s*{') - # Close settings regex. - _CLOSE_SETTINGS_RE = re.compile(r'}') - - def __init__(self, experiment_file, overrides=None): - """Construct object from file-like experiment_file. - - Args: - experiment_file: file-like object with text description of experiment. - overrides: A settings object that will override fields in other settings. - - Raises: - Exception: if invalid build type or description is invalid. + settings_type: settings_name { + field_name: value + field_name: value + } """ - self.all_settings = [] - self.global_settings = SettingsFactory().GetSettings('global', 'global') - self.all_settings.append(self.global_settings) - - self._Parse(experiment_file) - - for settings in self.all_settings: - settings.Inherit() - settings.Validate() - if overrides: - settings.Override(overrides) - - def GetSettings(self, settings_type): - """Return nested fields from the experiment file.""" - res = [] - for settings in self.all_settings: - if settings.settings_type == settings_type: - res.append(settings) - return res - - def GetGlobalSettings(self): - """Return the global fields from the experiment file.""" - return self.global_settings - - def _ParseField(self, reader): - """Parse a key/value field.""" - line = reader.CurrentLine().strip() - match = ExperimentFile._FIELD_VALUE_RE.match(line) - append, name, _, text_value = match.groups() - return (name, text_value, append) - - def _ParseSettings(self, reader): - """Parse a settings block.""" - line = reader.CurrentLine().strip() - match = ExperimentFile._OPEN_SETTINGS_RE.match(line) - settings_type = match.group(1) - if settings_type is None: - settings_type = '' - settings_name = match.group(2) - settings = SettingsFactory().GetSettings(settings_name, settings_type) - settings.SetParentSettings(self.global_settings) - - while reader.NextLine(): - line = reader.CurrentLine().strip() - - if not line: - continue - - if ExperimentFile._FIELD_VALUE_RE.match(line): - field = self._ParseField(reader) - settings.SetField(field[0], field[1], field[2]) - elif ExperimentFile._CLOSE_SETTINGS_RE.match(line): - return settings, settings_type - - raise EOFError('Unexpected EOF while parsing settings block.') - - def _Parse(self, experiment_file): - """Parse experiment file and create settings.""" - reader = ExperimentFileReader(experiment_file) - settings_names = {} - try: - while reader.NextLine(): + + # Field regex, e.g. "iterations: 3" + _FIELD_VALUE_RE = re.compile(r"(\+)?\s*(\w+?)(?:\.(\S+))?\s*:\s*(.*)") + # Open settings regex, e.g. "label {" + _OPEN_SETTINGS_RE = re.compile(r"(?:([\w.-]+):)?\s*([\w.-]+)\s*{") + # Close settings regex. + _CLOSE_SETTINGS_RE = re.compile(r"}") + + def __init__(self, experiment_file, overrides=None): + """Construct object from file-like experiment_file. + + Args: + experiment_file: file-like object with text description of experiment. + overrides: A settings object that will override fields in other settings. + + Raises: + Exception: if invalid build type or description is invalid. 
+ """ + self.all_settings = [] + self.global_settings = SettingsFactory().GetSettings("global", "global") + self.all_settings.append(self.global_settings) + + self._Parse(experiment_file) + + for settings in self.all_settings: + settings.Inherit() + settings.Validate() + if overrides: + settings.Override(overrides) + + def GetSettings(self, settings_type): + """Return nested fields from the experiment file.""" + res = [] + for settings in self.all_settings: + if settings.settings_type == settings_type: + res.append(settings) + return res + + def GetGlobalSettings(self): + """Return the global fields from the experiment file.""" + return self.global_settings + + def _ParseField(self, reader): + """Parse a key/value field.""" line = reader.CurrentLine().strip() + match = ExperimentFile._FIELD_VALUE_RE.match(line) + append, name, _, text_value = match.groups() + return (name, text_value, append) - if not line: - continue - - if ExperimentFile._OPEN_SETTINGS_RE.match(line): - new_settings, settings_type = self._ParseSettings(reader) - # We will allow benchmarks with duplicated settings name for now. - # Further decision will be made when parsing benchmark details in - # ExperimentFactory.GetExperiment(). - if settings_type != 'benchmark': - if new_settings.name in settings_names: - raise SyntaxError( - "Duplicate settings name: '%s'." % new_settings.name) - settings_names[new_settings.name] = True - self.all_settings.append(new_settings) - elif ExperimentFile._FIELD_VALUE_RE.match(line): - field = self._ParseField(reader) - self.global_settings.SetField(field[0], field[1], field[2]) - else: - raise IOError('Unexpected line.') - except Exception as err: - raise RuntimeError('Line %d: %s\n==> %s' % (reader.LineNo(), str(err), - reader.CurrentLine(False))) - - def Canonicalize(self): - """Convert parsed experiment file back into an experiment file.""" - res = '' - board = '' - for field_name in self.global_settings.fields: - field = self.global_settings.fields[field_name] - if field.assigned: - res += '%s: %s\n' % (field.name, field.GetString()) - if field.name == 'board': - board = field.GetString() - res += '\n' - - for settings in self.all_settings: - if settings.settings_type != 'global': - res += '%s: %s {\n' % (settings.settings_type, settings.name) - for field_name in settings.fields: - field = settings.fields[field_name] - if field.assigned: - res += '\t%s: %s\n' % (field.name, field.GetString()) - if field.name == 'chromeos_image': - real_file = ( - os.path.realpath(os.path.expanduser(field.GetString()))) - if real_file != field.GetString(): - res += '\t#actual_image: %s\n' % real_file - if field.name == 'build': - chromeos_root_field = settings.fields['chromeos_root'] - if chromeos_root_field: - chromeos_root = chromeos_root_field.GetString() - value = field.GetString() - autotest_field = settings.fields['autotest_path'] - autotest_path = '' - if autotest_field.assigned: - autotest_path = autotest_field.GetString() - debug_field = settings.fields['debug_path'] - debug_path = '' - if debug_field.assigned: - debug_path = autotest_field.GetString() - # Do not download the debug symbols since this function is for - # canonicalizing experiment file. 
- downlad_debug = False - image_path, autotest_path, debug_path = settings.GetXbuddyPath( - value, autotest_path, debug_path, board, chromeos_root, - 'quiet', downlad_debug) - res += '\t#actual_image: %s\n' % image_path - if not autotest_field.assigned: - res += '\t#actual_autotest_path: %s\n' % autotest_path - if not debug_field.assigned: - res += '\t#actual_debug_path: %s\n' % debug_path - - res += '}\n\n' - - return res + def _ParseSettings(self, reader): + """Parse a settings block.""" + line = reader.CurrentLine().strip() + match = ExperimentFile._OPEN_SETTINGS_RE.match(line) + settings_type = match.group(1) + if settings_type is None: + settings_type = "" + settings_name = match.group(2) + settings = SettingsFactory().GetSettings(settings_name, settings_type) + settings.SetParentSettings(self.global_settings) + + while reader.NextLine(): + line = reader.CurrentLine().strip() + + if not line: + continue + + if ExperimentFile._FIELD_VALUE_RE.match(line): + field = self._ParseField(reader) + settings.SetField(field[0], field[1], field[2]) + elif ExperimentFile._CLOSE_SETTINGS_RE.match(line): + return settings, settings_type + + raise EOFError("Unexpected EOF while parsing settings block.") + + def _Parse(self, experiment_file): + """Parse experiment file and create settings.""" + reader = ExperimentFileReader(experiment_file) + settings_names = {} + try: + while reader.NextLine(): + line = reader.CurrentLine().strip() + + if not line: + continue + + if ExperimentFile._OPEN_SETTINGS_RE.match(line): + new_settings, settings_type = self._ParseSettings(reader) + # We will allow benchmarks with duplicated settings name for now. + # Further decision will be made when parsing benchmark details in + # ExperimentFactory.GetExperiment(). + if settings_type != "benchmark": + if new_settings.name in settings_names: + raise SyntaxError( + "Duplicate settings name: '%s'." 
+ % new_settings.name + ) + settings_names[new_settings.name] = True + self.all_settings.append(new_settings) + elif ExperimentFile._FIELD_VALUE_RE.match(line): + field = self._ParseField(reader) + self.global_settings.SetField(field[0], field[1], field[2]) + else: + raise IOError("Unexpected line.") + except Exception as err: + raise RuntimeError( + "Line %d: %s\n==> %s" + % (reader.LineNo(), str(err), reader.CurrentLine(False)) + ) + + def Canonicalize(self): + """Convert parsed experiment file back into an experiment file.""" + res = "" + board = "" + for field_name in self.global_settings.fields: + field = self.global_settings.fields[field_name] + if field.assigned: + res += "%s: %s\n" % (field.name, field.GetString()) + if field.name == "board": + board = field.GetString() + res += "\n" + + for settings in self.all_settings: + if settings.settings_type != "global": + res += "%s: %s {\n" % (settings.settings_type, settings.name) + for field_name in settings.fields: + field = settings.fields[field_name] + if field.assigned: + res += "\t%s: %s\n" % (field.name, field.GetString()) + if field.name == "chromeos_image": + real_file = os.path.realpath( + os.path.expanduser(field.GetString()) + ) + if real_file != field.GetString(): + res += "\t#actual_image: %s\n" % real_file + if field.name == "build": + chromeos_root_field = settings.fields[ + "chromeos_root" + ] + if chromeos_root_field: + chromeos_root = chromeos_root_field.GetString() + value = field.GetString() + autotest_field = settings.fields["autotest_path"] + autotest_path = "" + if autotest_field.assigned: + autotest_path = autotest_field.GetString() + debug_field = settings.fields["debug_path"] + debug_path = "" + if debug_field.assigned: + debug_path = autotest_field.GetString() + # Do not download the debug symbols since this function is for + # canonicalizing experiment file. 
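# Canonicalize() above re-emits the parsed experiment: assigned global
# fields first, then one "<type>: <name> { ... }" block per non-global
# settings object, with tab-indented fields. A minimal sketch of that
# output shape (hypothetical helper, not part of crosperf):
def canonicalize(global_fields, blocks):
    res = "".join("%s: %s\n" % kv for kv in global_fields) + "\n"
    for block_type, name, fields in blocks:
        res += "%s: %s {\n" % (block_type, name)
        res += "".join("\t%s: %s\n" % kv for kv in fields)
        res += "}\n\n"
    return res

print(canonicalize([("board", "x86-alex")],
                   [("benchmark", "PageCycler", [("iterations", "3")])]))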
+ download_debug = False
+ (
+ image_path,
+ autotest_path,
+ debug_path,
+ ) = settings.GetXbuddyPath(
+ value,
+ autotest_path,
+ debug_path,
+ board,
+ chromeos_root,
+ "quiet",
+ download_debug,
+ )
+ res += "\t#actual_image: %s\n" % image_path
+ if not autotest_field.assigned:
+ res += (
+ "\t#actual_autotest_path: %s\n"
+ % autotest_path
+ )
+ if not debug_field.assigned:
+ res += "\t#actual_debug_path: %s\n" % debug_path
+
+ res += "}\n\n"
+
+ return res
class ExperimentFileReader(object):
- """Handle reading lines from an experiment file."""
-
- def __init__(self, file_object):
- self.file_object = file_object
- self.current_line = None
- self.current_line_no = 0
-
- def CurrentLine(self, strip_comment=True):
- """Return the next line from the file, without advancing the iterator."""
- if strip_comment:
- return self._StripComment(self.current_line)
- return self.current_line
-
- def NextLine(self, strip_comment=True):
- """Advance the iterator and return the next line of the file."""
- self.current_line_no += 1
- self.current_line = self.file_object.readline()
- return self.CurrentLine(strip_comment)
-
- def _StripComment(self, line):
- """Strip comments starting with # from a line."""
- if '#' in line:
- line = line[:line.find('#')] + line[-1]
- return line
-
- def LineNo(self):
- """Return the current line number."""
- return self.current_line_no
+ """Handle reading lines from an experiment file."""
+
+ def __init__(self, file_object):
+ self.file_object = file_object
+ self.current_line = None
+ self.current_line_no = 0
+
+ def CurrentLine(self, strip_comment=True):
+ """Return the next line from the file, without advancing the iterator."""
+ if strip_comment:
+ return self._StripComment(self.current_line)
+ return self.current_line
+
+ def NextLine(self, strip_comment=True):
+ """Advance the iterator and return the next line of the file."""
+ self.current_line_no += 1
+ self.current_line = self.file_object.readline()
+ return self.CurrentLine(strip_comment)
+
+ def _StripComment(self, line):
+ """Strip comments starting with # from a line."""
+ if "#" in line:
+ line = line[: line.find("#")] + line[-1]
+ return line
+
+ def LineNo(self):
+ """Return the current line number."""
+ return self.current_line_no
diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py
index 71269ad6..90c70fb3 100755
--- a/crosperf/experiment_file_unittest.py
+++ b/crosperf/experiment_file_unittest.py
@@ -13,6 +13,7 @@ import unittest
 from experiment_file import ExperimentFile
+
 EXPERIMENT_FILE_1 = """
 board: x86-alex
 remote: chromeos-alex3
@@ -158,94 +159,111 @@ label: image2 {
 class ExperimentFileTest(unittest.TestCase):
- """The main class for Experiment File test."""
-
- def testLoadExperimentFile1(self):
- input_file = io.StringIO(EXPERIMENT_FILE_1)
- experiment_file = ExperimentFile(input_file)
- global_settings = experiment_file.GetGlobalSettings()
- self.assertEqual(global_settings.GetField('remote'), ['chromeos-alex3'])
- self.assertEqual(
- global_settings.GetField('perf_args'), 'record -a -e cycles')
- benchmark_settings = experiment_file.GetSettings('benchmark')
- self.assertEqual(len(benchmark_settings), 1)
- self.assertEqual(benchmark_settings[0].name, 'PageCycler')
- self.assertEqual(benchmark_settings[0].GetField('iterations'), 3)
-
- label_settings = experiment_file.GetSettings('label')
- self.assertEqual(len(label_settings), 2)
- self.assertEqual(label_settings[0].name, 'image1')
- self.assertEqual(label_settings[0].GetField('chromeos_image'),
-
'/usr/local/google/cros_image1.bin') - self.assertEqual(label_settings[1].GetField('remote'), ['chromeos-lumpy1']) - self.assertEqual(label_settings[0].GetField('remote'), ['chromeos-alex3']) - - def testOverrideSetting(self): - input_file = io.StringIO(EXPERIMENT_FILE_2) - experiment_file = ExperimentFile(input_file) - global_settings = experiment_file.GetGlobalSettings() - self.assertEqual(global_settings.GetField('remote'), ['chromeos-alex3']) - - benchmark_settings = experiment_file.GetSettings('benchmark') - self.assertEqual(len(benchmark_settings), 2) - self.assertEqual(benchmark_settings[0].name, 'PageCycler') - self.assertEqual(benchmark_settings[0].GetField('iterations'), 3) - self.assertEqual(benchmark_settings[1].name, 'AndroidBench') - self.assertEqual(benchmark_settings[1].GetField('iterations'), 2) - - def testDuplicateLabel(self): - input_file = io.StringIO(EXPERIMENT_FILE_3) - self.assertRaises(Exception, ExperimentFile, input_file) - - def testDuplicateBenchmark(self): - input_file = io.StringIO(EXPERIMENT_FILE_4) - experiment_file = ExperimentFile(input_file) - benchmark_settings = experiment_file.GetSettings('benchmark') - self.assertEqual(benchmark_settings[0].name, 'webrtc') - self.assertEqual(benchmark_settings[0].GetField('test_args'), - '--story-filter=datachannel') - self.assertEqual(benchmark_settings[1].name, 'webrtc') - self.assertEqual(benchmark_settings[1].GetField('test_args'), - '--story-tag-filter=smoothness') - - def testCanonicalize(self): - input_file = io.StringIO(EXPERIMENT_FILE_1) - experiment_file = ExperimentFile(input_file) - res = experiment_file.Canonicalize() - self.assertEqual(res, OUTPUT_FILE) - - def testLoadDutConfigExperimentFile_Good(self): - input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_GOOD) - experiment_file = ExperimentFile(input_file) - global_settings = experiment_file.GetGlobalSettings() - self.assertEqual(global_settings.GetField('turbostat'), False) - self.assertEqual(global_settings.GetField('intel_pstate'), 'no_hwp') - self.assertEqual(global_settings.GetField('governor'), 'powersave') - self.assertEqual(global_settings.GetField('cpu_usage'), 'exclusive_cores') - self.assertEqual(global_settings.GetField('cpu_freq_pct'), 50) - self.assertEqual(global_settings.GetField('cooldown_time'), 5) - self.assertEqual(global_settings.GetField('cooldown_temp'), 38) - self.assertEqual(global_settings.GetField('top_interval'), 5) - - def testLoadDutConfigExperimentFile_WrongGovernor(self): - input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_BAD_GOV) - with self.assertRaises(RuntimeError) as msg: - ExperimentFile(input_file) - self.assertRegex(str(msg.exception), 'governor: misspelled_governor') - self.assertRegex( - str(msg.exception), "Invalid enum value for field 'governor'." - r' Must be one of \(performance, powersave, userspace, ondemand,' - r' conservative, schedutils, sched, interactive\)') - - def testLoadDutConfigExperimentFile_WrongCpuUsage(self): - input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_BAD_CPUUSE) - with self.assertRaises(RuntimeError) as msg: - ExperimentFile(input_file) - self.assertRegex(str(msg.exception), 'cpu_usage: unknown') - self.assertRegex( - str(msg.exception), "Invalid enum value for field 'cpu_usage'." 
- r' Must be one of \(all, big_only, little_only, exclusive_cores\)') - - -if __name__ == '__main__': - unittest.main() + """The main class for Experiment File test.""" + + def testLoadExperimentFile1(self): + input_file = io.StringIO(EXPERIMENT_FILE_1) + experiment_file = ExperimentFile(input_file) + global_settings = experiment_file.GetGlobalSettings() + self.assertEqual(global_settings.GetField("remote"), ["chromeos-alex3"]) + self.assertEqual( + global_settings.GetField("perf_args"), "record -a -e cycles" + ) + benchmark_settings = experiment_file.GetSettings("benchmark") + self.assertEqual(len(benchmark_settings), 1) + self.assertEqual(benchmark_settings[0].name, "PageCycler") + self.assertEqual(benchmark_settings[0].GetField("iterations"), 3) + + label_settings = experiment_file.GetSettings("label") + self.assertEqual(len(label_settings), 2) + self.assertEqual(label_settings[0].name, "image1") + self.assertEqual( + label_settings[0].GetField("chromeos_image"), + "/usr/local/google/cros_image1.bin", + ) + self.assertEqual( + label_settings[1].GetField("remote"), ["chromeos-lumpy1"] + ) + self.assertEqual( + label_settings[0].GetField("remote"), ["chromeos-alex3"] + ) + + def testOverrideSetting(self): + input_file = io.StringIO(EXPERIMENT_FILE_2) + experiment_file = ExperimentFile(input_file) + global_settings = experiment_file.GetGlobalSettings() + self.assertEqual(global_settings.GetField("remote"), ["chromeos-alex3"]) + + benchmark_settings = experiment_file.GetSettings("benchmark") + self.assertEqual(len(benchmark_settings), 2) + self.assertEqual(benchmark_settings[0].name, "PageCycler") + self.assertEqual(benchmark_settings[0].GetField("iterations"), 3) + self.assertEqual(benchmark_settings[1].name, "AndroidBench") + self.assertEqual(benchmark_settings[1].GetField("iterations"), 2) + + def testDuplicateLabel(self): + input_file = io.StringIO(EXPERIMENT_FILE_3) + self.assertRaises(Exception, ExperimentFile, input_file) + + def testDuplicateBenchmark(self): + input_file = io.StringIO(EXPERIMENT_FILE_4) + experiment_file = ExperimentFile(input_file) + benchmark_settings = experiment_file.GetSettings("benchmark") + self.assertEqual(benchmark_settings[0].name, "webrtc") + self.assertEqual( + benchmark_settings[0].GetField("test_args"), + "--story-filter=datachannel", + ) + self.assertEqual(benchmark_settings[1].name, "webrtc") + self.assertEqual( + benchmark_settings[1].GetField("test_args"), + "--story-tag-filter=smoothness", + ) + + def testCanonicalize(self): + input_file = io.StringIO(EXPERIMENT_FILE_1) + experiment_file = ExperimentFile(input_file) + res = experiment_file.Canonicalize() + self.assertEqual(res, OUTPUT_FILE) + + def testLoadDutConfigExperimentFile_Good(self): + input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_GOOD) + experiment_file = ExperimentFile(input_file) + global_settings = experiment_file.GetGlobalSettings() + self.assertEqual(global_settings.GetField("turbostat"), False) + self.assertEqual(global_settings.GetField("intel_pstate"), "no_hwp") + self.assertEqual(global_settings.GetField("governor"), "powersave") + self.assertEqual( + global_settings.GetField("cpu_usage"), "exclusive_cores" + ) + self.assertEqual(global_settings.GetField("cpu_freq_pct"), 50) + self.assertEqual(global_settings.GetField("cooldown_time"), 5) + self.assertEqual(global_settings.GetField("cooldown_temp"), 38) + self.assertEqual(global_settings.GetField("top_interval"), 5) + + def testLoadDutConfigExperimentFile_WrongGovernor(self): + input_file = 
io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_BAD_GOV) + with self.assertRaises(RuntimeError) as msg: + ExperimentFile(input_file) + self.assertRegex(str(msg.exception), "governor: misspelled_governor") + self.assertRegex( + str(msg.exception), + "Invalid enum value for field 'governor'." + r" Must be one of \(performance, powersave, userspace, ondemand," + r" conservative, schedutils, sched, interactive\)", + ) + + def testLoadDutConfigExperimentFile_WrongCpuUsage(self): + input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_BAD_CPUUSE) + with self.assertRaises(RuntimeError) as msg: + ExperimentFile(input_file) + self.assertRegex(str(msg.exception), "cpu_usage: unknown") + self.assertRegex( + str(msg.exception), + "Invalid enum value for field 'cpu_usage'." + r" Must be one of \(all, big_only, little_only, exclusive_cores\)", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py index e0ba4a91..c65917c3 100644 --- a/crosperf/experiment_runner.py +++ b/crosperf/experiment_runner.py @@ -11,353 +11,393 @@ import os import shutil import time -import lock_machine -import test_flag - from cros_utils import command_executer from cros_utils import logger from cros_utils.email_sender import EmailSender from cros_utils.file_utils import FileUtils - -import config from experiment_status import ExperimentStatus +import lock_machine from results_cache import CacheConditions from results_cache import ResultsCache from results_report import HTMLResultsReport -from results_report import TextResultsReport from results_report import JSONResultsReport +from results_report import TextResultsReport from schedv2 import Schedv2 +import test_flag + +import config def _WriteJSONReportToFile(experiment, results_dir, json_report): - """Writes a JSON report to a file in results_dir.""" - has_llvm = any('llvm' in l.compiler for l in experiment.labels) - compiler_string = 'llvm' if has_llvm else 'gcc' - board = experiment.labels[0].board - filename = 'report_%s_%s_%s.%s.json' % (board, json_report.date, - json_report.time.replace( - ':', '.'), compiler_string) - fullname = os.path.join(results_dir, filename) - report_text = json_report.GetReport() - with open(fullname, 'w') as out_file: - out_file.write(report_text) + """Writes a JSON report to a file in results_dir.""" + has_llvm = any("llvm" in l.compiler for l in experiment.labels) + compiler_string = "llvm" if has_llvm else "gcc" + board = experiment.labels[0].board + filename = "report_%s_%s_%s.%s.json" % ( + board, + json_report.date, + json_report.time.replace(":", "."), + compiler_string, + ) + fullname = os.path.join(results_dir, filename) + report_text = json_report.GetReport() + with open(fullname, "w") as out_file: + out_file.write(report_text) class ExperimentRunner(object): - """ExperimentRunner Class.""" - - STATUS_TIME_DELAY = 30 - THREAD_MONITOR_DELAY = 2 - - SUCCEEDED = 0 - HAS_FAILURE = 1 - ALL_FAILED = 2 - - def __init__(self, - experiment, - json_report, - using_schedv2=False, - log=None, - cmd_exec=None): - self._experiment = experiment - self.l = log or logger.GetLogger(experiment.log_dir) - self._ce = cmd_exec or command_executer.GetCommandExecuter(self.l) - self._terminated = False - self.json_report = json_report - self.locked_machines = [] - if experiment.log_level != 'verbose': - self.STATUS_TIME_DELAY = 10 - - # Setting this to True will use crosperf sched v2 (feature in progress). 
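# _WriteJSONReportToFile above derives the report name from the board, the
# report's date and time (":" replaced by "."), and whether any label uses
# LLVM. Example with made-up values:
board, date, time_of_day = "parrot", "2022-03-04", "18:41:25"
compiler_string = "llvm"  # "gcc" when no label compiler contains "llvm"
filename = "report_%s_%s_%s.%s.json" % (
    board, date, time_of_day.replace(":", "."), compiler_string)
print(filename)  # report_parrot_2022-03-04_18.41.25.llvm.json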
- self._using_schedv2 = using_schedv2 - - def _GetMachineList(self): - """Return a list of all requested machines. - - Create a list of all the requested machines, both global requests and - label-specific requests, and return the list. - """ - machines = self._experiment.remote - # All Label.remote is a sublist of experiment.remote. - for l in self._experiment.labels: - for r in l.remote: - assert r in machines - return machines - - def _UpdateMachineList(self, locked_machines): - """Update machines lists to contain only locked machines. - - Go through all the lists of requested machines, both global and - label-specific requests, and remove any machine that we were not - able to lock. - - Args: - locked_machines: A list of the machines we successfully locked. - """ - for m in self._experiment.remote: - if m not in locked_machines: - self._experiment.remote.remove(m) - - for l in self._experiment.labels: - for m in l.remote: - if m not in locked_machines: - l.remote.remove(m) - - def _GetMachineType(self, lock_mgr, machine): - """Get where is the machine from. - - Returns: - The location of the machine: local or crosfleet - """ - # We assume that lab machine always starts with chromeos*, and local - # machines are ip address. - if 'chromeos' in machine: - if lock_mgr.CheckMachineInCrosfleet(machine): - return 'crosfleet' - else: - raise RuntimeError('Lab machine not in Crosfleet.') - return 'local' - - def _LockAllMachines(self, experiment): - """Attempt to globally lock all of the machines requested for run. - - This method tries to lock all machines requested for this crosperf run - in three different modes automatically, to prevent any other crosperf runs - from being able to update/use the machines while this experiment is - running: - - Crosfleet machines: Use crosfleet lease-dut mechanism to lease - - Local machines: Use file lock mechanism to lock - """ - if test_flag.GetTestMode(): - self.locked_machines = self._GetMachineList() - experiment.locked_machines = self.locked_machines - else: - experiment.lock_mgr = lock_machine.LockManager( - self._GetMachineList(), - '', - experiment.labels[0].chromeos_root, - experiment.locks_dir, - log=self.l, - ) - for m in experiment.lock_mgr.machines: - machine_type = self._GetMachineType(experiment.lock_mgr, m) - if machine_type == 'local': - experiment.lock_mgr.AddMachineToLocal(m) - elif machine_type == 'crosfleet': - experiment.lock_mgr.AddMachineToCrosfleet(m) - machine_states = experiment.lock_mgr.GetMachineStates('lock') - experiment.lock_mgr.CheckMachineLocks(machine_states, 'lock') - self.locked_machines = experiment.lock_mgr.UpdateMachines(True) - experiment.locked_machines = self.locked_machines - self._UpdateMachineList(self.locked_machines) - experiment.machine_manager.RemoveNonLockedMachines(self.locked_machines) - if not self.locked_machines: - raise RuntimeError('Unable to lock any machines.') - - def _ClearCacheEntries(self, experiment): - for br in experiment.benchmark_runs: - cache = ResultsCache() - cache.Init(br.label.chromeos_image, br.label.chromeos_root, - br.benchmark.test_name, br.iteration, br.test_args, - br.profiler_args, br.machine_manager, br.machine, - br.label.board, br.cache_conditions, br.logger(), - br.log_level, br.label, br.share_cache, br.benchmark.suite, - br.benchmark.show_all_results, br.benchmark.run_local, - br.benchmark.cwp_dso) - cache_dir = cache.GetCacheDirForWrite() - if os.path.exists(cache_dir): - self.l.LogOutput('Removing cache dir: %s' % cache_dir) - shutil.rmtree(cache_dir) - - def 
_Run(self, experiment): - try: - # We should not lease machines if tests are launched via `crosfleet - # create-test`. This is because leasing DUT in crosfleet will create a - # no-op task on the DUT and new test created will be hanging there. - # TODO(zhizhouy): Need to check whether machine is ready or not before - # assigning a test to it. - if not experiment.no_lock and not experiment.crosfleet: - self._LockAllMachines(experiment) - # Calculate all checksums of avaiable/locked machines, to ensure same - # label has same machines for testing - experiment.SetCheckSums(forceSameImage=True) - if self._using_schedv2: - schedv2 = Schedv2(experiment) - experiment.set_schedv2(schedv2) - if CacheConditions.FALSE in experiment.cache_conditions: - self._ClearCacheEntries(experiment) - status = ExperimentStatus(experiment) - experiment.Run() - last_status_time = 0 - last_status_string = '' - try: - if experiment.log_level != 'verbose': - self.l.LogStartDots() - while not experiment.IsComplete(): - if last_status_time + self.STATUS_TIME_DELAY < time.time(): - last_status_time = time.time() - border = '==============================' - if experiment.log_level == 'verbose': - self.l.LogOutput(border) - self.l.LogOutput(status.GetProgressString()) - self.l.LogOutput(status.GetStatusString()) - self.l.LogOutput(border) + """ExperimentRunner Class.""" + + STATUS_TIME_DELAY = 30 + THREAD_MONITOR_DELAY = 2 + + SUCCEEDED = 0 + HAS_FAILURE = 1 + ALL_FAILED = 2 + + def __init__( + self, + experiment, + json_report, + using_schedv2=False, + log=None, + cmd_exec=None, + ): + self._experiment = experiment + self.l = log or logger.GetLogger(experiment.log_dir) + self._ce = cmd_exec or command_executer.GetCommandExecuter(self.l) + self._terminated = False + self.json_report = json_report + self.locked_machines = [] + if experiment.log_level != "verbose": + self.STATUS_TIME_DELAY = 10 + + # Setting this to True will use crosperf sched v2 (feature in progress). + self._using_schedv2 = using_schedv2 + + def _GetMachineList(self): + """Return a list of all requested machines. + + Create a list of all the requested machines, both global requests and + label-specific requests, and return the list. + """ + machines = self._experiment.remote + # All Label.remote is a sublist of experiment.remote. + for l in self._experiment.labels: + for r in l.remote: + assert r in machines + return machines + + def _UpdateMachineList(self, locked_machines): + """Update machines lists to contain only locked machines. + + Go through all the lists of requested machines, both global and + label-specific requests, and remove any machine that we were not + able to lock. + + Args: + locked_machines: A list of the machines we successfully locked. + """ + for m in self._experiment.remote: + if m not in locked_machines: + self._experiment.remote.remove(m) + + for l in self._experiment.labels: + for m in l.remote: + if m not in locked_machines: + l.remote.remove(m) + + def _GetMachineType(self, lock_mgr, machine): + """Get where is the machine from. + + Returns: + The location of the machine: local or crosfleet + """ + # We assume that lab machine always starts with chromeos*, and local + # machines are ip address. 
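# _UpdateMachineList above prunes each remote list by calling list.remove()
# while iterating over the same list, which can skip the element that
# follows each removal (e.g. two adjacent unlocked machines). Rebuilding
# the list avoids that; a hypothetical helper, not the author's code:
def keep_locked(requested, locked_machines):
    return [m for m in requested if m in locked_machines]

assert keep_locked(["a", "b", "c"], {"a", "c"}) == ["a", "c"]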
+ if "chromeos" in machine: + if lock_mgr.CheckMachineInCrosfleet(machine): + return "crosfleet" else: - current_status_string = status.GetStatusString() - if current_status_string != last_status_string: - self.l.LogEndDots() - self.l.LogOutput(border) - self.l.LogOutput(current_status_string) - self.l.LogOutput(border) - last_status_string = current_status_string - else: - self.l.LogAppendDot() - time.sleep(self.THREAD_MONITOR_DELAY) - except KeyboardInterrupt: - self._terminated = True - self.l.LogError('Ctrl-c pressed. Cleaning up...') - experiment.Terminate() - raise - except SystemExit: - self._terminated = True - self.l.LogError('Unexpected exit. Cleaning up...') - experiment.Terminate() - raise - finally: - experiment.Cleanup() - - def _PrintTable(self, experiment): - self.l.LogOutput(TextResultsReport.FromExperiment(experiment).GetReport()) - - def _Email(self, experiment): - # Only email by default if a new run was completed. - send_mail = False - for benchmark_run in experiment.benchmark_runs: - if not benchmark_run.cache_hit: - send_mail = True - break - if (not send_mail and not experiment.email_to - or config.GetConfig('no_email')): - return - - label_names = [] - for label in experiment.labels: - label_names.append(label.name) - subject = '%s: %s' % (experiment.name, ' vs. '.join(label_names)) - - text_report = TextResultsReport.FromExperiment(experiment, - True).GetReport() - text_report += ('\nResults are stored in %s.\n' % - experiment.results_directory) - text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report - html_report = HTMLResultsReport.FromExperiment(experiment).GetReport() - attachment = EmailSender.Attachment('report.html', html_report) - email_to = experiment.email_to or [] - email_to.append(getpass.getuser()) - EmailSender().SendEmail(email_to, - subject, - text_report, - attachments=[attachment], - msg_type='html') - - def _StoreResults(self, experiment): - if self._terminated: - return self.ALL_FAILED - - results_directory = experiment.results_directory - FileUtils().RmDir(results_directory) - FileUtils().MkDirP(results_directory) - self.l.LogOutput('Storing experiment file in %s.' % results_directory) - experiment_file_path = os.path.join(results_directory, 'experiment.exp') - FileUtils().WriteFile(experiment_file_path, experiment.experiment_file) - - has_failure = False - all_failed = True - - topstats_file = os.path.join(results_directory, 'topstats.log') - self.l.LogOutput('Storing top statistics of each benchmark run into %s.' % - topstats_file) - with open(topstats_file, 'w') as top_fd: - for benchmark_run in experiment.benchmark_runs: - if benchmark_run.result: - # FIXME: Pylint has a bug suggesting the following change, which - # should be fixed in pylint 2.0. Resolve this after pylint >= 2.0. - # Bug: https://github.com/PyCQA/pylint/issues/1984 - # pylint: disable=simplifiable-if-statement - if benchmark_run.result.retval: - has_failure = True - else: - all_failed = False - # Header with benchmark run name. - top_fd.write('%s\n' % str(benchmark_run)) - # Formatted string with top statistics. 
- top_fd.write(benchmark_run.result.FormatStringTopCommands()) - top_fd.write('\n\n') - - if all_failed: - return self.ALL_FAILED - - self.l.LogOutput('Storing results of each benchmark run.') - for benchmark_run in experiment.benchmark_runs: - if benchmark_run.result: - benchmark_run_name = ''.join(ch for ch in benchmark_run.name - if ch.isalnum()) - benchmark_run_path = os.path.join(results_directory, - benchmark_run_name) - if experiment.compress_results: - benchmark_run.result.CompressResultsTo(benchmark_run_path) + raise RuntimeError("Lab machine not in Crosfleet.") + return "local" + + def _LockAllMachines(self, experiment): + """Attempt to globally lock all of the machines requested for run. + + This method tries to lock all machines requested for this crosperf run + in three different modes automatically, to prevent any other crosperf runs + from being able to update/use the machines while this experiment is + running: + - Crosfleet machines: Use crosfleet lease-dut mechanism to lease + - Local machines: Use file lock mechanism to lock + """ + if test_flag.GetTestMode(): + self.locked_machines = self._GetMachineList() + experiment.locked_machines = self.locked_machines else: - benchmark_run.result.CopyResultsTo(benchmark_run_path) - benchmark_run.result.CleanUp(benchmark_run.benchmark.rm_chroot_tmp) - - self.l.LogOutput('Storing results report in %s.' % results_directory) - results_table_path = os.path.join(results_directory, 'results.html') - report = HTMLResultsReport.FromExperiment(experiment).GetReport() - if self.json_report: - json_report = JSONResultsReport.FromExperiment(experiment, - json_args={'indent': 2}) - _WriteJSONReportToFile(experiment, results_directory, json_report) - - FileUtils().WriteFile(results_table_path, report) - - self.l.LogOutput('Storing email message body in %s.' % results_directory) - msg_file_path = os.path.join(results_directory, 'msg_body.html') - text_report = TextResultsReport.FromExperiment(experiment, - True).GetReport() - text_report += ('\nResults are stored in %s.\n' % - experiment.results_directory) - msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report - FileUtils().WriteFile(msg_file_path, msg_body) - - return self.SUCCEEDED if not has_failure else self.HAS_FAILURE - - def Run(self): - try: - self._Run(self._experiment) - finally: - # Always print the report at the end of the run. 
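# Both the old and the reformatted Run() keep the reporting in `finally`,
# and `return ret` appears to sit inside that block as well. A minimal
# model of the control flow (hypothetical stand-in functions); note that a
# return inside `finally` also swallows any in-flight exception:
def run(run_fn, print_table, store_results, email):
    try:
        run_fn()
    finally:
        print_table()
        ret = store_results()
        if ret != 2:  # stand-in for ExperimentRunner.ALL_FAILED
            email()
        return ret

print(run(lambda: None, lambda: None, lambda: 0, lambda: None))  # 0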
- self._PrintTable(self._experiment) - ret = self._StoreResults(self._experiment) - if ret != self.ALL_FAILED: - self._Email(self._experiment) - return ret + experiment.lock_mgr = lock_machine.LockManager( + self._GetMachineList(), + "", + experiment.labels[0].chromeos_root, + experiment.locks_dir, + log=self.l, + ) + for m in experiment.lock_mgr.machines: + machine_type = self._GetMachineType(experiment.lock_mgr, m) + if machine_type == "local": + experiment.lock_mgr.AddMachineToLocal(m) + elif machine_type == "crosfleet": + experiment.lock_mgr.AddMachineToCrosfleet(m) + machine_states = experiment.lock_mgr.GetMachineStates("lock") + experiment.lock_mgr.CheckMachineLocks(machine_states, "lock") + self.locked_machines = experiment.lock_mgr.UpdateMachines(True) + experiment.locked_machines = self.locked_machines + self._UpdateMachineList(self.locked_machines) + experiment.machine_manager.RemoveNonLockedMachines( + self.locked_machines + ) + if not self.locked_machines: + raise RuntimeError("Unable to lock any machines.") + + def _ClearCacheEntries(self, experiment): + for br in experiment.benchmark_runs: + cache = ResultsCache() + cache.Init( + br.label.chromeos_image, + br.label.chromeos_root, + br.benchmark.test_name, + br.iteration, + br.test_args, + br.profiler_args, + br.machine_manager, + br.machine, + br.label.board, + br.cache_conditions, + br.logger(), + br.log_level, + br.label, + br.share_cache, + br.benchmark.suite, + br.benchmark.show_all_results, + br.benchmark.run_local, + br.benchmark.cwp_dso, + ) + cache_dir = cache.GetCacheDirForWrite() + if os.path.exists(cache_dir): + self.l.LogOutput("Removing cache dir: %s" % cache_dir) + shutil.rmtree(cache_dir) + + def _Run(self, experiment): + try: + # We should not lease machines if tests are launched via `crosfleet + # create-test`. This is because leasing DUT in crosfleet will create a + # no-op task on the DUT and new test created will be hanging there. + # TODO(zhizhouy): Need to check whether machine is ready or not before + # assigning a test to it. + if not experiment.no_lock and not experiment.crosfleet: + self._LockAllMachines(experiment) + # Calculate all checksums of avaiable/locked machines, to ensure same + # label has same machines for testing + experiment.SetCheckSums(forceSameImage=True) + if self._using_schedv2: + schedv2 = Schedv2(experiment) + experiment.set_schedv2(schedv2) + if CacheConditions.FALSE in experiment.cache_conditions: + self._ClearCacheEntries(experiment) + status = ExperimentStatus(experiment) + experiment.Run() + last_status_time = 0 + last_status_string = "" + try: + if experiment.log_level != "verbose": + self.l.LogStartDots() + while not experiment.IsComplete(): + if last_status_time + self.STATUS_TIME_DELAY < time.time(): + last_status_time = time.time() + border = "==============================" + if experiment.log_level == "verbose": + self.l.LogOutput(border) + self.l.LogOutput(status.GetProgressString()) + self.l.LogOutput(status.GetStatusString()) + self.l.LogOutput(border) + else: + current_status_string = status.GetStatusString() + if current_status_string != last_status_string: + self.l.LogEndDots() + self.l.LogOutput(border) + self.l.LogOutput(current_status_string) + self.l.LogOutput(border) + last_status_string = current_status_string + else: + self.l.LogAppendDot() + time.sleep(self.THREAD_MONITOR_DELAY) + except KeyboardInterrupt: + self._terminated = True + self.l.LogError("Ctrl-c pressed. 
Cleaning up...") + experiment.Terminate() + raise + except SystemExit: + self._terminated = True + self.l.LogError("Unexpected exit. Cleaning up...") + experiment.Terminate() + raise + finally: + experiment.Cleanup() + + def _PrintTable(self, experiment): + self.l.LogOutput( + TextResultsReport.FromExperiment(experiment).GetReport() + ) + + def _Email(self, experiment): + # Only email by default if a new run was completed. + send_mail = False + for benchmark_run in experiment.benchmark_runs: + if not benchmark_run.cache_hit: + send_mail = True + break + if ( + not send_mail + and not experiment.email_to + or config.GetConfig("no_email") + ): + return + + label_names = [] + for label in experiment.labels: + label_names.append(label.name) + subject = "%s: %s" % (experiment.name, " vs. ".join(label_names)) + + text_report = TextResultsReport.FromExperiment( + experiment, True + ).GetReport() + text_report += ( + "\nResults are stored in %s.\n" % experiment.results_directory + ) + text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report + html_report = HTMLResultsReport.FromExperiment(experiment).GetReport() + attachment = EmailSender.Attachment("report.html", html_report) + email_to = experiment.email_to or [] + email_to.append(getpass.getuser()) + EmailSender().SendEmail( + email_to, + subject, + text_report, + attachments=[attachment], + msg_type="html", + ) + + def _StoreResults(self, experiment): + if self._terminated: + return self.ALL_FAILED + + results_directory = experiment.results_directory + FileUtils().RmDir(results_directory) + FileUtils().MkDirP(results_directory) + self.l.LogOutput("Storing experiment file in %s." % results_directory) + experiment_file_path = os.path.join(results_directory, "experiment.exp") + FileUtils().WriteFile(experiment_file_path, experiment.experiment_file) + + has_failure = False + all_failed = True + + topstats_file = os.path.join(results_directory, "topstats.log") + self.l.LogOutput( + "Storing top statistics of each benchmark run into %s." + % topstats_file + ) + with open(topstats_file, "w") as top_fd: + for benchmark_run in experiment.benchmark_runs: + if benchmark_run.result: + # FIXME: Pylint has a bug suggesting the following change, which + # should be fixed in pylint 2.0. Resolve this after pylint >= 2.0. + # Bug: https://github.com/PyCQA/pylint/issues/1984 + # pylint: disable=simplifiable-if-statement + if benchmark_run.result.retval: + has_failure = True + else: + all_failed = False + # Header with benchmark run name. + top_fd.write("%s\n" % str(benchmark_run)) + # Formatted string with top statistics. + top_fd.write(benchmark_run.result.FormatStringTopCommands()) + top_fd.write("\n\n") + + if all_failed: + return self.ALL_FAILED + + self.l.LogOutput("Storing results of each benchmark run.") + for benchmark_run in experiment.benchmark_runs: + if benchmark_run.result: + benchmark_run_name = "".join( + ch for ch in benchmark_run.name if ch.isalnum() + ) + benchmark_run_path = os.path.join( + results_directory, benchmark_run_name + ) + if experiment.compress_results: + benchmark_run.result.CompressResultsTo(benchmark_run_path) + else: + benchmark_run.result.CopyResultsTo(benchmark_run_path) + benchmark_run.result.CleanUp( + benchmark_run.benchmark.rm_chroot_tmp + ) + + self.l.LogOutput("Storing results report in %s." 
% results_directory) + results_table_path = os.path.join(results_directory, "results.html") + report = HTMLResultsReport.FromExperiment(experiment).GetReport() + if self.json_report: + json_report = JSONResultsReport.FromExperiment( + experiment, json_args={"indent": 2} + ) + _WriteJSONReportToFile(experiment, results_directory, json_report) + + FileUtils().WriteFile(results_table_path, report) + + self.l.LogOutput( + "Storing email message body in %s." % results_directory + ) + msg_file_path = os.path.join(results_directory, "msg_body.html") + text_report = TextResultsReport.FromExperiment( + experiment, True + ).GetReport() + text_report += ( + "\nResults are stored in %s.\n" % experiment.results_directory + ) + msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report + FileUtils().WriteFile(msg_file_path, msg_body) + + return self.SUCCEEDED if not has_failure else self.HAS_FAILURE + + def Run(self): + try: + self._Run(self._experiment) + finally: + # Always print the report at the end of the run. + self._PrintTable(self._experiment) + ret = self._StoreResults(self._experiment) + if ret != self.ALL_FAILED: + self._Email(self._experiment) + return ret class MockExperimentRunner(ExperimentRunner): - """Mocked ExperimentRunner for testing.""" + """Mocked ExperimentRunner for testing.""" - def __init__(self, experiment, json_report): - super(MockExperimentRunner, self).__init__(experiment, json_report) + def __init__(self, experiment, json_report): + super(MockExperimentRunner, self).__init__(experiment, json_report) - def _Run(self, experiment): - self.l.LogOutput("Would run the following experiment: '%s'." % - experiment.name) + def _Run(self, experiment): + self.l.LogOutput( + "Would run the following experiment: '%s'." % experiment.name + ) - def _PrintTable(self, experiment): - self.l.LogOutput('Would print the experiment table.') + def _PrintTable(self, experiment): + self.l.LogOutput("Would print the experiment table.") - def _Email(self, experiment): - self.l.LogOutput('Would send result email.') + def _Email(self, experiment): + self.l.LogOutput("Would send result email.") - def _StoreResults(self, experiment): - self.l.LogOutput('Would store the results.') + def _StoreResults(self, experiment): + self.l.LogOutput("Would store the results.") diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py index a39f9f1f..241e1343 100755 --- a/crosperf/experiment_runner_unittest.py +++ b/crosperf/experiment_runner_unittest.py @@ -13,25 +13,24 @@ import getpass import io import os import time - import unittest import unittest.mock as mock +from cros_utils import command_executer +from cros_utils.email_sender import EmailSender +from cros_utils.file_utils import FileUtils +from experiment_factory import ExperimentFactory +from experiment_file import ExperimentFile import experiment_runner import experiment_status import machine_manager -import config -import test_flag - -from experiment_factory import ExperimentFactory -from experiment_file import ExperimentFile from results_cache import Result from results_report import HTMLResultsReport from results_report import TextResultsReport +import test_flag + +import config -from cros_utils import command_executer -from cros_utils.email_sender import EmailSender -from cros_utils.file_utils import FileUtils EXPERIMENT_FILE_1 = """ board: parrot @@ -57,445 +56,513 @@ EXPERIMENT_FILE_1 = """ class FakeLogger(object): - """Fake logger for tests.""" - - def __init__(self): - self.LogOutputCount = 0 - 
self.LogErrorCount = 0 - self.output_msgs = [] - self.error_msgs = [] - self.dot_count = 0 - self.LogStartDotsCount = 0 - self.LogEndDotsCount = 0 - self.LogAppendDotCount = 0 - - def LogOutput(self, msg): - self.LogOutputCount += 1 - self.output_msgs.append(msg) - - def LogError(self, msg): - self.LogErrorCount += 1 - self.error_msgs.append(msg) - - def LogStartDots(self): - self.LogStartDotsCount += 1 - self.dot_count += 1 - - def LogAppendDot(self): - self.LogAppendDotCount += 1 - self.dot_count += 1 - - def LogEndDots(self): - self.LogEndDotsCount += 1 - - def Reset(self): - self.LogOutputCount = 0 - self.LogErrorCount = 0 - self.output_msgs = [] - self.error_msgs = [] - self.dot_count = 0 - self.LogStartDotsCount = 0 - self.LogEndDotsCount = 0 - self.LogAppendDotCount = 0 + """Fake logger for tests.""" + + def __init__(self): + self.LogOutputCount = 0 + self.LogErrorCount = 0 + self.output_msgs = [] + self.error_msgs = [] + self.dot_count = 0 + self.LogStartDotsCount = 0 + self.LogEndDotsCount = 0 + self.LogAppendDotCount = 0 + + def LogOutput(self, msg): + self.LogOutputCount += 1 + self.output_msgs.append(msg) + + def LogError(self, msg): + self.LogErrorCount += 1 + self.error_msgs.append(msg) + + def LogStartDots(self): + self.LogStartDotsCount += 1 + self.dot_count += 1 + + def LogAppendDot(self): + self.LogAppendDotCount += 1 + self.dot_count += 1 + + def LogEndDots(self): + self.LogEndDotsCount += 1 + + def Reset(self): + self.LogOutputCount = 0 + self.LogErrorCount = 0 + self.output_msgs = [] + self.error_msgs = [] + self.dot_count = 0 + self.LogStartDotsCount = 0 + self.LogEndDotsCount = 0 + self.LogAppendDotCount = 0 class ExperimentRunnerTest(unittest.TestCase): - """Test for experiment runner class.""" - - run_count = 0 - is_complete_count = 0 - mock_logger = FakeLogger() - mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) - - def make_fake_experiment(self): - test_flag.SetTestMode(True) - experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1)) - experiment = ExperimentFactory().GetExperiment( - experiment_file, working_directory='', log_dir='') - return experiment - - @mock.patch.object(machine_manager.MachineManager, 'AddMachine') - @mock.patch.object(os.path, 'isfile') - - # pylint: disable=arguments-differ - def setUp(self, mock_isfile, _mock_addmachine): - mock_isfile.return_value = True - self.exp = self.make_fake_experiment() - - def test_init(self): - er = experiment_runner.ExperimentRunner( - self.exp, - json_report=False, - using_schedv2=False, - log=self.mock_logger, - cmd_exec=self.mock_cmd_exec) - self.assertFalse(er._terminated) - self.assertEqual(er.STATUS_TIME_DELAY, 10) - - self.exp.log_level = 'verbose' - er = experiment_runner.ExperimentRunner( - self.exp, - json_report=False, - using_schedv2=False, - log=self.mock_logger, - cmd_exec=self.mock_cmd_exec) - self.assertEqual(er.STATUS_TIME_DELAY, 30) - - @mock.patch.object(time, 'time') - @mock.patch.object(time, 'sleep') - @mock.patch.object(experiment_status.ExperimentStatus, 'GetStatusString') - @mock.patch.object(experiment_status.ExperimentStatus, 'GetProgressString') - def test_run(self, mock_progress_string, mock_status_string, mock_sleep, - mock_time): - - self.run_count = 0 - self.is_complete_count = 0 - mock_sleep.return_value = None - # pylint: disable=range-builtin-not-iterating - mock_time.side_effect = range(1, 50, 1) - - def reset(): - self.run_count = 0 - self.is_complete_count = 0 - - def FakeRun(): - self.run_count += 1 - return 0 - - def FakeIsComplete(): - 
self.is_complete_count += 1 - if self.is_complete_count < 6: - return False - else: - return True - - self.mock_logger.Reset() - self.exp.Run = FakeRun - self.exp.IsComplete = FakeIsComplete - - # Test 1: log_level == "quiet" - self.exp.log_level = 'quiet' - er = experiment_runner.ExperimentRunner( - self.exp, - json_report=False, - using_schedv2=False, - log=self.mock_logger, - cmd_exec=self.mock_cmd_exec) - er.STATUS_TIME_DELAY = 2 - mock_status_string.return_value = 'Fake status string' - er._Run(self.exp) - self.assertEqual(self.run_count, 1) - self.assertTrue(self.is_complete_count > 0) - self.assertEqual(self.mock_logger.LogStartDotsCount, 1) - self.assertEqual(self.mock_logger.LogAppendDotCount, 1) - self.assertEqual(self.mock_logger.LogEndDotsCount, 1) - self.assertEqual(self.mock_logger.dot_count, 2) - self.assertEqual(mock_progress_string.call_count, 0) - self.assertEqual(mock_status_string.call_count, 2) - self.assertEqual(self.mock_logger.output_msgs, [ - '==============================', 'Fake status string', - '==============================' - ]) - self.assertEqual(len(self.mock_logger.error_msgs), 0) - - # Test 2: log_level == "average" - self.mock_logger.Reset() - reset() - self.exp.log_level = 'average' - mock_status_string.call_count = 0 - er = experiment_runner.ExperimentRunner( - self.exp, - json_report=False, - using_schedv2=False, - log=self.mock_logger, - cmd_exec=self.mock_cmd_exec) - er.STATUS_TIME_DELAY = 2 - mock_status_string.return_value = 'Fake status string' - er._Run(self.exp) - self.assertEqual(self.run_count, 1) - self.assertTrue(self.is_complete_count > 0) - self.assertEqual(self.mock_logger.LogStartDotsCount, 1) - self.assertEqual(self.mock_logger.LogAppendDotCount, 1) - self.assertEqual(self.mock_logger.LogEndDotsCount, 1) - self.assertEqual(self.mock_logger.dot_count, 2) - self.assertEqual(mock_progress_string.call_count, 0) - self.assertEqual(mock_status_string.call_count, 2) - self.assertEqual(self.mock_logger.output_msgs, [ - '==============================', 'Fake status string', - '==============================' - ]) - self.assertEqual(len(self.mock_logger.error_msgs), 0) - - # Test 3: log_level == "verbose" - self.mock_logger.Reset() - reset() - self.exp.log_level = 'verbose' - mock_status_string.call_count = 0 - er = experiment_runner.ExperimentRunner( - self.exp, - json_report=False, - using_schedv2=False, - log=self.mock_logger, - cmd_exec=self.mock_cmd_exec) - er.STATUS_TIME_DELAY = 2 - mock_status_string.return_value = 'Fake status string' - mock_progress_string.return_value = 'Fake progress string' - er._Run(self.exp) - self.assertEqual(self.run_count, 1) - self.assertTrue(self.is_complete_count > 0) - self.assertEqual(self.mock_logger.LogStartDotsCount, 0) - self.assertEqual(self.mock_logger.LogAppendDotCount, 0) - self.assertEqual(self.mock_logger.LogEndDotsCount, 0) - self.assertEqual(self.mock_logger.dot_count, 0) - self.assertEqual(mock_progress_string.call_count, 2) - self.assertEqual(mock_status_string.call_count, 2) - self.assertEqual(self.mock_logger.output_msgs, [ - '==============================', 'Fake progress string', - 'Fake status string', '==============================', - '==============================', 'Fake progress string', - 'Fake status string', '==============================' - ]) - self.assertEqual(len(self.mock_logger.error_msgs), 0) - - @mock.patch.object(TextResultsReport, 'GetReport') - def test_print_table(self, mock_report): - self.mock_logger.Reset() - mock_report.return_value = 'This is a fake 
experiment report.' - er = experiment_runner.ExperimentRunner( - self.exp, - json_report=False, - using_schedv2=False, - log=self.mock_logger, - cmd_exec=self.mock_cmd_exec) - er._PrintTable(self.exp) - self.assertEqual(mock_report.call_count, 1) - self.assertEqual(self.mock_logger.output_msgs, - ['This is a fake experiment report.']) - - @mock.patch.object(HTMLResultsReport, 'GetReport') - @mock.patch.object(TextResultsReport, 'GetReport') - @mock.patch.object(EmailSender, 'Attachment') - @mock.patch.object(EmailSender, 'SendEmail') - @mock.patch.object(getpass, 'getuser') - def test_email(self, mock_getuser, mock_emailer, mock_attachment, - mock_text_report, mock_html_report): - - mock_getuser.return_value = 'john.smith@google.com' - mock_text_report.return_value = 'This is a fake text report.' - mock_html_report.return_value = 'This is a fake html report.' - - self.mock_logger.Reset() - config.AddConfig('no_email', True) - self.exp.email_to = ['jane.doe@google.com'] - er = experiment_runner.ExperimentRunner( - self.exp, - json_report=False, - using_schedv2=False, - log=self.mock_logger, - cmd_exec=self.mock_cmd_exec) - # Test 1. Config:no_email; exp.email_to set ==> no email sent - er._Email(self.exp) - self.assertEqual(mock_getuser.call_count, 0) - self.assertEqual(mock_emailer.call_count, 0) - self.assertEqual(mock_attachment.call_count, 0) - self.assertEqual(mock_text_report.call_count, 0) - self.assertEqual(mock_html_report.call_count, 0) - - # Test 2. Config: email. exp.email_to set; cache hit. => send email - self.mock_logger.Reset() - config.AddConfig('no_email', False) - for r in self.exp.benchmark_runs: - r.cache_hit = True - er._Email(self.exp) - self.assertEqual(mock_getuser.call_count, 1) - self.assertEqual(mock_emailer.call_count, 1) - self.assertEqual(mock_attachment.call_count, 1) - self.assertEqual(mock_text_report.call_count, 1) - self.assertEqual(mock_html_report.call_count, 1) - self.assertEqual(len(mock_emailer.call_args), 2) - self.assertEqual(mock_emailer.call_args[0], - (['jane.doe@google.com', 'john.smith@google.com' - ], ': image1 vs. image2', - "<pre style='font-size: 13px'>This is a fake text " - 'report.\nResults are stored in _results.\n</pre>')) - self.assertTrue(isinstance(mock_emailer.call_args[1], dict)) - self.assertEqual(len(mock_emailer.call_args[1]), 2) - self.assertTrue('attachments' in mock_emailer.call_args[1].keys()) - self.assertEqual(mock_emailer.call_args[1]['msg_type'], 'html') - - mock_attachment.assert_called_with('report.html', - 'This is a fake html report.') - - # Test 3. Config: email; exp.mail_to set; no cache hit. => send email - self.mock_logger.Reset() - mock_getuser.reset_mock() - mock_emailer.reset_mock() - mock_attachment.reset_mock() - mock_text_report.reset_mock() - mock_html_report.reset_mock() - config.AddConfig('no_email', False) - for r in self.exp.benchmark_runs: - r.cache_hit = False - er._Email(self.exp) - self.assertEqual(mock_getuser.call_count, 1) - self.assertEqual(mock_emailer.call_count, 1) - self.assertEqual(mock_attachment.call_count, 1) - self.assertEqual(mock_text_report.call_count, 1) - self.assertEqual(mock_html_report.call_count, 1) - self.assertEqual(len(mock_emailer.call_args), 2) - self.assertEqual(mock_emailer.call_args[0], - ([ - 'jane.doe@google.com', 'john.smith@google.com', - 'john.smith@google.com' - ], ': image1 vs. 
image2', - "<pre style='font-size: 13px'>This is a fake text " - 'report.\nResults are stored in _results.\n</pre>')) - self.assertTrue(isinstance(mock_emailer.call_args[1], dict)) - self.assertEqual(len(mock_emailer.call_args[1]), 2) - self.assertTrue('attachments' in mock_emailer.call_args[1].keys()) - self.assertEqual(mock_emailer.call_args[1]['msg_type'], 'html') - - mock_attachment.assert_called_with('report.html', - 'This is a fake html report.') - - # Test 4. Config: email; exp.mail_to = None; no cache hit. => send email - self.mock_logger.Reset() - mock_getuser.reset_mock() - mock_emailer.reset_mock() - mock_attachment.reset_mock() - mock_text_report.reset_mock() - mock_html_report.reset_mock() - self.exp.email_to = [] - er._Email(self.exp) - self.assertEqual(mock_getuser.call_count, 1) - self.assertEqual(mock_emailer.call_count, 1) - self.assertEqual(mock_attachment.call_count, 1) - self.assertEqual(mock_text_report.call_count, 1) - self.assertEqual(mock_html_report.call_count, 1) - self.assertEqual(len(mock_emailer.call_args), 2) - self.assertEqual(mock_emailer.call_args[0], - (['john.smith@google.com'], ': image1 vs. image2', - "<pre style='font-size: 13px'>This is a fake text " - 'report.\nResults are stored in _results.\n</pre>')) - self.assertTrue(isinstance(mock_emailer.call_args[1], dict)) - self.assertEqual(len(mock_emailer.call_args[1]), 2) - self.assertTrue('attachments' in mock_emailer.call_args[1].keys()) - self.assertEqual(mock_emailer.call_args[1]['msg_type'], 'html') - - mock_attachment.assert_called_with('report.html', - 'This is a fake html report.') - - # Test 5. Config: email; exp.mail_to = None; cache hit => no email sent - self.mock_logger.Reset() - mock_getuser.reset_mock() - mock_emailer.reset_mock() - mock_attachment.reset_mock() - mock_text_report.reset_mock() - mock_html_report.reset_mock() - for r in self.exp.benchmark_runs: - r.cache_hit = True - er._Email(self.exp) - self.assertEqual(mock_getuser.call_count, 0) - self.assertEqual(mock_emailer.call_count, 0) - self.assertEqual(mock_attachment.call_count, 0) - self.assertEqual(mock_text_report.call_count, 0) - self.assertEqual(mock_html_report.call_count, 0) - - @mock.patch.object(FileUtils, 'RmDir') - @mock.patch.object(FileUtils, 'MkDirP') - @mock.patch.object(FileUtils, 'WriteFile') - @mock.patch.object(HTMLResultsReport, 'FromExperiment') - @mock.patch.object(TextResultsReport, 'FromExperiment') - @mock.patch.object(Result, 'CompressResultsTo') - @mock.patch.object(Result, 'CopyResultsTo') - @mock.patch.object(Result, 'CleanUp') - @mock.patch.object(Result, 'FormatStringTopCommands') - @mock.patch('builtins.open', new_callable=mock.mock_open) - def test_store_results(self, mock_open, mock_top_commands, mock_cleanup, - mock_copy, mock_compress, _mock_text_report, - mock_report, mock_writefile, mock_mkdir, mock_rmdir): - - self.mock_logger.Reset() - self.exp.results_directory = '/usr/local/crosperf-results' - bench_run = self.exp.benchmark_runs[5] - bench_path = '/usr/local/crosperf-results/' + ''.join( - ch for ch in bench_run.name if ch.isalnum()) - self.assertEqual(len(self.exp.benchmark_runs), 6) - - er = experiment_runner.ExperimentRunner( - self.exp, - json_report=False, - using_schedv2=False, - log=self.mock_logger, - cmd_exec=self.mock_cmd_exec) - - # Test 1. Make sure nothing is done if _terminated is true. 
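# The test_email assertions above index into mock_emailer.call_args:
# element [0] is the positional-argument tuple and element [1] is the
# keyword-argument dict. A tiny self-contained illustration:
import unittest.mock as mock

m = mock.Mock()
m("to@example.com", msg_type="html")
assert m.call_args[0] == ("to@example.com",)
assert m.call_args[1] == {"msg_type": "html"}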
- er._terminated = True - er._StoreResults(self.exp) - self.assertEqual(mock_cleanup.call_count, 0) - self.assertEqual(mock_copy.call_count, 0) - self.assertEqual(mock_compress.call_count, 0) - self.assertEqual(mock_report.call_count, 0) - self.assertEqual(mock_writefile.call_count, 0) - self.assertEqual(mock_mkdir.call_count, 0) - self.assertEqual(mock_rmdir.call_count, 0) - self.assertEqual(self.mock_logger.LogOutputCount, 0) - self.assertEqual(mock_open.call_count, 0) - self.assertEqual(mock_top_commands.call_count, 0) - - # Test 2. _terminated is false; everything works properly. - fake_result = Result(self.mock_logger, self.exp.labels[0], 'average', - 'daisy1') - for r in self.exp.benchmark_runs: - r.result = fake_result - er._terminated = False - self.exp.compress_results = False - er._StoreResults(self.exp) - self.assertEqual(mock_cleanup.call_count, 6) - mock_cleanup.assert_called_with(bench_run.benchmark.rm_chroot_tmp) - self.assertEqual(mock_copy.call_count, 6) - mock_copy.assert_called_with(bench_path) - self.assertEqual(mock_writefile.call_count, 3) - self.assertEqual(len(mock_writefile.call_args_list), 3) - first_args = mock_writefile.call_args_list[0] - second_args = mock_writefile.call_args_list[1] - self.assertEqual(first_args[0][0], - '/usr/local/crosperf-results/experiment.exp') - self.assertEqual(second_args[0][0], - '/usr/local/crosperf-results/results.html') - self.assertEqual(mock_mkdir.call_count, 1) - mock_mkdir.assert_called_with('/usr/local/crosperf-results') - self.assertEqual(mock_rmdir.call_count, 1) - mock_rmdir.assert_called_with('/usr/local/crosperf-results') - self.assertEqual(self.mock_logger.LogOutputCount, 5) - self.assertEqual(self.mock_logger.output_msgs, [ - 'Storing experiment file in /usr/local/crosperf-results.', - 'Storing top statistics of each benchmark run into' - ' /usr/local/crosperf-results/topstats.log.', - 'Storing results of each benchmark run.', - 'Storing results report in /usr/local/crosperf-results.', - 'Storing email message body in /usr/local/crosperf-results.', - ]) - self.assertEqual(mock_open.call_count, 1) - # Check write to a topstats.log file. - mock_open.assert_called_with('/usr/local/crosperf-results/topstats.log', - 'w') - mock_open().write.assert_called() - - # Check top calls with no arguments. - topcalls = [mock.call()] * 6 - self.assertEqual(mock_top_commands.call_args_list, topcalls) - - # Test 3. Test compress_results. 
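# Test 2 above relies on mock.mock_open so the topstats.log write can be
# asserted without touching the filesystem; the same pattern in isolation
# (the path here is illustrative):
import unittest.mock as mock

with mock.patch("builtins.open", new_callable=mock.mock_open) as mock_file:
    with open("/tmp/topstats.log", "w") as fd:
        fd.write("benchmark header\n")
    mock_file.assert_called_with("/tmp/topstats.log", "w")
    mock_file().write.assert_called_with("benchmark header\n")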
- self.exp.compress_results = True - mock_copy.call_count = 0 - mock_compress.call_count = 0 - er._StoreResults(self.exp) - self.assertEqual(mock_copy.call_count, 0) - mock_copy.assert_called_with(bench_path) - self.assertEqual(mock_compress.call_count, 6) - mock_compress.assert_called_with(bench_path) - - -if __name__ == '__main__': - unittest.main() + """Test for experiment runner class.""" + + run_count = 0 + is_complete_count = 0 + mock_logger = FakeLogger() + mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) + + def make_fake_experiment(self): + test_flag.SetTestMode(True) + experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1)) + experiment = ExperimentFactory().GetExperiment( + experiment_file, working_directory="", log_dir="" + ) + return experiment + + @mock.patch.object(machine_manager.MachineManager, "AddMachine") + @mock.patch.object(os.path, "isfile") + + # pylint: disable=arguments-differ + def setUp(self, mock_isfile, _mock_addmachine): + mock_isfile.return_value = True + self.exp = self.make_fake_experiment() + + def test_init(self): + er = experiment_runner.ExperimentRunner( + self.exp, + json_report=False, + using_schedv2=False, + log=self.mock_logger, + cmd_exec=self.mock_cmd_exec, + ) + self.assertFalse(er._terminated) + self.assertEqual(er.STATUS_TIME_DELAY, 10) + + self.exp.log_level = "verbose" + er = experiment_runner.ExperimentRunner( + self.exp, + json_report=False, + using_schedv2=False, + log=self.mock_logger, + cmd_exec=self.mock_cmd_exec, + ) + self.assertEqual(er.STATUS_TIME_DELAY, 30) + + @mock.patch.object(time, "time") + @mock.patch.object(time, "sleep") + @mock.patch.object(experiment_status.ExperimentStatus, "GetStatusString") + @mock.patch.object(experiment_status.ExperimentStatus, "GetProgressString") + def test_run( + self, mock_progress_string, mock_status_string, mock_sleep, mock_time + ): + + self.run_count = 0 + self.is_complete_count = 0 + mock_sleep.return_value = None + # pylint: disable=range-builtin-not-iterating + mock_time.side_effect = range(1, 50, 1) + + def reset(): + self.run_count = 0 + self.is_complete_count = 0 + + def FakeRun(): + self.run_count += 1 + return 0 + + def FakeIsComplete(): + self.is_complete_count += 1 + if self.is_complete_count < 6: + return False + else: + return True + + self.mock_logger.Reset() + self.exp.Run = FakeRun + self.exp.IsComplete = FakeIsComplete + + # Test 1: log_level == "quiet" + self.exp.log_level = "quiet" + er = experiment_runner.ExperimentRunner( + self.exp, + json_report=False, + using_schedv2=False, + log=self.mock_logger, + cmd_exec=self.mock_cmd_exec, + ) + er.STATUS_TIME_DELAY = 2 + mock_status_string.return_value = "Fake status string" + er._Run(self.exp) + self.assertEqual(self.run_count, 1) + self.assertTrue(self.is_complete_count > 0) + self.assertEqual(self.mock_logger.LogStartDotsCount, 1) + self.assertEqual(self.mock_logger.LogAppendDotCount, 1) + self.assertEqual(self.mock_logger.LogEndDotsCount, 1) + self.assertEqual(self.mock_logger.dot_count, 2) + self.assertEqual(mock_progress_string.call_count, 0) + self.assertEqual(mock_status_string.call_count, 2) + self.assertEqual( + self.mock_logger.output_msgs, + [ + "==============================", + "Fake status string", + "==============================", + ], + ) + self.assertEqual(len(self.mock_logger.error_msgs), 0) + + # Test 2: log_level == "average" + self.mock_logger.Reset() + reset() + self.exp.log_level = "average" + mock_status_string.call_count = 0 + er = experiment_runner.ExperimentRunner( + 
self.exp, + json_report=False, + using_schedv2=False, + log=self.mock_logger, + cmd_exec=self.mock_cmd_exec, + ) + er.STATUS_TIME_DELAY = 2 + mock_status_string.return_value = "Fake status string" + er._Run(self.exp) + self.assertEqual(self.run_count, 1) + self.assertTrue(self.is_complete_count > 0) + self.assertEqual(self.mock_logger.LogStartDotsCount, 1) + self.assertEqual(self.mock_logger.LogAppendDotCount, 1) + self.assertEqual(self.mock_logger.LogEndDotsCount, 1) + self.assertEqual(self.mock_logger.dot_count, 2) + self.assertEqual(mock_progress_string.call_count, 0) + self.assertEqual(mock_status_string.call_count, 2) + self.assertEqual( + self.mock_logger.output_msgs, + [ + "==============================", + "Fake status string", + "==============================", + ], + ) + self.assertEqual(len(self.mock_logger.error_msgs), 0) + + # Test 3: log_level == "verbose" + self.mock_logger.Reset() + reset() + self.exp.log_level = "verbose" + mock_status_string.call_count = 0 + er = experiment_runner.ExperimentRunner( + self.exp, + json_report=False, + using_schedv2=False, + log=self.mock_logger, + cmd_exec=self.mock_cmd_exec, + ) + er.STATUS_TIME_DELAY = 2 + mock_status_string.return_value = "Fake status string" + mock_progress_string.return_value = "Fake progress string" + er._Run(self.exp) + self.assertEqual(self.run_count, 1) + self.assertTrue(self.is_complete_count > 0) + self.assertEqual(self.mock_logger.LogStartDotsCount, 0) + self.assertEqual(self.mock_logger.LogAppendDotCount, 0) + self.assertEqual(self.mock_logger.LogEndDotsCount, 0) + self.assertEqual(self.mock_logger.dot_count, 0) + self.assertEqual(mock_progress_string.call_count, 2) + self.assertEqual(mock_status_string.call_count, 2) + self.assertEqual( + self.mock_logger.output_msgs, + [ + "==============================", + "Fake progress string", + "Fake status string", + "==============================", + "==============================", + "Fake progress string", + "Fake status string", + "==============================", + ], + ) + self.assertEqual(len(self.mock_logger.error_msgs), 0) + + @mock.patch.object(TextResultsReport, "GetReport") + def test_print_table(self, mock_report): + self.mock_logger.Reset() + mock_report.return_value = "This is a fake experiment report." + er = experiment_runner.ExperimentRunner( + self.exp, + json_report=False, + using_schedv2=False, + log=self.mock_logger, + cmd_exec=self.mock_cmd_exec, + ) + er._PrintTable(self.exp) + self.assertEqual(mock_report.call_count, 1) + self.assertEqual( + self.mock_logger.output_msgs, ["This is a fake experiment report."] + ) + + @mock.patch.object(HTMLResultsReport, "GetReport") + @mock.patch.object(TextResultsReport, "GetReport") + @mock.patch.object(EmailSender, "Attachment") + @mock.patch.object(EmailSender, "SendEmail") + @mock.patch.object(getpass, "getuser") + def test_email( + self, + mock_getuser, + mock_emailer, + mock_attachment, + mock_text_report, + mock_html_report, + ): + + mock_getuser.return_value = "john.smith@google.com" + mock_text_report.return_value = "This is a fake text report." + mock_html_report.return_value = "This is a fake html report." + + self.mock_logger.Reset() + config.AddConfig("no_email", True) + self.exp.email_to = ["jane.doe@google.com"] + er = experiment_runner.ExperimentRunner( + self.exp, + json_report=False, + using_schedv2=False, + log=self.mock_logger, + cmd_exec=self.mock_cmd_exec, + ) + # Test 1. 
Config:no_email; exp.email_to set ==> no email sent + er._Email(self.exp) + self.assertEqual(mock_getuser.call_count, 0) + self.assertEqual(mock_emailer.call_count, 0) + self.assertEqual(mock_attachment.call_count, 0) + self.assertEqual(mock_text_report.call_count, 0) + self.assertEqual(mock_html_report.call_count, 0) + + # Test 2. Config: email. exp.email_to set; cache hit. => send email + self.mock_logger.Reset() + config.AddConfig("no_email", False) + for r in self.exp.benchmark_runs: + r.cache_hit = True + er._Email(self.exp) + self.assertEqual(mock_getuser.call_count, 1) + self.assertEqual(mock_emailer.call_count, 1) + self.assertEqual(mock_attachment.call_count, 1) + self.assertEqual(mock_text_report.call_count, 1) + self.assertEqual(mock_html_report.call_count, 1) + self.assertEqual(len(mock_emailer.call_args), 2) + self.assertEqual( + mock_emailer.call_args[0], + ( + ["jane.doe@google.com", "john.smith@google.com"], + ": image1 vs. image2", + "<pre style='font-size: 13px'>This is a fake text " + "report.\nResults are stored in _results.\n</pre>", + ), + ) + self.assertTrue(isinstance(mock_emailer.call_args[1], dict)) + self.assertEqual(len(mock_emailer.call_args[1]), 2) + self.assertTrue("attachments" in mock_emailer.call_args[1].keys()) + self.assertEqual(mock_emailer.call_args[1]["msg_type"], "html") + + mock_attachment.assert_called_with( + "report.html", "This is a fake html report." + ) + + # Test 3. Config: email; exp.mail_to set; no cache hit. => send email + self.mock_logger.Reset() + mock_getuser.reset_mock() + mock_emailer.reset_mock() + mock_attachment.reset_mock() + mock_text_report.reset_mock() + mock_html_report.reset_mock() + config.AddConfig("no_email", False) + for r in self.exp.benchmark_runs: + r.cache_hit = False + er._Email(self.exp) + self.assertEqual(mock_getuser.call_count, 1) + self.assertEqual(mock_emailer.call_count, 1) + self.assertEqual(mock_attachment.call_count, 1) + self.assertEqual(mock_text_report.call_count, 1) + self.assertEqual(mock_html_report.call_count, 1) + self.assertEqual(len(mock_emailer.call_args), 2) + self.assertEqual( + mock_emailer.call_args[0], + ( + [ + "jane.doe@google.com", + "john.smith@google.com", + "john.smith@google.com", + ], + ": image1 vs. image2", + "<pre style='font-size: 13px'>This is a fake text " + "report.\nResults are stored in _results.\n</pre>", + ), + ) + self.assertTrue(isinstance(mock_emailer.call_args[1], dict)) + self.assertEqual(len(mock_emailer.call_args[1]), 2) + self.assertTrue("attachments" in mock_emailer.call_args[1].keys()) + self.assertEqual(mock_emailer.call_args[1]["msg_type"], "html") + + mock_attachment.assert_called_with( + "report.html", "This is a fake html report." + ) + + # Test 4. Config: email; exp.mail_to = None; no cache hit. => send email + self.mock_logger.Reset() + mock_getuser.reset_mock() + mock_emailer.reset_mock() + mock_attachment.reset_mock() + mock_text_report.reset_mock() + mock_html_report.reset_mock() + self.exp.email_to = [] + er._Email(self.exp) + self.assertEqual(mock_getuser.call_count, 1) + self.assertEqual(mock_emailer.call_count, 1) + self.assertEqual(mock_attachment.call_count, 1) + self.assertEqual(mock_text_report.call_count, 1) + self.assertEqual(mock_html_report.call_count, 1) + self.assertEqual(len(mock_emailer.call_args), 2) + self.assertEqual( + mock_emailer.call_args[0], + ( + ["john.smith@google.com"], + ": image1 vs. 
image2", + "<pre style='font-size: 13px'>This is a fake text " + "report.\nResults are stored in _results.\n</pre>", + ), + ) + self.assertTrue(isinstance(mock_emailer.call_args[1], dict)) + self.assertEqual(len(mock_emailer.call_args[1]), 2) + self.assertTrue("attachments" in mock_emailer.call_args[1].keys()) + self.assertEqual(mock_emailer.call_args[1]["msg_type"], "html") + + mock_attachment.assert_called_with( + "report.html", "This is a fake html report." + ) + + # Test 5. Config: email; exp.mail_to = None; cache hit => no email sent + self.mock_logger.Reset() + mock_getuser.reset_mock() + mock_emailer.reset_mock() + mock_attachment.reset_mock() + mock_text_report.reset_mock() + mock_html_report.reset_mock() + for r in self.exp.benchmark_runs: + r.cache_hit = True + er._Email(self.exp) + self.assertEqual(mock_getuser.call_count, 0) + self.assertEqual(mock_emailer.call_count, 0) + self.assertEqual(mock_attachment.call_count, 0) + self.assertEqual(mock_text_report.call_count, 0) + self.assertEqual(mock_html_report.call_count, 0) + + @mock.patch.object(FileUtils, "RmDir") + @mock.patch.object(FileUtils, "MkDirP") + @mock.patch.object(FileUtils, "WriteFile") + @mock.patch.object(HTMLResultsReport, "FromExperiment") + @mock.patch.object(TextResultsReport, "FromExperiment") + @mock.patch.object(Result, "CompressResultsTo") + @mock.patch.object(Result, "CopyResultsTo") + @mock.patch.object(Result, "CleanUp") + @mock.patch.object(Result, "FormatStringTopCommands") + @mock.patch("builtins.open", new_callable=mock.mock_open) + def test_store_results( + self, + mock_open, + mock_top_commands, + mock_cleanup, + mock_copy, + mock_compress, + _mock_text_report, + mock_report, + mock_writefile, + mock_mkdir, + mock_rmdir, + ): + + self.mock_logger.Reset() + self.exp.results_directory = "/usr/local/crosperf-results" + bench_run = self.exp.benchmark_runs[5] + bench_path = "/usr/local/crosperf-results/" + "".join( + ch for ch in bench_run.name if ch.isalnum() + ) + self.assertEqual(len(self.exp.benchmark_runs), 6) + + er = experiment_runner.ExperimentRunner( + self.exp, + json_report=False, + using_schedv2=False, + log=self.mock_logger, + cmd_exec=self.mock_cmd_exec, + ) + + # Test 1. Make sure nothing is done if _terminated is true. + er._terminated = True + er._StoreResults(self.exp) + self.assertEqual(mock_cleanup.call_count, 0) + self.assertEqual(mock_copy.call_count, 0) + self.assertEqual(mock_compress.call_count, 0) + self.assertEqual(mock_report.call_count, 0) + self.assertEqual(mock_writefile.call_count, 0) + self.assertEqual(mock_mkdir.call_count, 0) + self.assertEqual(mock_rmdir.call_count, 0) + self.assertEqual(self.mock_logger.LogOutputCount, 0) + self.assertEqual(mock_open.call_count, 0) + self.assertEqual(mock_top_commands.call_count, 0) + + # Test 2. _terminated is false; everything works properly. 
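A note before the Test 2 body below: the builtins.open patch in the decorator stack above, with new_callable=mock.mock_open, is what lets the test assert on both the open() call and the writes made through the returned handle. A minimal sketch of that pattern, with a hypothetical path and payload (only the unittest.mock calls mirror what the test relies on):

import unittest.mock as mock

m = mock.mock_open()
with mock.patch("builtins.open", m):
    # Code under test opens and writes; nothing touches the filesystem.
    with open("/tmp/topstats.log", "w") as f:
        f.write("top stats\n")

m.assert_called_with("/tmp/topstats.log", "w")  # the open() call itself
m().write.assert_called()  # m() returns the shared file-handle mock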
+ fake_result = Result( + self.mock_logger, self.exp.labels[0], "average", "daisy1" + ) + for r in self.exp.benchmark_runs: + r.result = fake_result + er._terminated = False + self.exp.compress_results = False + er._StoreResults(self.exp) + self.assertEqual(mock_cleanup.call_count, 6) + mock_cleanup.assert_called_with(bench_run.benchmark.rm_chroot_tmp) + self.assertEqual(mock_copy.call_count, 6) + mock_copy.assert_called_with(bench_path) + self.assertEqual(mock_writefile.call_count, 3) + self.assertEqual(len(mock_writefile.call_args_list), 3) + first_args = mock_writefile.call_args_list[0] + second_args = mock_writefile.call_args_list[1] + self.assertEqual( + first_args[0][0], "/usr/local/crosperf-results/experiment.exp" + ) + self.assertEqual( + second_args[0][0], "/usr/local/crosperf-results/results.html" + ) + self.assertEqual(mock_mkdir.call_count, 1) + mock_mkdir.assert_called_with("/usr/local/crosperf-results") + self.assertEqual(mock_rmdir.call_count, 1) + mock_rmdir.assert_called_with("/usr/local/crosperf-results") + self.assertEqual(self.mock_logger.LogOutputCount, 5) + self.assertEqual( + self.mock_logger.output_msgs, + [ + "Storing experiment file in /usr/local/crosperf-results.", + "Storing top statistics of each benchmark run into" + " /usr/local/crosperf-results/topstats.log.", + "Storing results of each benchmark run.", + "Storing results report in /usr/local/crosperf-results.", + "Storing email message body in /usr/local/crosperf-results.", + ], + ) + self.assertEqual(mock_open.call_count, 1) + # Check write to a topstats.log file. + mock_open.assert_called_with( + "/usr/local/crosperf-results/topstats.log", "w" + ) + mock_open().write.assert_called() + + # Check top calls with no arguments. + topcalls = [mock.call()] * 6 + self.assertEqual(mock_top_commands.call_args_list, topcalls) + + # Test 3. Test compress_results. 
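Test 3, whose body follows, flips compress_results and expects CompressResultsTo to take over from CopyResultsTo while the copy count stays at zero. Inferred from these assertions (a sketch, not a copy of experiment_runner.py), the per-run branch being pinned down looks roughly like:

def store_run_results(benchmark_run, benchmark_path, compress_results):
    # Inferred from the test's assertions: compress into the results
    # directory when requested, otherwise copy, then clean up the run.
    result = benchmark_run.result
    if compress_results:
        result.CompressResultsTo(benchmark_path)
    else:
        result.CopyResultsTo(benchmark_path)
    result.CleanUp(benchmark_run.benchmark.rm_chroot_tmp)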
+ self.exp.compress_results = True + mock_copy.call_count = 0 + mock_compress.call_count = 0 + er._StoreResults(self.exp) + self.assertEqual(mock_copy.call_count, 0) + mock_copy.assert_called_with(bench_path) + self.assertEqual(mock_compress.call_count, 6) + mock_compress.assert_called_with(bench_path) + + +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/experiment_status.py b/crosperf/experiment_status.py index 3207d4a5..4bd3995e 100644 --- a/crosperf/experiment_status.py +++ b/crosperf/experiment_status.py @@ -14,136 +14,156 @@ import time class ExperimentStatus(object): - """The status class.""" - - def __init__(self, experiment): - self.experiment = experiment - self.num_total = len(self.experiment.benchmark_runs) - self.completed = 0 - self.new_job_start_time = time.time() - self.log_level = experiment.log_level - - def _GetProgressBar(self, num_complete, num_total): - ret = 'Done: %s%%' % int(100.0 * num_complete / num_total) - bar_length = 50 - done_char = '>' - undone_char = ' ' - num_complete_chars = bar_length * num_complete // num_total - num_undone_chars = bar_length - num_complete_chars - ret += ' [%s%s]' % (num_complete_chars * done_char, - num_undone_chars * undone_char) - return ret - - def GetProgressString(self): - """Get the elapsed_time, ETA.""" - current_time = time.time() - if self.experiment.start_time: - elapsed_time = current_time - self.experiment.start_time - else: - elapsed_time = 0 - try: - if self.completed != self.experiment.num_complete: - self.completed = self.experiment.num_complete - self.new_job_start_time = current_time - time_completed_jobs = ( - elapsed_time - (current_time - self.new_job_start_time)) - # eta is calculated as: - # ETA = (num_jobs_not_yet_started * estimated_time_per_job) - # + time_left_for_current_job - # - # where - # num_jobs_not_yet_started = (num_total - num_complete - 1) - # - # estimated_time_per_job = time_completed_jobs / num_run_complete - # - # time_left_for_current_job = estimated_time_per_job - - # time_spent_so_far_on_current_job - # - # The biggest problem with this calculation is its assumption that - # all jobs have roughly the same running time (blatantly false!). - # - # ETA can come out negative if the time spent on the current job is - # greater than the estimated time per job (e.g. you're running the - # first long job, after a series of short jobs). For now, if that - # happens, we set the ETA to "Unknown." 
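To make the formula in the comment above concrete, a worked example with hypothetical numbers (the computation itself continues in the removed block below):

# 10 jobs total, 4 complete, 40s spent across the 4 finished runs,
# and 7s so far on the current job (all values hypothetical).
num_total = 10
num_complete = 4
num_run_complete = 4
time_completed_jobs = 40.0  # elapsed time minus time on the current job
time_on_current_job = 7.0

est_per_job = time_completed_jobs / num_run_complete  # 10.0s per job
eta_seconds = (num_total - num_complete - 1) * est_per_job + (
    est_per_job - time_on_current_job
)
# (10 - 4 - 1) * 10.0 + (10.0 - 7.0) == 53.0 seconds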
- # - eta_seconds = ( - float(self.num_total - self.experiment.num_complete - 1) * - time_completed_jobs / self.experiment.num_run_complete + - (time_completed_jobs / self.experiment.num_run_complete - - (current_time - self.new_job_start_time))) - - eta_seconds = int(eta_seconds) - if eta_seconds > 0: - eta = datetime.timedelta(seconds=eta_seconds) - else: - eta = 'Unknown' - except ZeroDivisionError: - eta = 'Unknown' - strings = [] - strings.append('Current time: %s Elapsed: %s ETA: %s' % - (datetime.datetime.now(), - datetime.timedelta(seconds=int(elapsed_time)), eta)) - strings.append( - self._GetProgressBar(self.experiment.num_complete, self.num_total)) - return '\n'.join(strings) - - def GetStatusString(self): - """Get the status string of all the benchmark_runs.""" - status_bins = collections.defaultdict(list) - for benchmark_run in self.experiment.benchmark_runs: - status_bins[benchmark_run.timeline.GetLastEvent()].append(benchmark_run) - - status_strings = [] - for key, val in status_bins.items(): - if key == 'RUNNING': - get_description = self._GetNamesAndIterations - else: - get_description = self._GetCompactNamesAndIterations - status_strings.append('%s: %s' % (key, get_description(val))) - - thread_status = '' - thread_status_format = 'Thread Status: \n{}\n' - if (self.experiment.schedv2() is None and - self.experiment.log_level == 'verbose'): - # Add the machine manager status. - thread_status = thread_status_format.format( - self.experiment.machine_manager.AsString()) - elif self.experiment.schedv2(): - # In schedv2 mode, we always print out thread status. - thread_status = thread_status_format.format( - self.experiment.schedv2().threads_status_as_string()) - - result = '{}{}'.format(thread_status, '\n'.join(status_strings)) - - return result - - def _GetNamesAndIterations(self, benchmark_runs): - strings = [] - t = time.time() - for benchmark_run in benchmark_runs: - t_last = benchmark_run.timeline.GetLastEventTime() - elapsed = str(datetime.timedelta(seconds=int(t - t_last))) - strings.append("'{0}' {1}".format(benchmark_run.name, elapsed)) - return ' %s (%s)' % (len(strings), ', '.join(strings)) - - def _GetCompactNamesAndIterations(self, benchmark_runs): - grouped_benchmarks = collections.defaultdict(list) - for benchmark_run in benchmark_runs: - grouped_benchmarks[benchmark_run.label.name].append(benchmark_run) - - output_segs = [] - for label_name, label_runs in grouped_benchmarks.items(): - strings = [] - benchmark_iterations = collections.defaultdict(list) - for benchmark_run in label_runs: - assert benchmark_run.label.name == label_name - benchmark_name = benchmark_run.benchmark.name - benchmark_iterations[benchmark_name].append(benchmark_run.iteration) - for key, val in benchmark_iterations.items(): - val.sort() - iterations = ','.join(str(v) for v in val) - strings.append('{} [{}]'.format(key, iterations)) - output_segs.append(' ' + label_name + ': ' + ', '.join(strings) + '\n') - - return ' %s \n%s' % (len(benchmark_runs), ''.join(output_segs)) + """The status class.""" + + def __init__(self, experiment): + self.experiment = experiment + self.num_total = len(self.experiment.benchmark_runs) + self.completed = 0 + self.new_job_start_time = time.time() + self.log_level = experiment.log_level + + def _GetProgressBar(self, num_complete, num_total): + ret = "Done: %s%%" % int(100.0 * num_complete / num_total) + bar_length = 50 + done_char = ">" + undone_char = " " + num_complete_chars = bar_length * num_complete // num_total + num_undone_chars = bar_length - 
num_complete_chars + ret += " [%s%s]" % ( + num_complete_chars * done_char, + num_undone_chars * undone_char, + ) + return ret + + def GetProgressString(self): + """Get the elapsed_time, ETA.""" + current_time = time.time() + if self.experiment.start_time: + elapsed_time = current_time - self.experiment.start_time + else: + elapsed_time = 0 + try: + if self.completed != self.experiment.num_complete: + self.completed = self.experiment.num_complete + self.new_job_start_time = current_time + time_completed_jobs = elapsed_time - ( + current_time - self.new_job_start_time + ) + # eta is calculated as: + # ETA = (num_jobs_not_yet_started * estimated_time_per_job) + # + time_left_for_current_job + # + # where + # num_jobs_not_yet_started = (num_total - num_complete - 1) + # + # estimated_time_per_job = time_completed_jobs / num_run_complete + # + # time_left_for_current_job = estimated_time_per_job - + # time_spent_so_far_on_current_job + # + # The biggest problem with this calculation is its assumption that + # all jobs have roughly the same running time (blatantly false!). + # + # ETA can come out negative if the time spent on the current job is + # greater than the estimated time per job (e.g. you're running the + # first long job, after a series of short jobs). For now, if that + # happens, we set the ETA to "Unknown." + # + eta_seconds = float( + self.num_total - self.experiment.num_complete - 1 + ) * time_completed_jobs / self.experiment.num_run_complete + ( + time_completed_jobs / self.experiment.num_run_complete + - (current_time - self.new_job_start_time) + ) + + eta_seconds = int(eta_seconds) + if eta_seconds > 0: + eta = datetime.timedelta(seconds=eta_seconds) + else: + eta = "Unknown" + except ZeroDivisionError: + eta = "Unknown" + strings = [] + strings.append( + "Current time: %s Elapsed: %s ETA: %s" + % ( + datetime.datetime.now(), + datetime.timedelta(seconds=int(elapsed_time)), + eta, + ) + ) + strings.append( + self._GetProgressBar(self.experiment.num_complete, self.num_total) + ) + return "\n".join(strings) + + def GetStatusString(self): + """Get the status string of all the benchmark_runs.""" + status_bins = collections.defaultdict(list) + for benchmark_run in self.experiment.benchmark_runs: + status_bins[benchmark_run.timeline.GetLastEvent()].append( + benchmark_run + ) + + status_strings = [] + for key, val in status_bins.items(): + if key == "RUNNING": + get_description = self._GetNamesAndIterations + else: + get_description = self._GetCompactNamesAndIterations + status_strings.append("%s: %s" % (key, get_description(val))) + + thread_status = "" + thread_status_format = "Thread Status: \n{}\n" + if ( + self.experiment.schedv2() is None + and self.experiment.log_level == "verbose" + ): + # Add the machine manager status. + thread_status = thread_status_format.format( + self.experiment.machine_manager.AsString() + ) + elif self.experiment.schedv2(): + # In schedv2 mode, we always print out thread status. 
+ thread_status = thread_status_format.format( + self.experiment.schedv2().threads_status_as_string() + ) + + result = "{}{}".format(thread_status, "\n".join(status_strings)) + + return result + + def _GetNamesAndIterations(self, benchmark_runs): + strings = [] + t = time.time() + for benchmark_run in benchmark_runs: + t_last = benchmark_run.timeline.GetLastEventTime() + elapsed = str(datetime.timedelta(seconds=int(t - t_last))) + strings.append("'{0}' {1}".format(benchmark_run.name, elapsed)) + return " %s (%s)" % (len(strings), ", ".join(strings)) + + def _GetCompactNamesAndIterations(self, benchmark_runs): + grouped_benchmarks = collections.defaultdict(list) + for benchmark_run in benchmark_runs: + grouped_benchmarks[benchmark_run.label.name].append(benchmark_run) + + output_segs = [] + for label_name, label_runs in grouped_benchmarks.items(): + strings = [] + benchmark_iterations = collections.defaultdict(list) + for benchmark_run in label_runs: + assert benchmark_run.label.name == label_name + benchmark_name = benchmark_run.benchmark.name + benchmark_iterations[benchmark_name].append( + benchmark_run.iteration + ) + for key, val in benchmark_iterations.items(): + val.sort() + iterations = ",".join(str(v) for v in val) + strings.append("{} [{}]".format(key, iterations)) + output_segs.append( + " " + label_name + ": " + ", ".join(strings) + "\n" + ) + + return " %s \n%s" % (len(benchmark_runs), "".join(output_segs)) diff --git a/crosperf/field.py b/crosperf/field.py index 51dd8732..11eb3778 100644 --- a/crosperf/field.py +++ b/crosperf/field.py @@ -7,150 +7,161 @@ class Field(object): - """Class representing a Field in an experiment file.""" + """Class representing a Field in an experiment file.""" - def __init__(self, name, required, default, inheritable, description): - self.name = name - self.required = required - self.assigned = False - self.default = default - self._value = default - self.inheritable = inheritable - self.description = description + def __init__(self, name, required, default, inheritable, description): + self.name = name + self.required = required + self.assigned = False + self.default = default + self._value = default + self.inheritable = inheritable + self.description = description - def Set(self, value, parse=True): - if parse: - self._value = self._Parse(value) - else: - self._value = value - self.assigned = True + def Set(self, value, parse=True): + if parse: + self._value = self._Parse(value) + else: + self._value = value + self.assigned = True - def Append(self, value): - self._value += self._Parse(value) - self.assigned = True + def Append(self, value): + self._value += self._Parse(value) + self.assigned = True - def _Parse(self, value): - return value + def _Parse(self, value): + return value - def Get(self): - return self._value + def Get(self): + return self._value - def GetString(self): - return str(self._value) + def GetString(self): + return str(self._value) class TextField(Field): - """Class of text field.""" + """Class of text field.""" - def __init__(self, - name, - required=False, - default='', - inheritable=False, - description=''): - super(TextField, self).__init__(name, required, default, inheritable, - description) + def __init__( + self, + name, + required=False, + default="", + inheritable=False, + description="", + ): + super(TextField, self).__init__( + name, required, default, inheritable, description + ) - def _Parse(self, value): - return str(value) + def _Parse(self, value): + return str(value) class BooleanField(Field): - """Class of 
boolean field.""" - - def __init__(self, - name, - required=False, - default=False, - inheritable=False, - description=''): - super(BooleanField, self).__init__(name, required, default, inheritable, - description) - - def _Parse(self, value): - if value.lower() == 'true': - return True - elif value.lower() == 'false': - return False - raise TypeError( - "Invalid value for '%s'. Must be true or false." % self.name) + """Class of boolean field.""" + + def __init__( + self, + name, + required=False, + default=False, + inheritable=False, + description="", + ): + super(BooleanField, self).__init__( + name, required, default, inheritable, description + ) + + def _Parse(self, value): + if value.lower() == "true": + return True + elif value.lower() == "false": + return False + raise TypeError( + "Invalid value for '%s'. Must be true or false." % self.name + ) class IntegerField(Field): - """Class of integer field.""" + """Class of integer field.""" - def __init__(self, - name, - required=False, - default=0, - inheritable=False, - description=''): - super(IntegerField, self).__init__(name, required, default, inheritable, - description) + def __init__( + self, name, required=False, default=0, inheritable=False, description="" + ): + super(IntegerField, self).__init__( + name, required, default, inheritable, description + ) - def _Parse(self, value): - return int(value) + def _Parse(self, value): + return int(value) class FloatField(Field): - """Class of float field.""" + """Class of float field.""" - def __init__(self, - name, - required=False, - default=0, - inheritable=False, - description=''): - super(FloatField, self).__init__(name, required, default, inheritable, - description) + def __init__( + self, name, required=False, default=0, inheritable=False, description="" + ): + super(FloatField, self).__init__( + name, required, default, inheritable, description + ) - def _Parse(self, value): - return float(value) + def _Parse(self, value): + return float(value) class ListField(Field): - """Class of list field.""" - - def __init__(self, - name, - required=False, - default=None, - inheritable=False, - description=''): - super(ListField, self).__init__(name, required, default, inheritable, - description) - - def _Parse(self, value): - return value.split() - - def GetString(self): - return ' '.join(self._value) - - def Append(self, value): - v = self._Parse(value) - if not self._value: - self._value = v - else: - self._value += v - self.assigned = True + """Class of list field.""" + + def __init__( + self, + name, + required=False, + default=None, + inheritable=False, + description="", + ): + super(ListField, self).__init__( + name, required, default, inheritable, description + ) + + def _Parse(self, value): + return value.split() + + def GetString(self): + return " ".join(self._value) + + def Append(self, value): + v = self._Parse(value) + if not self._value: + self._value = v + else: + self._value += v + self.assigned = True class EnumField(Field): - """Class of enum field.""" - - def __init__(self, - name, - options, - required=False, - default='', - inheritable=False, - description=''): - super(EnumField, self).__init__(name, required, default, inheritable, - description) - self.options = options - - def _Parse(self, value): - if value not in self.options: - raise TypeError("Invalid enum value for field '%s'. 
Must be one of (%s)" %
-                    (self.name, ', '.join(self.options)))
-    return str(value)
+    """Class of enum field."""
+
+    def __init__(
+        self,
+        name,
+        options,
+        required=False,
+        default="",
+        inheritable=False,
+        description="",
+    ):
+        super(EnumField, self).__init__(
+            name, required, default, inheritable, description
+        )
+        self.options = options
+
+    def _Parse(self, value):
+        if value not in self.options:
+            raise TypeError(
+                "Invalid enum value for field '%s'. Must be one of (%s)"
+                % (self.name, ", ".join(self.options))
+            )
+        return str(value)
diff --git a/crosperf/flag_test_unittest.py b/crosperf/flag_test_unittest.py
index d4fec8a0..1efc9167 100755
--- a/crosperf/flag_test_unittest.py
+++ b/crosperf/flag_test_unittest.py
@@ -14,32 +14,32 @@ import test_flag


 class FlagTestCase(unittest.TestCase):
-  """The unittest class."""
+    """The unittest class."""

-  def test_test_flag(self):
-    # Verify that test_flag.is_test exists, that it is a list,
-    # and that it contains 1 element.
-    self.assertTrue(isinstance(test_flag.is_test, list))
-    self.assertEqual(len(test_flag.is_test), 1)
+    def test_test_flag(self):
+        # Verify that test_flag.is_test exists, that it is a list,
+        # and that it contains 1 element.
+        self.assertTrue(isinstance(test_flag.is_test, list))
+        self.assertEqual(len(test_flag.is_test), 1)

-    # Verify that the getting the flag works and that the flag
-    # contains False, its starting value.
-    save_flag = test_flag.GetTestMode()
-    self.assertFalse(save_flag)
+        # Verify that getting the flag works and that the flag
+        # contains False, its starting value.
+        save_flag = test_flag.GetTestMode()
+        self.assertFalse(save_flag)

-    # Verify that setting the flat to True, then getting it, works.
-    test_flag.SetTestMode(True)
-    self.assertTrue(test_flag.GetTestMode())
+        # Verify that setting the flag to True, then getting it, works.
+        test_flag.SetTestMode(True)
+        self.assertTrue(test_flag.GetTestMode())

-    # Verify that setting the flag to False, then getting it, works.
-    test_flag.SetTestMode(save_flag)
-    self.assertFalse(test_flag.GetTestMode())
+        # Verify that setting the flag to False, then getting it, works.
+        test_flag.SetTestMode(save_flag)
+        self.assertFalse(test_flag.GetTestMode())

-    # Verify that test_flag.is_test still exists, that it still is a
-    # list, and that it still contains 1 element.
-    self.assertTrue(isinstance(test_flag.is_test, list))
-    self.assertEqual(len(test_flag.is_test), 1)
+        # Verify that test_flag.is_test still exists, that it still is a
+        # list, and that it still contains 1 element.
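The closing assertions follow below. What this test pins down is the shape of test_flag's state: a one-element list that acts as a shared, mutable flag across importers. A sketch of that pattern (an inference from the assertions, not a copy of test_flag.py):

is_test = [False]  # one-element list: importers share mutations in place

def SetTestMode(flag):
    is_test[0] = flag  # mutate, don't rebind, so every importer sees it

def GetTestMode():
    return is_test[0]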
+ self.assertTrue(isinstance(test_flag.is_test, list)) + self.assertEqual(len(test_flag.is_test), 1) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/generate_report.py b/crosperf/generate_report.py index 3d4732c0..54cf4d91 100755 --- a/crosperf/generate_report.py +++ b/crosperf/generate_report.py @@ -61,223 +61,248 @@ from results_report import TextResultsReport def CountBenchmarks(benchmark_runs): - """Counts the number of iterations for each benchmark in benchmark_runs.""" + """Counts the number of iterations for each benchmark in benchmark_runs.""" - # Example input for benchmark_runs: - # {"bench": [[run1, run2, run3], [run1, run2, run3, run4]]} - def _MaxLen(results): - return 0 if not results else max(len(r) for r in results) + # Example input for benchmark_runs: + # {"bench": [[run1, run2, run3], [run1, run2, run3, run4]]} + def _MaxLen(results): + return 0 if not results else max(len(r) for r in results) - return [(name, _MaxLen(results)) for name, results in benchmark_runs.items()] + return [ + (name, _MaxLen(results)) for name, results in benchmark_runs.items() + ] def CutResultsInPlace(results, max_keys=50, complain_on_update=True): - """Limits the given benchmark results to max_keys keys in-place. - - This takes the `data` field from the benchmark input, and mutates each - benchmark run to contain `max_keys` elements (ignoring special elements, like - "retval"). At the moment, it just selects the first `max_keys` keyvals, - alphabetically. - - If complain_on_update is true, this will print a message noting that a - truncation occurred. - - This returns the `results` object that was passed in, for convenience. - - e.g. - >>> benchmark_data = { - ... "bench_draw_line": [ - ... [{"time (ms)": 1.321, "memory (mb)": 128.1, "retval": 0}, - ... {"time (ms)": 1.920, "memory (mb)": 128.4, "retval": 0}], - ... [{"time (ms)": 1.221, "memory (mb)": 124.3, "retval": 0}, - ... {"time (ms)": 1.423, "memory (mb)": 123.9, "retval": 0}] - ... ] - ... } - >>> CutResultsInPlace(benchmark_data, max_keys=1, complain_on_update=False) - { - 'bench_draw_line': [ - [{'memory (mb)': 128.1, 'retval': 0}, - {'memory (mb)': 128.4, 'retval': 0}], - [{'memory (mb)': 124.3, 'retval': 0}, - {'memory (mb)': 123.9, 'retval': 0}] - ] - } - """ - actually_updated = False - for bench_results in results.values(): - for platform_results in bench_results: - for i, result in enumerate(platform_results): - # Keep the keys that come earliest when sorted alphabetically. - # Forcing alphabetical order is arbitrary, but necessary; otherwise, - # the keyvals we'd emit would depend on our iteration order through a - # map. - removable_keys = sorted(k for k in result if k != 'retval') - retained_keys = removable_keys[:max_keys] - platform_results[i] = {k: result[k] for k in retained_keys} - # retval needs to be passed through all of the time. - retval = result.get('retval') - if retval is not None: - platform_results[i]['retval'] = retval - actually_updated = actually_updated or \ - len(retained_keys) != len(removable_keys) - - if actually_updated and complain_on_update: - print( - 'Warning: Some benchmark keyvals have been truncated.', file=sys.stderr) - return results + """Limits the given benchmark results to max_keys keys in-place. + + This takes the `data` field from the benchmark input, and mutates each + benchmark run to contain `max_keys` elements (ignoring special elements, like + "retval"). 
At the moment, it just selects the first `max_keys` keyvals, + alphabetically. + + If complain_on_update is true, this will print a message noting that a + truncation occurred. + + This returns the `results` object that was passed in, for convenience. + + e.g. + >>> benchmark_data = { + ... "bench_draw_line": [ + ... [{"time (ms)": 1.321, "memory (mb)": 128.1, "retval": 0}, + ... {"time (ms)": 1.920, "memory (mb)": 128.4, "retval": 0}], + ... [{"time (ms)": 1.221, "memory (mb)": 124.3, "retval": 0}, + ... {"time (ms)": 1.423, "memory (mb)": 123.9, "retval": 0}] + ... ] + ... } + >>> CutResultsInPlace(benchmark_data, max_keys=1, complain_on_update=False) + { + 'bench_draw_line': [ + [{'memory (mb)': 128.1, 'retval': 0}, + {'memory (mb)': 128.4, 'retval': 0}], + [{'memory (mb)': 124.3, 'retval': 0}, + {'memory (mb)': 123.9, 'retval': 0}] + ] + } + """ + actually_updated = False + for bench_results in results.values(): + for platform_results in bench_results: + for i, result in enumerate(platform_results): + # Keep the keys that come earliest when sorted alphabetically. + # Forcing alphabetical order is arbitrary, but necessary; otherwise, + # the keyvals we'd emit would depend on our iteration order through a + # map. + removable_keys = sorted(k for k in result if k != "retval") + retained_keys = removable_keys[:max_keys] + platform_results[i] = {k: result[k] for k in retained_keys} + # retval needs to be passed through all of the time. + retval = result.get("retval") + if retval is not None: + platform_results[i]["retval"] = retval + actually_updated = actually_updated or len( + retained_keys + ) != len(removable_keys) + + if actually_updated and complain_on_update: + print( + "Warning: Some benchmark keyvals have been truncated.", + file=sys.stderr, + ) + return results def _PositiveInt(s): - i = int(s) - if i < 0: - raise argparse.ArgumentTypeError('%d is not a positive integer.' % (i,)) - return i + i = int(s) + if i < 0: + raise argparse.ArgumentTypeError("%d is not a positive integer." % (i,)) + return i def _AccumulateActions(args): - """Given program arguments, determines what actions we want to run. - - Returns [(ResultsReportCtor, str)], where ResultsReportCtor can construct a - ResultsReport, and the str is the file extension for the given report. - """ - results = [] - # The order of these is arbitrary. - if args.json: - results.append((JSONResultsReport, 'json')) - if args.text: - results.append((TextResultsReport, 'txt')) - if args.email: - email_ctor = functools.partial(TextResultsReport, email=True) - results.append((email_ctor, 'email')) - # We emit HTML if nothing else was specified. - if args.html or not results: - results.append((HTMLResultsReport, 'html')) - return results + """Given program arguments, determines what actions we want to run. + + Returns [(ResultsReportCtor, str)], where ResultsReportCtor can construct a + ResultsReport, and the str is the file extension for the given report. + """ + results = [] + # The order of these is arbitrary. + if args.json: + results.append((JSONResultsReport, "json")) + if args.text: + results.append((TextResultsReport, "txt")) + if args.email: + email_ctor = functools.partial(TextResultsReport, email=True) + results.append((email_ctor, "email")) + # We emit HTML if nothing else was specified. 
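The branch just below implements that HTML fallback. For illustration, two hypothetical flag combinations and the action lists they would produce (Namespace stands in for the parsed arguments; the report classes are as named in this file):

from argparse import Namespace

# No format flags: the list stays empty until the fallback fires.
no_flags = Namespace(json=False, text=False, email=False, html=False)
# _AccumulateActions(no_flags)  -> [(HTMLResultsReport, "html")]

# Any explicit flag suppresses the fallback.
json_only = Namespace(json=True, text=False, email=False, html=False)
# _AccumulateActions(json_only) -> [(JSONResultsReport, "json")]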
+ if args.html or not results: + results.append((HTMLResultsReport, "html")) + return results # Note: get_contents is a function, because it may be expensive (generating some # HTML reports takes O(seconds) on my machine, depending on the size of the # input data). def WriteFile(output_prefix, extension, get_contents, overwrite, verbose): - """Writes `contents` to a file named "${output_prefix}.${extension}". - - get_contents should be a zero-args function that returns a string (of the - contents to write). - If output_prefix == '-', this writes to stdout. - If overwrite is False, this will not overwrite files. - """ - if output_prefix == '-': - if verbose: - print('Writing %s report to stdout' % (extension,), file=sys.stderr) - sys.stdout.write(get_contents()) - return - - file_name = '%s.%s' % (output_prefix, extension) - if not overwrite and os.path.exists(file_name): - raise IOError('Refusing to write %s -- it already exists' % (file_name,)) - - with open(file_name, 'w') as out_file: - if verbose: - print('Writing %s report to %s' % (extension, file_name), file=sys.stderr) - out_file.write(get_contents()) + """Writes `contents` to a file named "${output_prefix}.${extension}". + + get_contents should be a zero-args function that returns a string (of the + contents to write). + If output_prefix == '-', this writes to stdout. + If overwrite is False, this will not overwrite files. + """ + if output_prefix == "-": + if verbose: + print("Writing %s report to stdout" % (extension,), file=sys.stderr) + sys.stdout.write(get_contents()) + return + + file_name = "%s.%s" % (output_prefix, extension) + if not overwrite and os.path.exists(file_name): + raise IOError( + "Refusing to write %s -- it already exists" % (file_name,) + ) + + with open(file_name, "w") as out_file: + if verbose: + print( + "Writing %s report to %s" % (extension, file_name), + file=sys.stderr, + ) + out_file.write(get_contents()) def RunActions(actions, benchmark_results, output_prefix, overwrite, verbose): - """Runs `actions`, returning True if all succeeded.""" - failed = False - - report_ctor = None # Make the linter happy - for report_ctor, extension in actions: - try: - get_contents = lambda: report_ctor(benchmark_results).GetReport() - WriteFile(output_prefix, extension, get_contents, overwrite, verbose) - except Exception: - # Complain and move along; we may have more actions that might complete - # successfully. - failed = True - traceback.print_exc() - return not failed + """Runs `actions`, returning True if all succeeded.""" + failed = False + + report_ctor = None # Make the linter happy + for report_ctor, extension in actions: + try: + get_contents = lambda: report_ctor(benchmark_results).GetReport() + WriteFile( + output_prefix, extension, get_contents, overwrite, verbose + ) + except Exception: + # Complain and move along; we may have more actions that might complete + # successfully. 
+ failed = True + traceback.print_exc() + return not failed def PickInputFile(input_name): - """Given program arguments, returns file to read for benchmark input.""" - return sys.stdin if input_name == '-' else open(input_name) + """Given program arguments, returns file to read for benchmark input.""" + return sys.stdin if input_name == "-" else open(input_name) def _NoPerfReport(_label_name, _benchmark_name, _benchmark_iteration): - return {} + return {} def _ParseArgs(argv): - parser = argparse.ArgumentParser(description='Turns JSON into results ' - 'report(s).') - parser.add_argument( - '-v', - '--verbose', - action='store_true', - help='Be a tiny bit more verbose.') - parser.add_argument( - '-f', - '--force', - action='store_true', - help='Overwrite existing results files.') - parser.add_argument( - '-o', - '--output', - default='report', - type=str, - help='Prefix of the output filename (default: report). ' - '- means stdout.') - parser.add_argument( - '-i', - '--input', - required=True, - type=str, - help='Where to read the JSON from. - means stdin.') - parser.add_argument( - '-l', - '--statistic-limit', - default=0, - type=_PositiveInt, - help='The maximum number of benchmark statistics to ' - 'display from a single run. 0 implies unlimited.') - parser.add_argument( - '--json', action='store_true', help='Output a JSON report.') - parser.add_argument( - '--text', action='store_true', help='Output a text report.') - parser.add_argument( - '--email', - action='store_true', - help='Output a text report suitable for email.') - parser.add_argument( - '--html', - action='store_true', - help='Output an HTML report (this is the default if no ' - 'other output format is specified).') - return parser.parse_args(argv) + parser = argparse.ArgumentParser( + description="Turns JSON into results " "report(s)." + ) + parser.add_argument( + "-v", + "--verbose", + action="store_true", + help="Be a tiny bit more verbose.", + ) + parser.add_argument( + "-f", + "--force", + action="store_true", + help="Overwrite existing results files.", + ) + parser.add_argument( + "-o", + "--output", + default="report", + type=str, + help="Prefix of the output filename (default: report). " + "- means stdout.", + ) + parser.add_argument( + "-i", + "--input", + required=True, + type=str, + help="Where to read the JSON from. - means stdin.", + ) + parser.add_argument( + "-l", + "--statistic-limit", + default=0, + type=_PositiveInt, + help="The maximum number of benchmark statistics to " + "display from a single run. 0 implies unlimited.", + ) + parser.add_argument( + "--json", action="store_true", help="Output a JSON report." + ) + parser.add_argument( + "--text", action="store_true", help="Output a text report." + ) + parser.add_argument( + "--email", + action="store_true", + help="Output a text report suitable for email.", + ) + parser.add_argument( + "--html", + action="store_true", + help="Output an HTML report (this is the default if no " + "other output format is specified).", + ) + return parser.parse_args(argv) def Main(argv): - args = _ParseArgs(argv) - with PickInputFile(args.input) as in_file: - raw_results = json.load(in_file) - - platform_names = raw_results['platforms'] - results = raw_results['data'] - if args.statistic_limit: - results = CutResultsInPlace(results, max_keys=args.statistic_limit) - benches = CountBenchmarks(results) - # In crosperf, a label is essentially a platform+configuration. So, a name of - # a label and a name of a platform are equivalent for our purposes. 
- bench_results = BenchmarkResults( - label_names=platform_names, - benchmark_names_and_iterations=benches, - run_keyvals=results, - read_perf_report=_NoPerfReport) - actions = _AccumulateActions(args) - ok = RunActions(actions, bench_results, args.output, args.force, args.verbose) - return 0 if ok else 1 - - -if __name__ == '__main__': - sys.exit(Main(sys.argv[1:])) + args = _ParseArgs(argv) + with PickInputFile(args.input) as in_file: + raw_results = json.load(in_file) + + platform_names = raw_results["platforms"] + results = raw_results["data"] + if args.statistic_limit: + results = CutResultsInPlace(results, max_keys=args.statistic_limit) + benches = CountBenchmarks(results) + # In crosperf, a label is essentially a platform+configuration. So, a name of + # a label and a name of a platform are equivalent for our purposes. + bench_results = BenchmarkResults( + label_names=platform_names, + benchmark_names_and_iterations=benches, + run_keyvals=results, + read_perf_report=_NoPerfReport, + ) + actions = _AccumulateActions(args) + ok = RunActions( + actions, bench_results, args.output, args.force, args.verbose + ) + return 0 if ok else 1 + + +if __name__ == "__main__": + sys.exit(Main(sys.argv[1:])) diff --git a/crosperf/generate_report_unittest.py b/crosperf/generate_report_unittest.py index a3e5ad91..dbbd08f4 100755 --- a/crosperf/generate_report_unittest.py +++ b/crosperf/generate_report_unittest.py @@ -18,161 +18,159 @@ import generate_report import results_report import test_flag + # pylint: disable=deprecated-module try: - from StringIO import StringIO # for Python 2 + from StringIO import StringIO # for Python 2 except ImportError: - from io import StringIO # for Python 3 + from io import StringIO # for Python 3 class _ContextualStringIO(StringIO): - """StringIO that can be used in `with` statements.""" + """StringIO that can be used in `with` statements.""" - def __init__(self, *args): - StringIO.__init__(self, *args) + def __init__(self, *args): + StringIO.__init__(self, *args) - def __enter__(self): - return self + def __enter__(self): + return self - def __exit__(self, _type, _value, _traceback): - pass + def __exit__(self, _type, _value, _traceback): + pass class GenerateReportTests(unittest.TestCase): - """Tests for generate_report.py.""" - - def testCountBenchmarks(self): - runs = { - 'foo': [[{}, {}, {}], [{}, {}, {}, {}]], - 'bar': [], - 'baz': [[], [{}], [{}, {}, {}]] - } - results = generate_report.CountBenchmarks(runs) - expected_results = [('foo', 4), ('bar', 0), ('baz', 3)] - self.assertCountEqual(expected_results, results) - - def testCutResultsInPlace(self): - bench_data = { - 'foo': [[{ - 'a': 1, - 'b': 2, - 'c': 3 - }, { - 'a': 3, - 'b': 2.5, - 'c': 1 - }]], - 'bar': [[{ - 'd': 11, - 'e': 12, - 'f': 13 - }]], - 'baz': [[{ - 'g': 12, - 'h': 13 - }]], - 'qux': [[{ - 'i': 11 - }]], - } - original_bench_data = copy.deepcopy(bench_data) - - max_keys = 2 - results = generate_report.CutResultsInPlace( - bench_data, max_keys=max_keys, complain_on_update=False) - # Cuts should be in-place. - self.assertIs(results, bench_data) - self.assertCountEqual( - list(original_bench_data.keys()), list(bench_data.keys())) - for bench_name, original_runs in original_bench_data.items(): - bench_runs = bench_data[bench_name] - self.assertEqual(len(original_runs), len(bench_runs)) - # Order of these sub-lists shouldn't have changed. 
- for original_list, new_list in zip(original_runs, bench_runs): - self.assertEqual(len(original_list), len(new_list)) - for original_keyvals, sub_keyvals in zip(original_list, new_list): - # sub_keyvals must be a subset of original_keyvals - self.assertDictContainsSubset(sub_keyvals, original_keyvals) - - def testCutResultsInPlaceLeavesRetval(self): - bench_data = { - 'foo': [[{ - 'retval': 0, - 'a': 1 - }]], - 'bar': [[{ - 'retval': 1 - }]], - 'baz': [[{ - 'RETVAL': 1 - }]], - } - results = generate_report.CutResultsInPlace( - bench_data, max_keys=0, complain_on_update=False) - # Just reach into results assuming we know it otherwise outputs things in - # the expected way. If it doesn't, testCutResultsInPlace should give an - # indication as to what, exactly, is broken. - self.assertEqual(list(results['foo'][0][0].items()), [('retval', 0)]) - self.assertEqual(list(results['bar'][0][0].items()), [('retval', 1)]) - self.assertEqual(list(results['baz'][0][0].items()), []) - - def _RunMainWithInput(self, args, input_obj): - assert '-i' not in args - args += ['-i', '-'] - input_buf = _ContextualStringIO(json.dumps(input_obj)) - with mock.patch('generate_report.PickInputFile', return_value=input_buf) \ - as patched_pick: - result = generate_report.Main(args) - patched_pick.assert_called_once_with('-') - return result - - @mock.patch('generate_report.RunActions') - def testMain(self, mock_run_actions): - # Email is left out because it's a bit more difficult to test, and it'll be - # mildly obvious if it's failing. - args = ['--json', '--html', '--text'] - return_code = self._RunMainWithInput(args, {'platforms': [], 'data': {}}) - self.assertEqual(0, return_code) - self.assertEqual(mock_run_actions.call_count, 1) - ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]] - self.assertEqual(ctors, [ - results_report.JSONResultsReport, - results_report.TextResultsReport, - results_report.HTMLResultsReport, - ]) - - @mock.patch('generate_report.RunActions') - def testMainSelectsHTMLIfNoReportsGiven(self, mock_run_actions): - args = [] - return_code = self._RunMainWithInput(args, {'platforms': [], 'data': {}}) - self.assertEqual(0, return_code) - self.assertEqual(mock_run_actions.call_count, 1) - ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]] - self.assertEqual(ctors, [results_report.HTMLResultsReport]) - - # We only mock print_exc so we don't have exception info printed to stdout. 
- @mock.patch('generate_report.WriteFile', side_effect=ValueError('Oh noo')) - @mock.patch('traceback.print_exc') - def testRunActionsRunsAllActionsRegardlessOfExceptions( - self, mock_print_exc, mock_write_file): - actions = [(None, 'json'), (None, 'html'), (None, 'text'), (None, 'email')] - output_prefix = '-' - ok = generate_report.RunActions( - actions, {}, output_prefix, overwrite=False, verbose=False) - self.assertFalse(ok) - self.assertEqual(mock_write_file.call_count, len(actions)) - self.assertEqual(mock_print_exc.call_count, len(actions)) - - @mock.patch('generate_report.WriteFile') - def testRunActionsReturnsTrueIfAllActionsSucceed(self, mock_write_file): - actions = [(None, 'json'), (None, 'html'), (None, 'text')] - output_prefix = '-' - ok = generate_report.RunActions( - actions, {}, output_prefix, overwrite=False, verbose=False) - self.assertEqual(mock_write_file.call_count, len(actions)) - self.assertTrue(ok) - - -if __name__ == '__main__': - test_flag.SetTestMode(True) - unittest.main() + """Tests for generate_report.py.""" + + def testCountBenchmarks(self): + runs = { + "foo": [[{}, {}, {}], [{}, {}, {}, {}]], + "bar": [], + "baz": [[], [{}], [{}, {}, {}]], + } + results = generate_report.CountBenchmarks(runs) + expected_results = [("foo", 4), ("bar", 0), ("baz", 3)] + self.assertCountEqual(expected_results, results) + + def testCutResultsInPlace(self): + bench_data = { + "foo": [[{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 2.5, "c": 1}]], + "bar": [[{"d": 11, "e": 12, "f": 13}]], + "baz": [[{"g": 12, "h": 13}]], + "qux": [[{"i": 11}]], + } + original_bench_data = copy.deepcopy(bench_data) + + max_keys = 2 + results = generate_report.CutResultsInPlace( + bench_data, max_keys=max_keys, complain_on_update=False + ) + # Cuts should be in-place. + self.assertIs(results, bench_data) + self.assertCountEqual( + list(original_bench_data.keys()), list(bench_data.keys()) + ) + for bench_name, original_runs in original_bench_data.items(): + bench_runs = bench_data[bench_name] + self.assertEqual(len(original_runs), len(bench_runs)) + # Order of these sub-lists shouldn't have changed. + for original_list, new_list in zip(original_runs, bench_runs): + self.assertEqual(len(original_list), len(new_list)) + for original_keyvals, sub_keyvals in zip( + original_list, new_list + ): + # sub_keyvals must be a subset of original_keyvals + self.assertDictContainsSubset(sub_keyvals, original_keyvals) + + def testCutResultsInPlaceLeavesRetval(self): + bench_data = { + "foo": [[{"retval": 0, "a": 1}]], + "bar": [[{"retval": 1}]], + "baz": [[{"RETVAL": 1}]], + } + results = generate_report.CutResultsInPlace( + bench_data, max_keys=0, complain_on_update=False + ) + # Just reach into results assuming we know it otherwise outputs things in + # the expected way. If it doesn't, testCutResultsInPlace should give an + # indication as to what, exactly, is broken. 
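Spelled out on a tiny input (values hypothetical), the behavior the assertions below encode: with max_keys=0 every ordinary keyval is dropped, lowercase "retval" is passed through unconditionally, and uppercase "RETVAL" gets no special treatment.

data = {
    "foo": [[{"retval": 0, "a": 1}]],
    "baz": [[{"RETVAL": 1}]],
}
# After CutResultsInPlace(data, max_keys=0, complain_on_update=False):
#   data["foo"][0][0] == {"retval": 0}   # retval survives the cut
#   data["baz"][0][0] == {}              # "RETVAL" is just another key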
+ self.assertEqual(list(results["foo"][0][0].items()), [("retval", 0)]) + self.assertEqual(list(results["bar"][0][0].items()), [("retval", 1)]) + self.assertEqual(list(results["baz"][0][0].items()), []) + + def _RunMainWithInput(self, args, input_obj): + assert "-i" not in args + args += ["-i", "-"] + input_buf = _ContextualStringIO(json.dumps(input_obj)) + with mock.patch( + "generate_report.PickInputFile", return_value=input_buf + ) as patched_pick: + result = generate_report.Main(args) + patched_pick.assert_called_once_with("-") + return result + + @mock.patch("generate_report.RunActions") + def testMain(self, mock_run_actions): + # Email is left out because it's a bit more difficult to test, and it'll be + # mildly obvious if it's failing. + args = ["--json", "--html", "--text"] + return_code = self._RunMainWithInput( + args, {"platforms": [], "data": {}} + ) + self.assertEqual(0, return_code) + self.assertEqual(mock_run_actions.call_count, 1) + ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]] + self.assertEqual( + ctors, + [ + results_report.JSONResultsReport, + results_report.TextResultsReport, + results_report.HTMLResultsReport, + ], + ) + + @mock.patch("generate_report.RunActions") + def testMainSelectsHTMLIfNoReportsGiven(self, mock_run_actions): + args = [] + return_code = self._RunMainWithInput( + args, {"platforms": [], "data": {}} + ) + self.assertEqual(0, return_code) + self.assertEqual(mock_run_actions.call_count, 1) + ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]] + self.assertEqual(ctors, [results_report.HTMLResultsReport]) + + # We only mock print_exc so we don't have exception info printed to stdout. + @mock.patch("generate_report.WriteFile", side_effect=ValueError("Oh noo")) + @mock.patch("traceback.print_exc") + def testRunActionsRunsAllActionsRegardlessOfExceptions( + self, mock_print_exc, mock_write_file + ): + actions = [ + (None, "json"), + (None, "html"), + (None, "text"), + (None, "email"), + ] + output_prefix = "-" + ok = generate_report.RunActions( + actions, {}, output_prefix, overwrite=False, verbose=False + ) + self.assertFalse(ok) + self.assertEqual(mock_write_file.call_count, len(actions)) + self.assertEqual(mock_print_exc.call_count, len(actions)) + + @mock.patch("generate_report.WriteFile") + def testRunActionsReturnsTrueIfAllActionsSucceed(self, mock_write_file): + actions = [(None, "json"), (None, "html"), (None, "text")] + output_prefix = "-" + ok = generate_report.RunActions( + actions, {}, output_prefix, overwrite=False, verbose=False + ) + self.assertEqual(mock_write_file.call_count, len(actions)) + self.assertTrue(ok) + + +if __name__ == "__main__": + test_flag.SetTestMode(True) + unittest.main() diff --git a/crosperf/help.py b/crosperf/help.py index d9624d07..660e2a4b 100644 --- a/crosperf/help.py +++ b/crosperf/help.py @@ -9,39 +9,42 @@ from __future__ import print_function import sys import textwrap + from settings_factory import BenchmarkSettings from settings_factory import GlobalSettings from settings_factory import LabelSettings class Help(object): - """The help class.""" - - def GetUsage(self): - return """%s [OPTIONS] EXPERIMENT_FILE""" % (sys.argv[0]) - - def _WrapLine(self, line): - return '\n'.join(textwrap.wrap(line, 80)) - - def _GetFieldDescriptions(self, fields): - res = '' - for field_name in fields: - field = fields[field_name] - res += 'Field:\t\t%s\n' % field.name - res += self._WrapLine('Description:\t%s' % field.description) + '\n' - res += 'Type:\t\t%s\n' % type(field).__name__.replace('Field', 
'') - res += 'Required:\t%s\n' % field.required - if field.default: - res += 'Default:\t%s\n' % field.default - res += '\n' - return res - - def GetHelp(self): - global_fields = self._GetFieldDescriptions(GlobalSettings('').fields) - benchmark_fields = self._GetFieldDescriptions(BenchmarkSettings('').fields) - label_fields = self._GetFieldDescriptions(LabelSettings('').fields) - - return """%s is a script for running performance experiments on + """The help class.""" + + def GetUsage(self): + return """%s [OPTIONS] EXPERIMENT_FILE""" % (sys.argv[0]) + + def _WrapLine(self, line): + return "\n".join(textwrap.wrap(line, 80)) + + def _GetFieldDescriptions(self, fields): + res = "" + for field_name in fields: + field = fields[field_name] + res += "Field:\t\t%s\n" % field.name + res += self._WrapLine("Description:\t%s" % field.description) + "\n" + res += "Type:\t\t%s\n" % type(field).__name__.replace("Field", "") + res += "Required:\t%s\n" % field.required + if field.default: + res += "Default:\t%s\n" % field.default + res += "\n" + return res + + def GetHelp(self): + global_fields = self._GetFieldDescriptions(GlobalSettings("").fields) + benchmark_fields = self._GetFieldDescriptions( + BenchmarkSettings("").fields + ) + label_fields = self._GetFieldDescriptions(LabelSettings("").fields) + + return """%s is a script for running performance experiments on ChromeOS. It allows one to run ChromeOS Autotest benchmarks over several images and compare the results to determine whether there is a performance difference. @@ -114,5 +117,11 @@ experiment file). Crosperf runs the experiment and caches the results generates and displays a report based on the run, and emails the report to the user. If the results were all read out of the cache, then by default no email is generated. -""" % (sys.argv[0], sys.argv[0], global_fields, benchmark_fields, label_fields, - sys.argv[0]) +""" % ( + sys.argv[0], + sys.argv[0], + global_fields, + benchmark_fields, + label_fields, + sys.argv[0], + ) diff --git a/crosperf/image_checksummer.py b/crosperf/image_checksummer.py index de3fc15a..1fa25cfa 100644 --- a/crosperf/image_checksummer.py +++ b/crosperf/image_checksummer.py @@ -15,59 +15,71 @@ from cros_utils.file_utils import FileUtils class ImageChecksummer(object): - """Compute image checksum.""" + """Compute image checksum.""" - class PerImageChecksummer(object): - """Compute checksum for an image.""" + class PerImageChecksummer(object): + """Compute checksum for an image.""" - def __init__(self, label, log_level): - self._lock = threading.Lock() - self.label = label - self._checksum = None - self.log_level = log_level + def __init__(self, label, log_level): + self._lock = threading.Lock() + self.label = label + self._checksum = None + self.log_level = log_level - def Checksum(self): - with self._lock: - if not self._checksum: - logger.GetLogger().LogOutput( - "Acquiring checksum for '%s'." 
% self.label.name) - self._checksum = None - if self.label.image_type != 'local': - raise RuntimeError('Called Checksum on non-local image!') - if self.label.chromeos_image: - if os.path.exists(self.label.chromeos_image): - self._checksum = FileUtils().Md5File( - self.label.chromeos_image, log_level=self.log_level) - logger.GetLogger().LogOutput('Computed checksum is ' - ': %s' % self._checksum) - if not self._checksum: - raise RuntimeError('Checksum computing error.') - logger.GetLogger().LogOutput('Checksum is: %s' % self._checksum) - return self._checksum + def Checksum(self): + with self._lock: + if not self._checksum: + logger.GetLogger().LogOutput( + "Acquiring checksum for '%s'." % self.label.name + ) + self._checksum = None + if self.label.image_type != "local": + raise RuntimeError( + "Called Checksum on non-local image!" + ) + if self.label.chromeos_image: + if os.path.exists(self.label.chromeos_image): + self._checksum = FileUtils().Md5File( + self.label.chromeos_image, + log_level=self.log_level, + ) + logger.GetLogger().LogOutput( + "Computed checksum is " ": %s" % self._checksum + ) + if not self._checksum: + raise RuntimeError("Checksum computing error.") + logger.GetLogger().LogOutput( + "Checksum is: %s" % self._checksum + ) + return self._checksum - _instance = None - _lock = threading.Lock() - _per_image_checksummers = {} + _instance = None + _lock = threading.Lock() + _per_image_checksummers = {} - def __new__(cls, *args, **kwargs): - with cls._lock: - if not cls._instance: - cls._instance = super(ImageChecksummer, cls).__new__( - cls, *args, **kwargs) - return cls._instance + def __new__(cls, *args, **kwargs): + with cls._lock: + if not cls._instance: + cls._instance = super(ImageChecksummer, cls).__new__( + cls, *args, **kwargs + ) + return cls._instance - def Checksum(self, label, log_level): - if label.image_type != 'local': - raise RuntimeError('Attempt to call Checksum on non-local image.') - with self._lock: - if label.name not in self._per_image_checksummers: - self._per_image_checksummers[label.name] = ( - ImageChecksummer.PerImageChecksummer(label, log_level)) - checksummer = self._per_image_checksummers[label.name] + def Checksum(self, label, log_level): + if label.image_type != "local": + raise RuntimeError("Attempt to call Checksum on non-local image.") + with self._lock: + if label.name not in self._per_image_checksummers: + self._per_image_checksummers[ + label.name + ] = ImageChecksummer.PerImageChecksummer(label, log_level) + checksummer = self._per_image_checksummers[label.name] - try: - return checksummer.Checksum() - except: - logger.GetLogger().LogError('Could not compute checksum of image in label' - " '%s'." % label.name) - raise + try: + return checksummer.Checksum() + except: + logger.GetLogger().LogError( + "Could not compute checksum of image in label" + " '%s'." 
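# [Editor's sketch -- not part of this patch] ImageChecksummer above
# combines a lock-guarded singleton (__new__) with lazy, per-key caching
# of an expensive checksum. The same shape, self-contained, with hashlib
# standing in for FileUtils().Md5File:
import hashlib
import threading

class CachedHasher(object):
    """Singleton that computes each keyed checksum at most once."""

    _instance = None
    _lock = threading.Lock()

    def __new__(cls):
        with cls._lock:
            if cls._instance is None:
                cls._instance = super(CachedHasher, cls).__new__(cls)
                cls._instance._cache = {}
        return cls._instance

    def checksum(self, key, data):
        with self._lock:
            if key not in self._cache:
                self._cache[key] = hashlib.md5(
                    data.encode("utf-8")
                ).hexdigest()
            return self._cache[key]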
% label.name + ) + raise diff --git a/crosperf/label.py b/crosperf/label.py index 588fb67e..0ce3957b 100644 --- a/crosperf/label.py +++ b/crosperf/label.py @@ -10,179 +10,195 @@ from __future__ import print_function import hashlib import os -from image_checksummer import ImageChecksummer -from cros_utils.file_utils import FileUtils from cros_utils import misc +from cros_utils.file_utils import FileUtils +from image_checksummer import ImageChecksummer class Label(object): - """The label class.""" - - def __init__(self, - name, - build, - chromeos_image, - autotest_path, - debug_path, - chromeos_root, - board, - remote, - image_args, - cache_dir, - cache_only, - log_level, - compiler, - crosfleet=False, - chrome_src=None): - - self.image_type = self._GetImageType(chromeos_image) - - # Expand ~ - chromeos_root = os.path.expanduser(chromeos_root) - if self.image_type == 'local': - chromeos_image = os.path.expanduser(chromeos_image) - - self.name = name - self.build = build - self.chromeos_image = chromeos_image - self.autotest_path = autotest_path - self.debug_path = debug_path - self.board = board - self.remote = remote - self.image_args = image_args - self.cache_dir = cache_dir - self.cache_only = cache_only - self.log_level = log_level - self.chrome_version = '' - self.compiler = compiler - self.crosfleet = crosfleet - - if not chromeos_root: - if self.image_type == 'local': - chromeos_root = FileUtils().ChromeOSRootFromImage(chromeos_image) - if not chromeos_root: - raise RuntimeError("No ChromeOS root given for label '%s' and could " - "not determine one from image path: '%s'." % - (name, chromeos_image)) - else: - chromeos_root = FileUtils().CanonicalizeChromeOSRoot(chromeos_root) - if not chromeos_root: - raise RuntimeError("Invalid ChromeOS root given for label '%s': '%s'." % - (name, chromeos_root)) - - self.chromeos_root = chromeos_root - if not chrome_src: - # Old and new chroots may have different chrome src locations. - # The path also depends on the chrome build flags. - # Give priority to chrome-src-internal. - chrome_src_rel_paths = [ - '.cache/distfiles/target/chrome-src-internal', - '.cache/distfiles/chrome-src-internal', - '.cache/distfiles/target/chrome-src', - '.cache/distfiles/chrome-src', - ] - for chrome_src_rel_path in chrome_src_rel_paths: - chrome_src_abs_path = os.path.join(self.chromeos_root, - chrome_src_rel_path) - if os.path.exists(chrome_src_abs_path): - chrome_src = chrome_src_abs_path - break - if not chrome_src: - raise RuntimeError('Can not find location of Chrome sources.\n' - f'Checked paths: {chrome_src_rel_paths}') - else: - chrome_src = misc.CanonicalizePath(chrome_src) - # Make sure the path exists. - if not os.path.exists(chrome_src): - raise RuntimeError("Invalid Chrome src given for label '%s': '%s'." 
% - (name, chrome_src)) - self.chrome_src = chrome_src - - self._SetupChecksum() - - def _SetupChecksum(self): - """Compute label checksum only once.""" - - self.checksum = None - if self.image_type == 'local': - self.checksum = ImageChecksummer().Checksum(self, self.log_level) - elif self.image_type == 'trybot': - self.checksum = hashlib.md5( - self.chromeos_image.encode('utf-8')).hexdigest() - - def _GetImageType(self, chromeos_image): - image_type = None - if chromeos_image.find('xbuddy://') < 0: - image_type = 'local' - elif chromeos_image.find('trybot') >= 0: - image_type = 'trybot' - else: - image_type = 'official' - return image_type - - def __hash__(self): - """Label objects are used in a map, so provide "hash" and "equal".""" - - return hash(self.name) - - def __eq__(self, other): - """Label objects are used in a map, so provide "hash" and "equal".""" - - return isinstance(other, Label) and other.name == self.name - - def __str__(self): - """For better debugging.""" - - return 'label[name="{}"]'.format(self.name) + """The label class.""" + + def __init__( + self, + name, + build, + chromeos_image, + autotest_path, + debug_path, + chromeos_root, + board, + remote, + image_args, + cache_dir, + cache_only, + log_level, + compiler, + crosfleet=False, + chrome_src=None, + ): + + self.image_type = self._GetImageType(chromeos_image) + + # Expand ~ + chromeos_root = os.path.expanduser(chromeos_root) + if self.image_type == "local": + chromeos_image = os.path.expanduser(chromeos_image) + + self.name = name + self.build = build + self.chromeos_image = chromeos_image + self.autotest_path = autotest_path + self.debug_path = debug_path + self.board = board + self.remote = remote + self.image_args = image_args + self.cache_dir = cache_dir + self.cache_only = cache_only + self.log_level = log_level + self.chrome_version = "" + self.compiler = compiler + self.crosfleet = crosfleet + + if not chromeos_root: + if self.image_type == "local": + chromeos_root = FileUtils().ChromeOSRootFromImage( + chromeos_image + ) + if not chromeos_root: + raise RuntimeError( + "No ChromeOS root given for label '%s' and could " + "not determine one from image path: '%s'." + % (name, chromeos_image) + ) + else: + chromeos_root = FileUtils().CanonicalizeChromeOSRoot(chromeos_root) + if not chromeos_root: + raise RuntimeError( + "Invalid ChromeOS root given for label '%s': '%s'." + % (name, chromeos_root) + ) + + self.chromeos_root = chromeos_root + if not chrome_src: + # Old and new chroots may have different chrome src locations. + # The path also depends on the chrome build flags. + # Give priority to chrome-src-internal. + chrome_src_rel_paths = [ + ".cache/distfiles/target/chrome-src-internal", + ".cache/distfiles/chrome-src-internal", + ".cache/distfiles/target/chrome-src", + ".cache/distfiles/chrome-src", + ] + for chrome_src_rel_path in chrome_src_rel_paths: + chrome_src_abs_path = os.path.join( + self.chromeos_root, chrome_src_rel_path + ) + if os.path.exists(chrome_src_abs_path): + chrome_src = chrome_src_abs_path + break + if not chrome_src: + raise RuntimeError( + "Can not find location of Chrome sources.\n" + f"Checked paths: {chrome_src_rel_paths}" + ) + else: + chrome_src = misc.CanonicalizePath(chrome_src) + # Make sure the path exists. + if not os.path.exists(chrome_src): + raise RuntimeError( + "Invalid Chrome src given for label '%s': '%s'." 
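# [Editor's sketch -- not part of this patch] The chrome_src discovery
# above walks a priority-ordered list of candidate paths and keeps the
# first one that exists. The same logic as a tiny helper (the example
# call below uses hypothetical paths):
import os

def first_existing(root, rel_paths):
    """Return the first root/rel path that exists, else None."""
    for rel in rel_paths:
        candidate = os.path.join(root, rel)
        if os.path.exists(candidate):
            return candidate
    return None

# e.g. first_existing(chromeos_root, [".cache/distfiles/chrome-src"])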
+ % (name, chrome_src) + ) + self.chrome_src = chrome_src + + self._SetupChecksum() + + def _SetupChecksum(self): + """Compute label checksum only once.""" + + self.checksum = None + if self.image_type == "local": + self.checksum = ImageChecksummer().Checksum(self, self.log_level) + elif self.image_type == "trybot": + self.checksum = hashlib.md5( + self.chromeos_image.encode("utf-8") + ).hexdigest() + + def _GetImageType(self, chromeos_image): + image_type = None + if chromeos_image.find("xbuddy://") < 0: + image_type = "local" + elif chromeos_image.find("trybot") >= 0: + image_type = "trybot" + else: + image_type = "official" + return image_type + + def __hash__(self): + """Label objects are used in a map, so provide "hash" and "equal".""" + + return hash(self.name) + + def __eq__(self, other): + """Label objects are used in a map, so provide "hash" and "equal".""" + + return isinstance(other, Label) and other.name == self.name + + def __str__(self): + """For better debugging.""" + + return 'label[name="{}"]'.format(self.name) class MockLabel(object): - """The mock label class.""" - - def __init__(self, - name, - build, - chromeos_image, - autotest_path, - debug_path, - chromeos_root, - board, - remote, - image_args, - cache_dir, - cache_only, - log_level, - compiler, - crosfleet=False, - chrome_src=None): - self.name = name - self.build = build - self.chromeos_image = chromeos_image - self.autotest_path = autotest_path - self.debug_path = debug_path - self.board = board - self.remote = remote - self.cache_dir = cache_dir - self.cache_only = cache_only - if not chromeos_root: - self.chromeos_root = '/tmp/chromeos_root' - else: - self.chromeos_root = chromeos_root - self.image_args = image_args - self.chrome_src = chrome_src - self.image_type = self._GetImageType(chromeos_image) - self.checksum = '' - self.log_level = log_level - self.compiler = compiler - self.crosfleet = crosfleet - self.chrome_version = 'Fake Chrome Version 50' - - def _GetImageType(self, chromeos_image): - image_type = None - if chromeos_image.find('xbuddy://') < 0: - image_type = 'local' - elif chromeos_image.find('trybot') >= 0: - image_type = 'trybot' - else: - image_type = 'official' - return image_type + """The mock label class.""" + + def __init__( + self, + name, + build, + chromeos_image, + autotest_path, + debug_path, + chromeos_root, + board, + remote, + image_args, + cache_dir, + cache_only, + log_level, + compiler, + crosfleet=False, + chrome_src=None, + ): + self.name = name + self.build = build + self.chromeos_image = chromeos_image + self.autotest_path = autotest_path + self.debug_path = debug_path + self.board = board + self.remote = remote + self.cache_dir = cache_dir + self.cache_only = cache_only + if not chromeos_root: + self.chromeos_root = "/tmp/chromeos_root" + else: + self.chromeos_root = chromeos_root + self.image_args = image_args + self.chrome_src = chrome_src + self.image_type = self._GetImageType(chromeos_image) + self.checksum = "" + self.log_level = log_level + self.compiler = compiler + self.crosfleet = crosfleet + self.chrome_version = "Fake Chrome Version 50" + + def _GetImageType(self, chromeos_image): + image_type = None + if chromeos_image.find("xbuddy://") < 0: + image_type = "local" + elif chromeos_image.find("trybot") >= 0: + image_type = "trybot" + else: + image_type = "official" + return image_type diff --git a/crosperf/machine_image_manager.py b/crosperf/machine_image_manager.py index 5d6e6bd7..753ce0fe 100644 --- a/crosperf/machine_image_manager.py +++ 
b/crosperf/machine_image_manager.py @@ -11,7 +11,7 @@ import functools class MachineImageManager(object): - """Management of allocating images to duts. + """Management of allocating images to duts. * Data structure we have - @@ -137,173 +137,180 @@ class MachineImageManager(object): * Special / common case to handle seperately We have only 1 dut or if we have only 1 label, that's simple enough. - """ - - def __init__(self, labels, duts): - self.labels_ = labels - self.duts_ = duts - self.n_labels_ = len(labels) - self.n_duts_ = len(duts) - self.dut_name_ordinal_ = dict() - for idx, dut in enumerate(self.duts_): - self.dut_name_ordinal_[dut.name] = idx - - # Generate initial matrix containg 'X' or ' '. - self.matrix_ = [['X' if l.remote else ' ' - for _ in range(self.n_duts_)] - for l in self.labels_] - for ol, l in enumerate(self.labels_): - if l.remote: - for r in l.remote: - self.matrix_[ol][self.dut_name_ordinal_[r]] = ' ' - - self.label_duts_ = [[] for _ in range(self.n_labels_)] - self.allocate_log_ = [] - - def compute_initial_allocation(self): - """Compute the initial label-dut allocation. - - This method finds the most efficient way that every label gets imaged at - least once. - - Returns: - False, only if not all labels could be imaged to a certain machine, - otherwise True. """ - if self.n_duts_ == 1: - for i, v in self.matrix_vertical_generator(0): - if v != 'X': - self.matrix_[i][0] = 'Y' - return - - if self.n_labels_ == 1: - for j, v in self.matrix_horizontal_generator(0): - if v != 'X': - self.matrix_[0][j] = 'Y' - return - - if self.n_duts_ >= self.n_labels_: - n = 1 - else: - n = self.n_labels_ - self.n_duts_ + 1 - while n <= self.n_labels_: - if self._compute_initial_allocation_internal(0, n): - break - n += 1 - - return n <= self.n_labels_ - - def _record_allocate_log(self, label_i, dut_j): - self.allocate_log_.append((label_i, dut_j)) - self.label_duts_[label_i].append(dut_j) - - def allocate(self, dut, schedv2=None): - """Allocate a label for dut. - - Args: - dut: the dut that asks for a new image. - schedv2: the scheduling instance, we need the benchmark run - information with schedv2 for a better allocation. - - Returns: - a label to image onto the dut or None if no more available images for - the dut. - """ - j = self.dut_name_ordinal_[dut.name] - # 'can_' prefix means candidate label's. - can_reimage_number = 999 - can_i = 999 - can_label = None - can_pending_br_num = 0 - for i, v in self.matrix_vertical_generator(j): - label = self.labels_[i] - - # 2 optimizations here regarding allocating label to dut. - # Note schedv2 might be None in case we do not need this - # optimization or we are in testing mode. - if schedv2 is not None: - pending_br_num = len(schedv2.get_label_map()[label]) - if pending_br_num == 0: - # (A) - we have finished all br of this label, - # apparently, we do not want to reimaeg dut to - # this label. - continue - else: - # In case we do not have a schedv2 instance, mark - # pending_br_num as 0, so pending_br_num >= - # can_pending_br_num is always True. - pending_br_num = 0 - - # For this time being, I just comment this out until we have a - # better estimation how long each benchmarkrun takes. 
- # if (pending_br_num <= 5 and - # len(self.label_duts_[i]) >= 1): - # # (B) this is heuristic - if there are just a few test cases - # # (say <5) left undone for this label, and there is at least - # # 1 other machine working on this lable, we probably not want - # # to bother to reimage this dut to help with these 5 test - # # cases - # continue - - if v == 'Y': - self.matrix_[i][j] = '_' - self._record_allocate_log(i, j) - return label - if v == ' ': - label_reimage_number = len(self.label_duts_[i]) - if ((can_label is None) or - (label_reimage_number < can_reimage_number or - (label_reimage_number == can_reimage_number and - pending_br_num >= can_pending_br_num))): - can_reimage_number = label_reimage_number - can_i = i - can_label = label - can_pending_br_num = pending_br_num - - # All labels are marked either '_' (already taken) or 'X' (not - # compatible), so return None to notify machine thread to quit. - if can_label is None: - return None - - # At this point, we don't find any 'Y' for the machine, so we go the - # 'min' approach. - self.matrix_[can_i][j] = '_' - self._record_allocate_log(can_i, j) - return can_label - - def matrix_vertical_generator(self, col): - """Iterate matrix vertically at column 'col'. - - Yield row number i and value at matrix_[i][col]. - """ - for i, _ in enumerate(self.labels_): - yield i, self.matrix_[i][col] - - def matrix_horizontal_generator(self, row): - """Iterate matrix horizontally at row 'row'. - - Yield col number j and value at matrix_[row][j]. - """ - for j, _ in enumerate(self.duts_): - yield j, self.matrix_[row][j] - - def _compute_initial_allocation_internal(self, level, N): - """Search matrix for d with N.""" - - if level == self.n_labels_: - return True - - for j, v in self.matrix_horizontal_generator(level): - if v == ' ': - # Before we put a 'Y', we check how many Y column 'j' has. - # Note y[0] is row idx, y[1] is the cell value. - ny = functools.reduce(lambda x, y: x + 1 if (y[1] == 'Y') else x, - self.matrix_vertical_generator(j), 0) - if ny < N: - self.matrix_[level][j] = 'Y' - if self._compute_initial_allocation_internal(level + 1, N): + def __init__(self, labels, duts): + self.labels_ = labels + self.duts_ = duts + self.n_labels_ = len(labels) + self.n_duts_ = len(duts) + self.dut_name_ordinal_ = dict() + for idx, dut in enumerate(self.duts_): + self.dut_name_ordinal_[dut.name] = idx + + # Generate initial matrix containg 'X' or ' '. + self.matrix_ = [ + ["X" if l.remote else " " for _ in range(self.n_duts_)] + for l in self.labels_ + ] + for ol, l in enumerate(self.labels_): + if l.remote: + for r in l.remote: + self.matrix_[ol][self.dut_name_ordinal_[r]] = " " + + self.label_duts_ = [[] for _ in range(self.n_labels_)] + self.allocate_log_ = [] + + def compute_initial_allocation(self): + """Compute the initial label-dut allocation. + + This method finds the most efficient way that every label gets imaged at + least once. + + Returns: + False, only if not all labels could be imaged to a certain machine, + otherwise True. 
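# [Editor's sketch -- not part of this patch] __init__ above seeds
# matrix_ with 'X' (incompatible) across a row whenever the label names
# specific remotes, then re-opens (' ') exactly the duts it lists; a
# label with no remote restriction starts fully open. Standalone, with
# made-up labels and duts:
duts = ["m0", "m1", "m2"]
ordinal = {name: idx for idx, name in enumerate(duts)}
label_remotes = [["m0", "m2"], []]  # label 0 restricted, label 1 unrestricted
matrix = [["X" if r else " " for _ in duts] for r in label_remotes]
for row, remotes in enumerate(label_remotes):
    for name in remotes:
        matrix[row][ordinal[name]] = " "
assert matrix == [[" ", "X", " "], [" ", " ", " "]]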
+ """ + + if self.n_duts_ == 1: + for i, v in self.matrix_vertical_generator(0): + if v != "X": + self.matrix_[i][0] = "Y" + return + + if self.n_labels_ == 1: + for j, v in self.matrix_horizontal_generator(0): + if v != "X": + self.matrix_[0][j] = "Y" + return + + if self.n_duts_ >= self.n_labels_: + n = 1 + else: + n = self.n_labels_ - self.n_duts_ + 1 + while n <= self.n_labels_: + if self._compute_initial_allocation_internal(0, n): + break + n += 1 + + return n <= self.n_labels_ + + def _record_allocate_log(self, label_i, dut_j): + self.allocate_log_.append((label_i, dut_j)) + self.label_duts_[label_i].append(dut_j) + + def allocate(self, dut, schedv2=None): + """Allocate a label for dut. + + Args: + dut: the dut that asks for a new image. + schedv2: the scheduling instance, we need the benchmark run + information with schedv2 for a better allocation. + + Returns: + a label to image onto the dut or None if no more available images for + the dut. + """ + j = self.dut_name_ordinal_[dut.name] + # 'can_' prefix means candidate label's. + can_reimage_number = 999 + can_i = 999 + can_label = None + can_pending_br_num = 0 + for i, v in self.matrix_vertical_generator(j): + label = self.labels_[i] + + # 2 optimizations here regarding allocating label to dut. + # Note schedv2 might be None in case we do not need this + # optimization or we are in testing mode. + if schedv2 is not None: + pending_br_num = len(schedv2.get_label_map()[label]) + if pending_br_num == 0: + # (A) - we have finished all br of this label, + # apparently, we do not want to reimaeg dut to + # this label. + continue + else: + # In case we do not have a schedv2 instance, mark + # pending_br_num as 0, so pending_br_num >= + # can_pending_br_num is always True. + pending_br_num = 0 + + # For this time being, I just comment this out until we have a + # better estimation how long each benchmarkrun takes. + # if (pending_br_num <= 5 and + # len(self.label_duts_[i]) >= 1): + # # (B) this is heuristic - if there are just a few test cases + # # (say <5) left undone for this label, and there is at least + # # 1 other machine working on this lable, we probably not want + # # to bother to reimage this dut to help with these 5 test + # # cases + # continue + + if v == "Y": + self.matrix_[i][j] = "_" + self._record_allocate_log(i, j) + return label + if v == " ": + label_reimage_number = len(self.label_duts_[i]) + if (can_label is None) or ( + label_reimage_number < can_reimage_number + or ( + label_reimage_number == can_reimage_number + and pending_br_num >= can_pending_br_num + ) + ): + can_reimage_number = label_reimage_number + can_i = i + can_label = label + can_pending_br_num = pending_br_num + + # All labels are marked either '_' (already taken) or 'X' (not + # compatible), so return None to notify machine thread to quit. + if can_label is None: + return None + + # At this point, we don't find any 'Y' for the machine, so we go the + # 'min' approach. + self.matrix_[can_i][j] = "_" + self._record_allocate_log(can_i, j) + return can_label + + def matrix_vertical_generator(self, col): + """Iterate matrix vertically at column 'col'. + + Yield row number i and value at matrix_[i][col]. + """ + for i, _ in enumerate(self.labels_): + yield i, self.matrix_[i][col] + + def matrix_horizontal_generator(self, row): + """Iterate matrix horizontally at row 'row'. + + Yield col number j and value at matrix_[row][j]. 
+ """ + for j, _ in enumerate(self.duts_): + yield j, self.matrix_[row][j] + + def _compute_initial_allocation_internal(self, level, N): + """Search matrix for d with N.""" + + if level == self.n_labels_: return True - self.matrix_[level][j] = ' ' - return False + for j, v in self.matrix_horizontal_generator(level): + if v == " ": + # Before we put a 'Y', we check how many Y column 'j' has. + # Note y[0] is row idx, y[1] is the cell value. + ny = functools.reduce( + lambda x, y: x + 1 if (y[1] == "Y") else x, + self.matrix_vertical_generator(j), + 0, + ) + if ny < N: + self.matrix_[level][j] = "Y" + if self._compute_initial_allocation_internal(level + 1, N): + return True + self.matrix_[level][j] = " " + + return False diff --git a/crosperf/machine_image_manager_unittest.py b/crosperf/machine_image_manager_unittest.py index dd10a0d0..e93a5646 100755 --- a/crosperf/machine_image_manager_unittest.py +++ b/crosperf/machine_image_manager_unittest.py @@ -15,251 +15,282 @@ from machine_image_manager import MachineImageManager class MockLabel(object): - """Class for generating a mock Label.""" + """Class for generating a mock Label.""" - def __init__(self, name, remotes=None): - self.name = name - self.remote = remotes + def __init__(self, name, remotes=None): + self.name = name + self.remote = remotes - def __hash__(self): - """Provide hash function for label. + def __hash__(self): + """Provide hash function for label. - This is required because Label object is used inside a dict as key. - """ - return hash(self.name) + This is required because Label object is used inside a dict as key. + """ + return hash(self.name) - def __eq__(self, other): - """Provide eq function for label. + def __eq__(self, other): + """Provide eq function for label. - This is required because Label object is used inside a dict as key. - """ - return isinstance(other, MockLabel) and other.name == self.name + This is required because Label object is used inside a dict as key. + """ + return isinstance(other, MockLabel) and other.name == self.name class MockDut(object): - """Class for creating a mock Device-Under-Test (DUT).""" + """Class for creating a mock Device-Under-Test (DUT).""" - def __init__(self, name, label=None): - self.name = name - self.label_ = label + def __init__(self, name, label=None): + self.name = name + self.label_ = label class MachineImageManagerTester(unittest.TestCase): - """Class for testing MachineImageManager.""" - - def gen_duts_by_name(self, *names): - duts = [] - for n in names: - duts.append(MockDut(n)) - return duts - - def create_labels_and_duts_from_pattern(self, pattern): - labels = [] - duts = [] - for i, r in enumerate(pattern): - l = MockLabel('l{}'.format(i), []) - for j, v in enumerate(r.split()): - if v == '.': - l.remote.append('m{}'.format(j)) - if i == 0: - duts.append(MockDut('m{}'.format(j))) - labels.append(l) - return labels, duts - - def check_matrix_against_pattern(self, matrix, pattern): - for i, s in enumerate(pattern): - for j, v in enumerate(s.split()): - self.assertTrue(v == '.' 
and matrix[i][j] == ' ' or v == matrix[i][j]) - - def pattern_based_test(self, inp, output): - labels, duts = self.create_labels_and_duts_from_pattern(inp) - mim = MachineImageManager(labels, duts) - self.assertTrue(mim.compute_initial_allocation()) - self.check_matrix_against_pattern(mim.matrix_, output) - return mim - - def test_single_dut(self): - labels = [MockLabel('l1'), MockLabel('l2'), MockLabel('l3')] - dut = MockDut('m1') - mim = MachineImageManager(labels, [dut]) - mim.compute_initial_allocation() - self.assertTrue(mim.matrix_ == [['Y'], ['Y'], ['Y']]) - - def test_single_label(self): - labels = [MockLabel('l1')] - duts = self.gen_duts_by_name('m1', 'm2', 'm3') - mim = MachineImageManager(labels, duts) - mim.compute_initial_allocation() - self.assertTrue(mim.matrix_ == [['Y', 'Y', 'Y']]) - - def test_case1(self): - labels = [ - MockLabel('l1', ['m1', 'm2']), - MockLabel('l2', ['m2', 'm3']), - MockLabel('l3', ['m1']) - ] - duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')] - mim = MachineImageManager(labels, duts) - self.assertTrue( - mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X', 'X']]) - mim.compute_initial_allocation() - self.assertTrue( - mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X', 'X']]) - - def test_case2(self): - labels = [ - MockLabel('l1', ['m1', 'm2']), - MockLabel('l2', ['m2', 'm3']), - MockLabel('l3', ['m1']) - ] - duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')] - mim = MachineImageManager(labels, duts) - self.assertTrue( - mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X', 'X']]) - mim.compute_initial_allocation() - self.assertTrue( - mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X', 'X']]) - - def test_case3(self): - labels = [ - MockLabel('l1', ['m1', 'm2']), - MockLabel('l2', ['m2', 'm3']), - MockLabel('l3', ['m1']) - ] - duts = [MockDut('m1', labels[0]), MockDut('m2'), MockDut('m3')] - mim = MachineImageManager(labels, duts) - mim.compute_initial_allocation() - self.assertTrue( - mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X', 'X']]) - - def test_case4(self): - labels = [ - MockLabel('l1', ['m1', 'm2']), - MockLabel('l2', ['m2', 'm3']), - MockLabel('l3', ['m1']) - ] - duts = [MockDut('m1'), MockDut('m2', labels[0]), MockDut('m3')] - mim = MachineImageManager(labels, duts) - mim.compute_initial_allocation() - self.assertTrue( - mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X', 'X']]) - - def test_case5(self): - labels = [ - MockLabel('l1', ['m3']), - MockLabel('l2', ['m3']), - MockLabel('l3', ['m1']) - ] - duts = self.gen_duts_by_name('m1', 'm2', 'm3') - mim = MachineImageManager(labels, duts) - self.assertTrue(mim.compute_initial_allocation()) - self.assertTrue( - mim.matrix_ == [['X', 'X', 'Y'], ['X', 'X', 'Y'], ['Y', 'X', 'X']]) - - def test_2x2_with_allocation(self): - labels = [MockLabel('l0'), MockLabel('l1')] - duts = [MockDut('m0'), MockDut('m1')] - mim = MachineImageManager(labels, duts) - self.assertTrue(mim.compute_initial_allocation()) - self.assertTrue(mim.allocate(duts[0]) == labels[0]) - self.assertTrue(mim.allocate(duts[0]) == labels[1]) - self.assertTrue(mim.allocate(duts[0]) is None) - self.assertTrue(mim.matrix_[0][0] == '_') - self.assertTrue(mim.matrix_[1][0] == '_') - self.assertTrue(mim.allocate(duts[1]) == labels[1]) - - def test_10x10_general(self): - """Gen 10x10 matrix.""" - n = 10 - labels = [] - duts = [] - for i in range(n): - labels.append(MockLabel('l{}'.format(i))) - duts.append(MockDut('m{}'.format(i))) - mim = MachineImageManager(labels, duts) - 
self.assertTrue(mim.compute_initial_allocation()) - for i in range(n): - for j in range(n): - if i == j: - self.assertTrue(mim.matrix_[i][j] == 'Y') - else: - self.assertTrue(mim.matrix_[i][j] == ' ') - self.assertTrue(mim.allocate(duts[3]).name == 'l3') - - def test_random_generated(self): - n = 10 - labels = [] - duts = [] - for i in range(10): - # generate 3-5 machines that is compatible with this label - l = MockLabel('l{}'.format(i), []) - r = random.random() - for _ in range(4): - t = int(r * 10) % n - r *= 10 - l.remote.append('m{}'.format(t)) - labels.append(l) - duts.append(MockDut('m{}'.format(i))) - mim = MachineImageManager(labels, duts) - self.assertTrue(mim.compute_initial_allocation()) - - def test_10x10_fully_random(self): - inp = [ - 'X . . . X X . X X .', 'X X . X . X . X X .', - 'X X X . . X . X . X', 'X . X X . . X X . X', - 'X X X X . . . X . .', 'X X . X . X . . X .', - '. X . X . X X X . .', '. X . X X . X X . .', - 'X X . . . X X X . .', '. X X X X . . . . X' - ] - output = [ - 'X Y . . X X . X X .', 'X X Y X . X . X X .', - 'X X X Y . X . X . X', 'X . X X Y . X X . X', - 'X X X X . Y . X . .', 'X X . X . X Y . X .', - 'Y X . X . X X X . .', '. X . X X . X X Y .', - 'X X . . . X X X . Y', '. X X X X . . Y . X' - ] - self.pattern_based_test(inp, output) - - def test_10x10_fully_random2(self): - inp = [ - 'X . X . . X . X X X', 'X X X X X X . . X .', - 'X . X X X X X . . X', 'X X X . X . X X . .', - '. X . X . X X X X X', 'X X X X X X X . . X', - 'X . X X X X X . . X', 'X X X . X X X X . .', - 'X X X . . . X X X X', '. X X . X X X . X X' - ] - output = [ - 'X . X Y . X . X X X', 'X X X X X X Y . X .', - 'X Y X X X X X . . X', 'X X X . X Y X X . .', - '. X Y X . X X X X X', 'X X X X X X X Y . X', - 'X . X X X X X . Y X', 'X X X . X X X X . Y', - 'X X X . Y . X X X X', 'Y X X . X X X . X X' - ] - self.pattern_based_test(inp, output) - - def test_3x4_with_allocation(self): - inp = ['X X . .', '. . X .', 'X . X .'] - output = ['X X Y .', 'Y . X .', 'X Y X .'] - mim = self.pattern_based_test(inp, output) - self.assertTrue(mim.allocate(mim.duts_[2]) == mim.labels_[0]) - self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[2]) - self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1]) - self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[2]) - self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[1]) - self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[0]) - self.assertTrue(mim.allocate(mim.duts_[3]) is None) - self.assertTrue(mim.allocate(mim.duts_[2]) is None) - self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[1]) - self.assertTrue(mim.allocate(mim.duts_[1]) is None) - self.assertTrue(mim.allocate(mim.duts_[0]) is None) - self.assertTrue(mim.label_duts_[0] == [2, 3]) - self.assertTrue(mim.label_duts_[1] == [0, 3, 1]) - self.assertTrue(mim.label_duts_[2] == [3, 1]) - self.assertListEqual(mim.allocate_log_, [(0, 2), (2, 3), (1, 0), (2, 1), - (1, 3), (0, 3), (1, 1)]) - - def test_cornercase_1(self): - """This corner case is brought up by Caroline. 
+ """Class for testing MachineImageManager.""" + + def gen_duts_by_name(self, *names): + duts = [] + for n in names: + duts.append(MockDut(n)) + return duts + + def create_labels_and_duts_from_pattern(self, pattern): + labels = [] + duts = [] + for i, r in enumerate(pattern): + l = MockLabel("l{}".format(i), []) + for j, v in enumerate(r.split()): + if v == ".": + l.remote.append("m{}".format(j)) + if i == 0: + duts.append(MockDut("m{}".format(j))) + labels.append(l) + return labels, duts + + def check_matrix_against_pattern(self, matrix, pattern): + for i, s in enumerate(pattern): + for j, v in enumerate(s.split()): + self.assertTrue( + v == "." and matrix[i][j] == " " or v == matrix[i][j] + ) + + def pattern_based_test(self, inp, output): + labels, duts = self.create_labels_and_duts_from_pattern(inp) + mim = MachineImageManager(labels, duts) + self.assertTrue(mim.compute_initial_allocation()) + self.check_matrix_against_pattern(mim.matrix_, output) + return mim + + def test_single_dut(self): + labels = [MockLabel("l1"), MockLabel("l2"), MockLabel("l3")] + dut = MockDut("m1") + mim = MachineImageManager(labels, [dut]) + mim.compute_initial_allocation() + self.assertTrue(mim.matrix_ == [["Y"], ["Y"], ["Y"]]) + + def test_single_label(self): + labels = [MockLabel("l1")] + duts = self.gen_duts_by_name("m1", "m2", "m3") + mim = MachineImageManager(labels, duts) + mim.compute_initial_allocation() + self.assertTrue(mim.matrix_ == [["Y", "Y", "Y"]]) + + def test_case1(self): + labels = [ + MockLabel("l1", ["m1", "m2"]), + MockLabel("l2", ["m2", "m3"]), + MockLabel("l3", ["m1"]), + ] + duts = [MockDut("m1"), MockDut("m2"), MockDut("m3")] + mim = MachineImageManager(labels, duts) + self.assertTrue( + mim.matrix_ == [[" ", " ", "X"], ["X", " ", " "], [" ", "X", "X"]] + ) + mim.compute_initial_allocation() + self.assertTrue( + mim.matrix_ == [[" ", "Y", "X"], ["X", " ", "Y"], ["Y", "X", "X"]] + ) + + def test_case2(self): + labels = [ + MockLabel("l1", ["m1", "m2"]), + MockLabel("l2", ["m2", "m3"]), + MockLabel("l3", ["m1"]), + ] + duts = [MockDut("m1"), MockDut("m2"), MockDut("m3")] + mim = MachineImageManager(labels, duts) + self.assertTrue( + mim.matrix_ == [[" ", " ", "X"], ["X", " ", " "], [" ", "X", "X"]] + ) + mim.compute_initial_allocation() + self.assertTrue( + mim.matrix_ == [[" ", "Y", "X"], ["X", " ", "Y"], ["Y", "X", "X"]] + ) + + def test_case3(self): + labels = [ + MockLabel("l1", ["m1", "m2"]), + MockLabel("l2", ["m2", "m3"]), + MockLabel("l3", ["m1"]), + ] + duts = [MockDut("m1", labels[0]), MockDut("m2"), MockDut("m3")] + mim = MachineImageManager(labels, duts) + mim.compute_initial_allocation() + self.assertTrue( + mim.matrix_ == [[" ", "Y", "X"], ["X", " ", "Y"], ["Y", "X", "X"]] + ) + + def test_case4(self): + labels = [ + MockLabel("l1", ["m1", "m2"]), + MockLabel("l2", ["m2", "m3"]), + MockLabel("l3", ["m1"]), + ] + duts = [MockDut("m1"), MockDut("m2", labels[0]), MockDut("m3")] + mim = MachineImageManager(labels, duts) + mim.compute_initial_allocation() + self.assertTrue( + mim.matrix_ == [[" ", "Y", "X"], ["X", " ", "Y"], ["Y", "X", "X"]] + ) + + def test_case5(self): + labels = [ + MockLabel("l1", ["m3"]), + MockLabel("l2", ["m3"]), + MockLabel("l3", ["m1"]), + ] + duts = self.gen_duts_by_name("m1", "m2", "m3") + mim = MachineImageManager(labels, duts) + self.assertTrue(mim.compute_initial_allocation()) + self.assertTrue( + mim.matrix_ == [["X", "X", "Y"], ["X", "X", "Y"], ["Y", "X", "X"]] + ) + + def test_2x2_with_allocation(self): + labels = [MockLabel("l0"), 
MockLabel("l1")] + duts = [MockDut("m0"), MockDut("m1")] + mim = MachineImageManager(labels, duts) + self.assertTrue(mim.compute_initial_allocation()) + self.assertTrue(mim.allocate(duts[0]) == labels[0]) + self.assertTrue(mim.allocate(duts[0]) == labels[1]) + self.assertTrue(mim.allocate(duts[0]) is None) + self.assertTrue(mim.matrix_[0][0] == "_") + self.assertTrue(mim.matrix_[1][0] == "_") + self.assertTrue(mim.allocate(duts[1]) == labels[1]) + + def test_10x10_general(self): + """Gen 10x10 matrix.""" + n = 10 + labels = [] + duts = [] + for i in range(n): + labels.append(MockLabel("l{}".format(i))) + duts.append(MockDut("m{}".format(i))) + mim = MachineImageManager(labels, duts) + self.assertTrue(mim.compute_initial_allocation()) + for i in range(n): + for j in range(n): + if i == j: + self.assertTrue(mim.matrix_[i][j] == "Y") + else: + self.assertTrue(mim.matrix_[i][j] == " ") + self.assertTrue(mim.allocate(duts[3]).name == "l3") + + def test_random_generated(self): + n = 10 + labels = [] + duts = [] + for i in range(10): + # generate 3-5 machines that is compatible with this label + l = MockLabel("l{}".format(i), []) + r = random.random() + for _ in range(4): + t = int(r * 10) % n + r *= 10 + l.remote.append("m{}".format(t)) + labels.append(l) + duts.append(MockDut("m{}".format(i))) + mim = MachineImageManager(labels, duts) + self.assertTrue(mim.compute_initial_allocation()) + + def test_10x10_fully_random(self): + inp = [ + "X . . . X X . X X .", + "X X . X . X . X X .", + "X X X . . X . X . X", + "X . X X . . X X . X", + "X X X X . . . X . .", + "X X . X . X . . X .", + ". X . X . X X X . .", + ". X . X X . X X . .", + "X X . . . X X X . .", + ". X X X X . . . . X", + ] + output = [ + "X Y . . X X . X X .", + "X X Y X . X . X X .", + "X X X Y . X . X . X", + "X . X X Y . X X . X", + "X X X X . Y . X . .", + "X X . X . X Y . X .", + "Y X . X . X X X . .", + ". X . X X . X X Y .", + "X X . . . X X X . Y", + ". X X X X . . Y . X", + ] + self.pattern_based_test(inp, output) + + def test_10x10_fully_random2(self): + inp = [ + "X . X . . X . X X X", + "X X X X X X . . X .", + "X . X X X X X . . X", + "X X X . X . X X . .", + ". X . X . X X X X X", + "X X X X X X X . . X", + "X . X X X X X . . X", + "X X X . X X X X . .", + "X X X . . . X X X X", + ". X X . X X X . X X", + ] + output = [ + "X . X Y . X . X X X", + "X X X X X X Y . X .", + "X Y X X X X X . . X", + "X X X . X Y X X . .", + ". X Y X . X X X X X", + "X X X X X X X Y . X", + "X . X X X X X . Y X", + "X X X . X X X X . Y", + "X X X . Y . X X X X", + "Y X X . X X X . X X", + ] + self.pattern_based_test(inp, output) + + def test_3x4_with_allocation(self): + inp = ["X X . .", ". . X .", "X . X ."] + output = ["X X Y .", "Y . 
X .", "X Y X ."] + mim = self.pattern_based_test(inp, output) + self.assertTrue(mim.allocate(mim.duts_[2]) == mim.labels_[0]) + self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[2]) + self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1]) + self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[2]) + self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[1]) + self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[0]) + self.assertTrue(mim.allocate(mim.duts_[3]) is None) + self.assertTrue(mim.allocate(mim.duts_[2]) is None) + self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[1]) + self.assertTrue(mim.allocate(mim.duts_[1]) is None) + self.assertTrue(mim.allocate(mim.duts_[0]) is None) + self.assertTrue(mim.label_duts_[0] == [2, 3]) + self.assertTrue(mim.label_duts_[1] == [0, 3, 1]) + self.assertTrue(mim.label_duts_[2] == [3, 1]) + self.assertListEqual( + mim.allocate_log_, + [(0, 2), (2, 3), (1, 0), (2, 1), (1, 3), (0, 3), (1, 1)], + ) + + def test_cornercase_1(self): + """This corner case is brought up by Caroline. The description is - @@ -292,18 +323,18 @@ class MachineImageManagerTester(unittest.TestCase): l1 Y X X l2 Y X X - """ + """ - inp = ['. X X', '. X X', '. X X'] - output = ['Y X X', 'Y X X', 'Y X X'] - mim = self.pattern_based_test(inp, output) - self.assertTrue(mim.allocate(mim.duts_[1]) is None) - self.assertTrue(mim.allocate(mim.duts_[2]) is None) - self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[0]) - self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1]) - self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[2]) - self.assertTrue(mim.allocate(mim.duts_[0]) is None) + inp = [". X X", ". X X", ". X X"] + output = ["Y X X", "Y X X", "Y X X"] + mim = self.pattern_based_test(inp, output) + self.assertTrue(mim.allocate(mim.duts_[1]) is None) + self.assertTrue(mim.allocate(mim.duts_[2]) is None) + self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[0]) + self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1]) + self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[2]) + self.assertTrue(mim.allocate(mim.duts_[0]) is None) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py index f342794b..5c8af75a 100644 --- a/crosperf/machine_manager.py +++ b/crosperf/machine_manager.py @@ -17,538 +17,600 @@ import sys import threading import time +from cros_utils import command_executer +from cros_utils import logger import file_lock_machine import image_chromeos import test_flag -from cros_utils import command_executer -from cros_utils import logger -CHECKSUM_FILE = '/usr/local/osimage_checksum_file' + +CHECKSUM_FILE = "/usr/local/osimage_checksum_file" class BadChecksum(Exception): - """Raised if all machines for a label don't have the same checksum.""" + """Raised if all machines for a label don't have the same checksum.""" class BadChecksumString(Exception): - """Raised if all machines for a label don't have the same checksum string.""" + """Raised if all machines for a label don't have the same checksum string.""" class MissingLocksDirectory(Exception): - """Raised when cannot find/access the machine locks directory.""" + """Raised when cannot find/access the machine locks directory.""" class CrosCommandError(Exception): - """Raised when an error occurs running command on DUT.""" + """Raised when an error occurs running command on DUT.""" class CrosMachine(object): - """The machine class.""" - - def 
__init__(self, name, chromeos_root, log_level, cmd_exec=None): - self.name = name - self.image = None - # We relate a dut with a label if we reimage the dut using label or we - # detect at the very beginning that the dut is running this label. - self.label = None - self.checksum = None - self.locked = False - self.released_time = time.time() - self.test_run = None - self.chromeos_root = chromeos_root - self.log_level = log_level - self.cpuinfo = None - self.machine_id = None - self.checksum_string = None - self.meminfo = None - self.phys_kbytes = None - self.cooldown_wait_time = 0 - self.ce = cmd_exec or command_executer.GetCommandExecuter( - log_level=self.log_level) - self.SetUpChecksumInfo() - - def SetUpChecksumInfo(self): - if not self.IsReachable(): - self.machine_checksum = None - return - self._GetMemoryInfo() - self._GetCPUInfo() - self._ComputeMachineChecksumString() - self._GetMachineID() - self.machine_checksum = self._GetMD5Checksum(self.checksum_string) - self.machine_id_checksum = self._GetMD5Checksum(self.machine_id) - - def IsReachable(self): - command = 'ls' - ret = self.ce.CrosRunCommand( - command, machine=self.name, chromeos_root=self.chromeos_root) - if ret: - return False - return True - - def AddCooldownWaitTime(self, wait_time): - self.cooldown_wait_time += wait_time - - def GetCooldownWaitTime(self): - return self.cooldown_wait_time - - def _ParseMemoryInfo(self): - line = self.meminfo.splitlines()[0] - usable_kbytes = int(line.split()[1]) - # This code is from src/third_party/test/files/client/bin/base_utils.py - # usable_kbytes is system's usable DRAM in kbytes, - # as reported by memtotal() from device /proc/meminfo memtotal - # after Linux deducts 1.5% to 9.5% for system table overhead - # Undo the unknown actual deduction by rounding up - # to next small multiple of a big power-of-two - # eg 12GB - 5.1% gets rounded back up to 12GB - mindeduct = 0.005 # 0.5 percent - maxdeduct = 0.095 # 9.5 percent - # deduction range 1.5% .. 9.5% supports physical mem sizes - # 6GB .. 12GB in steps of .5GB - # 12GB .. 24GB in steps of 1 GB - # 24GB .. 48GB in steps of 2 GB ... 
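# [Editor's sketch -- not part of this patch] The arithmetic described in
# the surrounding comments recovers physical RAM from MemTotal: inflate
# by the minimum and maximum kernel deductions, round their spread up to
# a power of two, then snap to the next multiple of it. Standalone:
import math

def phys_kbytes_from_usable(usable_kbytes, mindeduct=0.005, maxdeduct=0.095):
    min_kbytes = int(usable_kbytes / (1.0 - mindeduct))
    round_kbytes = int(usable_kbytes / (1.0 - maxdeduct)) - min_kbytes
    mod2n = 1 << int(math.ceil(math.log(round_kbytes, 2)))
    phys = min_kbytes + mod2n - 1
    return phys - phys % mod2n

# A ~5% kernel deduction from 12 GB rounds back up to 12 GB:
assert phys_kbytes_from_usable(11930464) == 12 * 1024 * 1024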
- # Finer granularity in physical mem sizes would require - # tighter spread between min and max possible deductions - - # increase mem size by at least min deduction, without rounding - min_kbytes = int(usable_kbytes / (1.0 - mindeduct)) - # increase mem size further by 2**n rounding, by 0..roundKb or more - round_kbytes = int(usable_kbytes / (1.0 - maxdeduct)) - min_kbytes - # find least binary roundup 2**n that covers worst-cast roundKb - mod2n = 1 << int(math.ceil(math.log(round_kbytes, 2))) - # have round_kbytes <= mod2n < round_kbytes*2 - # round min_kbytes up to next multiple of mod2n - phys_kbytes = min_kbytes + mod2n - 1 - phys_kbytes -= phys_kbytes % mod2n # clear low bits - self.phys_kbytes = phys_kbytes - - def _GetMemoryInfo(self): - # TODO yunlian: when the machine in rebooting, it will not return - # meminfo, the assert does not catch it either - command = 'cat /proc/meminfo' - ret, self.meminfo, _ = self.ce.CrosRunCommandWOutput( - command, machine=self.name, chromeos_root=self.chromeos_root) - assert ret == 0, 'Could not get meminfo from machine: %s' % self.name - if ret == 0: - self._ParseMemoryInfo() - - def _GetCPUInfo(self): - command = 'cat /proc/cpuinfo' - ret, self.cpuinfo, _ = self.ce.CrosRunCommandWOutput( - command, machine=self.name, chromeos_root=self.chromeos_root) - assert ret == 0, 'Could not get cpuinfo from machine: %s' % self.name - - def _ComputeMachineChecksumString(self): - self.checksum_string = '' - # Some lines from cpuinfo have to be excluded because they are not - # persistent across DUTs. - # MHz, BogoMIPS are dynamically changing values. - # core id, apicid are identifiers assigned on startup - # and may differ on the same type of machine. - exclude_lines_list = ['MHz', 'BogoMIPS', 'bogomips', 'core id', 'apicid'] - for line in self.cpuinfo.splitlines(): - if not any(e in line for e in exclude_lines_list): - self.checksum_string += line - self.checksum_string += ' ' + str(self.phys_kbytes) - - def _GetMD5Checksum(self, ss): - if ss: - return hashlib.md5(ss.encode('utf-8')).hexdigest() - return '' - - def _GetMachineID(self): - command = 'dump_vpd_log --full --stdout' - _, if_out, _ = self.ce.CrosRunCommandWOutput( - command, machine=self.name, chromeos_root=self.chromeos_root) - b = if_out.splitlines() - a = [l for l in b if 'Product' in l] - if a: - self.machine_id = a[0] - return - command = 'ifconfig' - _, if_out, _ = self.ce.CrosRunCommandWOutput( - command, machine=self.name, chromeos_root=self.chromeos_root) - b = if_out.splitlines() - a = [l for l in b if 'HWaddr' in l] - if a: - self.machine_id = '_'.join(a) - return - a = [l for l in b if 'ether' in l] - if a: - self.machine_id = '_'.join(a) - return - assert 0, 'Could not get machine_id from machine: %s' % self.name - - def __str__(self): - l = [] - l.append(self.name) - l.append(str(self.image)) - l.append(str(self.checksum)) - l.append(str(self.locked)) - l.append(str(self.released_time)) - return ', '.join(l) + """The machine class.""" + + def __init__(self, name, chromeos_root, log_level, cmd_exec=None): + self.name = name + self.image = None + # We relate a dut with a label if we reimage the dut using label or we + # detect at the very beginning that the dut is running this label. 
+ self.label = None + self.checksum = None + self.locked = False + self.released_time = time.time() + self.test_run = None + self.chromeos_root = chromeos_root + self.log_level = log_level + self.cpuinfo = None + self.machine_id = None + self.checksum_string = None + self.meminfo = None + self.phys_kbytes = None + self.cooldown_wait_time = 0 + self.ce = cmd_exec or command_executer.GetCommandExecuter( + log_level=self.log_level + ) + self.SetUpChecksumInfo() + + def SetUpChecksumInfo(self): + if not self.IsReachable(): + self.machine_checksum = None + return + self._GetMemoryInfo() + self._GetCPUInfo() + self._ComputeMachineChecksumString() + self._GetMachineID() + self.machine_checksum = self._GetMD5Checksum(self.checksum_string) + self.machine_id_checksum = self._GetMD5Checksum(self.machine_id) + + def IsReachable(self): + command = "ls" + ret = self.ce.CrosRunCommand( + command, machine=self.name, chromeos_root=self.chromeos_root + ) + if ret: + return False + return True + + def AddCooldownWaitTime(self, wait_time): + self.cooldown_wait_time += wait_time + + def GetCooldownWaitTime(self): + return self.cooldown_wait_time + + def _ParseMemoryInfo(self): + line = self.meminfo.splitlines()[0] + usable_kbytes = int(line.split()[1]) + # This code is from src/third_party/test/files/client/bin/base_utils.py + # usable_kbytes is system's usable DRAM in kbytes, + # as reported by memtotal() from device /proc/meminfo memtotal + # after Linux deducts 1.5% to 9.5% for system table overhead + # Undo the unknown actual deduction by rounding up + # to next small multiple of a big power-of-two + # eg 12GB - 5.1% gets rounded back up to 12GB + mindeduct = 0.005 # 0.5 percent + maxdeduct = 0.095 # 9.5 percent + # deduction range 1.5% .. 9.5% supports physical mem sizes + # 6GB .. 12GB in steps of .5GB + # 12GB .. 24GB in steps of 1 GB + # 24GB .. 48GB in steps of 2 GB ... + # Finer granularity in physical mem sizes would require + # tighter spread between min and max possible deductions + + # increase mem size by at least min deduction, without rounding + min_kbytes = int(usable_kbytes / (1.0 - mindeduct)) + # increase mem size further by 2**n rounding, by 0..roundKb or more + round_kbytes = int(usable_kbytes / (1.0 - maxdeduct)) - min_kbytes + # find least binary roundup 2**n that covers worst-cast roundKb + mod2n = 1 << int(math.ceil(math.log(round_kbytes, 2))) + # have round_kbytes <= mod2n < round_kbytes*2 + # round min_kbytes up to next multiple of mod2n + phys_kbytes = min_kbytes + mod2n - 1 + phys_kbytes -= phys_kbytes % mod2n # clear low bits + self.phys_kbytes = phys_kbytes + + def _GetMemoryInfo(self): + # TODO yunlian: when the machine in rebooting, it will not return + # meminfo, the assert does not catch it either + command = "cat /proc/meminfo" + ret, self.meminfo, _ = self.ce.CrosRunCommandWOutput( + command, machine=self.name, chromeos_root=self.chromeos_root + ) + assert ret == 0, "Could not get meminfo from machine: %s" % self.name + if ret == 0: + self._ParseMemoryInfo() + + def _GetCPUInfo(self): + command = "cat /proc/cpuinfo" + ret, self.cpuinfo, _ = self.ce.CrosRunCommandWOutput( + command, machine=self.name, chromeos_root=self.chromeos_root + ) + assert ret == 0, "Could not get cpuinfo from machine: %s" % self.name + + def _ComputeMachineChecksumString(self): + self.checksum_string = "" + # Some lines from cpuinfo have to be excluded because they are not + # persistent across DUTs. + # MHz, BogoMIPS are dynamically changing values. 
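# [Editor's sketch -- not part of this patch] The exclusion described in
# the surrounding comments drops cpuinfo lines that vary per boot or per
# core before the machine checksum is taken. The filter in isolation,
# over a made-up /proc/cpuinfo fragment:
cpuinfo = "model name: X\ncpu MHz: 2400.1\ncore id: 0\nflags: fpu"
exclude = ["MHz", "BogoMIPS", "bogomips", "core id", "apicid"]
stable = "".join(
    line
    for line in cpuinfo.splitlines()
    if not any(e in line for e in exclude)
)
assert stable == "model name: Xflags: fpu"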
+ # core id, apicid are identifiers assigned on startup + # and may differ on the same type of machine. + exclude_lines_list = [ + "MHz", + "BogoMIPS", + "bogomips", + "core id", + "apicid", + ] + for line in self.cpuinfo.splitlines(): + if not any(e in line for e in exclude_lines_list): + self.checksum_string += line + self.checksum_string += " " + str(self.phys_kbytes) + + def _GetMD5Checksum(self, ss): + if ss: + return hashlib.md5(ss.encode("utf-8")).hexdigest() + return "" + + def _GetMachineID(self): + command = "dump_vpd_log --full --stdout" + _, if_out, _ = self.ce.CrosRunCommandWOutput( + command, machine=self.name, chromeos_root=self.chromeos_root + ) + b = if_out.splitlines() + a = [l for l in b if "Product" in l] + if a: + self.machine_id = a[0] + return + command = "ifconfig" + _, if_out, _ = self.ce.CrosRunCommandWOutput( + command, machine=self.name, chromeos_root=self.chromeos_root + ) + b = if_out.splitlines() + a = [l for l in b if "HWaddr" in l] + if a: + self.machine_id = "_".join(a) + return + a = [l for l in b if "ether" in l] + if a: + self.machine_id = "_".join(a) + return + assert 0, "Could not get machine_id from machine: %s" % self.name + + def __str__(self): + l = [] + l.append(self.name) + l.append(str(self.image)) + l.append(str(self.checksum)) + l.append(str(self.locked)) + l.append(str(self.released_time)) + return ", ".join(l) class MachineManager(object): - """Lock, image and unlock machines locally for benchmark runs. - - This class contains methods and calls to lock, unlock and image - machines and distribute machines to each benchmark run. The assumption is - that all of the machines for the experiment have been globally locked - in the ExperimentRunner, but the machines still need to be locally - locked/unlocked (allocated to benchmark runs) to prevent multiple benchmark - runs within the same experiment from trying to use the same machine at the - same time. - """ - - def __init__(self, - chromeos_root, - acquire_timeout, - log_level, - locks_dir, - cmd_exec=None, - lgr=None): - self._lock = threading.RLock() - self._all_machines = [] - self._machines = [] - self.image_lock = threading.Lock() - self.num_reimages = 0 - self.chromeos_root = None - self.machine_checksum = {} - self.machine_checksum_string = {} - self.acquire_timeout = acquire_timeout - self.log_level = log_level - self.locks_dir = locks_dir - self.ce = cmd_exec or command_executer.GetCommandExecuter( - log_level=self.log_level) - self.logger = lgr or logger.GetLogger() - - if self.locks_dir and not os.path.isdir(self.locks_dir): - raise MissingLocksDirectory('Cannot access locks directory: %s' % - self.locks_dir) - - self._initialized_machines = [] - self.chromeos_root = chromeos_root - - def RemoveNonLockedMachines(self, locked_machines): - for m in self._all_machines: - if m.name not in locked_machines: - self._all_machines.remove(m) - - for m in self._machines: - if m.name not in locked_machines: - self._machines.remove(m) - - def GetChromeVersion(self, machine): - """Get the version of Chrome running on the DUT.""" - - cmd = '/opt/google/chrome/chrome --version' - ret, version, _ = self.ce.CrosRunCommandWOutput( - cmd, machine=machine.name, chromeos_root=self.chromeos_root) - if ret != 0: - raise CrosCommandError("Couldn't get Chrome version from %s." 
% - machine.name) - - if ret != 0: - version = '' - return version.rstrip() - - def ImageMachine(self, machine, label): - checksum = label.checksum - - if checksum and (machine.checksum == checksum): - return - chromeos_root = label.chromeos_root - if not chromeos_root: - chromeos_root = self.chromeos_root - image_chromeos_args = [ - image_chromeos.__file__, '--no_lock', - '--chromeos_root=%s' % chromeos_root, - '--image=%s' % label.chromeos_image, - '--image_args=%s' % label.image_args, - '--remote=%s' % machine.name, - '--logging_level=%s' % self.log_level - ] - if label.board: - image_chromeos_args.append('--board=%s' % label.board) - - # Currently can't image two machines at once. - # So have to serialized on this lock. - save_ce_log_level = self.ce.log_level - if self.log_level != 'verbose': - self.ce.log_level = 'average' - - with self.image_lock: - if self.log_level != 'verbose': - self.logger.LogOutput('Pushing image onto machine.') - self.logger.LogOutput('Running image_chromeos.DoImage with %s' % - ' '.join(image_chromeos_args)) - retval = 0 - if not test_flag.GetTestMode(): - retval = image_chromeos.DoImage(image_chromeos_args) - if retval: - cmd = 'reboot && exit' - if self.log_level != 'verbose': - self.logger.LogOutput('reboot & exit.') - self.ce.CrosRunCommand( - cmd, machine=machine.name, chromeos_root=self.chromeos_root) - time.sleep(60) - if self.log_level != 'verbose': - self.logger.LogOutput('Pushing image onto machine.') - self.logger.LogOutput('Running image_chromeos.DoImage with %s' % - ' '.join(image_chromeos_args)) - retval = image_chromeos.DoImage(image_chromeos_args) - if retval: - raise RuntimeError("Could not image machine: '%s'." % machine.name) - - self.num_reimages += 1 - machine.checksum = checksum - machine.image = label.chromeos_image - machine.label = label - - if not label.chrome_version: - label.chrome_version = self.GetChromeVersion(machine) - - self.ce.log_level = save_ce_log_level - return retval - - def ComputeCommonCheckSum(self, label): - # Since this is used for cache lookups before the machines have been - # compared/verified, check here to make sure they all have the same - # checksum (otherwise the cache lookup may not be valid). - base = None - for machine in self.GetMachines(label): - # Make sure the machine's checksums are calculated. - if not machine.machine_checksum: - machine.SetUpChecksumInfo() - # Use the first machine as the basis for comparison. - if not base: - base = machine - # Make sure this machine's checksum matches our 'common' checksum. - if base.machine_checksum != machine.machine_checksum: - # Found a difference. Fatal error. - # Extract non-matching part and report it. - for mismatch_index in range(len(base.checksum_string)): - if (mismatch_index >= len(machine.checksum_string) or - base.checksum_string[mismatch_index] != - machine.checksum_string[mismatch_index]): - break - # We want to show some context after the mismatch. - end_ind = mismatch_index + 8 - # Print a mismatching string. - raise BadChecksum( - 'Machine checksums do not match!\n' - 'Diff:\n' - f'{base.name}: {base.checksum_string[:end_ind]}\n' - f'{machine.name}: {machine.checksum_string[:end_ind]}\n' - '\nCheck for matching /proc/cpuinfo and /proc/meminfo on DUTs.\n') - self.machine_checksum[label.name] = base.machine_checksum - - def ComputeCommonCheckSumString(self, label): - # The assumption is that this function is only called AFTER - # ComputeCommonCheckSum, so there is no need to verify the machines - # are the same here. 
If this is ever changed, this function should be - # modified to verify that all the machines for a given label are the - # same. - for machine in self.GetMachines(label): - if machine.checksum_string: - self.machine_checksum_string[label.name] = machine.checksum_string - break - - def _TryToLockMachine(self, cros_machine): - with self._lock: - assert cros_machine, "Machine can't be None" - for m in self._machines: - if m.name == cros_machine.name: - return - locked = True - if self.locks_dir: - locked = file_lock_machine.Machine(cros_machine.name, - self.locks_dir).Lock( - True, sys.argv[0]) - if locked: - self._machines.append(cros_machine) - command = 'cat %s' % CHECKSUM_FILE - ret, out, _ = self.ce.CrosRunCommandWOutput( - command, - chromeos_root=self.chromeos_root, - machine=cros_machine.name) - if ret == 0: - cros_machine.checksum = out.strip() - elif self.locks_dir: - self.logger.LogOutput("Couldn't lock: %s" % cros_machine.name) - - # This is called from single threaded mode. - def AddMachine(self, machine_name): - with self._lock: - for m in self._all_machines: - assert m.name != machine_name, 'Tried to double-add %s' % machine_name - - if self.log_level != 'verbose': - self.logger.LogOutput('Setting up remote access to %s' % machine_name) - self.logger.LogOutput('Checking machine characteristics for %s' % - machine_name) - cm = CrosMachine(machine_name, self.chromeos_root, self.log_level) - if cm.machine_checksum: - self._all_machines.append(cm) - - def RemoveMachine(self, machine_name): - with self._lock: - self._machines = [m for m in self._machines if m.name != machine_name] - if self.locks_dir: - res = file_lock_machine.Machine(machine_name, - self.locks_dir).Unlock(True) - if not res: - self.logger.LogError("Could not unlock machine: '%s'." % machine_name) - - def ForceSameImageToAllMachines(self, label): - machines = self.GetMachines(label) - for m in machines: - self.ImageMachine(m, label) - m.SetUpChecksumInfo() - - def AcquireMachine(self, label): - image_checksum = label.checksum - machines = self.GetMachines(label) - check_interval_time = 120 - with self._lock: - # Lazily external lock machines - while self.acquire_timeout >= 0: + """Lock, image and unlock machines locally for benchmark runs. + + This class contains methods and calls to lock, unlock and image + machines and distribute machines to each benchmark run. The assumption is + that all of the machines for the experiment have been globally locked + in the ExperimentRunner, but the machines still need to be locally + locked/unlocked (allocated to benchmark runs) to prevent multiple benchmark + runs within the same experiment from trying to use the same machine at the + same time. 
+ """ + + def __init__( + self, + chromeos_root, + acquire_timeout, + log_level, + locks_dir, + cmd_exec=None, + lgr=None, + ): + self._lock = threading.RLock() + self._all_machines = [] + self._machines = [] + self.image_lock = threading.Lock() + self.num_reimages = 0 + self.chromeos_root = None + self.machine_checksum = {} + self.machine_checksum_string = {} + self.acquire_timeout = acquire_timeout + self.log_level = log_level + self.locks_dir = locks_dir + self.ce = cmd_exec or command_executer.GetCommandExecuter( + log_level=self.log_level + ) + self.logger = lgr or logger.GetLogger() + + if self.locks_dir and not os.path.isdir(self.locks_dir): + raise MissingLocksDirectory( + "Cannot access locks directory: %s" % self.locks_dir + ) + + self._initialized_machines = [] + self.chromeos_root = chromeos_root + + def RemoveNonLockedMachines(self, locked_machines): + for m in self._all_machines: + if m.name not in locked_machines: + self._all_machines.remove(m) + + for m in self._machines: + if m.name not in locked_machines: + self._machines.remove(m) + + def GetChromeVersion(self, machine): + """Get the version of Chrome running on the DUT.""" + + cmd = "/opt/google/chrome/chrome --version" + ret, version, _ = self.ce.CrosRunCommandWOutput( + cmd, machine=machine.name, chromeos_root=self.chromeos_root + ) + if ret != 0: + raise CrosCommandError( + "Couldn't get Chrome version from %s." % machine.name + ) + + if ret != 0: + version = "" + return version.rstrip() + + def ImageMachine(self, machine, label): + checksum = label.checksum + + if checksum and (machine.checksum == checksum): + return + chromeos_root = label.chromeos_root + if not chromeos_root: + chromeos_root = self.chromeos_root + image_chromeos_args = [ + image_chromeos.__file__, + "--no_lock", + "--chromeos_root=%s" % chromeos_root, + "--image=%s" % label.chromeos_image, + "--image_args=%s" % label.image_args, + "--remote=%s" % machine.name, + "--logging_level=%s" % self.log_level, + ] + if label.board: + image_chromeos_args.append("--board=%s" % label.board) + + # Currently can't image two machines at once. + # So have to serialized on this lock. + save_ce_log_level = self.ce.log_level + if self.log_level != "verbose": + self.ce.log_level = "average" + + with self.image_lock: + if self.log_level != "verbose": + self.logger.LogOutput("Pushing image onto machine.") + self.logger.LogOutput( + "Running image_chromeos.DoImage with %s" + % " ".join(image_chromeos_args) + ) + retval = 0 + if not test_flag.GetTestMode(): + retval = image_chromeos.DoImage(image_chromeos_args) + if retval: + cmd = "reboot && exit" + if self.log_level != "verbose": + self.logger.LogOutput("reboot & exit.") + self.ce.CrosRunCommand( + cmd, machine=machine.name, chromeos_root=self.chromeos_root + ) + time.sleep(60) + if self.log_level != "verbose": + self.logger.LogOutput("Pushing image onto machine.") + self.logger.LogOutput( + "Running image_chromeos.DoImage with %s" + % " ".join(image_chromeos_args) + ) + retval = image_chromeos.DoImage(image_chromeos_args) + if retval: + raise RuntimeError( + "Could not image machine: '%s'." 
% machine.name + ) + + self.num_reimages += 1 + machine.checksum = checksum + machine.image = label.chromeos_image + machine.label = label + + if not label.chrome_version: + label.chrome_version = self.GetChromeVersion(machine) + + self.ce.log_level = save_ce_log_level + return retval + + def ComputeCommonCheckSum(self, label): + # Since this is used for cache lookups before the machines have been + # compared/verified, check here to make sure they all have the same + # checksum (otherwise the cache lookup may not be valid). + base = None + for machine in self.GetMachines(label): + # Make sure the machine's checksums are calculated. + if not machine.machine_checksum: + machine.SetUpChecksumInfo() + # Use the first machine as the basis for comparison. + if not base: + base = machine + # Make sure this machine's checksum matches our 'common' checksum. + if base.machine_checksum != machine.machine_checksum: + # Found a difference. Fatal error. + # Extract non-matching part and report it. + for mismatch_index in range(len(base.checksum_string)): + if ( + mismatch_index >= len(machine.checksum_string) + or base.checksum_string[mismatch_index] + != machine.checksum_string[mismatch_index] + ): + break + # We want to show some context after the mismatch. + end_ind = mismatch_index + 8 + # Print a mismatching string. + raise BadChecksum( + "Machine checksums do not match!\n" + "Diff:\n" + f"{base.name}: {base.checksum_string[:end_ind]}\n" + f"{machine.name}: {machine.checksum_string[:end_ind]}\n" + "\nCheck for matching /proc/cpuinfo and /proc/meminfo on DUTs.\n" + ) + self.machine_checksum[label.name] = base.machine_checksum + + def ComputeCommonCheckSumString(self, label): + # The assumption is that this function is only called AFTER + # ComputeCommonCheckSum, so there is no need to verify the machines + # are the same here. If this is ever changed, this function should be + # modified to verify that all the machines for a given label are the + # same. + for machine in self.GetMachines(label): + if machine.checksum_string: + self.machine_checksum_string[ + label.name + ] = machine.checksum_string + break + + def _TryToLockMachine(self, cros_machine): + with self._lock: + assert cros_machine, "Machine can't be None" + for m in self._machines: + if m.name == cros_machine.name: + return + locked = True + if self.locks_dir: + locked = file_lock_machine.Machine( + cros_machine.name, self.locks_dir + ).Lock(True, sys.argv[0]) + if locked: + self._machines.append(cros_machine) + command = "cat %s" % CHECKSUM_FILE + ret, out, _ = self.ce.CrosRunCommandWOutput( + command, + chromeos_root=self.chromeos_root, + machine=cros_machine.name, + ) + if ret == 0: + cros_machine.checksum = out.strip() + elif self.locks_dir: + self.logger.LogOutput("Couldn't lock: %s" % cros_machine.name) + + # This is called from single threaded mode. 
+ def AddMachine(self, machine_name): + with self._lock: + for m in self._all_machines: + assert m.name != machine_name, ( + "Tried to double-add %s" % machine_name + ) + + if self.log_level != "verbose": + self.logger.LogOutput( + "Setting up remote access to %s" % machine_name + ) + self.logger.LogOutput( + "Checking machine characteristics for %s" % machine_name + ) + cm = CrosMachine(machine_name, self.chromeos_root, self.log_level) + if cm.machine_checksum: + self._all_machines.append(cm) + + def RemoveMachine(self, machine_name): + with self._lock: + self._machines = [ + m for m in self._machines if m.name != machine_name + ] + if self.locks_dir: + res = file_lock_machine.Machine( + machine_name, self.locks_dir + ).Unlock(True) + if not res: + self.logger.LogError( + "Could not unlock machine: '%s'." % machine_name + ) + + def ForceSameImageToAllMachines(self, label): + machines = self.GetMachines(label) for m in machines: - new_machine = m not in self._all_machines - self._TryToLockMachine(m) - if new_machine: - m.released_time = time.time() - if self.GetAvailableMachines(label): - break - sleep_time = max(1, min(self.acquire_timeout, check_interval_time)) - time.sleep(sleep_time) - self.acquire_timeout -= sleep_time - - if self.acquire_timeout < 0: - self.logger.LogFatal('Could not acquire any of the ' - "following machines: '%s'" % - ', '.join(machine.name for machine in machines)) - - -### for m in self._machines: -### if (m.locked and time.time() - m.released_time < 10 and -### m.checksum == image_checksum): -### return None - unlocked_machines = [ - machine for machine in self.GetAvailableMachines(label) - if not machine.locked - ] - for m in unlocked_machines: - if image_checksum and m.checksum == image_checksum: - m.locked = True - m.test_run = threading.current_thread() - return m - for m in unlocked_machines: - if not m.checksum: - m.locked = True - m.test_run = threading.current_thread() - return m - # This logic ensures that threads waiting on a machine will get a machine - # with a checksum equal to their image over other threads. This saves time - # when crosperf initially assigns the machines to threads by minimizing - # the number of re-images. - # TODO(asharif): If we centralize the thread-scheduler, we wont need this - # code and can implement minimal reimaging code more cleanly. - for m in unlocked_machines: - if time.time() - m.released_time > 15: - # The release time gap is too large, so it is probably in the start - # stage, we need to reset the released_time. - m.released_time = time.time() - elif time.time() - m.released_time > 8: - m.locked = True - m.test_run = threading.current_thread() - return m - return None - - def GetAvailableMachines(self, label=None): - if not label: - return self._machines - return [m for m in self._machines if m.name in label.remote] - - def GetMachines(self, label=None): - if not label: - return self._all_machines - return [m for m in self._all_machines if m.name in label.remote] - - def ReleaseMachine(self, machine): - with self._lock: - for m in self._machines: - if machine.name == m.name: - assert m.locked, 'Tried to double-release %s' % m.name - m.released_time = time.time() - m.locked = False - m.status = 'Available' - break - - def Cleanup(self): - with self._lock: - # Unlock all machines (via file lock) - for m in self._machines: - res = file_lock_machine.Machine(m.name, self.locks_dir).Unlock(True) - - if not res: - self.logger.LogError("Could not unlock machine: '%s'." 
% m.name)
-
- def __str__(self):
- with self._lock:
- l = ['MachineManager Status:'] + [str(m) for m in self._machines]
- return '\n'.join(l)
-
- def AsString(self):
- with self._lock:
- stringify_fmt = '%-30s %-10s %-4s %-25s %-32s'
- header = stringify_fmt % ('Machine', 'Thread', 'Lock', 'Status',
- 'Checksum')
- table = [header]
- for m in self._machines:
- if m.test_run:
- test_name = m.test_run.name
- test_status = m.test_run.timeline.GetLastEvent()
- else:
- test_name = ''
- test_status = ''
-
- try:
- machine_string = stringify_fmt % (m.name, test_name, m.locked,
- test_status, m.checksum)
- except ValueError:
- machine_string = ''
- table.append(machine_string)
- return 'Machine Status:\n%s' % '\n'.join(table)
-
- def GetAllCPUInfo(self, labels):
- """Get cpuinfo for labels, merge them if their cpuinfo are the same."""
- dic = collections.defaultdict(list)
- for label in labels:
- for machine in self._all_machines:
- if machine.name in label.remote:
- dic[machine.cpuinfo].append(label.name)
- break
- output_segs = []
- for key, v in dic.items():
- output = ' '.join(v)
- output += '\n-------------------\n'
- output += key
- output += '\n\n\n'
- output_segs.append(output)
- return ''.join(output_segs)
-
- def GetAllMachines(self):
- return self._all_machines
+ self.ImageMachine(m, label)
+ m.SetUpChecksumInfo()
+
+ def AcquireMachine(self, label):
+ image_checksum = label.checksum
+ machines = self.GetMachines(label)
+ check_interval_time = 120
+ with self._lock:
+ # Lazily lock machines with the external file lock.
+ while self.acquire_timeout >= 0:
+ for m in machines:
+ new_machine = m not in self._all_machines
+ self._TryToLockMachine(m)
+ if new_machine:
+ m.released_time = time.time()
+ if self.GetAvailableMachines(label):
+ break
+ sleep_time = max(
+ 1, min(self.acquire_timeout, check_interval_time)
+ )
+ time.sleep(sleep_time)
+ self.acquire_timeout -= sleep_time
+
+ if self.acquire_timeout < 0:
+ self.logger.LogFatal(
+ "Could not acquire any of the "
+ "following machines: '%s'"
+ % ", ".join(machine.name for machine in machines)
+ )
+
+ ### for m in self._machines:
+ ### if (m.locked and time.time() - m.released_time < 10 and
+ ### m.checksum == image_checksum):
+ ### return None
+ unlocked_machines = [
+ machine
+ for machine in self.GetAvailableMachines(label)
+ if not machine.locked
+ ]
+ for m in unlocked_machines:
+ if image_checksum and m.checksum == image_checksum:
+ m.locked = True
+ m.test_run = threading.current_thread()
+ return m
+ for m in unlocked_machines:
+ if not m.checksum:
+ m.locked = True
+ m.test_run = threading.current_thread()
+ return m
+ # This logic ensures that a thread waiting on a machine will get a
+ # machine whose checksum matches its image ahead of other threads.
+ # This saves time when crosperf initially assigns the machines to
+ # threads by minimizing the number of re-images.
+ # TODO(asharif): If we centralize the thread-scheduler, we won't need
+ # this code and can implement minimal reimaging code more cleanly.
+ for m in unlocked_machines:
+ if time.time() - m.released_time > 15:
+ # The release time gap is too large, so the machine is
+ # probably still in the startup stage; reset released_time.
+ m.released_time = time.time() + elif time.time() - m.released_time > 8: + m.locked = True + m.test_run = threading.current_thread() + return m + return None + + def GetAvailableMachines(self, label=None): + if not label: + return self._machines + return [m for m in self._machines if m.name in label.remote] + + def GetMachines(self, label=None): + if not label: + return self._all_machines + return [m for m in self._all_machines if m.name in label.remote] + + def ReleaseMachine(self, machine): + with self._lock: + for m in self._machines: + if machine.name == m.name: + assert m.locked, "Tried to double-release %s" % m.name + m.released_time = time.time() + m.locked = False + m.status = "Available" + break + + def Cleanup(self): + with self._lock: + # Unlock all machines (via file lock) + for m in self._machines: + res = file_lock_machine.Machine(m.name, self.locks_dir).Unlock( + True + ) + + if not res: + self.logger.LogError( + "Could not unlock machine: '%s'." % m.name + ) + + def __str__(self): + with self._lock: + l = ["MachineManager Status:"] + [str(m) for m in self._machines] + return "\n".join(l) + + def AsString(self): + with self._lock: + stringify_fmt = "%-30s %-10s %-4s %-25s %-32s" + header = stringify_fmt % ( + "Machine", + "Thread", + "Lock", + "Status", + "Checksum", + ) + table = [header] + for m in self._machines: + if m.test_run: + test_name = m.test_run.name + test_status = m.test_run.timeline.GetLastEvent() + else: + test_name = "" + test_status = "" + + try: + machine_string = stringify_fmt % ( + m.name, + test_name, + m.locked, + test_status, + m.checksum, + ) + except ValueError: + machine_string = "" + table.append(machine_string) + return "Machine Status:\n%s" % "\n".join(table) + + def GetAllCPUInfo(self, labels): + """Get cpuinfo for labels, merge them if their cpuinfo are the same.""" + dic = collections.defaultdict(list) + for label in labels: + for machine in self._all_machines: + if machine.name in label.remote: + dic[machine.cpuinfo].append(label.name) + break + output_segs = [] + for key, v in dic.items(): + output = " ".join(v) + output += "\n-------------------\n" + output += key + output += "\n\n\n" + output_segs.append(output) + return "".join(output_segs) + + def GetAllMachines(self): + return self._all_machines class MockCrosMachine(CrosMachine): - """Mock cros machine class.""" - # pylint: disable=super-init-not-called + """Mock cros machine class.""" + + # pylint: disable=super-init-not-called - MEMINFO_STRING = """MemTotal: 3990332 kB + MEMINFO_STRING = """MemTotal: 3990332 kB MemFree: 2608396 kB Buffers: 147168 kB Cached: 811560 kB @@ -585,7 +647,7 @@ DirectMap4k: 45824 kB DirectMap2M: 4096000 kB """ - CPUINFO_STRING = """processor: 0 + CPUINFO_STRING = """processor: 0 vendor_id: GenuineIntel cpu family: 6 model: 42 @@ -638,91 +700,97 @@ address sizes: 36 bits physical, 48 bits virtual power management: """ - def __init__(self, name, chromeos_root, log_level): - self.name = name - self.image = None - self.checksum = None - self.locked = False - self.released_time = time.time() - self.test_run = None - self.chromeos_root = chromeos_root - self.checksum_string = re.sub(r'\d', '', name) - # In test, we assume "lumpy1", "lumpy2" are the same machine. 
- self.machine_checksum = self._GetMD5Checksum(self.checksum_string) - self.log_level = log_level - self.label = None - self.cooldown_wait_time = 0 - self.ce = command_executer.GetCommandExecuter(log_level=self.log_level) - self._GetCPUInfo() - - def IsReachable(self): - return True - - def _GetMemoryInfo(self): - self.meminfo = self.MEMINFO_STRING - self._ParseMemoryInfo() - - def _GetCPUInfo(self): - self.cpuinfo = self.CPUINFO_STRING + def __init__(self, name, chromeos_root, log_level): + self.name = name + self.image = None + self.checksum = None + self.locked = False + self.released_time = time.time() + self.test_run = None + self.chromeos_root = chromeos_root + self.checksum_string = re.sub(r"\d", "", name) + # In test, we assume "lumpy1", "lumpy2" are the same machine. + self.machine_checksum = self._GetMD5Checksum(self.checksum_string) + self.log_level = log_level + self.label = None + self.cooldown_wait_time = 0 + self.ce = command_executer.GetCommandExecuter(log_level=self.log_level) + self._GetCPUInfo() + + def IsReachable(self): + return True + + def _GetMemoryInfo(self): + self.meminfo = self.MEMINFO_STRING + self._ParseMemoryInfo() + + def _GetCPUInfo(self): + self.cpuinfo = self.CPUINFO_STRING class MockMachineManager(MachineManager): - """Mock machine manager class.""" - - def __init__(self, chromeos_root, acquire_timeout, log_level, locks_dir): - super(MockMachineManager, self).__init__(chromeos_root, acquire_timeout, - log_level, locks_dir) - - def _TryToLockMachine(self, cros_machine): - self._machines.append(cros_machine) - cros_machine.checksum = '' - - def AddMachine(self, machine_name): - with self._lock: - for m in self._all_machines: - assert m.name != machine_name, 'Tried to double-add %s' % machine_name - cm = MockCrosMachine(machine_name, self.chromeos_root, self.log_level) - assert cm.machine_checksum, ('Could not find checksum for machine %s' % - machine_name) - # In Original MachineManager, the test is 'if cm.machine_checksum:' - if a - # machine is unreachable, then its machine_checksum is None. Here we - # cannot do this, because machine_checksum is always faked, so we directly - # test cm.IsReachable, which is properly mocked. 
- if cm.IsReachable():
- self._all_machines.append(cm)
-
- def GetChromeVersion(self, machine):
- return 'Mock Chrome Version R50'
-
- def AcquireMachine(self, label):
- for machine in self._all_machines:
- if not machine.locked:
- machine.locked = True
- return machine
- return None
-
- def ImageMachine(self, machine, label):
- if machine or label:
- return 0
- return 1
-
- def ReleaseMachine(self, machine):
- machine.locked = False
-
- def GetMachines(self, label=None):
- return self._all_machines
-
- def GetAvailableMachines(self, label=None):
- return self._all_machines
-
- def ForceSameImageToAllMachines(self, label=None):
- return 0
-
- def ComputeCommonCheckSum(self, label=None):
- common_checksum = 12345
- for machine in self.GetMachines(label):
- machine.machine_checksum = common_checksum
- self.machine_checksum[label.name] = common_checksum
-
- def GetAllMachines(self):
- return self._all_machines
+ """Mock machine manager class."""
+
+ def __init__(self, chromeos_root, acquire_timeout, log_level, locks_dir):
+ super(MockMachineManager, self).__init__(
+ chromeos_root, acquire_timeout, log_level, locks_dir
+ )
+
+ def _TryToLockMachine(self, cros_machine):
+ self._machines.append(cros_machine)
+ cros_machine.checksum = ""
+
+ def AddMachine(self, machine_name):
+ with self._lock:
+ for m in self._all_machines:
+ assert m.name != machine_name, (
+ "Tried to double-add %s" % machine_name
+ )
+ cm = MockCrosMachine(
+ machine_name, self.chromeos_root, self.log_level
+ )
+ assert cm.machine_checksum, (
+ "Could not find checksum for machine %s" % machine_name
+ )
+ # In the original MachineManager, the test is 'if cm.machine_checksum:':
+ # if a machine is unreachable, then its machine_checksum is None. Here we
+ # cannot do this, because machine_checksum is always faked, so we directly
+ # test cm.IsReachable, which is properly mocked.
+ if cm.IsReachable(): + self._all_machines.append(cm) + + def GetChromeVersion(self, machine): + return "Mock Chrome Version R50" + + def AcquireMachine(self, label): + for machine in self._all_machines: + if not machine.locked: + machine.locked = True + return machine + return None + + def ImageMachine(self, machine, label): + if machine or label: + return 0 + return 1 + + def ReleaseMachine(self, machine): + machine.locked = False + + def GetMachines(self, label=None): + return self._all_machines + + def GetAvailableMachines(self, label=None): + return self._all_machines + + def ForceSameImageToAllMachines(self, label=None): + return 0 + + def ComputeCommonCheckSum(self, label=None): + common_checksum = 12345 + for machine in self.GetMachines(label): + machine.machine_checksum = common_checksum + self.machine_checksum[label.name] = common_checksum + + def GetAllMachines(self): + return self._all_machines diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py index aff18480..80b3dd11 100755 --- a/crosperf/machine_manager_unittest.py +++ b/crosperf/machine_manager_unittest.py @@ -9,485 +9,567 @@ from __future__ import print_function +import hashlib import os.path import time -import hashlib import unittest import unittest.mock as mock -import label -import machine_manager -import image_checksummer -import test_flag - from benchmark import Benchmark from benchmark_run import MockBenchmarkRun from cros_utils import command_executer from cros_utils import logger +import image_checksummer +import label +import machine_manager +import test_flag + # pylint: disable=protected-access class MyMachineManager(machine_manager.MachineManager): - """Machine manager for test.""" - - def __init__(self, chromeos_root): - super(MyMachineManager, self).__init__(chromeos_root, 0, 'average', '') - - def _TryToLockMachine(self, cros_machine): - self._machines.append(cros_machine) - cros_machine.checksum = '' - - def AddMachine(self, machine_name): - with self._lock: - for m in self._all_machines: - assert m.name != machine_name, 'Tried to double-add %s' % machine_name - cm = machine_manager.MockCrosMachine(machine_name, self.chromeos_root, - 'average') - assert cm.machine_checksum, ('Could not find checksum for machine %s' % - machine_name) - self._all_machines.append(cm) - - -CHROMEOS_ROOT = '/tmp/chromeos-root' -MACHINE_NAMES = ['lumpy1', 'lumpy2', 'lumpy3', 'daisy1', 'daisy2'] -LABEL_LUMPY = label.MockLabel('lumpy', 'build', 'lumpy_chromeos_image', - 'autotest_dir', 'debug_dir', CHROMEOS_ROOT, - 'lumpy', ['lumpy1', 'lumpy2', 'lumpy3', 'lumpy4'], - '', '', False, 'average', 'gcc', False, None) -LABEL_MIX = label.MockLabel('mix', 'build', 'chromeos_image', 'autotest_dir', - 'debug_dir', CHROMEOS_ROOT, 'mix', - ['daisy1', 'daisy2', 'lumpy3', 'lumpy4'], '', '', - False, 'average', 'gcc', False, None) + """Machine manager for test.""" + + def __init__(self, chromeos_root): + super(MyMachineManager, self).__init__(chromeos_root, 0, "average", "") + + def _TryToLockMachine(self, cros_machine): + self._machines.append(cros_machine) + cros_machine.checksum = "" + + def AddMachine(self, machine_name): + with self._lock: + for m in self._all_machines: + assert m.name != machine_name, ( + "Tried to double-add %s" % machine_name + ) + cm = machine_manager.MockCrosMachine( + machine_name, self.chromeos_root, "average" + ) + assert cm.machine_checksum, ( + "Could not find checksum for machine %s" % machine_name + ) + self._all_machines.append(cm) + + +CHROMEOS_ROOT = "/tmp/chromeos-root" 
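+# For reference, MyMachineManager above relies on machine_manager.
+# MockCrosMachine, which derives its fake machine_checksum by stripping
+# digits from the DUT name and hashing the result, so "lumpy1" and
+# "lumpy2" count as the same machine type. A minimal sketch of that
+# scheme (illustrative only; fake_checksum is not a real helper):
+#
+#   import hashlib
+#   import re
+#
+#   def fake_checksum(name):
+#       # "lumpy1" and "lumpy2" both reduce to "lumpy".
+#       stripped = re.sub(r"\d", "", name)
+#       return hashlib.md5(stripped.encode("utf-8")).hexdigest()
+#
+#   assert fake_checksum("lumpy1") == fake_checksum("lumpy2")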
+MACHINE_NAMES = ["lumpy1", "lumpy2", "lumpy3", "daisy1", "daisy2"] +LABEL_LUMPY = label.MockLabel( + "lumpy", + "build", + "lumpy_chromeos_image", + "autotest_dir", + "debug_dir", + CHROMEOS_ROOT, + "lumpy", + ["lumpy1", "lumpy2", "lumpy3", "lumpy4"], + "", + "", + False, + "average", + "gcc", + False, + None, +) +LABEL_MIX = label.MockLabel( + "mix", + "build", + "chromeos_image", + "autotest_dir", + "debug_dir", + CHROMEOS_ROOT, + "mix", + ["daisy1", "daisy2", "lumpy3", "lumpy4"], + "", + "", + False, + "average", + "gcc", + False, + None, +) class MachineManagerTest(unittest.TestCase): - """Test for machine manager class.""" - - msgs = [] - image_log = [] - log_fatal_msgs = [] - fake_logger_count = 0 - fake_logger_msgs = [] - - mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) - - mock_logger = mock.Mock(spec=logger.Logger) - - mock_lumpy1 = mock.Mock(spec=machine_manager.CrosMachine) - mock_lumpy2 = mock.Mock(spec=machine_manager.CrosMachine) - mock_lumpy3 = mock.Mock(spec=machine_manager.CrosMachine) - mock_lumpy4 = mock.Mock(spec=machine_manager.CrosMachine) - mock_daisy1 = mock.Mock(spec=machine_manager.CrosMachine) - mock_daisy2 = mock.Mock(spec=machine_manager.CrosMachine) - - @mock.patch.object(os.path, 'isdir') - - # pylint: disable=arguments-differ - def setUp(self, mock_isdir): - - mock_isdir.return_value = True - self.mm = machine_manager.MachineManager('/usr/local/chromeos', 0, - 'average', None, - self.mock_cmd_exec, - self.mock_logger) - - self.mock_lumpy1.name = 'lumpy1' - self.mock_lumpy2.name = 'lumpy2' - self.mock_lumpy3.name = 'lumpy3' - self.mock_lumpy4.name = 'lumpy4' - self.mock_daisy1.name = 'daisy1' - self.mock_daisy2.name = 'daisy2' - self.mock_lumpy1.machine_checksum = 'lumpy123' - self.mock_lumpy2.machine_checksum = 'lumpy123' - self.mock_lumpy3.machine_checksum = 'lumpy123' - self.mock_lumpy4.machine_checksum = 'lumpy123' - self.mock_daisy1.machine_checksum = 'daisy12' - self.mock_daisy2.machine_checksum = 'daisy12' - self.mock_lumpy1.checksum_string = 'lumpy_checksum_str' - self.mock_lumpy2.checksum_string = 'lumpy_checksum_str' - self.mock_lumpy3.checksum_string = 'lumpy_checksum_str' - self.mock_lumpy4.checksum_string = 'lumpy_checksum_str' - self.mock_daisy1.checksum_string = 'daisy_checksum_str' - self.mock_daisy2.checksum_string = 'daisy_checksum_str' - self.mock_lumpy1.cpuinfo = 'lumpy_cpu_info' - self.mock_lumpy2.cpuinfo = 'lumpy_cpu_info' - self.mock_lumpy3.cpuinfo = 'lumpy_cpu_info' - self.mock_lumpy4.cpuinfo = 'lumpy_cpu_info' - self.mock_daisy1.cpuinfo = 'daisy_cpu_info' - self.mock_daisy2.cpuinfo = 'daisy_cpu_info' - self.mm._all_machines.append(self.mock_daisy1) - self.mm._all_machines.append(self.mock_daisy2) - self.mm._all_machines.append(self.mock_lumpy1) - self.mm._all_machines.append(self.mock_lumpy2) - self.mm._all_machines.append(self.mock_lumpy3) - - def testGetMachines(self): - manager = MyMachineManager(CHROMEOS_ROOT) - for m in MACHINE_NAMES: - manager.AddMachine(m) - names = [m.name for m in manager.GetMachines(LABEL_LUMPY)] - self.assertEqual(names, ['lumpy1', 'lumpy2', 'lumpy3']) - - def testGetAvailableMachines(self): - manager = MyMachineManager(CHROMEOS_ROOT) - for m in MACHINE_NAMES: - manager.AddMachine(m) - for m in manager._all_machines: - if int(m.name[-1]) % 2: - manager._TryToLockMachine(m) - names = [m.name for m in manager.GetAvailableMachines(LABEL_LUMPY)] - self.assertEqual(names, ['lumpy1', 'lumpy3']) - - @mock.patch.object(time, 'sleep') - @mock.patch.object(command_executer.CommandExecuter, 
'RunCommand') - @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand') - @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum') - def test_image_machine(self, mock_checksummer, mock_run_croscmd, mock_run_cmd, - mock_sleep): - - def FakeMD5Checksum(_input_str): - return 'machine_fake_md5_checksum' - - self.fake_logger_count = 0 - self.fake_logger_msgs = [] - - def FakeLogOutput(msg): - self.fake_logger_count += 1 - self.fake_logger_msgs.append(msg) - - def ResetValues(): - self.fake_logger_count = 0 - self.fake_logger_msgs = [] - mock_run_cmd.reset_mock() - mock_run_croscmd.reset_mock() - mock_checksummer.reset_mock() - mock_sleep.reset_mock() - machine.checksum = 'fake_md5_checksum' - self.mm.checksum = None - self.mm.num_reimages = 0 - - self.mock_cmd_exec.CrosRunCommand = mock_run_croscmd - self.mock_cmd_exec.RunCommand = mock_run_cmd - - self.mm.logger.LogOutput = FakeLogOutput - machine = self.mock_lumpy1 - machine._GetMD5Checksum = FakeMD5Checksum - machine.checksum = 'fake_md5_checksum' - mock_checksummer.return_value = 'fake_md5_checksum' - self.mock_cmd_exec.log_level = 'verbose' - - test_flag.SetTestMode(True) - # Test 1: label.image_type == "local" - LABEL_LUMPY.image_type = 'local' - self.mm.ImageMachine(machine, LABEL_LUMPY) - self.assertEqual(mock_run_cmd.call_count, 0) - self.assertEqual(mock_run_croscmd.call_count, 0) - - # Test 2: label.image_type == "trybot" - ResetValues() - LABEL_LUMPY.image_type = 'trybot' - mock_run_cmd.return_value = 0 - self.mm.ImageMachine(machine, LABEL_LUMPY) - self.assertEqual(mock_run_croscmd.call_count, 0) - self.assertEqual(mock_checksummer.call_count, 0) - - # Test 3: label.image_type is neither local nor trybot; retval from - # RunCommand is 1, i.e. image_chromeos fails... - ResetValues() - LABEL_LUMPY.image_type = 'other' - mock_run_cmd.return_value = 1 - try: - self.mm.ImageMachine(machine, LABEL_LUMPY) - except RuntimeError: - self.assertEqual(mock_checksummer.call_count, 0) - self.assertEqual(mock_run_cmd.call_count, 2) - self.assertEqual(mock_run_croscmd.call_count, 1) - self.assertEqual(mock_sleep.call_count, 1) - image_call_args_str = mock_run_cmd.call_args[0][0] - image_call_args = image_call_args_str.split(' ') - self.assertEqual(image_call_args[0], 'python') - self.assertEqual(image_call_args[1].split('/')[-1], 'image_chromeos.pyc') - image_call_args = image_call_args[2:] - self.assertEqual(image_call_args, [ - '--chromeos_root=/tmp/chromeos-root', '--image=lumpy_chromeos_image', - '--image_args=', '--remote=lumpy1', '--logging_level=average', - '--board=lumpy' - ]) - self.assertEqual(mock_run_croscmd.call_args[0][0], 'reboot && exit') - - # Test 4: Everything works properly. Trybot image type. 
- ResetValues() - LABEL_LUMPY.image_type = 'trybot' - mock_run_cmd.return_value = 0 - self.mm.ImageMachine(machine, LABEL_LUMPY) - self.assertEqual(mock_checksummer.call_count, 0) - self.assertEqual(mock_run_croscmd.call_count, 0) - self.assertEqual(mock_sleep.call_count, 0) - - def test_compute_common_checksum(self): - self.mm.machine_checksum = {} - self.mm.ComputeCommonCheckSum(LABEL_LUMPY) - self.assertEqual(self.mm.machine_checksum['lumpy'], 'lumpy123') - self.assertEqual(len(self.mm.machine_checksum), 1) - - self.mm.machine_checksum = {} - self.assertRaisesRegex(machine_manager.BadChecksum, r'daisy.*\n.*lumpy', - self.mm.ComputeCommonCheckSum, LABEL_MIX) - - def test_compute_common_checksum_string(self): - self.mm.machine_checksum_string = {} - self.mm.ComputeCommonCheckSumString(LABEL_LUMPY) - self.assertEqual(len(self.mm.machine_checksum_string), 1) - self.assertEqual(self.mm.machine_checksum_string['lumpy'], - 'lumpy_checksum_str') - - self.mm.machine_checksum_string = {} - self.mm.ComputeCommonCheckSumString(LABEL_MIX) - self.assertEqual(len(self.mm.machine_checksum_string), 1) - self.assertEqual(self.mm.machine_checksum_string['mix'], - 'daisy_checksum_str') - - @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput') - def test_try_to_lock_machine(self, mock_cros_runcmd): - mock_cros_runcmd.return_value = [0, 'false_lock_checksum', ''] - self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd - self.mm._machines = [] - self.mm._TryToLockMachine(self.mock_lumpy1) - self.assertEqual(len(self.mm._machines), 1) - self.assertEqual(self.mm._machines[0], self.mock_lumpy1) - self.assertEqual(self.mock_lumpy1.checksum, 'false_lock_checksum') - self.assertEqual(mock_cros_runcmd.call_count, 1) - cmd_str = mock_cros_runcmd.call_args[0][0] - self.assertEqual(cmd_str, 'cat /usr/local/osimage_checksum_file') - args_dict = mock_cros_runcmd.call_args[1] - self.assertEqual(len(args_dict), 2) - self.assertEqual(args_dict['machine'], self.mock_lumpy1.name) - self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos') - - @mock.patch.object(machine_manager, 'CrosMachine') - def test_add_machine(self, mock_machine): - - mock_machine.machine_checksum = 'daisy123' - self.assertEqual(len(self.mm._all_machines), 5) - self.mm.AddMachine('daisy3') - self.assertEqual(len(self.mm._all_machines), 6) - - self.assertRaises(Exception, self.mm.AddMachine, 'lumpy1') - - def test_remove_machine(self): - self.mm._machines = self.mm._all_machines - self.assertTrue(self.mock_lumpy2 in self.mm._machines) - self.mm.RemoveMachine(self.mock_lumpy2.name) - self.assertFalse(self.mock_lumpy2 in self.mm._machines) - - def test_force_same_image_to_all_machines(self): - self.image_log = [] - - def FakeImageMachine(machine, label_arg): - image = label_arg.chromeos_image - self.image_log.append('Pushed %s onto %s' % (image, machine.name)) - - def FakeSetUpChecksumInfo(): - pass - - self.mm.ImageMachine = FakeImageMachine - self.mock_lumpy1.SetUpChecksumInfo = FakeSetUpChecksumInfo - self.mock_lumpy2.SetUpChecksumInfo = FakeSetUpChecksumInfo - self.mock_lumpy3.SetUpChecksumInfo = FakeSetUpChecksumInfo - - self.mm.ForceSameImageToAllMachines(LABEL_LUMPY) - self.assertEqual(len(self.image_log), 3) - self.assertEqual(self.image_log[0], - 'Pushed lumpy_chromeos_image onto lumpy1') - self.assertEqual(self.image_log[1], - 'Pushed lumpy_chromeos_image onto lumpy2') - self.assertEqual(self.image_log[2], - 'Pushed lumpy_chromeos_image onto lumpy3') - - @mock.patch.object(image_checksummer.ImageChecksummer, 
'Checksum') - @mock.patch.object(hashlib, 'md5') - def test_acquire_machine(self, mock_md5, mock_checksum): - - self.msgs = [] - self.log_fatal_msgs = [] - - def FakeLock(machine): - self.msgs.append('Tried to lock %s' % machine.name) - - def FakeLogFatal(msg): - self.log_fatal_msgs.append(msg) - - self.mm._TryToLockMachine = FakeLock - self.mm.logger.LogFatal = FakeLogFatal - - mock_md5.return_value = '123456' - mock_checksum.return_value = 'fake_md5_checksum' - - self.mm._machines = self.mm._all_machines - self.mock_lumpy1.locked = True - self.mock_lumpy2.locked = True - self.mock_lumpy3.locked = False - self.mock_lumpy3.checksum = 'fake_md5_checksum' - self.mock_daisy1.locked = True - self.mock_daisy2.locked = False - self.mock_daisy2.checksum = 'fake_md5_checksum' - - self.mock_lumpy1.released_time = time.time() - self.mock_lumpy2.released_time = time.time() - self.mock_lumpy3.released_time = time.time() - self.mock_daisy1.released_time = time.time() - self.mock_daisy2.released_time = time.time() - - # Test 1. Basic test. Acquire lumpy3. - self.mm.AcquireMachine(LABEL_LUMPY) - m = self.mock_lumpy1 - self.assertEqual(m, self.mock_lumpy1) - self.assertTrue(self.mock_lumpy1.locked) - self.assertEqual(mock_md5.call_count, 0) - self.assertEqual(self.msgs, [ - 'Tried to lock lumpy1', 'Tried to lock lumpy2', 'Tried to lock lumpy3' - ]) - - # Test the second return statment (machine is unlocked, has no checksum) - save_locked = self.mock_lumpy1.locked - self.mock_lumpy1.locked = False - self.mock_lumpy1.checksum = None - m = self.mm.AcquireMachine(LABEL_LUMPY) - self.assertEqual(m, self.mock_lumpy1) - self.assertTrue(self.mock_lumpy1.locked) - - # Test the third return statement: - # - machine is unlocked - # - checksums don't match - # - current time minus release time is > 20. - self.mock_lumpy1.locked = False - self.mock_lumpy1.checksum = '123' - self.mock_lumpy1.released_time = time.time() - 8 - m = self.mm.AcquireMachine(LABEL_LUMPY) - self.assertEqual(m, self.mock_lumpy1) - self.assertTrue(self.mock_lumpy1.locked) - - # Test all machines are already locked. - m = self.mm.AcquireMachine(LABEL_LUMPY) - self.assertIsNone(m) - - # Restore values of mock_lumpy1, so other tests succeed. 
- self.mock_lumpy1.locked = save_locked - self.mock_lumpy1.checksum = '123' - - def test_get_available_machines(self): - self.mm._machines = self.mm._all_machines - - machine_list = self.mm.GetAvailableMachines() - self.assertEqual(machine_list, self.mm._all_machines) - - machine_list = self.mm.GetAvailableMachines(LABEL_MIX) - self.assertEqual(machine_list, - [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3]) - - machine_list = self.mm.GetAvailableMachines(LABEL_LUMPY) - self.assertEqual(machine_list, - [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3]) - - def test_get_machines(self): - machine_list = self.mm.GetMachines() - self.assertEqual(machine_list, self.mm._all_machines) - - machine_list = self.mm.GetMachines(LABEL_MIX) - self.assertEqual(machine_list, - [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3]) - - machine_list = self.mm.GetMachines(LABEL_LUMPY) - self.assertEqual(machine_list, - [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3]) - - def test_release_machines(self): - - self.mm._machines = [self.mock_lumpy1, self.mock_daisy2] - - self.mock_lumpy1.locked = True - self.mock_daisy2.locked = True - - self.assertTrue(self.mock_lumpy1.locked) - self.mm.ReleaseMachine(self.mock_lumpy1) - self.assertFalse(self.mock_lumpy1.locked) - self.assertEqual(self.mock_lumpy1.status, 'Available') - - self.assertTrue(self.mock_daisy2.locked) - self.mm.ReleaseMachine(self.mock_daisy2) - self.assertFalse(self.mock_daisy2.locked) - self.assertEqual(self.mock_daisy2.status, 'Available') - - # Test double-relase... - self.assertRaises(AssertionError, self.mm.ReleaseMachine, self.mock_lumpy1) - - def test_cleanup(self): - self.mock_logger.reset_mock() - self.mm.Cleanup() - self.assertEqual(self.mock_logger.call_count, 0) - - OUTPUT_STR = ('Machine Status:\nMachine Thread ' - 'Lock Status Checksum' - ' \nlumpy1 test ' - 'run True PENDING 123' - ' \nlumpy2 ' - 'test run False PENDING 123' - ' \nlumpy3 ' - 'test run False PENDING 123' - ' \ndaisy1 ' - 'test run False PENDING 678' - ' \ndaisy2 ' - 'test run True PENDING 678' - ' ') - - def test_as_string(self): + """Test for machine manager class.""" + + msgs = [] + image_log = [] + log_fatal_msgs = [] + fake_logger_count = 0 + fake_logger_msgs = [] + + mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) mock_logger = mock.Mock(spec=logger.Logger) - bench = Benchmark( - 'page_cycler_v2.netsim.top_10', # name - 'page_cycler_v2.netsim.top_10', # test_name - '', # test_args - 1, # iteratins - False, # rm_chroot_tmp - '', # perf_args - suite='telemetry_Crosperf') # suite - - test_run = MockBenchmarkRun('test run', bench, LABEL_LUMPY, 1, [], self.mm, - mock_logger, 'verbose', '', {}) - - self.mm._machines = [ - self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3, self.mock_daisy1, - self.mock_daisy2 - ] - - self.mock_lumpy1.test_run = test_run - self.mock_lumpy2.test_run = test_run - self.mock_lumpy3.test_run = test_run - self.mock_daisy1.test_run = test_run - self.mock_daisy2.test_run = test_run - - self.mock_lumpy1.locked = True - self.mock_lumpy2.locked = False - self.mock_lumpy3.locked = False - self.mock_daisy1.locked = False - self.mock_daisy2.locked = True - - self.mock_lumpy1.checksum = '123' - self.mock_lumpy2.checksum = '123' - self.mock_lumpy3.checksum = '123' - self.mock_daisy1.checksum = '678' - self.mock_daisy2.checksum = '678' - - output = self.mm.AsString() - self.assertEqual(output, self.OUTPUT_STR) - - def test_get_all_cpu_info(self): - info = self.mm.GetAllCPUInfo([LABEL_LUMPY, LABEL_MIX]) - 
self.assertEqual( - info, 'lumpy\n-------------------\nlumpy_cpu_info\n\n\nmix\n-' - '------------------\ndaisy_cpu_info\n\n\n') + mock_lumpy1 = mock.Mock(spec=machine_manager.CrosMachine) + mock_lumpy2 = mock.Mock(spec=machine_manager.CrosMachine) + mock_lumpy3 = mock.Mock(spec=machine_manager.CrosMachine) + mock_lumpy4 = mock.Mock(spec=machine_manager.CrosMachine) + mock_daisy1 = mock.Mock(spec=machine_manager.CrosMachine) + mock_daisy2 = mock.Mock(spec=machine_manager.CrosMachine) + + @mock.patch.object(os.path, "isdir") + + # pylint: disable=arguments-differ + def setUp(self, mock_isdir): + + mock_isdir.return_value = True + self.mm = machine_manager.MachineManager( + "/usr/local/chromeos", + 0, + "average", + None, + self.mock_cmd_exec, + self.mock_logger, + ) + + self.mock_lumpy1.name = "lumpy1" + self.mock_lumpy2.name = "lumpy2" + self.mock_lumpy3.name = "lumpy3" + self.mock_lumpy4.name = "lumpy4" + self.mock_daisy1.name = "daisy1" + self.mock_daisy2.name = "daisy2" + self.mock_lumpy1.machine_checksum = "lumpy123" + self.mock_lumpy2.machine_checksum = "lumpy123" + self.mock_lumpy3.machine_checksum = "lumpy123" + self.mock_lumpy4.machine_checksum = "lumpy123" + self.mock_daisy1.machine_checksum = "daisy12" + self.mock_daisy2.machine_checksum = "daisy12" + self.mock_lumpy1.checksum_string = "lumpy_checksum_str" + self.mock_lumpy2.checksum_string = "lumpy_checksum_str" + self.mock_lumpy3.checksum_string = "lumpy_checksum_str" + self.mock_lumpy4.checksum_string = "lumpy_checksum_str" + self.mock_daisy1.checksum_string = "daisy_checksum_str" + self.mock_daisy2.checksum_string = "daisy_checksum_str" + self.mock_lumpy1.cpuinfo = "lumpy_cpu_info" + self.mock_lumpy2.cpuinfo = "lumpy_cpu_info" + self.mock_lumpy3.cpuinfo = "lumpy_cpu_info" + self.mock_lumpy4.cpuinfo = "lumpy_cpu_info" + self.mock_daisy1.cpuinfo = "daisy_cpu_info" + self.mock_daisy2.cpuinfo = "daisy_cpu_info" + self.mm._all_machines.append(self.mock_daisy1) + self.mm._all_machines.append(self.mock_daisy2) + self.mm._all_machines.append(self.mock_lumpy1) + self.mm._all_machines.append(self.mock_lumpy2) + self.mm._all_machines.append(self.mock_lumpy3) + + def testGetMachines(self): + manager = MyMachineManager(CHROMEOS_ROOT) + for m in MACHINE_NAMES: + manager.AddMachine(m) + names = [m.name for m in manager.GetMachines(LABEL_LUMPY)] + self.assertEqual(names, ["lumpy1", "lumpy2", "lumpy3"]) + + def testGetAvailableMachines(self): + manager = MyMachineManager(CHROMEOS_ROOT) + for m in MACHINE_NAMES: + manager.AddMachine(m) + for m in manager._all_machines: + if int(m.name[-1]) % 2: + manager._TryToLockMachine(m) + names = [m.name for m in manager.GetAvailableMachines(LABEL_LUMPY)] + self.assertEqual(names, ["lumpy1", "lumpy3"]) + + @mock.patch.object(time, "sleep") + @mock.patch.object(command_executer.CommandExecuter, "RunCommand") + @mock.patch.object(command_executer.CommandExecuter, "CrosRunCommand") + @mock.patch.object(image_checksummer.ImageChecksummer, "Checksum") + def test_image_machine( + self, mock_checksummer, mock_run_croscmd, mock_run_cmd, mock_sleep + ): + def FakeMD5Checksum(_input_str): + return "machine_fake_md5_checksum" + + self.fake_logger_count = 0 + self.fake_logger_msgs = [] + + def FakeLogOutput(msg): + self.fake_logger_count += 1 + self.fake_logger_msgs.append(msg) + + def ResetValues(): + self.fake_logger_count = 0 + self.fake_logger_msgs = [] + mock_run_cmd.reset_mock() + mock_run_croscmd.reset_mock() + mock_checksummer.reset_mock() + mock_sleep.reset_mock() + machine.checksum = "fake_md5_checksum" + 
self.mm.checksum = None + self.mm.num_reimages = 0 + + self.mock_cmd_exec.CrosRunCommand = mock_run_croscmd + self.mock_cmd_exec.RunCommand = mock_run_cmd + + self.mm.logger.LogOutput = FakeLogOutput + machine = self.mock_lumpy1 + machine._GetMD5Checksum = FakeMD5Checksum + machine.checksum = "fake_md5_checksum" + mock_checksummer.return_value = "fake_md5_checksum" + self.mock_cmd_exec.log_level = "verbose" + + test_flag.SetTestMode(True) + # Test 1: label.image_type == "local" + LABEL_LUMPY.image_type = "local" + self.mm.ImageMachine(machine, LABEL_LUMPY) + self.assertEqual(mock_run_cmd.call_count, 0) + self.assertEqual(mock_run_croscmd.call_count, 0) + + # Test 2: label.image_type == "trybot" + ResetValues() + LABEL_LUMPY.image_type = "trybot" + mock_run_cmd.return_value = 0 + self.mm.ImageMachine(machine, LABEL_LUMPY) + self.assertEqual(mock_run_croscmd.call_count, 0) + self.assertEqual(mock_checksummer.call_count, 0) + + # Test 3: label.image_type is neither local nor trybot; retval from + # RunCommand is 1, i.e. image_chromeos fails... + ResetValues() + LABEL_LUMPY.image_type = "other" + mock_run_cmd.return_value = 1 + try: + self.mm.ImageMachine(machine, LABEL_LUMPY) + except RuntimeError: + self.assertEqual(mock_checksummer.call_count, 0) + self.assertEqual(mock_run_cmd.call_count, 2) + self.assertEqual(mock_run_croscmd.call_count, 1) + self.assertEqual(mock_sleep.call_count, 1) + image_call_args_str = mock_run_cmd.call_args[0][0] + image_call_args = image_call_args_str.split(" ") + self.assertEqual(image_call_args[0], "python") + self.assertEqual( + image_call_args[1].split("/")[-1], "image_chromeos.pyc" + ) + image_call_args = image_call_args[2:] + self.assertEqual( + image_call_args, + [ + "--chromeos_root=/tmp/chromeos-root", + "--image=lumpy_chromeos_image", + "--image_args=", + "--remote=lumpy1", + "--logging_level=average", + "--board=lumpy", + ], + ) + self.assertEqual(mock_run_croscmd.call_args[0][0], "reboot && exit") + + # Test 4: Everything works properly. Trybot image type. 
+ ResetValues()
+ LABEL_LUMPY.image_type = "trybot"
+ mock_run_cmd.return_value = 0
+ self.mm.ImageMachine(machine, LABEL_LUMPY)
+ self.assertEqual(mock_checksummer.call_count, 0)
+ self.assertEqual(mock_run_croscmd.call_count, 0)
+ self.assertEqual(mock_sleep.call_count, 0)
+
+ def test_compute_common_checksum(self):
+ self.mm.machine_checksum = {}
+ self.mm.ComputeCommonCheckSum(LABEL_LUMPY)
+ self.assertEqual(self.mm.machine_checksum["lumpy"], "lumpy123")
+ self.assertEqual(len(self.mm.machine_checksum), 1)
+
+ self.mm.machine_checksum = {}
+ self.assertRaisesRegex(
+ machine_manager.BadChecksum,
+ r"daisy.*\n.*lumpy",
+ self.mm.ComputeCommonCheckSum,
+ LABEL_MIX,
+ )
+
+ def test_compute_common_checksum_string(self):
+ self.mm.machine_checksum_string = {}
+ self.mm.ComputeCommonCheckSumString(LABEL_LUMPY)
+ self.assertEqual(len(self.mm.machine_checksum_string), 1)
+ self.assertEqual(
+ self.mm.machine_checksum_string["lumpy"], "lumpy_checksum_str"
+ )
+
+ self.mm.machine_checksum_string = {}
+ self.mm.ComputeCommonCheckSumString(LABEL_MIX)
+ self.assertEqual(len(self.mm.machine_checksum_string), 1)
+ self.assertEqual(
+ self.mm.machine_checksum_string["mix"], "daisy_checksum_str"
+ )
+
+ @mock.patch.object(
+ command_executer.CommandExecuter, "CrosRunCommandWOutput"
+ )
+ def test_try_to_lock_machine(self, mock_cros_runcmd):
+ mock_cros_runcmd.return_value = [0, "false_lock_checksum", ""]
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
+ self.mm._machines = []
+ self.mm._TryToLockMachine(self.mock_lumpy1)
+ self.assertEqual(len(self.mm._machines), 1)
+ self.assertEqual(self.mm._machines[0], self.mock_lumpy1)
+ self.assertEqual(self.mock_lumpy1.checksum, "false_lock_checksum")
+ self.assertEqual(mock_cros_runcmd.call_count, 1)
+ cmd_str = mock_cros_runcmd.call_args[0][0]
+ self.assertEqual(cmd_str, "cat /usr/local/osimage_checksum_file")
+ args_dict = mock_cros_runcmd.call_args[1]
+ self.assertEqual(len(args_dict), 2)
+ self.assertEqual(args_dict["machine"], self.mock_lumpy1.name)
+ self.assertEqual(args_dict["chromeos_root"], "/usr/local/chromeos")
+
+ @mock.patch.object(machine_manager, "CrosMachine")
+ def test_add_machine(self, mock_machine):
+
+ mock_machine.machine_checksum = "daisy123"
+ self.assertEqual(len(self.mm._all_machines), 5)
+ self.mm.AddMachine("daisy3")
+ self.assertEqual(len(self.mm._all_machines), 6)
+
+ self.assertRaises(Exception, self.mm.AddMachine, "lumpy1")
+
+ def test_remove_machine(self):
+ self.mm._machines = self.mm._all_machines
+ self.assertTrue(self.mock_lumpy2 in self.mm._machines)
+ self.mm.RemoveMachine(self.mock_lumpy2.name)
+ self.assertFalse(self.mock_lumpy2 in self.mm._machines)
+
+ def test_force_same_image_to_all_machines(self):
+ self.image_log = []
+
+ def FakeImageMachine(machine, label_arg):
+ image = label_arg.chromeos_image
+ self.image_log.append("Pushed %s onto %s" % (image, machine.name))
+
+ def FakeSetUpChecksumInfo():
+ pass
+
+ self.mm.ImageMachine = FakeImageMachine
+ self.mock_lumpy1.SetUpChecksumInfo = FakeSetUpChecksumInfo
+ self.mock_lumpy2.SetUpChecksumInfo = FakeSetUpChecksumInfo
+ self.mock_lumpy3.SetUpChecksumInfo = FakeSetUpChecksumInfo
+
+ self.mm.ForceSameImageToAllMachines(LABEL_LUMPY)
+ self.assertEqual(len(self.image_log), 3)
+ self.assertEqual(
+ self.image_log[0], "Pushed lumpy_chromeos_image onto lumpy1"
+ )
+ self.assertEqual(
+ self.image_log[1], "Pushed lumpy_chromeos_image onto lumpy2"
+ )
+ self.assertEqual(
+ self.image_log[2], "Pushed lumpy_chromeos_image onto lumpy3"
+ )
+
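+ # The next test exercises AcquireMachine's selection order: among
+ # unlocked machines it first prefers one whose checksum already
+ # matches the image, then one with no checksum at all, and only
+ # then falls back on recently released machines. A condensed
+ # sketch of that ordering (illustrative only, not the real
+ # implementation):
+ #
+ #   def pick(unlocked, image_checksum):
+ #       for m in unlocked:  # tier 1: already imaged correctly
+ #           if image_checksum and m.checksum == image_checksum:
+ #               return m
+ #       for m in unlocked:  # tier 2: blank machine, cheap to image
+ #           if not m.checksum:
+ #               return m
+ #       for m in unlocked:  # tier 3: released 8-15 seconds ago
+ #           if 8 < time.time() - m.released_time <= 15:
+ #               return m
+ #       return None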
+ @mock.patch.object(image_checksummer.ImageChecksummer, "Checksum")
+ @mock.patch.object(hashlib, "md5")
+ def test_acquire_machine(self, mock_md5, mock_checksum):
+
+ self.msgs = []
+ self.log_fatal_msgs = []
+
+ def FakeLock(machine):
+ self.msgs.append("Tried to lock %s" % machine.name)
+
+ def FakeLogFatal(msg):
+ self.log_fatal_msgs.append(msg)
+
+ self.mm._TryToLockMachine = FakeLock
+ self.mm.logger.LogFatal = FakeLogFatal
+
+ mock_md5.return_value = "123456"
+ mock_checksum.return_value = "fake_md5_checksum"
+
+ self.mm._machines = self.mm._all_machines
+ self.mock_lumpy1.locked = True
+ self.mock_lumpy2.locked = True
+ self.mock_lumpy3.locked = False
+ self.mock_lumpy3.checksum = "fake_md5_checksum"
+ self.mock_daisy1.locked = True
+ self.mock_daisy2.locked = False
+ self.mock_daisy2.checksum = "fake_md5_checksum"
+
+ self.mock_lumpy1.released_time = time.time()
+ self.mock_lumpy2.released_time = time.time()
+ self.mock_lumpy3.released_time = time.time()
+ self.mock_daisy1.released_time = time.time()
+ self.mock_daisy2.released_time = time.time()
+
+ # Test 1. Basic test. Acquire lumpy3.
+ self.mm.AcquireMachine(LABEL_LUMPY)
+ m = self.mock_lumpy1
+ self.assertEqual(m, self.mock_lumpy1)
+ self.assertTrue(self.mock_lumpy1.locked)
+ self.assertEqual(mock_md5.call_count, 0)
+ self.assertEqual(
+ self.msgs,
+ [
+ "Tried to lock lumpy1",
+ "Tried to lock lumpy2",
+ "Tried to lock lumpy3",
+ ],
+ )
+
+ # Test the second return statement (machine is unlocked, has no checksum)
+ save_locked = self.mock_lumpy1.locked
+ self.mock_lumpy1.locked = False
+ self.mock_lumpy1.checksum = None
+ m = self.mm.AcquireMachine(LABEL_LUMPY)
+ self.assertEqual(m, self.mock_lumpy1)
+ self.assertTrue(self.mock_lumpy1.locked)
+
+ # Test the third return statement:
+ # - machine is unlocked
+ # - checksums don't match
+ # - current time minus release time is > 8 (but not more than 15).
+ self.mock_lumpy1.locked = False
+ self.mock_lumpy1.checksum = "123"
+ self.mock_lumpy1.released_time = time.time() - 8
+ m = self.mm.AcquireMachine(LABEL_LUMPY)
+ self.assertEqual(m, self.mock_lumpy1)
+ self.assertTrue(self.mock_lumpy1.locked)
+
+ # Test all machines are already locked.
+ m = self.mm.AcquireMachine(LABEL_LUMPY)
+ self.assertIsNone(m)
+
+ # Restore values of mock_lumpy1, so other tests succeed.
+ self.mock_lumpy1.locked = save_locked
+ self.mock_lumpy1.checksum = "123"
+
+ def test_get_available_machines(self):
+ self.mm._machines = self.mm._all_machines
+
+ machine_list = self.mm.GetAvailableMachines()
+ self.assertEqual(machine_list, self.mm._all_machines)
+
+ machine_list = self.mm.GetAvailableMachines(LABEL_MIX)
+ self.assertEqual(
+ machine_list, [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3]
+ )
+
+ machine_list = self.mm.GetAvailableMachines(LABEL_LUMPY)
+ self.assertEqual(
+ machine_list, [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3]
+ )
+
+ def test_get_machines(self):
+ machine_list = self.mm.GetMachines()
+ self.assertEqual(machine_list, self.mm._all_machines)
+
+ machine_list = self.mm.GetMachines(LABEL_MIX)
+ self.assertEqual(
+ machine_list, [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3]
+ )
+
+ machine_list = self.mm.GetMachines(LABEL_LUMPY)
+ self.assertEqual(
+ machine_list, [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3]
+ )
+
+ def test_release_machines(self):
+
+ self.mm._machines = [self.mock_lumpy1, self.mock_daisy2]
+
+ self.mock_lumpy1.locked = True
+ self.mock_daisy2.locked = True
+
+ self.assertTrue(self.mock_lumpy1.locked)
+ self.mm.ReleaseMachine(self.mock_lumpy1)
+ self.assertFalse(self.mock_lumpy1.locked)
+ self.assertEqual(self.mock_lumpy1.status, "Available")
+
+ self.assertTrue(self.mock_daisy2.locked)
+ self.mm.ReleaseMachine(self.mock_daisy2)
+ self.assertFalse(self.mock_daisy2.locked)
+ self.assertEqual(self.mock_daisy2.status, "Available")
+
+ # Test double-release...
+ self.assertRaises(
+ AssertionError, self.mm.ReleaseMachine, self.mock_lumpy1
+ )
+
+ def test_cleanup(self):
+ self.mock_logger.reset_mock()
+ self.mm.Cleanup()
+ self.assertEqual(self.mock_logger.call_count, 0)
+
+ OUTPUT_STR = (
+ "Machine Status:\nMachine Thread "
+ "Lock Status Checksum"
+ " \nlumpy1 test "
+ "run True PENDING 123"
+ " \nlumpy2 "
+ "test run False PENDING 123"
+ " \nlumpy3 "
+ "test run False PENDING 123"
+ " \ndaisy1 "
+ "test run False PENDING 678"
+ " \ndaisy2 "
+ "test run True PENDING 678"
+ " "
+ )
+
+ def test_as_string(self):
+
+ mock_logger = mock.Mock(spec=logger.Logger)
+
+ bench = Benchmark(
+ "page_cycler_v2.netsim.top_10",  # name
+ "page_cycler_v2.netsim.top_10",  # test_name
+ "",  # test_args
+ 1,  # iterations
+ False,  # rm_chroot_tmp
+ "",  # perf_args
+ suite="telemetry_Crosperf",
+ )  # suite
+
+ test_run = MockBenchmarkRun(
+ "test run",
+ bench,
+ LABEL_LUMPY,
+ 1,
+ [],
+ self.mm,
+ mock_logger,
+ "verbose",
+ "",
+ {},
+ )
+
+ self.mm._machines = [
+ self.mock_lumpy1,
+ self.mock_lumpy2,
+ self.mock_lumpy3,
+ self.mock_daisy1,
+ self.mock_daisy2,
+ ]
+
+ self.mock_lumpy1.test_run = test_run
+ self.mock_lumpy2.test_run = test_run
+ self.mock_lumpy3.test_run = test_run
+ self.mock_daisy1.test_run = test_run
+ self.mock_daisy2.test_run = test_run
+
+ self.mock_lumpy1.locked = True
+ self.mock_lumpy2.locked = False
+ self.mock_lumpy3.locked = False
+ self.mock_daisy1.locked = False
+ self.mock_daisy2.locked = True
+
+ self.mock_lumpy1.checksum = "123"
+ self.mock_lumpy2.checksum = "123"
+ self.mock_lumpy3.checksum = "123"
+ self.mock_daisy1.checksum = "678"
+ self.mock_daisy2.checksum = "678"
+
+ output = self.mm.AsString()
+ self.assertEqual(output, self.OUTPUT_STR)
+
+ def test_get_all_cpu_info(self):
+ info = self.mm.GetAllCPUInfo([LABEL_LUMPY, LABEL_MIX])
+ self.assertEqual(
+ info,
+ "lumpy\n-------------------\nlumpy_cpu_info\n\n\nmix\n-"
+ "------------------\ndaisy_cpu_info\n\n\n",
+ )
 MEMINFO_STRING =
"""MemTotal: 3990332 kB @@ -580,35 +662,37 @@ address sizes: 36 bits physical, 48 bits virtual power management: """ -CHECKSUM_STRING = ('processor: 0vendor_id: GenuineIntelcpu family: 6model: ' - '42model name: Intel(R) Celeron(R) CPU 867 @ ' - '1.30GHzstepping: 7microcode: 0x25cache size: 2048 ' - 'KBphysical id: 0siblings: 2cpu cores: 2' - 'fpu: yesfpu_exception: yescpuid level: ' - '13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 apic sep' - ' mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse ' - 'sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc ' - 'arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc ' - 'aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ' - 'ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt ' - 'tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts ' - 'dts tpr_shadow vnmi flexpriority ept vpidclflush size: ' - '64cache_alignment: 64address sizes: 36 bits physical, 48 ' - 'bits virtualpower management:processor: 1vendor_id: ' - 'GenuineIntelcpu family: 6model: 42model name: Intel(R) ' - 'Celeron(R) CPU 867 @ 1.30GHzstepping: 7microcode: 0x25cache' - ' size: 2048 KBphysical id: 0siblings: 2cpu cores:' - ' 2fpu: yesfpu_exception: yescpuid' - ' level: 13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 ' - 'apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx ' - 'fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm ' - 'constant_tsc arch_perfmon pebs bts rep_good nopl xtopology ' - 'nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl ' - 'vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic ' - 'popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt ' - 'pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush ' - 'size: 64cache_alignment: 64address sizes: 36 bits physical,' - ' 48 bits virtualpower management: 4194304') +CHECKSUM_STRING = ( + "processor: 0vendor_id: GenuineIntelcpu family: 6model: " + "42model name: Intel(R) Celeron(R) CPU 867 @ " + "1.30GHzstepping: 7microcode: 0x25cache size: 2048 " + "KBphysical id: 0siblings: 2cpu cores: 2" + "fpu: yesfpu_exception: yescpuid level: " + "13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 apic sep" + " mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse " + "sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc " + "arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc " + "aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 " + "ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt " + "tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts " + "dts tpr_shadow vnmi flexpriority ept vpidclflush size: " + "64cache_alignment: 64address sizes: 36 bits physical, 48 " + "bits virtualpower management:processor: 1vendor_id: " + "GenuineIntelcpu family: 6model: 42model name: Intel(R) " + "Celeron(R) CPU 867 @ 1.30GHzstepping: 7microcode: 0x25cache" + " size: 2048 KBphysical id: 0siblings: 2cpu cores:" + " 2fpu: yesfpu_exception: yescpuid" + " level: 13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 " + "apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx " + "fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm " + "constant_tsc arch_perfmon pebs bts rep_good nopl xtopology " + "nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl " + "vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic " + "popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt " + "pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush " + "size: 64cache_alignment: 64address sizes: 36 bits physical," + " 48 bits virtualpower management: 4194304" +) DUMP_VPD_STRING 
= """ "PBA_SN"="Pba.txt" @@ -667,187 +751,212 @@ wlan0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500 class CrosMachineTest(unittest.TestCase): - """Test for CrosMachine class.""" - - mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) - - @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo') - def test_init(self, mock_setup): - - cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos', - 'average', self.mock_cmd_exec) - self.assertEqual(mock_setup.call_count, 1) - self.assertEqual(cm.chromeos_root, '/usr/local/chromeos') - self.assertEqual(cm.log_level, 'average') - - @mock.patch.object(machine_manager.CrosMachine, 'IsReachable') - @mock.patch.object(machine_manager.CrosMachine, '_GetMemoryInfo') - @mock.patch.object(machine_manager.CrosMachine, '_GetCPUInfo') - @mock.patch.object(machine_manager.CrosMachine, - '_ComputeMachineChecksumString') - @mock.patch.object(machine_manager.CrosMachine, '_GetMachineID') - @mock.patch.object(machine_manager.CrosMachine, '_GetMD5Checksum') - def test_setup_checksum_info(self, mock_md5sum, mock_machineid, - mock_checkstring, mock_cpuinfo, mock_meminfo, - mock_isreachable): - - # Test 1. Machine is not reachable; SetUpChecksumInfo is called via - # __init__. - mock_isreachable.return_value = False - mock_md5sum.return_value = 'md5_checksum' - cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos', - 'average', self.mock_cmd_exec) - cm.checksum_string = 'This is a checksum string.' - cm.machine_id = 'machine_id1' - self.assertEqual(mock_isreachable.call_count, 1) - self.assertIsNone(cm.machine_checksum) - self.assertEqual(mock_meminfo.call_count, 0) - - # Test 2. Machine is reachable. Call explicitly. - mock_isreachable.return_value = True - cm.checksum_string = 'This is a checksum string.' - cm.machine_id = 'machine_id1' - cm.SetUpChecksumInfo() - self.assertEqual(mock_isreachable.call_count, 2) - self.assertEqual(mock_meminfo.call_count, 1) - self.assertEqual(mock_cpuinfo.call_count, 1) - self.assertEqual(mock_checkstring.call_count, 1) - self.assertEqual(mock_machineid.call_count, 1) - self.assertEqual(mock_md5sum.call_count, 2) - self.assertEqual(cm.machine_checksum, 'md5_checksum') - self.assertEqual(cm.machine_id_checksum, 'md5_checksum') - self.assertEqual(mock_md5sum.call_args_list[0][0][0], - 'This is a checksum string.') - self.assertEqual(mock_md5sum.call_args_list[1][0][0], 'machine_id1') - - @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand') - @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo') - def test_is_reachable(self, mock_setup, mock_run_cmd): - - cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos', - 'average', self.mock_cmd_exec) - self.mock_cmd_exec.CrosRunCommand = mock_run_cmd - - # Test 1. CrosRunCommand returns 1 (fail) - mock_run_cmd.return_value = 1 - result = cm.IsReachable() - self.assertFalse(result) - self.assertEqual(mock_setup.call_count, 1) - self.assertEqual(mock_run_cmd.call_count, 1) - - # Test 2. 
CrosRunCommand returns 0 (success) - mock_run_cmd.return_value = 0 - result = cm.IsReachable() - self.assertTrue(result) - self.assertEqual(mock_run_cmd.call_count, 2) - first_args = mock_run_cmd.call_args_list[0] - second_args = mock_run_cmd.call_args_list[1] - self.assertEqual(first_args[0], second_args[0]) - self.assertEqual(first_args[1], second_args[1]) - self.assertEqual(len(first_args[0]), 1) - self.assertEqual(len(first_args[1]), 2) - self.assertEqual(first_args[0][0], 'ls') - args_dict = first_args[1] - self.assertEqual(args_dict['machine'], 'daisy.cros') - self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos') - - @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo') - def test_parse_memory_info(self, _mock_setup): - cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos', - 'average', self.mock_cmd_exec) - cm.meminfo = MEMINFO_STRING - cm._ParseMemoryInfo() - self.assertEqual(cm.phys_kbytes, 4194304) - - @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput') - @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo') - def test_get_memory_info(self, _mock_setup, mock_run_cmd): - cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos', - 'average', self.mock_cmd_exec) - self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd - mock_run_cmd.return_value = [0, MEMINFO_STRING, ''] - cm._GetMemoryInfo() - self.assertEqual(mock_run_cmd.call_count, 1) - call_args = mock_run_cmd.call_args_list[0] - self.assertEqual(call_args[0][0], 'cat /proc/meminfo') - args_dict = call_args[1] - self.assertEqual(args_dict['machine'], 'daisy.cros') - self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos') - self.assertEqual(cm.meminfo, MEMINFO_STRING) - self.assertEqual(cm.phys_kbytes, 4194304) - - mock_run_cmd.return_value = [1, MEMINFO_STRING, ''] - self.assertRaises(Exception, cm._GetMemoryInfo) - - @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput') - @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo') - def test_get_cpu_info(self, _mock_setup, mock_run_cmd): - cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos', - 'average', self.mock_cmd_exec) - self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd - mock_run_cmd.return_value = [0, CPUINFO_STRING, ''] - cm._GetCPUInfo() - self.assertEqual(mock_run_cmd.call_count, 1) - call_args = mock_run_cmd.call_args_list[0] - self.assertEqual(call_args[0][0], 'cat /proc/cpuinfo') - args_dict = call_args[1] - self.assertEqual(args_dict['machine'], 'daisy.cros') - self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos') - self.assertEqual(cm.cpuinfo, CPUINFO_STRING) - - @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo') - def test_compute_machine_checksum_string(self, _mock_setup): - cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos', - 'average', self.mock_cmd_exec) - cm.cpuinfo = CPUINFO_STRING - cm.meminfo = MEMINFO_STRING - cm._ParseMemoryInfo() - cm._ComputeMachineChecksumString() - self.assertEqual(cm.checksum_string, CHECKSUM_STRING) - - @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo') - def test_get_md5_checksum(self, _mock_setup): - cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos', - 'average', self.mock_cmd_exec) - temp_str = 'abcde' - checksum_str = cm._GetMD5Checksum(temp_str) - self.assertEqual(checksum_str, 'ab56b4d92b40713acc5af89985d4b786') - - temp_str = '' - checksum_str = 
cm._GetMD5Checksum(temp_str) - self.assertEqual(checksum_str, '') - - @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput') - @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo') - def test_get_machine_id(self, _mock_setup, mock_run_cmd): - cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos', - 'average', self.mock_cmd_exec) - self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd - mock_run_cmd.return_value = [0, DUMP_VPD_STRING, ''] - - cm._GetMachineID() - self.assertEqual(cm.machine_id, '"Product_S/N"="HT4L91SC300208"') - - mock_run_cmd.return_value = [0, IFCONFIG_STRING, ''] - cm._GetMachineID() - self.assertEqual( - cm.machine_id, - ' ether 00:50:b6:63:db:65 txqueuelen 1000 (Ethernet)_ ' - 'ether e8:03:9a:9c:50:3d txqueuelen 1000 (Ethernet)_ ether ' - '44:6d:57:20:4a:c5 txqueuelen 1000 (Ethernet)') - - mock_run_cmd.return_value = [0, 'invalid hardware config', ''] - self.assertRaises(Exception, cm._GetMachineID) - - def test_add_cooldown_waittime(self): - cm = machine_manager.CrosMachine('1.2.3.4.cros', '/usr/local/chromeos', - 'average') - self.assertEqual(cm.GetCooldownWaitTime(), 0) - cm.AddCooldownWaitTime(250) - self.assertEqual(cm.GetCooldownWaitTime(), 250) - cm.AddCooldownWaitTime(1) - self.assertEqual(cm.GetCooldownWaitTime(), 251) - - -if __name__ == '__main__': - unittest.main() + """Test for CrosMachine class.""" + + mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) + + @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo") + def test_init(self, mock_setup): + + cm = machine_manager.CrosMachine( + "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec + ) + self.assertEqual(mock_setup.call_count, 1) + self.assertEqual(cm.chromeos_root, "/usr/local/chromeos") + self.assertEqual(cm.log_level, "average") + + @mock.patch.object(machine_manager.CrosMachine, "IsReachable") + @mock.patch.object(machine_manager.CrosMachine, "_GetMemoryInfo") + @mock.patch.object(machine_manager.CrosMachine, "_GetCPUInfo") + @mock.patch.object( + machine_manager.CrosMachine, "_ComputeMachineChecksumString" + ) + @mock.patch.object(machine_manager.CrosMachine, "_GetMachineID") + @mock.patch.object(machine_manager.CrosMachine, "_GetMD5Checksum") + def test_setup_checksum_info( + self, + mock_md5sum, + mock_machineid, + mock_checkstring, + mock_cpuinfo, + mock_meminfo, + mock_isreachable, + ): + + # Test 1. Machine is not reachable; SetUpChecksumInfo is called via + # __init__. + mock_isreachable.return_value = False + mock_md5sum.return_value = "md5_checksum" + cm = machine_manager.CrosMachine( + "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec + ) + cm.checksum_string = "This is a checksum string." + cm.machine_id = "machine_id1" + self.assertEqual(mock_isreachable.call_count, 1) + self.assertIsNone(cm.machine_checksum) + self.assertEqual(mock_meminfo.call_count, 0) + + # Test 2. Machine is reachable. Call explicitly. + mock_isreachable.return_value = True + cm.checksum_string = "This is a checksum string." 
+ cm.machine_id = "machine_id1" + cm.SetUpChecksumInfo() + self.assertEqual(mock_isreachable.call_count, 2) + self.assertEqual(mock_meminfo.call_count, 1) + self.assertEqual(mock_cpuinfo.call_count, 1) + self.assertEqual(mock_checkstring.call_count, 1) + self.assertEqual(mock_machineid.call_count, 1) + self.assertEqual(mock_md5sum.call_count, 2) + self.assertEqual(cm.machine_checksum, "md5_checksum") + self.assertEqual(cm.machine_id_checksum, "md5_checksum") + self.assertEqual( + mock_md5sum.call_args_list[0][0][0], "This is a checksum string." + ) + self.assertEqual(mock_md5sum.call_args_list[1][0][0], "machine_id1") + + @mock.patch.object(command_executer.CommandExecuter, "CrosRunCommand") + @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo") + def test_is_reachable(self, mock_setup, mock_run_cmd): + + cm = machine_manager.CrosMachine( + "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec + ) + self.mock_cmd_exec.CrosRunCommand = mock_run_cmd + + # Test 1. CrosRunCommand returns 1 (fail) + mock_run_cmd.return_value = 1 + result = cm.IsReachable() + self.assertFalse(result) + self.assertEqual(mock_setup.call_count, 1) + self.assertEqual(mock_run_cmd.call_count, 1) + + # Test 2. CrosRunCommand returns 0 (success) + mock_run_cmd.return_value = 0 + result = cm.IsReachable() + self.assertTrue(result) + self.assertEqual(mock_run_cmd.call_count, 2) + first_args = mock_run_cmd.call_args_list[0] + second_args = mock_run_cmd.call_args_list[1] + self.assertEqual(first_args[0], second_args[0]) + self.assertEqual(first_args[1], second_args[1]) + self.assertEqual(len(first_args[0]), 1) + self.assertEqual(len(first_args[1]), 2) + self.assertEqual(first_args[0][0], "ls") + args_dict = first_args[1] + self.assertEqual(args_dict["machine"], "daisy.cros") + self.assertEqual(args_dict["chromeos_root"], "/usr/local/chromeos") + + @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo") + def test_parse_memory_info(self, _mock_setup): + cm = machine_manager.CrosMachine( + "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec + ) + cm.meminfo = MEMINFO_STRING + cm._ParseMemoryInfo() + self.assertEqual(cm.phys_kbytes, 4194304) + + @mock.patch.object( + command_executer.CommandExecuter, "CrosRunCommandWOutput" + ) + @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo") + def test_get_memory_info(self, _mock_setup, mock_run_cmd): + cm = machine_manager.CrosMachine( + "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec + ) + self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd + mock_run_cmd.return_value = [0, MEMINFO_STRING, ""] + cm._GetMemoryInfo() + self.assertEqual(mock_run_cmd.call_count, 1) + call_args = mock_run_cmd.call_args_list[0] + self.assertEqual(call_args[0][0], "cat /proc/meminfo") + args_dict = call_args[1] + self.assertEqual(args_dict["machine"], "daisy.cros") + self.assertEqual(args_dict["chromeos_root"], "/usr/local/chromeos") + self.assertEqual(cm.meminfo, MEMINFO_STRING) + self.assertEqual(cm.phys_kbytes, 4194304) + + mock_run_cmd.return_value = [1, MEMINFO_STRING, ""] + self.assertRaises(Exception, cm._GetMemoryInfo) + + @mock.patch.object( + command_executer.CommandExecuter, "CrosRunCommandWOutput" + ) + @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo") + def test_get_cpu_info(self, _mock_setup, mock_run_cmd): + cm = machine_manager.CrosMachine( + "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec + ) + self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd + 
mock_run_cmd.return_value = [0, CPUINFO_STRING, ""] + cm._GetCPUInfo() + self.assertEqual(mock_run_cmd.call_count, 1) + call_args = mock_run_cmd.call_args_list[0] + self.assertEqual(call_args[0][0], "cat /proc/cpuinfo") + args_dict = call_args[1] + self.assertEqual(args_dict["machine"], "daisy.cros") + self.assertEqual(args_dict["chromeos_root"], "/usr/local/chromeos") + self.assertEqual(cm.cpuinfo, CPUINFO_STRING) + + @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo") + def test_compute_machine_checksum_string(self, _mock_setup): + cm = machine_manager.CrosMachine( + "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec + ) + cm.cpuinfo = CPUINFO_STRING + cm.meminfo = MEMINFO_STRING + cm._ParseMemoryInfo() + cm._ComputeMachineChecksumString() + self.assertEqual(cm.checksum_string, CHECKSUM_STRING) + + @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo") + def test_get_md5_checksum(self, _mock_setup): + cm = machine_manager.CrosMachine( + "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec + ) + temp_str = "abcde" + checksum_str = cm._GetMD5Checksum(temp_str) + self.assertEqual(checksum_str, "ab56b4d92b40713acc5af89985d4b786") + + temp_str = "" + checksum_str = cm._GetMD5Checksum(temp_str) + self.assertEqual(checksum_str, "") + + @mock.patch.object( + command_executer.CommandExecuter, "CrosRunCommandWOutput" + ) + @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo") + def test_get_machine_id(self, _mock_setup, mock_run_cmd): + cm = machine_manager.CrosMachine( + "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec + ) + self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd + mock_run_cmd.return_value = [0, DUMP_VPD_STRING, ""] + + cm._GetMachineID() + self.assertEqual(cm.machine_id, '"Product_S/N"="HT4L91SC300208"') + + mock_run_cmd.return_value = [0, IFCONFIG_STRING, ""] + cm._GetMachineID() + self.assertEqual( + cm.machine_id, + " ether 00:50:b6:63:db:65 txqueuelen 1000 (Ethernet)_ " + "ether e8:03:9a:9c:50:3d txqueuelen 1000 (Ethernet)_ ether " + "44:6d:57:20:4a:c5 txqueuelen 1000 (Ethernet)", + ) + + mock_run_cmd.return_value = [0, "invalid hardware config", ""] + self.assertRaises(Exception, cm._GetMachineID) + + def test_add_cooldown_waittime(self): + cm = machine_manager.CrosMachine( + "1.2.3.4.cros", "/usr/local/chromeos", "average" + ) + self.assertEqual(cm.GetCooldownWaitTime(), 0) + cm.AddCooldownWaitTime(250) + self.assertEqual(cm.GetCooldownWaitTime(), 250) + cm.AddCooldownWaitTime(1) + self.assertEqual(cm.GetCooldownWaitTime(), 251) + + +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py index 7ee74a81..a596662e 100644 --- a/crosperf/mock_instance.py +++ b/crosperf/mock_instance.py @@ -10,144 +10,163 @@ from __future__ import print_function from benchmark import Benchmark from label import MockLabel -perf_args = 'record -a -e cycles' + +perf_args = "record -a -e cycles" label1 = MockLabel( - 'test1', - 'build1', - 'image1', - 'autotest_dir', - 'debug_dir', - '/tmp/test_benchmark_run', - 'x86-alex', - 'chromeos-alex1', - image_args='', - cache_dir='', + "test1", + "build1", + "image1", + "autotest_dir", + "debug_dir", + "/tmp/test_benchmark_run", + "x86-alex", + "chromeos-alex1", + image_args="", + cache_dir="", cache_only=False, - log_level='average', - compiler='gcc', + log_level="average", + compiler="gcc", crosfleet=False, - chrome_src=None) + chrome_src=None, +) label2 = MockLabel( - 'test2', - 'build2', - 'image2', - 
'autotest_dir', - 'debug_dir', - '/tmp/test_benchmark_run_2', - 'x86-alex', - 'chromeos-alex2', - image_args='', - cache_dir='', + "test2", + "build2", + "image2", + "autotest_dir", + "debug_dir", + "/tmp/test_benchmark_run_2", + "x86-alex", + "chromeos-alex2", + image_args="", + cache_dir="", cache_only=False, - log_level='average', - compiler='gcc', + log_level="average", + compiler="gcc", crosfleet=False, - chrome_src=None) - -benchmark1 = Benchmark('benchmark1', 'autotest_name_1', 'autotest_args', 2, '', - perf_args, 'telemetry_Crosperf', '') - -benchmark2 = Benchmark('benchmark2', 'autotest_name_2', 'autotest_args', 2, '', - perf_args, 'telemetry_Crosperf', '') + chrome_src=None, +) + +benchmark1 = Benchmark( + "benchmark1", + "autotest_name_1", + "autotest_args", + 2, + "", + perf_args, + "telemetry_Crosperf", + "", +) + +benchmark2 = Benchmark( + "benchmark2", + "autotest_name_2", + "autotest_args", + 2, + "", + perf_args, + "telemetry_Crosperf", + "", +) keyval = {} keyval[0] = { - '': 'PASS', - 'milliseconds_1': '1', - 'milliseconds_2': '8', - 'milliseconds_3': '9.2', - 'test{1}': '2', - 'test{2}': '4', - 'ms_1': '2.1', - 'total': '5', - 'bool': 'True' + "": "PASS", + "milliseconds_1": "1", + "milliseconds_2": "8", + "milliseconds_3": "9.2", + "test{1}": "2", + "test{2}": "4", + "ms_1": "2.1", + "total": "5", + "bool": "True", } keyval[1] = { - '': 'PASS', - 'milliseconds_1': '3', - 'milliseconds_2': '5', - 'ms_1': '2.2', - 'total': '6', - 'test{1}': '3', - 'test{2}': '4', - 'bool': 'FALSE' + "": "PASS", + "milliseconds_1": "3", + "milliseconds_2": "5", + "ms_1": "2.2", + "total": "6", + "test{1}": "3", + "test{2}": "4", + "bool": "FALSE", } keyval[2] = { - '': 'PASS', - 'milliseconds_4': '30', - 'milliseconds_5': '50', - 'ms_1': '2.23', - 'total': '6', - 'test{1}': '5', - 'test{2}': '4', - 'bool': 'FALSE' + "": "PASS", + "milliseconds_4": "30", + "milliseconds_5": "50", + "ms_1": "2.23", + "total": "6", + "test{1}": "5", + "test{2}": "4", + "bool": "FALSE", } keyval[3] = { - '': 'PASS', - 'milliseconds_1': '3', - 'milliseconds_6': '7', - 'ms_1': '2.3', - 'total': '7', - 'test{1}': '2', - 'test{2}': '6', - 'bool': 'FALSE' + "": "PASS", + "milliseconds_1": "3", + "milliseconds_6": "7", + "ms_1": "2.3", + "total": "7", + "test{1}": "2", + "test{2}": "6", + "bool": "FALSE", } keyval[4] = { - '': 'PASS', - 'milliseconds_1': '3', - 'milliseconds_8': '6', - 'ms_1': '2.3', - 'total': '7', - 'test{1}': '2', - 'test{2}': '6', - 'bool': 'TRUE' + "": "PASS", + "milliseconds_1": "3", + "milliseconds_8": "6", + "ms_1": "2.3", + "total": "7", + "test{1}": "2", + "test{2}": "6", + "bool": "TRUE", } keyval[5] = { - '': 'PASS', - 'milliseconds_1': '3', - 'milliseconds_8': '6', - 'ms_1': '2.2', - 'total': '7', - 'test{1}': '2', - 'test{2}': '2', - 'bool': 'TRUE' + "": "PASS", + "milliseconds_1": "3", + "milliseconds_8": "6", + "ms_1": "2.2", + "total": "7", + "test{1}": "2", + "test{2}": "2", + "bool": "TRUE", } keyval[6] = { - '': 'PASS', - 'milliseconds_1': '3', - 'milliseconds_8': '6', - 'ms_1': '2', - 'total': '7', - 'test{1}': '2', - 'test{2}': '4', - 'bool': 'TRUE' + "": "PASS", + "milliseconds_1": "3", + "milliseconds_8": "6", + "ms_1": "2", + "total": "7", + "test{1}": "2", + "test{2}": "4", + "bool": "TRUE", } keyval[7] = { - '': 'PASS', - 'milliseconds_1': '3', - 'milliseconds_8': '6', - 'ms_1': '1', - 'total': '7', - 'test{1}': '1', - 'test{2}': '6', - 'bool': 'TRUE' + "": "PASS", + "milliseconds_1": "3", + "milliseconds_8": "6", + "ms_1": "1", + "total": "7", + "test{1}": "1", + 
"test{2}": "6", + "bool": "TRUE", } keyval[8] = { - '': 'PASS', - 'milliseconds_1': '3', - 'milliseconds_8': '6', - 'ms_1': '3.3', - 'total': '7', - 'test{1}': '2', - 'test{2}': '8', - 'bool': 'TRUE' + "": "PASS", + "milliseconds_1": "3", + "milliseconds_8": "6", + "ms_1": "3.3", + "total": "7", + "test{1}": "2", + "test{2}": "8", + "bool": "TRUE", } diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py index 33a6946d..3dd6839a 100644 --- a/crosperf/results_cache.py +++ b/crosperf/results_cache.py @@ -20,642 +20,729 @@ import tempfile from cros_utils import command_executer from cros_utils import misc - from image_checksummer import ImageChecksummer - import results_report import test_flag -SCRATCH_DIR = os.path.expanduser('~/cros_scratch') -RESULTS_FILE = 'results.pickle' -MACHINE_FILE = 'machine.txt' -AUTOTEST_TARBALL = 'autotest.tbz2' -RESULTS_TARBALL = 'results.tbz2' -PERF_RESULTS_FILE = 'perf-results.txt' -CACHE_KEYS_FILE = 'cache_keys.txt' + +SCRATCH_DIR = os.path.expanduser("~/cros_scratch") +RESULTS_FILE = "results.pickle" +MACHINE_FILE = "machine.txt" +AUTOTEST_TARBALL = "autotest.tbz2" +RESULTS_TARBALL = "results.tbz2" +PERF_RESULTS_FILE = "perf-results.txt" +CACHE_KEYS_FILE = "cache_keys.txt" class PidVerificationError(Exception): - """Error of perf PID verification in per-process mode.""" + """Error of perf PID verification in per-process mode.""" class PerfDataReadError(Exception): - """Error of reading a perf.data header.""" + """Error of reading a perf.data header.""" class Result(object): - """Class for holding the results of a single test run. - - This class manages what exactly is stored inside the cache without knowing - what the key of the cache is. For runs with perf, it stores perf.data, - perf.report, etc. The key generation is handled by the ResultsCache class. - """ - - def __init__(self, logger, label, log_level, machine, cmd_exec=None): - self.chromeos_root = label.chromeos_root - self._logger = logger - self.ce = cmd_exec or command_executer.GetCommandExecuter( - self._logger, log_level=log_level) - self.temp_dir = None - self.label = label - self.results_dir = None - self.log_level = log_level - self.machine = machine - self.perf_data_files = [] - self.perf_report_files = [] - self.results_file = [] - self.turbostat_log_file = '' - self.cpustats_log_file = '' - self.cpuinfo_file = '' - self.top_log_file = '' - self.wait_time_log_file = '' - self.chrome_version = '' - self.err = None - self.chroot_results_dir = '' - self.test_name = '' - self.keyvals = None - self.board = None - self.suite = None - self.cwp_dso = '' - self.retval = None - self.out = None - self.top_cmds = [] - - def GetTopCmds(self): - """Get the list of top commands consuming CPU on the machine.""" - return self.top_cmds - - def FormatStringTopCommands(self): - """Get formatted string of top commands. - - Get the formatted string with top commands consuming CPU on DUT machine. - Number of "non-chrome" processes in the list is limited to 5. - """ - format_list = [ - 'Top commands with highest CPU usage:', - # Header. - '%20s %9s %6s %s' % ('COMMAND', 'AVG CPU%', 'COUNT', 'HIGHEST 5'), - '-' * 50, - ] - if self.top_cmds: - # After switching to top processes we have to expand the list since there - # will be a lot of 'chrome' processes (up to 10, sometimes more) in the - # top. - # Let's limit the list size by the number of non-chrome processes. 
- limit_of_non_chrome_procs = 5 - num_of_non_chrome_procs = 0 - for topcmd in self.top_cmds: - print_line = '%20s %9.2f %6s %s' % ( - topcmd['cmd'], topcmd['cpu_use_avg'], topcmd['count'], - topcmd['top5_cpu_use']) - format_list.append(print_line) - if not topcmd['cmd'].startswith('chrome'): - num_of_non_chrome_procs += 1 - if num_of_non_chrome_procs >= limit_of_non_chrome_procs: - break - else: - format_list.append('[NO DATA FROM THE TOP LOG]') - format_list.append('-' * 50) - return '\n'.join(format_list) - - def CopyFilesTo(self, dest_dir, files_to_copy): - file_index = 0 - for file_to_copy in files_to_copy: - if not os.path.isdir(dest_dir): - command = 'mkdir -p %s' % dest_dir - self.ce.RunCommand(command) - dest_file = os.path.join( - dest_dir, ('%s.%s' % (os.path.basename(file_to_copy), file_index))) - ret = self.ce.CopyFiles(file_to_copy, dest_file, recursive=False) - if ret: - raise IOError('Could not copy results file: %s' % file_to_copy) - file_index += 1 - - def CopyResultsTo(self, dest_dir): - self.CopyFilesTo(dest_dir, self.results_file) - self.CopyFilesTo(dest_dir, self.perf_data_files) - self.CopyFilesTo(dest_dir, self.perf_report_files) - extra_files = [] - if self.top_log_file: - extra_files.append(self.top_log_file) - if self.cpuinfo_file: - extra_files.append(self.cpuinfo_file) - if extra_files: - self.CopyFilesTo(dest_dir, extra_files) - if self.results_file or self.perf_data_files or self.perf_report_files: - self._logger.LogOutput('Results files stored in %s.' % dest_dir) - - def CompressResultsTo(self, dest_dir): - tarball = os.path.join(self.results_dir, RESULTS_TARBALL) - # Test_that runs hold all output under TEST_NAME_HASHTAG/results/, - # while tast runs hold output under TEST_NAME/. - # Both ensure to be unique. - result_dir_name = self.test_name if self.suite == 'tast' else 'results' - results_dir = self.FindFilesInResultsDir('-name %s' % - result_dir_name).split('\n')[0] - - if not results_dir: - self._logger.LogOutput('WARNING: No results dir matching %r found' % - result_dir_name) - return - - self.CreateTarball(results_dir, tarball) - self.CopyFilesTo(dest_dir, [tarball]) - if results_dir: - self._logger.LogOutput('Results files compressed into %s.' % dest_dir) - - def GetNewKeyvals(self, keyvals_dict): - # Initialize 'units' dictionary. - units_dict = {} - for k in keyvals_dict: - units_dict[k] = '' - results_files = self.GetDataMeasurementsFiles() - for f in results_files: - # Make sure we can find the results file - if os.path.exists(f): - data_filename = f - else: - # Otherwise get the base filename and create the correct - # path for it. 
- _, f_base = misc.GetRoot(f) - data_filename = os.path.join(self.chromeos_root, 'chroot/tmp', - self.temp_dir, f_base) - if data_filename.find('.json') > 0: - raw_dict = dict() - if os.path.exists(data_filename): - with open(data_filename, 'r') as data_file: - raw_dict = json.load(data_file) - - if 'charts' in raw_dict: - raw_dict = raw_dict['charts'] - for k1 in raw_dict: - field_dict = raw_dict[k1] - for k2 in field_dict: - result_dict = field_dict[k2] - key = k1 + '__' + k2 - if 'value' in result_dict: - keyvals_dict[key] = result_dict['value'] - elif 'values' in result_dict: - values = result_dict['values'] - if ('type' in result_dict - and result_dict['type'] == 'list_of_scalar_values' and values - and values != 'null'): - keyvals_dict[key] = sum(values) / float(len(values)) - else: - keyvals_dict[key] = values - units_dict[key] = result_dict['units'] - else: - if os.path.exists(data_filename): - with open(data_filename, 'r') as data_file: - lines = data_file.readlines() - for line in lines: - tmp_dict = json.loads(line) - graph_name = tmp_dict['graph'] - graph_str = (graph_name + '__') if graph_name else '' - key = graph_str + tmp_dict['description'] - keyvals_dict[key] = tmp_dict['value'] - units_dict[key] = tmp_dict['units'] - - return keyvals_dict, units_dict - - def AppendTelemetryUnits(self, keyvals_dict, units_dict): - """keyvals_dict is the dict of key-value used to generate Crosperf reports. - - units_dict is a dictionary of the units for the return values in - keyvals_dict. We need to associate the units with the return values, - for Telemetry tests, so that we can include the units in the reports. - This function takes each value in keyvals_dict, finds the corresponding - unit in the units_dict, and replaces the old value with a list of the - old value and the units. This later gets properly parsed in the - ResultOrganizer class, for generating the reports. - """ + """Class for holding the results of a single test run. - results_dict = {} - for k in keyvals_dict: - # We don't want these lines in our reports; they add no useful data. - if not k or k == 'telemetry_Crosperf': - continue - val = keyvals_dict[k] - units = units_dict[k] - new_val = [val, units] - results_dict[k] = new_val - return results_dict - - def GetKeyvals(self): - results_in_chroot = os.path.join(self.chromeos_root, 'chroot', 'tmp') - if not self.temp_dir: - self.temp_dir = tempfile.mkdtemp(dir=results_in_chroot) - command = f'cp -r {self.results_dir}/* {self.temp_dir}' - self.ce.RunCommand(command, print_to_console=False) - - command = ('./generate_test_report --no-color --csv %s' % - (os.path.join('/tmp', os.path.basename(self.temp_dir)))) - _, out, _ = self.ce.ChrootRunCommandWOutput(self.chromeos_root, - command, - print_to_console=False) - keyvals_dict = {} - tmp_dir_in_chroot = misc.GetInsideChrootPath(self.chromeos_root, - self.temp_dir) - for line in out.splitlines(): - tokens = re.split('=|,', line) - key = tokens[-2] - if key.startswith(tmp_dir_in_chroot): - key = key[len(tmp_dir_in_chroot) + 1:] - value = tokens[-1] - keyvals_dict[key] = value - - # Check to see if there is a perf_measurements file and get the - # data from it if so. - keyvals_dict, units_dict = self.GetNewKeyvals(keyvals_dict) - if self.suite == 'telemetry_Crosperf': - # For telemtry_Crosperf results, append the units to the return - # results, for use in generating the reports. 
- keyvals_dict = self.AppendTelemetryUnits(keyvals_dict, units_dict) - return keyvals_dict - - def GetSamples(self): - actual_samples = 0 - for perf_data_file in self.perf_data_files: - chroot_perf_data_file = misc.GetInsideChrootPath(self.chromeos_root, - perf_data_file) - perf_path = os.path.join(self.chromeos_root, 'chroot', 'usr/bin/perf') - perf_file = '/usr/sbin/perf' - if os.path.exists(perf_path): - perf_file = '/usr/bin/perf' - - # For each perf.data, we want to collect sample count for specific DSO. - # We specify exact match for known DSO type, and every sample for `all`. - exact_match = '' - if self.cwp_dso == 'all': - exact_match = '""' - elif self.cwp_dso == 'chrome': - exact_match = '" chrome "' - elif self.cwp_dso == 'kallsyms': - exact_match = '"[kernel.kallsyms]"' - else: - # This will need to be updated once there are more DSO types supported, - # if user want an exact match for the field they want. - exact_match = '"%s"' % self.cwp_dso - - command = ('%s report -n -s dso -i %s 2> /dev/null | grep %s' % - (perf_file, chroot_perf_data_file, exact_match)) - _, result, _ = self.ce.ChrootRunCommandWOutput(self.chromeos_root, - command) - # Accumulate the sample count for all matched fields. - # Each line looks like this: - # 45.42% 237210 chrome - # And we want the second number which is the sample count. - samples = 0 - try: - for line in result.split('\n'): - attr = line.split() - if len(attr) == 3 and '%' in attr[0]: - samples += int(attr[1]) - except: - raise RuntimeError('Cannot parse perf dso result') - - actual_samples += samples - - # Remove idle cycles from the accumulated sample count. - perf_report_file = f'{perf_data_file}.report' - if not os.path.exists(perf_report_file): - raise RuntimeError(f'Missing perf report file: {perf_report_file}') - - idle_functions = { - '[kernel.kallsyms]': - ('intel_idle', 'arch_cpu_idle', 'intel_idle', 'cpu_startup_entry', - 'default_idle', 'cpu_idle_loop', 'do_idle'), - } - idle_samples = 0 - - with open(perf_report_file) as f: - try: - for line in f: - line = line.strip() - if not line or line[0] == '#': - continue - # Each line has the following fields, - # pylint: disable=line-too-long - # Overhead Samples Command Shared Object Symbol - # pylint: disable=line-too-long - # 1.48% 60 swapper [kernel.kallsyms] [k] intel_idle - # pylint: disable=line-too-long - # 0.00% 1 shill libshill-net.so [.] 
std::__1::vector<unsigned char, std::__1::allocator<unsigned char> >::vector<unsigned char const*> - _, samples, _, dso, _, function = line.split(None, 5) - - if dso in idle_functions and function in idle_functions[dso]: - if self.log_level != 'verbose': - self._logger.LogOutput('Removing %s samples from %s in %s' % - (samples, function, dso)) - idle_samples += int(samples) - except: - raise RuntimeError('Cannot parse perf report') - actual_samples -= idle_samples - return [actual_samples, u'samples'] - - def GetResultsDir(self): - if self.suite == 'tast': - mo = re.search(r'Writing results to (\S+)', self.out) - else: - mo = re.search(r'Results placed in (\S+)', self.out) - if mo: - result = mo.group(1) - return result - raise RuntimeError('Could not find results directory.') - - def FindFilesInResultsDir(self, find_args): - if not self.results_dir: - return '' - - command = 'find %s %s' % (self.results_dir, find_args) - ret, out, _ = self.ce.RunCommandWOutput(command, print_to_console=False) - if ret: - raise RuntimeError('Could not run find command!') - return out - - def GetResultsFile(self): - if self.suite == 'telemetry_Crosperf': - return self.FindFilesInResultsDir('-name histograms.json').splitlines() - return self.FindFilesInResultsDir('-name results-chart.json').splitlines() - - def GetPerfDataFiles(self): - return self.FindFilesInResultsDir('-name perf.data').splitlines() - - def GetPerfReportFiles(self): - return self.FindFilesInResultsDir('-name perf.data.report').splitlines() - - def GetDataMeasurementsFiles(self): - result = self.FindFilesInResultsDir('-name perf_measurements').splitlines() - if not result: - if self.suite == 'telemetry_Crosperf': - result = ( - self.FindFilesInResultsDir('-name histograms.json').splitlines()) - else: - result = (self.FindFilesInResultsDir( - '-name results-chart.json').splitlines()) - return result - - def GetTurbostatFile(self): - """Get turbostat log path string.""" - return self.FindFilesInResultsDir('-name turbostat.log').split('\n')[0] - - def GetCpustatsFile(self): - """Get cpustats log path string.""" - return self.FindFilesInResultsDir('-name cpustats.log').split('\n')[0] - - def GetCpuinfoFile(self): - """Get cpustats log path string.""" - return self.FindFilesInResultsDir('-name cpuinfo.log').split('\n')[0] - - def GetTopFile(self): - """Get cpustats log path string.""" - return self.FindFilesInResultsDir('-name top.log').split('\n')[0] - - def GetWaitTimeFile(self): - """Get wait time log path string.""" - return self.FindFilesInResultsDir('-name wait_time.log').split('\n')[0] - - def _CheckDebugPath(self, option, path): - relative_path = path[1:] - out_chroot_path = os.path.join(self.chromeos_root, 'chroot', relative_path) - if os.path.exists(out_chroot_path): - if option == 'kallsyms': - path = os.path.join(path, 'System.map-*') - return '--' + option + ' ' + path - else: - print('** WARNING **: --%s option not applied, %s does not exist' % - (option, out_chroot_path)) - return '' - - def GeneratePerfReportFiles(self): - perf_report_files = [] - for perf_data_file in self.perf_data_files: - # Generate a perf.report and store it side-by-side with the perf.data - # file. 
- chroot_perf_data_file = misc.GetInsideChrootPath(self.chromeos_root, - perf_data_file) - perf_report_file = '%s.report' % perf_data_file - if os.path.exists(perf_report_file): - raise RuntimeError('Perf report file already exists: %s' % - perf_report_file) - chroot_perf_report_file = misc.GetInsideChrootPath( - self.chromeos_root, perf_report_file) - perf_path = os.path.join(self.chromeos_root, 'chroot', 'usr/bin/perf') - - perf_file = '/usr/sbin/perf' - if os.path.exists(perf_path): - perf_file = '/usr/bin/perf' - - debug_path = self.label.debug_path - - if debug_path: - symfs = '--symfs ' + debug_path - vmlinux = '--vmlinux ' + os.path.join(debug_path, 'usr', 'lib', - 'debug', 'boot', 'vmlinux') - kallsyms = '' - print('** WARNING **: --kallsyms option not applied, no System.map-* ' - 'for downloaded image.') - else: - if self.label.image_type != 'local': - print('** WARNING **: Using local debug info in /build, this may ' - 'not match the downloaded image.') - build_path = os.path.join('/build', self.board) - symfs = self._CheckDebugPath('symfs', build_path) - vmlinux_path = os.path.join(build_path, 'usr/lib/debug/boot/vmlinux') - vmlinux = self._CheckDebugPath('vmlinux', vmlinux_path) - kallsyms_path = os.path.join(build_path, 'boot') - kallsyms = self._CheckDebugPath('kallsyms', kallsyms_path) - - command = ('%s report -n %s %s %s -i %s --stdio > %s' % - (perf_file, symfs, vmlinux, kallsyms, chroot_perf_data_file, - chroot_perf_report_file)) - if self.log_level != 'verbose': - self._logger.LogOutput('Generating perf report...\nCMD: %s' % command) - exit_code = self.ce.ChrootRunCommand(self.chromeos_root, command) - if exit_code == 0: - if self.log_level != 'verbose': - self._logger.LogOutput('Perf report generated successfully.') - else: - raise RuntimeError('Perf report not generated correctly. CMD: %s' % - command) - - # Add a keyval to the dictionary for the events captured. - perf_report_files.append( - misc.GetOutsideChrootPath(self.chromeos_root, - chroot_perf_report_file)) - return perf_report_files - - def GatherPerfResults(self): - report_id = 0 - for perf_report_file in self.perf_report_files: - with open(perf_report_file, 'r') as f: - report_contents = f.read() - for group in re.findall(r'Events: (\S+) (\S+)', report_contents): - num_events = group[0] - event_name = group[1] - key = 'perf_%s_%s' % (report_id, event_name) - value = str(misc.UnitToNumber(num_events)) - self.keyvals[key] = value - - def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso): - self.board = self.label.board - self.out = out - self.err = err - self.retval = retval - self.test_name = test - self.suite = suite - self.cwp_dso = cwp_dso - self.chroot_results_dir = self.GetResultsDir() - self.results_dir = misc.GetOutsideChrootPath(self.chromeos_root, - self.chroot_results_dir) - self.results_file = self.GetResultsFile() - self.perf_data_files = self.GetPerfDataFiles() - # Include all perf.report data in table. - self.perf_report_files = self.GeneratePerfReportFiles() - self.turbostat_log_file = self.GetTurbostatFile() - self.cpustats_log_file = self.GetCpustatsFile() - self.cpuinfo_file = self.GetCpuinfoFile() - self.top_log_file = self.GetTopFile() - self.wait_time_log_file = self.GetWaitTimeFile() - # TODO(asharif): Do something similar with perf stat. - - # Grab keyvals from the directory. - self.ProcessResults() - - def ProcessChartResults(self): - # Open and parse the json results file generated by telemetry/test_that. 
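
[Illustrative note, not part of this patch: the chart parsing that follows is only reformatted by this change. A minimal sketch of how a "list_of_scalar_values" entry is reduced to [average, units], using a made-up results-chart.json payload:]

    import json

    # Hypothetical payload; real files are written by telemetry/test_that.
    raw_dict = json.loads(
        '{"charts": {"page_load": {"summary": {'
        '"type": "list_of_scalar_values",'
        ' "values": [10.0, 20.0, 30.0], "units": "ms"}}}}'
    )
    raw_dict = raw_dict.get("charts", raw_dict)
    for k, field_dict in raw_dict.items():
        for item, value_dict in field_dict.items():
            values = value_dict["values"]
            result = sum(values) / float(len(values))
            print(k + "__" + item, [result, value_dict["units"]])
    # -> page_load__summary [20.0, 'ms']
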
- if not self.results_file: - raise IOError('No results file found.') - filename = self.results_file[0] - if not filename.endswith('.json'): - raise IOError('Attempt to call json on non-json file: %s' % filename) - if not os.path.exists(filename): - raise IOError('%s does not exist' % filename) - - keyvals = {} - with open(filename, 'r') as f: - raw_dict = json.load(f) - if 'charts' in raw_dict: - raw_dict = raw_dict['charts'] - for k, field_dict in raw_dict.items(): - for item in field_dict: - keyname = k + '__' + item - value_dict = field_dict[item] - if 'value' in value_dict: - result = value_dict['value'] - elif 'values' in value_dict: - values = value_dict['values'] - if not values: - continue - if ('type' in value_dict - and value_dict['type'] == 'list_of_scalar_values' - and values != 'null'): - result = sum(values) / float(len(values)) - else: - result = values - else: - continue - units = value_dict['units'] - new_value = [result, units] - keyvals[keyname] = new_value - return keyvals - - def ProcessTurbostatResults(self): - """Given turbostat_log_file non-null parse cpu stats from file. - - Returns: - Dictionary of 'cpufreq', 'cputemp' where each - includes dictionary 'all': [list_of_values] - - Example of the output of turbostat_log. - ---------------------- - CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp - - 329 12.13 2723 2393 10975 77 - 0 336 12.41 2715 2393 6328 77 - 2 323 11.86 2731 2393 4647 69 - CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp - - 1940 67.46 2884 2393 39920 83 - 0 1827 63.70 2877 2393 21184 83 - """ - cpustats = {} - read_data = '' - with open(self.turbostat_log_file) as f: - read_data = f.readlines() - - if not read_data: - self._logger.LogOutput('WARNING: Turbostat output file is empty.') - return {} - - # First line always contains the header. - stats = read_data[0].split() - - # Mandatory parameters. - if 'CPU' not in stats: - self._logger.LogOutput( - 'WARNING: Missing data for CPU# in Turbostat output.') - return {} - if 'Bzy_MHz' not in stats: - self._logger.LogOutput( - 'WARNING: Missing data for Bzy_MHz in Turbostat output.') - return {} - cpu_index = stats.index('CPU') - cpufreq_index = stats.index('Bzy_MHz') - cpufreq = cpustats.setdefault('cpufreq', {'all': []}) - - # Optional parameters. - cputemp_index = -1 - if 'CoreTmp' in stats: - cputemp_index = stats.index('CoreTmp') - cputemp = cpustats.setdefault('cputemp', {'all': []}) - - # Parse data starting from the second line ignoring repeating headers. - for st in read_data[1:]: - # Data represented by int or float separated by spaces. - numbers = st.split() - if not all(word.replace('.', '', 1).isdigit() for word in numbers[1:]): - # Skip the line if data mismatch. - continue - if numbers[cpu_index] != '-': - # Ignore Core-specific statistics which starts with Core number. - # Combined statistics for all core has "-" CPU identifier. - continue - - cpufreq['all'].append(int(numbers[cpufreq_index])) - if cputemp_index != -1: - cputemp['all'].append(int(numbers[cputemp_index])) - return cpustats - - def ProcessTopResults(self): - """Given self.top_log_file process top log data. 
- - Returns: - List of dictionaries with the following keyvals: - 'cmd': command name (string), - 'cpu_use_avg': average cpu usage (float), - 'count': number of occurrences (int), - 'top5_cpu_use': up to 5 highest cpu usages (descending list of floats) - - Example of the top log: - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 4102 chronos 12 -8 3454472 238300 118188 R 41.8 6.1 0:08.37 chrome - 375 root 0 -20 0 0 0 S 5.9 0.0 0:00.17 kworker - 617 syslog 20 0 25332 8372 7888 S 5.9 0.2 0:00.77 systemd - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 5745 chronos 20 0 5438580 139328 67988 R 122.8 3.6 0:04.26 chrome - 912 root -51 0 0 0 0 S 2.0 0.0 0:01.04 irq/cro - 121 root 20 0 0 0 0 S 1.0 0.0 0:00.45 spi5 + This class manages what exactly is stored inside the cache without knowing + what the key of the cache is. For runs with perf, it stores perf.data, + perf.report, etc. The key generation is handled by the ResultsCache class. """ - all_data = '' - with open(self.top_log_file) as f: - all_data = f.read() - if not all_data: - self._logger.LogOutput('WARNING: Top log file is empty.') - return [] - - top_line_regex = re.compile( - r""" + def __init__(self, logger, label, log_level, machine, cmd_exec=None): + self.chromeos_root = label.chromeos_root + self._logger = logger + self.ce = cmd_exec or command_executer.GetCommandExecuter( + self._logger, log_level=log_level + ) + self.temp_dir = None + self.label = label + self.results_dir = None + self.log_level = log_level + self.machine = machine + self.perf_data_files = [] + self.perf_report_files = [] + self.results_file = [] + self.turbostat_log_file = "" + self.cpustats_log_file = "" + self.cpuinfo_file = "" + self.top_log_file = "" + self.wait_time_log_file = "" + self.chrome_version = "" + self.err = None + self.chroot_results_dir = "" + self.test_name = "" + self.keyvals = None + self.board = None + self.suite = None + self.cwp_dso = "" + self.retval = None + self.out = None + self.top_cmds = [] + + def GetTopCmds(self): + """Get the list of top commands consuming CPU on the machine.""" + return self.top_cmds + + def FormatStringTopCommands(self): + """Get formatted string of top commands. + + Get the formatted string with top commands consuming CPU on DUT machine. + Number of "non-chrome" processes in the list is limited to 5. + """ + format_list = [ + "Top commands with highest CPU usage:", + # Header. + "%20s %9s %6s %s" % ("COMMAND", "AVG CPU%", "COUNT", "HIGHEST 5"), + "-" * 50, + ] + if self.top_cmds: + # After switching to top processes we have to expand the list since there + # will be a lot of 'chrome' processes (up to 10, sometimes more) in the + # top. + # Let's limit the list size by the number of non-chrome processes. 
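
[Illustrative note, not part of this patch: a rough sketch of what the cap on non-chrome rows does, with invented top_cmds entries (the real list is built elsewhere in this class):]

    top_cmds = [
        {"cmd": "chrome", "cpu_use_avg": 96.3, "count": 8, "top5_cpu_use": [99.8]},
        {"cmd": "chrome", "cpu_use_avg": 41.8, "count": 6, "top5_cpu_use": [45.2]},
        {"cmd": "irq/cro", "cpu_use_avg": 2.0, "count": 3, "top5_cpu_use": [2.5]},
    ]
    non_chrome_printed = 0
    for topcmd in top_cmds:
        print("%20s %9.2f %6s %s" % (topcmd["cmd"], topcmd["cpu_use_avg"],
                                     topcmd["count"], topcmd["top5_cpu_use"]))
        # Chrome rows never count against the limit of 5 non-chrome rows.
        if not topcmd["cmd"].startswith("chrome"):
            non_chrome_printed += 1
            if non_chrome_printed >= 5:
                break
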
+ limit_of_non_chrome_procs = 5 + num_of_non_chrome_procs = 0 + for topcmd in self.top_cmds: + print_line = "%20s %9.2f %6s %s" % ( + topcmd["cmd"], + topcmd["cpu_use_avg"], + topcmd["count"], + topcmd["top5_cpu_use"], + ) + format_list.append(print_line) + if not topcmd["cmd"].startswith("chrome"): + num_of_non_chrome_procs += 1 + if num_of_non_chrome_procs >= limit_of_non_chrome_procs: + break + else: + format_list.append("[NO DATA FROM THE TOP LOG]") + format_list.append("-" * 50) + return "\n".join(format_list) + + def CopyFilesTo(self, dest_dir, files_to_copy): + file_index = 0 + for file_to_copy in files_to_copy: + if not os.path.isdir(dest_dir): + command = "mkdir -p %s" % dest_dir + self.ce.RunCommand(command) + dest_file = os.path.join( + dest_dir, + ("%s.%s" % (os.path.basename(file_to_copy), file_index)), + ) + ret = self.ce.CopyFiles(file_to_copy, dest_file, recursive=False) + if ret: + raise IOError("Could not copy results file: %s" % file_to_copy) + file_index += 1 + + def CopyResultsTo(self, dest_dir): + self.CopyFilesTo(dest_dir, self.results_file) + self.CopyFilesTo(dest_dir, self.perf_data_files) + self.CopyFilesTo(dest_dir, self.perf_report_files) + extra_files = [] + if self.top_log_file: + extra_files.append(self.top_log_file) + if self.cpuinfo_file: + extra_files.append(self.cpuinfo_file) + if extra_files: + self.CopyFilesTo(dest_dir, extra_files) + if self.results_file or self.perf_data_files or self.perf_report_files: + self._logger.LogOutput("Results files stored in %s." % dest_dir) + + def CompressResultsTo(self, dest_dir): + tarball = os.path.join(self.results_dir, RESULTS_TARBALL) + # Test_that runs hold all output under TEST_NAME_HASHTAG/results/, + # while tast runs hold output under TEST_NAME/. + # Both ensure to be unique. + result_dir_name = self.test_name if self.suite == "tast" else "results" + results_dir = self.FindFilesInResultsDir( + "-name %s" % result_dir_name + ).split("\n")[0] + + if not results_dir: + self._logger.LogOutput( + "WARNING: No results dir matching %r found" % result_dir_name + ) + return + + self.CreateTarball(results_dir, tarball) + self.CopyFilesTo(dest_dir, [tarball]) + if results_dir: + self._logger.LogOutput( + "Results files compressed into %s." % dest_dir + ) + + def GetNewKeyvals(self, keyvals_dict): + # Initialize 'units' dictionary. + units_dict = {} + for k in keyvals_dict: + units_dict[k] = "" + results_files = self.GetDataMeasurementsFiles() + for f in results_files: + # Make sure we can find the results file + if os.path.exists(f): + data_filename = f + else: + # Otherwise get the base filename and create the correct + # path for it. 
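
[Illustrative note, not part of this patch: tempfile.mkdtemp() returns an absolute path, and os.path.join discards every component that precedes an absolute one, so when self.temp_dir is absolute the chromeos_root and "chroot/tmp" components in the join below are effectively ignored. A quick demonstration with made-up paths:]

    import os

    print(os.path.join(
        "/home/me/chromeos", "chroot/tmp",
        "/home/me/chromeos/chroot/tmp/tmpXYZ",  # absolute: wins the join
        "results-chart.json"))
    # -> /home/me/chromeos/chroot/tmp/tmpXYZ/results-chart.json
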
+ _, f_base = misc.GetRoot(f) + data_filename = os.path.join( + self.chromeos_root, "chroot/tmp", self.temp_dir, f_base + ) + if data_filename.find(".json") > 0: + raw_dict = dict() + if os.path.exists(data_filename): + with open(data_filename, "r") as data_file: + raw_dict = json.load(data_file) + + if "charts" in raw_dict: + raw_dict = raw_dict["charts"] + for k1 in raw_dict: + field_dict = raw_dict[k1] + for k2 in field_dict: + result_dict = field_dict[k2] + key = k1 + "__" + k2 + if "value" in result_dict: + keyvals_dict[key] = result_dict["value"] + elif "values" in result_dict: + values = result_dict["values"] + if ( + "type" in result_dict + and result_dict["type"] + == "list_of_scalar_values" + and values + and values != "null" + ): + keyvals_dict[key] = sum(values) / float( + len(values) + ) + else: + keyvals_dict[key] = values + units_dict[key] = result_dict["units"] + else: + if os.path.exists(data_filename): + with open(data_filename, "r") as data_file: + lines = data_file.readlines() + for line in lines: + tmp_dict = json.loads(line) + graph_name = tmp_dict["graph"] + graph_str = ( + (graph_name + "__") if graph_name else "" + ) + key = graph_str + tmp_dict["description"] + keyvals_dict[key] = tmp_dict["value"] + units_dict[key] = tmp_dict["units"] + + return keyvals_dict, units_dict + + def AppendTelemetryUnits(self, keyvals_dict, units_dict): + """keyvals_dict is the dict of key-value used to generate Crosperf reports. + + units_dict is a dictionary of the units for the return values in + keyvals_dict. We need to associate the units with the return values, + for Telemetry tests, so that we can include the units in the reports. + This function takes each value in keyvals_dict, finds the corresponding + unit in the units_dict, and replaces the old value with a list of the + old value and the units. This later gets properly parsed in the + ResultOrganizer class, for generating the reports. + """ + + results_dict = {} + for k in keyvals_dict: + # We don't want these lines in our reports; they add no useful data. + if not k or k == "telemetry_Crosperf": + continue + val = keyvals_dict[k] + units = units_dict[k] + new_val = [val, units] + results_dict[k] = new_val + return results_dict + + def GetKeyvals(self): + results_in_chroot = os.path.join(self.chromeos_root, "chroot", "tmp") + if not self.temp_dir: + self.temp_dir = tempfile.mkdtemp(dir=results_in_chroot) + command = f"cp -r {self.results_dir}/* {self.temp_dir}" + self.ce.RunCommand(command, print_to_console=False) + + command = "./generate_test_report --no-color --csv %s" % ( + os.path.join("/tmp", os.path.basename(self.temp_dir)) + ) + _, out, _ = self.ce.ChrootRunCommandWOutput( + self.chromeos_root, command, print_to_console=False + ) + keyvals_dict = {} + tmp_dir_in_chroot = misc.GetInsideChrootPath( + self.chromeos_root, self.temp_dir + ) + for line in out.splitlines(): + tokens = re.split("=|,", line) + key = tokens[-2] + if key.startswith(tmp_dir_in_chroot): + key = key[len(tmp_dir_in_chroot) + 1 :] + value = tokens[-1] + keyvals_dict[key] = value + + # Check to see if there is a perf_measurements file and get the + # data from it if so. + keyvals_dict, units_dict = self.GetNewKeyvals(keyvals_dict) + if self.suite == "telemetry_Crosperf": + # For telemtry_Crosperf results, append the units to the return + # results, for use in generating the reports. 
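
[Illustrative note, not part of this patch: AppendTelemetryUnits, called below, pairs each keyval with its unit so the report generator can render both. A minimal sketch of that transformation with hypothetical data:]

    keyvals_dict = {"milliseconds_1": "3", "telemetry_Crosperf": "PASS"}
    units_dict = {"milliseconds_1": "ms", "telemetry_Crosperf": ""}
    results_dict = {
        k: [v, units_dict[k]]
        for k, v in keyvals_dict.items()
        # Rows without a key, and the suite marker, add no useful data.
        if k and k != "telemetry_Crosperf"
    }
    print(results_dict)  # {'milliseconds_1': ['3', 'ms']}
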
+ keyvals_dict = self.AppendTelemetryUnits(keyvals_dict, units_dict) + return keyvals_dict + + def GetSamples(self): + actual_samples = 0 + for perf_data_file in self.perf_data_files: + chroot_perf_data_file = misc.GetInsideChrootPath( + self.chromeos_root, perf_data_file + ) + perf_path = os.path.join( + self.chromeos_root, "chroot", "usr/bin/perf" + ) + perf_file = "/usr/sbin/perf" + if os.path.exists(perf_path): + perf_file = "/usr/bin/perf" + + # For each perf.data, we want to collect sample count for specific DSO. + # We specify exact match for known DSO type, and every sample for `all`. + exact_match = "" + if self.cwp_dso == "all": + exact_match = '""' + elif self.cwp_dso == "chrome": + exact_match = '" chrome "' + elif self.cwp_dso == "kallsyms": + exact_match = '"[kernel.kallsyms]"' + else: + # This will need to be updated once there are more DSO types supported, + # if user want an exact match for the field they want. + exact_match = '"%s"' % self.cwp_dso + + command = "%s report -n -s dso -i %s 2> /dev/null | grep %s" % ( + perf_file, + chroot_perf_data_file, + exact_match, + ) + _, result, _ = self.ce.ChrootRunCommandWOutput( + self.chromeos_root, command + ) + # Accumulate the sample count for all matched fields. + # Each line looks like this: + # 45.42% 237210 chrome + # And we want the second number which is the sample count. + samples = 0 + try: + for line in result.split("\n"): + attr = line.split() + if len(attr) == 3 and "%" in attr[0]: + samples += int(attr[1]) + except: + raise RuntimeError("Cannot parse perf dso result") + + actual_samples += samples + + # Remove idle cycles from the accumulated sample count. + perf_report_file = f"{perf_data_file}.report" + if not os.path.exists(perf_report_file): + raise RuntimeError( + f"Missing perf report file: {perf_report_file}" + ) + + idle_functions = { + "[kernel.kallsyms]": ( + "intel_idle", + "arch_cpu_idle", + "intel_idle", + "cpu_startup_entry", + "default_idle", + "cpu_idle_loop", + "do_idle", + ), + } + idle_samples = 0 + + with open(perf_report_file) as f: + try: + for line in f: + line = line.strip() + if not line or line[0] == "#": + continue + # Each line has the following fields, + # pylint: disable=line-too-long + # Overhead Samples Command Shared Object Symbol + # pylint: disable=line-too-long + # 1.48% 60 swapper [kernel.kallsyms] [k] intel_idle + # pylint: disable=line-too-long + # 0.00% 1 shill libshill-net.so [.] 
std::__1::vector<unsigned char, std::__1::allocator<unsigned char> >::vector<unsigned char const*> + _, samples, _, dso, _, function = line.split(None, 5) + + if ( + dso in idle_functions + and function in idle_functions[dso] + ): + if self.log_level != "verbose": + self._logger.LogOutput( + "Removing %s samples from %s in %s" + % (samples, function, dso) + ) + idle_samples += int(samples) + except: + raise RuntimeError("Cannot parse perf report") + actual_samples -= idle_samples + return [actual_samples, "samples"] + + def GetResultsDir(self): + if self.suite == "tast": + mo = re.search(r"Writing results to (\S+)", self.out) + else: + mo = re.search(r"Results placed in (\S+)", self.out) + if mo: + result = mo.group(1) + return result + raise RuntimeError("Could not find results directory.") + + def FindFilesInResultsDir(self, find_args): + if not self.results_dir: + return "" + + command = "find %s %s" % (self.results_dir, find_args) + ret, out, _ = self.ce.RunCommandWOutput(command, print_to_console=False) + if ret: + raise RuntimeError("Could not run find command!") + return out + + def GetResultsFile(self): + if self.suite == "telemetry_Crosperf": + return self.FindFilesInResultsDir( + "-name histograms.json" + ).splitlines() + return self.FindFilesInResultsDir( + "-name results-chart.json" + ).splitlines() + + def GetPerfDataFiles(self): + return self.FindFilesInResultsDir("-name perf.data").splitlines() + + def GetPerfReportFiles(self): + return self.FindFilesInResultsDir("-name perf.data.report").splitlines() + + def GetDataMeasurementsFiles(self): + result = self.FindFilesInResultsDir( + "-name perf_measurements" + ).splitlines() + if not result: + if self.suite == "telemetry_Crosperf": + result = self.FindFilesInResultsDir( + "-name histograms.json" + ).splitlines() + else: + result = self.FindFilesInResultsDir( + "-name results-chart.json" + ).splitlines() + return result + + def GetTurbostatFile(self): + """Get turbostat log path string.""" + return self.FindFilesInResultsDir("-name turbostat.log").split("\n")[0] + + def GetCpustatsFile(self): + """Get cpustats log path string.""" + return self.FindFilesInResultsDir("-name cpustats.log").split("\n")[0] + + def GetCpuinfoFile(self): + """Get cpustats log path string.""" + return self.FindFilesInResultsDir("-name cpuinfo.log").split("\n")[0] + + def GetTopFile(self): + """Get cpustats log path string.""" + return self.FindFilesInResultsDir("-name top.log").split("\n")[0] + + def GetWaitTimeFile(self): + """Get wait time log path string.""" + return self.FindFilesInResultsDir("-name wait_time.log").split("\n")[0] + + def _CheckDebugPath(self, option, path): + relative_path = path[1:] + out_chroot_path = os.path.join( + self.chromeos_root, "chroot", relative_path + ) + if os.path.exists(out_chroot_path): + if option == "kallsyms": + path = os.path.join(path, "System.map-*") + return "--" + option + " " + path + else: + print( + "** WARNING **: --%s option not applied, %s does not exist" + % (option, out_chroot_path) + ) + return "" + + def GeneratePerfReportFiles(self): + perf_report_files = [] + for perf_data_file in self.perf_data_files: + # Generate a perf.report and store it side-by-side with the perf.data + # file. 
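
[Illustrative note, not part of this patch: the side-by-side naming convention used below is simply the perf.data path plus a ".report" suffix, and generation refuses to clobber an existing report. Paths here are made up:]

    perf_data_file = "/tmp/test_that_results/perf.data"
    perf_report_file = "%s.report" % perf_data_file
    print(perf_report_file)  # /tmp/test_that_results/perf.data.report
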
+ chroot_perf_data_file = misc.GetInsideChrootPath( + self.chromeos_root, perf_data_file + ) + perf_report_file = "%s.report" % perf_data_file + if os.path.exists(perf_report_file): + raise RuntimeError( + "Perf report file already exists: %s" % perf_report_file + ) + chroot_perf_report_file = misc.GetInsideChrootPath( + self.chromeos_root, perf_report_file + ) + perf_path = os.path.join( + self.chromeos_root, "chroot", "usr/bin/perf" + ) + + perf_file = "/usr/sbin/perf" + if os.path.exists(perf_path): + perf_file = "/usr/bin/perf" + + debug_path = self.label.debug_path + + if debug_path: + symfs = "--symfs " + debug_path + vmlinux = "--vmlinux " + os.path.join( + debug_path, "usr", "lib", "debug", "boot", "vmlinux" + ) + kallsyms = "" + print( + "** WARNING **: --kallsyms option not applied, no System.map-* " + "for downloaded image." + ) + else: + if self.label.image_type != "local": + print( + "** WARNING **: Using local debug info in /build, this may " + "not match the downloaded image." + ) + build_path = os.path.join("/build", self.board) + symfs = self._CheckDebugPath("symfs", build_path) + vmlinux_path = os.path.join( + build_path, "usr/lib/debug/boot/vmlinux" + ) + vmlinux = self._CheckDebugPath("vmlinux", vmlinux_path) + kallsyms_path = os.path.join(build_path, "boot") + kallsyms = self._CheckDebugPath("kallsyms", kallsyms_path) + + command = "%s report -n %s %s %s -i %s --stdio > %s" % ( + perf_file, + symfs, + vmlinux, + kallsyms, + chroot_perf_data_file, + chroot_perf_report_file, + ) + if self.log_level != "verbose": + self._logger.LogOutput( + "Generating perf report...\nCMD: %s" % command + ) + exit_code = self.ce.ChrootRunCommand(self.chromeos_root, command) + if exit_code == 0: + if self.log_level != "verbose": + self._logger.LogOutput( + "Perf report generated successfully." + ) + else: + raise RuntimeError( + "Perf report not generated correctly. CMD: %s" % command + ) + + # Add a keyval to the dictionary for the events captured. + perf_report_files.append( + misc.GetOutsideChrootPath( + self.chromeos_root, chroot_perf_report_file + ) + ) + return perf_report_files + + def GatherPerfResults(self): + report_id = 0 + for perf_report_file in self.perf_report_files: + with open(perf_report_file, "r") as f: + report_contents = f.read() + for group in re.findall( + r"Events: (\S+) (\S+)", report_contents + ): + num_events = group[0] + event_name = group[1] + key = "perf_%s_%s" % (report_id, event_name) + value = str(misc.UnitToNumber(num_events)) + self.keyvals[key] = value + + def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso): + self.board = self.label.board + self.out = out + self.err = err + self.retval = retval + self.test_name = test + self.suite = suite + self.cwp_dso = cwp_dso + self.chroot_results_dir = self.GetResultsDir() + self.results_dir = misc.GetOutsideChrootPath( + self.chromeos_root, self.chroot_results_dir + ) + self.results_file = self.GetResultsFile() + self.perf_data_files = self.GetPerfDataFiles() + # Include all perf.report data in table. + self.perf_report_files = self.GeneratePerfReportFiles() + self.turbostat_log_file = self.GetTurbostatFile() + self.cpustats_log_file = self.GetCpustatsFile() + self.cpuinfo_file = self.GetCpuinfoFile() + self.top_log_file = self.GetTopFile() + self.wait_time_log_file = self.GetWaitTimeFile() + # TODO(asharif): Do something similar with perf stat. + + # Grab keyvals from the directory. 
+        self.ProcessResults()
+
+    def ProcessChartResults(self):
+        # Open and parse the json results file generated by telemetry/test_that.
+        if not self.results_file:
+            raise IOError("No results file found.")
+        filename = self.results_file[0]
+        if not filename.endswith(".json"):
+            raise IOError(
+                "Attempt to call json on non-json file: %s" % filename
+            )
+        if not os.path.exists(filename):
+            raise IOError("%s does not exist" % filename)
+
+        keyvals = {}
+        with open(filename, "r") as f:
+            raw_dict = json.load(f)
+            if "charts" in raw_dict:
+                raw_dict = raw_dict["charts"]
+            for k, field_dict in raw_dict.items():
+                for item in field_dict:
+                    keyname = k + "__" + item
+                    value_dict = field_dict[item]
+                    if "value" in value_dict:
+                        result = value_dict["value"]
+                    elif "values" in value_dict:
+                        values = value_dict["values"]
+                        if not values:
+                            continue
+                        if (
+                            "type" in value_dict
+                            and value_dict["type"] == "list_of_scalar_values"
+                            and values != "null"
+                        ):
+                            result = sum(values) / float(len(values))
+                        else:
+                            result = values
+                    else:
+                        continue
+                    units = value_dict["units"]
+                    new_value = [result, units]
+                    keyvals[keyname] = new_value
+        return keyvals
+
+    def ProcessTurbostatResults(self):
+        """Given a non-null turbostat_log_file, parse cpu stats from the file.
+
+        Returns:
+            Dictionary of 'cpufreq', 'cputemp' where each
+            includes dictionary 'all': [list_of_values]
+
+        Example of turbostat_log output:
+        ----------------------
+        CPU     Avg_MHz Busy%   Bzy_MHz TSC_MHz IRQ     CoreTmp
+        -       329     12.13   2723    2393    10975   77
+        0       336     12.41   2715    2393    6328    77
+        2       323     11.86   2731    2393    4647    69
+        CPU     Avg_MHz Busy%   Bzy_MHz TSC_MHz IRQ     CoreTmp
+        -       1940    67.46   2884    2393    39920   83
+        0       1827    63.70   2877    2393    21184   83
+        """
+        cpustats = {}
+        read_data = ""
+        with open(self.turbostat_log_file) as f:
+            read_data = f.readlines()
+
+        if not read_data:
+            self._logger.LogOutput("WARNING: Turbostat output file is empty.")
+            return {}
+
+        # First line always contains the header.
+        stats = read_data[0].split()
+
+        # Mandatory parameters.
+        if "CPU" not in stats:
+            self._logger.LogOutput(
+                "WARNING: Missing data for CPU# in Turbostat output."
+            )
+            return {}
+        if "Bzy_MHz" not in stats:
+            self._logger.LogOutput(
+                "WARNING: Missing data for Bzy_MHz in Turbostat output."
+            )
+            return {}
+        cpu_index = stats.index("CPU")
+        cpufreq_index = stats.index("Bzy_MHz")
+        cpufreq = cpustats.setdefault("cpufreq", {"all": []})
+
+        # Optional parameters.
+        cputemp_index = -1
+        if "CoreTmp" in stats:
+            cputemp_index = stats.index("CoreTmp")
+            cputemp = cpustats.setdefault("cputemp", {"all": []})
+
+        # Parse data starting from the second line, ignoring repeated headers.
+        for st in read_data[1:]:
+            # Data are ints or floats separated by spaces.
+            numbers = st.split()
+            if not all(
+                word.replace(".", "", 1).isdigit() for word in numbers[1:]
+            ):
+                # Skip the line if the data doesn't match that format.
+                continue
+            if numbers[cpu_index] != "-":
+                # Ignore core-specific statistics, which start with the core
+                # number. Combined statistics for all cores use the "-" CPU
+                # identifier.
+                continue
+
+            cpufreq["all"].append(int(numbers[cpufreq_index]))
+            if cputemp_index != -1:
+                cputemp["all"].append(int(numbers[cputemp_index]))
+        return cpustats
+
+    def ProcessTopResults(self):
+        """Given self.top_log_file, process top log data.
+ + Returns: + List of dictionaries with the following keyvals: + 'cmd': command name (string), + 'cpu_use_avg': average cpu usage (float), + 'count': number of occurrences (int), + 'top5_cpu_use': up to 5 highest cpu usages (descending list of floats) + + Example of the top log: + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 4102 chronos 12 -8 3454472 238300 118188 R 41.8 6.1 0:08.37 chrome + 375 root 0 -20 0 0 0 S 5.9 0.0 0:00.17 kworker + 617 syslog 20 0 25332 8372 7888 S 5.9 0.2 0:00.77 systemd + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 5745 chronos 20 0 5438580 139328 67988 R 122.8 3.6 0:04.26 chrome + 912 root -51 0 0 0 0 S 2.0 0.0 0:01.04 irq/cro + 121 root 20 0 0 0 0 S 1.0 0.0 0:00.45 spi5 + """ + all_data = "" + with open(self.top_log_file) as f: + all_data = f.read() + + if not all_data: + self._logger.LogOutput("WARNING: Top log file is empty.") + return [] + + top_line_regex = re.compile( + r""" ^\s*(?P<pid>\d+)\s+ # Group 1: PID \S+\s+\S+\s+-?\d+\s+ # Ignore: user, prio, nice \d+\s+\d+\s+\d+\s+ # Ignore: virt/res/shared mem @@ -663,814 +750,922 @@ class Result(object): (?P<cpu_use>\d+\.\d+)\s+ # Group 2: CPU usage \d+\.\d+\s+\d+:\d+\.\d+\s+ # Ignore: mem usage, time (?P<cmd>\S+)$ # Group 3: command - """, re.VERBOSE) - # Page represents top log data per one measurement within time interval - # 'top_interval'. - # Pages separated by empty line. - pages = all_data.split('\n\n') - # Snapshots are structured representation of the pages. - snapshots = [] - for page in pages: - if not page: - continue - - # Snapshot list will contain all processes (command duplicates are - # allowed). - snapshot = [] - for line in page.splitlines(): - match = top_line_regex.match(line) - if match: - # Top line is valid, collect data. - process = { - # NOTE: One command may be represented by multiple processes. - 'cmd': match.group('cmd'), - 'pid': match.group('pid'), - 'cpu_use': float(match.group('cpu_use')), - } - - # Filter out processes with 0 CPU usage and top command. - if process['cpu_use'] > 0 and process['cmd'] != 'top': - snapshot.append(process) - - # If page contained meaningful data add snapshot to the list. - if snapshot: - snapshots.append(snapshot) - - # Define threshold of CPU usage when Chrome is busy, i.e. benchmark is - # running. - # Ideally it should be 100% but it will be hardly reachable with 1 core. - # Statistics on DUT with 2-6 cores shows that chrome load of 100%, 95% and - # 90% equally occurs in 72-74% of all top log snapshots. - # Further decreasing of load threshold leads to a shifting percent of - # "high load" snapshots which might include snapshots when benchmark is - # not running. - # On 1-core DUT 90% chrome cpu load occurs in 55%, 95% in 33% and 100% in 2% - # of snapshots accordingly. - # Threshold of "high load" is reduced to 70% (from 90) when we switched to - # topstats per process. From experiment data the rest 20% are distributed - # among other chrome processes. - CHROME_HIGH_CPU_LOAD = 70 - # Number of snapshots where chrome is heavily used. - high_load_snapshots = 0 - # Total CPU use per process in ALL active snapshots. - cmd_total_cpu_use = collections.defaultdict(float) - # Top CPU usages per command. - cmd_top5_cpu_use = collections.defaultdict(list) - # List of Top Commands to be returned. - topcmds = [] - - for snapshot_processes in snapshots: - # CPU usage per command, per PID in one snapshot. 
- cmd_cpu_use_per_snapshot = collections.defaultdict(dict) - for process in snapshot_processes: - cmd = process['cmd'] - cpu_use = process['cpu_use'] - pid = process['pid'] - cmd_cpu_use_per_snapshot[cmd][pid] = cpu_use - - # Chrome processes, pid: cpu_usage. - chrome_processes = cmd_cpu_use_per_snapshot.get('chrome', {}) - chrome_cpu_use_list = chrome_processes.values() - - if chrome_cpu_use_list and max( - chrome_cpu_use_list) > CHROME_HIGH_CPU_LOAD: - # CPU usage of any of the "chrome" processes exceeds "High load" - # threshold which means DUT is busy running a benchmark. - high_load_snapshots += 1 - for cmd, cpu_use_per_pid in cmd_cpu_use_per_snapshot.items(): - for pid, cpu_use in cpu_use_per_pid.items(): - # Append PID to the name of the command. - cmd_with_pid = cmd + '-' + pid - cmd_total_cpu_use[cmd_with_pid] += cpu_use - - # Add cpu_use into command top cpu usages, sorted in descending - # order. - heapq.heappush(cmd_top5_cpu_use[cmd_with_pid], round(cpu_use, 1)) - - for consumer, usage in sorted(cmd_total_cpu_use.items(), - key=lambda x: x[1], - reverse=True): - # Iterate through commands by descending order of total CPU usage. - topcmd = { - 'cmd': consumer, - 'cpu_use_avg': usage / high_load_snapshots, - 'count': len(cmd_top5_cpu_use[consumer]), - 'top5_cpu_use': heapq.nlargest(5, cmd_top5_cpu_use[consumer]), - } - topcmds.append(topcmd) - - return topcmds - - def ProcessCpustatsResults(self): - """Given cpustats_log_file non-null parse cpu data from file. - - Returns: - Dictionary of 'cpufreq', 'cputemp' where each - includes dictionary of parameter: [list_of_values] - - Example of cpustats.log output. - ---------------------- - /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000 - /sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 2016000 - little-cpu 41234 - big-cpu 51234 - - If cores share the same policy their frequencies may always match - on some devices. - To make report concise we should eliminate redundancy in the output. - Function removes cpuN data if it duplicates data from other cores. - """ - - cpustats = {} - read_data = '' - with open(self.cpustats_log_file) as f: - read_data = f.readlines() - - if not read_data: - self._logger.LogOutput('WARNING: Cpustats output file is empty.') - return {} - - cpufreq_regex = re.compile(r'^[/\S]+/(cpu\d+)/[/\S]+\s+(\d+)$') - cputemp_regex = re.compile(r'^([^/\s]+)\s+(\d+)$') - - for st in read_data: - match = cpufreq_regex.match(st) - if match: - cpu = match.group(1) - # CPU frequency comes in kHz. - freq_khz = int(match.group(2)) - freq_mhz = freq_khz / 1000 - # cpufreq represents a dictionary with CPU frequency-related - # data from cpustats.log. - cpufreq = cpustats.setdefault('cpufreq', {}) - cpu_n_freq = cpufreq.setdefault(cpu, []) - cpu_n_freq.append(freq_mhz) - else: - match = cputemp_regex.match(st) - if match: - therm_type = match.group(1) - # The value is int, uCelsius unit. - temp_uc = float(match.group(2)) - # Round to XX.X float. - temp_c = round(temp_uc / 1000, 1) - # cputemp represents a dictionary with temperature measurements - # from cpustats.log. - cputemp = cpustats.setdefault('cputemp', {}) - therm_type = cputemp.setdefault(therm_type, []) - therm_type.append(temp_c) - - # Remove duplicate statistics from cpustats. - pruned_stats = {} - for cpukey, cpuparam in cpustats.items(): - # Copy 'cpufreq' and 'cputemp'. - pruned_params = pruned_stats.setdefault(cpukey, {}) - for paramkey, paramvalue in sorted(cpuparam.items()): - # paramvalue is list of all measured data. 
- if paramvalue not in pruned_params.values(): - pruned_params[paramkey] = paramvalue - - return pruned_stats - - def ProcessHistogramsResults(self): - # Open and parse the json results file generated by telemetry/test_that. - if not self.results_file: - raise IOError('No results file found.') - filename = self.results_file[0] - if not filename.endswith('.json'): - raise IOError('Attempt to call json on non-json file: %s' % filename) - if not os.path.exists(filename): - raise IOError('%s does not exist' % filename) - - keyvals = {} - with open(filename) as f: - histograms = json.load(f) - value_map = {} - # Gets generic set values. - for obj in histograms: - if 'type' in obj and obj['type'] == 'GenericSet': - value_map[obj['guid']] = obj['values'] - - for obj in histograms: - if 'name' not in obj or 'sampleValues' not in obj: - continue - metric_name = obj['name'] - vals = obj['sampleValues'] - if isinstance(vals, list): - # Remove None elements from the list - vals = [val for val in vals if val is not None] - if vals: + """, + re.VERBOSE, + ) + # Page represents top log data per one measurement within time interval + # 'top_interval'. + # Pages separated by empty line. + pages = all_data.split("\n\n") + # Snapshots are structured representation of the pages. + snapshots = [] + for page in pages: + if not page: + continue + + # Snapshot list will contain all processes (command duplicates are + # allowed). + snapshot = [] + for line in page.splitlines(): + match = top_line_regex.match(line) + if match: + # Top line is valid, collect data. + process = { + # NOTE: One command may be represented by multiple processes. + "cmd": match.group("cmd"), + "pid": match.group("pid"), + "cpu_use": float(match.group("cpu_use")), + } + + # Filter out processes with 0 CPU usage and top command. + if process["cpu_use"] > 0 and process["cmd"] != "top": + snapshot.append(process) + + # If page contained meaningful data add snapshot to the list. + if snapshot: + snapshots.append(snapshot) + + # Define threshold of CPU usage when Chrome is busy, i.e. benchmark is + # running. + # Ideally it should be 100% but it will be hardly reachable with 1 core. + # Statistics on DUT with 2-6 cores shows that chrome load of 100%, 95% and + # 90% equally occurs in 72-74% of all top log snapshots. + # Further decreasing of load threshold leads to a shifting percent of + # "high load" snapshots which might include snapshots when benchmark is + # not running. + # On 1-core DUT 90% chrome cpu load occurs in 55%, 95% in 33% and 100% in 2% + # of snapshots accordingly. + # Threshold of "high load" is reduced to 70% (from 90) when we switched to + # topstats per process. From experiment data the rest 20% are distributed + # among other chrome processes. + CHROME_HIGH_CPU_LOAD = 70 + # Number of snapshots where chrome is heavily used. + high_load_snapshots = 0 + # Total CPU use per process in ALL active snapshots. + cmd_total_cpu_use = collections.defaultdict(float) + # Top CPU usages per command. + cmd_top5_cpu_use = collections.defaultdict(list) + # List of Top Commands to be returned. + topcmds = [] + + for snapshot_processes in snapshots: + # CPU usage per command, per PID in one snapshot. + cmd_cpu_use_per_snapshot = collections.defaultdict(dict) + for process in snapshot_processes: + cmd = process["cmd"] + cpu_use = process["cpu_use"] + pid = process["pid"] + cmd_cpu_use_per_snapshot[cmd][pid] = cpu_use + + # Chrome processes, pid: cpu_usage. 
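+            # e.g. {"4102": 41.8, "5745": 122.8}, using the PIDs and %CPU
+            # values from the docstring example above.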
+ chrome_processes = cmd_cpu_use_per_snapshot.get("chrome", {}) + chrome_cpu_use_list = chrome_processes.values() + + if ( + chrome_cpu_use_list + and max(chrome_cpu_use_list) > CHROME_HIGH_CPU_LOAD + ): + # CPU usage of any of the "chrome" processes exceeds "High load" + # threshold which means DUT is busy running a benchmark. + high_load_snapshots += 1 + for cmd, cpu_use_per_pid in cmd_cpu_use_per_snapshot.items(): + for pid, cpu_use in cpu_use_per_pid.items(): + # Append PID to the name of the command. + cmd_with_pid = cmd + "-" + pid + cmd_total_cpu_use[cmd_with_pid] += cpu_use + + # Add cpu_use into command top cpu usages, sorted in descending + # order. + heapq.heappush( + cmd_top5_cpu_use[cmd_with_pid], round(cpu_use, 1) + ) + + for consumer, usage in sorted( + cmd_total_cpu_use.items(), key=lambda x: x[1], reverse=True + ): + # Iterate through commands by descending order of total CPU usage. + topcmd = { + "cmd": consumer, + "cpu_use_avg": usage / high_load_snapshots, + "count": len(cmd_top5_cpu_use[consumer]), + "top5_cpu_use": heapq.nlargest(5, cmd_top5_cpu_use[consumer]), + } + topcmds.append(topcmd) + + return topcmds + + def ProcessCpustatsResults(self): + """Given cpustats_log_file non-null parse cpu data from file. + + Returns: + Dictionary of 'cpufreq', 'cputemp' where each + includes dictionary of parameter: [list_of_values] + + Example of cpustats.log output. + ---------------------- + /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000 + /sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 2016000 + little-cpu 41234 + big-cpu 51234 + + If cores share the same policy their frequencies may always match + on some devices. + To make report concise we should eliminate redundancy in the output. + Function removes cpuN data if it duplicates data from other cores. + """ + + cpustats = {} + read_data = "" + with open(self.cpustats_log_file) as f: + read_data = f.readlines() + + if not read_data: + self._logger.LogOutput("WARNING: Cpustats output file is empty.") + return {} + + cpufreq_regex = re.compile(r"^[/\S]+/(cpu\d+)/[/\S]+\s+(\d+)$") + cputemp_regex = re.compile(r"^([^/\s]+)\s+(\d+)$") + + for st in read_data: + match = cpufreq_regex.match(st) + if match: + cpu = match.group(1) + # CPU frequency comes in kHz. + freq_khz = int(match.group(2)) + freq_mhz = freq_khz / 1000 + # cpufreq represents a dictionary with CPU frequency-related + # data from cpustats.log. + cpufreq = cpustats.setdefault("cpufreq", {}) + cpu_n_freq = cpufreq.setdefault(cpu, []) + cpu_n_freq.append(freq_mhz) + else: + match = cputemp_regex.match(st) + if match: + therm_type = match.group(1) + # The value is int, uCelsius unit. + temp_uc = float(match.group(2)) + # Round to XX.X float. + temp_c = round(temp_uc / 1000, 1) + # cputemp represents a dictionary with temperature measurements + # from cpustats.log. + cputemp = cpustats.setdefault("cputemp", {}) + therm_type = cputemp.setdefault(therm_type, []) + therm_type.append(temp_c) + + # Remove duplicate statistics from cpustats. + pruned_stats = {} + for cpukey, cpuparam in cpustats.items(): + # Copy 'cpufreq' and 'cputemp'. + pruned_params = pruned_stats.setdefault(cpukey, {}) + for paramkey, paramvalue in sorted(cpuparam.items()): + # paramvalue is list of all measured data. + if paramvalue not in pruned_params.values(): + pruned_params[paramkey] = paramvalue + + return pruned_stats + + def ProcessHistogramsResults(self): + # Open and parse the json results file generated by telemetry/test_that. 
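+        # Sketch of the entries consumed below (not the full histogram-set
+        # schema): GenericSet objects carry "guid" and "values", while
+        # histogram objects carry "name", "unit", "sampleValues" and
+        # "diagnostics", whose "storyTags" refers to a GenericSet guid.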
+ if not self.results_file: + raise IOError("No results file found.") + filename = self.results_file[0] + if not filename.endswith(".json"): + raise IOError( + "Attempt to call json on non-json file: %s" % filename + ) + if not os.path.exists(filename): + raise IOError("%s does not exist" % filename) + + keyvals = {} + with open(filename) as f: + histograms = json.load(f) + value_map = {} + # Gets generic set values. + for obj in histograms: + if "type" in obj and obj["type"] == "GenericSet": + value_map[obj["guid"]] = obj["values"] + + for obj in histograms: + if "name" not in obj or "sampleValues" not in obj: + continue + metric_name = obj["name"] + vals = obj["sampleValues"] + if isinstance(vals, list): + # Remove None elements from the list + vals = [val for val in vals if val is not None] + if vals: + result = float(sum(vals)) / len(vals) + else: + result = 0 + else: + result = vals + unit = obj["unit"] + diagnostics = obj["diagnostics"] + # for summaries of benchmarks + key = metric_name + if key not in keyvals: + keyvals[key] = [[result], unit] + else: + keyvals[key][0].append(result) + # TODO: do we need summaries of stories? + # for summaries of story tags + if "storyTags" in diagnostics: + guid = diagnostics["storyTags"] + if guid not in value_map: + raise RuntimeError( + "Unrecognized storyTags in %s " % (obj) + ) + for story_tag in value_map[guid]: + key = metric_name + "__" + story_tag + if key not in keyvals: + keyvals[key] = [[result], unit] + else: + keyvals[key][0].append(result) + # calculate summary + for key in keyvals: + vals = keyvals[key][0] + unit = keyvals[key][1] result = float(sum(vals)) / len(vals) - else: - result = 0 - else: - result = vals - unit = obj['unit'] - diagnostics = obj['diagnostics'] - # for summaries of benchmarks - key = metric_name - if key not in keyvals: - keyvals[key] = [[result], unit] + keyvals[key] = [result, unit] + return keyvals + + def ReadPidFromPerfData(self): + """Read PIDs from perf.data files. + + Extract PID from perf.data if "perf record" was running per process, + i.e. with "-p <PID>" and no "-a". + + Returns: + pids: list of PIDs. + + Raises: + PerfDataReadError when perf.data header reading fails. + """ + cmd = ["/usr/bin/perf", "report", "--header-only", "-i"] + pids = [] + + for perf_data_path in self.perf_data_files: + perf_data_path_in_chroot = misc.GetInsideChrootPath( + self.chromeos_root, perf_data_path + ) + path_str = " ".join(cmd + [perf_data_path_in_chroot]) + status, output, _ = self.ce.ChrootRunCommandWOutput( + self.chromeos_root, path_str + ) + if status: + # Error of reading a perf.data profile is fatal. + raise PerfDataReadError( + f"Failed to read perf.data profile: {path_str}" + ) + + # Pattern to search a line with "perf record" command line: + # # cmdline : /usr/bin/perf record -e instructions -p 123" + cmdline_regex = re.compile( + r"^\#\scmdline\s:\s+(?P<cmd>.*perf\s+record\s+.*)$" + ) + # Pattern to search PID in a command line. + pid_regex = re.compile(r"^.*\s-p\s(?P<pid>\d+)\s*.*$") + for line in output.splitlines(): + cmd_match = cmdline_regex.match(line) + if cmd_match: + # Found a perf command line. + cmdline = cmd_match.group("cmd") + # '-a' is a system-wide mode argument. + if "-a" not in cmdline.split(): + # It can be that perf was attached to PID and was still running in + # system-wide mode. + # We filter out this case here since it's not per-process. 
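+                        # (e.g. "perf record -a -p 123" is still treated as
+                        # system-wide and yields no PID here.)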
+                        pid_match = pid_regex.match(cmdline)
+                        if pid_match:
+                            pids.append(pid_match.group("pid"))
+                    # Stop the search and move to the next perf.data file.
+                    break
+            else:
+                # cmdline wasn't found in the header. It's a fatal error.
+                raise PerfDataReadError(
+                    f"Perf command line is not found in {path_str}"
+                )
+        return pids
+
+    def VerifyPerfDataPID(self):
+        """Verify PIDs in per-process perf.data profiles.
+
+        Check that at least one top process is profiled if perf was running in
+        per-process mode.
+
+        Raises:
+            PidVerificationError if PID verification of per-process perf.data
+            profiles fails.
+        """
+        perf_data_pids = self.ReadPidFromPerfData()
+        if not perf_data_pids:
+            # In system-wide mode there are no PIDs.
+            self._logger.LogOutput("System-wide perf mode. Skip verification.")
+            return
+
+        # PIDs will be present only in per-process profiles.
+        # In this case we need to verify that profiles are collected on the
+        # hottest processes.
+        top_processes = [top_cmd["cmd"] for top_cmd in self.top_cmds]
+        # top_process structure: <cmd>-<pid>
+        top_pids = [top_process.split("-")[-1] for top_process in top_processes]
+        for top_pid in top_pids:
+            if top_pid in perf_data_pids:
+                self._logger.LogOutput(
+                    "PID verification passed! "
+                    f"Top process {top_pid} is profiled."
+                )
+                return
+        raise PidVerificationError(
+            f"top processes {top_processes} are missing in perf.data traces with"
+            f" PID: {perf_data_pids}."
+        )
+
+    def ProcessResults(self, use_cache=False):
+        # Note that this function doesn't know anything about whether there is a
+        # cache hit or miss. It should process results agnostic of the cache hit
+        # state.
+        if (
+            self.results_file
+            and self.suite == "telemetry_Crosperf"
+            and "histograms.json" in self.results_file[0]
+        ):
+            self.keyvals = self.ProcessHistogramsResults()
+        elif (
+            self.results_file
+            and self.suite != "telemetry_Crosperf"
+            and "results-chart.json" in self.results_file[0]
+        ):
+            self.keyvals = self.ProcessChartResults()
+        else:
+            if not use_cache:
+                print(
+                    "\n ** WARNING **: Had to use deprecated output-method to "
+                    "collect results.\n"
+                )
+            self.keyvals = self.GetKeyvals()
+        self.keyvals["retval"] = self.retval
+        # If we are in CWP approximation mode, we want to collect DSO samples
+        # for each perf.data file
+        if self.cwp_dso and self.retval == 0:
+            self.keyvals["samples"] = self.GetSamples()
+            # If the samples count collected from perf file is 0, we will treat
+            # it as a failed run.
+            if self.keyvals["samples"][0] == 0:
+                del self.keyvals["samples"]
+                self.keyvals["retval"] = 1
+        # Generate report from all perf.data files.
+        # Now parse all perf report files and include them in keyvals.
+        self.GatherPerfResults()
+
+        cpustats = {}
+        # Turbostat output has higher priority of processing.
+        if self.turbostat_log_file:
+            cpustats = self.ProcessTurbostatResults()
+        # Process cpustats output only if turbostat has no data.
+        if not cpustats and self.cpustats_log_file:
+            cpustats = self.ProcessCpustatsResults()
+        if self.top_log_file:
+            self.top_cmds = self.ProcessTopResults()
+        # Verify that PIDs in non-system-wide perf.data and top_cmds match.
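+        # (Cross-checks the PIDs from ReadPidFromPerfData() against the
+        # per-PID commands from ProcessTopResults(); skipped when either
+        # source is missing.)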
+        if self.perf_data_files and self.top_cmds:
+            self.VerifyPerfDataPID()
+        if self.wait_time_log_file:
+            with open(self.wait_time_log_file) as f:
+                wait_time = f.readline().strip()
+                try:
+                    wait_time = float(wait_time)
+                except ValueError:
+                    raise ValueError("Wait time in log file is not a number.")
+            # This is for accumulating wait time for telemetry_Crosperf runs
+            # only; for test_that runs, please refer to suite_runner.
+            self.machine.AddCooldownWaitTime(wait_time)
+
+        for param_key, param in cpustats.items():
+            for param_type, param_values in param.items():
+                val_avg = sum(param_values) / len(param_values)
+                val_min = min(param_values)
+                val_max = max(param_values)
+                # Average data is always included.
+                self.keyvals["_".join([param_key, param_type, "avg"])] = val_avg
+                # Insert min/max results only if they deviate
+                # from average.
+                if val_min != val_avg:
+                    self.keyvals[
+                        "_".join([param_key, param_type, "min"])
+                    ] = val_min
+                if val_max != val_avg:
+                    self.keyvals[
+                        "_".join([param_key, param_type, "max"])
+                    ] = val_max
+
+    def GetChromeVersionFromCache(self, cache_dir):
+        # Read chrome_version from keys file, if present.
+        chrome_version = ""
+        keys_file = os.path.join(cache_dir, CACHE_KEYS_FILE)
+        if os.path.exists(keys_file):
+            with open(keys_file, "r") as f:
+                lines = f.readlines()
+                for l in lines:
+                    if l.startswith("Google Chrome "):
+                        chrome_version = l
+                        if chrome_version.endswith("\n"):
+                            chrome_version = chrome_version[:-1]
+                        break
+        return chrome_version
+
+    def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso):
+        self.test_name = test
+        self.suite = suite
+        self.cwp_dso = cwp_dso
+        # Read in everything from the cache directory.
+        with open(os.path.join(cache_dir, RESULTS_FILE), "rb") as f:
+            self.out = pickle.load(f)
+            self.err = pickle.load(f)
+            self.retval = pickle.load(f)
+
+        # Untar the tarball to a temporary directory
+        self.temp_dir = tempfile.mkdtemp(
+            dir=os.path.join(self.chromeos_root, "chroot", "tmp")
+        )
+
+        command = "cd %s && tar xf %s" % (
+            self.temp_dir,
+            os.path.join(cache_dir, AUTOTEST_TARBALL),
+        )
+        ret = self.ce.RunCommand(command, print_to_console=False)
+        if ret:
+            raise RuntimeError("Could not untar cached tarball")
+        self.results_dir = self.temp_dir
+        self.results_file = self.GetDataMeasurementsFiles()
+        self.perf_data_files = self.GetPerfDataFiles()
+        self.perf_report_files = self.GetPerfReportFiles()
+        self.chrome_version = self.GetChromeVersionFromCache(cache_dir)
+        self.ProcessResults(use_cache=True)
+
+    def CleanUp(self, rm_chroot_tmp):
+        if rm_chroot_tmp and self.results_dir:
+            dirname, basename = misc.GetRoot(self.results_dir)
+            if basename.find("test_that_results_") != -1:
+                command = "rm -rf %s" % self.results_dir
             else:
-            keyvals[key][0].append(result)
-    # calculate summary
-    for key in keyvals:
-      vals = keyvals[key][0]
-      unit = keyvals[key][1]
-      result = float(sum(vals)) / len(vals)
-      keyvals[key] = [result, unit]
-    return keyvals
-
-  def ReadPidFromPerfData(self):
-    """Read PIDs from perf.data files.
-
-    Extract PID from perf.data if "perf record" was running per process,
-    i.e. with "-p <PID>" and no "-a".
-
-    Returns:
-      pids: list of PIDs.
-
-    Raises:
-      PerfDataReadError when perf.data header reading fails.
- """ - cmd = ['/usr/bin/perf', 'report', '--header-only', '-i'] - pids = [] - - for perf_data_path in self.perf_data_files: - perf_data_path_in_chroot = misc.GetInsideChrootPath( - self.chromeos_root, perf_data_path) - path_str = ' '.join(cmd + [perf_data_path_in_chroot]) - status, output, _ = self.ce.ChrootRunCommandWOutput( - self.chromeos_root, path_str) - if status: - # Error of reading a perf.data profile is fatal. - raise PerfDataReadError( - f'Failed to read perf.data profile: {path_str}') - - # Pattern to search a line with "perf record" command line: - # # cmdline : /usr/bin/perf record -e instructions -p 123" - cmdline_regex = re.compile( - r'^\#\scmdline\s:\s+(?P<cmd>.*perf\s+record\s+.*)$') - # Pattern to search PID in a command line. - pid_regex = re.compile(r'^.*\s-p\s(?P<pid>\d+)\s*.*$') - for line in output.splitlines(): - cmd_match = cmdline_regex.match(line) - if cmd_match: - # Found a perf command line. - cmdline = cmd_match.group('cmd') - # '-a' is a system-wide mode argument. - if '-a' not in cmdline.split(): - # It can be that perf was attached to PID and was still running in - # system-wide mode. - # We filter out this case here since it's not per-process. - pid_match = pid_regex.match(cmdline) - if pid_match: - pids.append(pid_match.group('pid')) - # Stop the search and move to the next perf.data file. - break - else: - # cmdline wasn't found in the header. It's a fatal error. - raise PerfDataReadError( - f'Perf command line is not found in {path_str}') - return pids - - def VerifyPerfDataPID(self): - """Verify PIDs in per-process perf.data profiles. - - Check that at list one top process is profiled if perf was running in - per-process mode. - - Raises: - PidVerificationError if PID verification of per-process perf.data profiles - fail. - """ - perf_data_pids = self.ReadPidFromPerfData() - if not perf_data_pids: - # In system-wide mode there are no PIDs. - self._logger.LogOutput('System-wide perf mode. Skip verification.') - return - - # PIDs will be present only in per-process profiles. - # In this case we need to verify that profiles are collected on the - # hottest processes. - top_processes = [top_cmd['cmd'] for top_cmd in self.top_cmds] - # top_process structure: <cmd>-<pid> - top_pids = [top_process.split('-')[-1] for top_process in top_processes] - for top_pid in top_pids: - if top_pid in perf_data_pids: - self._logger.LogOutput('PID verification passed! ' - f'Top process {top_pid} is profiled.') - return - raise PidVerificationError( - f'top processes {top_processes} are missing in perf.data traces with' - f' PID: {perf_data_pids}.') - - def ProcessResults(self, use_cache=False): - # Note that this function doesn't know anything about whether there is a - # cache hit or miss. It should process results agnostic of the cache hit - # state. 
- if (self.results_file and self.suite == 'telemetry_Crosperf' - and 'histograms.json' in self.results_file[0]): - self.keyvals = self.ProcessHistogramsResults() - elif (self.results_file and self.suite != 'telemetry_Crosperf' - and 'results-chart.json' in self.results_file[0]): - self.keyvals = self.ProcessChartResults() - else: - if not use_cache: - print('\n ** WARNING **: Had to use deprecated output-method to ' - 'collect results.\n') - self.keyvals = self.GetKeyvals() - self.keyvals['retval'] = self.retval - # If we are in CWP approximation mode, we want to collect DSO samples - # for each perf.data file - if self.cwp_dso and self.retval == 0: - self.keyvals['samples'] = self.GetSamples() - # If the samples count collected from perf file is 0, we will treat - # it as a failed run. - if self.keyvals['samples'][0] == 0: - del self.keyvals['samples'] - self.keyvals['retval'] = 1 - # Generate report from all perf.data files. - # Now parse all perf report files and include them in keyvals. - self.GatherPerfResults() - - cpustats = {} - # Turbostat output has higher priority of processing. - if self.turbostat_log_file: - cpustats = self.ProcessTurbostatResults() - # Process cpustats output only if turbostat has no data. - if not cpustats and self.cpustats_log_file: - cpustats = self.ProcessCpustatsResults() - if self.top_log_file: - self.top_cmds = self.ProcessTopResults() - # Verify that PID in non system-wide perf.data and top_cmds are matching. - if self.perf_data_files and self.top_cmds: - self.VerifyPerfDataPID() - if self.wait_time_log_file: - with open(self.wait_time_log_file) as f: - wait_time = f.readline().strip() + command = "rm -rf %s" % dirname + self.ce.RunCommand(command) + if self.temp_dir: + command = "rm -rf %s" % self.temp_dir + self.ce.RunCommand(command) + + def CreateTarball(self, results_dir, tarball): + if not results_dir.strip(): + raise ValueError( + "Refusing to `tar` an empty results_dir: %r" % results_dir + ) + + ret = self.ce.RunCommand( + "cd %s && " + "tar " + "--exclude=var/spool " + "--exclude=var/log " + "-cjf %s ." % (results_dir, tarball) + ) + if ret: + raise RuntimeError("Couldn't compress test output directory.") + + def StoreToCacheDir(self, cache_dir, machine_manager, key_list): + # Create the dir if it doesn't exist. + temp_dir = tempfile.mkdtemp() + + # Store to the temp directory. + with open(os.path.join(temp_dir, RESULTS_FILE), "wb") as f: + pickle.dump(self.out, f) + pickle.dump(self.err, f) + pickle.dump(self.retval, f) + + if not test_flag.GetTestMode(): + with open(os.path.join(temp_dir, CACHE_KEYS_FILE), "w") as f: + f.write("%s\n" % self.label.name) + f.write("%s\n" % self.label.chrome_version) + f.write("%s\n" % self.machine.checksum_string) + for k in key_list: + f.write(k) + f.write("\n") + + if self.results_dir: + tarball = os.path.join(temp_dir, AUTOTEST_TARBALL) + self.CreateTarball(self.results_dir, tarball) + + # Store machine info. + # TODO(asharif): Make machine_manager a singleton, and don't pass it into + # this function. 
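+        # MACHINE_FILE records the checksum of the machine that produced
+        # this result (cf. the machine_checksum component of the cache key).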
+ with open(os.path.join(temp_dir, MACHINE_FILE), "w") as f: + f.write(machine_manager.machine_checksum_string[self.label.name]) + + if os.path.exists(cache_dir): + command = f"rm -rf {cache_dir}" + self.ce.RunCommand(command) + + parent_dir = os.path.dirname(cache_dir) + command = f"mkdir -p {parent_dir} && " + command += f"chmod g+x {temp_dir} && " + command += f"mv {temp_dir} {cache_dir}" + ret = self.ce.RunCommand(command) + if ret: + command = f"rm -rf {temp_dir}" + self.ce.RunCommand(command) + raise RuntimeError( + "Could not move dir %s to dir %s" % (temp_dir, cache_dir) + ) + + @classmethod + def CreateFromRun( + cls, + logger, + log_level, + label, + machine, + out, + err, + retval, + test, + suite="telemetry_Crosperf", + cwp_dso="", + ): + if suite == "telemetry": + result = TelemetryResult(logger, label, log_level, machine) + else: + result = cls(logger, label, log_level, machine) + result.PopulateFromRun(out, err, retval, test, suite, cwp_dso) + return result + + @classmethod + def CreateFromCacheHit( + cls, + logger, + log_level, + label, + machine, + cache_dir, + test, + suite="telemetry_Crosperf", + cwp_dso="", + ): + if suite == "telemetry": + result = TelemetryResult(logger, label, log_level, machine) + else: + result = cls(logger, label, log_level, machine) try: - wait_time = float(wait_time) - except ValueError: - raise ValueError('Wait time in log file is not a number.') - # This is for accumulating wait time for telemtry_Crosperf runs only, - # for test_that runs, please refer to suite_runner. - self.machine.AddCooldownWaitTime(wait_time) - - for param_key, param in cpustats.items(): - for param_type, param_values in param.items(): - val_avg = sum(param_values) / len(param_values) - val_min = min(param_values) - val_max = max(param_values) - # Average data is always included. - self.keyvals['_'.join([param_key, param_type, 'avg'])] = val_avg - # Insert min/max results only if they deviate - # from average. - if val_min != val_avg: - self.keyvals['_'.join([param_key, param_type, 'min'])] = val_min - if val_max != val_avg: - self.keyvals['_'.join([param_key, param_type, 'max'])] = val_max - - def GetChromeVersionFromCache(self, cache_dir): - # Read chrome_version from keys file, if present. - chrome_version = '' - keys_file = os.path.join(cache_dir, CACHE_KEYS_FILE) - if os.path.exists(keys_file): - with open(keys_file, 'r') as f: - lines = f.readlines() - for l in lines: - if l.startswith('Google Chrome '): - chrome_version = l - if chrome_version.endswith('\n'): - chrome_version = chrome_version[:-1] - break - return chrome_version - - def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso): - self.test_name = test - self.suite = suite - self.cwp_dso = cwp_dso - # Read in everything from the cache directory. 
- with open(os.path.join(cache_dir, RESULTS_FILE), 'rb') as f: - self.out = pickle.load(f) - self.err = pickle.load(f) - self.retval = pickle.load(f) - - # Untar the tarball to a temporary directory - self.temp_dir = tempfile.mkdtemp( - dir=os.path.join(self.chromeos_root, 'chroot', 'tmp')) - - command = ('cd %s && tar xf %s' % - (self.temp_dir, os.path.join(cache_dir, AUTOTEST_TARBALL))) - ret = self.ce.RunCommand(command, print_to_console=False) - if ret: - raise RuntimeError('Could not untar cached tarball') - self.results_dir = self.temp_dir - self.results_file = self.GetDataMeasurementsFiles() - self.perf_data_files = self.GetPerfDataFiles() - self.perf_report_files = self.GetPerfReportFiles() - self.chrome_version = self.GetChromeVersionFromCache(cache_dir) - self.ProcessResults(use_cache=True) - - def CleanUp(self, rm_chroot_tmp): - if rm_chroot_tmp and self.results_dir: - dirname, basename = misc.GetRoot(self.results_dir) - if basename.find('test_that_results_') != -1: - command = 'rm -rf %s' % self.results_dir - else: - command = 'rm -rf %s' % dirname - self.ce.RunCommand(command) - if self.temp_dir: - command = 'rm -rf %s' % self.temp_dir - self.ce.RunCommand(command) - - def CreateTarball(self, results_dir, tarball): - if not results_dir.strip(): - raise ValueError('Refusing to `tar` an empty results_dir: %r' % - results_dir) - - ret = self.ce.RunCommand('cd %s && ' - 'tar ' - '--exclude=var/spool ' - '--exclude=var/log ' - '-cjf %s .' % (results_dir, tarball)) - if ret: - raise RuntimeError("Couldn't compress test output directory.") - - def StoreToCacheDir(self, cache_dir, machine_manager, key_list): - # Create the dir if it doesn't exist. - temp_dir = tempfile.mkdtemp() - - # Store to the temp directory. - with open(os.path.join(temp_dir, RESULTS_FILE), 'wb') as f: - pickle.dump(self.out, f) - pickle.dump(self.err, f) - pickle.dump(self.retval, f) - - if not test_flag.GetTestMode(): - with open(os.path.join(temp_dir, CACHE_KEYS_FILE), 'w') as f: - f.write('%s\n' % self.label.name) - f.write('%s\n' % self.label.chrome_version) - f.write('%s\n' % self.machine.checksum_string) - for k in key_list: - f.write(k) - f.write('\n') - - if self.results_dir: - tarball = os.path.join(temp_dir, AUTOTEST_TARBALL) - self.CreateTarball(self.results_dir, tarball) - - # Store machine info. - # TODO(asharif): Make machine_manager a singleton, and don't pass it into - # this function. 
- with open(os.path.join(temp_dir, MACHINE_FILE), 'w') as f: - f.write(machine_manager.machine_checksum_string[self.label.name]) - - if os.path.exists(cache_dir): - command = f'rm -rf {cache_dir}' - self.ce.RunCommand(command) - - parent_dir = os.path.dirname(cache_dir) - command = f'mkdir -p {parent_dir} && ' - command += f'chmod g+x {temp_dir} && ' - command += f'mv {temp_dir} {cache_dir}' - ret = self.ce.RunCommand(command) - if ret: - command = f'rm -rf {temp_dir}' - self.ce.RunCommand(command) - raise RuntimeError('Could not move dir %s to dir %s' % - (temp_dir, cache_dir)) - - @classmethod - def CreateFromRun(cls, - logger, - log_level, - label, - machine, - out, - err, - retval, - test, - suite='telemetry_Crosperf', - cwp_dso=''): - if suite == 'telemetry': - result = TelemetryResult(logger, label, log_level, machine) - else: - result = cls(logger, label, log_level, machine) - result.PopulateFromRun(out, err, retval, test, suite, cwp_dso) - return result - - @classmethod - def CreateFromCacheHit(cls, - logger, - log_level, - label, - machine, - cache_dir, - test, - suite='telemetry_Crosperf', - cwp_dso=''): - if suite == 'telemetry': - result = TelemetryResult(logger, label, log_level, machine) - else: - result = cls(logger, label, log_level, machine) - try: - result.PopulateFromCacheDir(cache_dir, test, suite, cwp_dso) - - except RuntimeError as e: - logger.LogError('Exception while using cache: %s' % e) - return None - return result + result.PopulateFromCacheDir(cache_dir, test, suite, cwp_dso) + + except RuntimeError as e: + logger.LogError("Exception while using cache: %s" % e) + return None + return result class TelemetryResult(Result): - """Class to hold the results of a single Telemetry run.""" - - def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso): - self.out = out - self.err = err - self.retval = retval - - self.ProcessResults() - - # pylint: disable=arguments-differ - def ProcessResults(self): - # The output is: - # url,average_commit_time (ms),... - # www.google.com,33.4,21.2,... - # We need to convert to this format: - # {"www.google.com:average_commit_time (ms)": "33.4", - # "www.google.com:...": "21.2"} - # Added note: Occasionally the output comes back - # with "JSON.stringify(window.automation.GetResults())" on - # the first line, and then the rest of the output as - # described above. 
- - lines = self.out.splitlines() - self.keyvals = {} - - if lines: - if lines[0].startswith('JSON.stringify'): - lines = lines[1:] - - if not lines: - return - labels = lines[0].split(',') - for line in lines[1:]: - fields = line.split(',') - if len(fields) != len(labels): - continue - for i in range(1, len(labels)): - key = '%s %s' % (fields[0], labels[i]) - value = fields[i] - self.keyvals[key] = value - self.keyvals['retval'] = self.retval - - def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso): - self.test_name = test - self.suite = suite - self.cwp_dso = cwp_dso - with open(os.path.join(cache_dir, RESULTS_FILE), 'rb') as f: - self.out = pickle.load(f) - self.err = pickle.load(f) - self.retval = pickle.load(f) - - self.chrome_version = (super(TelemetryResult, - self).GetChromeVersionFromCache(cache_dir)) - self.ProcessResults() + """Class to hold the results of a single Telemetry run.""" + + def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso): + self.out = out + self.err = err + self.retval = retval + + self.ProcessResults() + + # pylint: disable=arguments-differ + def ProcessResults(self): + # The output is: + # url,average_commit_time (ms),... + # www.google.com,33.4,21.2,... + # We need to convert to this format: + # {"www.google.com:average_commit_time (ms)": "33.4", + # "www.google.com:...": "21.2"} + # Added note: Occasionally the output comes back + # with "JSON.stringify(window.automation.GetResults())" on + # the first line, and then the rest of the output as + # described above. + + lines = self.out.splitlines() + self.keyvals = {} + + if lines: + if lines[0].startswith("JSON.stringify"): + lines = lines[1:] + + if not lines: + return + labels = lines[0].split(",") + for line in lines[1:]: + fields = line.split(",") + if len(fields) != len(labels): + continue + for i in range(1, len(labels)): + key = "%s %s" % (fields[0], labels[i]) + value = fields[i] + self.keyvals[key] = value + self.keyvals["retval"] = self.retval + + def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso): + self.test_name = test + self.suite = suite + self.cwp_dso = cwp_dso + with open(os.path.join(cache_dir, RESULTS_FILE), "rb") as f: + self.out = pickle.load(f) + self.err = pickle.load(f) + self.retval = pickle.load(f) + + self.chrome_version = super( + TelemetryResult, self + ).GetChromeVersionFromCache(cache_dir) + self.ProcessResults() class CacheConditions(object): - """Various Cache condition values, for export.""" + """Various Cache condition values, for export.""" - # Cache hit only if the result file exists. - CACHE_FILE_EXISTS = 0 + # Cache hit only if the result file exists. + CACHE_FILE_EXISTS = 0 - # Cache hit if the checksum of cpuinfo and totalmem of - # the cached result and the new run match. - MACHINES_MATCH = 1 + # Cache hit if the checksum of cpuinfo and totalmem of + # the cached result and the new run match. + MACHINES_MATCH = 1 - # Cache hit if the image checksum of the cached result and the new run match. - CHECKSUMS_MATCH = 2 + # Cache hit if the image checksum of the cached result and the new run match. + CHECKSUMS_MATCH = 2 - # Cache hit only if the cached result was successful - RUN_SUCCEEDED = 3 + # Cache hit only if the cached result was successful + RUN_SUCCEEDED = 3 - # Never a cache hit. - FALSE = 4 + # Never a cache hit. + FALSE = 4 - # Cache hit if the image path matches the cached image path. - IMAGE_PATH_MATCH = 5 + # Cache hit if the image path matches the cached image path. 
+ IMAGE_PATH_MATCH = 5 - # Cache hit if the uuid of hard disk mataches the cached one + # Cache hit if the uuid of hard disk mataches the cached one - SAME_MACHINE_MATCH = 6 + SAME_MACHINE_MATCH = 6 class ResultsCache(object): - """Class to handle the cache for storing/retrieving test run results. - - This class manages the key of the cached runs without worrying about what - is exactly stored (value). The value generation is handled by the Results - class. - """ - CACHE_VERSION = 6 - - def __init__(self): - # Proper initialization happens in the Init function below. - self.chromeos_image = None - self.chromeos_root = None - self.test_name = None - self.iteration = None - self.test_args = None - self.profiler_args = None - self.board = None - self.cache_conditions = None - self.machine_manager = None - self.machine = None - self._logger = None - self.ce = None - self.label = None - self.share_cache = None - self.suite = None - self.log_level = None - self.show_all = None - self.run_local = None - self.cwp_dso = None - - def Init(self, chromeos_image, chromeos_root, test_name, iteration, - test_args, profiler_args, machine_manager, machine, board, - cache_conditions, logger_to_use, log_level, label, share_cache, - suite, show_all_results, run_local, cwp_dso): - self.chromeos_image = chromeos_image - self.chromeos_root = chromeos_root - self.test_name = test_name - self.iteration = iteration - self.test_args = test_args - self.profiler_args = profiler_args - self.board = board - self.cache_conditions = cache_conditions - self.machine_manager = machine_manager - self.machine = machine - self._logger = logger_to_use - self.ce = command_executer.GetCommandExecuter(self._logger, - log_level=log_level) - self.label = label - self.share_cache = share_cache - self.suite = suite - self.log_level = log_level - self.show_all = show_all_results - self.run_local = run_local - self.cwp_dso = cwp_dso - - def GetCacheDirForRead(self): - matching_dirs = [] - for glob_path in self.FormCacheDir(self.GetCacheKeyList(True)): - matching_dirs += glob.glob(glob_path) - - if matching_dirs: - # Cache file found. - return matching_dirs[0] - return None - - def GetCacheDirForWrite(self, get_keylist=False): - cache_path = self.FormCacheDir(self.GetCacheKeyList(False))[0] - if get_keylist: - args_str = '%s_%s_%s' % (self.test_args, self.profiler_args, - self.run_local) - version, image = results_report.ParseChromeosImage( - self.label.chromeos_image) - keylist = [ - version, image, self.label.board, self.machine.name, self.test_name, - str(self.iteration), args_str - ] - return cache_path, keylist - return cache_path - - def FormCacheDir(self, list_of_strings): - cache_key = ' '.join(list_of_strings) - cache_dir = misc.GetFilenameFromString(cache_key) - if self.label.cache_dir: - cache_home = os.path.abspath(os.path.expanduser(self.label.cache_dir)) - cache_path = [os.path.join(cache_home, cache_dir)] - else: - cache_path = [os.path.join(SCRATCH_DIR, cache_dir)] - - if self.share_cache: - for path in [x.strip() for x in self.share_cache.split(',')]: - if os.path.exists(path): - cache_path.append(os.path.join(path, cache_dir)) + """Class to handle the cache for storing/retrieving test run results. + + This class manages the key of the cached runs without worrying about what + is exactly stored (value). The value generation is handled by the Results + class. + """ + + CACHE_VERSION = 6 + + def __init__(self): + # Proper initialization happens in the Init function below. 
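+        # The None assignments below only mark the fields as unset; Init()
+        # populates them before the cache is used.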
+ self.chromeos_image = None + self.chromeos_root = None + self.test_name = None + self.iteration = None + self.test_args = None + self.profiler_args = None + self.board = None + self.cache_conditions = None + self.machine_manager = None + self.machine = None + self._logger = None + self.ce = None + self.label = None + self.share_cache = None + self.suite = None + self.log_level = None + self.show_all = None + self.run_local = None + self.cwp_dso = None + + def Init( + self, + chromeos_image, + chromeos_root, + test_name, + iteration, + test_args, + profiler_args, + machine_manager, + machine, + board, + cache_conditions, + logger_to_use, + log_level, + label, + share_cache, + suite, + show_all_results, + run_local, + cwp_dso, + ): + self.chromeos_image = chromeos_image + self.chromeos_root = chromeos_root + self.test_name = test_name + self.iteration = iteration + self.test_args = test_args + self.profiler_args = profiler_args + self.board = board + self.cache_conditions = cache_conditions + self.machine_manager = machine_manager + self.machine = machine + self._logger = logger_to_use + self.ce = command_executer.GetCommandExecuter( + self._logger, log_level=log_level + ) + self.label = label + self.share_cache = share_cache + self.suite = suite + self.log_level = log_level + self.show_all = show_all_results + self.run_local = run_local + self.cwp_dso = cwp_dso + + def GetCacheDirForRead(self): + matching_dirs = [] + for glob_path in self.FormCacheDir(self.GetCacheKeyList(True)): + matching_dirs += glob.glob(glob_path) + + if matching_dirs: + # Cache file found. + return matching_dirs[0] + return None + + def GetCacheDirForWrite(self, get_keylist=False): + cache_path = self.FormCacheDir(self.GetCacheKeyList(False))[0] + if get_keylist: + args_str = "%s_%s_%s" % ( + self.test_args, + self.profiler_args, + self.run_local, + ) + version, image = results_report.ParseChromeosImage( + self.label.chromeos_image + ) + keylist = [ + version, + image, + self.label.board, + self.machine.name, + self.test_name, + str(self.iteration), + args_str, + ] + return cache_path, keylist + return cache_path + + def FormCacheDir(self, list_of_strings): + cache_key = " ".join(list_of_strings) + cache_dir = misc.GetFilenameFromString(cache_key) + if self.label.cache_dir: + cache_home = os.path.abspath( + os.path.expanduser(self.label.cache_dir) + ) + cache_path = [os.path.join(cache_home, cache_dir)] + else: + cache_path = [os.path.join(SCRATCH_DIR, cache_dir)] + + if self.share_cache: + for path in [x.strip() for x in self.share_cache.split(",")]: + if os.path.exists(path): + cache_path.append(os.path.join(path, cache_dir)) + else: + self._logger.LogFatal( + "Unable to find shared cache: %s" % path + ) + + return cache_path + + def GetCacheKeyList(self, read): + if read and CacheConditions.MACHINES_MATCH not in self.cache_conditions: + machine_checksum = "*" + else: + machine_checksum = self.machine_manager.machine_checksum[ + self.label.name + ] + if ( + read + and CacheConditions.CHECKSUMS_MATCH not in self.cache_conditions + ): + checksum = "*" + elif self.label.image_type == "trybot": + checksum = hashlib.md5( + self.label.chromeos_image.encode("utf-8") + ).hexdigest() + elif self.label.image_type == "official": + checksum = "*" else: - self._logger.LogFatal('Unable to find shared cache: %s' % path) - - return cache_path - - def GetCacheKeyList(self, read): - if read and CacheConditions.MACHINES_MATCH not in self.cache_conditions: - machine_checksum = '*' - else: - machine_checksum = 
self.machine_manager.machine_checksum[self.label.name] - if read and CacheConditions.CHECKSUMS_MATCH not in self.cache_conditions: - checksum = '*' - elif self.label.image_type == 'trybot': - checksum = hashlib.md5( - self.label.chromeos_image.encode('utf-8')).hexdigest() - elif self.label.image_type == 'official': - checksum = '*' - else: - checksum = ImageChecksummer().Checksum(self.label, self.log_level) - - if read and CacheConditions.IMAGE_PATH_MATCH not in self.cache_conditions: - image_path_checksum = '*' - else: - image_path_checksum = hashlib.md5( - self.chromeos_image.encode('utf-8')).hexdigest() - - machine_id_checksum = '' - if read and CacheConditions.SAME_MACHINE_MATCH not in self.cache_conditions: - machine_id_checksum = '*' - else: - if self.machine and self.machine.name in self.label.remote: - machine_id_checksum = self.machine.machine_id_checksum - else: - for machine in self.machine_manager.GetMachines(self.label): - if machine.name == self.label.remote[0]: - machine_id_checksum = machine.machine_id_checksum - break - - temp_test_args = '%s %s %s' % (self.test_args, self.profiler_args, - self.run_local) - test_args_checksum = hashlib.md5( - temp_test_args.encode('utf-8')).hexdigest() - return (image_path_checksum, self.test_name, str(self.iteration), - test_args_checksum, checksum, machine_checksum, - machine_id_checksum, str(self.CACHE_VERSION)) - - def ReadResult(self): - if CacheConditions.FALSE in self.cache_conditions: - cache_dir = self.GetCacheDirForWrite() - command = 'rm -rf %s' % (cache_dir, ) - self.ce.RunCommand(command) - return None - cache_dir = self.GetCacheDirForRead() - - if not cache_dir: - return None - - if not os.path.isdir(cache_dir): - return None - - if self.log_level == 'verbose': - self._logger.LogOutput('Trying to read from cache dir: %s' % cache_dir) - result = Result.CreateFromCacheHit(self._logger, self.log_level, - self.label, self.machine, cache_dir, - self.test_name, self.suite, - self.cwp_dso) - if not result: - return None - - if (result.retval == 0 - or CacheConditions.RUN_SUCCEEDED not in self.cache_conditions): - return result - - return None - - def StoreResult(self, result): - cache_dir, keylist = self.GetCacheDirForWrite(get_keylist=True) - result.StoreToCacheDir(cache_dir, self.machine_manager, keylist) + checksum = ImageChecksummer().Checksum(self.label, self.log_level) + + if ( + read + and CacheConditions.IMAGE_PATH_MATCH not in self.cache_conditions + ): + image_path_checksum = "*" + else: + image_path_checksum = hashlib.md5( + self.chromeos_image.encode("utf-8") + ).hexdigest() + + machine_id_checksum = "" + if ( + read + and CacheConditions.SAME_MACHINE_MATCH not in self.cache_conditions + ): + machine_id_checksum = "*" + else: + if self.machine and self.machine.name in self.label.remote: + machine_id_checksum = self.machine.machine_id_checksum + else: + for machine in self.machine_manager.GetMachines(self.label): + if machine.name == self.label.remote[0]: + machine_id_checksum = machine.machine_id_checksum + break + + temp_test_args = "%s %s %s" % ( + self.test_args, + self.profiler_args, + self.run_local, + ) + test_args_checksum = hashlib.md5( + temp_test_args.encode("utf-8") + ).hexdigest() + return ( + image_path_checksum, + self.test_name, + str(self.iteration), + test_args_checksum, + checksum, + machine_checksum, + machine_id_checksum, + str(self.CACHE_VERSION), + ) + + def ReadResult(self): + if CacheConditions.FALSE in self.cache_conditions: + cache_dir = self.GetCacheDirForWrite() + command = "rm -rf %s" % 
(cache_dir,) + self.ce.RunCommand(command) + return None + cache_dir = self.GetCacheDirForRead() + + if not cache_dir: + return None + + if not os.path.isdir(cache_dir): + return None + + if self.log_level == "verbose": + self._logger.LogOutput( + "Trying to read from cache dir: %s" % cache_dir + ) + result = Result.CreateFromCacheHit( + self._logger, + self.log_level, + self.label, + self.machine, + cache_dir, + self.test_name, + self.suite, + self.cwp_dso, + ) + if not result: + return None + + if ( + result.retval == 0 + or CacheConditions.RUN_SUCCEEDED not in self.cache_conditions + ): + return result + + return None + + def StoreResult(self, result): + cache_dir, keylist = self.GetCacheDirForWrite(get_keylist=True) + result.StoreToCacheDir(cache_dir, self.machine_manager, keylist) class MockResultsCache(ResultsCache): - """Class for mock testing, corresponding to ResultsCache class.""" + """Class for mock testing, corresponding to ResultsCache class.""" - # FIXME: pylint complains about this mock init method, we should probably - # replace all Mock classes in Crosperf with simple Mock.mock(). - # pylint: disable=arguments-differ - def Init(self, *args): - pass + # FIXME: pylint complains about this mock init method, we should probably + # replace all Mock classes in Crosperf with simple Mock.mock(). + # pylint: disable=arguments-differ + def Init(self, *args): + pass - def ReadResult(self): - return None + def ReadResult(self): + return None - def StoreResult(self, result): - pass + def StoreResult(self, result): + pass class MockResult(Result): - """Class for mock testing, corresponding to Result class.""" + """Class for mock testing, corresponding to Result class.""" - def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso): - self.out = out - self.err = err - self.retval = retval + def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso): + self.out = out + self.err = err + self.retval = retval diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py index f4090be4..dbf5d672 100755 --- a/crosperf/results_cache_unittest.py +++ b/crosperf/results_cache_unittest.py @@ -17,20 +17,20 @@ import tempfile import unittest import unittest.mock as mock +from cros_utils import command_executer +from cros_utils import logger +from cros_utils import misc import image_checksummer -import machine_manager -import test_flag - from label import MockLabel +import machine_manager from results_cache import CacheConditions from results_cache import PerfDataReadError from results_cache import PidVerificationError from results_cache import Result from results_cache import ResultsCache from results_cache import TelemetryResult -from cros_utils import command_executer -from cros_utils import logger -from cros_utils import misc +import test_flag + # The following hardcoded string has blocked words replaced, and thus # is not representative of a true crosperf output. 
@@ -133,35 +133,35 @@ INFO : Elapsed time: 0m18s """ keyvals = { - '': 'PASS', - 'b_stdio_putcgetc__0_': '0.100005711667', - 'b_string_strstr___azbycxdwevfugthsirjqkplomn__': '0.0133123556667', - 'b_malloc_thread_local__0_': '0.01138439', - 'b_string_strlen__0_': '0.044893587', - 'b_malloc_sparse__0_': '0.015053784', - 'b_string_memset__0_': '0.00275405066667', - 'platform_LibCBench': 'PASS', - 'b_pthread_uselesslock__0_': '0.0294113346667', - 'b_string_strchr__0_': '0.00456903', - 'b_pthread_create_serial1__0_': '0.0291785246667', - 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac__': '0.118360778', - 'b_string_strstr___aaaaaaaaaaaaaacccccccccccc__': '0.0135694476667', - 'b_pthread_createjoin_serial1__0_': '0.031907936', - 'b_malloc_thread_stress__0_': '0.0367894733333', - 'b_regex_search____a_b_c__d_b__': '0.00165455066667', - 'b_malloc_bubble__0_': '0.015066374', - 'b_malloc_big2__0_': '0.002951359', - 'b_stdio_putcgetc_unlocked__0_': '0.0371443833333', - 'b_pthread_createjoin_serial2__0_': '0.043485347', - 'b_regex_search___a_25_b__': '0.0496191923333', - 'b_utf8_bigbuf__0_': '0.0473772253333', - 'b_malloc_big1__0_': '0.00375231466667', - 'b_regex_compile____a_b_c__d_b__': '0.00529833933333', - 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaac__': '0.068957325', - 'b_malloc_tiny2__0_': '0.000581407333333', - 'b_utf8_onebyone__0_': '0.130938538333', - 'b_malloc_tiny1__0_': '0.000768474333333', - 'b_string_strstr___abcdefghijklmnopqrstuvwxyz__': '0.0134553343333' + "": "PASS", + "b_stdio_putcgetc__0_": "0.100005711667", + "b_string_strstr___azbycxdwevfugthsirjqkplomn__": "0.0133123556667", + "b_malloc_thread_local__0_": "0.01138439", + "b_string_strlen__0_": "0.044893587", + "b_malloc_sparse__0_": "0.015053784", + "b_string_memset__0_": "0.00275405066667", + "platform_LibCBench": "PASS", + "b_pthread_uselesslock__0_": "0.0294113346667", + "b_string_strchr__0_": "0.00456903", + "b_pthread_create_serial1__0_": "0.0291785246667", + "b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac__": "0.118360778", + "b_string_strstr___aaaaaaaaaaaaaacccccccccccc__": "0.0135694476667", + "b_pthread_createjoin_serial1__0_": "0.031907936", + "b_malloc_thread_stress__0_": "0.0367894733333", + "b_regex_search____a_b_c__d_b__": "0.00165455066667", + "b_malloc_bubble__0_": "0.015066374", + "b_malloc_big2__0_": "0.002951359", + "b_stdio_putcgetc_unlocked__0_": "0.0371443833333", + "b_pthread_createjoin_serial2__0_": "0.043485347", + "b_regex_search___a_25_b__": "0.0496191923333", + "b_utf8_bigbuf__0_": "0.0473772253333", + "b_malloc_big1__0_": "0.00375231466667", + "b_regex_compile____a_b_c__d_b__": "0.00529833933333", + "b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaac__": "0.068957325", + "b_malloc_tiny2__0_": "0.000581407333333", + "b_utf8_onebyone__0_": "0.130938538333", + "b_malloc_tiny1__0_": "0.000768474333333", + "b_string_strstr___abcdefghijklmnopqrstuvwxyz__": "0.0134553343333", } PERF_DATA_HEADER = """ @@ -192,8 +192,7 @@ PERF_DATA_HEADER = """ # """ -TURBOSTAT_LOG_OUTPUT = ( - """CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp +TURBOSTAT_LOG_OUTPUT = """CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp - 329 12.13 2723 2393 10975 77 0 336 12.41 2715 2393 6328 77 2 323 11.86 2731 2393 4647 69 @@ -221,17 +220,13 @@ CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp - 843 29.83 2832 2393 28161 47 0 827 29.35 2826 2393 16093 47 2 858 30.31 2838 2393 12068 46 -""") +""" TURBOSTAT_DATA = { - 'cpufreq': { - 'all': [2723, 2884, 2927, 2937, 2932, 2933, 2832] - }, - 'cputemp': { - 'all': [77, 83, 84, 72, 75, 46, 47] - }, + 
"cpufreq": {"all": [2723, 2884, 2927, 2937, 2932, 2933, 2832]}, + "cputemp": {"all": [77, 83, 84, 72, 75, 46, 47]}, } -TOP_LOG = (""" +TOP_LOG = """ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND 4102 chronos 12 -8 3454472 238300 118188 R 41.8 6.1 0:08.37 chrome 4204 chronos 12 -8 2492716 205728 179016 S 11.8 5.3 0:03.89 chrome @@ -253,58 +248,58 @@ TOP_LOG = (""" 5713 chronos 20 0 5178652 103120 50372 S 17.8 2.6 0:01.13 chrome 7 root 20 0 0 0 0 S 1.0 0.0 0:00.73 rcu_preempt 855 root 20 0 0 0 0 S 1.0 0.0 0:00.01 kworker/4:2 -""") +""" TOP_DATA = [ { - 'cmd': 'chrome-5745', - 'cpu_use_avg': 115.35, - 'count': 2, - 'top5_cpu_use': [122.8, 107.9], + "cmd": "chrome-5745", + "cpu_use_avg": 115.35, + "count": 2, + "top5_cpu_use": [122.8, 107.9], }, { - 'cmd': 'chrome-5713', - 'cpu_use_avg': 8.9, - 'count': 1, - 'top5_cpu_use': [17.8] + "cmd": "chrome-5713", + "cpu_use_avg": 8.9, + "count": 1, + "top5_cpu_use": [17.8], }, { - 'cmd': 'irq/cros-ec-912', - 'cpu_use_avg': 1.0, - 'count': 1, - 'top5_cpu_use': [2.0], + "cmd": "irq/cros-ec-912", + "cpu_use_avg": 1.0, + "count": 1, + "top5_cpu_use": [2.0], }, { - 'cmd': 'chrome-5205', - 'cpu_use_avg': 0.5, - 'count': 1, - 'top5_cpu_use': [1.0] + "cmd": "chrome-5205", + "cpu_use_avg": 0.5, + "count": 1, + "top5_cpu_use": [1.0], }, { - 'cmd': 'spi5-121', - 'cpu_use_avg': 0.5, - 'count': 1, - 'top5_cpu_use': [1.0], + "cmd": "spi5-121", + "cpu_use_avg": 0.5, + "count": 1, + "top5_cpu_use": [1.0], }, { - 'cmd': 'sshd-4811', - 'cpu_use_avg': 0.5, - 'count': 1, - 'top5_cpu_use': [1.0], + "cmd": "sshd-4811", + "cpu_use_avg": 0.5, + "count": 1, + "top5_cpu_use": [1.0], }, { - 'cmd': 'rcu_preempt-7', - 'cpu_use_avg': 0.5, - 'count': 1, - 'top5_cpu_use': [1.0], + "cmd": "rcu_preempt-7", + "cpu_use_avg": 0.5, + "count": 1, + "top5_cpu_use": [1.0], }, { - 'cmd': 'kworker/4:2-855', - 'cpu_use_avg': 0.5, - 'count': 1, - 'top5_cpu_use': [1.0], + "cmd": "kworker/4:2-855", + "cpu_use_avg": 0.5, + "count": 1, + "top5_cpu_use": [1.0], }, ] -TOP_OUTPUT = (""" COMMAND AVG CPU% SEEN HIGHEST 5 +TOP_OUTPUT = """ COMMAND AVG CPU% SEEN HIGHEST 5 chrome 128.250000 6 [122.8, 107.9, 17.8, 5.0, 2.0] irq/230-cros-ec 1.000000 1 [2.0] sshd 0.500000 1 [1.0] @@ -312,9 +307,9 @@ TOP_OUTPUT = (""" COMMAND AVG CPU% SEEN HIGHEST 5 spi5 0.500000 1 [1.0] rcu_preempt 0.500000 1 [1.0] kworker/4:2 0.500000 1 [1.0] -""") +""" -CPUSTATS_UNIQ_OUTPUT = (""" +CPUSTATS_UNIQ_OUTPUT = """ /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000 /sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq 1512000 /sys/devices/system/cpu/cpu3/cpufreq/cpuinfo_cur_freq 2016000 @@ -327,20 +322,20 @@ big-cpu 51234 soc-thermal 45456 little-cpu 42555 big-cpu 61724 -""") +""" CPUSTATS_UNIQ_DATA = { - 'cpufreq': { - 'cpu0': [1512, 1500], - 'cpu1': [1512, 1600], - 'cpu3': [2016, 2012] + "cpufreq": { + "cpu0": [1512, 1500], + "cpu1": [1512, 1600], + "cpu3": [2016, 2012], + }, + "cputemp": { + "soc-thermal": [44.4, 45.5], + "little-cpu": [41.2, 42.6], + "big-cpu": [51.2, 61.7], }, - 'cputemp': { - 'soc-thermal': [44.4, 45.5], - 'little-cpu': [41.2, 42.6], - 'big-cpu': [51.2, 61.7] - } } -CPUSTATS_DUPL_OUTPUT = (""" +CPUSTATS_DUPL_OUTPUT = """ /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000 /sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq 1512000 /sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 1512000 @@ -353,17 +348,14 @@ CPUSTATS_DUPL_OUTPUT = (""" /sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq 1614000 /sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 1614000 
/sys/devices/system/cpu/cpu3/cpufreq/cpuinfo_cur_freq 1982000 -""") +""" CPUSTATS_DUPL_DATA = { - 'cpufreq': { - 'cpu0': [1512, 1500, 1614], - 'cpu3': [2016, 2016, 1982] - }, + "cpufreq": {"cpu0": [1512, 1500, 1614], "cpu3": [2016, 2016, 1982]}, } -TMP_DIR1 = '/tmp/tmpAbcXyz' +TMP_DIR1 = "/tmp/tmpAbcXyz" -HISTOGRAMSET = (""" +HISTOGRAMSET = """ [ { "values": [ @@ -435,1427 +427,1557 @@ HISTOGRAMSET = (""" } ] -""") +""" # pylint: enable=line-too-long class MockResult(Result): - """Mock result class.""" - def __init__(self, mylogger, label, logging_level, machine): - super(MockResult, self).__init__(mylogger, label, logging_level, machine) + """Mock result class.""" + + def __init__(self, mylogger, label, logging_level, machine): + super(MockResult, self).__init__( + mylogger, label, logging_level, machine + ) - def FindFilesInResultsDir(self, find_args): - return '' + def FindFilesInResultsDir(self, find_args): + return "" - # pylint: disable=arguments-differ - def GetKeyvals(self, temp=False): - if temp: - pass - return keyvals + # pylint: disable=arguments-differ + def GetKeyvals(self, temp=False): + if temp: + pass + return keyvals class ResultTest(unittest.TestCase): - """Result test class.""" - def __init__(self, *args, **kwargs): - super(ResultTest, self).__init__(*args, **kwargs) - self.callFakeProcessResults = False - self.fakeCacheReturnResult = None - self.callGetResultsDir = False - self.callProcessResults = False - self.callGetPerfReportFiles = False - self.kv_dict = None - self.tmpdir = '' - self.callGetNewKeyvals = False - self.callGetResultsFile = False - self.callGetPerfDataFiles = False - self.callGetTurbostatFile = False - self.callGetCpustatsFile = False - self.callGetTopFile = False - self.callGetCpuinfoFile = False - self.callGetWaitTimeFile = False - self.args = None - self.callGatherPerfResults = False - self.mock_logger = mock.Mock(spec=logger.Logger) - self.mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) - self.mock_label = MockLabel('mock_label', 'build', 'chromeos_image', - 'autotest_dir', 'debug_dir', '/tmp', 'lumpy', - 'remote', 'image_args', 'cache_dir', 'average', - 'gcc', False, None) - - def testCreateFromRun(self): - result = MockResult.CreateFromRun(logger.GetLogger(), 'average', - self.mock_label, 'remote1', OUTPUT, - error, 0, True) - self.assertEqual(result.keyvals, keyvals) - self.assertEqual(result.chroot_results_dir, - '/tmp/test_that.PO1234567/platform_LibCBench') - self.assertEqual(result.results_dir, - '/tmp/chroot/tmp/test_that.PO1234567/platform_LibCBench') - self.assertEqual(result.retval, 0) - - def setUp(self): - self.result = Result(self.mock_logger, self.mock_label, 'average', - self.mock_cmd_exec) - self.result.chromeos_root = '/tmp/chromeos' - - @mock.patch.object(os.path, 'isdir') - @mock.patch.object(command_executer.CommandExecuter, 'RunCommand') - @mock.patch.object(command_executer.CommandExecuter, 'CopyFiles') - def test_copy_files_to(self, mock_copyfiles, mock_runcmd, mock_isdir): - - files = ['src_file_1', 'src_file_2', 'src_file_3'] - dest_dir = '/tmp/test' - self.mock_cmd_exec.RunCommand = mock_runcmd - self.mock_cmd_exec.CopyFiles = mock_copyfiles - - mock_copyfiles.return_value = 0 - - # test 1. dest_dir exists; CopyFiles returns 0. 
- mock_isdir.return_value = True - self.result.CopyFilesTo(dest_dir, files) - self.assertEqual(mock_runcmd.call_count, 0) - self.assertEqual(mock_copyfiles.call_count, 3) - first_args = mock_copyfiles.call_args_list[0][0] - second_args = mock_copyfiles.call_args_list[1][0] - third_args = mock_copyfiles.call_args_list[2][0] - self.assertEqual(first_args, ('src_file_1', '/tmp/test/src_file_1.0')) - self.assertEqual(second_args, ('src_file_2', '/tmp/test/src_file_2.1')) - self.assertEqual(third_args, ('src_file_3', '/tmp/test/src_file_3.2')) - - mock_runcmd.reset_mock() - mock_copyfiles.reset_mock() - # test 2. dest_dir does not exist; CopyFiles returns 0. - mock_isdir.return_value = False - self.result.CopyFilesTo(dest_dir, files) - self.assertEqual(mock_runcmd.call_count, 3) - self.assertEqual(mock_copyfiles.call_count, 3) - self.assertEqual(mock_runcmd.call_args_list[0], - mock_runcmd.call_args_list[1]) - self.assertEqual(mock_runcmd.call_args_list[0], - mock_runcmd.call_args_list[2]) - self.assertEqual(mock_runcmd.call_args_list[0][0], - ('mkdir -p /tmp/test', )) - - # test 3. CopyFiles returns 1 (fails). - mock_copyfiles.return_value = 1 - self.assertRaises(Exception, self.result.CopyFilesTo, dest_dir, files) - - @mock.patch.object(Result, 'CopyFilesTo') - def test_copy_results_to(self, mockCopyFilesTo): - results_file = [ - '/tmp/result.json.0', '/tmp/result.json.1', '/tmp/result.json.2' - ] - perf_data_files = [ - '/tmp/perf.data.0', '/tmp/perf.data.1', '/tmp/perf.data.2' - ] - perf_report_files = [ - '/tmp/perf.report.0', '/tmp/perf.report.1', '/tmp/perf.report.2' - ] - - self.result.results_file = results_file - self.result.perf_data_files = perf_data_files - self.result.perf_report_files = perf_report_files - - self.result.CopyFilesTo = mockCopyFilesTo - self.result.CopyResultsTo('/tmp/results/') - self.assertEqual(mockCopyFilesTo.call_count, 3) - self.assertEqual(len(mockCopyFilesTo.call_args_list), 3) - self.assertEqual(mockCopyFilesTo.call_args_list[0][0], - ('/tmp/results/', results_file)) - self.assertEqual(mockCopyFilesTo.call_args_list[1][0], - ('/tmp/results/', perf_data_files)) - self.assertEqual(mockCopyFilesTo.call_args_list[2][0], - ('/tmp/results/', perf_report_files)) - - def test_get_new_keyvals(self): - kv_dict = {} - - def FakeGetDataMeasurementsFiles(): - filename = os.path.join(os.getcwd(), 'unittest_keyval_file.txt') - return [filename] - - self.result.GetDataMeasurementsFiles = FakeGetDataMeasurementsFiles - kv_dict2, udict = self.result.GetNewKeyvals(kv_dict) - self.assertEqual( - kv_dict2, { - u'Box2D__Box2D': 4775, - u'Mandreel__Mandreel': 6620, - u'Gameboy__Gameboy': 9901, - u'Crypto__Crypto': 8737, - u'telemetry_page_measurement_results__num_errored': 0, - u'telemetry_page_measurement_results__num_failed': 0, - u'PdfJS__PdfJS': 6455, - u'Total__Score': 7918, - u'EarleyBoyer__EarleyBoyer': 14340, - u'MandreelLatency__MandreelLatency': 5188, - u'CodeLoad__CodeLoad': 6271, - u'DeltaBlue__DeltaBlue': 14401, - u'Typescript__Typescript': 9815, - u'SplayLatency__SplayLatency': 7653, - u'zlib__zlib': 16094, - u'Richards__Richards': 10358, - u'RegExp__RegExp': 1765, - u'NavierStokes__NavierStokes': 9815, - u'Splay__Splay': 4425, - u'RayTrace__RayTrace': 16600 - }) - self.assertEqual( - udict, { - u'Box2D__Box2D': u'score', - u'Mandreel__Mandreel': u'score', - u'Gameboy__Gameboy': u'score', - u'Crypto__Crypto': u'score', - u'telemetry_page_measurement_results__num_errored': u'count', - u'telemetry_page_measurement_results__num_failed': u'count', - u'PdfJS__PdfJS': 
u'score', - u'Total__Score': u'score', - u'EarleyBoyer__EarleyBoyer': u'score', - u'MandreelLatency__MandreelLatency': u'score', - u'CodeLoad__CodeLoad': u'score', - u'DeltaBlue__DeltaBlue': u'score', - u'Typescript__Typescript': u'score', - u'SplayLatency__SplayLatency': u'score', - u'zlib__zlib': u'score', - u'Richards__Richards': u'score', - u'RegExp__RegExp': u'score', - u'NavierStokes__NavierStokes': u'score', - u'Splay__Splay': u'score', - u'RayTrace__RayTrace': u'score' - }) - - def test_append_telemetry_units(self): - kv_dict = { - u'Box2D__Box2D': 4775, - u'Mandreel__Mandreel': 6620, - u'Gameboy__Gameboy': 9901, - u'Crypto__Crypto': 8737, - u'PdfJS__PdfJS': 6455, - u'Total__Score': 7918, - u'EarleyBoyer__EarleyBoyer': 14340, - u'MandreelLatency__MandreelLatency': 5188, - u'CodeLoad__CodeLoad': 6271, - u'DeltaBlue__DeltaBlue': 14401, - u'Typescript__Typescript': 9815, - u'SplayLatency__SplayLatency': 7653, - u'zlib__zlib': 16094, - u'Richards__Richards': 10358, - u'RegExp__RegExp': 1765, - u'NavierStokes__NavierStokes': 9815, - u'Splay__Splay': 4425, - u'RayTrace__RayTrace': 16600 - } - units_dict = { - u'Box2D__Box2D': u'score', - u'Mandreel__Mandreel': u'score', - u'Gameboy__Gameboy': u'score', - u'Crypto__Crypto': u'score', - u'PdfJS__PdfJS': u'score', - u'Total__Score': u'score', - u'EarleyBoyer__EarleyBoyer': u'score', - u'MandreelLatency__MandreelLatency': u'score', - u'CodeLoad__CodeLoad': u'score', - u'DeltaBlue__DeltaBlue': u'score', - u'Typescript__Typescript': u'score', - u'SplayLatency__SplayLatency': u'score', - u'zlib__zlib': u'score', - u'Richards__Richards': u'score', - u'RegExp__RegExp': u'score', - u'NavierStokes__NavierStokes': u'score', - u'Splay__Splay': u'score', - u'RayTrace__RayTrace': u'score' - } + """Result test class.""" + + def __init__(self, *args, **kwargs): + super(ResultTest, self).__init__(*args, **kwargs) + self.callFakeProcessResults = False + self.fakeCacheReturnResult = None + self.callGetResultsDir = False + self.callProcessResults = False + self.callGetPerfReportFiles = False + self.kv_dict = None + self.tmpdir = "" + self.callGetNewKeyvals = False + self.callGetResultsFile = False + self.callGetPerfDataFiles = False + self.callGetTurbostatFile = False + self.callGetCpustatsFile = False + self.callGetTopFile = False + self.callGetCpuinfoFile = False + self.callGetWaitTimeFile = False + self.args = None + self.callGatherPerfResults = False + self.mock_logger = mock.Mock(spec=logger.Logger) + self.mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) + self.mock_label = MockLabel( + "mock_label", + "build", + "chromeos_image", + "autotest_dir", + "debug_dir", + "/tmp", + "lumpy", + "remote", + "image_args", + "cache_dir", + "average", + "gcc", + False, + None, + ) + + def testCreateFromRun(self): + result = MockResult.CreateFromRun( + logger.GetLogger(), + "average", + self.mock_label, + "remote1", + OUTPUT, + error, + 0, + True, + ) + self.assertEqual(result.keyvals, keyvals) + self.assertEqual( + result.chroot_results_dir, + "/tmp/test_that.PO1234567/platform_LibCBench", + ) + self.assertEqual( + result.results_dir, + "/tmp/chroot/tmp/test_that.PO1234567/platform_LibCBench", + ) + self.assertEqual(result.retval, 0) + + def setUp(self): + self.result = Result( + self.mock_logger, self.mock_label, "average", self.mock_cmd_exec + ) + self.result.chromeos_root = "/tmp/chromeos" + + @mock.patch.object(os.path, "isdir") + @mock.patch.object(command_executer.CommandExecuter, "RunCommand") + 
@mock.patch.object(command_executer.CommandExecuter, "CopyFiles") + def test_copy_files_to(self, mock_copyfiles, mock_runcmd, mock_isdir): + + files = ["src_file_1", "src_file_2", "src_file_3"] + dest_dir = "/tmp/test" + self.mock_cmd_exec.RunCommand = mock_runcmd + self.mock_cmd_exec.CopyFiles = mock_copyfiles + + mock_copyfiles.return_value = 0 + + # test 1. dest_dir exists; CopyFiles returns 0. + mock_isdir.return_value = True + self.result.CopyFilesTo(dest_dir, files) + self.assertEqual(mock_runcmd.call_count, 0) + self.assertEqual(mock_copyfiles.call_count, 3) + first_args = mock_copyfiles.call_args_list[0][0] + second_args = mock_copyfiles.call_args_list[1][0] + third_args = mock_copyfiles.call_args_list[2][0] + self.assertEqual(first_args, ("src_file_1", "/tmp/test/src_file_1.0")) + self.assertEqual(second_args, ("src_file_2", "/tmp/test/src_file_2.1")) + self.assertEqual(third_args, ("src_file_3", "/tmp/test/src_file_3.2")) + + mock_runcmd.reset_mock() + mock_copyfiles.reset_mock() + # test 2. dest_dir does not exist; CopyFiles returns 0. + mock_isdir.return_value = False + self.result.CopyFilesTo(dest_dir, files) + self.assertEqual(mock_runcmd.call_count, 3) + self.assertEqual(mock_copyfiles.call_count, 3) + self.assertEqual( + mock_runcmd.call_args_list[0], mock_runcmd.call_args_list[1] + ) + self.assertEqual( + mock_runcmd.call_args_list[0], mock_runcmd.call_args_list[2] + ) + self.assertEqual( + mock_runcmd.call_args_list[0][0], ("mkdir -p /tmp/test",) + ) + + # test 3. CopyFiles returns 1 (fails). + mock_copyfiles.return_value = 1 + self.assertRaises(Exception, self.result.CopyFilesTo, dest_dir, files) + + @mock.patch.object(Result, "CopyFilesTo") + def test_copy_results_to(self, mockCopyFilesTo): + results_file = [ + "/tmp/result.json.0", + "/tmp/result.json.1", + "/tmp/result.json.2", + ] + perf_data_files = [ + "/tmp/perf.data.0", + "/tmp/perf.data.1", + "/tmp/perf.data.2", + ] + perf_report_files = [ + "/tmp/perf.report.0", + "/tmp/perf.report.1", + "/tmp/perf.report.2", + ] + + self.result.results_file = results_file + self.result.perf_data_files = perf_data_files + self.result.perf_report_files = perf_report_files + + self.result.CopyFilesTo = mockCopyFilesTo + self.result.CopyResultsTo("/tmp/results/") + self.assertEqual(mockCopyFilesTo.call_count, 3) + self.assertEqual(len(mockCopyFilesTo.call_args_list), 3) + self.assertEqual( + mockCopyFilesTo.call_args_list[0][0], + ("/tmp/results/", results_file), + ) + self.assertEqual( + mockCopyFilesTo.call_args_list[1][0], + ("/tmp/results/", perf_data_files), + ) + self.assertEqual( + mockCopyFilesTo.call_args_list[2][0], + ("/tmp/results/", perf_report_files), + ) + + def test_get_new_keyvals(self): + kv_dict = {} + + def FakeGetDataMeasurementsFiles(): + filename = os.path.join(os.getcwd(), "unittest_keyval_file.txt") + return [filename] + + self.result.GetDataMeasurementsFiles = FakeGetDataMeasurementsFiles + kv_dict2, udict = self.result.GetNewKeyvals(kv_dict) + self.assertEqual( + kv_dict2, + { + u"Box2D__Box2D": 4775, + u"Mandreel__Mandreel": 6620, + u"Gameboy__Gameboy": 9901, + u"Crypto__Crypto": 8737, + u"telemetry_page_measurement_results__num_errored": 0, + u"telemetry_page_measurement_results__num_failed": 0, + u"PdfJS__PdfJS": 6455, + u"Total__Score": 7918, + u"EarleyBoyer__EarleyBoyer": 14340, + u"MandreelLatency__MandreelLatency": 5188, + u"CodeLoad__CodeLoad": 6271, + u"DeltaBlue__DeltaBlue": 14401, + u"Typescript__Typescript": 9815, + u"SplayLatency__SplayLatency": 7653, + u"zlib__zlib": 16094, + 
u"Richards__Richards": 10358, + u"RegExp__RegExp": 1765, + u"NavierStokes__NavierStokes": 9815, + u"Splay__Splay": 4425, + u"RayTrace__RayTrace": 16600, + }, + ) + self.assertEqual( + udict, + { + u"Box2D__Box2D": u"score", + u"Mandreel__Mandreel": u"score", + u"Gameboy__Gameboy": u"score", + u"Crypto__Crypto": u"score", + u"telemetry_page_measurement_results__num_errored": u"count", + u"telemetry_page_measurement_results__num_failed": u"count", + u"PdfJS__PdfJS": u"score", + u"Total__Score": u"score", + u"EarleyBoyer__EarleyBoyer": u"score", + u"MandreelLatency__MandreelLatency": u"score", + u"CodeLoad__CodeLoad": u"score", + u"DeltaBlue__DeltaBlue": u"score", + u"Typescript__Typescript": u"score", + u"SplayLatency__SplayLatency": u"score", + u"zlib__zlib": u"score", + u"Richards__Richards": u"score", + u"RegExp__RegExp": u"score", + u"NavierStokes__NavierStokes": u"score", + u"Splay__Splay": u"score", + u"RayTrace__RayTrace": u"score", + }, + ) + + def test_append_telemetry_units(self): + kv_dict = { + u"Box2D__Box2D": 4775, + u"Mandreel__Mandreel": 6620, + u"Gameboy__Gameboy": 9901, + u"Crypto__Crypto": 8737, + u"PdfJS__PdfJS": 6455, + u"Total__Score": 7918, + u"EarleyBoyer__EarleyBoyer": 14340, + u"MandreelLatency__MandreelLatency": 5188, + u"CodeLoad__CodeLoad": 6271, + u"DeltaBlue__DeltaBlue": 14401, + u"Typescript__Typescript": 9815, + u"SplayLatency__SplayLatency": 7653, + u"zlib__zlib": 16094, + u"Richards__Richards": 10358, + u"RegExp__RegExp": 1765, + u"NavierStokes__NavierStokes": 9815, + u"Splay__Splay": 4425, + u"RayTrace__RayTrace": 16600, + } + units_dict = { + u"Box2D__Box2D": u"score", + u"Mandreel__Mandreel": u"score", + u"Gameboy__Gameboy": u"score", + u"Crypto__Crypto": u"score", + u"PdfJS__PdfJS": u"score", + u"Total__Score": u"score", + u"EarleyBoyer__EarleyBoyer": u"score", + u"MandreelLatency__MandreelLatency": u"score", + u"CodeLoad__CodeLoad": u"score", + u"DeltaBlue__DeltaBlue": u"score", + u"Typescript__Typescript": u"score", + u"SplayLatency__SplayLatency": u"score", + u"zlib__zlib": u"score", + u"Richards__Richards": u"score", + u"RegExp__RegExp": u"score", + u"NavierStokes__NavierStokes": u"score", + u"Splay__Splay": u"score", + u"RayTrace__RayTrace": u"score", + } - results_dict = self.result.AppendTelemetryUnits(kv_dict, units_dict) - self.assertEqual( - results_dict, { - u'Box2D__Box2D': [4775, u'score'], - u'Splay__Splay': [4425, u'score'], - u'Gameboy__Gameboy': [9901, u'score'], - u'Crypto__Crypto': [8737, u'score'], - u'PdfJS__PdfJS': [6455, u'score'], - u'Total__Score': [7918, u'score'], - u'EarleyBoyer__EarleyBoyer': [14340, u'score'], - u'MandreelLatency__MandreelLatency': [5188, u'score'], - u'DeltaBlue__DeltaBlue': [14401, u'score'], - u'SplayLatency__SplayLatency': [7653, u'score'], - u'Mandreel__Mandreel': [6620, u'score'], - u'Richards__Richards': [10358, u'score'], - u'zlib__zlib': [16094, u'score'], - u'CodeLoad__CodeLoad': [6271, u'score'], - u'Typescript__Typescript': [9815, u'score'], - u'RegExp__RegExp': [1765, u'score'], - u'RayTrace__RayTrace': [16600, u'score'], - u'NavierStokes__NavierStokes': [9815, u'score'] - }) - - @mock.patch.object(misc, 'GetInsideChrootPath') - @mock.patch.object(tempfile, 'mkdtemp') - @mock.patch.object(command_executer.CommandExecuter, 'RunCommand') - @mock.patch.object(command_executer.CommandExecuter, - 'ChrootRunCommandWOutput') - def test_get_keyvals(self, mock_chrootruncmd, mock_runcmd, mock_mkdtemp, - mock_getpath): - - self.kv_dict = {} - self.callGetNewKeyvals = False - - def reset(): - self.kv_dict 
= {} - self.callGetNewKeyvals = False - mock_chrootruncmd.reset_mock() - mock_runcmd.reset_mock() - mock_mkdtemp.reset_mock() - mock_getpath.reset_mock() - - def FakeGetNewKeyvals(kv_dict): - self.kv_dict = kv_dict - self.callGetNewKeyvals = True - return_kvdict = {'first_time': 680, 'Total': 10} - return_udict = {'first_time': 'ms', 'Total': 'score'} - return return_kvdict, return_udict - - mock_mkdtemp.return_value = TMP_DIR1 - mock_chrootruncmd.return_value = [ - '', ('%s,PASS\n%s/telemetry_Crosperf,PASS\n') % (TMP_DIR1, TMP_DIR1), - '' - ] - mock_getpath.return_value = TMP_DIR1 - self.result.ce.ChrootRunCommandWOutput = mock_chrootruncmd - self.result.ce.RunCommand = mock_runcmd - self.result.GetNewKeyvals = FakeGetNewKeyvals - self.result.suite = 'telemetry_Crosperf' - self.result.results_dir = '/tmp/test_that_resultsNmq' - - # Test 1. no self.temp_dir. - res = self.result.GetKeyvals() - self.assertTrue(self.callGetNewKeyvals) - self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'}) - self.assertEqual(mock_runcmd.call_count, 1) - self.assertEqual(mock_runcmd.call_args_list[0][0], - ('cp -r /tmp/test_that_resultsNmq/* %s' % TMP_DIR1, )) - self.assertEqual(mock_chrootruncmd.call_count, 1) - self.assertEqual( - mock_chrootruncmd.call_args_list[0][0], - (self.result.chromeos_root, - ('./generate_test_report --no-color --csv %s') % TMP_DIR1)) - self.assertEqual(mock_getpath.call_count, 1) - self.assertEqual(mock_mkdtemp.call_count, 1) - self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']}) - - # Test 2. self.temp_dir - reset() - mock_chrootruncmd.return_value = [ - '', ('/tmp/tmpJCajRG,PASS\n/tmp/tmpJCajRG/' - 'telemetry_Crosperf,PASS\n'), '' - ] - mock_getpath.return_value = '/tmp/tmpJCajRG' - self.result.temp_dir = '/tmp/tmpJCajRG' - res = self.result.GetKeyvals() - self.assertEqual(mock_runcmd.call_count, 0) - self.assertEqual(mock_mkdtemp.call_count, 0) - self.assertEqual(mock_chrootruncmd.call_count, 1) - self.assertTrue(self.callGetNewKeyvals) - self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'}) - self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']}) - - # Test 3. suite != telemetry_Crosperf. Normally this would be for - # running non-Telemetry autotests, such as BootPerfServer. In this test - # case, the keyvals we have set up were returned from a Telemetry test run; - # so this pass is basically testing that we don't append the units to the - # test results (which we do for Telemetry autotest runs). - reset() - self.result.suite = '' - res = self.result.GetKeyvals() - self.assertEqual(res, {'Total': 10, 'first_time': 680}) - - @mock.patch.object(misc, 'GetInsideChrootPath') - @mock.patch.object(command_executer.CommandExecuter, - 'ChrootRunCommandWOutput') - @mock.patch.object(os.path, 'exists') - def test_get_samples(self, mock_exists, mock_get_total_samples, - mock_getpath): - self.result.perf_data_files = ['/tmp/results/perf.data'] - self.result.board = 'samus' - mock_getpath.return_value = '/usr/chromeos/chroot/tmp/results/perf.data' - mock_get_total_samples.return_value = [ - '', '45.42% 237210 chrome ', '' - ] - mock_exists.return_value = True - - # mock_open does not seem to support iteration. - # pylint: disable=line-too-long - content = """1.63% 66 dav1d-tile chrome [.] 
decode_coefs + results_dict = self.result.AppendTelemetryUnits(kv_dict, units_dict) + self.assertEqual( + results_dict, + { + u"Box2D__Box2D": [4775, u"score"], + u"Splay__Splay": [4425, u"score"], + u"Gameboy__Gameboy": [9901, u"score"], + u"Crypto__Crypto": [8737, u"score"], + u"PdfJS__PdfJS": [6455, u"score"], + u"Total__Score": [7918, u"score"], + u"EarleyBoyer__EarleyBoyer": [14340, u"score"], + u"MandreelLatency__MandreelLatency": [5188, u"score"], + u"DeltaBlue__DeltaBlue": [14401, u"score"], + u"SplayLatency__SplayLatency": [7653, u"score"], + u"Mandreel__Mandreel": [6620, u"score"], + u"Richards__Richards": [10358, u"score"], + u"zlib__zlib": [16094, u"score"], + u"CodeLoad__CodeLoad": [6271, u"score"], + u"Typescript__Typescript": [9815, u"score"], + u"RegExp__RegExp": [1765, u"score"], + u"RayTrace__RayTrace": [16600, u"score"], + u"NavierStokes__NavierStokes": [9815, u"score"], + }, + ) + + @mock.patch.object(misc, "GetInsideChrootPath") + @mock.patch.object(tempfile, "mkdtemp") + @mock.patch.object(command_executer.CommandExecuter, "RunCommand") + @mock.patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" + ) + def test_get_keyvals( + self, mock_chrootruncmd, mock_runcmd, mock_mkdtemp, mock_getpath + ): + + self.kv_dict = {} + self.callGetNewKeyvals = False + + def reset(): + self.kv_dict = {} + self.callGetNewKeyvals = False + mock_chrootruncmd.reset_mock() + mock_runcmd.reset_mock() + mock_mkdtemp.reset_mock() + mock_getpath.reset_mock() + + def FakeGetNewKeyvals(kv_dict): + self.kv_dict = kv_dict + self.callGetNewKeyvals = True + return_kvdict = {"first_time": 680, "Total": 10} + return_udict = {"first_time": "ms", "Total": "score"} + return return_kvdict, return_udict + + mock_mkdtemp.return_value = TMP_DIR1 + mock_chrootruncmd.return_value = [ + "", + ("%s,PASS\n%s/telemetry_Crosperf,PASS\n") % (TMP_DIR1, TMP_DIR1), + "", + ] + mock_getpath.return_value = TMP_DIR1 + self.result.ce.ChrootRunCommandWOutput = mock_chrootruncmd + self.result.ce.RunCommand = mock_runcmd + self.result.GetNewKeyvals = FakeGetNewKeyvals + self.result.suite = "telemetry_Crosperf" + self.result.results_dir = "/tmp/test_that_resultsNmq" + + # Test 1. no self.temp_dir. + res = self.result.GetKeyvals() + self.assertTrue(self.callGetNewKeyvals) + self.assertEqual( + self.kv_dict, {"": "PASS", "telemetry_Crosperf": "PASS"} + ) + self.assertEqual(mock_runcmd.call_count, 1) + self.assertEqual( + mock_runcmd.call_args_list[0][0], + ("cp -r /tmp/test_that_resultsNmq/* %s" % TMP_DIR1,), + ) + self.assertEqual(mock_chrootruncmd.call_count, 1) + self.assertEqual( + mock_chrootruncmd.call_args_list[0][0], + ( + self.result.chromeos_root, + ("./generate_test_report --no-color --csv %s") % TMP_DIR1, + ), + ) + self.assertEqual(mock_getpath.call_count, 1) + self.assertEqual(mock_mkdtemp.call_count, 1) + self.assertEqual( + res, {"Total": [10, "score"], "first_time": [680, "ms"]} + ) + + # Test 2. 
self.temp_dir + reset() + mock_chrootruncmd.return_value = [ + "", + ( + "/tmp/tmpJCajRG,PASS\n/tmp/tmpJCajRG/" + "telemetry_Crosperf,PASS\n" + ), + "", + ] + mock_getpath.return_value = "/tmp/tmpJCajRG" + self.result.temp_dir = "/tmp/tmpJCajRG" + res = self.result.GetKeyvals() + self.assertEqual(mock_runcmd.call_count, 0) + self.assertEqual(mock_mkdtemp.call_count, 0) + self.assertEqual(mock_chrootruncmd.call_count, 1) + self.assertTrue(self.callGetNewKeyvals) + self.assertEqual( + self.kv_dict, {"": "PASS", "telemetry_Crosperf": "PASS"} + ) + self.assertEqual( + res, {"Total": [10, "score"], "first_time": [680, "ms"]} + ) + + # Test 3. suite != telemetry_Crosperf. Normally this would be for + # running non-Telemetry autotests, such as BootPerfServer. In this test + # case, the keyvals we have set up were returned from a Telemetry test run; + # so this pass is basically testing that we don't append the units to the + # test results (which we do for Telemetry autotest runs). + reset() + self.result.suite = "" + res = self.result.GetKeyvals() + self.assertEqual(res, {"Total": 10, "first_time": 680}) + + @mock.patch.object(misc, "GetInsideChrootPath") + @mock.patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" + ) + @mock.patch.object(os.path, "exists") + def test_get_samples( + self, mock_exists, mock_get_total_samples, mock_getpath + ): + self.result.perf_data_files = ["/tmp/results/perf.data"] + self.result.board = "samus" + mock_getpath.return_value = "/usr/chromeos/chroot/tmp/results/perf.data" + mock_get_total_samples.return_value = [ + "", + "45.42% 237210 chrome ", + "", + ] + mock_exists.return_value = True + + # mock_open does not seem to support iteration. + # pylint: disable=line-too-long + content = """1.63% 66 dav1d-tile chrome [.] decode_coefs 1.48% 60 swapper [kernel.kallsyms] [k] intel_idle 1.16% 47 dav1d-tile chrome [.] 
decode_sb""" - with mock.patch('builtins.open', return_value=io.StringIO(content)): - samples = self.result.GetSamples() - self.assertEqual(samples, [237210 - 60, u'samples']) - - def test_get_results_dir(self): - - self.result.out = '' - self.assertRaises(Exception, self.result.GetResultsDir) - - self.result.out = OUTPUT - resdir = self.result.GetResultsDir() - self.assertEqual(resdir, '/tmp/test_that.PO1234567/platform_LibCBench') - - @mock.patch.object(command_executer.CommandExecuter, 'RunCommandGeneric') - def test_find_files_in_results_dir(self, mock_runcmd): - - self.result.results_dir = None - res = self.result.FindFilesInResultsDir('-name perf.data') - self.assertEqual(res, '') - - self.result.ce.RunCommand = mock_runcmd - self.result.results_dir = '/tmp/test_results' - mock_runcmd.return_value = [0, '/tmp/test_results/perf.data', ''] - res = self.result.FindFilesInResultsDir('-name perf.data') - self.assertEqual(mock_runcmd.call_count, 1) - self.assertEqual(mock_runcmd.call_args_list[0][0], - ('find /tmp/test_results -name perf.data', )) - self.assertEqual(res, '/tmp/test_results/perf.data') - - mock_runcmd.reset_mock() - mock_runcmd.return_value = [1, '', ''] - self.assertRaises(Exception, self.result.FindFilesInResultsDir, - '-name perf.data') - - @mock.patch.object(Result, 'FindFilesInResultsDir') - def test_get_perf_data_files(self, mock_findfiles): - self.args = None - - mock_findfiles.return_value = 'line1\nline1\n' - self.result.FindFilesInResultsDir = mock_findfiles - res = self.result.GetPerfDataFiles() - self.assertEqual(res, ['line1', 'line1']) - self.assertEqual(mock_findfiles.call_args_list[0][0], - ('-name perf.data', )) - - def test_get_perf_report_files(self): - self.args = None - - def FakeFindFiles(find_args): - self.args = find_args - return 'line1\nline1\n' - - self.result.FindFilesInResultsDir = FakeFindFiles - res = self.result.GetPerfReportFiles() - self.assertEqual(res, ['line1', 'line1']) - self.assertEqual(self.args, '-name perf.data.report') - - def test_get_data_measurement_files(self): - self.args = None - - def FakeFindFiles(find_args): - self.args = find_args - return 'line1\nline1\n' - - self.result.FindFilesInResultsDir = FakeFindFiles - res = self.result.GetDataMeasurementsFiles() - self.assertEqual(res, ['line1', 'line1']) - self.assertEqual(self.args, '-name perf_measurements') - - @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput') - def test_get_turbostat_file_finds_single_log(self, mock_runcmd): - """Expected behavior when a single log file found.""" - self.result.results_dir = '/tmp/test_results' - self.result.ce.RunCommandWOutput = mock_runcmd - mock_runcmd.return_value = (0, 'some/long/path/turbostat.log', '') - found_single_log = self.result.GetTurbostatFile() - self.assertEqual(found_single_log, 'some/long/path/turbostat.log') - - @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput') - def test_get_turbostat_file_finds_multiple_logs(self, mock_runcmd): - """Error case when multiple files found.""" - self.result.results_dir = '/tmp/test_results' - self.result.ce.RunCommandWOutput = mock_runcmd - mock_runcmd.return_value = (0, - 'some/long/path/turbostat.log\nturbostat.log', - '') - found_first_logs = self.result.GetTurbostatFile() - self.assertEqual(found_first_logs, 'some/long/path/turbostat.log') - - @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput') - def test_get_turbostat_file_finds_no_logs(self, mock_runcmd): - """Error case when no log file found.""" - 
self.result.results_dir = '/tmp/test_results'
-    self.result.ce.RunCommandWOutput = mock_runcmd
-    mock_runcmd.return_value = (0, '', '')
-    found_no_logs = self.result.GetTurbostatFile()
-    self.assertEqual(found_no_logs, '')
-
-  @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
-  def test_get_turbostat_file_with_failing_find(self, mock_runcmd):
-    """Error case when file search returns an error."""
-    self.result.results_dir = '/tmp/test_results'
-    mock_runcmd.return_value = (-1, '', 'error')
-    with self.assertRaises(RuntimeError):
-      self.result.GetTurbostatFile()
-
-  @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
-  def test_get_top_file_finds_single_log(self, mock_runcmd):
-    """Expected behavior when a single top log file found."""
-    self.result.results_dir = '/tmp/test_results'
-    self.result.ce.RunCommandWOutput = mock_runcmd
-    mock_runcmd.return_value = (0, 'some/long/path/top.log', '')
-    found_single_log = self.result.GetTopFile()
-    self.assertEqual(found_single_log, 'some/long/path/top.log')
-
-  @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
-  def test_get_top_file_finds_multiple_logs(self, mock_runcmd):
-    """The case when multiple top files found."""
-    self.result.results_dir = '/tmp/test_results'
-    self.result.ce.RunCommandWOutput = mock_runcmd
-    mock_runcmd.return_value = (0, 'some/long/path/top.log\ntop.log', '')
-    found_first_logs = self.result.GetTopFile()
-    self.assertEqual(found_first_logs, 'some/long/path/top.log')
-
-  @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
-  def test_get_top_file_finds_no_logs(self, mock_runcmd):
-    """Error case when no log file found."""
-    self.result.results_dir = '/tmp/test_results'
-    self.result.ce.RunCommandWOutput = mock_runcmd
-    mock_runcmd.return_value = (0, '', '')
-    found_no_logs = self.result.GetTopFile()
-    self.assertEqual(found_no_logs, '')
-
-  @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
-  def test_get_cpuinfo_file_finds_single_log(self, mock_runcmd):
-    """Expected behavior when a single cpuinfo file found."""
-    self.result.results_dir = '/tmp/test_results'
-    self.result.ce.RunCommandWOutput = mock_runcmd
-    mock_runcmd.return_value = (0, 'some/long/path/cpuinfo.log', '')
-    found_single_log = self.result.GetCpuinfoFile()
-    self.assertEqual(found_single_log, 'some/long/path/cpuinfo.log')
-
-  @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
-  def test_get_cpustats_file_finds_single_log(self, mock_runcmd):
-    """Expected behavior when a single log file found."""
-    self.result.results_dir = '/tmp/test_results'
-    self.result.ce.RunCommandWOutput = mock_runcmd
-    mock_runcmd.return_value = (0, 'some/long/path/cpustats.log', '')
-    found_single_log = self.result.GetCpustatsFile()
-    self.assertEqual(found_single_log, 'some/long/path/cpustats.log')
-
-  @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
-  def test_get_cpustats_file_finds_multiple_logs(self, mock_runcmd):
-    """The case when multiple files found."""
-    self.result.results_dir = '/tmp/test_results'
-    self.result.ce.RunCommandWOutput = mock_runcmd
-    mock_runcmd.return_value = (0, 'some/long/path/cpustats.log\ncpustats.log',
-                                '')
-    found_first_logs = self.result.GetCpustatsFile()
-    self.assertEqual(found_first_logs, 'some/long/path/cpustats.log')
-
-  @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
-  def test_get_cpustats_file_finds_no_logs(self, mock_runcmd):
-    """Error case when no log file found."""
-    self.result.results_dir = '/tmp/test_results'
-    self.result.ce.RunCommandWOutput = mock_runcmd
-    mock_runcmd.return_value = (0, '', '')
-    found_no_logs = self.result.GetCpustatsFile()
-    self.assertEqual(found_no_logs, '')
-
-  def test_verify_perf_data_pid_ok(self):
-    """Verify perf PID which is present in TOP_DATA."""
-    self.result.top_cmds = TOP_DATA
-    # pid is present in TOP_DATA.
-    with mock.patch.object(Result,
-                           'ReadPidFromPerfData',
-                           return_value=['5713']):
-      self.result.VerifyPerfDataPID()
-
-  def test_verify_perf_data_pid_fail(self):
-    """Test perf PID missing in top raises the error."""
-    self.result.top_cmds = TOP_DATA
-    # pid is not in the list of top processes.
-    with mock.patch.object(Result,
-                           'ReadPidFromPerfData',
-                           return_value=['9999']):
-      with self.assertRaises(PidVerificationError):
-        self.result.VerifyPerfDataPID()
-
-  @mock.patch.object(command_executer.CommandExecuter,
-                     'ChrootRunCommandWOutput')
-  def test_read_pid_from_perf_data_ok(self, mock_runcmd):
-    """Test perf header parser, normal flow."""
-    self.result.ce.ChrootRunCommandWOutput = mock_runcmd
-    self.result.perf_data_files = [
-        '/tmp/chromeos/chroot/tmp/results/perf.data'
-    ]
-    exp_pid = '12345'
-    mock_runcmd.return_value = (0, PERF_DATA_HEADER.format(pid=exp_pid), '')
-    pids = self.result.ReadPidFromPerfData()
-    self.assertEqual(pids, [exp_pid])
-
-  @mock.patch.object(command_executer.CommandExecuter,
-                     'ChrootRunCommandWOutput')
-  def test_read_pid_from_perf_data_mult_profiles(self, mock_runcmd):
-    """Test multiple perf.data files with PID."""
-    self.result.ce.ChrootRunCommandWOutput = mock_runcmd
-    # self.result.chromeos_root = '/tmp/chromeos'
-    self.result.perf_data_files = [
-        '/tmp/chromeos/chroot/tmp/results/perf.data.0',
-        '/tmp/chromeos/chroot/tmp/results/perf.data.1',
-    ]
-    # There is '-p <pid>' in command line but it's still system-wide: '-a'.
-    cmd_line = '# cmdline : /usr/bin/perf record -e instructions -p {pid}'
-    exp_perf_pids = ['1111', '2222']
-    mock_runcmd.side_effect = [
-        (0, cmd_line.format(pid=exp_perf_pids[0]), ''),
-        (0, cmd_line.format(pid=exp_perf_pids[1]), ''),
-    ]
-    pids = self.result.ReadPidFromPerfData()
-    self.assertEqual(pids, exp_perf_pids)
-
-  @mock.patch.object(command_executer.CommandExecuter,
-                     'ChrootRunCommandWOutput')
-  def test_read_pid_from_perf_data_no_pid(self, mock_runcmd):
-    """Test perf.data without PID."""
-    self.result.ce.ChrootRunCommandWOutput = mock_runcmd
-    self.result.perf_data_files = [
-        '/tmp/chromeos/chroot/tmp/results/perf.data'
-    ]
-    cmd_line = '# cmdline : /usr/bin/perf record -e instructions'
-    mock_runcmd.return_value = (0, cmd_line, '')
-    pids = self.result.ReadPidFromPerfData()
-    # pids is empty.
-    self.assertEqual(pids, [])
-
-  @mock.patch.object(command_executer.CommandExecuter,
-                     'ChrootRunCommandWOutput')
-  def test_read_pid_from_perf_data_system_wide(self, mock_runcmd):
-    """Test reading from system-wide profile with PID."""
-    self.result.ce.ChrootRunCommandWOutput = mock_runcmd
-    self.result.perf_data_files = [
-        '/tmp/chromeos/chroot/tmp/results/perf.data'
-    ]
-    # There is '-p <pid>' in command line but it's still system-wide: '-a'.
-    cmd_line = '# cmdline : /usr/bin/perf record -e instructions -a -p 1234'
-    mock_runcmd.return_value = (0, cmd_line, '')
-    pids = self.result.ReadPidFromPerfData()
-    # pids should be empty since it's not a per-process profiling.
- self.assertEqual(pids, []) - - @mock.patch.object(command_executer.CommandExecuter, - 'ChrootRunCommandWOutput') - def test_read_pid_from_perf_data_read_fail(self, mock_runcmd): - """Failure to read perf.data raises the error.""" - self.result.ce.ChrootRunCommandWOutput = mock_runcmd - self.result.perf_data_files = [ - '/tmp/chromeos/chroot/tmp/results/perf.data' - ] - # Error status of the profile read. - mock_runcmd.return_value = (1, '', '') - with self.assertRaises(PerfDataReadError): - self.result.ReadPidFromPerfData() - - @mock.patch.object(command_executer.CommandExecuter, - 'ChrootRunCommandWOutput') - def test_read_pid_from_perf_data_fail(self, mock_runcmd): - """Failure to find cmdline in perf.data header raises the error.""" - self.result.ce.ChrootRunCommandWOutput = mock_runcmd - self.result.perf_data_files = [ - '/tmp/chromeos/chroot/tmp/results/perf.data' - ] - # Empty output. - mock_runcmd.return_value = (0, '', '') - with self.assertRaises(PerfDataReadError): - self.result.ReadPidFromPerfData() - - def test_process_turbostat_results_with_valid_data(self): - """Normal case when log exists and contains valid data.""" - self.result.turbostat_log_file = '/tmp/somelogfile.log' - with mock.patch('builtins.open', - mock.mock_open(read_data=TURBOSTAT_LOG_OUTPUT)) as mo: - cpustats = self.result.ProcessTurbostatResults() - # Check that the log got opened and data were read/parsed. - calls = [mock.call('/tmp/somelogfile.log')] - mo.assert_has_calls(calls) - self.assertEqual(cpustats, TURBOSTAT_DATA) - - def test_process_turbostat_results_from_empty_file(self): - """Error case when log exists but file is empty.""" - self.result.turbostat_log_file = '/tmp/emptylogfile.log' - with mock.patch('builtins.open', mock.mock_open(read_data='')) as mo: - cpustats = self.result.ProcessTurbostatResults() - # Check that the log got opened and parsed successfully and empty data - # returned. - calls = [mock.call('/tmp/emptylogfile.log')] - mo.assert_has_calls(calls) - self.assertEqual(cpustats, {}) - - def test_process_turbostat_results_when_file_doesnt_exist(self): - """Error case when file does not exist.""" - nonexistinglog = '/tmp/1' - while os.path.exists(nonexistinglog): - # Extend file path if it happens to exist. - nonexistinglog = os.path.join(nonexistinglog, '1') - self.result.turbostat_log_file = nonexistinglog - # Allow the tested function to call a 'real' open and hopefully crash. - with self.assertRaises(IOError): - self.result.ProcessTurbostatResults() - - def test_process_cpustats_results_with_uniq_data(self): - """Process cpustats log which has freq unique to each core. - - Testing normal case when frequency data vary between - different cores. - Expecting that data for all cores will be present in - returned cpustats. - """ - self.result.cpustats_log_file = '/tmp/somelogfile.log' - with mock.patch('builtins.open', - mock.mock_open(read_data=CPUSTATS_UNIQ_OUTPUT)) as mo: - cpustats = self.result.ProcessCpustatsResults() - # Check that the log got opened and data were read/parsed. - calls = [mock.call('/tmp/somelogfile.log')] - mo.assert_has_calls(calls) - self.assertEqual(cpustats, CPUSTATS_UNIQ_DATA) - - def test_process_cpustats_results_with_dupl_data(self): - """Process cpustats log where cores have duplicate freq. - - Testing normal case when frequency data on some cores - are duplicated. - Expecting that duplicated data is discarded in - returned cpustats. 
- """ - self.result.cpustats_log_file = '/tmp/somelogfile.log' - with mock.patch('builtins.open', - mock.mock_open(read_data=CPUSTATS_DUPL_OUTPUT)) as mo: - cpustats = self.result.ProcessCpustatsResults() - # Check that the log got opened and data were read/parsed. - calls = [mock.call('/tmp/somelogfile.log')] - mo.assert_has_calls(calls) - self.assertEqual(cpustats, CPUSTATS_DUPL_DATA) - - def test_process_cpustats_results_from_empty_file(self): - """Error case when log exists but file is empty.""" - self.result.cpustats_log_file = '/tmp/emptylogfile.log' - with mock.patch('builtins.open', mock.mock_open(read_data='')) as mo: - cpustats = self.result.ProcessCpustatsResults() - # Check that the log got opened and parsed successfully and empty data - # returned. - calls = [mock.call('/tmp/emptylogfile.log')] - mo.assert_has_calls(calls) - self.assertEqual(cpustats, {}) - - def test_process_top_results_with_valid_data(self): - """Process top log with valid data.""" - - self.result.top_log_file = '/tmp/fakelogfile.log' - with mock.patch('builtins.open', mock.mock_open(read_data=TOP_LOG)) as mo: - topproc = self.result.ProcessTopResults() - # Check that the log got opened and data were read/parsed. - calls = [mock.call('/tmp/fakelogfile.log')] - mo.assert_has_calls(calls) - self.assertEqual(topproc, TOP_DATA) - - def test_process_top_results_from_empty_file(self): - """Error case when log exists but file is empty.""" - self.result.top_log_file = '/tmp/emptylogfile.log' - with mock.patch('builtins.open', mock.mock_open(read_data='')) as mo: - topcalls = self.result.ProcessTopResults() - # Check that the log got opened and parsed successfully and empty data - # returned. - calls = [mock.call('/tmp/emptylogfile.log')] - mo.assert_has_calls(calls) - self.assertEqual(topcalls, []) - - def test_format_string_top_cmds(self): - """Test formatted string with top commands.""" - self.result.top_cmds = [ - { - 'cmd': 'chrome-111', - 'cpu_use_avg': 119.753453465, - 'count': 44444, - 'top5_cpu_use': [222.8, 217.9, 217.8, 191.0, 189.9], - }, - { - 'cmd': 'chrome-222', - 'cpu_use_avg': 100, - 'count': 33333, - 'top5_cpu_use': [200.0, 195.0, 190.0, 185.0, 180.0], - }, - { - 'cmd': 'irq/230-cros-ec', - 'cpu_use_avg': 10.000000000000001, - 'count': 1000, - 'top5_cpu_use': [11.5, 11.4, 11.3, 11.2, 11.1], - }, - { - 'cmd': 'powerd', - 'cpu_use_avg': 2.0, - 'count': 2, - 'top5_cpu_use': [3.0, 1.0] - }, - { - 'cmd': 'cmd3', - 'cpu_use_avg': 1.0, - 'count': 1, - 'top5_cpu_use': [1.0], - }, - { - 'cmd': 'cmd4', - 'cpu_use_avg': 1.0, - 'count': 1, - 'top5_cpu_use': [1.0], - }, - { - 'cmd': 'cmd5', - 'cpu_use_avg': 1.0, - 'count': 1, - 'top5_cpu_use': [1.0], - }, - { - 'cmd': 'cmd6_not_for_print', - 'cpu_avg': 1.0, - 'count': 1, - 'top5': [1.0], - }, - ] - form_str = self.result.FormatStringTopCommands() - self.assertEqual( - form_str, '\n'.join([ - 'Top commands with highest CPU usage:', - ' COMMAND AVG CPU% COUNT HIGHEST 5', - '-' * 50, - ' chrome-111 119.75 44444 ' - '[222.8, 217.9, 217.8, 191.0, 189.9]', - ' chrome-222 100.00 33333 ' - '[200.0, 195.0, 190.0, 185.0, 180.0]', - ' irq/230-cros-ec 10.00 1000 ' - '[11.5, 11.4, 11.3, 11.2, 11.1]', - ' powerd 2.00 2 [3.0, 1.0]', - ' cmd3 1.00 1 [1.0]', - ' cmd4 1.00 1 [1.0]', - ' cmd5 1.00 1 [1.0]', - '-' * 50, - ])) - - def test_format_string_top_calls_no_data(self): - """Test formatted string of top with no data.""" - self.result.top_cmds = [] - form_str = self.result.FormatStringTopCommands() - self.assertEqual( - form_str, '\n'.join([ - 'Top commands with highest CPU 
usage:', - ' COMMAND AVG CPU% COUNT HIGHEST 5', - '-' * 50, - '[NO DATA FROM THE TOP LOG]', - '-' * 50, - ])) - - @mock.patch.object(misc, 'GetInsideChrootPath') - @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand') - def test_generate_perf_report_files(self, mock_chrootruncmd, mock_getpath): - fake_file = '/usr/chromeos/chroot/tmp/results/fake_file' - self.result.perf_data_files = ['/tmp/results/perf.data'] - self.result.board = 'lumpy' - mock_getpath.return_value = fake_file - self.result.ce.ChrootRunCommand = mock_chrootruncmd - mock_chrootruncmd.return_value = 0 - # Debug path not found - self.result.label.debug_path = '' - tmp = self.result.GeneratePerfReportFiles() - self.assertEqual(tmp, ['/tmp/chromeos/chroot%s' % fake_file]) - self.assertEqual(mock_chrootruncmd.call_args_list[0][0], - (self.result.chromeos_root, - ('/usr/sbin/perf report -n ' - '-i %s --stdio > %s') % (fake_file, fake_file))) - - @mock.patch.object(misc, 'GetInsideChrootPath') - @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand') - def test_generate_perf_report_files_debug(self, mock_chrootruncmd, - mock_getpath): - fake_file = '/usr/chromeos/chroot/tmp/results/fake_file' - self.result.perf_data_files = ['/tmp/results/perf.data'] - self.result.board = 'lumpy' - mock_getpath.return_value = fake_file - self.result.ce.ChrootRunCommand = mock_chrootruncmd - mock_chrootruncmd.return_value = 0 - # Debug path found - self.result.label.debug_path = '/tmp/debug' - tmp = self.result.GeneratePerfReportFiles() - self.assertEqual(tmp, ['/tmp/chromeos/chroot%s' % fake_file]) - self.assertEqual(mock_chrootruncmd.call_args_list[0][0], - (self.result.chromeos_root, - ('/usr/sbin/perf report -n --symfs /tmp/debug ' - '--vmlinux /tmp/debug/usr/lib/debug/boot/vmlinux ' - '-i %s --stdio > %s') % (fake_file, fake_file))) - - @mock.patch.object(misc, 'GetOutsideChrootPath') - def test_populate_from_run(self, mock_getpath): - def FakeGetResultsDir(): - self.callGetResultsDir = True - return '/tmp/results_dir' - - def FakeGetResultsFile(): - self.callGetResultsFile = True - return [] - - def FakeGetPerfDataFiles(): - self.callGetPerfDataFiles = True - return [] - - def FakeGetPerfReportFiles(): - self.callGetPerfReportFiles = True - return [] - - def FakeGetTurbostatFile(): - self.callGetTurbostatFile = True - return [] - - def FakeGetCpustatsFile(): - self.callGetCpustatsFile = True - return [] - - def FakeGetTopFile(): - self.callGetTopFile = True - return [] - - def FakeGetCpuinfoFile(): - self.callGetCpuinfoFile = True - return [] - - def FakeGetWaitTimeFile(): - self.callGetWaitTimeFile = True - return [] - - def FakeProcessResults(show_results=False): - if show_results: - pass - self.callProcessResults = True - - if mock_getpath: - pass - mock.get_path = '/tmp/chromeos/tmp/results_dir' - - self.callGetResultsDir = False - self.callGetResultsFile = False - self.callGetPerfDataFiles = False - self.callGetPerfReportFiles = False - self.callGetTurbostatFile = False - self.callGetCpustatsFile = False - self.callGetTopFile = False - self.callGetCpuinfoFile = False - self.callGetWaitTimeFile = False - self.callProcessResults = False - - self.result.GetResultsDir = FakeGetResultsDir - self.result.GetResultsFile = FakeGetResultsFile - self.result.GetPerfDataFiles = FakeGetPerfDataFiles - self.result.GeneratePerfReportFiles = FakeGetPerfReportFiles - self.result.GetTurbostatFile = FakeGetTurbostatFile - self.result.GetCpustatsFile = FakeGetCpustatsFile - self.result.GetTopFile = FakeGetTopFile - 
self.result.GetCpuinfoFile = FakeGetCpuinfoFile - self.result.GetWaitTimeFile = FakeGetWaitTimeFile - self.result.ProcessResults = FakeProcessResults - - self.result.PopulateFromRun(OUTPUT, '', 0, 'test', 'telemetry_Crosperf', - 'chrome') - self.assertTrue(self.callGetResultsDir) - self.assertTrue(self.callGetResultsFile) - self.assertTrue(self.callGetPerfDataFiles) - self.assertTrue(self.callGetPerfReportFiles) - self.assertTrue(self.callGetTurbostatFile) - self.assertTrue(self.callGetCpustatsFile) - self.assertTrue(self.callGetTopFile) - self.assertTrue(self.callGetCpuinfoFile) - self.assertTrue(self.callGetWaitTimeFile) - self.assertTrue(self.callProcessResults) - - def FakeGetKeyvals(self, show_all=False): - if show_all: - return {'first_time': 680, 'Total': 10} - else: - return {'Total': 10} - - def test_process_results(self): - def FakeGatherPerfResults(): - self.callGatherPerfResults = True - - def FakeGetSamples(): - return (1, 'samples') - - # Test 1 - self.callGatherPerfResults = False - - self.result.GetKeyvals = self.FakeGetKeyvals - self.result.GatherPerfResults = FakeGatherPerfResults - - self.result.retval = 0 - self.result.ProcessResults() - self.assertTrue(self.callGatherPerfResults) - self.assertEqual(len(self.result.keyvals), 2) - self.assertEqual(self.result.keyvals, {'Total': 10, 'retval': 0}) - - # Test 2 - self.result.retval = 1 - self.result.ProcessResults() - self.assertEqual(len(self.result.keyvals), 2) - self.assertEqual(self.result.keyvals, {'Total': 10, 'retval': 1}) - - # Test 3 - self.result.cwp_dso = 'chrome' - self.result.retval = 0 - self.result.GetSamples = FakeGetSamples - self.result.ProcessResults() - self.assertEqual(len(self.result.keyvals), 3) - self.assertEqual(self.result.keyvals, { - 'Total': 10, - 'samples': (1, 'samples'), - 'retval': 0 - }) - - # Test 4. Parse output of benchmarks with multiple sotries in histogram - # format - self.result.suite = 'telemetry_Crosperf' - self.result.results_file = [tempfile.mkdtemp() + '/histograms.json'] - with open(self.result.results_file[0], 'w') as f: - f.write(HISTOGRAMSET) - self.result.ProcessResults() - shutil.rmtree(os.path.dirname(self.result.results_file[0])) - # Verify the summary for the story is correct - self.assertEqual( - self.result.keyvals['timeToFirstContentfulPaint__typical'], - [880.000, u'ms_smallerIsBetter']) - # Veirfy the summary for a certain stroy tag is correct - self.assertEqual( - self.result. - keyvals['timeToFirstContentfulPaint__cache_temperature:cold'], - [1000.000, u'ms_smallerIsBetter']) - self.assertEqual( - self.result. 
- keyvals['timeToFirstContentfulPaint__cache_temperature:warm'], - [800.000, u'ms_smallerIsBetter']) - - @mock.patch.object(Result, 'ProcessCpustatsResults') - @mock.patch.object(Result, 'ProcessTurbostatResults') - def test_process_results_with_turbostat_log(self, mock_proc_turbo, - mock_proc_cpustats): - self.result.GetKeyvals = self.FakeGetKeyvals - - self.result.retval = 0 - self.result.turbostat_log_file = '/tmp/turbostat.log' - mock_proc_turbo.return_value = { - 'cpufreq': { - 'all': [1, 2, 3] - }, - 'cputemp': { - 'all': [5.0, 6.0, 7.0] + with mock.patch("builtins.open", return_value=io.StringIO(content)): + samples = self.result.GetSamples() + self.assertEqual(samples, [237210 - 60, u"samples"]) + + def test_get_results_dir(self): + + self.result.out = "" + self.assertRaises(Exception, self.result.GetResultsDir) + + self.result.out = OUTPUT + resdir = self.result.GetResultsDir() + self.assertEqual(resdir, "/tmp/test_that.PO1234567/platform_LibCBench") + + @mock.patch.object(command_executer.CommandExecuter, "RunCommandGeneric") + def test_find_files_in_results_dir(self, mock_runcmd): + + self.result.results_dir = None + res = self.result.FindFilesInResultsDir("-name perf.data") + self.assertEqual(res, "") + + self.result.ce.RunCommand = mock_runcmd + self.result.results_dir = "/tmp/test_results" + mock_runcmd.return_value = [0, "/tmp/test_results/perf.data", ""] + res = self.result.FindFilesInResultsDir("-name perf.data") + self.assertEqual(mock_runcmd.call_count, 1) + self.assertEqual( + mock_runcmd.call_args_list[0][0], + ("find /tmp/test_results -name perf.data",), + ) + self.assertEqual(res, "/tmp/test_results/perf.data") + + mock_runcmd.reset_mock() + mock_runcmd.return_value = [1, "", ""] + self.assertRaises( + Exception, self.result.FindFilesInResultsDir, "-name perf.data" + ) + + @mock.patch.object(Result, "FindFilesInResultsDir") + def test_get_perf_data_files(self, mock_findfiles): + self.args = None + + mock_findfiles.return_value = "line1\nline1\n" + self.result.FindFilesInResultsDir = mock_findfiles + res = self.result.GetPerfDataFiles() + self.assertEqual(res, ["line1", "line1"]) + self.assertEqual( + mock_findfiles.call_args_list[0][0], ("-name perf.data",) + ) + + def test_get_perf_report_files(self): + self.args = None + + def FakeFindFiles(find_args): + self.args = find_args + return "line1\nline1\n" + + self.result.FindFilesInResultsDir = FakeFindFiles + res = self.result.GetPerfReportFiles() + self.assertEqual(res, ["line1", "line1"]) + self.assertEqual(self.args, "-name perf.data.report") + + def test_get_data_measurement_files(self): + self.args = None + + def FakeFindFiles(find_args): + self.args = find_args + return "line1\nline1\n" + + self.result.FindFilesInResultsDir = FakeFindFiles + res = self.result.GetDataMeasurementsFiles() + self.assertEqual(res, ["line1", "line1"]) + self.assertEqual(self.args, "-name perf_measurements") + + @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput") + def test_get_turbostat_file_finds_single_log(self, mock_runcmd): + """Expected behavior when a single log file found.""" + self.result.results_dir = "/tmp/test_results" + self.result.ce.RunCommandWOutput = mock_runcmd + mock_runcmd.return_value = (0, "some/long/path/turbostat.log", "") + found_single_log = self.result.GetTurbostatFile() + self.assertEqual(found_single_log, "some/long/path/turbostat.log") + + @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput") + def test_get_turbostat_file_finds_multiple_logs(self, mock_runcmd): + 
"""Error case when multiple files found.""" + self.result.results_dir = "/tmp/test_results" + self.result.ce.RunCommandWOutput = mock_runcmd + mock_runcmd.return_value = ( + 0, + "some/long/path/turbostat.log\nturbostat.log", + "", + ) + found_first_logs = self.result.GetTurbostatFile() + self.assertEqual(found_first_logs, "some/long/path/turbostat.log") + + @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput") + def test_get_turbostat_file_finds_no_logs(self, mock_runcmd): + """Error case when no log file found.""" + self.result.results_dir = "/tmp/test_results" + self.result.ce.RunCommandWOutput = mock_runcmd + mock_runcmd.return_value = (0, "", "") + found_no_logs = self.result.GetTurbostatFile() + self.assertEqual(found_no_logs, "") + + @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput") + def test_get_turbostat_file_with_failing_find(self, mock_runcmd): + """Error case when file search returns an error.""" + self.result.results_dir = "/tmp/test_results" + mock_runcmd.return_value = (-1, "", "error") + with self.assertRaises(RuntimeError): + self.result.GetTurbostatFile() + + @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput") + def test_get_top_file_finds_single_log(self, mock_runcmd): + """Expected behavior when a single top log file found.""" + self.result.results_dir = "/tmp/test_results" + self.result.ce.RunCommandWOutput = mock_runcmd + mock_runcmd.return_value = (0, "some/long/path/top.log", "") + found_single_log = self.result.GetTopFile() + self.assertEqual(found_single_log, "some/long/path/top.log") + + @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput") + def test_get_top_file_finds_multiple_logs(self, mock_runcmd): + """The case when multiple top files found.""" + self.result.results_dir = "/tmp/test_results" + self.result.ce.RunCommandWOutput = mock_runcmd + mock_runcmd.return_value = (0, "some/long/path/top.log\ntop.log", "") + found_first_logs = self.result.GetTopFile() + self.assertEqual(found_first_logs, "some/long/path/top.log") + + @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput") + def test_get_top_file_finds_no_logs(self, mock_runcmd): + """Error case when no log file found.""" + self.result.results_dir = "/tmp/test_results" + self.result.ce.RunCommandWOutput = mock_runcmd + mock_runcmd.return_value = (0, "", "") + found_no_logs = self.result.GetTopFile() + self.assertEqual(found_no_logs, "") + + @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput") + def test_get_cpuinfo_file_finds_single_log(self, mock_runcmd): + """Expected behavior when a single cpuinfo file found.""" + self.result.results_dir = "/tmp/test_results" + self.result.ce.RunCommandWOutput = mock_runcmd + mock_runcmd.return_value = (0, "some/long/path/cpuinfo.log", "") + found_single_log = self.result.GetCpuinfoFile() + self.assertEqual(found_single_log, "some/long/path/cpuinfo.log") + + @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput") + def test_get_cpustats_file_finds_single_log(self, mock_runcmd): + """Expected behavior when a single log file found.""" + self.result.results_dir = "/tmp/test_results" + self.result.ce.RunCommandWOutput = mock_runcmd + mock_runcmd.return_value = (0, "some/long/path/cpustats.log", "") + found_single_log = self.result.GetCpustatsFile() + self.assertEqual(found_single_log, "some/long/path/cpustats.log") + + @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput") + def 
test_get_cpustats_file_finds_multiple_logs(self, mock_runcmd): + """The case when multiple files found.""" + self.result.results_dir = "/tmp/test_results" + self.result.ce.RunCommandWOutput = mock_runcmd + mock_runcmd.return_value = ( + 0, + "some/long/path/cpustats.log\ncpustats.log", + "", + ) + found_first_logs = self.result.GetCpustatsFile() + self.assertEqual(found_first_logs, "some/long/path/cpustats.log") + + @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput") + def test_get_cpustats_file_finds_no_logs(self, mock_runcmd): + """Error case when no log file found.""" + self.result.results_dir = "/tmp/test_results" + self.result.ce.RunCommandWOutput = mock_runcmd + mock_runcmd.return_value = (0, "", "") + found_no_logs = self.result.GetCpustatsFile() + self.assertEqual(found_no_logs, "") + + def test_verify_perf_data_pid_ok(self): + """Verify perf PID which is present in TOP_DATA.""" + self.result.top_cmds = TOP_DATA + # pid is present in TOP_DATA. + with mock.patch.object( + Result, "ReadPidFromPerfData", return_value=["5713"] + ): + self.result.VerifyPerfDataPID() + + def test_verify_perf_data_pid_fail(self): + """Test perf PID missing in top raises the error.""" + self.result.top_cmds = TOP_DATA + # pid is not in the list of top processes. + with mock.patch.object( + Result, "ReadPidFromPerfData", return_value=["9999"] + ): + with self.assertRaises(PidVerificationError): + self.result.VerifyPerfDataPID() + + @mock.patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" + ) + def test_read_pid_from_perf_data_ok(self, mock_runcmd): + """Test perf header parser, normal flow.""" + self.result.ce.ChrootRunCommandWOutput = mock_runcmd + self.result.perf_data_files = [ + "/tmp/chromeos/chroot/tmp/results/perf.data" + ] + exp_pid = "12345" + mock_runcmd.return_value = (0, PERF_DATA_HEADER.format(pid=exp_pid), "") + pids = self.result.ReadPidFromPerfData() + self.assertEqual(pids, [exp_pid]) + + @mock.patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" + ) + def test_read_pid_from_perf_data_mult_profiles(self, mock_runcmd): + """Test multiple perf.data files with PID.""" + self.result.ce.ChrootRunCommandWOutput = mock_runcmd + # self.result.chromeos_root = '/tmp/chromeos' + self.result.perf_data_files = [ + "/tmp/chromeos/chroot/tmp/results/perf.data.0", + "/tmp/chromeos/chroot/tmp/results/perf.data.1", + ] + # Per-process profiling: '-p <pid>' in the command line and no '-a'. + cmd_line = "# cmdline : /usr/bin/perf record -e instructions -p {pid}" + exp_perf_pids = ["1111", "2222"] + mock_runcmd.side_effect = [ + (0, cmd_line.format(pid=exp_perf_pids[0]), ""), + (0, cmd_line.format(pid=exp_perf_pids[1]), ""), + ] + pids = self.result.ReadPidFromPerfData() + self.assertEqual(pids, exp_perf_pids) + + @mock.patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" + ) + def test_read_pid_from_perf_data_no_pid(self, mock_runcmd): + """Test perf.data without PID.""" + self.result.ce.ChrootRunCommandWOutput = mock_runcmd + self.result.perf_data_files = [ + "/tmp/chromeos/chroot/tmp/results/perf.data" + ] + cmd_line = "# cmdline : /usr/bin/perf record -e instructions" + mock_runcmd.return_value = (0, cmd_line, "") + pids = self.result.ReadPidFromPerfData() + # pids is empty. 
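+ # (no '-p <pid>' in the recorded cmdline, so no PID can be parsed)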
+ self.assertEqual(pids, []) + + @mock.patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" + ) + def test_read_pid_from_perf_data_system_wide(self, mock_runcmd): + """Test reading from system-wide profile with PID.""" + self.result.ce.ChrootRunCommandWOutput = mock_runcmd + self.result.perf_data_files = [ + "/tmp/chromeos/chroot/tmp/results/perf.data" + ] + # There is '-p <pid>' in command line but it's still system-wide: '-a'. + cmd_line = "# cmdline : /usr/bin/perf record -e instructions -a -p 1234" + mock_runcmd.return_value = (0, cmd_line, "") + pids = self.result.ReadPidFromPerfData() + # pids should be empty since it's not a per-process profiling. + self.assertEqual(pids, []) + + @mock.patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" + ) + def test_read_pid_from_perf_data_read_fail(self, mock_runcmd): + """Failure to read perf.data raises the error.""" + self.result.ce.ChrootRunCommandWOutput = mock_runcmd + self.result.perf_data_files = [ + "/tmp/chromeos/chroot/tmp/results/perf.data" + ] + # Error status of the profile read. + mock_runcmd.return_value = (1, "", "") + with self.assertRaises(PerfDataReadError): + self.result.ReadPidFromPerfData() + + @mock.patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" + ) + def test_read_pid_from_perf_data_fail(self, mock_runcmd): + """Failure to find cmdline in perf.data header raises the error.""" + self.result.ce.ChrootRunCommandWOutput = mock_runcmd + self.result.perf_data_files = [ + "/tmp/chromeos/chroot/tmp/results/perf.data" + ] + # Empty output. + mock_runcmd.return_value = (0, "", "") + with self.assertRaises(PerfDataReadError): + self.result.ReadPidFromPerfData() + + def test_process_turbostat_results_with_valid_data(self): + """Normal case when log exists and contains valid data.""" + self.result.turbostat_log_file = "/tmp/somelogfile.log" + with mock.patch( + "builtins.open", mock.mock_open(read_data=TURBOSTAT_LOG_OUTPUT) + ) as mo: + cpustats = self.result.ProcessTurbostatResults() + # Check that the log got opened and data were read/parsed. + calls = [mock.call("/tmp/somelogfile.log")] + mo.assert_has_calls(calls) + self.assertEqual(cpustats, TURBOSTAT_DATA) + + def test_process_turbostat_results_from_empty_file(self): + """Error case when log exists but file is empty.""" + self.result.turbostat_log_file = "/tmp/emptylogfile.log" + with mock.patch("builtins.open", mock.mock_open(read_data="")) as mo: + cpustats = self.result.ProcessTurbostatResults() + # Check that the log got opened and parsed successfully and empty data + # returned. + calls = [mock.call("/tmp/emptylogfile.log")] + mo.assert_has_calls(calls) + self.assertEqual(cpustats, {}) + + def test_process_turbostat_results_when_file_doesnt_exist(self): + """Error case when file does not exist.""" + nonexistinglog = "/tmp/1" + while os.path.exists(nonexistinglog): + # Extend file path if it happens to exist. + nonexistinglog = os.path.join(nonexistinglog, "1") + self.result.turbostat_log_file = nonexistinglog + # Allow the tested function to call a 'real' open and hopefully crash. + with self.assertRaises(IOError): + self.result.ProcessTurbostatResults() + + def test_process_cpustats_results_with_uniq_data(self): + """Process cpustats log which has freq unique to each core. + + Testing normal case when frequency data vary between + different cores. + Expecting that data for all cores will be present in + returned cpustats. 
+ """ + self.result.cpustats_log_file = "/tmp/somelogfile.log" + with mock.patch( + "builtins.open", mock.mock_open(read_data=CPUSTATS_UNIQ_OUTPUT) + ) as mo: + cpustats = self.result.ProcessCpustatsResults() + # Check that the log got opened and data were read/parsed. + calls = [mock.call("/tmp/somelogfile.log")] + mo.assert_has_calls(calls) + self.assertEqual(cpustats, CPUSTATS_UNIQ_DATA) + + def test_process_cpustats_results_with_dupl_data(self): + """Process cpustats log where cores have duplicate freq. + + Testing normal case when frequency data on some cores + are duplicated. + Expecting that duplicated data is discarded in + returned cpustats. + """ + self.result.cpustats_log_file = "/tmp/somelogfile.log" + with mock.patch( + "builtins.open", mock.mock_open(read_data=CPUSTATS_DUPL_OUTPUT) + ) as mo: + cpustats = self.result.ProcessCpustatsResults() + # Check that the log got opened and data were read/parsed. + calls = [mock.call("/tmp/somelogfile.log")] + mo.assert_has_calls(calls) + self.assertEqual(cpustats, CPUSTATS_DUPL_DATA) + + def test_process_cpustats_results_from_empty_file(self): + """Error case when log exists but file is empty.""" + self.result.cpustats_log_file = "/tmp/emptylogfile.log" + with mock.patch("builtins.open", mock.mock_open(read_data="")) as mo: + cpustats = self.result.ProcessCpustatsResults() + # Check that the log got opened and parsed successfully and empty data + # returned. + calls = [mock.call("/tmp/emptylogfile.log")] + mo.assert_has_calls(calls) + self.assertEqual(cpustats, {}) + + def test_process_top_results_with_valid_data(self): + """Process top log with valid data.""" + + self.result.top_log_file = "/tmp/fakelogfile.log" + with mock.patch( + "builtins.open", mock.mock_open(read_data=TOP_LOG) + ) as mo: + topproc = self.result.ProcessTopResults() + # Check that the log got opened and data were read/parsed. + calls = [mock.call("/tmp/fakelogfile.log")] + mo.assert_has_calls(calls) + self.assertEqual(topproc, TOP_DATA) + + def test_process_top_results_from_empty_file(self): + """Error case when log exists but file is empty.""" + self.result.top_log_file = "/tmp/emptylogfile.log" + with mock.patch("builtins.open", mock.mock_open(read_data="")) as mo: + topcalls = self.result.ProcessTopResults() + # Check that the log got opened and parsed successfully and empty data + # returned. 
+ calls = [mock.call("/tmp/emptylogfile.log")] + mo.assert_has_calls(calls) + self.assertEqual(topcalls, []) + + def test_format_string_top_cmds(self): + """Test formatted string with top commands.""" + self.result.top_cmds = [ + { + "cmd": "chrome-111", + "cpu_use_avg": 119.753453465, + "count": 44444, + "top5_cpu_use": [222.8, 217.9, 217.8, 191.0, 189.9], + }, + { + "cmd": "chrome-222", + "cpu_use_avg": 100, + "count": 33333, + "top5_cpu_use": [200.0, 195.0, 190.0, 185.0, 180.0], + }, + { + "cmd": "irq/230-cros-ec", + "cpu_use_avg": 10.000000000000001, + "count": 1000, + "top5_cpu_use": [11.5, 11.4, 11.3, 11.2, 11.1], + }, + { + "cmd": "powerd", + "cpu_use_avg": 2.0, + "count": 2, + "top5_cpu_use": [3.0, 1.0], + }, + { + "cmd": "cmd3", + "cpu_use_avg": 1.0, + "count": 1, + "top5_cpu_use": [1.0], + }, + { + "cmd": "cmd4", + "cpu_use_avg": 1.0, + "count": 1, + "top5_cpu_use": [1.0], + }, + { + "cmd": "cmd5", + "cpu_use_avg": 1.0, + "count": 1, + "top5_cpu_use": [1.0], + }, + { + "cmd": "cmd6_not_for_print", + "cpu_avg": 1.0, + "count": 1, + "top5": [1.0], + }, + ] + form_str = self.result.FormatStringTopCommands() + self.assertEqual( + form_str, + "\n".join( + [ + "Top commands with highest CPU usage:", + " COMMAND AVG CPU% COUNT HIGHEST 5", + "-" * 50, + " chrome-111 119.75 44444 " + "[222.8, 217.9, 217.8, 191.0, 189.9]", + " chrome-222 100.00 33333 " + "[200.0, 195.0, 190.0, 185.0, 180.0]", + " irq/230-cros-ec 10.00 1000 " + "[11.5, 11.4, 11.3, 11.2, 11.1]", + " powerd 2.00 2 [3.0, 1.0]", + " cmd3 1.00 1 [1.0]", + " cmd4 1.00 1 [1.0]", + " cmd5 1.00 1 [1.0]", + "-" * 50, + ] + ), + ) + + def test_format_string_top_calls_no_data(self): + """Test formatted string of top with no data.""" + self.result.top_cmds = [] + form_str = self.result.FormatStringTopCommands() + self.assertEqual( + form_str, + "\n".join( + [ + "Top commands with highest CPU usage:", + " COMMAND AVG CPU% COUNT HIGHEST 5", + "-" * 50, + "[NO DATA FROM THE TOP LOG]", + "-" * 50, + ] + ), + ) + + @mock.patch.object(misc, "GetInsideChrootPath") + @mock.patch.object(command_executer.CommandExecuter, "ChrootRunCommand") + def test_generate_perf_report_files(self, mock_chrootruncmd, mock_getpath): + fake_file = "/usr/chromeos/chroot/tmp/results/fake_file" + self.result.perf_data_files = ["/tmp/results/perf.data"] + self.result.board = "lumpy" + mock_getpath.return_value = fake_file + self.result.ce.ChrootRunCommand = mock_chrootruncmd + mock_chrootruncmd.return_value = 0 + # Debug path not found + self.result.label.debug_path = "" + tmp = self.result.GeneratePerfReportFiles() + self.assertEqual(tmp, ["/tmp/chromeos/chroot%s" % fake_file]) + self.assertEqual( + mock_chrootruncmd.call_args_list[0][0], + ( + self.result.chromeos_root, + ("/usr/sbin/perf report -n " "-i %s --stdio > %s") + % (fake_file, fake_file), + ), + ) + + @mock.patch.object(misc, "GetInsideChrootPath") + @mock.patch.object(command_executer.CommandExecuter, "ChrootRunCommand") + def test_generate_perf_report_files_debug( + self, mock_chrootruncmd, mock_getpath + ): + fake_file = "/usr/chromeos/chroot/tmp/results/fake_file" + self.result.perf_data_files = ["/tmp/results/perf.data"] + self.result.board = "lumpy" + mock_getpath.return_value = fake_file + self.result.ce.ChrootRunCommand = mock_chrootruncmd + mock_chrootruncmd.return_value = 0 + # Debug path found + self.result.label.debug_path = "/tmp/debug" + tmp = self.result.GeneratePerfReportFiles() + self.assertEqual(tmp, ["/tmp/chromeos/chroot%s" % fake_file]) + self.assertEqual( + 
mock_chrootruncmd.call_args_list[0][0], + ( + self.result.chromeos_root, + ( + "/usr/sbin/perf report -n --symfs /tmp/debug " + "--vmlinux /tmp/debug/usr/lib/debug/boot/vmlinux " + "-i %s --stdio > %s" + ) + % (fake_file, fake_file), + ), + ) + + @mock.patch.object(misc, "GetOutsideChrootPath") + def test_populate_from_run(self, mock_getpath): + def FakeGetResultsDir(): + self.callGetResultsDir = True + return "/tmp/results_dir" + + def FakeGetResultsFile(): + self.callGetResultsFile = True + return [] + + def FakeGetPerfDataFiles(): + self.callGetPerfDataFiles = True + return [] + + def FakeGetPerfReportFiles(): + self.callGetPerfReportFiles = True + return [] + + def FakeGetTurbostatFile(): + self.callGetTurbostatFile = True + return [] + + def FakeGetCpustatsFile(): + self.callGetCpustatsFile = True + return [] + + def FakeGetTopFile(): + self.callGetTopFile = True + return [] + + def FakeGetCpuinfoFile(): + self.callGetCpuinfoFile = True + return [] + + def FakeGetWaitTimeFile(): + self.callGetWaitTimeFile = True + return [] + + def FakeProcessResults(show_results=False): + if show_results: + pass + self.callProcessResults = True + + if mock_getpath: + pass + mock.get_path = "/tmp/chromeos/tmp/results_dir" + + self.callGetResultsDir = False + self.callGetResultsFile = False + self.callGetPerfDataFiles = False + self.callGetPerfReportFiles = False + self.callGetTurbostatFile = False + self.callGetCpustatsFile = False + self.callGetTopFile = False + self.callGetCpuinfoFile = False + self.callGetWaitTimeFile = False + self.callProcessResults = False + + self.result.GetResultsDir = FakeGetResultsDir + self.result.GetResultsFile = FakeGetResultsFile + self.result.GetPerfDataFiles = FakeGetPerfDataFiles + self.result.GeneratePerfReportFiles = FakeGetPerfReportFiles + self.result.GetTurbostatFile = FakeGetTurbostatFile + self.result.GetCpustatsFile = FakeGetCpustatsFile + self.result.GetTopFile = FakeGetTopFile + self.result.GetCpuinfoFile = FakeGetCpuinfoFile + self.result.GetWaitTimeFile = FakeGetWaitTimeFile + self.result.ProcessResults = FakeProcessResults + + self.result.PopulateFromRun( + OUTPUT, "", 0, "test", "telemetry_Crosperf", "chrome" + ) + self.assertTrue(self.callGetResultsDir) + self.assertTrue(self.callGetResultsFile) + self.assertTrue(self.callGetPerfDataFiles) + self.assertTrue(self.callGetPerfReportFiles) + self.assertTrue(self.callGetTurbostatFile) + self.assertTrue(self.callGetCpustatsFile) + self.assertTrue(self.callGetTopFile) + self.assertTrue(self.callGetCpuinfoFile) + self.assertTrue(self.callGetWaitTimeFile) + self.assertTrue(self.callProcessResults) + + def FakeGetKeyvals(self, show_all=False): + if show_all: + return {"first_time": 680, "Total": 10} + else: + return {"Total": 10} + + def test_process_results(self): + def FakeGatherPerfResults(): + self.callGatherPerfResults = True + + def FakeGetSamples(): + return (1, "samples") + + # Test 1 + self.callGatherPerfResults = False + + self.result.GetKeyvals = self.FakeGetKeyvals + self.result.GatherPerfResults = FakeGatherPerfResults + + self.result.retval = 0 + self.result.ProcessResults() + self.assertTrue(self.callGatherPerfResults) + self.assertEqual(len(self.result.keyvals), 2) + self.assertEqual(self.result.keyvals, {"Total": 10, "retval": 0}) + + # Test 2 + self.result.retval = 1 + self.result.ProcessResults() + self.assertEqual(len(self.result.keyvals), 2) + self.assertEqual(self.result.keyvals, {"Total": 10, "retval": 1}) + + # Test 3 + self.result.cwp_dso = "chrome" + self.result.retval = 0 + 
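+ # With cwp_dso set, ProcessResults is expected to add a 'samples' keyval via GetSamples.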
self.result.GetSamples = FakeGetSamples + self.result.ProcessResults() + self.assertEqual(len(self.result.keyvals), 3) + self.assertEqual( + self.result.keyvals, + {"Total": 10, "samples": (1, "samples"), "retval": 0}, + ) + + # Test 4. Parse output of benchmarks with multiple stories in histogram + # format + self.result.suite = "telemetry_Crosperf" + self.result.results_file = [tempfile.mkdtemp() + "/histograms.json"] + with open(self.result.results_file[0], "w") as f: + f.write(HISTOGRAMSET) + self.result.ProcessResults() + shutil.rmtree(os.path.dirname(self.result.results_file[0])) + # Verify the summary for the story is correct + self.assertEqual( + self.result.keyvals["timeToFirstContentfulPaint__typical"], + [880.000, u"ms_smallerIsBetter"], + ) + # Verify the summary for a certain story tag is correct + self.assertEqual( + self.result.keyvals[ + "timeToFirstContentfulPaint__cache_temperature:cold" + ], + [1000.000, u"ms_smallerIsBetter"], + ) + self.assertEqual( + self.result.keyvals[ + "timeToFirstContentfulPaint__cache_temperature:warm" + ], + [800.000, u"ms_smallerIsBetter"], + ) + + @mock.patch.object(Result, "ProcessCpustatsResults") + @mock.patch.object(Result, "ProcessTurbostatResults") + def test_process_results_with_turbostat_log( + self, mock_proc_turbo, mock_proc_cpustats + ): + self.result.GetKeyvals = self.FakeGetKeyvals + + self.result.retval = 0 + self.result.turbostat_log_file = "/tmp/turbostat.log" + mock_proc_turbo.return_value = { + "cpufreq": {"all": [1, 2, 3]}, + "cputemp": {"all": [5.0, 6.0, 7.0]}, } - } - self.result.ProcessResults() - mock_proc_turbo.assert_has_calls([mock.call()]) - mock_proc_cpustats.assert_not_called() - self.assertEqual(len(self.result.keyvals), 8) - self.assertEqual( - self.result.keyvals, { - 'Total': 10, - 'cpufreq_all_avg': 2, - 'cpufreq_all_max': 3, - 'cpufreq_all_min': 1, - 'cputemp_all_avg': 6.0, - 'cputemp_all_min': 5.0, - 'cputemp_all_max': 7.0, - 'retval': 0 - }) - - @mock.patch.object(Result, 'ProcessCpustatsResults') - @mock.patch.object(Result, 'ProcessTurbostatResults') - def test_process_results_with_cpustats_log(self, mock_proc_turbo, - mock_proc_cpustats): - self.result.GetKeyvals = self.FakeGetKeyvals - - self.result.retval = 0 - self.result.cpustats_log_file = '/tmp/cpustats.log' - mock_proc_cpustats.return_value = { - 'cpufreq': { - 'cpu0': [100, 100, 100], - 'cpu1': [4, 5, 6] - }, - 'cputemp': { - 'little': [20.2, 20.2, 20.2], - 'big': [55.2, 66.1, 77.3] + self.result.ProcessResults() + mock_proc_turbo.assert_has_calls([mock.call()]) + mock_proc_cpustats.assert_not_called() + self.assertEqual(len(self.result.keyvals), 8) + self.assertEqual( + self.result.keyvals, + { + "Total": 10, + "cpufreq_all_avg": 2, + "cpufreq_all_max": 3, + "cpufreq_all_min": 1, + "cputemp_all_avg": 6.0, + "cputemp_all_min": 5.0, + "cputemp_all_max": 7.0, + "retval": 0, + }, + ) + + @mock.patch.object(Result, "ProcessCpustatsResults") + @mock.patch.object(Result, "ProcessTurbostatResults") + def test_process_results_with_cpustats_log( + self, mock_proc_turbo, mock_proc_cpustats + ): + self.result.GetKeyvals = self.FakeGetKeyvals + + self.result.retval = 0 + self.result.cpustats_log_file = "/tmp/cpustats.log" + mock_proc_cpustats.return_value = { + "cpufreq": {"cpu0": [100, 100, 100], "cpu1": [4, 5, 6]}, + "cputemp": { + "little": [20.2, 20.2, 20.2], + "big": [55.2, 66.1, 77.3], + }, } - } - self.result.ProcessResults() - mock_proc_turbo.assert_not_called() - mock_proc_cpustats.assert_has_calls([mock.call()]) - 
self.assertEqual(len(self.result.keyvals), 10) - self.assertEqual( - self.result.keyvals, { - 'Total': 10, - 'cpufreq_cpu0_avg': 100, - 'cpufreq_cpu1_avg': 5, - 'cpufreq_cpu1_max': 6, - 'cpufreq_cpu1_min': 4, - 'cputemp_big_avg': 66.2, - 'cputemp_big_max': 77.3, - 'cputemp_big_min': 55.2, - 'cputemp_little_avg': 20.2, - 'retval': 0 - }) - - @mock.patch.object(Result, 'ProcessCpustatsResults') - @mock.patch.object(Result, 'ProcessTurbostatResults') - def test_process_results_with_turbostat_and_cpustats_logs( - self, mock_proc_turbo, mock_proc_cpustats): - self.result.GetKeyvals = self.FakeGetKeyvals - - self.result.retval = 0 - self.result.turbostat_log_file = '/tmp/turbostat.log' - self.result.cpustats_log_file = '/tmp/cpustats.log' - mock_proc_turbo.return_value = { - 'cpufreq': { - 'all': [1, 2, 3] - }, - 'cputemp': { - 'all': [5.0, 6.0, 7.0] + self.result.ProcessResults() + mock_proc_turbo.assert_not_called() + mock_proc_cpustats.assert_has_calls([mock.call()]) + self.assertEqual(len(self.result.keyvals), 10) + self.assertEqual( + self.result.keyvals, + { + "Total": 10, + "cpufreq_cpu0_avg": 100, + "cpufreq_cpu1_avg": 5, + "cpufreq_cpu1_max": 6, + "cpufreq_cpu1_min": 4, + "cputemp_big_avg": 66.2, + "cputemp_big_max": 77.3, + "cputemp_big_min": 55.2, + "cputemp_little_avg": 20.2, + "retval": 0, + }, + ) + + @mock.patch.object(Result, "ProcessCpustatsResults") + @mock.patch.object(Result, "ProcessTurbostatResults") + def test_process_results_with_turbostat_and_cpustats_logs( + self, mock_proc_turbo, mock_proc_cpustats + ): + self.result.GetKeyvals = self.FakeGetKeyvals + + self.result.retval = 0 + self.result.turbostat_log_file = "/tmp/turbostat.log" + self.result.cpustats_log_file = "/tmp/cpustats.log" + mock_proc_turbo.return_value = { + "cpufreq": {"all": [1, 2, 3]}, + "cputemp": {"all": [5.0, 6.0, 7.0]}, } - } - self.result.ProcessResults() - mock_proc_turbo.assert_has_calls([mock.call()]) - mock_proc_cpustats.assert_not_called() - self.assertEqual(len(self.result.keyvals), 8) - self.assertEqual( - self.result.keyvals, { - 'Total': 10, - 'cpufreq_all_avg': 2, - 'cpufreq_all_max': 3, - 'cpufreq_all_min': 1, - 'cputemp_all_avg': 6.0, - 'cputemp_all_min': 5.0, - 'cputemp_all_max': 7.0, - 'retval': 0 - }) - - @mock.patch.object(Result, 'ProcessCpustatsResults') - @mock.patch.object(Result, 'ProcessTurbostatResults') - def test_process_results_without_cpu_data(self, mock_proc_turbo, - mock_proc_cpustats): - self.result.GetKeyvals = self.FakeGetKeyvals - - self.result.retval = 0 - self.result.turbostat_log_file = '' - self.result.cpustats_log_file = '' - self.result.ProcessResults() - mock_proc_turbo.assert_not_called() - mock_proc_cpustats.assert_not_called() - self.assertEqual(len(self.result.keyvals), 2) - self.assertEqual(self.result.keyvals, {'Total': 10, 'retval': 0}) - - @mock.patch.object(misc, 'GetInsideChrootPath') - @mock.patch.object(command_executer.CommandExecuter, - 'ChrootRunCommandWOutput') - def test_populate_from_cache_dir(self, mock_runchrootcmd, mock_getpath): - - # pylint: disable=redefined-builtin - def FakeMkdtemp(dir=None): - if dir: - pass - return self.tmpdir - - def FakeGetSamples(): - return [1, u'samples'] - - current_path = os.getcwd() - cache_dir = os.path.join(current_path, 'test_cache/test_input') - self.result.ce = command_executer.GetCommandExecuter(log_level='average') - self.result.ce.ChrootRunCommandWOutput = mock_runchrootcmd - mock_runchrootcmd.return_value = [ - '', ('%s,PASS\n%s/\telemetry_Crosperf,PASS\n') % (TMP_DIR1, TMP_DIR1), - '' - ] - 
mock_getpath.return_value = TMP_DIR1 - self.tmpdir = tempfile.mkdtemp() - save_real_mkdtemp = tempfile.mkdtemp - tempfile.mkdtemp = FakeMkdtemp - - self.result.PopulateFromCacheDir(cache_dir, 'sunspider', - 'telemetry_Crosperf', '') - self.assertEqual( - self.result.keyvals, { - u'Total__Total': [444.0, u'ms'], - u'regexp-dna__regexp-dna': [16.2, u'ms'], - u'telemetry_page_measurement_results__num_failed': [0, u'count'], - u'telemetry_page_measurement_results__num_errored': [0, u'count'], - u'string-fasta__string-fasta': [23.2, u'ms'], - u'crypto-sha1__crypto-sha1': [11.6, u'ms'], - u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte': - [3.2, u'ms'], - u'access-nsieve__access-nsieve': [7.9, u'ms'], - u'bitops-nsieve-bits__bitops-nsieve-bits': [9.4, u'ms'], - u'string-validate-input__string-validate-input': [19.3, u'ms'], - u'3d-raytrace__3d-raytrace': [24.7, u'ms'], - u'3d-cube__3d-cube': [28.0, u'ms'], - u'string-unpack-code__string-unpack-code': [46.7, u'ms'], - u'date-format-tofte__date-format-tofte': [26.3, u'ms'], - u'math-partial-sums__math-partial-sums': [22.0, u'ms'], - '\telemetry_Crosperf': ['PASS', ''], - u'crypto-aes__crypto-aes': [15.2, u'ms'], - u'bitops-bitwise-and__bitops-bitwise-and': [8.4, u'ms'], - u'crypto-md5__crypto-md5': [10.5, u'ms'], - u'string-tagcloud__string-tagcloud': [52.8, u'ms'], - u'access-nbody__access-nbody': [8.5, u'ms'], - 'retval': 0, - u'math-spectral-norm__math-spectral-norm': [6.6, u'ms'], - u'math-cordic__math-cordic': [8.7, u'ms'], - u'access-binary-trees__access-binary-trees': [4.5, u'ms'], - u'controlflow-recursive__controlflow-recursive': [4.4, u'ms'], - u'access-fannkuch__access-fannkuch': [17.8, u'ms'], - u'string-base64__string-base64': [16.0, u'ms'], - u'date-format-xparb__date-format-xparb': [20.9, u'ms'], - u'3d-morph__3d-morph': [22.1, u'ms'], - u'bitops-bits-in-byte__bitops-bits-in-byte': [9.1, u'ms'] - }) - - self.result.GetSamples = FakeGetSamples - self.result.PopulateFromCacheDir(cache_dir, 'sunspider', - 'telemetry_Crosperf', 'chrome') - self.assertEqual( - self.result.keyvals, { - u'Total__Total': [444.0, u'ms'], - u'regexp-dna__regexp-dna': [16.2, u'ms'], - u'telemetry_page_measurement_results__num_failed': [0, u'count'], - u'telemetry_page_measurement_results__num_errored': [0, u'count'], - u'string-fasta__string-fasta': [23.2, u'ms'], - u'crypto-sha1__crypto-sha1': [11.6, u'ms'], - u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte': - [3.2, u'ms'], - u'access-nsieve__access-nsieve': [7.9, u'ms'], - u'bitops-nsieve-bits__bitops-nsieve-bits': [9.4, u'ms'], - u'string-validate-input__string-validate-input': [19.3, u'ms'], - u'3d-raytrace__3d-raytrace': [24.7, u'ms'], - u'3d-cube__3d-cube': [28.0, u'ms'], - u'string-unpack-code__string-unpack-code': [46.7, u'ms'], - u'date-format-tofte__date-format-tofte': [26.3, u'ms'], - u'math-partial-sums__math-partial-sums': [22.0, u'ms'], - '\telemetry_Crosperf': ['PASS', ''], - u'crypto-aes__crypto-aes': [15.2, u'ms'], - u'bitops-bitwise-and__bitops-bitwise-and': [8.4, u'ms'], - u'crypto-md5__crypto-md5': [10.5, u'ms'], - u'string-tagcloud__string-tagcloud': [52.8, u'ms'], - u'access-nbody__access-nbody': [8.5, u'ms'], - 'retval': 0, - u'math-spectral-norm__math-spectral-norm': [6.6, u'ms'], - u'math-cordic__math-cordic': [8.7, u'ms'], - u'access-binary-trees__access-binary-trees': [4.5, u'ms'], - u'controlflow-recursive__controlflow-recursive': [4.4, u'ms'], - u'access-fannkuch__access-fannkuch': [17.8, u'ms'], - u'string-base64__string-base64': [16.0, u'ms'], - 
u'date-format-xparb__date-format-xparb': [20.9, u'ms'], - u'3d-morph__3d-morph': [22.1, u'ms'], - u'bitops-bits-in-byte__bitops-bits-in-byte': [9.1, u'ms'], - u'samples': [1, u'samples'] - }) - - # Clean up after test. - tempfile.mkdtemp = save_real_mkdtemp - command = 'rm -Rf %s' % self.tmpdir - self.result.ce.RunCommand(command) - - @mock.patch.object(misc, 'GetRoot') - @mock.patch.object(command_executer.CommandExecuter, 'RunCommand') - def test_cleanup(self, mock_runcmd, mock_getroot): - - # Test 1. 'rm_chroot_tmp' is True; self.results_dir exists; - # self.temp_dir exists; results_dir name contains 'test_that_results_'. - mock_getroot.return_value = ['/tmp/tmp_AbcXyz', 'test_that_results_fake'] - self.result.ce.RunCommand = mock_runcmd - self.result.results_dir = 'test_results_dir' - self.result.temp_dir = 'testtemp_dir' - self.result.CleanUp(True) - self.assertEqual(mock_getroot.call_count, 1) - self.assertEqual(mock_runcmd.call_count, 2) - self.assertEqual(mock_runcmd.call_args_list[0][0], - ('rm -rf test_results_dir', )) - self.assertEqual(mock_runcmd.call_args_list[1][0], - ('rm -rf testtemp_dir', )) - - # Test 2. Same, except ath results_dir name does not contain - # 'test_that_results_' - mock_getroot.reset_mock() - mock_runcmd.reset_mock() - mock_getroot.return_value = ['/tmp/tmp_AbcXyz', 'other_results_fake'] - self.result.ce.RunCommand = mock_runcmd - self.result.results_dir = 'test_results_dir' - self.result.temp_dir = 'testtemp_dir' - self.result.CleanUp(True) - self.assertEqual(mock_getroot.call_count, 1) - self.assertEqual(mock_runcmd.call_count, 2) - self.assertEqual(mock_runcmd.call_args_list[0][0], - ('rm -rf /tmp/tmp_AbcXyz', )) - self.assertEqual(mock_runcmd.call_args_list[1][0], - ('rm -rf testtemp_dir', )) - - # Test 3. mock_getroot returns nothing; 'rm_chroot_tmp' is False. - mock_getroot.reset_mock() - mock_runcmd.reset_mock() - self.result.CleanUp(False) - self.assertEqual(mock_getroot.call_count, 0) - self.assertEqual(mock_runcmd.call_count, 1) - self.assertEqual(mock_runcmd.call_args_list[0][0], - ('rm -rf testtemp_dir', )) - - # Test 4. 'rm_chroot_tmp' is True, but result_dir & temp_dir are None. 
- mock_getroot.reset_mock() - mock_runcmd.reset_mock() - self.result.results_dir = None - self.result.temp_dir = None - self.result.CleanUp(True) - self.assertEqual(mock_getroot.call_count, 0) - self.assertEqual(mock_runcmd.call_count, 0) - - @mock.patch.object(misc, 'GetInsideChrootPath') - @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand') - def test_store_to_cache_dir(self, mock_chrootruncmd, mock_getpath): - def FakeMkdtemp(directory=''): - if directory: - pass - return self.tmpdir - - if mock_chrootruncmd or mock_getpath: - pass - current_path = os.getcwd() - cache_dir = os.path.join(current_path, 'test_cache/test_output') - - self.result.ce = command_executer.GetCommandExecuter(log_level='average') - self.result.out = OUTPUT - self.result.err = error - self.result.retval = 0 - self.tmpdir = tempfile.mkdtemp() - if not os.path.exists(self.tmpdir): - os.makedirs(self.tmpdir) - self.result.results_dir = os.path.join(os.getcwd(), 'test_cache') - save_real_mkdtemp = tempfile.mkdtemp - tempfile.mkdtemp = FakeMkdtemp - - mock_mm = machine_manager.MockMachineManager('/tmp/chromeos_root', 0, - 'average', '') - mock_mm.machine_checksum_string['mock_label'] = 'fake_machine_checksum123' - - mock_keylist = ['key1', 'key2', 'key3'] - test_flag.SetTestMode(True) - self.result.StoreToCacheDir(cache_dir, mock_mm, mock_keylist) - - # Check that the correct things were written to the 'cache'. - test_dir = os.path.join(os.getcwd(), 'test_cache/test_output') - base_dir = os.path.join(os.getcwd(), 'test_cache/compare_output') - self.assertTrue(os.path.exists(os.path.join(test_dir, 'autotest.tbz2'))) - self.assertTrue(os.path.exists(os.path.join(test_dir, 'machine.txt'))) - self.assertTrue(os.path.exists(os.path.join(test_dir, 'results.pickle'))) - - f1 = os.path.join(test_dir, 'machine.txt') - f2 = os.path.join(base_dir, 'machine.txt') - cmd = 'diff %s %s' % (f1, f2) - [_, out, _] = self.result.ce.RunCommandWOutput(cmd) - self.assertEqual(len(out), 0) - - f1 = os.path.join(test_dir, 'results.pickle') - f2 = os.path.join(base_dir, 'results.pickle') - with open(f1, 'rb') as f: - f1_obj = pickle.load(f) - with open(f2, 'rb') as f: - f2_obj = pickle.load(f) - self.assertEqual(f1_obj, f2_obj) - - # Clean up after test. 
- tempfile.mkdtemp = save_real_mkdtemp - command = 'rm %s/*' % test_dir - self.result.ce.RunCommand(command) + self.result.ProcessResults() + mock_proc_turbo.assert_has_calls([mock.call()]) + mock_proc_cpustats.assert_not_called() + self.assertEqual(len(self.result.keyvals), 8) + self.assertEqual( + self.result.keyvals, + { + "Total": 10, + "cpufreq_all_avg": 2, + "cpufreq_all_max": 3, + "cpufreq_all_min": 1, + "cputemp_all_avg": 6.0, + "cputemp_all_min": 5.0, + "cputemp_all_max": 7.0, + "retval": 0, + }, + ) + + @mock.patch.object(Result, "ProcessCpustatsResults") + @mock.patch.object(Result, "ProcessTurbostatResults") + def test_process_results_without_cpu_data( + self, mock_proc_turbo, mock_proc_cpustats + ): + self.result.GetKeyvals = self.FakeGetKeyvals + + self.result.retval = 0 + self.result.turbostat_log_file = "" + self.result.cpustats_log_file = "" + self.result.ProcessResults() + mock_proc_turbo.assert_not_called() + mock_proc_cpustats.assert_not_called() + self.assertEqual(len(self.result.keyvals), 2) + self.assertEqual(self.result.keyvals, {"Total": 10, "retval": 0}) + + @mock.patch.object(misc, "GetInsideChrootPath") + @mock.patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" + ) + def test_populate_from_cache_dir(self, mock_runchrootcmd, mock_getpath): + + # pylint: disable=redefined-builtin + def FakeMkdtemp(dir=None): + if dir: + pass + return self.tmpdir + + def FakeGetSamples(): + return [1, u"samples"] + + current_path = os.getcwd() + cache_dir = os.path.join(current_path, "test_cache/test_input") + self.result.ce = command_executer.GetCommandExecuter( + log_level="average" + ) + self.result.ce.ChrootRunCommandWOutput = mock_runchrootcmd + mock_runchrootcmd.return_value = [ + "", + ("%s,PASS\n%s/\telemetry_Crosperf,PASS\n") % (TMP_DIR1, TMP_DIR1), + "", + ] + mock_getpath.return_value = TMP_DIR1 + self.tmpdir = tempfile.mkdtemp() + save_real_mkdtemp = tempfile.mkdtemp + tempfile.mkdtemp = FakeMkdtemp + + self.result.PopulateFromCacheDir( + cache_dir, "sunspider", "telemetry_Crosperf", "" + ) + self.assertEqual( + self.result.keyvals, + { + u"Total__Total": [444.0, u"ms"], + u"regexp-dna__regexp-dna": [16.2, u"ms"], + u"telemetry_page_measurement_results__num_failed": [ + 0, + u"count", + ], + u"telemetry_page_measurement_results__num_errored": [ + 0, + u"count", + ], + u"string-fasta__string-fasta": [23.2, u"ms"], + u"crypto-sha1__crypto-sha1": [11.6, u"ms"], + u"bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte": [ + 3.2, + u"ms", + ], + u"access-nsieve__access-nsieve": [7.9, u"ms"], + u"bitops-nsieve-bits__bitops-nsieve-bits": [9.4, u"ms"], + u"string-validate-input__string-validate-input": [19.3, u"ms"], + u"3d-raytrace__3d-raytrace": [24.7, u"ms"], + u"3d-cube__3d-cube": [28.0, u"ms"], + u"string-unpack-code__string-unpack-code": [46.7, u"ms"], + u"date-format-tofte__date-format-tofte": [26.3, u"ms"], + u"math-partial-sums__math-partial-sums": [22.0, u"ms"], + "\telemetry_Crosperf": ["PASS", ""], + u"crypto-aes__crypto-aes": [15.2, u"ms"], + u"bitops-bitwise-and__bitops-bitwise-and": [8.4, u"ms"], + u"crypto-md5__crypto-md5": [10.5, u"ms"], + u"string-tagcloud__string-tagcloud": [52.8, u"ms"], + u"access-nbody__access-nbody": [8.5, u"ms"], + "retval": 0, + u"math-spectral-norm__math-spectral-norm": [6.6, u"ms"], + u"math-cordic__math-cordic": [8.7, u"ms"], + u"access-binary-trees__access-binary-trees": [4.5, u"ms"], + u"controlflow-recursive__controlflow-recursive": [4.4, u"ms"], + u"access-fannkuch__access-fannkuch": [17.8, u"ms"], + 
u"string-base64__string-base64": [16.0, u"ms"], + u"date-format-xparb__date-format-xparb": [20.9, u"ms"], + u"3d-morph__3d-morph": [22.1, u"ms"], + u"bitops-bits-in-byte__bitops-bits-in-byte": [9.1, u"ms"], + }, + ) + + self.result.GetSamples = FakeGetSamples + self.result.PopulateFromCacheDir( + cache_dir, "sunspider", "telemetry_Crosperf", "chrome" + ) + self.assertEqual( + self.result.keyvals, + { + u"Total__Total": [444.0, u"ms"], + u"regexp-dna__regexp-dna": [16.2, u"ms"], + u"telemetry_page_measurement_results__num_failed": [ + 0, + u"count", + ], + u"telemetry_page_measurement_results__num_errored": [ + 0, + u"count", + ], + u"string-fasta__string-fasta": [23.2, u"ms"], + u"crypto-sha1__crypto-sha1": [11.6, u"ms"], + u"bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte": [ + 3.2, + u"ms", + ], + u"access-nsieve__access-nsieve": [7.9, u"ms"], + u"bitops-nsieve-bits__bitops-nsieve-bits": [9.4, u"ms"], + u"string-validate-input__string-validate-input": [19.3, u"ms"], + u"3d-raytrace__3d-raytrace": [24.7, u"ms"], + u"3d-cube__3d-cube": [28.0, u"ms"], + u"string-unpack-code__string-unpack-code": [46.7, u"ms"], + u"date-format-tofte__date-format-tofte": [26.3, u"ms"], + u"math-partial-sums__math-partial-sums": [22.0, u"ms"], + "\telemetry_Crosperf": ["PASS", ""], + u"crypto-aes__crypto-aes": [15.2, u"ms"], + u"bitops-bitwise-and__bitops-bitwise-and": [8.4, u"ms"], + u"crypto-md5__crypto-md5": [10.5, u"ms"], + u"string-tagcloud__string-tagcloud": [52.8, u"ms"], + u"access-nbody__access-nbody": [8.5, u"ms"], + "retval": 0, + u"math-spectral-norm__math-spectral-norm": [6.6, u"ms"], + u"math-cordic__math-cordic": [8.7, u"ms"], + u"access-binary-trees__access-binary-trees": [4.5, u"ms"], + u"controlflow-recursive__controlflow-recursive": [4.4, u"ms"], + u"access-fannkuch__access-fannkuch": [17.8, u"ms"], + u"string-base64__string-base64": [16.0, u"ms"], + u"date-format-xparb__date-format-xparb": [20.9, u"ms"], + u"3d-morph__3d-morph": [22.1, u"ms"], + u"bitops-bits-in-byte__bitops-bits-in-byte": [9.1, u"ms"], + u"samples": [1, u"samples"], + }, + ) + + # Clean up after test. + tempfile.mkdtemp = save_real_mkdtemp + command = "rm -Rf %s" % self.tmpdir + self.result.ce.RunCommand(command) + + @mock.patch.object(misc, "GetRoot") + @mock.patch.object(command_executer.CommandExecuter, "RunCommand") + def test_cleanup(self, mock_runcmd, mock_getroot): + + # Test 1. 'rm_chroot_tmp' is True; self.results_dir exists; + # self.temp_dir exists; results_dir name contains 'test_that_results_'. + mock_getroot.return_value = [ + "/tmp/tmp_AbcXyz", + "test_that_results_fake", + ] + self.result.ce.RunCommand = mock_runcmd + self.result.results_dir = "test_results_dir" + self.result.temp_dir = "testtemp_dir" + self.result.CleanUp(True) + self.assertEqual(mock_getroot.call_count, 1) + self.assertEqual(mock_runcmd.call_count, 2) + self.assertEqual( + mock_runcmd.call_args_list[0][0], ("rm -rf test_results_dir",) + ) + self.assertEqual( + mock_runcmd.call_args_list[1][0], ("rm -rf testtemp_dir",) + ) + + # Test 2. 
Same, except that results_dir name does not contain + # 'test_that_results_' + mock_getroot.reset_mock() + mock_runcmd.reset_mock() + mock_getroot.return_value = ["/tmp/tmp_AbcXyz", "other_results_fake"] + self.result.ce.RunCommand = mock_runcmd + self.result.results_dir = "test_results_dir" + self.result.temp_dir = "testtemp_dir" + self.result.CleanUp(True) + self.assertEqual(mock_getroot.call_count, 1) + self.assertEqual(mock_runcmd.call_count, 2) + self.assertEqual( + mock_runcmd.call_args_list[0][0], ("rm -rf /tmp/tmp_AbcXyz",) + ) + self.assertEqual( + mock_runcmd.call_args_list[1][0], ("rm -rf testtemp_dir",) + ) + + # Test 3. mock_getroot returns nothing; 'rm_chroot_tmp' is False. + mock_getroot.reset_mock() + mock_runcmd.reset_mock() + self.result.CleanUp(False) + self.assertEqual(mock_getroot.call_count, 0) + self.assertEqual(mock_runcmd.call_count, 1) + self.assertEqual( + mock_runcmd.call_args_list[0][0], ("rm -rf testtemp_dir",) + ) + + # Test 4. 'rm_chroot_tmp' is True, but result_dir & temp_dir are None. + mock_getroot.reset_mock() + mock_runcmd.reset_mock() + self.result.results_dir = None + self.result.temp_dir = None + self.result.CleanUp(True) + self.assertEqual(mock_getroot.call_count, 0) + self.assertEqual(mock_runcmd.call_count, 0) + + @mock.patch.object(misc, "GetInsideChrootPath") + @mock.patch.object(command_executer.CommandExecuter, "ChrootRunCommand") + def test_store_to_cache_dir(self, mock_chrootruncmd, mock_getpath): + def FakeMkdtemp(directory=""): + if directory: + pass + return self.tmpdir + + if mock_chrootruncmd or mock_getpath: + pass + current_path = os.getcwd() + cache_dir = os.path.join(current_path, "test_cache/test_output") + + self.result.ce = command_executer.GetCommandExecuter( + log_level="average" + ) + self.result.out = OUTPUT + self.result.err = error + self.result.retval = 0 + self.tmpdir = tempfile.mkdtemp() + if not os.path.exists(self.tmpdir): + os.makedirs(self.tmpdir) + self.result.results_dir = os.path.join(os.getcwd(), "test_cache") + save_real_mkdtemp = tempfile.mkdtemp + tempfile.mkdtemp = FakeMkdtemp + + mock_mm = machine_manager.MockMachineManager( + "/tmp/chromeos_root", 0, "average", "" + ) + mock_mm.machine_checksum_string[ + "mock_label" + ] = "fake_machine_checksum123" + + mock_keylist = ["key1", "key2", "key3"] + test_flag.SetTestMode(True) + self.result.StoreToCacheDir(cache_dir, mock_mm, mock_keylist) + + # Check that the correct things were written to the 'cache'. + test_dir = os.path.join(os.getcwd(), "test_cache/test_output") + base_dir = os.path.join(os.getcwd(), "test_cache/compare_output") + self.assertTrue(os.path.exists(os.path.join(test_dir, "autotest.tbz2"))) + self.assertTrue(os.path.exists(os.path.join(test_dir, "machine.txt"))) + self.assertTrue( + os.path.exists(os.path.join(test_dir, "results.pickle")) + ) + + f1 = os.path.join(test_dir, "machine.txt") + f2 = os.path.join(base_dir, "machine.txt") + cmd = "diff %s %s" % (f1, f2) + [_, out, _] = self.result.ce.RunCommandWOutput(cmd) + self.assertEqual(len(out), 0) + + f1 = os.path.join(test_dir, "results.pickle") + f2 = os.path.join(base_dir, "results.pickle") + with open(f1, "rb") as f: + f1_obj = pickle.load(f) + with open(f2, "rb") as f: + f2_obj = pickle.load(f) + self.assertEqual(f1_obj, f2_obj) + + # Clean up after test. 
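+ # Restore the real tempfile.mkdtemp before removing the generated files.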
+ tempfile.mkdtemp = save_real_mkdtemp + command = "rm %s/*" % test_dir + self.result.ce.RunCommand(command) TELEMETRY_RESULT_KEYVALS = { - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'math-cordic (ms)': - '11.4', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'access-nbody (ms)': - '6.9', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'access-fannkuch (ms)': - '26.3', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'math-spectral-norm (ms)': - '6.3', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'bitops-nsieve-bits (ms)': - '9.3', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'math-partial-sums (ms)': - '32.8', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'regexp-dna (ms)': - '16.1', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - '3d-cube (ms)': - '42.7', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'crypto-md5 (ms)': - '10.8', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'crypto-sha1 (ms)': - '12.4', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'string-tagcloud (ms)': - '47.2', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'string-fasta (ms)': - '36.3', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'access-binary-trees (ms)': - '7.3', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'date-format-xparb (ms)': - '138.1', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'crypto-aes (ms)': - '19.2', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'Total (ms)': - '656.5', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'string-base64 (ms)': - '17.5', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'string-validate-input (ms)': - '24.8', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - '3d-raytrace (ms)': - '28.7', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'controlflow-recursive (ms)': - '5.3', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'bitops-bits-in-byte (ms)': - '9.8', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - '3d-morph (ms)': - '50.2', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'bitops-bitwise-and (ms)': - '8.8', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'access-nsieve (ms)': - '8.6', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'date-format-tofte (ms)': - '31.2', - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'bitops-3bit-bits-in-byte (ms)': - '3.5', - 'retval': - 0, - 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html ' - 'string-unpack-code (ms)': - '45.0' + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "math-cordic (ms)": "11.4", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "access-nbody (ms)": "6.9", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "access-fannkuch (ms)": "26.3", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " 
+ "math-spectral-norm (ms)": "6.3", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "bitops-nsieve-bits (ms)": "9.3", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "math-partial-sums (ms)": "32.8", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "regexp-dna (ms)": "16.1", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "3d-cube (ms)": "42.7", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "crypto-md5 (ms)": "10.8", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "crypto-sha1 (ms)": "12.4", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "string-tagcloud (ms)": "47.2", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "string-fasta (ms)": "36.3", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "access-binary-trees (ms)": "7.3", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "date-format-xparb (ms)": "138.1", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "crypto-aes (ms)": "19.2", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "Total (ms)": "656.5", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "string-base64 (ms)": "17.5", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "string-validate-input (ms)": "24.8", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "3d-raytrace (ms)": "28.7", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "controlflow-recursive (ms)": "5.3", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "bitops-bits-in-byte (ms)": "9.8", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "3d-morph (ms)": "50.2", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "bitops-bitwise-and (ms)": "8.8", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "access-nsieve (ms)": "8.6", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "date-format-tofte (ms)": "31.2", + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "bitops-3bit-bits-in-byte (ms)": "3.5", + "retval": 0, + "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html " + "string-unpack-code (ms)": "45.0", } PURE_TELEMETRY_OUTPUT = """ @@ -1864,290 +1986,347 @@ page_name,3d-cube (ms),3d-morph (ms),3d-raytrace (ms),Total (ms),access-binary-t class TelemetryResultTest(unittest.TestCase): - """Telemetry result test.""" - def __init__(self, *args, **kwargs): - super(TelemetryResultTest, self).__init__(*args, **kwargs) - self.callFakeProcessResults = False - self.result = None - self.mock_logger = mock.Mock(spec=logger.Logger) - self.mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) - self.mock_label = MockLabel('mock_label', 'build', 'chromeos_image', - 'autotest_dir', 'debug_dir', '/tmp', 'lumpy', - 'remote', 'image_args', 'cache_dir', 'average', - 'gcc', False, None) - self.mock_machine = machine_manager.MockCrosMachine( - 'falco.cros', '/tmp/chromeos', 'average') - - def test_populate_from_run(self): - def FakeProcessResults(): - self.callFakeProcessResults = True - - self.callFakeProcessResults = False - self.result = TelemetryResult(self.mock_logger, 
self.mock_label, 'average', - self.mock_cmd_exec) - self.result.ProcessResults = FakeProcessResults - self.result.PopulateFromRun(OUTPUT, error, 3, 'fake_test', - 'telemetry_Crosperf', '') - self.assertTrue(self.callFakeProcessResults) - self.assertEqual(self.result.out, OUTPUT) - self.assertEqual(self.result.err, error) - self.assertEqual(self.result.retval, 3) - - def test_populate_from_cache_dir_and_process_results(self): - - self.result = TelemetryResult(self.mock_logger, self.mock_label, 'average', - self.mock_machine) - current_path = os.getcwd() - cache_dir = os.path.join(current_path, - 'test_cache/test_puretelemetry_input') - self.result.PopulateFromCacheDir(cache_dir, '', '', '') - self.assertEqual(self.result.out.strip(), PURE_TELEMETRY_OUTPUT.strip()) - self.assertEqual(self.result.err, '') - self.assertEqual(self.result.retval, 0) - self.assertEqual(self.result.keyvals, TELEMETRY_RESULT_KEYVALS) + """Telemetry result test.""" + + def __init__(self, *args, **kwargs): + super(TelemetryResultTest, self).__init__(*args, **kwargs) + self.callFakeProcessResults = False + self.result = None + self.mock_logger = mock.Mock(spec=logger.Logger) + self.mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) + self.mock_label = MockLabel( + "mock_label", + "build", + "chromeos_image", + "autotest_dir", + "debug_dir", + "/tmp", + "lumpy", + "remote", + "image_args", + "cache_dir", + "average", + "gcc", + False, + None, + ) + self.mock_machine = machine_manager.MockCrosMachine( + "falco.cros", "/tmp/chromeos", "average" + ) + + def test_populate_from_run(self): + def FakeProcessResults(): + self.callFakeProcessResults = True + + self.callFakeProcessResults = False + self.result = TelemetryResult( + self.mock_logger, self.mock_label, "average", self.mock_cmd_exec + ) + self.result.ProcessResults = FakeProcessResults + self.result.PopulateFromRun( + OUTPUT, error, 3, "fake_test", "telemetry_Crosperf", "" + ) + self.assertTrue(self.callFakeProcessResults) + self.assertEqual(self.result.out, OUTPUT) + self.assertEqual(self.result.err, error) + self.assertEqual(self.result.retval, 3) + + def test_populate_from_cache_dir_and_process_results(self): + + self.result = TelemetryResult( + self.mock_logger, self.mock_label, "average", self.mock_machine + ) + current_path = os.getcwd() + cache_dir = os.path.join( + current_path, "test_cache/test_puretelemetry_input" + ) + self.result.PopulateFromCacheDir(cache_dir, "", "", "") + self.assertEqual(self.result.out.strip(), PURE_TELEMETRY_OUTPUT.strip()) + self.assertEqual(self.result.err, "") + self.assertEqual(self.result.retval, 0) + self.assertEqual(self.result.keyvals, TELEMETRY_RESULT_KEYVALS) class ResultsCacheTest(unittest.TestCase): - """Resultcache test class.""" - def __init__(self, *args, **kwargs): - super(ResultsCacheTest, self).__init__(*args, **kwargs) - self.fakeCacheReturnResult = None - self.mock_logger = mock.Mock(spec=logger.Logger) - self.mock_label = MockLabel('mock_label', 'build', 'chromeos_image', - 'autotest_dir', 'debug_dir', '/tmp', 'lumpy', - 'remote', 'image_args', 'cache_dir', 'average', - 'gcc', False, None) - - def setUp(self): - self.results_cache = ResultsCache() - - mock_machine = machine_manager.MockCrosMachine('falco.cros', - '/tmp/chromeos', 'average') - - mock_mm = machine_manager.MockMachineManager('/tmp/chromeos_root', 0, - 'average', '') - mock_mm.machine_checksum_string['mock_label'] = 'fake_machine_checksum123' - - self.results_cache.Init( - self.mock_label.chromeos_image, - 
self.mock_label.chromeos_root, - 'sunspider', - 1, # benchmark_run.iteration, - '', # benchmark_run.test_args, - '', # benchmark_run.profiler_args, - mock_mm, - mock_machine, - self.mock_label.board, - [CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH], - self.mock_logger, - 'average', - self.mock_label, - '', # benchmark_run.share_cache - 'telemetry_Crosperf', - True, # benchmark_run.show_all_results - False, # benchmark_run.run_local - '') # benchmark_run.cwp_dso - - @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum') - def test_get_cache_dir_for_write(self, mock_checksum): - def FakeGetMachines(label): - if label: - pass - m1 = machine_manager.MockCrosMachine('lumpy1.cros', - self.results_cache.chromeos_root, - 'average') - m2 = machine_manager.MockCrosMachine('lumpy2.cros', - self.results_cache.chromeos_root, - 'average') - return [m1, m2] - - mock_checksum.return_value = 'FakeImageChecksumabc123' - self.results_cache.machine_manager.GetMachines = FakeGetMachines - self.results_cache.machine_manager.machine_checksum['mock_label'] = ( - 'FakeMachineChecksumabc987') - # Based on the label, benchmark and machines, get the directory in which - # to store the cache information for this test run. - result_path = self.results_cache.GetCacheDirForWrite() - # Verify that the returned directory is correct (since the label - # contained a cache_dir, named 'cache_dir', that's what is expected in - # the result, rather than '~/cros_scratch'). - comp_path = os.path.join( - os.getcwd(), 'cache_dir/54524606abaae4fdf7b02f49f7ae7127_' - 'sunspider_1_fda29412ceccb72977516c4785d08e2c_' - 'FakeImageChecksumabc123_FakeMachineChecksum' - 'abc987__6') - self.assertEqual(result_path, comp_path) - - def test_form_cache_dir(self): - # This is very similar to the previous test (FormCacheDir is called - # from GetCacheDirForWrite). - cache_key_list = ('54524606abaae4fdf7b02f49f7ae7127', 'sunspider', '1', - '7215ee9c7d9dc229d2921a40e899ec5f', - 'FakeImageChecksumabc123', '*', '*', '6') - path = self.results_cache.FormCacheDir(cache_key_list) - self.assertEqual(len(path), 1) - path1 = path[0] - test_dirname = ('54524606abaae4fdf7b02f49f7ae7127_sunspider_1_7215ee9' - 'c7d9dc229d2921a40e899ec5f_FakeImageChecksumabc123_*_*_6') - comp_path = os.path.join(os.getcwd(), 'cache_dir', test_dirname) - self.assertEqual(path1, comp_path) - - @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum') - def test_get_cache_key_list(self, mock_checksum): - # This tests the mechanism that generates the various pieces of the - # cache directory name, based on various conditions. - - def FakeGetMachines(label): - if label: - pass - m1 = machine_manager.MockCrosMachine('lumpy1.cros', - self.results_cache.chromeos_root, - 'average') - m2 = machine_manager.MockCrosMachine('lumpy2.cros', - self.results_cache.chromeos_root, - 'average') - return [m1, m2] - - mock_checksum.return_value = 'FakeImageChecksumabc123' - self.results_cache.machine_manager.GetMachines = FakeGetMachines - self.results_cache.machine_manager.machine_checksum['mock_label'] = ( - 'FakeMachineChecksumabc987') - - # Test 1. Generating cache name for reading (not writing). - key_list = self.results_cache.GetCacheKeyList(True) - self.assertEqual(key_list[0], '*') # Machine checksum value, for read. 
- self.assertEqual(key_list[1], 'sunspider') - self.assertEqual(key_list[2], '1') - self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c') - self.assertEqual(key_list[4], 'FakeImageChecksumabc123') - self.assertEqual(key_list[5], '*') - self.assertEqual(key_list[6], '*') - self.assertEqual(key_list[7], '6') - - # Test 2. Generating cache name for writing, with local image type. - key_list = self.results_cache.GetCacheKeyList(False) - self.assertEqual(key_list[0], '54524606abaae4fdf7b02f49f7ae7127') - self.assertEqual(key_list[1], 'sunspider') - self.assertEqual(key_list[2], '1') - self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c') - self.assertEqual(key_list[4], 'FakeImageChecksumabc123') - self.assertEqual(key_list[5], 'FakeMachineChecksumabc987') - self.assertEqual(key_list[6], '') - self.assertEqual(key_list[7], '6') - - # Test 3. Generating cache name for writing, with trybot image type. - self.results_cache.label.image_type = 'trybot' - key_list = self.results_cache.GetCacheKeyList(False) - self.assertEqual(key_list[0], '54524606abaae4fdf7b02f49f7ae7127') - self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c') - self.assertEqual(key_list[4], '54524606abaae4fdf7b02f49f7ae7127') - self.assertEqual(key_list[5], 'FakeMachineChecksumabc987') - - # Test 4. Generating cache name for writing, with official image type. - self.results_cache.label.image_type = 'official' - key_list = self.results_cache.GetCacheKeyList(False) - self.assertEqual(key_list[0], '54524606abaae4fdf7b02f49f7ae7127') - self.assertEqual(key_list[1], 'sunspider') - self.assertEqual(key_list[2], '1') - self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c') - self.assertEqual(key_list[4], '*') - self.assertEqual(key_list[5], 'FakeMachineChecksumabc987') - self.assertEqual(key_list[6], '') - self.assertEqual(key_list[7], '6') - - # Test 5. Generating cache name for writing, with local image type, and - # specifying that the image path must match the cached image path. - self.results_cache.label.image_type = 'local' - self.results_cache.cache_conditions.append( - CacheConditions.IMAGE_PATH_MATCH) - key_list = self.results_cache.GetCacheKeyList(False) - self.assertEqual(key_list[0], '54524606abaae4fdf7b02f49f7ae7127') - self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c') - self.assertEqual(key_list[4], 'FakeImageChecksumabc123') - self.assertEqual(key_list[5], 'FakeMachineChecksumabc987') - - @mock.patch.object(command_executer.CommandExecuter, 'RunCommand') - @mock.patch.object(os.path, 'isdir') - @mock.patch.object(Result, 'CreateFromCacheHit') - def test_read_result(self, mock_create, mock_isdir, mock_runcmd): - - self.fakeCacheReturnResult = None - - def FakeGetCacheDirForRead(): - return self.fakeCacheReturnResult - - def FakeGetCacheDirForWrite(): - return self.fakeCacheReturnResult - - mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) - fake_result = Result(self.mock_logger, self.mock_label, 'average', - mock_cmd_exec) - fake_result.retval = 0 - - # Set up results_cache _GetCacheDirFor{Read,Write} to return - # self.fakeCacheReturnResult, which is initially None (see above). - # So initially, no cache dir is returned. 
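
The numbered cases below exercise ReadResult's early-exit ladder one branch at a time. A condensed sketch of that ladder, with an invented helper signature (CacheConditions comes from results_cache; the real ReadResult does more work in several of these branches):

import os

from results_cache import CacheConditions


def read_result_sketch(cache, create_from_cache_hit):
    if CacheConditions.FALSE in cache.cache_conditions:
        # Test 1: caching disabled; the test also expects one RunCommand
        # call in this branch, since stale cache state gets scrubbed.
        return None
    cache_dir = cache.GetCacheDirForRead()
    if not cache_dir:
        return None  # Test 2: no cache directory matched.
    if not os.path.isdir(cache_dir):
        return None  # Test 4: a name was computed, but nothing is on disk.
    result = create_from_cache_hit(cache_dir)
    if not result:
        return None  # Test 5: the cached payload could not be loaded.
    if (CacheConditions.RUN_SUCCEEDED in cache.cache_conditions
            and result.retval != 0):
        return None  # Test 7: only successful runs are served from cache.
    return result  # Tests 3 and 6: a usable cache hit.
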
- self.results_cache.GetCacheDirForRead = FakeGetCacheDirForRead - self.results_cache.GetCacheDirForWrite = FakeGetCacheDirForWrite - - mock_isdir.return_value = True - save_cc = [ - CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH - ] - self.results_cache.cache_conditions.append(CacheConditions.FALSE) - - # Test 1. CacheCondition.FALSE, which means do not read from the cache. - # (force re-running of test). Result should be None. - res = self.results_cache.ReadResult() - self.assertIsNone(res) - self.assertEqual(mock_runcmd.call_count, 1) - - # Test 2. Remove CacheCondition.FALSE. Result should still be None, - # because GetCacheDirForRead is returning None at the moment. - mock_runcmd.reset_mock() - self.results_cache.cache_conditions = save_cc - res = self.results_cache.ReadResult() - self.assertIsNone(res) - self.assertEqual(mock_runcmd.call_count, 0) - - # Test 3. Now set up cache dir to be returned by GetCacheDirForRead. - # Since cache_dir is found, will call Result.CreateFromCacheHit, which - # which will actually all our mock_create and should return fake_result. - self.fakeCacheReturnResult = 'fake/cache/dir' - mock_create.return_value = fake_result - res = self.results_cache.ReadResult() - self.assertEqual(mock_runcmd.call_count, 0) - self.assertEqual(res, fake_result) - - # Test 4. os.path.isdir(cache_dir) will now return false, so result - # should be None again (no cache found). - mock_isdir.return_value = False - res = self.results_cache.ReadResult() - self.assertEqual(mock_runcmd.call_count, 0) - self.assertIsNone(res) - - # Test 5. os.path.isdir returns true, but mock_create now returns None - # (the call to CreateFromCacheHit returns None), so overal result is None. - mock_isdir.return_value = True - mock_create.return_value = None - res = self.results_cache.ReadResult() - self.assertEqual(mock_runcmd.call_count, 0) - self.assertIsNone(res) - - # Test 6. Everything works 'as expected', result should be fake_result. - mock_create.return_value = fake_result - res = self.results_cache.ReadResult() - self.assertEqual(mock_runcmd.call_count, 0) - self.assertEqual(res, fake_result) - - # Test 7. The run failed; result should be None. 
- mock_create.return_value = fake_result - fake_result.retval = 1 - self.results_cache.cache_conditions.append(CacheConditions.RUN_SUCCEEDED) - res = self.results_cache.ReadResult() - self.assertEqual(mock_runcmd.call_count, 0) - self.assertIsNone(res) - - -if __name__ == '__main__': - unittest.main() + """Resultcache test class.""" + + def __init__(self, *args, **kwargs): + super(ResultsCacheTest, self).__init__(*args, **kwargs) + self.fakeCacheReturnResult = None + self.mock_logger = mock.Mock(spec=logger.Logger) + self.mock_label = MockLabel( + "mock_label", + "build", + "chromeos_image", + "autotest_dir", + "debug_dir", + "/tmp", + "lumpy", + "remote", + "image_args", + "cache_dir", + "average", + "gcc", + False, + None, + ) + + def setUp(self): + self.results_cache = ResultsCache() + + mock_machine = machine_manager.MockCrosMachine( + "falco.cros", "/tmp/chromeos", "average" + ) + + mock_mm = machine_manager.MockMachineManager( + "/tmp/chromeos_root", 0, "average", "" + ) + mock_mm.machine_checksum_string[ + "mock_label" + ] = "fake_machine_checksum123" + + self.results_cache.Init( + self.mock_label.chromeos_image, + self.mock_label.chromeos_root, + "sunspider", + 1, # benchmark_run.iteration, + "", # benchmark_run.test_args, + "", # benchmark_run.profiler_args, + mock_mm, + mock_machine, + self.mock_label.board, + [ + CacheConditions.CACHE_FILE_EXISTS, + CacheConditions.CHECKSUMS_MATCH, + ], + self.mock_logger, + "average", + self.mock_label, + "", # benchmark_run.share_cache + "telemetry_Crosperf", + True, # benchmark_run.show_all_results + False, # benchmark_run.run_local + "", + ) # benchmark_run.cwp_dso + + @mock.patch.object(image_checksummer.ImageChecksummer, "Checksum") + def test_get_cache_dir_for_write(self, mock_checksum): + def FakeGetMachines(label): + if label: + pass + m1 = machine_manager.MockCrosMachine( + "lumpy1.cros", self.results_cache.chromeos_root, "average" + ) + m2 = machine_manager.MockCrosMachine( + "lumpy2.cros", self.results_cache.chromeos_root, "average" + ) + return [m1, m2] + + mock_checksum.return_value = "FakeImageChecksumabc123" + self.results_cache.machine_manager.GetMachines = FakeGetMachines + self.results_cache.machine_manager.machine_checksum[ + "mock_label" + ] = "FakeMachineChecksumabc987" + # Based on the label, benchmark and machines, get the directory in which + # to store the cache information for this test run. + result_path = self.results_cache.GetCacheDirForWrite() + # Verify that the returned directory is correct (since the label + # contained a cache_dir, named 'cache_dir', that's what is expected in + # the result, rather than '~/cros_scratch'). + comp_path = os.path.join( + os.getcwd(), + "cache_dir/54524606abaae4fdf7b02f49f7ae7127_" + "sunspider_1_fda29412ceccb72977516c4785d08e2c_" + "FakeImageChecksumabc123_FakeMachineChecksum" + "abc987__6", + ) + self.assertEqual(result_path, comp_path) + + def test_form_cache_dir(self): + # This is very similar to the previous test (FormCacheDir is called + # from GetCacheDirForWrite). 
+ cache_key_list = ( + "54524606abaae4fdf7b02f49f7ae7127", + "sunspider", + "1", + "7215ee9c7d9dc229d2921a40e899ec5f", + "FakeImageChecksumabc123", + "*", + "*", + "6", + ) + path = self.results_cache.FormCacheDir(cache_key_list) + self.assertEqual(len(path), 1) + path1 = path[0] + test_dirname = ( + "54524606abaae4fdf7b02f49f7ae7127_sunspider_1_7215ee9" + "c7d9dc229d2921a40e899ec5f_FakeImageChecksumabc123_*_*_6" + ) + comp_path = os.path.join(os.getcwd(), "cache_dir", test_dirname) + self.assertEqual(path1, comp_path) + + @mock.patch.object(image_checksummer.ImageChecksummer, "Checksum") + def test_get_cache_key_list(self, mock_checksum): + # This tests the mechanism that generates the various pieces of the + # cache directory name, based on various conditions. + + def FakeGetMachines(label): + if label: + pass + m1 = machine_manager.MockCrosMachine( + "lumpy1.cros", self.results_cache.chromeos_root, "average" + ) + m2 = machine_manager.MockCrosMachine( + "lumpy2.cros", self.results_cache.chromeos_root, "average" + ) + return [m1, m2] + + mock_checksum.return_value = "FakeImageChecksumabc123" + self.results_cache.machine_manager.GetMachines = FakeGetMachines + self.results_cache.machine_manager.machine_checksum[ + "mock_label" + ] = "FakeMachineChecksumabc987" + + # Test 1. Generating cache name for reading (not writing). + key_list = self.results_cache.GetCacheKeyList(True) + self.assertEqual(key_list[0], "*") # Machine checksum value, for read. + self.assertEqual(key_list[1], "sunspider") + self.assertEqual(key_list[2], "1") + self.assertEqual(key_list[3], "fda29412ceccb72977516c4785d08e2c") + self.assertEqual(key_list[4], "FakeImageChecksumabc123") + self.assertEqual(key_list[5], "*") + self.assertEqual(key_list[6], "*") + self.assertEqual(key_list[7], "6") + + # Test 2. Generating cache name for writing, with local image type. + key_list = self.results_cache.GetCacheKeyList(False) + self.assertEqual(key_list[0], "54524606abaae4fdf7b02f49f7ae7127") + self.assertEqual(key_list[1], "sunspider") + self.assertEqual(key_list[2], "1") + self.assertEqual(key_list[3], "fda29412ceccb72977516c4785d08e2c") + self.assertEqual(key_list[4], "FakeImageChecksumabc123") + self.assertEqual(key_list[5], "FakeMachineChecksumabc987") + self.assertEqual(key_list[6], "") + self.assertEqual(key_list[7], "6") + + # Test 3. Generating cache name for writing, with trybot image type. + self.results_cache.label.image_type = "trybot" + key_list = self.results_cache.GetCacheKeyList(False) + self.assertEqual(key_list[0], "54524606abaae4fdf7b02f49f7ae7127") + self.assertEqual(key_list[3], "fda29412ceccb72977516c4785d08e2c") + self.assertEqual(key_list[4], "54524606abaae4fdf7b02f49f7ae7127") + self.assertEqual(key_list[5], "FakeMachineChecksumabc987") + + # Test 4. Generating cache name for writing, with official image type. + self.results_cache.label.image_type = "official" + key_list = self.results_cache.GetCacheKeyList(False) + self.assertEqual(key_list[0], "54524606abaae4fdf7b02f49f7ae7127") + self.assertEqual(key_list[1], "sunspider") + self.assertEqual(key_list[2], "1") + self.assertEqual(key_list[3], "fda29412ceccb72977516c4785d08e2c") + self.assertEqual(key_list[4], "*") + self.assertEqual(key_list[5], "FakeMachineChecksumabc987") + self.assertEqual(key_list[6], "") + self.assertEqual(key_list[7], "6") + + # Test 5. Generating cache name for writing, with local image type, and + # specifying that the image path must match the cached image path. 
+        self.results_cache.label.image_type = "local"
+        self.results_cache.cache_conditions.append(
+            CacheConditions.IMAGE_PATH_MATCH
+        )
+        key_list = self.results_cache.GetCacheKeyList(False)
+        self.assertEqual(key_list[0], "54524606abaae4fdf7b02f49f7ae7127")
+        self.assertEqual(key_list[3], "fda29412ceccb72977516c4785d08e2c")
+        self.assertEqual(key_list[4], "FakeImageChecksumabc123")
+        self.assertEqual(key_list[5], "FakeMachineChecksumabc987")
+
+    @mock.patch.object(command_executer.CommandExecuter, "RunCommand")
+    @mock.patch.object(os.path, "isdir")
+    @mock.patch.object(Result, "CreateFromCacheHit")
+    def test_read_result(self, mock_create, mock_isdir, mock_runcmd):
+
+        self.fakeCacheReturnResult = None
+
+        def FakeGetCacheDirForRead():
+            return self.fakeCacheReturnResult
+
+        def FakeGetCacheDirForWrite():
+            return self.fakeCacheReturnResult
+
+        mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+        fake_result = Result(
+            self.mock_logger, self.mock_label, "average", mock_cmd_exec
+        )
+        fake_result.retval = 0
+
+        # Set up results_cache _GetCacheDirFor{Read,Write} to return
+        # self.fakeCacheReturnResult, which is initially None (see above).
+        # So initially, no cache dir is returned.
+        self.results_cache.GetCacheDirForRead = FakeGetCacheDirForRead
+        self.results_cache.GetCacheDirForWrite = FakeGetCacheDirForWrite
+
+        mock_isdir.return_value = True
+        save_cc = [
+            CacheConditions.CACHE_FILE_EXISTS,
+            CacheConditions.CHECKSUMS_MATCH,
+        ]
+        self.results_cache.cache_conditions.append(CacheConditions.FALSE)
+
+        # Test 1. CacheCondition.FALSE, which means do not read from the cache
+        # (force re-running of test). Result should be None.
+        res = self.results_cache.ReadResult()
+        self.assertIsNone(res)
+        self.assertEqual(mock_runcmd.call_count, 1)
+
+        # Test 2. Remove CacheCondition.FALSE. Result should still be None,
+        # because GetCacheDirForRead is returning None at the moment.
+        mock_runcmd.reset_mock()
+        self.results_cache.cache_conditions = save_cc
+        res = self.results_cache.ReadResult()
+        self.assertIsNone(res)
+        self.assertEqual(mock_runcmd.call_count, 0)
+
+        # Test 3. Now set up cache dir to be returned by GetCacheDirForRead.
+        # Since cache_dir is found, will call Result.CreateFromCacheHit, which
+        # will actually call our mock_create and should return fake_result.
+        self.fakeCacheReturnResult = "fake/cache/dir"
+        mock_create.return_value = fake_result
+        res = self.results_cache.ReadResult()
+        self.assertEqual(mock_runcmd.call_count, 0)
+        self.assertEqual(res, fake_result)
+
+        # Test 4. os.path.isdir(cache_dir) will now return false, so result
+        # should be None again (no cache found).
+        mock_isdir.return_value = False
+        res = self.results_cache.ReadResult()
+        self.assertEqual(mock_runcmd.call_count, 0)
+        self.assertIsNone(res)
+
+        # Test 5. os.path.isdir returns true, but mock_create now returns None
+        # (the call to CreateFromCacheHit returns None), so the overall result
+        # is None.
+        mock_isdir.return_value = True
+        mock_create.return_value = None
+        res = self.results_cache.ReadResult()
+        self.assertEqual(mock_runcmd.call_count, 0)
+        self.assertIsNone(res)
+
+        # Test 6. Everything works 'as expected', result should be fake_result.
+        mock_create.return_value = fake_result
+        res = self.results_cache.ReadResult()
+        self.assertEqual(mock_runcmd.call_count, 0)
+        self.assertEqual(res, fake_result)
+
+        # Test 7. The run failed; result should be None.
+        mock_create.return_value = fake_result
+        fake_result.retval = 1
+        self.results_cache.cache_conditions.append(
+            CacheConditions.RUN_SUCCEEDED
+        )
+        res = self.results_cache.ReadResult()
+        self.assertEqual(mock_runcmd.call_count, 0)
+        self.assertIsNone(res)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index 210776ab..59ac685b 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -15,214 +15,219 @@ import sys

from cros_utils import misc

-_TELEMETRY_RESULT_DEFAULTS_FILE = 'default-telemetry-results.json'
-_DUP_KEY_REGEX = re.compile(r'(\w+)\{(\d+)\}')
+
+_TELEMETRY_RESULT_DEFAULTS_FILE = "default-telemetry-results.json"
+_DUP_KEY_REGEX = re.compile(r"(\w+)\{(\d+)\}")


def _AdjustIteration(benchmarks, max_dup, bench):
-  """Adjust the interation numbers if they have keys like ABCD{i}."""
-  for benchmark in benchmarks:
-    if benchmark.name != bench or benchmark.iteration_adjusted:
-      continue
-    benchmark.iteration_adjusted = True
-    benchmark.iterations *= (max_dup + 1)
+    """Adjust the iteration numbers if they have keys like ABCD{i}."""
+    for benchmark in benchmarks:
+        if benchmark.name != bench or benchmark.iteration_adjusted:
+            continue
+        benchmark.iteration_adjusted = True
+        benchmark.iterations *= max_dup + 1


def _GetMaxDup(data):
-  """Find the maximum i inside ABCD{i}.
+    """Find the maximum i inside ABCD{i}.

-  data should be a [[[Key]]], where Key is a string that may look like
-  ABCD{i}.
-  """
-  max_dup = 0
-  for label in data:
-    for run in label:
-      for key in run:
-        match = _DUP_KEY_REGEX.match(key)
-        if match:
-          max_dup = max(max_dup, int(match.group(2)))
-  return max_dup
+    data should be a [[[Key]]], where Key is a string that may look like
+    ABCD{i}.
+    """
+    max_dup = 0
+    for label in data:
+        for run in label:
+            for key in run:
+                match = _DUP_KEY_REGEX.match(key)
+                if match:
+                    max_dup = max(max_dup, int(match.group(2)))
+    return max_dup


def _Repeat(func, times):
-  """Returns the result of running func() n times."""
-  return [func() for _ in range(times)]
+    """Returns the result of running func() n times."""
+    return [func() for _ in range(times)]


def _DictWithReturnValues(retval, pass_fail):
-  """Create a new dictionary pre-populated with success/fail values."""
-  new_dict = {}
-  # Note: 0 is a valid retval; test to make sure it's not None.
-  if retval is not None:
-    new_dict['retval'] = retval
-  if pass_fail:
-    new_dict[''] = pass_fail
-  return new_dict
+    """Create a new dictionary pre-populated with success/fail values."""
+    new_dict = {}
+    # Note: 0 is a valid retval; test to make sure it's not None.
+    if retval is not None:
+        new_dict["retval"] = retval
+    if pass_fail:
+        new_dict[""] = pass_fail
+    return new_dict


def _GetNonDupLabel(max_dup, runs):
-  """Create new list for the runs of the same label.
-
-  Specifically, this will split out keys like foo{0}, foo{1} from one run into
-  their own runs. For example, given a run like:
-    {"foo": 1, "bar{0}": 2, "baz": 3, "qux{1}": 4, "pirate{0}": 5}
-
-  You'll get:
-    [{"foo": 1, "baz": 3}, {"bar": 2, "pirate": 5}, {"qux": 4}]
-
-  Hands back the lists of transformed runs, all concatenated together.
- """ - new_runs = [] - for run in runs: - run_retval = run.get('retval', None) - run_pass_fail = run.get('', None) - new_run = {} - # pylint: disable=cell-var-from-loop - added_runs = _Repeat( - lambda: _DictWithReturnValues(run_retval, run_pass_fail), max_dup) - for key, value in run.items(): - match = _DUP_KEY_REGEX.match(key) - if not match: - new_run[key] = value - else: - new_key, index_str = match.groups() - added_runs[int(index_str) - 1][new_key] = str(value) - new_runs.append(new_run) - new_runs += added_runs - return new_runs + """Create new list for the runs of the same label. + + Specifically, this will split out keys like foo{0}, foo{1} from one run into + their own runs. For example, given a run like: + {"foo": 1, "bar{0}": 2, "baz": 3, "qux{1}": 4, "pirate{0}": 5} + + You'll get: + [{"foo": 1, "baz": 3}, {"bar": 2, "pirate": 5}, {"qux": 4}] + + Hands back the lists of transformed runs, all concatenated together. + """ + new_runs = [] + for run in runs: + run_retval = run.get("retval", None) + run_pass_fail = run.get("", None) + new_run = {} + # pylint: disable=cell-var-from-loop + added_runs = _Repeat( + lambda: _DictWithReturnValues(run_retval, run_pass_fail), max_dup + ) + for key, value in run.items(): + match = _DUP_KEY_REGEX.match(key) + if not match: + new_run[key] = value + else: + new_key, index_str = match.groups() + added_runs[int(index_str) - 1][new_key] = str(value) + new_runs.append(new_run) + new_runs += added_runs + return new_runs def _DuplicatePass(result, benchmarks): - """Properly expands keys like `foo{1}` in `result`.""" - for bench, data in result.items(): - max_dup = _GetMaxDup(data) - # If there's nothing to expand, there's nothing to do. - if not max_dup: - continue - for i, runs in enumerate(data): - data[i] = _GetNonDupLabel(max_dup, runs) - _AdjustIteration(benchmarks, max_dup, bench) + """Properly expands keys like `foo{1}` in `result`.""" + for bench, data in result.items(): + max_dup = _GetMaxDup(data) + # If there's nothing to expand, there's nothing to do. + if not max_dup: + continue + for i, runs in enumerate(data): + data[i] = _GetNonDupLabel(max_dup, runs) + _AdjustIteration(benchmarks, max_dup, bench) def _ReadSummaryFile(filename): - """Reads the summary file at filename.""" - dirname, _ = misc.GetRoot(filename) - fullname = os.path.join(dirname, _TELEMETRY_RESULT_DEFAULTS_FILE) - try: - # Slurp the summary file into a dictionary. The keys in the dictionary are - # the benchmark names. The value for a key is a list containing the names - # of all the result fields that should be returned in a 'default' report. - with open(fullname) as in_file: - return json.load(in_file) - except IOError as e: - # ENOENT means "no such file or directory" - if e.errno == errno.ENOENT: - return {} - raise + """Reads the summary file at filename.""" + dirname, _ = misc.GetRoot(filename) + fullname = os.path.join(dirname, _TELEMETRY_RESULT_DEFAULTS_FILE) + try: + # Slurp the summary file into a dictionary. The keys in the dictionary are + # the benchmark names. The value for a key is a list containing the names + # of all the result fields that should be returned in a 'default' report. + with open(fullname) as in_file: + return json.load(in_file) + except IOError as e: + # ENOENT means "no such file or directory" + if e.errno == errno.ENOENT: + return {} + raise def _MakeOrganizeResultOutline(benchmark_runs, labels): - """Creates the "outline" of the OrganizeResults result for a set of runs. 
- - Report generation returns lists of different sizes, depending on the input - data. Depending on the order in which we iterate through said input data, we - may populate the Nth index of a list, then the N-1st, then the N+Mth, ... - - It's cleaner to figure out the "skeleton"/"outline" ahead of time, so we don't - have to worry about resizing while computing results. - """ - # Count how many iterations exist for each benchmark run. - # We can't simply count up, since we may be given an incomplete set of - # iterations (e.g. [r.iteration for r in benchmark_runs] == [1, 3]) - iteration_count = {} - for run in benchmark_runs: - name = run.benchmark.name - old_iterations = iteration_count.get(name, -1) - # N.B. run.iteration starts at 1, not 0. - iteration_count[name] = max(old_iterations, run.iteration) - - # Result structure: {benchmark_name: [[{key: val}]]} - result = {} - for run in benchmark_runs: - name = run.benchmark.name - num_iterations = iteration_count[name] - # default param makes cros lint be quiet about defining num_iterations in a - # loop. - make_dicts = lambda n=num_iterations: _Repeat(dict, n) - result[name] = _Repeat(make_dicts, len(labels)) - return result + """Creates the "outline" of the OrganizeResults result for a set of runs. + + Report generation returns lists of different sizes, depending on the input + data. Depending on the order in which we iterate through said input data, we + may populate the Nth index of a list, then the N-1st, then the N+Mth, ... + + It's cleaner to figure out the "skeleton"/"outline" ahead of time, so we don't + have to worry about resizing while computing results. + """ + # Count how many iterations exist for each benchmark run. + # We can't simply count up, since we may be given an incomplete set of + # iterations (e.g. [r.iteration for r in benchmark_runs] == [1, 3]) + iteration_count = {} + for run in benchmark_runs: + name = run.benchmark.name + old_iterations = iteration_count.get(name, -1) + # N.B. run.iteration starts at 1, not 0. + iteration_count[name] = max(old_iterations, run.iteration) + + # Result structure: {benchmark_name: [[{key: val}]]} + result = {} + for run in benchmark_runs: + name = run.benchmark.name + num_iterations = iteration_count[name] + # default param makes cros lint be quiet about defining num_iterations in a + # loop. + make_dicts = lambda n=num_iterations: _Repeat(dict, n) + result[name] = _Repeat(make_dicts, len(labels)) + return result def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False): - """Create a dict from benchmark_runs. - - The structure of the output dict is as follows: - {"benchmark_1":[ - [{"key1":"v1", "key2":"v2"},{"key1":"v1", "key2","v2"}] - #one label - [] - #the other label - ] - "benchmark_2": - [ - ]}. 
- """ - result = _MakeOrganizeResultOutline(benchmark_runs, labels) - label_names = [label.name for label in labels] - label_indices = {name: i for i, name in enumerate(label_names)} - summary_file = _ReadSummaryFile(sys.argv[0]) - - if benchmarks is None: - benchmarks = [] - - for benchmark_run in benchmark_runs: - if not benchmark_run.result: - continue - benchmark = benchmark_run.benchmark - label_index = label_indices[benchmark_run.label.name] - cur_label_list = result[benchmark.name][label_index] - cur_dict = cur_label_list[benchmark_run.iteration - 1] - - show_all_results = json_report or benchmark.show_all_results - if not show_all_results: - summary_list = summary_file.get(benchmark.name) - if summary_list: - for key in benchmark_run.result.keyvals.keys(): - if any( - key.startswith(added_key) - for added_key in ['retval', 'cpufreq', 'cputemp']): - summary_list.append(key) - else: - # Did not find test_name in json file; show everything. - show_all_results = True - if benchmark_run.result.cwp_dso: - # If we are in cwp approximation mode, we only care about samples - if 'samples' in benchmark_run.result.keyvals: - cur_dict['samples'] = benchmark_run.result.keyvals['samples'] - cur_dict['retval'] = benchmark_run.result.keyvals['retval'] - for key, value in benchmark_run.result.keyvals.items(): - if any( - key.startswith(cpustat_keyword) - for cpustat_keyword in ['cpufreq', 'cputemp']): - cur_dict[key] = value - else: - for test_key in benchmark_run.result.keyvals: - if show_all_results or test_key in summary_list: - cur_dict[test_key] = benchmark_run.result.keyvals[test_key] - # Occasionally Telemetry tests will not fail but they will not return a - # result, either. Look for those cases, and force them to be a fail. - # (This can happen if, for example, the test has been disabled.) - if len(cur_dict) == 1 and cur_dict['retval'] == 0: - cur_dict['retval'] = 1 - benchmark_run.result.keyvals['retval'] = 1 - # TODO: This output should be sent via logger. - print( - "WARNING: Test '%s' appears to have succeeded but returned" - ' no results.' % benchmark.name, - file=sys.stderr) - if json_report and benchmark_run.machine: - cur_dict['machine'] = benchmark_run.machine.name - cur_dict['machine_checksum'] = benchmark_run.machine.checksum - cur_dict['machine_string'] = benchmark_run.machine.checksum_string - _DuplicatePass(result, benchmarks) - return result + """Create a dict from benchmark_runs. + + The structure of the output dict is as follows: + {"benchmark_1":[ + [{"key1":"v1", "key2":"v2"},{"key1":"v1", "key2","v2"}] + #one label + [] + #the other label + ] + "benchmark_2": + [ + ]}. 
+ """ + result = _MakeOrganizeResultOutline(benchmark_runs, labels) + label_names = [label.name for label in labels] + label_indices = {name: i for i, name in enumerate(label_names)} + summary_file = _ReadSummaryFile(sys.argv[0]) + + if benchmarks is None: + benchmarks = [] + + for benchmark_run in benchmark_runs: + if not benchmark_run.result: + continue + benchmark = benchmark_run.benchmark + label_index = label_indices[benchmark_run.label.name] + cur_label_list = result[benchmark.name][label_index] + cur_dict = cur_label_list[benchmark_run.iteration - 1] + + show_all_results = json_report or benchmark.show_all_results + if not show_all_results: + summary_list = summary_file.get(benchmark.name) + if summary_list: + for key in benchmark_run.result.keyvals.keys(): + if any( + key.startswith(added_key) + for added_key in ["retval", "cpufreq", "cputemp"] + ): + summary_list.append(key) + else: + # Did not find test_name in json file; show everything. + show_all_results = True + if benchmark_run.result.cwp_dso: + # If we are in cwp approximation mode, we only care about samples + if "samples" in benchmark_run.result.keyvals: + cur_dict["samples"] = benchmark_run.result.keyvals["samples"] + cur_dict["retval"] = benchmark_run.result.keyvals["retval"] + for key, value in benchmark_run.result.keyvals.items(): + if any( + key.startswith(cpustat_keyword) + for cpustat_keyword in ["cpufreq", "cputemp"] + ): + cur_dict[key] = value + else: + for test_key in benchmark_run.result.keyvals: + if show_all_results or test_key in summary_list: + cur_dict[test_key] = benchmark_run.result.keyvals[test_key] + # Occasionally Telemetry tests will not fail but they will not return a + # result, either. Look for those cases, and force them to be a fail. + # (This can happen if, for example, the test has been disabled.) + if len(cur_dict) == 1 and cur_dict["retval"] == 0: + cur_dict["retval"] = 1 + benchmark_run.result.keyvals["retval"] = 1 + # TODO: This output should be sent via logger. + print( + "WARNING: Test '%s' appears to have succeeded but returned" + " no results." 
% benchmark.name, + file=sys.stderr, + ) + if json_report and benchmark_run.machine: + cur_dict["machine"] = benchmark_run.machine.name + cur_dict["machine_checksum"] = benchmark_run.machine.checksum + cur_dict["machine_string"] = benchmark_run.machine.checksum_string + _DuplicatePass(result, benchmarks) + return result diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py index efdd215b..90a95a73 100755 --- a/crosperf/results_organizer_unittest.py +++ b/crosperf/results_organizer_unittest.py @@ -16,155 +16,149 @@ from __future__ import print_function import unittest from benchmark_run import BenchmarkRun +import mock_instance from results_cache import Result from results_organizer import OrganizeResults -import mock_instance result = { - 'benchmark1': [[{ - '': 'PASS', - 'bool': 'True', - 'milliseconds_1': '1', - 'milliseconds_2': '8', - 'milliseconds_3': '9.2', - 'ms_1': '2.1', - 'total': '5' - }, { - '': 'PASS', - 'test': '2' - }, { - '': 'PASS', - 'test': '4' - }, { - '': 'PASS', - 'bool': 'FALSE', - 'milliseconds_1': '3', - 'milliseconds_2': '5', - 'ms_1': '2.2', - 'total': '6' - }, { - '': 'PASS', - 'test': '3' - }, { - '': 'PASS', - 'test': '4' - }], [{ - '': 'PASS', - 'bool': 'FALSE', - 'milliseconds_4': '30', - 'milliseconds_5': '50', - 'ms_1': '2.23', - 'total': '6' - }, { - '': 'PASS', - 'test': '5' - }, { - '': 'PASS', - 'test': '4' - }, { - '': 'PASS', - 'bool': 'FALSE', - 'milliseconds_1': '3', - 'milliseconds_6': '7', - 'ms_1': '2.3', - 'total': '7' - }, { - '': 'PASS', - 'test': '2' - }, { - '': 'PASS', - 'test': '6' - }]], - 'benchmark2': [[{ - '': 'PASS', - 'bool': 'TRUE', - 'milliseconds_1': '3', - 'milliseconds_8': '6', - 'ms_1': '2.3', - 'total': '7' - }, { - '': 'PASS', - 'test': '2' - }, { - '': 'PASS', - 'test': '6' - }, { - '': 'PASS', - 'bool': 'TRUE', - 'milliseconds_1': '3', - 'milliseconds_8': '6', - 'ms_1': '2.2', - 'total': '7' - }, { - '': 'PASS', - 'test': '2' - }, { - '': 'PASS', - 'test': '2' - }], [{ - '': 'PASS', - 'bool': 'TRUE', - 'milliseconds_1': '3', - 'milliseconds_8': '6', - 'ms_1': '2', - 'total': '7' - }, { - '': 'PASS', - 'test': '2' - }, { - '': 'PASS', - 'test': '4' - }, { - '': 'PASS', - 'bool': 'TRUE', - 'milliseconds_1': '3', - 'milliseconds_8': '6', - 'ms_1': '1', - 'total': '7' - }, { - '': 'PASS', - 'test': '1' - }, { - '': 'PASS', - 'test': '6' - }]] -} # yapf: disable + "benchmark1": [ + [ + { + "": "PASS", + "bool": "True", + "milliseconds_1": "1", + "milliseconds_2": "8", + "milliseconds_3": "9.2", + "ms_1": "2.1", + "total": "5", + }, + {"": "PASS", "test": "2"}, + {"": "PASS", "test": "4"}, + { + "": "PASS", + "bool": "FALSE", + "milliseconds_1": "3", + "milliseconds_2": "5", + "ms_1": "2.2", + "total": "6", + }, + {"": "PASS", "test": "3"}, + {"": "PASS", "test": "4"}, + ], + [ + { + "": "PASS", + "bool": "FALSE", + "milliseconds_4": "30", + "milliseconds_5": "50", + "ms_1": "2.23", + "total": "6", + }, + {"": "PASS", "test": "5"}, + {"": "PASS", "test": "4"}, + { + "": "PASS", + "bool": "FALSE", + "milliseconds_1": "3", + "milliseconds_6": "7", + "ms_1": "2.3", + "total": "7", + }, + {"": "PASS", "test": "2"}, + {"": "PASS", "test": "6"}, + ], + ], + "benchmark2": [ + [ + { + "": "PASS", + "bool": "TRUE", + "milliseconds_1": "3", + "milliseconds_8": "6", + "ms_1": "2.3", + "total": "7", + }, + {"": "PASS", "test": "2"}, + {"": "PASS", "test": "6"}, + { + "": "PASS", + "bool": "TRUE", + "milliseconds_1": "3", + "milliseconds_8": "6", + "ms_1": "2.2", + "total": "7", + }, + {"": "PASS", 
"test": "2"}, + {"": "PASS", "test": "2"}, + ], + [ + { + "": "PASS", + "bool": "TRUE", + "milliseconds_1": "3", + "milliseconds_8": "6", + "ms_1": "2", + "total": "7", + }, + {"": "PASS", "test": "2"}, + {"": "PASS", "test": "4"}, + { + "": "PASS", + "bool": "TRUE", + "milliseconds_1": "3", + "milliseconds_8": "6", + "ms_1": "1", + "total": "7", + }, + {"": "PASS", "test": "1"}, + {"": "PASS", "test": "6"}, + ], + ], +} # yapf: disable class ResultOrganizerTest(unittest.TestCase): - """Test result organizer.""" + """Test result organizer.""" - def testResultOrganizer(self): - labels = [mock_instance.label1, mock_instance.label2] - benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2] - benchmark_runs = [None] * 8 - benchmark_runs[0] = BenchmarkRun('b1', benchmarks[0], labels[0], 1, '', '', - '', 'average', '', {}) - benchmark_runs[1] = BenchmarkRun('b2', benchmarks[0], labels[0], 2, '', '', - '', 'average', '', {}) - benchmark_runs[2] = BenchmarkRun('b3', benchmarks[0], labels[1], 1, '', '', - '', 'average', '', {}) - benchmark_runs[3] = BenchmarkRun('b4', benchmarks[0], labels[1], 2, '', '', - '', 'average', '', {}) - benchmark_runs[4] = BenchmarkRun('b5', benchmarks[1], labels[0], 1, '', '', - '', 'average', '', {}) - benchmark_runs[5] = BenchmarkRun('b6', benchmarks[1], labels[0], 2, '', '', - '', 'average', '', {}) - benchmark_runs[6] = BenchmarkRun('b7', benchmarks[1], labels[1], 1, '', '', - '', 'average', '', {}) - benchmark_runs[7] = BenchmarkRun('b8', benchmarks[1], labels[1], 2, '', '', - '', 'average', '', {}) + def testResultOrganizer(self): + labels = [mock_instance.label1, mock_instance.label2] + benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2] + benchmark_runs = [None] * 8 + benchmark_runs[0] = BenchmarkRun( + "b1", benchmarks[0], labels[0], 1, "", "", "", "average", "", {} + ) + benchmark_runs[1] = BenchmarkRun( + "b2", benchmarks[0], labels[0], 2, "", "", "", "average", "", {} + ) + benchmark_runs[2] = BenchmarkRun( + "b3", benchmarks[0], labels[1], 1, "", "", "", "average", "", {} + ) + benchmark_runs[3] = BenchmarkRun( + "b4", benchmarks[0], labels[1], 2, "", "", "", "average", "", {} + ) + benchmark_runs[4] = BenchmarkRun( + "b5", benchmarks[1], labels[0], 1, "", "", "", "average", "", {} + ) + benchmark_runs[5] = BenchmarkRun( + "b6", benchmarks[1], labels[0], 2, "", "", "", "average", "", {} + ) + benchmark_runs[6] = BenchmarkRun( + "b7", benchmarks[1], labels[1], 1, "", "", "", "average", "", {} + ) + benchmark_runs[7] = BenchmarkRun( + "b8", benchmarks[1], labels[1], 2, "", "", "", "average", "", {} + ) - i = 0 - for b in benchmark_runs: - b.result = Result('', b.label, 'average', 'machine') - b.result.keyvals = mock_instance.keyval[i] - i += 1 + i = 0 + for b in benchmark_runs: + b.result = Result("", b.label, "average", "machine") + b.result.keyvals = mock_instance.keyval[i] + i += 1 - organized = OrganizeResults(benchmark_runs, labels, benchmarks) - self.assertEqual(organized, result) + organized = OrganizeResults(benchmark_runs, labels, benchmarks) + self.assertEqual(organized, result) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/results_report.py b/crosperf/results_report.py index 571584bd..50412086 100644 --- a/crosperf/results_report.py +++ b/crosperf/results_report.py @@ -14,15 +14,15 @@ import os import re import time +from column_chart import ColumnChart from cros_utils.tabulator import AmeanResult from cros_utils.tabulator import Cell from 
cros_utils.tabulator import CoeffVarFormat from cros_utils.tabulator import CoeffVarResult from cros_utils.tabulator import Column -from cros_utils.tabulator import SamplesTableGenerator from cros_utils.tabulator import Format -from cros_utils.tabulator import IterationResult from cros_utils.tabulator import GmeanRatioResult +from cros_utils.tabulator import IterationResult from cros_utils.tabulator import LiteralResult from cros_utils.tabulator import MaxResult from cros_utils.tabulator import MinResult @@ -30,20 +30,18 @@ from cros_utils.tabulator import PValueFormat from cros_utils.tabulator import PValueResult from cros_utils.tabulator import RatioFormat from cros_utils.tabulator import RawResult +from cros_utils.tabulator import SamplesTableGenerator from cros_utils.tabulator import StdResult from cros_utils.tabulator import TableFormatter from cros_utils.tabulator import TableGenerator from cros_utils.tabulator import TablePrinter -from update_telemetry_defaults import TelemetryDefaults - -from column_chart import ColumnChart from results_organizer import OrganizeResults - import results_report_templates as templates +from update_telemetry_defaults import TelemetryDefaults def ParseChromeosImage(chromeos_image): - """Parse the chromeos_image string for the image and version. + """Parse the chromeos_image string for the image and version. The chromeos_image string will probably be in one of two formats: 1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \ @@ -64,760 +62,857 @@ def ParseChromeosImage(chromeos_image): version, image: The results of parsing the input string, as explained above. """ - # Find the Chromeos Version, e.g. R45-2345.0.0..... - # chromeos_image should have been something like: - # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin" - if chromeos_image.endswith('/chromiumos_test_image.bin'): - full_version = chromeos_image.split('/')[-2] - # Strip the date and time off of local builds (which have the format - # "R43-2345.0.0.date-and-time"). - version, _ = os.path.splitext(full_version) - else: - version = '' - - # Find the chromeos image. If it's somewhere in .../chroot/tmp/..., then - # it's an official image that got downloaded, so chop off the download path - # to make the official image name more clear. - official_image_path = '/chroot/tmp' - if official_image_path in chromeos_image: - image = chromeos_image.split(official_image_path, 1)[1] - else: - image = chromeos_image - return version, image + # Find the Chromeos Version, e.g. R45-2345.0.0..... + # chromeos_image should have been something like: + # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin" + if chromeos_image.endswith("/chromiumos_test_image.bin"): + full_version = chromeos_image.split("/")[-2] + # Strip the date and time off of local builds (which have the format + # "R43-2345.0.0.date-and-time"). + version, _ = os.path.splitext(full_version) + else: + version = "" + + # Find the chromeos image. If it's somewhere in .../chroot/tmp/..., then + # it's an official image that got downloaded, so chop off the download path + # to make the official image name more clear. + official_image_path = "/chroot/tmp" + if official_image_path in chromeos_image: + image = chromeos_image.split(official_image_path, 1)[1] + else: + image = chromeos_image + return version, image def _AppendUntilLengthIs(gen, the_list, target_len): - """Appends to `list` until `list` is `target_len` elements long. 
+ """Appends to `list` until `list` is `target_len` elements long. - Uses `gen` to generate elements. - """ - the_list.extend(gen() for _ in range(target_len - len(the_list))) - return the_list + Uses `gen` to generate elements. + """ + the_list.extend(gen() for _ in range(target_len - len(the_list))) + return the_list def _FilterPerfReport(event_threshold, report): - """Filters out entries with `< event_threshold` percent in a perf report.""" + """Filters out entries with `< event_threshold` percent in a perf report.""" - def filter_dict(m): - return { - fn_name: pct for fn_name, pct in m.items() if pct >= event_threshold - } + def filter_dict(m): + return { + fn_name: pct for fn_name, pct in m.items() if pct >= event_threshold + } - return {event: filter_dict(m) for event, m in report.items()} + return {event: filter_dict(m) for event, m in report.items()} class _PerfTable(object): - """Generates dicts from a perf table. - - Dicts look like: - {'benchmark_name': {'perf_event_name': [LabelData]}} - where LabelData is a list of perf dicts, each perf dict coming from the same - label. - Each perf dict looks like {'function_name': 0.10, ...} (where 0.10 is the - percentage of time spent in function_name). - """ - - def __init__(self, - benchmark_names_and_iterations, - label_names, - read_perf_report, - event_threshold=None): - """Constructor. - - read_perf_report is a function that takes a label name, benchmark name, and - benchmark iteration, and returns a dictionary describing the perf output for - that given run. + """Generates dicts from a perf table. + + Dicts look like: + {'benchmark_name': {'perf_event_name': [LabelData]}} + where LabelData is a list of perf dicts, each perf dict coming from the same + label. + Each perf dict looks like {'function_name': 0.10, ...} (where 0.10 is the + percentage of time spent in function_name). """ - self.event_threshold = event_threshold - self._label_indices = {name: i for i, name in enumerate(label_names)} - self.perf_data = {} - for label in label_names: - for bench_name, bench_iterations in benchmark_names_and_iterations: - for i in range(bench_iterations): - report = read_perf_report(label, bench_name, i) - self._ProcessPerfReport(report, label, bench_name, i) - - def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration): - """Add the data from one run to the dict.""" - perf_of_run = perf_report - if self.event_threshold is not None: - perf_of_run = _FilterPerfReport(self.event_threshold, perf_report) - if benchmark_name not in self.perf_data: - self.perf_data[benchmark_name] = {event: [] for event in perf_of_run} - ben_data = self.perf_data[benchmark_name] - label_index = self._label_indices[label] - for event in ben_data: - _AppendUntilLengthIs(list, ben_data[event], label_index + 1) - data_for_label = ben_data[event][label_index] - _AppendUntilLengthIs(dict, data_for_label, iteration + 1) - data_for_label[iteration] = perf_of_run[event] if perf_of_run else {} + + def __init__( + self, + benchmark_names_and_iterations, + label_names, + read_perf_report, + event_threshold=None, + ): + """Constructor. + + read_perf_report is a function that takes a label name, benchmark name, and + benchmark iteration, and returns a dictionary describing the perf output for + that given run. 
+ """ + self.event_threshold = event_threshold + self._label_indices = {name: i for i, name in enumerate(label_names)} + self.perf_data = {} + for label in label_names: + for bench_name, bench_iterations in benchmark_names_and_iterations: + for i in range(bench_iterations): + report = read_perf_report(label, bench_name, i) + self._ProcessPerfReport(report, label, bench_name, i) + + def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration): + """Add the data from one run to the dict.""" + perf_of_run = perf_report + if self.event_threshold is not None: + perf_of_run = _FilterPerfReport(self.event_threshold, perf_report) + if benchmark_name not in self.perf_data: + self.perf_data[benchmark_name] = { + event: [] for event in perf_of_run + } + ben_data = self.perf_data[benchmark_name] + label_index = self._label_indices[label] + for event in ben_data: + _AppendUntilLengthIs(list, ben_data[event], label_index + 1) + data_for_label = ben_data[event][label_index] + _AppendUntilLengthIs(dict, data_for_label, iteration + 1) + data_for_label[iteration] = ( + perf_of_run[event] if perf_of_run else {} + ) def _GetResultsTableHeader(ben_name, iterations): - benchmark_info = ('Benchmark: {0}; Iterations: {1}'.format( - ben_name, iterations)) - cell = Cell() - cell.string_value = benchmark_info - cell.header = True - return [[cell]] + benchmark_info = "Benchmark: {0}; Iterations: {1}".format( + ben_name, iterations + ) + cell = Cell() + cell.string_value = benchmark_info + cell.header = True + return [[cell]] def _GetDSOHeader(cwp_dso): - info = 'CWP_DSO: %s' % cwp_dso - cell = Cell() - cell.string_value = info - cell.header = False - return [[cell]] + info = "CWP_DSO: %s" % cwp_dso + cell = Cell() + cell.string_value = info + cell.header = False + return [[cell]] def _ParseColumn(columns, iteration): - new_column = [] - for column in columns: - if column.result.__class__.__name__ != 'RawResult': - new_column.append(column) - else: - new_column.extend( - Column(LiteralResult(i), Format(), str(i + 1)) - for i in range(iteration)) - return new_column + new_column = [] + for column in columns: + if column.result.__class__.__name__ != "RawResult": + new_column.append(column) + else: + new_column.extend( + Column(LiteralResult(i), Format(), str(i + 1)) + for i in range(iteration) + ) + return new_column def _GetTables(benchmark_results, columns, table_type): - iter_counts = benchmark_results.iter_counts - result = benchmark_results.run_keyvals - tables = [] - for bench_name, runs in result.items(): - iterations = iter_counts[bench_name] - ben_table = _GetResultsTableHeader(bench_name, iterations) - - all_runs_empty = all(not dict for label in runs for dict in label) - if all_runs_empty: - cell = Cell() - cell.string_value = ('This benchmark contains no result.' 
- ' Is the benchmark name valid?') - cell_table = [[cell]] - else: - table = TableGenerator(runs, benchmark_results.label_names).GetTable() - parsed_columns = _ParseColumn(columns, iterations) - tf = TableFormatter(table, parsed_columns) - cell_table = tf.GetCellTable(table_type) - tables.append(ben_table) - tables.append(cell_table) - return tables + iter_counts = benchmark_results.iter_counts + result = benchmark_results.run_keyvals + tables = [] + for bench_name, runs in result.items(): + iterations = iter_counts[bench_name] + ben_table = _GetResultsTableHeader(bench_name, iterations) + + all_runs_empty = all(not dict for label in runs for dict in label) + if all_runs_empty: + cell = Cell() + cell.string_value = ( + "This benchmark contains no result." + " Is the benchmark name valid?" + ) + cell_table = [[cell]] + else: + table = TableGenerator( + runs, benchmark_results.label_names + ).GetTable() + parsed_columns = _ParseColumn(columns, iterations) + tf = TableFormatter(table, parsed_columns) + cell_table = tf.GetCellTable(table_type) + tables.append(ben_table) + tables.append(cell_table) + return tables def _GetPerfTables(benchmark_results, columns, table_type): - p_table = _PerfTable(benchmark_results.benchmark_names_and_iterations, - benchmark_results.label_names, - benchmark_results.read_perf_report) - - tables = [] - for benchmark in p_table.perf_data: - iterations = benchmark_results.iter_counts[benchmark] - ben_table = _GetResultsTableHeader(benchmark, iterations) - tables.append(ben_table) - benchmark_data = p_table.perf_data[benchmark] - table = [] - for event in benchmark_data: - tg = TableGenerator( - benchmark_data[event], - benchmark_results.label_names, - sort=TableGenerator.SORT_BY_VALUES_DESC) - table = tg.GetTable(ResultsReport.PERF_ROWS) - parsed_columns = _ParseColumn(columns, iterations) - tf = TableFormatter(table, parsed_columns) - tf.GenerateCellTable(table_type) - tf.AddColumnName() - tf.AddLabelName() - tf.AddHeader(str(event)) - table = tf.GetCellTable(table_type, headers=False) - tables.append(table) - return tables + p_table = _PerfTable( + benchmark_results.benchmark_names_and_iterations, + benchmark_results.label_names, + benchmark_results.read_perf_report, + ) + + tables = [] + for benchmark in p_table.perf_data: + iterations = benchmark_results.iter_counts[benchmark] + ben_table = _GetResultsTableHeader(benchmark, iterations) + tables.append(ben_table) + benchmark_data = p_table.perf_data[benchmark] + table = [] + for event in benchmark_data: + tg = TableGenerator( + benchmark_data[event], + benchmark_results.label_names, + sort=TableGenerator.SORT_BY_VALUES_DESC, + ) + table = tg.GetTable(ResultsReport.PERF_ROWS) + parsed_columns = _ParseColumn(columns, iterations) + tf = TableFormatter(table, parsed_columns) + tf.GenerateCellTable(table_type) + tf.AddColumnName() + tf.AddLabelName() + tf.AddHeader(str(event)) + table = tf.GetCellTable(table_type, headers=False) + tables.append(table) + return tables def _GetSamplesTables(benchmark_results, columns, table_type): - tables = [] - dso_header_table = _GetDSOHeader(benchmark_results.cwp_dso) - tables.append(dso_header_table) - (table, new_keyvals, iter_counts) = SamplesTableGenerator( - benchmark_results.run_keyvals, benchmark_results.label_names, - benchmark_results.iter_counts, benchmark_results.weights).GetTable() - parsed_columns = _ParseColumn(columns, 1) - tf = TableFormatter(table, parsed_columns, samples_table=True) - cell_table = tf.GetCellTable(table_type) - tables.append(cell_table) - return 
(tables, new_keyvals, iter_counts) + tables = [] + dso_header_table = _GetDSOHeader(benchmark_results.cwp_dso) + tables.append(dso_header_table) + (table, new_keyvals, iter_counts) = SamplesTableGenerator( + benchmark_results.run_keyvals, + benchmark_results.label_names, + benchmark_results.iter_counts, + benchmark_results.weights, + ).GetTable() + parsed_columns = _ParseColumn(columns, 1) + tf = TableFormatter(table, parsed_columns, samples_table=True) + cell_table = tf.GetCellTable(table_type) + tables.append(cell_table) + return (tables, new_keyvals, iter_counts) class ResultsReport(object): - """Class to handle the report format.""" - MAX_COLOR_CODE = 255 - PERF_ROWS = 5 - - def __init__(self, results): - self.benchmark_results = results - - def _GetTablesWithColumns(self, columns, table_type, summary_type): - if summary_type == 'perf': - get_tables = _GetPerfTables - elif summary_type == 'samples': - get_tables = _GetSamplesTables - else: - get_tables = _GetTables - ret = get_tables(self.benchmark_results, columns, table_type) - # If we are generating a samples summary table, the return value of - # get_tables will be a tuple, and we will update the benchmark_results for - # composite benchmark so that full table can use it. - if isinstance(ret, tuple): - self.benchmark_results.run_keyvals = ret[1] - self.benchmark_results.iter_counts = ret[2] - ret = ret[0] - return ret - - def GetFullTables(self, perf=False): - ignore_min_max = self.benchmark_results.ignore_min_max - columns = [ - Column(RawResult(), Format()), - Column(MinResult(), Format()), - Column(MaxResult(), Format()), - Column(AmeanResult(ignore_min_max), Format()), - Column(StdResult(ignore_min_max), Format(), 'StdDev'), - Column(CoeffVarResult(ignore_min_max), CoeffVarFormat(), 'StdDev/Mean'), - Column(GmeanRatioResult(ignore_min_max), RatioFormat(), 'GmeanSpeedup'), - Column(PValueResult(ignore_min_max), PValueFormat(), 'p-value') - ] - return self._GetTablesWithColumns(columns, 'full', perf) - - def GetSummaryTables(self, summary_type=''): - ignore_min_max = self.benchmark_results.ignore_min_max - columns = [] - if summary_type == 'samples': - columns += [Column(IterationResult(), Format(), 'Iterations [Pass:Fail]')] - columns += [ - Column( - AmeanResult(ignore_min_max), Format(), - 'Weighted Samples Amean' if summary_type == 'samples' else ''), - Column(StdResult(ignore_min_max), Format(), 'StdDev'), - Column(CoeffVarResult(ignore_min_max), CoeffVarFormat(), 'StdDev/Mean'), - Column(GmeanRatioResult(ignore_min_max), RatioFormat(), 'GmeanSpeedup'), - Column(PValueResult(ignore_min_max), PValueFormat(), 'p-value') - ] - return self._GetTablesWithColumns(columns, 'summary', summary_type) + """Class to handle the report format.""" + + MAX_COLOR_CODE = 255 + PERF_ROWS = 5 + + def __init__(self, results): + self.benchmark_results = results + + def _GetTablesWithColumns(self, columns, table_type, summary_type): + if summary_type == "perf": + get_tables = _GetPerfTables + elif summary_type == "samples": + get_tables = _GetSamplesTables + else: + get_tables = _GetTables + ret = get_tables(self.benchmark_results, columns, table_type) + # If we are generating a samples summary table, the return value of + # get_tables will be a tuple, and we will update the benchmark_results for + # composite benchmark so that full table can use it. 
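
The comment above marks a convention that is easy to miss in the diff: of the three table generators, only _GetSamplesTables returns a tuple, and _GetTablesWithColumns keys off that shape to fold the recomputed keyvals back into benchmark_results. A stripped-down sketch of just that dispatch (invented function name):

def tables_with_columns_sketch(get_tables, benchmark_results, columns, ttype):
    ret = get_tables(benchmark_results, columns, ttype)
    if isinstance(ret, tuple):
        # Samples path: unpack, and let the later full tables see the
        # composite benchmark's recomputed run_keyvals and iter_counts.
        tables, benchmark_results.run_keyvals, benchmark_results.iter_counts = ret
        return tables
    return ret  # perf/default path: already a plain list of tables.
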
+ if isinstance(ret, tuple): + self.benchmark_results.run_keyvals = ret[1] + self.benchmark_results.iter_counts = ret[2] + ret = ret[0] + return ret + + def GetFullTables(self, perf=False): + ignore_min_max = self.benchmark_results.ignore_min_max + columns = [ + Column(RawResult(), Format()), + Column(MinResult(), Format()), + Column(MaxResult(), Format()), + Column(AmeanResult(ignore_min_max), Format()), + Column(StdResult(ignore_min_max), Format(), "StdDev"), + Column( + CoeffVarResult(ignore_min_max), CoeffVarFormat(), "StdDev/Mean" + ), + Column( + GmeanRatioResult(ignore_min_max), RatioFormat(), "GmeanSpeedup" + ), + Column(PValueResult(ignore_min_max), PValueFormat(), "p-value"), + ] + return self._GetTablesWithColumns(columns, "full", perf) + + def GetSummaryTables(self, summary_type=""): + ignore_min_max = self.benchmark_results.ignore_min_max + columns = [] + if summary_type == "samples": + columns += [ + Column(IterationResult(), Format(), "Iterations [Pass:Fail]") + ] + columns += [ + Column( + AmeanResult(ignore_min_max), + Format(), + "Weighted Samples Amean" if summary_type == "samples" else "", + ), + Column(StdResult(ignore_min_max), Format(), "StdDev"), + Column( + CoeffVarResult(ignore_min_max), CoeffVarFormat(), "StdDev/Mean" + ), + Column( + GmeanRatioResult(ignore_min_max), RatioFormat(), "GmeanSpeedup" + ), + Column(PValueResult(ignore_min_max), PValueFormat(), "p-value"), + ] + return self._GetTablesWithColumns(columns, "summary", summary_type) def _PrintTable(tables, out_to): - # tables may be None. - if not tables: - return '' - - if out_to == 'HTML': - out_type = TablePrinter.HTML - elif out_to == 'PLAIN': - out_type = TablePrinter.PLAIN - elif out_to == 'CONSOLE': - out_type = TablePrinter.CONSOLE - elif out_to == 'TSV': - out_type = TablePrinter.TSV - elif out_to == 'EMAIL': - out_type = TablePrinter.EMAIL - else: - raise ValueError('Invalid out_to value: %s' % (out_to,)) - - printers = (TablePrinter(table, out_type) for table in tables) - return ''.join(printer.Print() for printer in printers) + # tables may be None. + if not tables: + return "" + + if out_to == "HTML": + out_type = TablePrinter.HTML + elif out_to == "PLAIN": + out_type = TablePrinter.PLAIN + elif out_to == "CONSOLE": + out_type = TablePrinter.CONSOLE + elif out_to == "TSV": + out_type = TablePrinter.TSV + elif out_to == "EMAIL": + out_type = TablePrinter.EMAIL + else: + raise ValueError("Invalid out_to value: %s" % (out_to,)) + printers = (TablePrinter(table, out_type) for table in tables) + return "".join(printer.Print() for printer in printers) -class TextResultsReport(ResultsReport): - """Class to generate text result report.""" - - H1_STR = '===========================================' - H2_STR = '-------------------------------------------' - - def __init__(self, results, email=False, experiment=None): - super(TextResultsReport, self).__init__(results) - self.email = email - self.experiment = experiment - - @staticmethod - def _MakeTitle(title): - header_line = TextResultsReport.H1_STR - # '' at the end gives one newline. - return '\n'.join([header_line, title, header_line, '']) - - @staticmethod - def _MakeSection(title, body): - header_line = TextResultsReport.H2_STR - # '\n' at the end gives us two newlines. 
- return '\n'.join([header_line, title, header_line, body, '\n']) - - @staticmethod - def FromExperiment(experiment, email=False): - results = BenchmarkResults.FromExperiment(experiment) - return TextResultsReport(results, email, experiment) - - def GetStatusTable(self): - """Generate the status table by the tabulator.""" - table = [['', '']] - columns = [ - Column(LiteralResult(iteration=0), Format(), 'Status'), - Column(LiteralResult(iteration=1), Format(), 'Failing Reason') - ] - - for benchmark_run in self.experiment.benchmark_runs: - status = [ - benchmark_run.name, - [benchmark_run.timeline.GetLastEvent(), benchmark_run.failure_reason] - ] - table.append(status) - cell_table = TableFormatter(table, columns).GetCellTable('status') - return [cell_table] - - def GetTotalWaitCooldownTime(self): - """Get cooldown wait time in seconds from experiment benchmark runs. - - Returns: - Dictionary {'dut': int(wait_time_in_seconds)} - """ - waittime_dict = {} - for dut in self.experiment.machine_manager.GetMachines(): - waittime_dict[dut.name] = dut.GetCooldownWaitTime() - return waittime_dict - - def GetReport(self): - """Generate the report for email and console.""" - output_type = 'EMAIL' if self.email else 'CONSOLE' - experiment = self.experiment - - sections = [] - if experiment is not None: - title_contents = "Results report for '%s'" % (experiment.name,) - else: - title_contents = 'Results report' - sections.append(self._MakeTitle(title_contents)) - if not self.benchmark_results.cwp_dso: - summary_table = _PrintTable(self.GetSummaryTables(), output_type) - else: - summary_table = _PrintTable( - self.GetSummaryTables(summary_type='samples'), output_type) - sections.append(self._MakeSection('Summary', summary_table)) - - if experiment is not None: - table = _PrintTable(self.GetStatusTable(), output_type) - sections.append(self._MakeSection('Benchmark Run Status', table)) - - if not self.benchmark_results.cwp_dso: - perf_table = _PrintTable( - self.GetSummaryTables(summary_type='perf'), output_type) - sections.append(self._MakeSection('Perf Data', perf_table)) - - if experiment is not None: - experiment_file = experiment.experiment_file - sections.append(self._MakeSection('Experiment File', experiment_file)) - - cpu_info = experiment.machine_manager.GetAllCPUInfo(experiment.labels) - sections.append(self._MakeSection('CPUInfo', cpu_info)) - - totaltime = (time.time() - - experiment.start_time) if experiment.start_time else 0 - totaltime_str = 'Total experiment time:\n%d min' % (totaltime // 60) - cooldown_waittime_list = ['Cooldown wait time:'] - # When running experiment on multiple DUTs cooldown wait time may vary - # on different devices. In addition its combined time may exceed total - # experiment time which will look weird but it is reasonable. - # For this matter print cooldown time per DUT. 
- for dut, waittime in sorted(self.GetTotalWaitCooldownTime().items()): - cooldown_waittime_list.append('DUT %s: %d min' % (dut, waittime // 60)) - cooldown_waittime_str = '\n'.join(cooldown_waittime_list) - sections.append( - self._MakeSection('Duration', - '\n\n'.join([totaltime_str, - cooldown_waittime_str]))) - - return '\n'.join(sections) +class TextResultsReport(ResultsReport): + """Class to generate text result report.""" + + H1_STR = "===========================================" + H2_STR = "-------------------------------------------" + + def __init__(self, results, email=False, experiment=None): + super(TextResultsReport, self).__init__(results) + self.email = email + self.experiment = experiment + + @staticmethod + def _MakeTitle(title): + header_line = TextResultsReport.H1_STR + # '' at the end gives one newline. + return "\n".join([header_line, title, header_line, ""]) + + @staticmethod + def _MakeSection(title, body): + header_line = TextResultsReport.H2_STR + # '\n' at the end gives us two newlines. + return "\n".join([header_line, title, header_line, body, "\n"]) + + @staticmethod + def FromExperiment(experiment, email=False): + results = BenchmarkResults.FromExperiment(experiment) + return TextResultsReport(results, email, experiment) + + def GetStatusTable(self): + """Generate the status table by the tabulator.""" + table = [["", ""]] + columns = [ + Column(LiteralResult(iteration=0), Format(), "Status"), + Column(LiteralResult(iteration=1), Format(), "Failing Reason"), + ] + + for benchmark_run in self.experiment.benchmark_runs: + status = [ + benchmark_run.name, + [ + benchmark_run.timeline.GetLastEvent(), + benchmark_run.failure_reason, + ], + ] + table.append(status) + cell_table = TableFormatter(table, columns).GetCellTable("status") + return [cell_table] + + def GetTotalWaitCooldownTime(self): + """Get cooldown wait time in seconds from experiment benchmark runs. 
+ + Returns: + Dictionary {'dut': int(wait_time_in_seconds)} + """ + waittime_dict = {} + for dut in self.experiment.machine_manager.GetMachines(): + waittime_dict[dut.name] = dut.GetCooldownWaitTime() + return waittime_dict + + def GetReport(self): + """Generate the report for email and console.""" + output_type = "EMAIL" if self.email else "CONSOLE" + experiment = self.experiment + + sections = [] + if experiment is not None: + title_contents = "Results report for '%s'" % (experiment.name,) + else: + title_contents = "Results report" + sections.append(self._MakeTitle(title_contents)) + + if not self.benchmark_results.cwp_dso: + summary_table = _PrintTable(self.GetSummaryTables(), output_type) + else: + summary_table = _PrintTable( + self.GetSummaryTables(summary_type="samples"), output_type + ) + sections.append(self._MakeSection("Summary", summary_table)) + + if experiment is not None: + table = _PrintTable(self.GetStatusTable(), output_type) + sections.append(self._MakeSection("Benchmark Run Status", table)) + + if not self.benchmark_results.cwp_dso: + perf_table = _PrintTable( + self.GetSummaryTables(summary_type="perf"), output_type + ) + sections.append(self._MakeSection("Perf Data", perf_table)) + + if experiment is not None: + experiment_file = experiment.experiment_file + sections.append( + self._MakeSection("Experiment File", experiment_file) + ) + + cpu_info = experiment.machine_manager.GetAllCPUInfo( + experiment.labels + ) + sections.append(self._MakeSection("CPUInfo", cpu_info)) + + totaltime = ( + (time.time() - experiment.start_time) + if experiment.start_time + else 0 + ) + totaltime_str = "Total experiment time:\n%d min" % (totaltime // 60) + cooldown_waittime_list = ["Cooldown wait time:"] + # When running experiment on multiple DUTs cooldown wait time may vary + # on different devices. In addition its combined time may exceed total + # experiment time which will look weird but it is reasonable. + # For this matter print cooldown time per DUT. + for dut, waittime in sorted( + self.GetTotalWaitCooldownTime().items() + ): + cooldown_waittime_list.append( + "DUT %s: %d min" % (dut, waittime // 60) + ) + cooldown_waittime_str = "\n".join(cooldown_waittime_list) + sections.append( + self._MakeSection( + "Duration", + "\n\n".join([totaltime_str, cooldown_waittime_str]), + ) + ) + + return "\n".join(sections) def _GetHTMLCharts(label_names, test_results): - charts = [] - for item, runs in test_results.items(): - # Fun fact: label_names is actually *entirely* useless as a param, since we - # never add headers. We still need to pass it anyway. 
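For orientation, TextResultsReport above does not require a full experiment: handing it a BenchmarkResults is enough for the summary sections. A minimal sketch along the lines of the unittests further down (label and keyval names are illustrative):

from results_report import BenchmarkResults, TextResultsReport

results = BenchmarkResults(
    label_names=["vanilla", "patched"],
    benchmark_names_and_iterations=[("bench1", 1)],
    run_keyvals={
        "bench1": [[{"retval": 0, "a_float": 3.96}],
                   [{"retval": 0, "a_float": 4.10}]]
    },
)
# experiment is None here, so the status, CPUInfo, and duration
# sections are skipped and only the summary tables are emitted.
print(TextResultsReport(results).GetReport())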
- table = TableGenerator(runs, label_names).GetTable() - columns = [ - Column(AmeanResult(), Format()), - Column(MinResult(), Format()), - Column(MaxResult(), Format()) - ] - tf = TableFormatter(table, columns) - data_table = tf.GetCellTable('full', headers=False) - - for cur_row_data in data_table: - test_key = cur_row_data[0].string_value - title = '{0}: {1}'.format(item, test_key.replace('/', '')) - chart = ColumnChart(title, 300, 200) - chart.AddColumn('Label', 'string') - chart.AddColumn('Average', 'number') - chart.AddColumn('Min', 'number') - chart.AddColumn('Max', 'number') - chart.AddSeries('Min', 'line', 'black') - chart.AddSeries('Max', 'line', 'black') - cur_index = 1 - for label in label_names: - chart.AddRow([ - label, cur_row_data[cur_index].value, - cur_row_data[cur_index + 1].value, cur_row_data[cur_index + 2].value - ]) - if isinstance(cur_row_data[cur_index].value, str): - chart = None - break - cur_index += 3 - if chart: - charts.append(chart) - return charts + charts = [] + for item, runs in test_results.items(): + # Fun fact: label_names is actually *entirely* useless as a param, since we + # never add headers. We still need to pass it anyway. + table = TableGenerator(runs, label_names).GetTable() + columns = [ + Column(AmeanResult(), Format()), + Column(MinResult(), Format()), + Column(MaxResult(), Format()), + ] + tf = TableFormatter(table, columns) + data_table = tf.GetCellTable("full", headers=False) + + for cur_row_data in data_table: + test_key = cur_row_data[0].string_value + title = "{0}: {1}".format(item, test_key.replace("/", "")) + chart = ColumnChart(title, 300, 200) + chart.AddColumn("Label", "string") + chart.AddColumn("Average", "number") + chart.AddColumn("Min", "number") + chart.AddColumn("Max", "number") + chart.AddSeries("Min", "line", "black") + chart.AddSeries("Max", "line", "black") + cur_index = 1 + for label in label_names: + chart.AddRow( + [ + label, + cur_row_data[cur_index].value, + cur_row_data[cur_index + 1].value, + cur_row_data[cur_index + 2].value, + ] + ) + if isinstance(cur_row_data[cur_index].value, str): + chart = None + break + cur_index += 3 + if chart: + charts.append(chart) + return charts class HTMLResultsReport(ResultsReport): - """Class to generate html result report.""" - - def __init__(self, benchmark_results, experiment=None): - super(HTMLResultsReport, self).__init__(benchmark_results) - self.experiment = experiment - - @staticmethod - def FromExperiment(experiment): - return HTMLResultsReport( - BenchmarkResults.FromExperiment(experiment), experiment=experiment) - - def GetReport(self): - label_names = self.benchmark_results.label_names - test_results = self.benchmark_results.run_keyvals - charts = _GetHTMLCharts(label_names, test_results) - chart_javascript = ''.join(chart.GetJavascript() for chart in charts) - chart_divs = ''.join(chart.GetDiv() for chart in charts) - - if not self.benchmark_results.cwp_dso: - summary_table = self.GetSummaryTables() - perf_table = self.GetSummaryTables(summary_type='perf') - else: - summary_table = self.GetSummaryTables(summary_type='samples') - perf_table = None - full_table = self.GetFullTables() - - experiment_file = '' - if self.experiment is not None: - experiment_file = self.experiment.experiment_file - # Use kwargs for code readability, and so that testing is a bit easier. 
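_GetHTMLCharts drives a small ColumnChart API: declare columns, overlay min/max line series, then add one row per label. Reduced to its core (a sketch; the import path is an assumption, taken to be crosperf's column_chart helper):

from column_chart import ColumnChart  # assumed location of the helper

chart = ColumnChart("bench1: score", 300, 200)
chart.AddColumn("Label", "string")
chart.AddColumn("Average", "number")
chart.AddColumn("Min", "number")
chart.AddColumn("Max", "number")
chart.AddSeries("Min", "line", "black")
chart.AddSeries("Max", "line", "black")
chart.AddRow(["vanilla", 10.0, 9.5, 10.5])  # label, average, min, max
chart.AddRow(["patched", 9.1, 8.8, 9.4])
html = chart.GetJavascript() + chart.GetDiv()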
- return templates.GenerateHTMLPage( - perf_table=perf_table, - chart_js=chart_javascript, - summary_table=summary_table, - print_table=_PrintTable, - chart_divs=chart_divs, - full_table=full_table, - experiment_file=experiment_file) + """Class to generate html result report.""" + + def __init__(self, benchmark_results, experiment=None): + super(HTMLResultsReport, self).__init__(benchmark_results) + self.experiment = experiment + + @staticmethod + def FromExperiment(experiment): + return HTMLResultsReport( + BenchmarkResults.FromExperiment(experiment), experiment=experiment + ) + + def GetReport(self): + label_names = self.benchmark_results.label_names + test_results = self.benchmark_results.run_keyvals + charts = _GetHTMLCharts(label_names, test_results) + chart_javascript = "".join(chart.GetJavascript() for chart in charts) + chart_divs = "".join(chart.GetDiv() for chart in charts) + + if not self.benchmark_results.cwp_dso: + summary_table = self.GetSummaryTables() + perf_table = self.GetSummaryTables(summary_type="perf") + else: + summary_table = self.GetSummaryTables(summary_type="samples") + perf_table = None + full_table = self.GetFullTables() + + experiment_file = "" + if self.experiment is not None: + experiment_file = self.experiment.experiment_file + # Use kwargs for code readability, and so that testing is a bit easier. + return templates.GenerateHTMLPage( + perf_table=perf_table, + chart_js=chart_javascript, + summary_table=summary_table, + print_table=_PrintTable, + chart_divs=chart_divs, + full_table=full_table, + experiment_file=experiment_file, + ) def ParseStandardPerfReport(report_data): - """Parses the output of `perf report`. + """Parses the output of `perf report`. - It'll parse the following: - {{garbage}} - # Samples: 1234M of event 'foo' + It'll parse the following: + {{garbage}} + # Samples: 1234M of event 'foo' - 1.23% command shared_object location function::name + 1.23% command shared_object location function::name - 1.22% command shared_object location function2::name + 1.22% command shared_object location function2::name - # Samples: 999K of event 'bar' + # Samples: 999K of event 'bar' - 0.23% command shared_object location function3::name - {{etc.}} + 0.23% command shared_object location function3::name + {{etc.}} - Into: - {'foo': {'function::name': 1.23, 'function2::name': 1.22}, - 'bar': {'function3::name': 0.23, etc.}} - """ - # This function fails silently on its if it's handed a string (as opposed to a - # list of lines). So, auto-split if we do happen to get a string. - if isinstance(report_data, str): - report_data = report_data.splitlines() - # When switching to python3 catch the case when bytes are passed. - elif isinstance(report_data, bytes): - raise TypeError() - - # Samples: N{K,M,G} of event 'event-name' - samples_regex = re.compile(r"#\s+Samples: \d+\S? of event '([^']+)'") - - # We expect lines like: - # N.NN% command samples shared_object [location] symbol - # - # Note that we're looking at stripped lines, so there is no space at the - # start. 
- perf_regex = re.compile(r'^(\d+(?:.\d*)?)%' # N.NN% - r'\s*\d+' # samples count (ignored) - r'\s*\S+' # command (ignored) - r'\s*\S+' # shared_object (ignored) - r'\s*\[.\]' # location (ignored) - r'\s*(\S.+)' # function - ) - - stripped_lines = (l.strip() for l in report_data) - nonempty_lines = (l for l in stripped_lines if l) - # Ignore all lines before we see samples_regex - interesting_lines = itertools.dropwhile(lambda x: not samples_regex.match(x), - nonempty_lines) - - first_sample_line = next(interesting_lines, None) - # Went through the entire file without finding a 'samples' header. Quit. - if first_sample_line is None: - return {} - - sample_name = samples_regex.match(first_sample_line).group(1) - current_result = {} - results = {sample_name: current_result} - for line in interesting_lines: - samples_match = samples_regex.match(line) - if samples_match: - sample_name = samples_match.group(1) - current_result = {} - results[sample_name] = current_result - continue - - match = perf_regex.match(line) - if not match: - continue - percentage_str, func_name = match.groups() - try: - percentage = float(percentage_str) - except ValueError: - # Couldn't parse it; try to be "resilient". - continue - current_result[func_name] = percentage - return results + Into: + {'foo': {'function::name': 1.23, 'function2::name': 1.22}, + 'bar': {'function3::name': 0.23, etc.}} + """ + # This function fails silently on its if it's handed a string (as opposed to a + # list of lines). So, auto-split if we do happen to get a string. + if isinstance(report_data, str): + report_data = report_data.splitlines() + # When switching to python3 catch the case when bytes are passed. + elif isinstance(report_data, bytes): + raise TypeError() + + # Samples: N{K,M,G} of event 'event-name' + samples_regex = re.compile(r"#\s+Samples: \d+\S? of event '([^']+)'") + + # We expect lines like: + # N.NN% command samples shared_object [location] symbol + # + # Note that we're looking at stripped lines, so there is no space at the + # start. + perf_regex = re.compile( + r"^(\d+(?:.\d*)?)%" # N.NN% + r"\s*\d+" # samples count (ignored) + r"\s*\S+" # command (ignored) + r"\s*\S+" # shared_object (ignored) + r"\s*\[.\]" # location (ignored) + r"\s*(\S.+)" # function + ) + + stripped_lines = (l.strip() for l in report_data) + nonempty_lines = (l for l in stripped_lines if l) + # Ignore all lines before we see samples_regex + interesting_lines = itertools.dropwhile( + lambda x: not samples_regex.match(x), nonempty_lines + ) + + first_sample_line = next(interesting_lines, None) + # Went through the entire file without finding a 'samples' header. Quit. + if first_sample_line is None: + return {} + sample_name = samples_regex.match(first_sample_line).group(1) + current_result = {} + results = {sample_name: current_result} + for line in interesting_lines: + samples_match = samples_regex.match(line) + if samples_match: + sample_name = samples_match.group(1) + current_result = {} + results[sample_name] = current_result + continue + + match = perf_regex.match(line) + if not match: + continue + percentage_str, func_name = match.groups() + try: + percentage = float(percentage_str) + except ValueError: + # Couldn't parse it; try to be "resilient". + continue + current_result[func_name] = percentage + return results -def _ReadExperimentPerfReport(results_directory, label_name, benchmark_name, - benchmark_iteration): - """Reads a perf report for the given benchmark. Returns {} on failure. 
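ParseStandardPerfReport is easiest to see end to end on a tiny synthetic report shaped like its docstring's example (input values are illustrative; the import mirrors what the unittests do):

from results_report import ParseStandardPerfReport

sample = """
# Samples: 100K of event 'cycles'

 12.34%  2000  chrome  libc.so  [.] malloc
  1.23%   200  chrome  libc.so  [.] free
"""
print(ParseStandardPerfReport(sample))
# -> {'cycles': {'malloc': 12.34, 'free': 1.23}}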
- The result should be a map of maps; it should look like:
- {perf_event_name: {function_name: pct_time_spent}}, e.g.
- {'cpu_cycles': {'_malloc': 10.0, '_free': 0.3, ...}}
- """
- raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
- dir_name = ''.join(c for c in raw_dir_name if c.isalnum())
- file_name = os.path.join(results_directory, dir_name, 'perf.data.report.0')
- try:
- with open(file_name) as in_file:
- return ParseStandardPerfReport(in_file)
- except IOError:
- # Yes, we swallow any IO-related errors.
- return {}
+def _ReadExperimentPerfReport(
+    results_directory, label_name, benchmark_name, benchmark_iteration
+):
+    """Reads a perf report for the given benchmark. Returns {} on failure.
+
+    The result should be a map of maps; it should look like:
+    {perf_event_name: {function_name: pct_time_spent}}, e.g.
+    {'cpu_cycles': {'_malloc': 10.0, '_free': 0.3, ...}}
+    """
+    raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
+    dir_name = "".join(c for c in raw_dir_name if c.isalnum())
+    file_name = os.path.join(results_directory, dir_name, "perf.data.report.0")
+    try:
+        with open(file_name) as in_file:
+            return ParseStandardPerfReport(in_file)
+    except IOError:
+        # Yes, we swallow any IO-related errors.
+        return {}


# Split out so that testing (specifically: mocking) is easier
def _ExperimentToKeyvals(experiment, for_json_report):
- """Converts an experiment to keyvals."""
- return OrganizeResults(
- experiment.benchmark_runs, experiment.labels, json_report=for_json_report)
+    """Converts an experiment to keyvals."""
+    return OrganizeResults(
+        experiment.benchmark_runs,
+        experiment.labels,
+        json_report=for_json_report,
+    )


class BenchmarkResults(object):
- """The minimum set of fields that any ResultsReport will take."""
-
- def __init__(self,
- label_names,
- benchmark_names_and_iterations,
- run_keyvals,
- ignore_min_max=False,
- read_perf_report=None,
- cwp_dso=None,
- weights=None):
- if read_perf_report is None:
-
- def _NoPerfReport(*_args, **_kwargs):
- return {}
-
- read_perf_report = _NoPerfReport
-
- self.label_names = label_names
- self.benchmark_names_and_iterations = benchmark_names_and_iterations
- self.iter_counts = dict(benchmark_names_and_iterations)
- self.run_keyvals = run_keyvals
- self.ignore_min_max = ignore_min_max
- self.read_perf_report = read_perf_report
- self.cwp_dso = cwp_dso
- self.weights = dict(weights) if weights else None
-
- @staticmethod
- def FromExperiment(experiment, for_json_report=False):
- label_names = [label.name for label in experiment.labels]
- benchmark_names_and_iterations = [(benchmark.name, benchmark.iterations)
- for benchmark in experiment.benchmarks]
- run_keyvals = _ExperimentToKeyvals(experiment, for_json_report)
- ignore_min_max = experiment.ignore_min_max
- read_perf_report = functools.partial(_ReadExperimentPerfReport,
- experiment.results_directory)
- cwp_dso = experiment.cwp_dso
- weights = [(benchmark.name, benchmark.weight)
- for benchmark in experiment.benchmarks]
- return BenchmarkResults(label_names, benchmark_names_and_iterations,
- run_keyvals, ignore_min_max, read_perf_report,
- cwp_dso, weights)
+    """The minimum set of fields that any ResultsReport will take."""
+
+    def __init__(
+        self,
+        label_names,
+        benchmark_names_and_iterations,
+        run_keyvals,
+        ignore_min_max=False,
+        read_perf_report=None,
+        cwp_dso=None,
+        weights=None,
+    ):
+        if read_perf_report is None:
+
+            def _NoPerfReport(*_args, **_kwargs):
+                return {}
+
+            read_perf_report = _NoPerfReport
+
+        self.label_names = label_names
+        self.benchmark_names_and_iterations = benchmark_names_and_iterations
+        self.iter_counts = dict(benchmark_names_and_iterations)
+        self.run_keyvals = run_keyvals
+        self.ignore_min_max = ignore_min_max
+        self.read_perf_report = read_perf_report
+        self.cwp_dso = cwp_dso
+        self.weights = dict(weights) if weights else None
+
+    @staticmethod
+    def FromExperiment(experiment, for_json_report=False):
+        label_names = [label.name for label in experiment.labels]
+        benchmark_names_and_iterations = [
+            (benchmark.name, benchmark.iterations)
+            for benchmark in experiment.benchmarks
+        ]
+        run_keyvals = _ExperimentToKeyvals(experiment, for_json_report)
+        ignore_min_max = experiment.ignore_min_max
+        read_perf_report = functools.partial(
+            _ReadExperimentPerfReport, experiment.results_directory
+        )
+        cwp_dso = experiment.cwp_dso
+        weights = [
+            (benchmark.name, benchmark.weight)
+            for benchmark in experiment.benchmarks
+        ]
+        return BenchmarkResults(
+            label_names,
+            benchmark_names_and_iterations,
+            run_keyvals,
+            ignore_min_max,
+            read_perf_report,
+            cwp_dso,
+            weights,
+        )


def _GetElemByName(name, from_list):
- """Gets an element from the given list by its name field.
+    """Gets an element from the given list by its name field.

- Raises an error if it doesn't find exactly one match.
- """
- elems = [e for e in from_list if e.name == name]
- if len(elems) != 1:
- raise ValueError('Expected 1 item named %s, found %d' % (name, len(elems)))
- return elems[0]
+    Raises an error if it doesn't find exactly one match.
+    """
+    elems = [e for e in from_list if e.name == name]
+    if len(elems) != 1:
+        raise ValueError(
+            "Expected 1 item named %s, found %d" % (name, len(elems))
+        )
+    return elems[0]


def _Unlist(l):
- """If l is a list, extracts the first element of l. Otherwise, returns l."""
- return l[0] if isinstance(l, list) else l
+    """If l is a list, extracts the first element of l. Otherwise, returns l."""
+    return l[0] if isinstance(l, list) else l


class JSONResultsReport(ResultsReport):
- """Class that generates JSON reports for experiments."""
-
- def __init__(self,
- benchmark_results,
- benchmark_date=None,
- benchmark_time=None,
- experiment=None,
- json_args=None):
- """Construct a JSONResultsReport.
-
- json_args is the dict of arguments we pass to json.dumps in GetReport().
- """
- super(JSONResultsReport, self).__init__(benchmark_results)
-
- defaults = TelemetryDefaults()
- defaults.ReadDefaultsFile()
- summary_field_defaults = defaults.GetDefault()
- if summary_field_defaults is None:
- summary_field_defaults = {}
- self.summary_field_defaults = summary_field_defaults
-
- if json_args is None:
- json_args = {}
- self.json_args = json_args
-
- self.experiment = experiment
- if not benchmark_date:
- timestamp = datetime.datetime.strftime(datetime.datetime.now(),
- '%Y-%m-%d %H:%M:%S')
- benchmark_date, benchmark_time = timestamp.split(' ')
- self.date = benchmark_date
- self.time = benchmark_time
-
- @staticmethod
- def FromExperiment(experiment,
- benchmark_date=None,
- benchmark_time=None,
- json_args=None):
- benchmark_results = BenchmarkResults.FromExperiment(
- experiment, for_json_report=True)
- return JSONResultsReport(benchmark_results, benchmark_date, benchmark_time,
- experiment, json_args)
-
- def GetReportObjectIgnoringExperiment(self):
- """Gets the JSON report object specifically for the output data.
-
- Ignores any experiment-specific fields (e.g. board, machine checksum, ...).
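The _GetElemByName and _Unlist helpers in this hunk have contracts that are easy to pin down on plain data. A self-contained sketch, with both helpers copied verbatim from above:

import collections

def _GetElemByName(name, from_list):
    elems = [e for e in from_list if e.name == name]
    if len(elems) != 1:
        raise ValueError("Expected 1 item named %s, found %d" % (name, len(elems)))
    return elems[0]

def _Unlist(l):
    return l[0] if isinstance(l, list) else l

Label = collections.namedtuple("Label", ["name"])
assert _GetElemByName("a", [Label("a"), Label("b")]).name == "a"
assert _Unlist([1, 2]) == 1  # lists collapse to their first element
assert _Unlist(7) == 7       # non-lists pass through unchanged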
- """ - benchmark_results = self.benchmark_results - label_names = benchmark_results.label_names - summary_field_defaults = self.summary_field_defaults - final_results = [] - for test, test_results in benchmark_results.run_keyvals.items(): - for label_name, label_results in zip(label_names, test_results): - for iter_results in label_results: - passed = iter_results.get('retval') == 0 - json_results = { - 'date': self.date, - 'time': self.time, - 'label': label_name, - 'test_name': test, - 'pass': passed, - } - final_results.append(json_results) - - if not passed: - continue - - # Get overall results. - summary_fields = summary_field_defaults.get(test) - if summary_fields is not None: - value = [] - json_results['overall_result'] = value - for f in summary_fields: - v = iter_results.get(f) - if v is None: + """Class that generates JSON reports for experiments.""" + + def __init__( + self, + benchmark_results, + benchmark_date=None, + benchmark_time=None, + experiment=None, + json_args=None, + ): + """Construct a JSONResultsReport. + + json_args is the dict of arguments we pass to json.dumps in GetReport(). + """ + super(JSONResultsReport, self).__init__(benchmark_results) + + defaults = TelemetryDefaults() + defaults.ReadDefaultsFile() + summary_field_defaults = defaults.GetDefault() + if summary_field_defaults is None: + summary_field_defaults = {} + self.summary_field_defaults = summary_field_defaults + + if json_args is None: + json_args = {} + self.json_args = json_args + + self.experiment = experiment + if not benchmark_date: + timestamp = datetime.datetime.strftime( + datetime.datetime.now(), "%Y-%m-%d %H:%M:%S" + ) + benchmark_date, benchmark_time = timestamp.split(" ") + self.date = benchmark_date + self.time = benchmark_time + + @staticmethod + def FromExperiment( + experiment, benchmark_date=None, benchmark_time=None, json_args=None + ): + benchmark_results = BenchmarkResults.FromExperiment( + experiment, for_json_report=True + ) + return JSONResultsReport( + benchmark_results, + benchmark_date, + benchmark_time, + experiment, + json_args, + ) + + def GetReportObjectIgnoringExperiment(self): + """Gets the JSON report object specifically for the output data. + + Ignores any experiment-specific fields (e.g. board, machine checksum, ...). + """ + benchmark_results = self.benchmark_results + label_names = benchmark_results.label_names + summary_field_defaults = self.summary_field_defaults + final_results = [] + for test, test_results in benchmark_results.run_keyvals.items(): + for label_name, label_results in zip(label_names, test_results): + for iter_results in label_results: + passed = iter_results.get("retval") == 0 + json_results = { + "date": self.date, + "time": self.time, + "label": label_name, + "test_name": test, + "pass": passed, + } + final_results.append(json_results) + + if not passed: + continue + + # Get overall results. + summary_fields = summary_field_defaults.get(test) + if summary_fields is not None: + value = [] + json_results["overall_result"] = value + for f in summary_fields: + v = iter_results.get(f) + if v is None: + continue + # New telemetry results format: sometimes we get a list of lists + # now. + v = _Unlist(_Unlist(v)) + value.append((f, float(v))) + + # Get detailed results. 
+ detail_results = {} + json_results["detailed_results"] = detail_results + for k, v in iter_results.items(): + if ( + k == "retval" + or k == "PASS" + or k == ["PASS"] + or v == "PASS" + ): + continue + + v = _Unlist(v) + if "machine" in k: + json_results[k] = v + elif v is not None: + if isinstance(v, list): + detail_results[k] = [float(d) for d in v] + else: + detail_results[k] = float(v) + return final_results + + def GetReportObject(self): + """Generate the JSON report, returning it as a python object.""" + report_list = self.GetReportObjectIgnoringExperiment() + if self.experiment is not None: + self._AddExperimentSpecificFields(report_list) + return report_list + + def _AddExperimentSpecificFields(self, report_list): + """Add experiment-specific data to the JSON report.""" + board = self.experiment.labels[0].board + manager = self.experiment.machine_manager + for report in report_list: + label_name = report["label"] + label = _GetElemByName(label_name, self.experiment.labels) + + img_path = os.path.realpath( + os.path.expanduser(label.chromeos_image) + ) + ver, img = ParseChromeosImage(img_path) + + report.update( + { + "board": board, + "chromeos_image": img, + "chromeos_version": ver, + "chrome_version": label.chrome_version, + "compiler": label.compiler, + } + ) + + if not report["pass"]: continue - # New telemetry results format: sometimes we get a list of lists - # now. - v = _Unlist(_Unlist(v)) - value.append((f, float(v))) - - # Get detailed results. - detail_results = {} - json_results['detailed_results'] = detail_results - for k, v in iter_results.items(): - if k == 'retval' or k == 'PASS' or k == ['PASS'] or v == 'PASS': - continue - - v = _Unlist(v) - if 'machine' in k: - json_results[k] = v - elif v is not None: - if isinstance(v, list): - detail_results[k] = [float(d) for d in v] - else: - detail_results[k] = float(v) - return final_results - - def GetReportObject(self): - """Generate the JSON report, returning it as a python object.""" - report_list = self.GetReportObjectIgnoringExperiment() - if self.experiment is not None: - self._AddExperimentSpecificFields(report_list) - return report_list - - def _AddExperimentSpecificFields(self, report_list): - """Add experiment-specific data to the JSON report.""" - board = self.experiment.labels[0].board - manager = self.experiment.machine_manager - for report in report_list: - label_name = report['label'] - label = _GetElemByName(label_name, self.experiment.labels) - - img_path = os.path.realpath(os.path.expanduser(label.chromeos_image)) - ver, img = ParseChromeosImage(img_path) - - report.update({ - 'board': board, - 'chromeos_image': img, - 'chromeos_version': ver, - 'chrome_version': label.chrome_version, - 'compiler': label.compiler - }) - - if not report['pass']: - continue - if 'machine_checksum' not in report: - report['machine_checksum'] = manager.machine_checksum[label_name] - if 'machine_string' not in report: - report['machine_string'] = manager.machine_checksum_string[label_name] - - def GetReport(self): - """Dump the results of self.GetReportObject() to a string as JSON.""" - # This exists for consistency with the other GetReport methods. - # Specifically, they all return strings, so it's a bit awkward if the JSON - # results reporter returns an object. 
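Because json_args is forwarded untouched to json.dumps, callers control the serialization details. A minimal sketch, mirroring the unittest's direct construction below (values illustrative):

from results_report import BenchmarkResults, JSONResultsReport

bench_results = BenchmarkResults(
    ["label1"], [("bench1", 1)], {"bench1": [[{"retval": 0, "foo": 2.0}]]}
)
reporter = JSONResultsReport(bench_results, json_args={"indent": 2})
print(reporter.GetReport())  # pretty-printed, since indent reaches json.dumps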
- return json.dumps(self.GetReportObject(), **self.json_args) + if "machine_checksum" not in report: + report["machine_checksum"] = manager.machine_checksum[ + label_name + ] + if "machine_string" not in report: + report["machine_string"] = manager.machine_checksum_string[ + label_name + ] + + def GetReport(self): + """Dump the results of self.GetReportObject() to a string as JSON.""" + # This exists for consistency with the other GetReport methods. + # Specifically, they all return strings, so it's a bit awkward if the JSON + # results reporter returns an object. + return json.dumps(self.GetReportObject(), **self.json_args) diff --git a/crosperf/results_report_templates.py b/crosperf/results_report_templates.py index 43b935b2..ec87ac41 100644 --- a/crosperf/results_report_templates.py +++ b/crosperf/results_report_templates.py @@ -9,18 +9,21 @@ from __future__ import print_function import html from string import Template -_TabMenuTemplate = Template(""" + +_TabMenuTemplate = Template( + """ <div class='tab-menu'> <a href="javascript:switchTab('$table_name', 'html')">HTML</a> <a href="javascript:switchTab('$table_name', 'text')">Text</a> <a href="javascript:switchTab('$table_name', 'tsv')">TSV</a> -</div>""") +</div>""" +) def _GetTabMenuHTML(table_name): - # N.B. cgi.escape does some very basic HTML escaping. Nothing more. - escaped = html.escape(table_name) - return _TabMenuTemplate.substitute(table_name=escaped) + # N.B. cgi.escape does some very basic HTML escaping. Nothing more. + escaped = html.escape(table_name) + return _TabMenuTemplate.substitute(table_name=escaped) _ExperimentFileHTML = """ @@ -33,12 +36,15 @@ _ExperimentFileHTML = """ def _GetExperimentFileHTML(experiment_file_text): - if not experiment_file_text: - return '' - return _ExperimentFileHTML % (html.escape(experiment_file_text, quote=False),) + if not experiment_file_text: + return "" + return _ExperimentFileHTML % ( + html.escape(experiment_file_text, quote=False), + ) -_ResultsSectionHTML = Template(""" +_ResultsSectionHTML = Template( + """ <div class='results-section'> <div class='results-section-title'>$sect_name</div> <div class='results-section-content'> @@ -48,22 +54,25 @@ _ResultsSectionHTML = Template(""" </div> $tab_menu </div> -""") +""" +) def _GetResultsSectionHTML(print_table, table_name, data): - first_word = table_name.strip().split()[0] - short_name = first_word.lower() - return _ResultsSectionHTML.substitute( - sect_name=table_name, - html_table=print_table(data, 'HTML'), - text_table=print_table(data, 'PLAIN'), - tsv_table=print_table(data, 'TSV'), - tab_menu=_GetTabMenuHTML(short_name), - short_name=short_name) - - -_MainHTML = Template(""" + first_word = table_name.strip().split()[0] + short_name = first_word.lower() + return _ResultsSectionHTML.substitute( + sect_name=table_name, + html_table=print_table(data, "HTML"), + text_table=print_table(data, "PLAIN"), + tsv_table=print_table(data, "TSV"), + tab_menu=_GetTabMenuHTML(short_name), + short_name=short_name, + ) + + +_MainHTML = Template( + """ <html> <head> <style type="text/css"> @@ -169,37 +178,50 @@ _MainHTML = Template(""" $experiment_file </body> </html> -""") +""" +) # It's a bit ugly that we take some HTML things, and some non-HTML things, but I # need to balance prettiness with time spent making things pretty. -def GenerateHTMLPage(perf_table, chart_js, summary_table, print_table, - chart_divs, full_table, experiment_file): - """Generates a crosperf HTML page from the given arguments. 
- - print_table is a two-arg function called like: print_table(t, f) - t is one of [summary_table, print_table, full_table]; it's the table we want - to format. - f is one of ['TSV', 'HTML', 'PLAIN']; it's the type of format we want. - """ - summary_table_html = _GetResultsSectionHTML(print_table, 'Summary Table', - summary_table) - if perf_table: - perf_html = _GetResultsSectionHTML(print_table, 'Perf Table', perf_table) - perf_init = "switchTab('perf', 'html')" - else: - perf_html = '' - perf_init = '' - - full_table_html = _GetResultsSectionHTML(print_table, 'Full Table', - full_table) - experiment_file_html = _GetExperimentFileHTML(experiment_file) - return _MainHTML.substitute( - perf_init=perf_init, - chart_js=chart_js, - summary_table=summary_table_html, - perf_html=perf_html, - chart_divs=chart_divs, - full_table=full_table_html, - experiment_file=experiment_file_html) +def GenerateHTMLPage( + perf_table, + chart_js, + summary_table, + print_table, + chart_divs, + full_table, + experiment_file, +): + """Generates a crosperf HTML page from the given arguments. + + print_table is a two-arg function called like: print_table(t, f) + t is one of [summary_table, print_table, full_table]; it's the table we want + to format. + f is one of ['TSV', 'HTML', 'PLAIN']; it's the type of format we want. + """ + summary_table_html = _GetResultsSectionHTML( + print_table, "Summary Table", summary_table + ) + if perf_table: + perf_html = _GetResultsSectionHTML( + print_table, "Perf Table", perf_table + ) + perf_init = "switchTab('perf', 'html')" + else: + perf_html = "" + perf_init = "" + + full_table_html = _GetResultsSectionHTML( + print_table, "Full Table", full_table + ) + experiment_file_html = _GetExperimentFileHTML(experiment_file) + return _MainHTML.substitute( + perf_init=perf_init, + chart_js=chart_js, + summary_table=summary_table_html, + perf_html=perf_html, + chart_divs=chart_divs, + full_table=full_table_html, + experiment_file=experiment_file_html, + ) diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py index 3b7bc35b..ef073a71 100755 --- a/crosperf/results_report_unittest.py +++ b/crosperf/results_report_unittest.py @@ -16,8 +16,6 @@ import os import unittest import unittest.mock as mock -import test_flag - from benchmark_run import MockBenchmarkRun from cros_utils import logger from experiment_factory import ExperimentFactory @@ -31,39 +29,46 @@ from results_report import JSONResultsReport from results_report import ParseChromeosImage from results_report import ParseStandardPerfReport from results_report import TextResultsReport +import test_flag class FreeFunctionsTest(unittest.TestCase): - """Tests for any free functions in results_report.""" - - def testParseChromeosImage(self): - # N.B. the cases with blank versions aren't explicitly supported by - # ParseChromeosImage. I'm not sure if they need to be supported, but the - # goal of this was to capture existing functionality as much as possible. 
- base_case = '/my/chroot/src/build/images/x86-generic/R01-1.0.date-time' \ - '/chromiumos_test_image.bin' - self.assertEqual(ParseChromeosImage(base_case), ('R01-1.0', base_case)) - - dir_base_case = os.path.dirname(base_case) - self.assertEqual(ParseChromeosImage(dir_base_case), ('', dir_base_case)) - - buildbot_case = '/my/chroot/chroot/tmp/buildbot-build/R02-1.0.date-time' \ - '/chromiumos_test_image.bin' - buildbot_img = buildbot_case.split('/chroot/tmp')[1] - - self.assertEqual( - ParseChromeosImage(buildbot_case), ('R02-1.0', buildbot_img)) - self.assertEqual( - ParseChromeosImage(os.path.dirname(buildbot_case)), - ('', os.path.dirname(buildbot_img))) - - # Ensure we do something reasonable when giving paths that don't quite - # match the expected pattern. - fun_case = '/chromiumos_test_image.bin' - self.assertEqual(ParseChromeosImage(fun_case), ('', fun_case)) - - fun_case2 = 'chromiumos_test_image.bin' - self.assertEqual(ParseChromeosImage(fun_case2), ('', fun_case2)) + """Tests for any free functions in results_report.""" + + def testParseChromeosImage(self): + # N.B. the cases with blank versions aren't explicitly supported by + # ParseChromeosImage. I'm not sure if they need to be supported, but the + # goal of this was to capture existing functionality as much as possible. + base_case = ( + "/my/chroot/src/build/images/x86-generic/R01-1.0.date-time" + "/chromiumos_test_image.bin" + ) + self.assertEqual(ParseChromeosImage(base_case), ("R01-1.0", base_case)) + + dir_base_case = os.path.dirname(base_case) + self.assertEqual(ParseChromeosImage(dir_base_case), ("", dir_base_case)) + + buildbot_case = ( + "/my/chroot/chroot/tmp/buildbot-build/R02-1.0.date-time" + "/chromiumos_test_image.bin" + ) + buildbot_img = buildbot_case.split("/chroot/tmp")[1] + + self.assertEqual( + ParseChromeosImage(buildbot_case), ("R02-1.0", buildbot_img) + ) + self.assertEqual( + ParseChromeosImage(os.path.dirname(buildbot_case)), + ("", os.path.dirname(buildbot_img)), + ) + + # Ensure we do something reasonable when giving paths that don't quite + # match the expected pattern. + fun_case = "/chromiumos_test_image.bin" + self.assertEqual(ParseChromeosImage(fun_case), ("", fun_case)) + + fun_case2 = "chromiumos_test_image.bin" + self.assertEqual(ParseChromeosImage(fun_case2), ("", fun_case2)) # There are many ways for this to be done better, but the linter complains @@ -72,19 +77,20 @@ _fake_path_number = [0] def FakePath(ext): - """Makes a unique path that shouldn't exist on the host system. + """Makes a unique path that shouldn't exist on the host system. - Each call returns a different path, so if said path finds its way into an - error message, it may be easier to track it to its source. - """ - _fake_path_number[0] += 1 - prefix = '/tmp/should/not/exist/%d/' % (_fake_path_number[0],) - return os.path.join(prefix, ext) + Each call returns a different path, so if said path finds its way into an + error message, it may be easier to track it to its source. 
+    """
+    _fake_path_number[0] += 1
+    prefix = "/tmp/should/not/exist/%d/" % (_fake_path_number[0],)
+    return os.path.join(prefix, ext)


-def MakeMockExperiment(compiler='gcc'):
- """Mocks an experiment using the given compiler."""
- mock_experiment_file = io.StringIO("""
+def MakeMockExperiment(compiler="gcc"):
+    """Mocks an experiment using the given compiler."""
+    mock_experiment_file = io.StringIO(
+        """
board: x86-alex
remote: 127.0.0.1
locks_dir: /tmp
@@ -101,363 +107,398 @@ def MakeMockExperiment(compiler='gcc'):
    remote: 127.0.0.2
    chromeos_image: %s
  }
- """ % (FakePath('cros_image1.bin'), FakePath('cros_image2.bin')))
- efile = ExperimentFile(mock_experiment_file)
- experiment = ExperimentFactory().GetExperiment(efile,
- FakePath('working_directory'),
- FakePath('log_dir'))
- for label in experiment.labels:
- label.compiler = compiler
- return experiment
+  """
+        % (FakePath("cros_image1.bin"), FakePath("cros_image2.bin"))
+    )
+    efile = ExperimentFile(mock_experiment_file)
+    experiment = ExperimentFactory().GetExperiment(
+        efile, FakePath("working_directory"), FakePath("log_dir")
+    )
+    for label in experiment.labels:
+        label.compiler = compiler
+    return experiment


def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0):
- """Injects successful experiment runs (for each label) into the experiment."""
- # Defensive copy of keyvals, so if it's modified, we'll know.
- keyvals = dict(keyvals)
- num_configs = len(experiment.benchmarks) * len(experiment.labels)
- num_runs = len(experiment.benchmark_runs) // num_configs
-
- # TODO(gbiv): Centralize the mocking of these, maybe? (It's also done in
- # benchmark_run_unittest)
- bench = experiment.benchmarks[for_benchmark]
- cache_conditions = []
- log_level = 'average'
- share_cache = ''
- locks_dir = ''
- log = logger.GetLogger()
- machine_manager = MockMachineManager(
- FakePath('chromeos_root'), 0, log_level, locks_dir)
- machine_manager.AddMachine('testing_machine')
- machine = next(
- m for m in machine_manager.GetMachines() if m.name == 'testing_machine')
-
- def MakeSuccessfulRun(n, label):
- run = MockBenchmarkRun('mock_success%d' % (n,), bench, label,
- 1 + n + num_runs, cache_conditions, machine_manager,
- log, log_level, share_cache, {})
- mock_result = MockResult(log, label, log_level, machine)
- mock_result.keyvals = keyvals
- run.result = mock_result
- return run
-
- for label in experiment.labels:
- experiment.benchmark_runs.extend(
- MakeSuccessfulRun(n, label) for n in range(how_many))
- return experiment
+    """Injects successful experiment runs (for each label) into the experiment."""
+    # Defensive copy of keyvals, so if it's modified, we'll know.
+    keyvals = dict(keyvals)
+    num_configs = len(experiment.benchmarks) * len(experiment.labels)
+    num_runs = len(experiment.benchmark_runs) // num_configs
+
+    # TODO(gbiv): Centralize the mocking of these, maybe? (It's also done in
+    # benchmark_run_unittest)
+    bench = experiment.benchmarks[for_benchmark]
+    cache_conditions = []
+    log_level = "average"
+    share_cache = ""
+    locks_dir = ""
+    log = logger.GetLogger()
+    machine_manager = MockMachineManager(
+        FakePath("chromeos_root"), 0, log_level, locks_dir
+    )
+    machine_manager.AddMachine("testing_machine")
+    machine = next(
+        m for m in machine_manager.GetMachines() if m.name == "testing_machine"
+    )
+
+    def MakeSuccessfulRun(n, label):
+        run = MockBenchmarkRun(
+            "mock_success%d" % (n,),
+            bench,
+            label,
+            1 + n + num_runs,
+            cache_conditions,
+            machine_manager,
+            log,
+            log_level,
+            share_cache,
+            {},
+        )
+        mock_result = MockResult(log, label, log_level, machine)
+        mock_result.keyvals = keyvals
+        run.result = mock_result
+        return run
+
+    for label in experiment.labels:
+        experiment.benchmark_runs.extend(
+            MakeSuccessfulRun(n, label) for n in range(how_many)
+        )
+    return experiment


class TextResultsReportTest(unittest.TestCase):
- """Tests that the output of a text report contains the things we pass in.
-
- At the moment, this doesn't care deeply about the format in which said
- things are displayed. It just cares that they're present.
- """
-
- def _checkReport(self, mock_getcooldown, email):
- num_success = 2
- success_keyvals = {'retval': 0, 'machine': 'some bot', 'a_float': 3.96}
- experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
- success_keyvals)
- SECONDS_IN_MIN = 60
- mock_getcooldown.return_value = {
- experiment.remote[0]: 12 * SECONDS_IN_MIN,
- experiment.remote[1]: 8 * SECONDS_IN_MIN
- }
-
- text_report = TextResultsReport.FromExperiment(
- experiment, email=email).GetReport()
- self.assertIn(str(success_keyvals['a_float']), text_report)
- self.assertIn(success_keyvals['machine'], text_report)
- self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
- self.assertIn('\nDuration\n', text_report)
- self.assertIn('Total experiment time:\n', text_report)
- self.assertIn('Cooldown wait time:\n', text_report)
- self.assertIn('DUT %s: %d min' % (experiment.remote[0], 12), text_report)
- self.assertIn('DUT %s: %d min' % (experiment.remote[1], 8), text_report)
- return text_report
-
- @mock.patch.object(TextResultsReport, 'GetTotalWaitCooldownTime')
- def testOutput(self, mock_getcooldown):
- email_report = self._checkReport(mock_getcooldown, email=True)
- text_report = self._checkReport(mock_getcooldown, email=False)
-
- # Ensure that the reports somehow different. Otherwise, having the
- # distinction is useless.
- self.assertNotEqual(email_report, text_report)
-
- def test_get_totalwait_cooldowntime(self):
- experiment = MakeMockExperiment()
- cros_machines = experiment.machine_manager.GetMachines()
- cros_machines[0].AddCooldownWaitTime(120)
- cros_machines[1].AddCooldownWaitTime(240)
- text_results = TextResultsReport.FromExperiment(experiment, email=False)
- total = text_results.GetTotalWaitCooldownTime()
- self.assertEqual(total[experiment.remote[0]], 120)
- self.assertEqual(total[experiment.remote[1]], 240)
+    """Tests that the output of a text report contains the things we pass in.
+
+    At the moment, this doesn't care deeply about the format in which said
+    things are displayed. It just cares that they're present.
+ """ + + def _checkReport(self, mock_getcooldown, email): + num_success = 2 + success_keyvals = {"retval": 0, "machine": "some bot", "a_float": 3.96} + experiment = _InjectSuccesses( + MakeMockExperiment(), num_success, success_keyvals + ) + SECONDS_IN_MIN = 60 + mock_getcooldown.return_value = { + experiment.remote[0]: 12 * SECONDS_IN_MIN, + experiment.remote[1]: 8 * SECONDS_IN_MIN, + } + + text_report = TextResultsReport.FromExperiment( + experiment, email=email + ).GetReport() + self.assertIn(str(success_keyvals["a_float"]), text_report) + self.assertIn(success_keyvals["machine"], text_report) + self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report) + self.assertIn("\nDuration\n", text_report) + self.assertIn("Total experiment time:\n", text_report) + self.assertIn("Cooldown wait time:\n", text_report) + self.assertIn( + "DUT %s: %d min" % (experiment.remote[0], 12), text_report + ) + self.assertIn("DUT %s: %d min" % (experiment.remote[1], 8), text_report) + return text_report + + @mock.patch.object(TextResultsReport, "GetTotalWaitCooldownTime") + def testOutput(self, mock_getcooldown): + email_report = self._checkReport(mock_getcooldown, email=True) + text_report = self._checkReport(mock_getcooldown, email=False) + + # Ensure that the reports somehow different. Otherwise, having the + # distinction is useless. + self.assertNotEqual(email_report, text_report) + + def test_get_totalwait_cooldowntime(self): + experiment = MakeMockExperiment() + cros_machines = experiment.machine_manager.GetMachines() + cros_machines[0].AddCooldownWaitTime(120) + cros_machines[1].AddCooldownWaitTime(240) + text_results = TextResultsReport.FromExperiment(experiment, email=False) + total = text_results.GetTotalWaitCooldownTime() + self.assertEqual(total[experiment.remote[0]], 120) + self.assertEqual(total[experiment.remote[1]], 240) class HTMLResultsReportTest(unittest.TestCase): - """Tests that the output of a HTML report contains the things we pass in. - - At the moment, this doesn't care deeply about the format in which said - things are displayed. It just cares that they're present. - """ - - _TestOutput = collections.namedtuple('TestOutput', [ - 'summary_table', 'perf_html', 'chart_js', 'charts', 'full_table', - 'experiment_file' - ]) - - @staticmethod - def _GetTestOutput(perf_table, chart_js, summary_table, print_table, - chart_divs, full_table, experiment_file): - # N.B. Currently we don't check chart_js; it's just passed through because - # cros lint complains otherwise. - summary_table = print_table(summary_table, 'HTML') - perf_html = print_table(perf_table, 'HTML') - full_table = print_table(full_table, 'HTML') - return HTMLResultsReportTest._TestOutput( - summary_table=summary_table, - perf_html=perf_html, - chart_js=chart_js, - charts=chart_divs, - full_table=full_table, - experiment_file=experiment_file) - - def _GetOutput(self, experiment=None, benchmark_results=None): - with mock.patch('results_report_templates.GenerateHTMLPage') as standin: - if experiment is not None: - HTMLResultsReport.FromExperiment(experiment).GetReport() - else: - HTMLResultsReport(benchmark_results).GetReport() - mod_mock = standin - self.assertEqual(mod_mock.call_count, 1) - # call_args[0] is positional args, call_args[1] is kwargs. 
- self.assertEqual(mod_mock.call_args[0], tuple()) - fmt_args = mod_mock.call_args[1] - return self._GetTestOutput(**fmt_args) - - def testNoSuccessOutput(self): - output = self._GetOutput(MakeMockExperiment()) - self.assertIn('no result', output.summary_table) - self.assertIn('no result', output.full_table) - self.assertEqual(output.charts, '') - self.assertNotEqual(output.experiment_file, '') - - def testSuccessfulOutput(self): - num_success = 2 - success_keyvals = {'retval': 0, 'a_float': 3.96} - output = self._GetOutput( - _InjectSuccesses(MakeMockExperiment(), num_success, success_keyvals)) - - self.assertNotIn('no result', output.summary_table) - # self.assertIn(success_keyvals['machine'], output.summary_table) - self.assertIn('a_float', output.summary_table) - self.assertIn(str(success_keyvals['a_float']), output.summary_table) - self.assertIn('a_float', output.full_table) - # The _ in a_float is filtered out when we're generating HTML. - self.assertIn('afloat', output.charts) - # And make sure we have our experiment file... - self.assertNotEqual(output.experiment_file, '') - - def testBenchmarkResultFailure(self): - labels = ['label1'] - benchmark_names_and_iterations = [('bench1', 1)] - benchmark_keyvals = {'bench1': [[]]} - results = BenchmarkResults(labels, benchmark_names_and_iterations, - benchmark_keyvals) - output = self._GetOutput(benchmark_results=results) - self.assertIn('no result', output.summary_table) - self.assertEqual(output.charts, '') - self.assertEqual(output.experiment_file, '') - - def testBenchmarkResultSuccess(self): - labels = ['label1'] - benchmark_names_and_iterations = [('bench1', 1)] - benchmark_keyvals = {'bench1': [[{'retval': 1, 'foo': 2.0}]]} - results = BenchmarkResults(labels, benchmark_names_and_iterations, - benchmark_keyvals) - output = self._GetOutput(benchmark_results=results) - self.assertNotIn('no result', output.summary_table) - self.assertIn('bench1', output.summary_table) - self.assertIn('bench1', output.full_table) - self.assertNotEqual(output.charts, '') - self.assertEqual(output.experiment_file, '') + """Tests that the output of a HTML report contains the things we pass in. + + At the moment, this doesn't care deeply about the format in which said + things are displayed. It just cares that they're present. + """ + + _TestOutput = collections.namedtuple( + "TestOutput", + [ + "summary_table", + "perf_html", + "chart_js", + "charts", + "full_table", + "experiment_file", + ], + ) + + @staticmethod + def _GetTestOutput( + perf_table, + chart_js, + summary_table, + print_table, + chart_divs, + full_table, + experiment_file, + ): + # N.B. Currently we don't check chart_js; it's just passed through because + # cros lint complains otherwise. + summary_table = print_table(summary_table, "HTML") + perf_html = print_table(perf_table, "HTML") + full_table = print_table(full_table, "HTML") + return HTMLResultsReportTest._TestOutput( + summary_table=summary_table, + perf_html=perf_html, + chart_js=chart_js, + charts=chart_divs, + full_table=full_table, + experiment_file=experiment_file, + ) + + def _GetOutput(self, experiment=None, benchmark_results=None): + with mock.patch("results_report_templates.GenerateHTMLPage") as standin: + if experiment is not None: + HTMLResultsReport.FromExperiment(experiment).GetReport() + else: + HTMLResultsReport(benchmark_results).GetReport() + mod_mock = standin + self.assertEqual(mod_mock.call_count, 1) + # call_args[0] is positional args, call_args[1] is kwargs. 
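The _GetOutput helper works by capturing the keyword arguments that reach the patched GenerateHTMLPage. The bare pattern, in isolation (this assumes `results` is a BenchmarkResults, as in the tests below):

import unittest.mock as mock

with mock.patch("results_report_templates.GenerateHTMLPage") as standin:
    HTMLResultsReport(results).GetReport()
kwargs = standin.call_args[1]  # the call is made with keyword arguments only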
+ self.assertEqual(mod_mock.call_args[0], tuple()) + fmt_args = mod_mock.call_args[1] + return self._GetTestOutput(**fmt_args) + + def testNoSuccessOutput(self): + output = self._GetOutput(MakeMockExperiment()) + self.assertIn("no result", output.summary_table) + self.assertIn("no result", output.full_table) + self.assertEqual(output.charts, "") + self.assertNotEqual(output.experiment_file, "") + + def testSuccessfulOutput(self): + num_success = 2 + success_keyvals = {"retval": 0, "a_float": 3.96} + output = self._GetOutput( + _InjectSuccesses(MakeMockExperiment(), num_success, success_keyvals) + ) + + self.assertNotIn("no result", output.summary_table) + # self.assertIn(success_keyvals['machine'], output.summary_table) + self.assertIn("a_float", output.summary_table) + self.assertIn(str(success_keyvals["a_float"]), output.summary_table) + self.assertIn("a_float", output.full_table) + # The _ in a_float is filtered out when we're generating HTML. + self.assertIn("afloat", output.charts) + # And make sure we have our experiment file... + self.assertNotEqual(output.experiment_file, "") + + def testBenchmarkResultFailure(self): + labels = ["label1"] + benchmark_names_and_iterations = [("bench1", 1)] + benchmark_keyvals = {"bench1": [[]]} + results = BenchmarkResults( + labels, benchmark_names_and_iterations, benchmark_keyvals + ) + output = self._GetOutput(benchmark_results=results) + self.assertIn("no result", output.summary_table) + self.assertEqual(output.charts, "") + self.assertEqual(output.experiment_file, "") + + def testBenchmarkResultSuccess(self): + labels = ["label1"] + benchmark_names_and_iterations = [("bench1", 1)] + benchmark_keyvals = {"bench1": [[{"retval": 1, "foo": 2.0}]]} + results = BenchmarkResults( + labels, benchmark_names_and_iterations, benchmark_keyvals + ) + output = self._GetOutput(benchmark_results=results) + self.assertNotIn("no result", output.summary_table) + self.assertIn("bench1", output.summary_table) + self.assertIn("bench1", output.full_table) + self.assertNotEqual(output.charts, "") + self.assertEqual(output.experiment_file, "") class JSONResultsReportTest(unittest.TestCase): - """Tests JSONResultsReport.""" - - REQUIRED_REPORT_KEYS = ('date', 'time', 'label', 'test_name', 'pass') - EXPERIMENT_REPORT_KEYS = ('board', 'chromeos_image', 'chromeos_version', - 'chrome_version', 'compiler') - - @staticmethod - def _GetRequiredKeys(is_experiment): - required_keys = JSONResultsReportTest.REQUIRED_REPORT_KEYS - if is_experiment: - required_keys += JSONResultsReportTest.EXPERIMENT_REPORT_KEYS - return required_keys - - def _CheckRequiredKeys(self, test_output, is_experiment): - required_keys = self._GetRequiredKeys(is_experiment) - for output in test_output: - for key in required_keys: - self.assertIn(key, output) - - def testAllFailedJSONReportOutput(self): - experiment = MakeMockExperiment() - results = JSONResultsReport.FromExperiment(experiment).GetReportObject() - self._CheckRequiredKeys(results, is_experiment=True) - # Nothing succeeded; we don't send anything more than what's required. - required_keys = self._GetRequiredKeys(is_experiment=True) - for result in results: - self.assertCountEqual(result.keys(), required_keys) - - def testJSONReportOutputWithSuccesses(self): - success_keyvals = { - 'retval': 0, - 'a_float': '2.3', - 'many_floats': [['1.0', '2.0'], ['3.0']], - 'machine': "i'm a pirate" - } - - # 2 is arbitrary. 
- num_success = 2 - experiment = _InjectSuccesses(MakeMockExperiment(), num_success, - success_keyvals) - results = JSONResultsReport.FromExperiment(experiment).GetReportObject() - self._CheckRequiredKeys(results, is_experiment=True) - - num_passes = num_success * len(experiment.labels) - non_failures = [r for r in results if r['pass']] - self.assertEqual(num_passes, len(non_failures)) - - # TODO(gbiv): ...Is the 3.0 *actually* meant to be dropped? - expected_detailed = {'a_float': 2.3, 'many_floats': [1.0, 2.0]} - for pass_ in non_failures: - self.assertIn('detailed_results', pass_) - self.assertDictEqual(expected_detailed, pass_['detailed_results']) - self.assertIn('machine', pass_) - self.assertEqual(success_keyvals['machine'], pass_['machine']) - - def testFailedJSONReportOutputWithoutExperiment(self): - labels = ['label1'] - # yapf:disable - benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2), - ('bench3', 1), ('bench4', 0)] - # yapf:enable - - benchmark_keyvals = { - 'bench1': [[{ - 'retval': 1, - 'foo': 2.0 - }]], - 'bench2': [[{ - 'retval': 1, - 'foo': 4.0 - }, { - 'retval': -1, - 'bar': 999 - }]], - # lack of retval is considered a failure. - 'bench3': [[{}]], - 'bench4': [[]] - } - bench_results = BenchmarkResults(labels, benchmark_names_and_iterations, - benchmark_keyvals) - results = JSONResultsReport(bench_results).GetReportObject() - self._CheckRequiredKeys(results, is_experiment=False) - self.assertFalse(any(r['pass'] for r in results)) - - def testJSONGetReportObeysJSONSettings(self): - labels = ['label1'] - benchmark_names_and_iterations = [('bench1', 1)] - # These can be anything, really. So long as they're distinctive. - separators = (',\t\n\t', ':\t\n\t') - benchmark_keyvals = {'bench1': [[{'retval': 0, 'foo': 2.0}]]} - bench_results = BenchmarkResults(labels, benchmark_names_and_iterations, - benchmark_keyvals) - reporter = JSONResultsReport( - bench_results, json_args={'separators': separators}) - result_str = reporter.GetReport() - self.assertIn(separators[0], result_str) - self.assertIn(separators[1], result_str) - - def testSuccessfulJSONReportOutputWithoutExperiment(self): - labels = ['label1'] - benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2)] - benchmark_keyvals = { - 'bench1': [[{ - 'retval': 0, - 'foo': 2.0 - }]], - 'bench2': [[{ - 'retval': 0, - 'foo': 4.0 - }, { - 'retval': 0, - 'bar': 999 - }]] - } - bench_results = BenchmarkResults(labels, benchmark_names_and_iterations, - benchmark_keyvals) - results = JSONResultsReport(bench_results).GetReportObject() - self._CheckRequiredKeys(results, is_experiment=False) - self.assertTrue(all(r['pass'] for r in results)) - # Enforce that the results have *some* deterministic order. 
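On the TODO above, the trace is short: detailed results pass through a single _Unlist, so only the first sub-list of many_floats survives the conversion.

v = [["1.0", "2.0"], ["3.0"]]
v = v[0] if isinstance(v, list) else v  # _Unlist -> ["1.0", "2.0"]
detailed = [float(d) for d in v]        # -> [1.0, 2.0]; ["3.0"] is dropped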
- keyfn = lambda r: (r['test_name'], r['detailed_results'].get('foo', 5.0)) - sorted_results = sorted(results, key=keyfn) - detailed_results = [r['detailed_results'] for r in sorted_results] - bench1, bench2_foo, bench2_bar = detailed_results - self.assertEqual(bench1['foo'], 2.0) - self.assertEqual(bench2_foo['foo'], 4.0) - self.assertEqual(bench2_bar['bar'], 999) - self.assertNotIn('bar', bench1) - self.assertNotIn('bar', bench2_foo) - self.assertNotIn('foo', bench2_bar) + """Tests JSONResultsReport.""" + + REQUIRED_REPORT_KEYS = ("date", "time", "label", "test_name", "pass") + EXPERIMENT_REPORT_KEYS = ( + "board", + "chromeos_image", + "chromeos_version", + "chrome_version", + "compiler", + ) + + @staticmethod + def _GetRequiredKeys(is_experiment): + required_keys = JSONResultsReportTest.REQUIRED_REPORT_KEYS + if is_experiment: + required_keys += JSONResultsReportTest.EXPERIMENT_REPORT_KEYS + return required_keys + + def _CheckRequiredKeys(self, test_output, is_experiment): + required_keys = self._GetRequiredKeys(is_experiment) + for output in test_output: + for key in required_keys: + self.assertIn(key, output) + + def testAllFailedJSONReportOutput(self): + experiment = MakeMockExperiment() + results = JSONResultsReport.FromExperiment(experiment).GetReportObject() + self._CheckRequiredKeys(results, is_experiment=True) + # Nothing succeeded; we don't send anything more than what's required. + required_keys = self._GetRequiredKeys(is_experiment=True) + for result in results: + self.assertCountEqual(result.keys(), required_keys) + + def testJSONReportOutputWithSuccesses(self): + success_keyvals = { + "retval": 0, + "a_float": "2.3", + "many_floats": [["1.0", "2.0"], ["3.0"]], + "machine": "i'm a pirate", + } + + # 2 is arbitrary. + num_success = 2 + experiment = _InjectSuccesses( + MakeMockExperiment(), num_success, success_keyvals + ) + results = JSONResultsReport.FromExperiment(experiment).GetReportObject() + self._CheckRequiredKeys(results, is_experiment=True) + + num_passes = num_success * len(experiment.labels) + non_failures = [r for r in results if r["pass"]] + self.assertEqual(num_passes, len(non_failures)) + + # TODO(gbiv): ...Is the 3.0 *actually* meant to be dropped? + expected_detailed = {"a_float": 2.3, "many_floats": [1.0, 2.0]} + for pass_ in non_failures: + self.assertIn("detailed_results", pass_) + self.assertDictEqual(expected_detailed, pass_["detailed_results"]) + self.assertIn("machine", pass_) + self.assertEqual(success_keyvals["machine"], pass_["machine"]) + + def testFailedJSONReportOutputWithoutExperiment(self): + labels = ["label1"] + # yapf:disable + benchmark_names_and_iterations = [ + ("bench1", 1), + ("bench2", 2), + ("bench3", 1), + ("bench4", 0), + ] + # yapf:enable + + benchmark_keyvals = { + "bench1": [[{"retval": 1, "foo": 2.0}]], + "bench2": [[{"retval": 1, "foo": 4.0}, {"retval": -1, "bar": 999}]], + # lack of retval is considered a failure. + "bench3": [[{}]], + "bench4": [[]], + } + bench_results = BenchmarkResults( + labels, benchmark_names_and_iterations, benchmark_keyvals + ) + results = JSONResultsReport(bench_results).GetReportObject() + self._CheckRequiredKeys(results, is_experiment=False) + self.assertFalse(any(r["pass"] for r in results)) + + def testJSONGetReportObeysJSONSettings(self): + labels = ["label1"] + benchmark_names_and_iterations = [("bench1", 1)] + # These can be anything, really. So long as they're distinctive. 
+ separators = (",\t\n\t", ":\t\n\t") + benchmark_keyvals = {"bench1": [[{"retval": 0, "foo": 2.0}]]} + bench_results = BenchmarkResults( + labels, benchmark_names_and_iterations, benchmark_keyvals + ) + reporter = JSONResultsReport( + bench_results, json_args={"separators": separators} + ) + result_str = reporter.GetReport() + self.assertIn(separators[0], result_str) + self.assertIn(separators[1], result_str) + + def testSuccessfulJSONReportOutputWithoutExperiment(self): + labels = ["label1"] + benchmark_names_and_iterations = [("bench1", 1), ("bench2", 2)] + benchmark_keyvals = { + "bench1": [[{"retval": 0, "foo": 2.0}]], + "bench2": [[{"retval": 0, "foo": 4.0}, {"retval": 0, "bar": 999}]], + } + bench_results = BenchmarkResults( + labels, benchmark_names_and_iterations, benchmark_keyvals + ) + results = JSONResultsReport(bench_results).GetReportObject() + self._CheckRequiredKeys(results, is_experiment=False) + self.assertTrue(all(r["pass"] for r in results)) + # Enforce that the results have *some* deterministic order. + keyfn = lambda r: ( + r["test_name"], + r["detailed_results"].get("foo", 5.0), + ) + sorted_results = sorted(results, key=keyfn) + detailed_results = [r["detailed_results"] for r in sorted_results] + bench1, bench2_foo, bench2_bar = detailed_results + self.assertEqual(bench1["foo"], 2.0) + self.assertEqual(bench2_foo["foo"], 4.0) + self.assertEqual(bench2_bar["bar"], 999) + self.assertNotIn("bar", bench1) + self.assertNotIn("bar", bench2_foo) + self.assertNotIn("foo", bench2_bar) class PerfReportParserTest(unittest.TestCase): - """Tests for the perf report parser in results_report.""" - - @staticmethod - def _ReadRealPerfReport(): - my_dir = os.path.dirname(os.path.realpath(__file__)) - with open(os.path.join(my_dir, 'perf_files/perf.data.report.0')) as f: - return f.read() - - def testParserParsesRealWorldPerfReport(self): - report = ParseStandardPerfReport(self._ReadRealPerfReport()) - self.assertCountEqual(['cycles', 'instructions'], list(report.keys())) - - # Arbitrarily selected known percentages from the perf report. - known_cycles_percentages = { - '0xffffffffa4a1f1c9': 0.66, - '0x0000115bb7ba9b54': 0.47, - '0x0000000000082e08': 0.00, - '0xffffffffa4a13e63': 0.00, - } - report_cycles = report['cycles'] - self.assertEqual(len(report_cycles), 214) - for k, v in known_cycles_percentages.items(): - self.assertIn(k, report_cycles) - self.assertEqual(v, report_cycles[k]) - - known_instrunctions_percentages = { - '0x0000115bb6c35d7a': 1.65, - '0x0000115bb7ba9b54': 0.67, - '0x0000000000024f56': 0.00, - '0xffffffffa4a0ee03': 0.00, - } - report_instructions = report['instructions'] - self.assertEqual(len(report_instructions), 492) - for k, v in known_instrunctions_percentages.items(): - self.assertIn(k, report_instructions) - self.assertEqual(v, report_instructions[k]) - - -if __name__ == '__main__': - test_flag.SetTestMode(True) - unittest.main() + """Tests for the perf report parser in results_report.""" + + @staticmethod + def _ReadRealPerfReport(): + my_dir = os.path.dirname(os.path.realpath(__file__)) + with open(os.path.join(my_dir, "perf_files/perf.data.report.0")) as f: + return f.read() + + def testParserParsesRealWorldPerfReport(self): + report = ParseStandardPerfReport(self._ReadRealPerfReport()) + self.assertCountEqual(["cycles", "instructions"], list(report.keys())) + + # Arbitrarily selected known percentages from the perf report. 
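(An aside, not part of the patch, before the spot-check below: per the assertions that follow, ParseStandardPerfReport(text) returns a nested mapping of roughly the shape {event_name: {hex_address: overhead_percent}}. A spot-check helper in that spirit; the helper name is illustrative:)

    def check_known_percentages(report, event, known):
        """Assert selected addresses carry the expected overhead percents."""
        # e.g. report["cycles"]["0xffffffffa4a1f1c9"] == 0.66 in the test below.
        section = report[event]
        for address, percent in known.items():
            assert address in section
            assert section[address] == percent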
+        known_cycles_percentages = {
+            "0xffffffffa4a1f1c9": 0.66,
+            "0x0000115bb7ba9b54": 0.47,
+            "0x0000000000082e08": 0.00,
+            "0xffffffffa4a13e63": 0.00,
+        }
+        report_cycles = report["cycles"]
+        self.assertEqual(len(report_cycles), 214)
+        for k, v in known_cycles_percentages.items():
+            self.assertIn(k, report_cycles)
+            self.assertEqual(v, report_cycles[k])
+
+        known_instructions_percentages = {
+            "0x0000115bb6c35d7a": 1.65,
+            "0x0000115bb7ba9b54": 0.67,
+            "0x0000000000024f56": 0.00,
+            "0xffffffffa4a0ee03": 0.00,
+        }
+        report_instructions = report["instructions"]
+        self.assertEqual(len(report_instructions), 492)
+        for k, v in known_instructions_percentages.items():
+            self.assertIn(k, report_instructions)
+            self.assertEqual(v, report_instructions[k])
+
+
+if __name__ == "__main__":
+    test_flag.SetTestMode(True)
+    unittest.main()
diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py
index b9714529..692f3420 100644
--- a/crosperf/schedv2.py
+++ b/crosperf/schedv2.py
@@ -8,442 +8,474 @@ from __future__ import division
 from __future__ import print_function
 
-import sys
-import traceback
-
 from collections import defaultdict
+import sys
 from threading import Lock
 from threading import Thread
+import traceback
 
-import test_flag
-
-from machine_image_manager import MachineImageManager
 from cros_utils import command_executer
 from cros_utils import logger
+from machine_image_manager import MachineImageManager
+import test_flag
 
 
 class DutWorker(Thread):
-  """Working thread for a dut."""
-
-  def __init__(self, dut, sched):
-    super(DutWorker, self).__init__(name='DutWorker-{}'.format(dut.name))
-    self._dut = dut
-    self._sched = sched
-    self._stat_num_br_run = 0
-    self._stat_num_reimage = 0
-    self._stat_annotation = ''
-    self._logger = logger.GetLogger(self._sched.get_experiment().log_dir)
-    self.daemon = True
-    self._terminated = False
-    self._active_br = None
-    # Race condition accessing _active_br between _execute_benchmark_run and
-    # _terminate, so lock it up.
-    self._active_br_lock = Lock()
-
-  def terminate(self):
-    self._terminated = True
-    with self._active_br_lock:
-      if self._active_br is not None:
-        # BenchmarkRun.Terminate() terminates any running testcase via
-        # suite_runner.Terminate and updates timeline.
-        self._active_br.Terminate()
-
-  def run(self):
-    """Do the "run-test->(optionally reimage)->run-test" chore.
-
-    Note - 'br' below means 'benchmark_run'.
-    """
-
-    # Firstly, handle benchmarkruns that have cache hit.
-    br = self._sched.get_cached_benchmark_run()
-    while br:
-      try:
-        self._stat_annotation = 'finishing cached {}'.format(br)
-        br.run()
-      except RuntimeError:
-        traceback.print_exc(file=sys.stdout)
-      br = self._sched.get_cached_benchmark_run()
-
-    # Secondly, handle benchmarkruns that needs to be run on dut.
-    self._setup_dut_label()
-    try:
-      self._logger.LogOutput('{} started.'.format(self))
-      while not self._terminated:
-        br = self._sched.get_benchmark_run(self._dut)
-        if br is None:
-          # No br left for this label. Considering reimaging.
-          label = self._sched.allocate_label(self._dut)
-          if label is None:
-            # No br even for other labels. We are done.
-            self._logger.LogOutput('ImageManager found no label '
-                                   'for dut, stopping working '
-                                   'thread {}.'.format(self))
-            break
-          if self._reimage(label):
-            # Reimage to run other br fails, dut is doomed, stop
-            # this thread.
-            self._logger.LogWarning('Re-image failed, dut '
-                                    'in an unstable state, stopping '
-                                    'working thread {}.'.format(self))
-            break
-        else:
-          # Execute the br.
-          self._execute_benchmark_run(br)
-    finally:
-      self._stat_annotation = 'finished'
-      # Thread finishes. Notify scheduler that I'm done.
-      self._sched.dut_worker_finished(self)
-
-  def _reimage(self, label):
-    """Reimage image to label.
-
-    Args:
-      label: the label to remimage onto dut.
-
-    Returns:
-      0 if successful, otherwise 1.
-    """
-
-    # Termination could happen anywhere, check it.
-    if self._terminated:
-      return 1
-
-    if self._sched.get_experiment().crosfleet:
-      self._logger.LogOutput('Crosfleet mode, do not image before testing.')
-      self._dut.label = label
-      return 0
-
-    self._logger.LogOutput('Reimaging {} using {}'.format(self, label))
-    self._stat_num_reimage += 1
-    self._stat_annotation = 'reimaging using "{}"'.format(label.name)
-    try:
-      # Note, only 1 reimage at any given time, this is guaranteed in
-      # ImageMachine, so no sync needed below.
-      retval = self._sched.get_experiment().machine_manager.ImageMachine(
-          self._dut, label)
+    """Working thread for a dut."""
+
+    def __init__(self, dut, sched):
+        super(DutWorker, self).__init__(name="DutWorker-{}".format(dut.name))
+        self._dut = dut
+        self._sched = sched
+        self._stat_num_br_run = 0
+        self._stat_num_reimage = 0
+        self._stat_annotation = ""
+        self._logger = logger.GetLogger(self._sched.get_experiment().log_dir)
+        self.daemon = True
+        self._terminated = False
+        self._active_br = None
+        # Race condition accessing _active_br between _execute_benchmark_run and
+        # _terminate, so lock it up.
+        self._active_br_lock = Lock()
 
-      if retval:
-        return 1
-    except RuntimeError:
-      return 1
+    def terminate(self):
+        self._terminated = True
+        with self._active_br_lock:
+            if self._active_br is not None:
+                # BenchmarkRun.Terminate() terminates any running testcase via
+                # suite_runner.Terminate and updates timeline.
+                self._active_br.Terminate()
 
-    self._dut.label = label
-    return 0
+    def run(self):
+        """Do the "run-test->(optionally reimage)->run-test" chore.
 
-  def _execute_benchmark_run(self, br):
-    """Execute a single benchmark_run.
+        Note - 'br' below means 'benchmark_run'.
+        """
+
+        # Firstly, handle benchmarkruns that have a cache hit.
+        br = self._sched.get_cached_benchmark_run()
+        while br:
+            try:
+                self._stat_annotation = "finishing cached {}".format(br)
+                br.run()
+            except RuntimeError:
+                traceback.print_exc(file=sys.stdout)
+            br = self._sched.get_cached_benchmark_run()
+
+        # Secondly, handle benchmarkruns that need to be run on the dut.
+        self._setup_dut_label()
+        try:
+            self._logger.LogOutput("{} started.".format(self))
+            while not self._terminated:
+                br = self._sched.get_benchmark_run(self._dut)
+                if br is None:
+                    # No br left for this label. Consider reimaging.
+                    label = self._sched.allocate_label(self._dut)
+                    if label is None:
+                        # No br even for other labels. We are done.
+                        self._logger.LogOutput(
+                            "ImageManager found no label "
+                            "for dut, stopping working "
+                            "thread {}.".format(self)
+                        )
+                        break
+                    if self._reimage(label):
+                        # Reimaging the dut to run other brs failed; the dut
+                        # is doomed, so stop this thread.
+                        self._logger.LogWarning(
+                            "Re-image failed, dut "
+                            "in an unstable state, stopping "
+                            "working thread {}.".format(self)
+                        )
+                        break
+                else:
+                    # Execute the br.
+                    self._execute_benchmark_run(br)
+        finally:
+            self._stat_annotation = "finished"
+            # Thread finishes. Notify scheduler that I'm done.
+            self._sched.dut_worker_finished(self)
+
+    def _reimage(self, label):
+        """Reimage the dut to the given label.
+
+        Args:
+            label: the label to reimage onto the dut.
+
+        Returns:
+            0 if successful, otherwise 1.
+        """
+
+        # Termination could happen anywhere, check it.
+        if self._terminated:
+            return 1
+
+        if self._sched.get_experiment().crosfleet:
+            self._logger.LogOutput(
+                "Crosfleet mode, do not image before testing."
+            )
+            self._dut.label = label
+            return 0
+
+        self._logger.LogOutput("Reimaging {} using {}".format(self, label))
+        self._stat_num_reimage += 1
+        self._stat_annotation = 'reimaging using "{}"'.format(label.name)
+        try:
+            # Note, only 1 reimage at any given time, this is guaranteed in
+            # ImageMachine, so no sync needed below.
+            retval = self._sched.get_experiment().machine_manager.ImageMachine(
+                self._dut, label
+            )
+
+            if retval:
+                return 1
+        except RuntimeError:
+            return 1
+
+        self._dut.label = label
+        return 0
+
+    def _execute_benchmark_run(self, br):
+        """Execute a single benchmark_run.
 
         Note - this function never throws exceptions.
-    """
+        """
 
-    # Termination could happen anywhere, check it.
-    if self._terminated:
-      return
-
-    self._logger.LogOutput('{} started working on {}'.format(self, br))
-    self._stat_num_br_run += 1
-    self._stat_annotation = 'executing {}'.format(br)
-    # benchmark_run.run does not throws, but just play it safe here.
-    try:
-      assert br.owner_thread is None
-      br.owner_thread = self
-      with self._active_br_lock:
-        self._active_br = br
-      br.run()
-    finally:
-      self._sched.get_experiment().BenchmarkRunFinished(br)
-      with self._active_br_lock:
-        self._active_br = None
+        # Termination could happen anywhere, check it.
+        if self._terminated:
+            return
 
-  def _setup_dut_label(self):
-    """Try to match dut image with a certain experiment label.
+        self._logger.LogOutput("{} started working on {}".format(self, br))
+        self._stat_num_br_run += 1
+        self._stat_annotation = "executing {}".format(br)
+        # benchmark_run.run does not throw, but just play it safe here.
+        try:
+            assert br.owner_thread is None
+            br.owner_thread = self
+            with self._active_br_lock:
+                self._active_br = br
+            br.run()
+        finally:
+            self._sched.get_experiment().BenchmarkRunFinished(br)
+            with self._active_br_lock:
+                self._active_br = None
+
+    def _setup_dut_label(self):
+        """Try to match dut image with a certain experiment label.
 
         If such match is found, we just skip doing reimage and jump to execute
         some benchmark_runs.
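(An aside, not part of the patch: stripped of logging and locking, the run() flow above reduces to the loop below. Here sched stands in for the Schedv2 methods used in this file, and reimage_failed is a hypothetical stand-in for _reimage; both names are illustrative only:)

    def worker_loop(sched, dut, reimage_failed):
        # 1. Drain benchmark runs whose results are already cached.
        br = sched.get_cached_benchmark_run()
        while br:
            br.run()
            br = sched.get_cached_benchmark_run()
        # 2. Alternate run-test / reimage until no label has work left.
        while True:
            br = sched.get_benchmark_run(dut)
            if br is not None:
                br.run()
            else:
                label = sched.allocate_label(dut)
                if label is None or reimage_failed(dut, label):
                    return  # nothing left to do, or the dut is unusable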
- """ - - checksum_file = '/usr/local/osimage_checksum_file' - try: - rv, checksum, _ = command_executer.GetCommandExecuter().\ - CrosRunCommandWOutput( - 'cat ' + checksum_file, - chromeos_root=self._sched.get_labels(0).chromeos_root, - machine=self._dut.name, - print_to_console=False) - if rv == 0: - checksum = checksum.strip() - for l in self._sched.get_labels(): - if l.checksum == checksum: - self._logger.LogOutput("Dut '{}' is pre-installed with '{}'".format( - self._dut.name, l)) - self._dut.label = l - return - except RuntimeError: - traceback.print_exc(file=sys.stdout) - self._dut.label = None - - def __str__(self): - return 'DutWorker[dut="{}", label="{}"]'.format( - self._dut.name, self._dut.label.name if self._dut.label else 'None') - - def dut(self): - return self._dut - - def status_str(self): - """Report thread status.""" - - return ('Worker thread "{}", label="{}", benchmark_run={}, ' - 'reimage={}, now {}'.format( + """ + + checksum_file = "/usr/local/osimage_checksum_file" + try: + ( + rv, + checksum, + _, + ) = command_executer.GetCommandExecuter().CrosRunCommandWOutput( + "cat " + checksum_file, + chromeos_root=self._sched.get_labels(0).chromeos_root, + machine=self._dut.name, + print_to_console=False, + ) + if rv == 0: + checksum = checksum.strip() + for l in self._sched.get_labels(): + if l.checksum == checksum: + self._logger.LogOutput( + "Dut '{}' is pre-installed with '{}'".format( + self._dut.name, l + ) + ) + self._dut.label = l + return + except RuntimeError: + traceback.print_exc(file=sys.stdout) + self._dut.label = None + + def __str__(self): + return 'DutWorker[dut="{}", label="{}"]'.format( + self._dut.name, self._dut.label.name if self._dut.label else "None" + ) + + def dut(self): + return self._dut + + def status_str(self): + """Report thread status.""" + + return ( + 'Worker thread "{}", label="{}", benchmark_run={}, ' + "reimage={}, now {}".format( self._dut.name, - 'None' if self._dut.label is None else self._dut.label.name, - self._stat_num_br_run, self._stat_num_reimage, - self._stat_annotation)) + "None" if self._dut.label is None else self._dut.label.name, + self._stat_num_br_run, + self._stat_num_reimage, + self._stat_annotation, + ) + ) class BenchmarkRunCacheReader(Thread): - """The thread to read cache for a list of benchmark_runs. + """The thread to read cache for a list of benchmark_runs. On creation, each instance of this class is given a br_list, which is a subset of experiment._benchmark_runs. 
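(An aside, not part of the patch: the essence of the reader thread's run() below is a guarded append to one shared list; schedv2 here is any object exposing the lock_on() and get_cached_run_list() methods defined later in this file:)

    def read_cache_segment(schedv2, br_list):
        for br in br_list:
            br.ReadCache()
            if br.cache_hit:
                # Many reader threads append concurrently; serialize on the
                # scheduler's named lock for the shared list.
                with schedv2.lock_on("_cached_br_list"):
                    schedv2.get_cached_run_list().append(br)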
- """ - - def __init__(self, schedv2, br_list): - super(BenchmarkRunCacheReader, self).__init__() - self._schedv2 = schedv2 - self._br_list = br_list - self._logger = self._schedv2.get_logger() - - def run(self): - for br in self._br_list: - try: - br.ReadCache() - if br.cache_hit: - self._logger.LogOutput('Cache hit - {}'.format(br)) - with self._schedv2.lock_on('_cached_br_list'): - self._schedv2.get_cached_run_list().append(br) - else: - self._logger.LogOutput('Cache not hit - {}'.format(br)) - except RuntimeError: - traceback.print_exc(file=sys.stderr) + """ + + def __init__(self, schedv2, br_list): + super(BenchmarkRunCacheReader, self).__init__() + self._schedv2 = schedv2 + self._br_list = br_list + self._logger = self._schedv2.get_logger() + + def run(self): + for br in self._br_list: + try: + br.ReadCache() + if br.cache_hit: + self._logger.LogOutput("Cache hit - {}".format(br)) + with self._schedv2.lock_on("_cached_br_list"): + self._schedv2.get_cached_run_list().append(br) + else: + self._logger.LogOutput("Cache not hit - {}".format(br)) + except RuntimeError: + traceback.print_exc(file=sys.stderr) class Schedv2(object): - """New scheduler for crosperf.""" + """New scheduler for crosperf.""" - def __init__(self, experiment): - self._experiment = experiment - self._logger = logger.GetLogger(experiment.log_dir) + def __init__(self, experiment): + self._experiment = experiment + self._logger = logger.GetLogger(experiment.log_dir) - # Create shortcuts to nested data structure. "_duts" points to a list of - # locked machines. _labels points to a list of all labels. - self._duts = self._experiment.machine_manager.GetMachines() - self._labels = self._experiment.labels + # Create shortcuts to nested data structure. "_duts" points to a list of + # locked machines. _labels points to a list of all labels. + self._duts = self._experiment.machine_manager.GetMachines() + self._labels = self._experiment.labels - # Bookkeeping for synchronization. - self._workers_lock = Lock() - # pylint: disable=unnecessary-lambda - self._lock_map = defaultdict(lambda: Lock()) + # Bookkeeping for synchronization. + self._workers_lock = Lock() + # pylint: disable=unnecessary-lambda + self._lock_map = defaultdict(lambda: Lock()) - # Test mode flag - self._in_test_mode = test_flag.GetTestMode() + # Test mode flag + self._in_test_mode = test_flag.GetTestMode() - # Read benchmarkrun cache. - self._read_br_cache() + # Read benchmarkrun cache. + self._read_br_cache() - # Mapping from label to a list of benchmark_runs. - self._label_brl_map = dict((l, []) for l in self._labels) - for br in self._experiment.benchmark_runs: - assert br.label in self._label_brl_map - # Only put no-cache-hit br into the map. - if br not in self._cached_br_list: - self._label_brl_map[br.label].append(br) + # Mapping from label to a list of benchmark_runs. + self._label_brl_map = dict((l, []) for l in self._labels) + for br in self._experiment.benchmark_runs: + assert br.label in self._label_brl_map + # Only put no-cache-hit br into the map. + if br not in self._cached_br_list: + self._label_brl_map[br.label].append(br) - # Use machine image manager to calculate initial label allocation. - self._mim = MachineImageManager(self._labels, self._duts) - self._mim.compute_initial_allocation() + # Use machine image manager to calculate initial label allocation. + self._mim = MachineImageManager(self._labels, self._duts) + self._mim.compute_initial_allocation() - # Create worker thread, 1 per dut. 
- self._active_workers = [DutWorker(dut, self) for dut in self._duts] - self._finished_workers = [] + # Create worker thread, 1 per dut. + self._active_workers = [DutWorker(dut, self) for dut in self._duts] + self._finished_workers = [] - # Termination flag. - self._terminated = False + # Termination flag. + self._terminated = False - def run_sched(self): - """Start all dut worker threads and return immediately.""" + def run_sched(self): + """Start all dut worker threads and return immediately.""" - for w in self._active_workers: - w.start() + for w in self._active_workers: + w.start() - def _read_br_cache(self): - """Use multi-threading to read cache for all benchmarkruns. + def _read_br_cache(self): + """Use multi-threading to read cache for all benchmarkruns. We do this by firstly creating a few threads, and then assign each thread a segment of all brs. Each thread will check cache status for each br and put those with cache into '_cached_br_list'. - """ - - self._cached_br_list = [] - n_benchmarkruns = len(self._experiment.benchmark_runs) - if n_benchmarkruns <= 4: - # Use single thread to read cache. - self._logger.LogOutput(('Starting to read cache status for ' - '{} benchmark runs ...').format(n_benchmarkruns)) - BenchmarkRunCacheReader(self, self._experiment.benchmark_runs).run() - return - - # Split benchmarkruns set into segments. Each segment will be handled by - # a thread. Note, we use (x+3)/4 to mimic math.ceil(x/4). - n_threads = max(2, min(20, (n_benchmarkruns + 3) // 4)) - self._logger.LogOutput( - ('Starting {} threads to read cache status for ' - '{} benchmark runs ...').format(n_threads, n_benchmarkruns)) - benchmarkruns_per_thread = (n_benchmarkruns + n_threads - 1) // n_threads - benchmarkrun_segments = [] - for i in range(n_threads - 1): - start = i * benchmarkruns_per_thread - end = (i + 1) * benchmarkruns_per_thread - benchmarkrun_segments.append(self._experiment.benchmark_runs[start:end]) - benchmarkrun_segments.append( - self._experiment.benchmark_runs[(n_threads - 1) * - benchmarkruns_per_thread:]) - - # Assert: aggregation of benchmarkrun_segments equals to benchmark_runs. - assert sum(len(x) for x in benchmarkrun_segments) == n_benchmarkruns - - # Create and start all readers. - cache_readers = [ - BenchmarkRunCacheReader(self, x) for x in benchmarkrun_segments - ] - - for x in cache_readers: - x.start() - - # Wait till all readers finish. - for x in cache_readers: - x.join() - - # Summarize. - self._logger.LogOutput( - 'Total {} cache hit out of {} benchmark_runs.'.format( - len(self._cached_br_list), n_benchmarkruns)) - - def get_cached_run_list(self): - return self._cached_br_list - - def get_label_map(self): - return self._label_brl_map - - def get_experiment(self): - return self._experiment - - def get_labels(self, i=None): - if i is None: - return self._labels - return self._labels[i] - - def get_logger(self): - return self._logger - - def get_cached_benchmark_run(self): - """Get a benchmark_run with 'cache hit'. - - Returns: - The benchmark that has cache hit, if any. Otherwise none. - """ - - with self.lock_on('_cached_br_list'): - if self._cached_br_list: - return self._cached_br_list.pop() - return None - - def get_benchmark_run(self, dut): - """Get a benchmark_run (br) object for a certain dut. - - Args: - dut: the dut for which a br is returned. - - Returns: - A br with its label matching that of the dut. If no such br could be - found, return None (this usually means a reimage is required for the - dut). 
- """ - - # If terminated, stop providing any br. - if self._terminated: - return None - - # If dut bears an unrecognized label, return None. - if dut.label is None: - return None - - # If br list for the dut's label is empty (that means all brs for this - # label have been done), return None. - with self.lock_on(dut.label): - brl = self._label_brl_map[dut.label] - if not brl: - return None - # Return the first br. - return brl.pop(0) - - def allocate_label(self, dut): - """Allocate a label to a dut. - - The work is delegated to MachineImageManager. + """ + + self._cached_br_list = [] + n_benchmarkruns = len(self._experiment.benchmark_runs) + if n_benchmarkruns <= 4: + # Use single thread to read cache. + self._logger.LogOutput( + ( + "Starting to read cache status for " "{} benchmark runs ..." + ).format(n_benchmarkruns) + ) + BenchmarkRunCacheReader(self, self._experiment.benchmark_runs).run() + return - The dut_worker calling this method is responsible for reimage the dut to - this label. + # Split benchmarkruns set into segments. Each segment will be handled by + # a thread. Note, we use (x+3)/4 to mimic math.ceil(x/4). + n_threads = max(2, min(20, (n_benchmarkruns + 3) // 4)) + self._logger.LogOutput( + ( + "Starting {} threads to read cache status for " + "{} benchmark runs ..." + ).format(n_threads, n_benchmarkruns) + ) + benchmarkruns_per_thread = ( + n_benchmarkruns + n_threads - 1 + ) // n_threads + benchmarkrun_segments = [] + for i in range(n_threads - 1): + start = i * benchmarkruns_per_thread + end = (i + 1) * benchmarkruns_per_thread + benchmarkrun_segments.append( + self._experiment.benchmark_runs[start:end] + ) + benchmarkrun_segments.append( + self._experiment.benchmark_runs[ + (n_threads - 1) * benchmarkruns_per_thread : + ] + ) + + # Assert: aggregation of benchmarkrun_segments equals to benchmark_runs. + assert sum(len(x) for x in benchmarkrun_segments) == n_benchmarkruns + + # Create and start all readers. + cache_readers = [ + BenchmarkRunCacheReader(self, x) for x in benchmarkrun_segments + ] + + for x in cache_readers: + x.start() + + # Wait till all readers finish. + for x in cache_readers: + x.join() + + # Summarize. + self._logger.LogOutput( + "Total {} cache hit out of {} benchmark_runs.".format( + len(self._cached_br_list), n_benchmarkruns + ) + ) + + def get_cached_run_list(self): + return self._cached_br_list + + def get_label_map(self): + return self._label_brl_map + + def get_experiment(self): + return self._experiment + + def get_labels(self, i=None): + if i is None: + return self._labels + return self._labels[i] + + def get_logger(self): + return self._logger + + def get_cached_benchmark_run(self): + """Get a benchmark_run with 'cache hit'. + + Returns: + The benchmark that has cache hit, if any. Otherwise none. + """ + + with self.lock_on("_cached_br_list"): + if self._cached_br_list: + return self._cached_br_list.pop() + return None + + def get_benchmark_run(self, dut): + """Get a benchmark_run (br) object for a certain dut. + + Args: + dut: the dut for which a br is returned. + + Returns: + A br with its label matching that of the dut. If no such br could be + found, return None (this usually means a reimage is required for the + dut). + """ + + # If terminated, stop providing any br. + if self._terminated: + return None + + # If dut bears an unrecognized label, return None. + if dut.label is None: + return None + + # If br list for the dut's label is empty (that means all brs for this + # label have been done), return None. 
+        with self.lock_on(dut.label):
+            brl = self._label_brl_map[dut.label]
+            if not brl:
+                return None
+            # Return the first br.
+            return brl.pop(0)
+
+    def allocate_label(self, dut):
+        """Allocate a label to a dut.
+
+        The work is delegated to MachineImageManager.
+
+        The dut_worker calling this method is responsible for reimaging the
+        dut to this label.
 
-    Args:
-      dut: the new label that is to be reimaged onto the dut.
+        Args:
+            dut: the dut for which a label is to be allocated.
 
-    Returns:
-      The label or None.
-    """
+        Returns:
+            The label or None.
+        """
 
-    if self._terminated:
-      return None
+        if self._terminated:
+            return None
 
-    return self._mim.allocate(dut, self)
+        return self._mim.allocate(dut, self)
 
-  def dut_worker_finished(self, dut_worker):
-    """Notify schedv2 that the dut_worker thread finished.
+    def dut_worker_finished(self, dut_worker):
+        """Notify schedv2 that the dut_worker thread finished.
 
-    Args:
-      dut_worker: the thread that is about to end.
-    """
+        Args:
+            dut_worker: the thread that is about to end.
+        """
 
-    self._logger.LogOutput('{} finished.'.format(dut_worker))
-    with self._workers_lock:
-      self._active_workers.remove(dut_worker)
-      self._finished_workers.append(dut_worker)
+        self._logger.LogOutput("{} finished.".format(dut_worker))
+        with self._workers_lock:
+            self._active_workers.remove(dut_worker)
+            self._finished_workers.append(dut_worker)
 
-  def is_complete(self):
-    return len(self._active_workers) == 0
+    def is_complete(self):
+        return len(self._active_workers) == 0
 
-  def lock_on(self, my_object):
-    return self._lock_map[my_object]
+    def lock_on(self, my_object):
+        return self._lock_map[my_object]
 
-  def terminate(self):
-    """Mark flag so we stop providing br/reimages.
+    def terminate(self):
+        """Mark the flag so we stop providing brs/reimages.
 
         Also terminate each DutWorker, so they refuse to execute br or reimage.
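(An aside, not part of the patch: lock_on() above leans on a defaultdict that mints one Lock per key on first use, so any hashable object or name can serve as a synchronization point. defaultdict(Lock) below is equivalent to the defaultdict(lambda: Lock()) spelled out in __init__:)

    from collections import defaultdict
    from threading import Lock

    lock_map = defaultdict(Lock)

    def lock_on(my_object):
        return lock_map[my_object]

    with lock_on("_cached_br_list"):
        pass  # mutate the shared structure while holding its lock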
- """ - - self._terminated = True - for dut_worker in self._active_workers: - dut_worker.terminate() - - def threads_status_as_string(self): - """Report the dut worker threads status.""" - - status = '{} active threads, {} finished threads.\n'.format( - len(self._active_workers), len(self._finished_workers)) - status += ' Active threads:' - for dw in self._active_workers: - status += '\n ' + dw.status_str() - if self._finished_workers: - status += '\n Finished threads:' - for dw in self._finished_workers: - status += '\n ' + dw.status_str() - return status + """ + + self._terminated = True + for dut_worker in self._active_workers: + dut_worker.terminate() + + def threads_status_as_string(self): + """Report the dut worker threads status.""" + + status = "{} active threads, {} finished threads.\n".format( + len(self._active_workers), len(self._finished_workers) + ) + status += " Active threads:" + for dw in self._active_workers: + status += "\n " + dw.status_str() + if self._finished_workers: + status += "\n Finished threads:" + for dw in self._finished_workers: + status += "\n " + dw.status_str() + return status diff --git a/crosperf/schedv2_unittest.py b/crosperf/schedv2_unittest.py index 435742f6..e939bc5b 100755 --- a/crosperf/schedv2_unittest.py +++ b/crosperf/schedv2_unittest.py @@ -15,12 +15,13 @@ import unittest import unittest.mock as mock import benchmark_run -import test_flag +from cros_utils.command_executer import CommandExecuter from experiment_factory import ExperimentFactory from experiment_file import ExperimentFile -from cros_utils.command_executer import CommandExecuter from experiment_runner_unittest import FakeLogger from schedv2 import Schedv2 +import test_flag + EXPERIMENT_FILE_1 = """\ board: daisy @@ -66,160 +67,184 @@ image2 {{ class Schedv2Test(unittest.TestCase): - """Class for setting up and running the unit tests.""" + """Class for setting up and running the unit tests.""" - def setUp(self): - self.exp = None + def setUp(self): + self.exp = None - mock_logger = FakeLogger() - mock_cmd_exec = mock.Mock(spec=CommandExecuter) + mock_logger = FakeLogger() + mock_cmd_exec = mock.Mock(spec=CommandExecuter) - @mock.patch('benchmark_run.BenchmarkRun', new=benchmark_run.MockBenchmarkRun) - def _make_fake_experiment(self, expstr): - """Create fake experiment from string. + @mock.patch( + "benchmark_run.BenchmarkRun", new=benchmark_run.MockBenchmarkRun + ) + def _make_fake_experiment(self, expstr): + """Create fake experiment from string. Note - we mock out BenchmarkRun in this step. 
- """ - experiment_file = ExperimentFile(io.StringIO(expstr)) - experiment = ExperimentFactory().GetExperiment( - experiment_file, working_directory='', log_dir='') - return experiment - - def test_remote(self): - """Test that remotes in labels are aggregated into experiment.remote.""" - - self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1) - self.exp.log_level = 'verbose' - my_schedv2 = Schedv2(self.exp) - self.assertFalse(my_schedv2.is_complete()) - self.assertIn('chromeos-daisy1.cros', self.exp.remote) - self.assertIn('chromeos-daisy2.cros', self.exp.remote) - self.assertIn('chromeos-daisy3.cros', self.exp.remote) - self.assertIn('chromeos-daisy4.cros', self.exp.remote) - self.assertIn('chromeos-daisy5.cros', self.exp.remote) - - def test_unreachable_remote(self): - """Test unreachable remotes are removed from experiment and label.""" - - def MockIsReachable(cm): - return (cm.name != 'chromeos-daisy3.cros' and - cm.name != 'chromeos-daisy5.cros') - - with mock.patch( - 'machine_manager.MockCrosMachine.IsReachable', new=MockIsReachable): - self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1) - self.assertIn('chromeos-daisy1.cros', self.exp.remote) - self.assertIn('chromeos-daisy2.cros', self.exp.remote) - self.assertNotIn('chromeos-daisy3.cros', self.exp.remote) - self.assertIn('chromeos-daisy4.cros', self.exp.remote) - self.assertNotIn('chromeos-daisy5.cros', self.exp.remote) - - for l in self.exp.labels: - if l.name == 'image2': - self.assertNotIn('chromeos-daisy5.cros', l.remote) - self.assertIn('chromeos-daisy4.cros', l.remote) - elif l.name == 'image1': - self.assertNotIn('chromeos-daisy3.cros', l.remote) - - @mock.patch('schedv2.BenchmarkRunCacheReader') - def test_BenchmarkRunCacheReader_1(self, reader): - """Test benchmarkrun set is split into 5 segments.""" - - self.exp = self._make_fake_experiment( - EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=9)) - my_schedv2 = Schedv2(self.exp) - self.assertFalse(my_schedv2.is_complete()) - # We have 9 * 2 == 18 brs, we use 5 threads, each reading 4, 4, 4, - # 4, 2 brs respectively. - # Assert that BenchmarkRunCacheReader() is called 5 times. - self.assertEqual(reader.call_count, 5) - # reader.call_args_list[n] - nth call. - # reader.call_args_list[n][0] - positioned args in nth call. - # reader.call_args_list[n][0][1] - the 2nd arg in nth call, - # that is 'br_list' in 'schedv2.BenchmarkRunCacheReader'. - self.assertEqual(len(reader.call_args_list[0][0][1]), 4) - self.assertEqual(len(reader.call_args_list[1][0][1]), 4) - self.assertEqual(len(reader.call_args_list[2][0][1]), 4) - self.assertEqual(len(reader.call_args_list[3][0][1]), 4) - self.assertEqual(len(reader.call_args_list[4][0][1]), 2) - - @mock.patch('schedv2.BenchmarkRunCacheReader') - def test_BenchmarkRunCacheReader_2(self, reader): - """Test benchmarkrun set is split into 4 segments.""" - - self.exp = self._make_fake_experiment( - EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=8)) - my_schedv2 = Schedv2(self.exp) - self.assertFalse(my_schedv2.is_complete()) - # We have 8 * 2 == 16 brs, we use 4 threads, each reading 4 brs. 
- self.assertEqual(reader.call_count, 4) - self.assertEqual(len(reader.call_args_list[0][0][1]), 4) - self.assertEqual(len(reader.call_args_list[1][0][1]), 4) - self.assertEqual(len(reader.call_args_list[2][0][1]), 4) - self.assertEqual(len(reader.call_args_list[3][0][1]), 4) - - @mock.patch('schedv2.BenchmarkRunCacheReader') - def test_BenchmarkRunCacheReader_3(self, reader): - """Test benchmarkrun set is split into 2 segments.""" - - self.exp = self._make_fake_experiment( - EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=3)) - my_schedv2 = Schedv2(self.exp) - self.assertFalse(my_schedv2.is_complete()) - # We have 3 * 2 == 6 brs, we use 2 threads. - self.assertEqual(reader.call_count, 2) - self.assertEqual(len(reader.call_args_list[0][0][1]), 3) - self.assertEqual(len(reader.call_args_list[1][0][1]), 3) - - @mock.patch('schedv2.BenchmarkRunCacheReader') - def test_BenchmarkRunCacheReader_4(self, reader): - """Test benchmarkrun set is not splitted.""" - - self.exp = self._make_fake_experiment( - EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=1)) - my_schedv2 = Schedv2(self.exp) - self.assertFalse(my_schedv2.is_complete()) - # We have 1 * 2 == 2 br, so only 1 instance. - self.assertEqual(reader.call_count, 1) - self.assertEqual(len(reader.call_args_list[0][0][1]), 2) - - def test_cachehit(self): - """Test cache-hit and none-cache-hit brs are properly organized.""" - - def MockReadCache(br): - br.cache_hit = (br.label.name == 'image2') - - with mock.patch( - 'benchmark_run.MockBenchmarkRun.ReadCache', new=MockReadCache): - # We have 2 * 30 brs, half of which are put into _cached_br_list. - self.exp = self._make_fake_experiment( - EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30)) - my_schedv2 = Schedv2(self.exp) - self.assertEqual(len(my_schedv2.get_cached_run_list()), 30) - # The non-cache-hit brs are put into Schedv2._label_brl_map. - self.assertEqual( - functools.reduce(lambda a, x: a + len(x[1]), - my_schedv2.get_label_map().items(), 0), 30) - - def test_nocachehit(self): - """Test no cache-hit.""" - - def MockReadCache(br): - br.cache_hit = False - - with mock.patch( - 'benchmark_run.MockBenchmarkRun.ReadCache', new=MockReadCache): - # We have 2 * 30 brs, none of which are put into _cached_br_list. - self.exp = self._make_fake_experiment( - EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30)) - my_schedv2 = Schedv2(self.exp) - self.assertEqual(len(my_schedv2.get_cached_run_list()), 0) - # The non-cache-hit brs are put into Schedv2._label_brl_map. 
- self.assertEqual( - functools.reduce(lambda a, x: a + len(x[1]), - my_schedv2.get_label_map().items(), 0), 60) - - -if __name__ == '__main__': - test_flag.SetTestMode(True) - unittest.main() + """ + experiment_file = ExperimentFile(io.StringIO(expstr)) + experiment = ExperimentFactory().GetExperiment( + experiment_file, working_directory="", log_dir="" + ) + return experiment + + def test_remote(self): + """Test that remotes in labels are aggregated into experiment.remote.""" + + self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1) + self.exp.log_level = "verbose" + my_schedv2 = Schedv2(self.exp) + self.assertFalse(my_schedv2.is_complete()) + self.assertIn("chromeos-daisy1.cros", self.exp.remote) + self.assertIn("chromeos-daisy2.cros", self.exp.remote) + self.assertIn("chromeos-daisy3.cros", self.exp.remote) + self.assertIn("chromeos-daisy4.cros", self.exp.remote) + self.assertIn("chromeos-daisy5.cros", self.exp.remote) + + def test_unreachable_remote(self): + """Test unreachable remotes are removed from experiment and label.""" + + def MockIsReachable(cm): + return ( + cm.name != "chromeos-daisy3.cros" + and cm.name != "chromeos-daisy5.cros" + ) + + with mock.patch( + "machine_manager.MockCrosMachine.IsReachable", new=MockIsReachable + ): + self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1) + self.assertIn("chromeos-daisy1.cros", self.exp.remote) + self.assertIn("chromeos-daisy2.cros", self.exp.remote) + self.assertNotIn("chromeos-daisy3.cros", self.exp.remote) + self.assertIn("chromeos-daisy4.cros", self.exp.remote) + self.assertNotIn("chromeos-daisy5.cros", self.exp.remote) + + for l in self.exp.labels: + if l.name == "image2": + self.assertNotIn("chromeos-daisy5.cros", l.remote) + self.assertIn("chromeos-daisy4.cros", l.remote) + elif l.name == "image1": + self.assertNotIn("chromeos-daisy3.cros", l.remote) + + @mock.patch("schedv2.BenchmarkRunCacheReader") + def test_BenchmarkRunCacheReader_1(self, reader): + """Test benchmarkrun set is split into 5 segments.""" + + self.exp = self._make_fake_experiment( + EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=9) + ) + my_schedv2 = Schedv2(self.exp) + self.assertFalse(my_schedv2.is_complete()) + # We have 9 * 2 == 18 brs, we use 5 threads, each reading 4, 4, 4, + # 4, 2 brs respectively. + # Assert that BenchmarkRunCacheReader() is called 5 times. + self.assertEqual(reader.call_count, 5) + # reader.call_args_list[n] - nth call. + # reader.call_args_list[n][0] - positioned args in nth call. + # reader.call_args_list[n][0][1] - the 2nd arg in nth call, + # that is 'br_list' in 'schedv2.BenchmarkRunCacheReader'. + self.assertEqual(len(reader.call_args_list[0][0][1]), 4) + self.assertEqual(len(reader.call_args_list[1][0][1]), 4) + self.assertEqual(len(reader.call_args_list[2][0][1]), 4) + self.assertEqual(len(reader.call_args_list[3][0][1]), 4) + self.assertEqual(len(reader.call_args_list[4][0][1]), 2) + + @mock.patch("schedv2.BenchmarkRunCacheReader") + def test_BenchmarkRunCacheReader_2(self, reader): + """Test benchmarkrun set is split into 4 segments.""" + + self.exp = self._make_fake_experiment( + EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=8) + ) + my_schedv2 = Schedv2(self.exp) + self.assertFalse(my_schedv2.is_complete()) + # We have 8 * 2 == 16 brs, we use 4 threads, each reading 4 brs. 
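(An aside, not part of the patch: the call_args_list indexing these tests rely on is standard unittest.mock behavior and can be seen with a bare Mock:)

    import unittest.mock as mock

    reader = mock.Mock()
    reader(None, [1, 2, 3])  # first call
    reader(None, [4, 5])  # second call
    # call_args_list[n] is the nth call, [0] its positional args, and [1]
    # the second positional arg, i.e. 'br_list' in the tests above.
    assert len(reader.call_args_list[0][0][1]) == 3
    assert len(reader.call_args_list[1][0][1]) == 2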
+        self.assertEqual(reader.call_count, 4)
+        self.assertEqual(len(reader.call_args_list[0][0][1]), 4)
+        self.assertEqual(len(reader.call_args_list[1][0][1]), 4)
+        self.assertEqual(len(reader.call_args_list[2][0][1]), 4)
+        self.assertEqual(len(reader.call_args_list[3][0][1]), 4)
+
+    @mock.patch("schedv2.BenchmarkRunCacheReader")
+    def test_BenchmarkRunCacheReader_3(self, reader):
+        """Test benchmarkrun set is split into 2 segments."""
+
+        self.exp = self._make_fake_experiment(
+            EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=3)
+        )
+        my_schedv2 = Schedv2(self.exp)
+        self.assertFalse(my_schedv2.is_complete())
+        # We have 3 * 2 == 6 brs, we use 2 threads.
+        self.assertEqual(reader.call_count, 2)
+        self.assertEqual(len(reader.call_args_list[0][0][1]), 3)
+        self.assertEqual(len(reader.call_args_list[1][0][1]), 3)
+
+    @mock.patch("schedv2.BenchmarkRunCacheReader")
+    def test_BenchmarkRunCacheReader_4(self, reader):
+        """Test benchmarkrun set is not split."""
+
+        self.exp = self._make_fake_experiment(
+            EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=1)
+        )
+        my_schedv2 = Schedv2(self.exp)
+        self.assertFalse(my_schedv2.is_complete())
+        # We have 1 * 2 == 2 br, so only 1 instance.
+        self.assertEqual(reader.call_count, 1)
+        self.assertEqual(len(reader.call_args_list[0][0][1]), 2)
+
+    def test_cachehit(self):
+        """Test cache-hit and non-cache-hit brs are properly organized."""
+
+        def MockReadCache(br):
+            br.cache_hit = br.label.name == "image2"
+
+        with mock.patch(
+            "benchmark_run.MockBenchmarkRun.ReadCache", new=MockReadCache
+        ):
+            # We have 2 * 30 brs, half of which are put into _cached_br_list.
+            self.exp = self._make_fake_experiment(
+                EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30)
+            )
+            my_schedv2 = Schedv2(self.exp)
+            self.assertEqual(len(my_schedv2.get_cached_run_list()), 30)
+            # The non-cache-hit brs are put into Schedv2._label_brl_map.
+            self.assertEqual(
+                functools.reduce(
+                    lambda a, x: a + len(x[1]),
+                    my_schedv2.get_label_map().items(),
+                    0,
+                ),
+                30,
+            )
+
+    def test_nocachehit(self):
+        """Test no cache-hit."""
+
+        def MockReadCache(br):
+            br.cache_hit = False
+
+        with mock.patch(
+            "benchmark_run.MockBenchmarkRun.ReadCache", new=MockReadCache
+        ):
+            # We have 2 * 30 brs, none of which are put into _cached_br_list.
+            self.exp = self._make_fake_experiment(
+                EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30)
+            )
+            my_schedv2 = Schedv2(self.exp)
+            self.assertEqual(len(my_schedv2.get_cached_run_list()), 0)
+            # The non-cache-hit brs are put into Schedv2._label_brl_map.
+ self.assertEqual( + functools.reduce( + lambda a, x: a + len(x[1]), + my_schedv2.get_label_map().items(), + 0, + ), + 60, + ) + + +if __name__ == "__main__": + test_flag.SetTestMode(True) + unittest.main() diff --git a/crosperf/settings.py b/crosperf/settings.py index 9aa6879b..5ea25927 100644 --- a/crosperf/settings.py +++ b/crosperf/settings.py @@ -13,74 +13,100 @@ from download_images import ImageDownloader class Settings(object): - """Class representing settings (a set of fields) from an experiment file.""" + """Class representing settings (a set of fields) from an experiment file.""" - def __init__(self, name, settings_type): - self.name = name - self.settings_type = settings_type - self.fields = {} - self.parent = None + def __init__(self, name, settings_type): + self.name = name + self.settings_type = settings_type + self.fields = {} + self.parent = None - def SetParentSettings(self, settings): - """Set the parent settings which these settings can inherit from.""" - self.parent = settings + def SetParentSettings(self, settings): + """Set the parent settings which these settings can inherit from.""" + self.parent = settings - def AddField(self, field): - name = field.name - if name in self.fields: - raise SyntaxError('Field %s defined previously.' % name) - self.fields[name] = field + def AddField(self, field): + name = field.name + if name in self.fields: + raise SyntaxError("Field %s defined previously." % name) + self.fields[name] = field - def SetField(self, name, value, append=False): - if name not in self.fields: - raise SyntaxError("'%s' is not a valid field in '%s' settings" % - (name, self.settings_type)) - if append: - self.fields[name].Append(value) - else: - self.fields[name].Set(value) + def SetField(self, name, value, append=False): + if name not in self.fields: + raise SyntaxError( + "'%s' is not a valid field in '%s' settings" + % (name, self.settings_type) + ) + if append: + self.fields[name].Append(value) + else: + self.fields[name].Set(value) - def GetField(self, name): - """Get the value of a field with a given name.""" - if name not in self.fields: - raise SyntaxError( - "Field '%s' not a valid field in '%s' settings." % (name, self.name)) - field = self.fields[name] - if not field.assigned and field.required: - raise SyntaxError("Required field '%s' not defined in '%s' settings." % - (name, self.name)) - return self.fields[name].Get() + def GetField(self, name): + """Get the value of a field with a given name.""" + if name not in self.fields: + raise SyntaxError( + "Field '%s' not a valid field in '%s' settings." + % (name, self.name) + ) + field = self.fields[name] + if not field.assigned and field.required: + raise SyntaxError( + "Required field '%s' not defined in '%s' settings." 
+ % (name, self.name) + ) + return self.fields[name].Get() - def Inherit(self): - """Inherit any unset values from the parent settings.""" - for name in self.fields: - if (not self.fields[name].assigned and self.parent and - name in self.parent.fields and self.parent.fields[name].assigned): - self.fields[name].Set(self.parent.GetField(name), parse=False) + def Inherit(self): + """Inherit any unset values from the parent settings.""" + for name in self.fields: + if ( + not self.fields[name].assigned + and self.parent + and name in self.parent.fields + and self.parent.fields[name].assigned + ): + self.fields[name].Set(self.parent.GetField(name), parse=False) - def Override(self, settings): - """Override settings with settings from a different object.""" - for name in settings.fields: - if name in self.fields and settings.fields[name].assigned: - self.fields[name].Set(settings.GetField(name), parse=False) + def Override(self, settings): + """Override settings with settings from a different object.""" + for name in settings.fields: + if name in self.fields and settings.fields[name].assigned: + self.fields[name].Set(settings.GetField(name), parse=False) - def Validate(self): - """Check that all required fields have been set.""" - for name in self.fields: - if not self.fields[name].assigned and self.fields[name].required: - raise SyntaxError('Field %s is invalid.' % name) + def Validate(self): + """Check that all required fields have been set.""" + for name in self.fields: + if not self.fields[name].assigned and self.fields[name].required: + raise SyntaxError("Field %s is invalid." % name) - def GetXbuddyPath(self, path_str, autotest_path, debug_path, board, - chromeos_root, log_level, download_debug): - prefix = 'remote' - l = logger.GetLogger() - if (path_str.find('trybot') < 0 and path_str.find('toolchain') < 0 and - path_str.find(board) < 0 and path_str.find(board.replace('_', '-'))): - xbuddy_path = '%s/%s/%s' % (prefix, board, path_str) - else: - xbuddy_path = '%s/%s' % (prefix, path_str) - image_downloader = ImageDownloader(l, log_level) - # Returns three variables: image, autotest_path, debug_path - return image_downloader.Run( - misc.CanonicalizePath(chromeos_root), xbuddy_path, autotest_path, - debug_path, download_debug) + def GetXbuddyPath( + self, + path_str, + autotest_path, + debug_path, + board, + chromeos_root, + log_level, + download_debug, + ): + prefix = "remote" + l = logger.GetLogger() + if ( + path_str.find("trybot") < 0 + and path_str.find("toolchain") < 0 + and path_str.find(board) < 0 + and path_str.find(board.replace("_", "-")) + ): + xbuddy_path = "%s/%s/%s" % (prefix, board, path_str) + else: + xbuddy_path = "%s/%s" % (prefix, path_str) + image_downloader = ImageDownloader(l, log_level) + # Returns three variables: image, autotest_path, debug_path + return image_downloader.Run( + misc.CanonicalizePath(chromeos_root), + xbuddy_path, + autotest_path, + debug_path, + download_debug, + ) diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py index 4831f64d..b91156dc 100644 --- a/crosperf/settings_factory.py +++ b/crosperf/settings_factory.py @@ -17,407 +17,558 @@ from settings import Settings class BenchmarkSettings(Settings): - """Settings used to configure individual benchmarks.""" + """Settings used to configure individual benchmarks.""" - def __init__(self, name): - super(BenchmarkSettings, self).__init__(name, 'benchmark') - self.AddField( - TextField('test_name', - description='The name of the test to run. 
' - 'Defaults to the name of the benchmark.')) - self.AddField( - TextField('test_args', - description='Arguments to be passed to the ' - 'test.')) - self.AddField( - IntegerField( - 'iterations', - required=False, - default=0, - description='Number of iterations to run the test. ' - 'If not set, will run each benchmark test the optimum number of ' - 'times to get a stable result.')) - self.AddField( - TextField('suite', - default='test_that', - description='The type of the benchmark.')) - self.AddField( - IntegerField('retries', - default=0, - description='Number of times to retry a ' - 'benchmark run.')) - self.AddField( - BooleanField('run_local', - description='Run benchmark harness on the DUT. ' - 'Currently only compatible with the suite: ' - 'telemetry_Crosperf.', - required=False, - default=True)) - self.AddField( - FloatField( - 'weight', - default=0.0, - description='Weight of the benchmark for CWP approximation')) + def __init__(self, name): + super(BenchmarkSettings, self).__init__(name, "benchmark") + self.AddField( + TextField( + "test_name", + description="The name of the test to run. " + "Defaults to the name of the benchmark.", + ) + ) + self.AddField( + TextField( + "test_args", + description="Arguments to be passed to the " "test.", + ) + ) + self.AddField( + IntegerField( + "iterations", + required=False, + default=0, + description="Number of iterations to run the test. " + "If not set, will run each benchmark test the optimum number of " + "times to get a stable result.", + ) + ) + self.AddField( + TextField( + "suite", + default="test_that", + description="The type of the benchmark.", + ) + ) + self.AddField( + IntegerField( + "retries", + default=0, + description="Number of times to retry a " "benchmark run.", + ) + ) + self.AddField( + BooleanField( + "run_local", + description="Run benchmark harness on the DUT. " + "Currently only compatible with the suite: " + "telemetry_Crosperf.", + required=False, + default=True, + ) + ) + self.AddField( + FloatField( + "weight", + default=0.0, + description="Weight of the benchmark for CWP approximation", + ) + ) class LabelSettings(Settings): - """Settings for each label.""" + """Settings for each label.""" - def __init__(self, name): - super(LabelSettings, self).__init__(name, 'label') - self.AddField( - TextField('chromeos_image', - required=False, - description='The path to the image to run tests ' - 'on, for local/custom-built images. See the ' - "'build' option for official or trybot images.")) - self.AddField( - TextField( - 'autotest_path', - required=False, - description='Autotest directory path relative to chroot which ' - 'has autotest files for the image to run tests requiring autotest ' - 'files.')) - self.AddField( - TextField( - 'debug_path', - required=False, - description='Debug info directory relative to chroot which has ' - 'symbols and vmlinux that can be used by perf tool.')) - self.AddField( - TextField('chromeos_root', - description='The path to a chromeos checkout which ' - 'contains a src/scripts directory. 
Defaults to ' - 'the chromeos checkout which contains the ' - 'chromeos_image.')) - self.AddField( - ListField('remote', - description='A comma-separated list of IPs of chromeos' - 'devices to run experiments on.')) - self.AddField( - TextField('image_args', - required=False, - default='', - description='Extra arguments to pass to ' - 'image_chromeos.py.')) - self.AddField( - TextField('cache_dir', - default='', - description='The cache dir for this image.')) - self.AddField( - TextField('compiler', - default='gcc', - description='The compiler used to build the ' - 'ChromeOS image (gcc or llvm).')) - self.AddField( - TextField('chrome_src', - description='The path to the source of chrome. ' - 'This is used to run telemetry benchmarks. ' - 'The default one is the src inside chroot.', - required=False, - default='')) - self.AddField( - TextField('build', - description='The xbuddy specification for an ' - 'official or trybot image to use for tests. ' - "'/remote' is assumed, and the board is given " - "elsewhere, so omit the '/remote/<board>/' xbuddy " - 'prefix.', - required=False, - default='')) + def __init__(self, name): + super(LabelSettings, self).__init__(name, "label") + self.AddField( + TextField( + "chromeos_image", + required=False, + description="The path to the image to run tests " + "on, for local/custom-built images. See the " + "'build' option for official or trybot images.", + ) + ) + self.AddField( + TextField( + "autotest_path", + required=False, + description="Autotest directory path relative to chroot which " + "has autotest files for the image to run tests requiring autotest " + "files.", + ) + ) + self.AddField( + TextField( + "debug_path", + required=False, + description="Debug info directory relative to chroot which has " + "symbols and vmlinux that can be used by perf tool.", + ) + ) + self.AddField( + TextField( + "chromeos_root", + description="The path to a chromeos checkout which " + "contains a src/scripts directory. Defaults to " + "the chromeos checkout which contains the " + "chromeos_image.", + ) + ) + self.AddField( + ListField( + "remote", + description="A comma-separated list of IPs of chromeos" + "devices to run experiments on.", + ) + ) + self.AddField( + TextField( + "image_args", + required=False, + default="", + description="Extra arguments to pass to " "image_chromeos.py.", + ) + ) + self.AddField( + TextField( + "cache_dir", + default="", + description="The cache dir for this image.", + ) + ) + self.AddField( + TextField( + "compiler", + default="gcc", + description="The compiler used to build the " + "ChromeOS image (gcc or llvm).", + ) + ) + self.AddField( + TextField( + "chrome_src", + description="The path to the source of chrome. " + "This is used to run telemetry benchmarks. " + "The default one is the src inside chroot.", + required=False, + default="", + ) + ) + self.AddField( + TextField( + "build", + description="The xbuddy specification for an " + "official or trybot image to use for tests. " + "'/remote' is assumed, and the board is given " + "elsewhere, so omit the '/remote/<board>/' xbuddy " + "prefix.", + required=False, + default="", + ) + ) class GlobalSettings(Settings): - """Settings that apply per-experiment.""" + """Settings that apply per-experiment.""" - def __init__(self, name): - super(GlobalSettings, self).__init__(name, 'global') - self.AddField( - TextField('name', - description='The name of the experiment. 
Just an ' - 'identifier.')) - self.AddField( - TextField('board', - description='The target board for running ' - 'experiments on, e.g. x86-alex.')) - self.AddField( - BooleanField('crosfleet', - description='Whether to run experiments via crosfleet.', - default=False)) - self.AddField( - ListField('remote', - description='A comma-separated list of IPs of ' - 'chromeos devices to run experiments on.')) - self.AddField( - BooleanField('rerun_if_failed', - description='Whether to re-run failed test runs ' - 'or not.', - default=False)) - self.AddField( - BooleanField('rm_chroot_tmp', - default=False, - description='Whether to remove the test_that ' - 'result in the chroot.')) - self.AddField( - ListField('email', - description='Space-separated list of email ' - 'addresses to send email to.')) - self.AddField( - BooleanField('rerun', - description='Whether to ignore the cache and ' - 'for tests to be re-run.', - default=False)) - self.AddField( - BooleanField('same_specs', - default=True, - description='Ensure cached runs are run on the ' - 'same kind of devices which are specified as a ' - 'remote.')) - self.AddField( - BooleanField('same_machine', - default=False, - description='Ensure cached runs are run on the ' - 'same remote.')) - self.AddField( - BooleanField('use_file_locks', - default=False, - description='DEPRECATED: Whether to use the file locks ' - 'or AFE server lock mechanism.')) - self.AddField( - IntegerField( - 'iterations', - required=False, - default=0, - description='Number of iterations to run all tests. ' - 'If not set, will run each benchmark test the optimum number of ' - 'times to get a stable result.')) - self.AddField( - TextField('chromeos_root', - description='The path to a chromeos checkout which ' - 'contains a src/scripts directory. Defaults to ' - 'the chromeos checkout which contains the ' - 'chromeos_image.')) - self.AddField( - TextField('logging_level', - default='average', - description='The level of logging desired. ' - "Options are 'quiet', 'average', and 'verbose'.")) - self.AddField( - IntegerField('acquire_timeout', - default=0, - description='Number of seconds to wait for ' - 'machine before exit if all the machines in ' - 'the experiment file are busy. Default is 0.')) - self.AddField( - TextField('perf_args', - default='', - description='The optional profile command. It ' - 'enables perf commands to record perforamance ' - 'related counters. It must start with perf ' - 'command record or stat followed by arguments.')) - self.AddField( - BooleanField('download_debug', - default=True, - description='Download compressed debug symbols alongwith ' - 'image. This can provide more info matching symbols for' - 'profiles, but takes larger space. By default, download' - 'it only when perf_args is specified.')) - self.AddField( - TextField('cache_dir', - default='', - description='The abs path of cache dir. 
' - 'Default is /home/$(whoami)/cros_scratch.')) - self.AddField( - BooleanField('cache_only', - default=False, - description='Whether to use only cached ' - 'results (do not rerun failed tests).')) - self.AddField( - BooleanField('no_email', - default=False, - description='Whether to disable the email to ' - 'user after crosperf finishes.')) - self.AddField( - BooleanField('json_report', - default=False, - description='Whether to generate a json version ' - 'of the report, for archiving.')) - self.AddField( - BooleanField('show_all_results', - default=False, - description='When running Telemetry tests, ' - 'whether to all the results, instead of just ' - 'the default (summary) results.')) - self.AddField( - TextField('share_cache', - default='', - description='Path to alternate cache whose data ' - 'you want to use. It accepts multiple directories ' - 'separated by a ",".')) - self.AddField( - TextField('results_dir', default='', description='The results dir.')) - self.AddField( - BooleanField( - 'compress_results', - default=True, - description='Whether to compress all test results other than ' - 'reports into a tarball to save disk space.')) - self.AddField( - TextField('locks_dir', - default='', - description='An alternate directory to use for ' - 'storing/checking machine file locks for local machines. ' - 'By default the file locks directory is ' - '/google/data/rw/users/mo/mobiletc-prebuild/locks.\n' - 'WARNING: If you use your own locks directory, ' - 'there is no guarantee that someone else might not ' - 'hold a lock on the same machine in a different ' - 'locks directory.')) - self.AddField( - TextField('chrome_src', - description='The path to the source of chrome. ' - 'This is used to run telemetry benchmarks. ' - 'The default one is the src inside chroot.', - required=False, - default='')) - self.AddField( - IntegerField('retries', - default=0, - description='Number of times to retry a ' - 'benchmark run.')) - self.AddField( - TextField('cwp_dso', - description='The DSO type that we want to use for ' - 'CWP approximation. This is used to run telemetry ' - 'benchmarks. Valid DSO types can be found from dso_list ' - 'in experiment_factory.py. The default value is set to ' - 'be empty.', - required=False, - default='')) - self.AddField( - BooleanField('enable_aslr', - description='Enable ASLR on the machine to run the ' - 'benchmarks. ASLR is disabled by default', - required=False, - default=False)) - self.AddField( - BooleanField('ignore_min_max', - description='When doing math for the raw results, ' - 'ignore min and max values to reduce noise.', - required=False, - default=False)) - self.AddField( - TextField( - 'intel_pstate', - description='Intel Pstate mode.\n' - 'Supported modes: "active", "passive", "no_hwp".\n' - 'Default is "no_hwp" which disables hardware pstates to avoid ' - 'noise in benchmarks.', - required=False, - default='no_hwp')) - self.AddField( - BooleanField('turbostat', - description='Run turbostat process in the background' - ' of a benchmark. Enabled by default.', - required=False, - default=True)) - self.AddField( - FloatField( - 'top_interval', - description='Run top command in the background of a benchmark with' - ' interval of sampling specified in seconds.\n' - 'Recommended values 1-5. Lower number provides more accurate' - ' data.\n' - 'With 0 - do not run top.\n' - 'NOTE: Running top with interval 1-5 sec has insignificant' - ' performance impact (performance degradation does not exceed' - ' 0.3%%, measured on x86_64, ARM32, and ARM64). 
' - 'The default value is 1.', - required=False, - default=1)) - self.AddField( - IntegerField('cooldown_temp', - required=False, - default=40, - description='Wait until CPU temperature goes down below' - ' specified temperature in Celsius' - ' prior starting a benchmark. ' - 'By default the value is set to 40 degrees.')) - self.AddField( - IntegerField('cooldown_time', - required=False, - default=10, - description='Wait specified time in minutes allowing' - ' CPU to cool down. Zero value disables cooldown. ' - 'The default value is 10 minutes.')) - self.AddField( - EnumField( - 'governor', - options=[ - 'performance', - 'powersave', - 'userspace', - 'ondemand', - 'conservative', - 'schedutils', - 'sched', - 'interactive', - ], - default='performance', - required=False, - description='Setup CPU governor for all cores.\n' - 'For more details refer to:\n' - 'https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt. ' - 'Default is "performance" governor.')) - self.AddField( - EnumField( - 'cpu_usage', - options=[ - 'all', - 'big_only', - 'little_only', - 'exclusive_cores', - ], - default='all', - required=False, - description='Restrict usage of CPUs to decrease CPU interference.\n' - '"all" - no restrictions;\n' - '"big-only", "little-only" - enable only big/little cores,' - ' applicable only on ARM;\n' - '"exclusive-cores" - (for future use)' - ' isolate cores for exclusive use of benchmark processes. ' - 'By default use all CPUs.')) - self.AddField( - IntegerField( - 'cpu_freq_pct', - required=False, - default=95, - description='Setup CPU frequency to a supported value less than' - ' or equal to a percent of max_freq. ' - 'CPU frequency is reduced to 95%% by default to reduce thermal ' - 'throttling.')) - self.AddField( - BooleanField( - 'no_lock', - default=False, - description='Do not attempt to lock the DUT.' - ' Useful when lock is held externally, say with crosfleet.')) + def __init__(self, name): + super(GlobalSettings, self).__init__(name, "global") + self.AddField( + TextField( + "name", + description="The name of the experiment. Just an " + "identifier.", + ) + ) + self.AddField( + TextField( + "board", + description="The target board for running " + "experiments on, e.g. 
x86-alex.", + ) + ) + self.AddField( + BooleanField( + "crosfleet", + description="Whether to run experiments via crosfleet.", + default=False, + ) + ) + self.AddField( + ListField( + "remote", + description="A comma-separated list of IPs of " + "chromeos devices to run experiments on.", + ) + ) + self.AddField( + BooleanField( + "rerun_if_failed", + description="Whether to re-run failed test runs " "or not.", + default=False, + ) + ) + self.AddField( + BooleanField( + "rm_chroot_tmp", + default=False, + description="Whether to remove the test_that " + "result in the chroot.", + ) + ) + self.AddField( + ListField( + "email", + description="Space-separated list of email " + "addresses to send email to.", + ) + ) + self.AddField( + BooleanField( + "rerun", + description="Whether to ignore the cache and " + "for tests to be re-run.", + default=False, + ) + ) + self.AddField( + BooleanField( + "same_specs", + default=True, + description="Ensure cached runs are run on the " + "same kind of devices which are specified as a " + "remote.", + ) + ) + self.AddField( + BooleanField( + "same_machine", + default=False, + description="Ensure cached runs are run on the " "same remote.", + ) + ) + self.AddField( + BooleanField( + "use_file_locks", + default=False, + description="DEPRECATED: Whether to use the file locks " + "or AFE server lock mechanism.", + ) + ) + self.AddField( + IntegerField( + "iterations", + required=False, + default=0, + description="Number of iterations to run all tests. " + "If not set, will run each benchmark test the optimum number of " + "times to get a stable result.", + ) + ) + self.AddField( + TextField( + "chromeos_root", + description="The path to a chromeos checkout which " + "contains a src/scripts directory. Defaults to " + "the chromeos checkout which contains the " + "chromeos_image.", + ) + ) + self.AddField( + TextField( + "logging_level", + default="average", + description="The level of logging desired. " + "Options are 'quiet', 'average', and 'verbose'.", + ) + ) + self.AddField( + IntegerField( + "acquire_timeout", + default=0, + description="Number of seconds to wait for " + "machine before exit if all the machines in " + "the experiment file are busy. Default is 0.", + ) + ) + self.AddField( + TextField( + "perf_args", + default="", + description="The optional profile command. It " + "enables perf commands to record perforamance " + "related counters. It must start with perf " + "command record or stat followed by arguments.", + ) + ) + self.AddField( + BooleanField( + "download_debug", + default=True, + description="Download compressed debug symbols alongwith " + "image. This can provide more info matching symbols for" + "profiles, but takes larger space. By default, download" + "it only when perf_args is specified.", + ) + ) + self.AddField( + TextField( + "cache_dir", + default="", + description="The abs path of cache dir. 
" + "Default is /home/$(whoami)/cros_scratch.", + ) + ) + self.AddField( + BooleanField( + "cache_only", + default=False, + description="Whether to use only cached " + "results (do not rerun failed tests).", + ) + ) + self.AddField( + BooleanField( + "no_email", + default=False, + description="Whether to disable the email to " + "user after crosperf finishes.", + ) + ) + self.AddField( + BooleanField( + "json_report", + default=False, + description="Whether to generate a json version " + "of the report, for archiving.", + ) + ) + self.AddField( + BooleanField( + "show_all_results", + default=False, + description="When running Telemetry tests, " + "whether to all the results, instead of just " + "the default (summary) results.", + ) + ) + self.AddField( + TextField( + "share_cache", + default="", + description="Path to alternate cache whose data " + "you want to use. It accepts multiple directories " + 'separated by a ",".', + ) + ) + self.AddField( + TextField("results_dir", default="", description="The results dir.") + ) + self.AddField( + BooleanField( + "compress_results", + default=True, + description="Whether to compress all test results other than " + "reports into a tarball to save disk space.", + ) + ) + self.AddField( + TextField( + "locks_dir", + default="", + description="An alternate directory to use for " + "storing/checking machine file locks for local machines. " + "By default the file locks directory is " + "/google/data/rw/users/mo/mobiletc-prebuild/locks.\n" + "WARNING: If you use your own locks directory, " + "there is no guarantee that someone else might not " + "hold a lock on the same machine in a different " + "locks directory.", + ) + ) + self.AddField( + TextField( + "chrome_src", + description="The path to the source of chrome. " + "This is used to run telemetry benchmarks. " + "The default one is the src inside chroot.", + required=False, + default="", + ) + ) + self.AddField( + IntegerField( + "retries", + default=0, + description="Number of times to retry a " "benchmark run.", + ) + ) + self.AddField( + TextField( + "cwp_dso", + description="The DSO type that we want to use for " + "CWP approximation. This is used to run telemetry " + "benchmarks. Valid DSO types can be found from dso_list " + "in experiment_factory.py. The default value is set to " + "be empty.", + required=False, + default="", + ) + ) + self.AddField( + BooleanField( + "enable_aslr", + description="Enable ASLR on the machine to run the " + "benchmarks. ASLR is disabled by default", + required=False, + default=False, + ) + ) + self.AddField( + BooleanField( + "ignore_min_max", + description="When doing math for the raw results, " + "ignore min and max values to reduce noise.", + required=False, + default=False, + ) + ) + self.AddField( + TextField( + "intel_pstate", + description="Intel Pstate mode.\n" + 'Supported modes: "active", "passive", "no_hwp".\n' + 'Default is "no_hwp" which disables hardware pstates to avoid ' + "noise in benchmarks.", + required=False, + default="no_hwp", + ) + ) + self.AddField( + BooleanField( + "turbostat", + description="Run turbostat process in the background" + " of a benchmark. Enabled by default.", + required=False, + default=True, + ) + ) + self.AddField( + FloatField( + "top_interval", + description="Run top command in the background of a benchmark with" + " interval of sampling specified in seconds.\n" + "Recommended values 1-5. 
Lower number provides more accurate" + " data.\n" + "With 0 - do not run top.\n" + "NOTE: Running top with interval 1-5 sec has insignificant" + " performance impact (performance degradation does not exceed" + " 0.3%%, measured on x86_64, ARM32, and ARM64). " + "The default value is 1.", + required=False, + default=1, + ) + ) + self.AddField( + IntegerField( + "cooldown_temp", + required=False, + default=40, + description="Wait until CPU temperature goes down below" + " specified temperature in Celsius" + " prior starting a benchmark. " + "By default the value is set to 40 degrees.", + ) + ) + self.AddField( + IntegerField( + "cooldown_time", + required=False, + default=10, + description="Wait specified time in minutes allowing" + " CPU to cool down. Zero value disables cooldown. " + "The default value is 10 minutes.", + ) + ) + self.AddField( + EnumField( + "governor", + options=[ + "performance", + "powersave", + "userspace", + "ondemand", + "conservative", + "schedutils", + "sched", + "interactive", + ], + default="performance", + required=False, + description="Setup CPU governor for all cores.\n" + "For more details refer to:\n" + "https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt. " + 'Default is "performance" governor.', + ) + ) + self.AddField( + EnumField( + "cpu_usage", + options=[ + "all", + "big_only", + "little_only", + "exclusive_cores", + ], + default="all", + required=False, + description="Restrict usage of CPUs to decrease CPU interference.\n" + '"all" - no restrictions;\n' + '"big-only", "little-only" - enable only big/little cores,' + " applicable only on ARM;\n" + '"exclusive-cores" - (for future use)' + " isolate cores for exclusive use of benchmark processes. " + "By default use all CPUs.", + ) + ) + self.AddField( + IntegerField( + "cpu_freq_pct", + required=False, + default=95, + description="Setup CPU frequency to a supported value less than" + " or equal to a percent of max_freq. " + "CPU frequency is reduced to 95%% by default to reduce thermal " + "throttling.", + ) + ) + self.AddField( + BooleanField( + "no_lock", + default=False, + description="Do not attempt to lock the DUT." + " Useful when lock is held externally, say with crosfleet.", + ) + ) class SettingsFactory(object): - """Factory class for building different types of Settings objects. + """Factory class for building different types of Settings objects. - This factory is currently hardcoded to produce settings for ChromeOS - experiment files. The idea is that in the future, other types - of settings could be produced. - """ + This factory is currently hardcoded to produce settings for ChromeOS + experiment files. The idea is that in the future, other types + of settings could be produced. + """ - def GetSettings(self, name, settings_type): - if settings_type == 'label' or not settings_type: - return LabelSettings(name) - if settings_type == 'global': - return GlobalSettings(name) - if settings_type == 'benchmark': - return BenchmarkSettings(name) + def GetSettings(self, name, settings_type): + if settings_type == "label" or not settings_type: + return LabelSettings(name) + if settings_type == "global": + return GlobalSettings(name) + if settings_type == "benchmark": + return BenchmarkSettings(name) - raise TypeError("Invalid settings type: '%s'." % settings_type) + raise TypeError("Invalid settings type: '%s'." 
% settings_type) diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py index 195c17ff..031a0e65 100755 --- a/crosperf/settings_factory_unittest.py +++ b/crosperf/settings_factory_unittest.py @@ -15,101 +15,108 @@ import settings_factory class BenchmarkSettingsTest(unittest.TestCase): - """Class to test benchmark settings.""" + """Class to test benchmark settings.""" - def test_init(self): - res = settings_factory.BenchmarkSettings('b_settings') - self.assertIsNotNone(res) - self.assertEqual(len(res.fields), 7) - self.assertEqual(res.GetField('test_name'), '') - self.assertEqual(res.GetField('test_args'), '') - self.assertEqual(res.GetField('iterations'), 0) - self.assertEqual(res.GetField('suite'), 'test_that') + def test_init(self): + res = settings_factory.BenchmarkSettings("b_settings") + self.assertIsNotNone(res) + self.assertEqual(len(res.fields), 7) + self.assertEqual(res.GetField("test_name"), "") + self.assertEqual(res.GetField("test_args"), "") + self.assertEqual(res.GetField("iterations"), 0) + self.assertEqual(res.GetField("suite"), "test_that") class LabelSettingsTest(unittest.TestCase): - """Class to test label settings.""" - - def test_init(self): - res = settings_factory.LabelSettings('l_settings') - self.assertIsNotNone(res) - self.assertEqual(len(res.fields), 10) - self.assertEqual(res.GetField('chromeos_image'), '') - self.assertEqual(res.GetField('autotest_path'), '') - self.assertEqual(res.GetField('chromeos_root'), '') - self.assertEqual(res.GetField('remote'), None) - self.assertEqual(res.GetField('image_args'), '') - self.assertEqual(res.GetField('cache_dir'), '') - self.assertEqual(res.GetField('chrome_src'), '') - self.assertEqual(res.GetField('build'), '') + """Class to test label settings.""" + + def test_init(self): + res = settings_factory.LabelSettings("l_settings") + self.assertIsNotNone(res) + self.assertEqual(len(res.fields), 10) + self.assertEqual(res.GetField("chromeos_image"), "") + self.assertEqual(res.GetField("autotest_path"), "") + self.assertEqual(res.GetField("chromeos_root"), "") + self.assertEqual(res.GetField("remote"), None) + self.assertEqual(res.GetField("image_args"), "") + self.assertEqual(res.GetField("cache_dir"), "") + self.assertEqual(res.GetField("chrome_src"), "") + self.assertEqual(res.GetField("build"), "") class GlobalSettingsTest(unittest.TestCase): - """Class to test global settings.""" - - def test_init(self): - res = settings_factory.GlobalSettings('g_settings') - self.assertIsNotNone(res) - self.assertEqual(len(res.fields), 40) - self.assertEqual(res.GetField('name'), '') - self.assertEqual(res.GetField('board'), '') - self.assertEqual(res.GetField('crosfleet'), False) - self.assertEqual(res.GetField('remote'), None) - self.assertEqual(res.GetField('rerun_if_failed'), False) - self.assertEqual(res.GetField('rm_chroot_tmp'), False) - self.assertEqual(res.GetField('email'), None) - self.assertEqual(res.GetField('rerun'), False) - self.assertEqual(res.GetField('same_specs'), True) - self.assertEqual(res.GetField('same_machine'), False) - self.assertEqual(res.GetField('iterations'), 0) - self.assertEqual(res.GetField('chromeos_root'), '') - self.assertEqual(res.GetField('logging_level'), 'average') - self.assertEqual(res.GetField('acquire_timeout'), 0) - self.assertEqual(res.GetField('perf_args'), '') - self.assertEqual(res.GetField('download_debug'), True) - self.assertEqual(res.GetField('cache_dir'), '') - self.assertEqual(res.GetField('cache_only'), False) - 
self.assertEqual(res.GetField('no_email'), False) - self.assertEqual(res.GetField('show_all_results'), False) - self.assertEqual(res.GetField('share_cache'), '') - self.assertEqual(res.GetField('results_dir'), '') - self.assertEqual(res.GetField('compress_results'), True) - self.assertEqual(res.GetField('chrome_src'), '') - self.assertEqual(res.GetField('cwp_dso'), '') - self.assertEqual(res.GetField('enable_aslr'), False) - self.assertEqual(res.GetField('ignore_min_max'), False) - self.assertEqual(res.GetField('intel_pstate'), 'no_hwp') - self.assertEqual(res.GetField('turbostat'), True) - self.assertEqual(res.GetField('top_interval'), 1) - self.assertEqual(res.GetField('cooldown_time'), 10) - self.assertEqual(res.GetField('cooldown_temp'), 40) - self.assertEqual(res.GetField('governor'), 'performance') - self.assertEqual(res.GetField('cpu_usage'), 'all') - self.assertEqual(res.GetField('cpu_freq_pct'), 95) + """Class to test global settings.""" + + def test_init(self): + res = settings_factory.GlobalSettings("g_settings") + self.assertIsNotNone(res) + self.assertEqual(len(res.fields), 40) + self.assertEqual(res.GetField("name"), "") + self.assertEqual(res.GetField("board"), "") + self.assertEqual(res.GetField("crosfleet"), False) + self.assertEqual(res.GetField("remote"), None) + self.assertEqual(res.GetField("rerun_if_failed"), False) + self.assertEqual(res.GetField("rm_chroot_tmp"), False) + self.assertEqual(res.GetField("email"), None) + self.assertEqual(res.GetField("rerun"), False) + self.assertEqual(res.GetField("same_specs"), True) + self.assertEqual(res.GetField("same_machine"), False) + self.assertEqual(res.GetField("iterations"), 0) + self.assertEqual(res.GetField("chromeos_root"), "") + self.assertEqual(res.GetField("logging_level"), "average") + self.assertEqual(res.GetField("acquire_timeout"), 0) + self.assertEqual(res.GetField("perf_args"), "") + self.assertEqual(res.GetField("download_debug"), True) + self.assertEqual(res.GetField("cache_dir"), "") + self.assertEqual(res.GetField("cache_only"), False) + self.assertEqual(res.GetField("no_email"), False) + self.assertEqual(res.GetField("show_all_results"), False) + self.assertEqual(res.GetField("share_cache"), "") + self.assertEqual(res.GetField("results_dir"), "") + self.assertEqual(res.GetField("compress_results"), True) + self.assertEqual(res.GetField("chrome_src"), "") + self.assertEqual(res.GetField("cwp_dso"), "") + self.assertEqual(res.GetField("enable_aslr"), False) + self.assertEqual(res.GetField("ignore_min_max"), False) + self.assertEqual(res.GetField("intel_pstate"), "no_hwp") + self.assertEqual(res.GetField("turbostat"), True) + self.assertEqual(res.GetField("top_interval"), 1) + self.assertEqual(res.GetField("cooldown_time"), 10) + self.assertEqual(res.GetField("cooldown_temp"), 40) + self.assertEqual(res.GetField("governor"), "performance") + self.assertEqual(res.GetField("cpu_usage"), "all") + self.assertEqual(res.GetField("cpu_freq_pct"), 95) class SettingsFactoryTest(unittest.TestCase): - """Class to test SettingsFactory.""" - - def test_get_settings(self): - self.assertRaises(Exception, settings_factory.SettingsFactory.GetSettings, - 'global', 'bad_type') - - l_settings = settings_factory.SettingsFactory().GetSettings( - 'label', 'label') - self.assertIsInstance(l_settings, settings_factory.LabelSettings) - self.assertEqual(len(l_settings.fields), 10) - - b_settings = settings_factory.SettingsFactory().GetSettings( - 'benchmark', 'benchmark') - self.assertIsInstance(b_settings, 
settings_factory.BenchmarkSettings) - self.assertEqual(len(b_settings.fields), 7) - - g_settings = settings_factory.SettingsFactory().GetSettings( - 'global', 'global') - self.assertIsInstance(g_settings, settings_factory.GlobalSettings) - self.assertEqual(len(g_settings.fields), 40) - - -if __name__ == '__main__': - unittest.main() + """Class to test SettingsFactory.""" + + def test_get_settings(self): + self.assertRaises( + Exception, + settings_factory.SettingsFactory.GetSettings, + "global", + "bad_type", + ) + + l_settings = settings_factory.SettingsFactory().GetSettings( + "label", "label" + ) + self.assertIsInstance(l_settings, settings_factory.LabelSettings) + self.assertEqual(len(l_settings.fields), 10) + + b_settings = settings_factory.SettingsFactory().GetSettings( + "benchmark", "benchmark" + ) + self.assertIsInstance(b_settings, settings_factory.BenchmarkSettings) + self.assertEqual(len(b_settings.fields), 7) + + g_settings = settings_factory.SettingsFactory().GetSettings( + "global", "global" + ) + self.assertIsInstance(g_settings, settings_factory.GlobalSettings) + self.assertEqual(len(g_settings.fields), 40) + + +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/settings_unittest.py b/crosperf/settings_unittest.py index fb9b85f3..0128c33e 100755 --- a/crosperf/settings_unittest.py +++ b/crosperf/settings_unittest.py @@ -11,228 +11,284 @@ from __future__ import print_function import unittest import unittest.mock as mock -import settings -import settings_factory - +from cros_utils import logger +import download_images from field import IntegerField from field import ListField -import download_images - -from cros_utils import logger +import settings +import settings_factory class TestSettings(unittest.TestCase): - """setting test class.""" - - def setUp(self): - self.settings = settings.Settings('global_name', 'global') - - def test_init(self): - self.assertEqual(self.settings.name, 'global_name') - self.assertEqual(self.settings.settings_type, 'global') - self.assertIsNone(self.settings.parent) - - def test_set_parent_settings(self): - self.assertIsNone(self.settings.parent) - settings_parent = {'fake_parent_entry': 0} - self.settings.SetParentSettings(settings_parent) - self.assertIsNotNone(self.settings.parent) - self.assertTrue(isinstance(self.settings.parent, dict)) - self.assertEqual(self.settings.parent, settings_parent) - - def test_add_field(self): - self.assertEqual(self.settings.fields, {}) - self.settings.AddField( - IntegerField( - 'iterations', - default=1, - required=False, - description='Number of iterations to ' - 'run the test.')) - self.assertEqual(len(self.settings.fields), 1) - # Adding the same field twice raises an exception. - self.assertRaises(Exception, self.settings.AddField, (IntegerField( - 'iterations', - default=1, - required=False, - description='Number of iterations to run ' - 'the test.'))) - res = self.settings.fields['iterations'] - self.assertIsInstance(res, IntegerField) - self.assertEqual(res.Get(), 1) - - def test_set_field(self): - self.assertEqual(self.settings.fields, {}) - self.settings.AddField( - IntegerField( - 'iterations', - default=1, - required=False, - description='Number of iterations to run the ' - 'test.')) - res = self.settings.fields['iterations'] - self.assertEqual(res.Get(), 1) - - self.settings.SetField('iterations', 10) - res = self.settings.fields['iterations'] - self.assertEqual(res.Get(), 10) - - # Setting a field that's not there raises an exception. 
- self.assertRaises(Exception, self.settings.SetField, 'remote', - 'lumpy1.cros') - - self.settings.AddField( - ListField( - 'remote', - default=[], - description="A comma-separated list of ip's of " - 'chromeos devices to run ' - 'experiments on.')) - self.assertTrue(isinstance(self.settings.fields, dict)) - self.assertEqual(len(self.settings.fields), 2) - res = self.settings.fields['remote'] - self.assertEqual(res.Get(), []) - self.settings.SetField('remote', 'lumpy1.cros', append=True) - self.settings.SetField('remote', 'lumpy2.cros', append=True) - res = self.settings.fields['remote'] - self.assertEqual(res.Get(), ['lumpy1.cros', 'lumpy2.cros']) - - def test_get_field(self): - # Getting a field that's not there raises an exception. - self.assertRaises(Exception, self.settings.GetField, 'iterations') - - # Getting a required field that hasn't been assigned raises an exception. - self.settings.AddField( - IntegerField( - 'iterations', - required=True, - description='Number of iterations to ' - 'run the test.')) - self.assertIsNotNone(self.settings.fields['iterations']) - self.assertRaises(Exception, self.settings.GetField, 'iterations') - - # Set the value, then get it. - self.settings.SetField('iterations', 5) - res = self.settings.GetField('iterations') - self.assertEqual(res, 5) - - def test_inherit(self): - parent_settings = settings_factory.SettingsFactory().GetSettings( - 'global', 'global') - label_settings = settings_factory.SettingsFactory().GetSettings( - 'label', 'label') - self.assertEqual(parent_settings.GetField('chromeos_root'), '') - self.assertEqual(label_settings.GetField('chromeos_root'), '') - self.assertIsNone(label_settings.parent) - - parent_settings.SetField('chromeos_root', '/tmp/chromeos') - label_settings.SetParentSettings(parent_settings) - self.assertEqual(parent_settings.GetField('chromeos_root'), '/tmp/chromeos') - self.assertEqual(label_settings.GetField('chromeos_root'), '') - label_settings.Inherit() - self.assertEqual(label_settings.GetField('chromeos_root'), '/tmp/chromeos') - - def test_override(self): - self.settings.AddField( - ListField( - 'email', - default=[], - description='Space-seperated' - 'list of email addresses to send ' - 'email to.')) - - global_settings = settings_factory.SettingsFactory().GetSettings( - 'global', 'global') - - global_settings.SetField('email', 'john.doe@google.com', append=True) - global_settings.SetField('email', 'jane.smith@google.com', append=True) - - res = self.settings.GetField('email') - self.assertEqual(res, []) - - self.settings.Override(global_settings) - res = self.settings.GetField('email') - self.assertEqual(res, ['john.doe@google.com', 'jane.smith@google.com']) - - def test_validate(self): - - self.settings.AddField( - IntegerField( - 'iterations', - required=True, - description='Number of iterations ' - 'to run the test.')) - self.settings.AddField( - ListField( - 'remote', - default=[], - required=True, - description='A comma-separated list ' - "of ip's of chromeos " - 'devices to run experiments on.')) - self.settings.AddField( - ListField( - 'email', - default=[], - description='Space-seperated' - 'list of email addresses to ' - 'send email to.')) - - # 'required' fields have not been assigned; should raise an exception. - self.assertRaises(Exception, self.settings.Validate) - self.settings.SetField('iterations', 2) - self.settings.SetField('remote', 'x86-alex.cros', append=True) - # Should run without exception now. 
- self.settings.Validate() - - @mock.patch.object(logger, 'GetLogger') - @mock.patch.object(download_images.ImageDownloader, 'Run') - @mock.patch.object(download_images, 'ImageDownloader') - def test_get_xbuddy_path(self, mock_downloader, mock_run, mock_logger): - - mock_run.return_value = 'fake_xbuddy_translation' - mock_downloader.Run = mock_run - board = 'lumpy' - chromeos_root = '/tmp/chromeos' - log_level = 'average' - - trybot_str = 'trybot-lumpy-paladin/R34-5417.0.0-b1506' - official_str = 'lumpy-release/R34-5417.0.0' - xbuddy_str = 'latest-dev' - autotest_path = '' - debug_path = '' - download_debug = False - - self.settings.GetXbuddyPath(trybot_str, autotest_path, debug_path, board, - chromeos_root, log_level, download_debug) - self.assertEqual(mock_run.call_count, 1) - self.assertEqual(mock_run.call_args_list[0][0], ( - '/tmp/chromeos', - 'remote/trybot-lumpy-paladin/R34-5417.0.0-b1506', - '', - '', - False, - )) - - mock_run.reset_mock() - self.settings.GetXbuddyPath(official_str, autotest_path, debug_path, board, - chromeos_root, log_level, download_debug) - self.assertEqual(mock_run.call_count, 1) - self.assertEqual( - mock_run.call_args_list[0][0], - ('/tmp/chromeos', 'remote/lumpy-release/R34-5417.0.0', '', '', False)) - - mock_run.reset_mock() - self.settings.GetXbuddyPath(xbuddy_str, autotest_path, debug_path, board, - chromeos_root, log_level, download_debug) - self.assertEqual(mock_run.call_count, 1) - self.assertEqual( - mock_run.call_args_list[0][0], - ('/tmp/chromeos', 'remote/lumpy/latest-dev', '', '', False)) - - if mock_logger: - return - - -if __name__ == '__main__': - unittest.main() + """setting test class.""" + + def setUp(self): + self.settings = settings.Settings("global_name", "global") + + def test_init(self): + self.assertEqual(self.settings.name, "global_name") + self.assertEqual(self.settings.settings_type, "global") + self.assertIsNone(self.settings.parent) + + def test_set_parent_settings(self): + self.assertIsNone(self.settings.parent) + settings_parent = {"fake_parent_entry": 0} + self.settings.SetParentSettings(settings_parent) + self.assertIsNotNone(self.settings.parent) + self.assertTrue(isinstance(self.settings.parent, dict)) + self.assertEqual(self.settings.parent, settings_parent) + + def test_add_field(self): + self.assertEqual(self.settings.fields, {}) + self.settings.AddField( + IntegerField( + "iterations", + default=1, + required=False, + description="Number of iterations to " "run the test.", + ) + ) + self.assertEqual(len(self.settings.fields), 1) + # Adding the same field twice raises an exception. + self.assertRaises( + Exception, + self.settings.AddField, + ( + IntegerField( + "iterations", + default=1, + required=False, + description="Number of iterations to run " "the test.", + ) + ), + ) + res = self.settings.fields["iterations"] + self.assertIsInstance(res, IntegerField) + self.assertEqual(res.Get(), 1) + + def test_set_field(self): + self.assertEqual(self.settings.fields, {}) + self.settings.AddField( + IntegerField( + "iterations", + default=1, + required=False, + description="Number of iterations to run the " "test.", + ) + ) + res = self.settings.fields["iterations"] + self.assertEqual(res.Get(), 1) + + self.settings.SetField("iterations", 10) + res = self.settings.fields["iterations"] + self.assertEqual(res.Get(), 10) + + # Setting a field that's not there raises an exception. 
+ self.assertRaises( + Exception, self.settings.SetField, "remote", "lumpy1.cros" + ) + + self.settings.AddField( + ListField( + "remote", + default=[], + description="A comma-separated list of ip's of " + "chromeos devices to run " + "experiments on.", + ) + ) + self.assertTrue(isinstance(self.settings.fields, dict)) + self.assertEqual(len(self.settings.fields), 2) + res = self.settings.fields["remote"] + self.assertEqual(res.Get(), []) + self.settings.SetField("remote", "lumpy1.cros", append=True) + self.settings.SetField("remote", "lumpy2.cros", append=True) + res = self.settings.fields["remote"] + self.assertEqual(res.Get(), ["lumpy1.cros", "lumpy2.cros"]) + + def test_get_field(self): + # Getting a field that's not there raises an exception. + self.assertRaises(Exception, self.settings.GetField, "iterations") + + # Getting a required field that hasn't been assigned raises an exception. + self.settings.AddField( + IntegerField( + "iterations", + required=True, + description="Number of iterations to " "run the test.", + ) + ) + self.assertIsNotNone(self.settings.fields["iterations"]) + self.assertRaises(Exception, self.settings.GetField, "iterations") + + # Set the value, then get it. + self.settings.SetField("iterations", 5) + res = self.settings.GetField("iterations") + self.assertEqual(res, 5) + + def test_inherit(self): + parent_settings = settings_factory.SettingsFactory().GetSettings( + "global", "global" + ) + label_settings = settings_factory.SettingsFactory().GetSettings( + "label", "label" + ) + self.assertEqual(parent_settings.GetField("chromeos_root"), "") + self.assertEqual(label_settings.GetField("chromeos_root"), "") + self.assertIsNone(label_settings.parent) + + parent_settings.SetField("chromeos_root", "/tmp/chromeos") + label_settings.SetParentSettings(parent_settings) + self.assertEqual( + parent_settings.GetField("chromeos_root"), "/tmp/chromeos" + ) + self.assertEqual(label_settings.GetField("chromeos_root"), "") + label_settings.Inherit() + self.assertEqual( + label_settings.GetField("chromeos_root"), "/tmp/chromeos" + ) + + def test_override(self): + self.settings.AddField( + ListField( + "email", + default=[], + description="Space-seperated" + "list of email addresses to send " + "email to.", + ) + ) + + global_settings = settings_factory.SettingsFactory().GetSettings( + "global", "global" + ) + + global_settings.SetField("email", "john.doe@google.com", append=True) + global_settings.SetField("email", "jane.smith@google.com", append=True) + + res = self.settings.GetField("email") + self.assertEqual(res, []) + + self.settings.Override(global_settings) + res = self.settings.GetField("email") + self.assertEqual(res, ["john.doe@google.com", "jane.smith@google.com"]) + + def test_validate(self): + + self.settings.AddField( + IntegerField( + "iterations", + required=True, + description="Number of iterations " "to run the test.", + ) + ) + self.settings.AddField( + ListField( + "remote", + default=[], + required=True, + description="A comma-separated list " + "of ip's of chromeos " + "devices to run experiments on.", + ) + ) + self.settings.AddField( + ListField( + "email", + default=[], + description="Space-seperated" + "list of email addresses to " + "send email to.", + ) + ) + + # 'required' fields have not been assigned; should raise an exception. + self.assertRaises(Exception, self.settings.Validate) + self.settings.SetField("iterations", 2) + self.settings.SetField("remote", "x86-alex.cros", append=True) + # Should run without exception now. 
+ self.settings.Validate() + + @mock.patch.object(logger, "GetLogger") + @mock.patch.object(download_images.ImageDownloader, "Run") + @mock.patch.object(download_images, "ImageDownloader") + def test_get_xbuddy_path(self, mock_downloader, mock_run, mock_logger): + + mock_run.return_value = "fake_xbuddy_translation" + mock_downloader.Run = mock_run + board = "lumpy" + chromeos_root = "/tmp/chromeos" + log_level = "average" + + trybot_str = "trybot-lumpy-paladin/R34-5417.0.0-b1506" + official_str = "lumpy-release/R34-5417.0.0" + xbuddy_str = "latest-dev" + autotest_path = "" + debug_path = "" + download_debug = False + + self.settings.GetXbuddyPath( + trybot_str, + autotest_path, + debug_path, + board, + chromeos_root, + log_level, + download_debug, + ) + self.assertEqual(mock_run.call_count, 1) + self.assertEqual( + mock_run.call_args_list[0][0], + ( + "/tmp/chromeos", + "remote/trybot-lumpy-paladin/R34-5417.0.0-b1506", + "", + "", + False, + ), + ) + + mock_run.reset_mock() + self.settings.GetXbuddyPath( + official_str, + autotest_path, + debug_path, + board, + chromeos_root, + log_level, + download_debug, + ) + self.assertEqual(mock_run.call_count, 1) + self.assertEqual( + mock_run.call_args_list[0][0], + ( + "/tmp/chromeos", + "remote/lumpy-release/R34-5417.0.0", + "", + "", + False, + ), + ) + + mock_run.reset_mock() + self.settings.GetXbuddyPath( + xbuddy_str, + autotest_path, + debug_path, + board, + chromeos_root, + log_level, + download_debug, + ) + self.assertEqual(mock_run.call_count, 1) + self.assertEqual( + mock_run.call_args_list[0][0], + ("/tmp/chromeos", "remote/lumpy/latest-dev", "", "", False), + ) + + if mock_logger: + return + + +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py index fe6eca4b..b3c5879d 100644 --- a/crosperf/suite_runner.py +++ b/crosperf/suite_runner.py @@ -16,317 +16,377 @@ import time from cros_utils import command_executer -TEST_THAT_PATH = '/usr/bin/test_that' -TAST_PATH = '/usr/bin/tast' -CROSFLEET_PATH = 'crosfleet' -GS_UTIL = 'src/chromium/depot_tools/gsutil.py' -AUTOTEST_DIR = '/mnt/host/source/src/third_party/autotest/files' -CHROME_MOUNT_DIR = '/tmp/chrome_root' + +TEST_THAT_PATH = "/usr/bin/test_that" +TAST_PATH = "/usr/bin/tast" +CROSFLEET_PATH = "crosfleet" +GS_UTIL = "src/chromium/depot_tools/gsutil.py" +AUTOTEST_DIR = "/mnt/host/source/src/third_party/autotest/files" +CHROME_MOUNT_DIR = "/tmp/chrome_root" def GetProfilerArgs(profiler_args): - # Remove "--" from in front of profiler args. - args_list = shlex.split(profiler_args) - new_list = [] - for arg in args_list: - if arg[0:2] == '--': - arg = arg[2:] - new_list.append(arg) - args_list = new_list - - # Remove "perf_options=" from middle of profiler args. - new_list = [] - for arg in args_list: - idx = arg.find('perf_options=') - if idx != -1: - prefix = arg[0:idx] - suffix = arg[idx + len('perf_options=') + 1:-1] - new_arg = prefix + "'" + suffix + "'" - new_list.append(new_arg) - else: - new_list.append(arg) - args_list = new_list - - return ' '.join(args_list) + # Remove "--" from in front of profiler args. + args_list = shlex.split(profiler_args) + new_list = [] + for arg in args_list: + if arg[0:2] == "--": + arg = arg[2:] + new_list.append(arg) + args_list = new_list + + # Remove "perf_options=" from middle of profiler args. 
+    new_list = []
+    for arg in args_list:
+        idx = arg.find("perf_options=")
+        if idx != -1:
+            prefix = arg[0:idx]
+            suffix = arg[idx + len("perf_options=") + 1 : -1]
+            new_arg = prefix + "'" + suffix + "'"
+            new_list.append(new_arg)
+        else:
+            new_list.append(arg)
+    args_list = new_list
+
+    return " ".join(args_list)


 def GetDutConfigArgs(dut_config):
-  return 'dut_config={}'.format(pipes.quote(json.dumps(dut_config)))
+    return "dut_config={}".format(pipes.quote(json.dumps(dut_config)))


 class SuiteRunner(object):
-  """This defines the interface from crosperf to test script."""
-
-  def __init__(self,
-               dut_config,
-               logger_to_use=None,
-               log_level='verbose',
-               cmd_exec=None,
-               cmd_term=None):
-    self.logger = logger_to_use
-    self.log_level = log_level
-    self._ce = cmd_exec or command_executer.GetCommandExecuter(
-        self.logger, log_level=self.log_level)
-    # DUT command executer.
-    # Will be initialized and used within Run.
-    self._ct = cmd_term or command_executer.CommandTerminator()
-    self.dut_config = dut_config
-
-  def Run(self, cros_machine, label, benchmark, test_args, profiler_args):
-    machine_name = cros_machine.name
-    for i in range(0, benchmark.retries + 1):
-      if label.crosfleet:
-        ret_tup = self.Crosfleet_Run(label, benchmark, test_args, profiler_args)
-      else:
-        if benchmark.suite == 'tast':
-          ret_tup = self.Tast_Run(machine_name, label, benchmark)
+    """This defines the interface from crosperf to test script."""
+
+    def __init__(
+        self,
+        dut_config,
+        logger_to_use=None,
+        log_level="verbose",
+        cmd_exec=None,
+        cmd_term=None,
+    ):
+        self.logger = logger_to_use
+        self.log_level = log_level
+        self._ce = cmd_exec or command_executer.GetCommandExecuter(
+            self.logger, log_level=self.log_level
+        )
+        # DUT command executer.
+        # Will be initialized and used within Run.
+        self._ct = cmd_term or command_executer.CommandTerminator()
+        self.dut_config = dut_config
+
+    def Run(self, cros_machine, label, benchmark, test_args, profiler_args):
+        machine_name = cros_machine.name
+        for i in range(0, benchmark.retries + 1):
+            if label.crosfleet:
+                ret_tup = self.Crosfleet_Run(
+                    label, benchmark, test_args, profiler_args
+                )
+            else:
+                if benchmark.suite == "tast":
+                    ret_tup = self.Tast_Run(machine_name, label, benchmark)
+                else:
+                    ret_tup = self.Test_That_Run(
+                        machine_name, label, benchmark, test_args, profiler_args
+                    )
+            if ret_tup[0] != 0:
+                self.logger.LogOutput(
+                    "benchmark %s failed. Retries left: %s"
+                    % (benchmark.name, benchmark.retries - i)
+                )
+            elif i > 0:
+                self.logger.LogOutput(
+                    "benchmark %s succeeded after %s retries"
+                    % (benchmark.name, i)
+                )
+                break
+            else:
+                self.logger.LogOutput(
+                    "benchmark %s succeeded on first try" % benchmark.name
+                )
+                break
+        return ret_tup
+
+    def RemoveTelemetryTempFile(self, machine, chromeos_root):
+        filename = "telemetry@%s" % machine
+        fullname = os.path.join(chromeos_root, "chroot", "tmp", filename)
+        if os.path.exists(fullname):
+            os.remove(fullname)
+
+    def GenTestArgs(self, benchmark, test_args, profiler_args):
+        args_list = []
+
+        if benchmark.suite != "telemetry_Crosperf" and profiler_args:
+            self.logger.LogFatal(
+                "Tests other than telemetry_Crosperf do not "
+                "support profiler."
+            )
+
+        if test_args:
+            # Strip double quotes off args (so we can wrap them in single
+            # quotes, to pass through to Telemetry).
+ if test_args[0] == '"' and test_args[-1] == '"': + test_args = test_args[1:-1] + args_list.append("test_args='%s'" % test_args) + + args_list.append(GetDutConfigArgs(self.dut_config)) + + if not ( + benchmark.suite == "telemetry_Crosperf" + or benchmark.suite == "crosperf_Wrapper" + ): + self.logger.LogWarning( + "Please make sure the server test has stage for " + "device setup.\n" + ) else: - ret_tup = self.Test_That_Run(machine_name, label, benchmark, - test_args, profiler_args) - if ret_tup[0] != 0: - self.logger.LogOutput('benchmark %s failed. Retries left: %s' % - (benchmark.name, benchmark.retries - i)) - elif i > 0: - self.logger.LogOutput('benchmark %s succeded after %s retries' % - (benchmark.name, i)) - break - else: - self.logger.LogOutput('benchmark %s succeded on first try' % - benchmark.name) - break - return ret_tup - - def RemoveTelemetryTempFile(self, machine, chromeos_root): - filename = 'telemetry@%s' % machine - fullname = os.path.join(chromeos_root, 'chroot', 'tmp', filename) - if os.path.exists(fullname): - os.remove(fullname) - - def GenTestArgs(self, benchmark, test_args, profiler_args): - args_list = [] - - if benchmark.suite != 'telemetry_Crosperf' and profiler_args: - self.logger.LogFatal('Tests other than telemetry_Crosperf do not ' - 'support profiler.') - - if test_args: - # Strip double quotes off args (so we can wrap them in single - # quotes, to pass through to Telemetry). - if test_args[0] == '"' and test_args[-1] == '"': - test_args = test_args[1:-1] - args_list.append("test_args='%s'" % test_args) - - args_list.append(GetDutConfigArgs(self.dut_config)) - - if not (benchmark.suite == 'telemetry_Crosperf' or - benchmark.suite == 'crosperf_Wrapper'): - self.logger.LogWarning('Please make sure the server test has stage for ' - 'device setup.\n') - else: - args_list.append('test=%s' % benchmark.test_name) - if benchmark.suite == 'telemetry_Crosperf': - args_list.append('run_local=%s' % benchmark.run_local) - args_list.append(GetProfilerArgs(profiler_args)) - - return args_list - - # TODO(zhizhouy): Currently do not support passing arguments or running - # customized tast tests, as we do not have such requirements. - def Tast_Run(self, machine, label, benchmark): - # Remove existing tast results - command = 'rm -rf /usr/local/autotest/results/*' - self._ce.CrosRunCommand( - command, machine=machine, chromeos_root=label.chromeos_root) - - command = ' '.join( - [TAST_PATH, 'run', '-build=False', machine, benchmark.test_name]) - - if self.log_level != 'verbose': - self.logger.LogOutput('Running test.') - self.logger.LogOutput('CMD: %s' % command) - - return self._ce.ChrootRunCommandWOutput( - label.chromeos_root, command, command_terminator=self._ct) - - def Test_That_Run(self, machine, label, benchmark, test_args, profiler_args): - """Run the test_that test..""" - - # Remove existing test_that results - command = 'rm -rf /usr/local/autotest/results/*' - self._ce.CrosRunCommand( - command, machine=machine, chromeos_root=label.chromeos_root) - - if benchmark.suite == 'telemetry_Crosperf': - if not os.path.isdir(label.chrome_src): - self.logger.LogFatal('Cannot find chrome src dir to ' - 'run telemetry: %s' % label.chrome_src) - # Check for and remove temporary file that may have been left by - # previous telemetry runs (and which might prevent this run from - # working). - self.RemoveTelemetryTempFile(machine, label.chromeos_root) - - # --autotest_dir specifies which autotest directory to use. 
- autotest_dir_arg = '--autotest_dir=%s' % ( - label.autotest_path if label.autotest_path else AUTOTEST_DIR) - - # --fast avoids unnecessary copies of syslogs. - fast_arg = '--fast' - board_arg = '--board=%s' % label.board - - args_list = self.GenTestArgs(benchmark, test_args, profiler_args) - args_arg = '--args=%s' % pipes.quote(' '.join(args_list)) - - command = ' '.join([ - TEST_THAT_PATH, autotest_dir_arg, fast_arg, board_arg, args_arg, - machine, benchmark.suite if - (benchmark.suite == 'telemetry_Crosperf' or - benchmark.suite == 'crosperf_Wrapper') else benchmark.test_name - ]) - - # Use --no-ns-pid so that cros_sdk does not create a different - # process namespace and we can kill process created easily by their - # process group. - chrome_root_options = ('--no-ns-pid ' - '--chrome_root={0} --chrome_root_mount={1} ' - 'FEATURES="-usersandbox" ' - 'CHROME_ROOT={1}'.format(label.chrome_src, - CHROME_MOUNT_DIR)) - - if self.log_level != 'verbose': - self.logger.LogOutput('Running test.') - self.logger.LogOutput('CMD: %s' % command) - - return self._ce.ChrootRunCommandWOutput( - label.chromeos_root, - command, - command_terminator=self._ct, - cros_sdk_options=chrome_root_options) - - def DownloadResult(self, label, task_id): - gsutil_cmd = os.path.join(label.chromeos_root, GS_UTIL) - result_dir = 'gs://chromeos-autotest-results/swarming-%s' % task_id - download_path = os.path.join(label.chromeos_root, 'chroot/tmp') - ls_command = '%s ls %s' % (gsutil_cmd, - os.path.join(result_dir, 'autoserv_test')) - cp_command = '%s -mq cp -r %s %s' % (gsutil_cmd, result_dir, download_path) - - # Server sometimes will not be able to generate the result directory right - # after the test. Will try to access this gs location every 60s for - # RETRY_LIMIT mins. - t = 0 - RETRY_LIMIT = 10 - while t < RETRY_LIMIT: - t += 1 - status = self._ce.RunCommand(ls_command, print_to_console=False) - if status == 0: - break - if t < RETRY_LIMIT: - self.logger.LogOutput('Result directory not generated yet, ' - 'retry (%d) in 60s.' % t) + args_list.append("test=%s" % benchmark.test_name) + if benchmark.suite == "telemetry_Crosperf": + args_list.append("run_local=%s" % benchmark.run_local) + args_list.append(GetProfilerArgs(profiler_args)) + + return args_list + + # TODO(zhizhouy): Currently do not support passing arguments or running + # customized tast tests, as we do not have such requirements. 
+ def Tast_Run(self, machine, label, benchmark): + # Remove existing tast results + command = "rm -rf /usr/local/autotest/results/*" + self._ce.CrosRunCommand( + command, machine=machine, chromeos_root=label.chromeos_root + ) + + command = " ".join( + [TAST_PATH, "run", "-build=False", machine, benchmark.test_name] + ) + + if self.log_level != "verbose": + self.logger.LogOutput("Running test.") + self.logger.LogOutput("CMD: %s" % command) + + return self._ce.ChrootRunCommandWOutput( + label.chromeos_root, command, command_terminator=self._ct + ) + + def Test_That_Run( + self, machine, label, benchmark, test_args, profiler_args + ): + """Run the test_that test..""" + + # Remove existing test_that results + command = "rm -rf /usr/local/autotest/results/*" + self._ce.CrosRunCommand( + command, machine=machine, chromeos_root=label.chromeos_root + ) + + if benchmark.suite == "telemetry_Crosperf": + if not os.path.isdir(label.chrome_src): + self.logger.LogFatal( + "Cannot find chrome src dir to " + "run telemetry: %s" % label.chrome_src + ) + # Check for and remove temporary file that may have been left by + # previous telemetry runs (and which might prevent this run from + # working). + self.RemoveTelemetryTempFile(machine, label.chromeos_root) + + # --autotest_dir specifies which autotest directory to use. + autotest_dir_arg = "--autotest_dir=%s" % ( + label.autotest_path if label.autotest_path else AUTOTEST_DIR + ) + + # --fast avoids unnecessary copies of syslogs. + fast_arg = "--fast" + board_arg = "--board=%s" % label.board + + args_list = self.GenTestArgs(benchmark, test_args, profiler_args) + args_arg = "--args=%s" % pipes.quote(" ".join(args_list)) + + command = " ".join( + [ + TEST_THAT_PATH, + autotest_dir_arg, + fast_arg, + board_arg, + args_arg, + machine, + benchmark.suite + if ( + benchmark.suite == "telemetry_Crosperf" + or benchmark.suite == "crosperf_Wrapper" + ) + else benchmark.test_name, + ] + ) + + # Use --no-ns-pid so that cros_sdk does not create a different + # process namespace and we can kill process created easily by their + # process group. + chrome_root_options = ( + "--no-ns-pid " + "--chrome_root={0} --chrome_root_mount={1} " + 'FEATURES="-usersandbox" ' + "CHROME_ROOT={1}".format(label.chrome_src, CHROME_MOUNT_DIR) + ) + + if self.log_level != "verbose": + self.logger.LogOutput("Running test.") + self.logger.LogOutput("CMD: %s" % command) + + return self._ce.ChrootRunCommandWOutput( + label.chromeos_root, + command, + command_terminator=self._ct, + cros_sdk_options=chrome_root_options, + ) + + def DownloadResult(self, label, task_id): + gsutil_cmd = os.path.join(label.chromeos_root, GS_UTIL) + result_dir = "gs://chromeos-autotest-results/swarming-%s" % task_id + download_path = os.path.join(label.chromeos_root, "chroot/tmp") + ls_command = "%s ls %s" % ( + gsutil_cmd, + os.path.join(result_dir, "autoserv_test"), + ) + cp_command = "%s -mq cp -r %s %s" % ( + gsutil_cmd, + result_dir, + download_path, + ) + + # Server sometimes will not be able to generate the result directory right + # after the test. Will try to access this gs location every 60s for + # RETRY_LIMIT mins. + t = 0 + RETRY_LIMIT = 10 + while t < RETRY_LIMIT: + t += 1 + status = self._ce.RunCommand(ls_command, print_to_console=False) + if status == 0: + break + if t < RETRY_LIMIT: + self.logger.LogOutput( + "Result directory not generated yet, " + "retry (%d) in 60s." 
% t + ) + time.sleep(60) + else: + self.logger.LogOutput( + "No result directory for task %s" % task_id + ) + return status + + # Wait for 60s to make sure server finished writing to gs location. time.sleep(60) - else: - self.logger.LogOutput('No result directory for task %s' % task_id) + + status = self._ce.RunCommand(cp_command) + if status != 0: + self.logger.LogOutput( + "Cannot download results from task %s" % task_id + ) + else: + self.logger.LogOutput("Result downloaded for task %s" % task_id) return status - # Wait for 60s to make sure server finished writing to gs location. - time.sleep(60) - - status = self._ce.RunCommand(cp_command) - if status != 0: - self.logger.LogOutput('Cannot download results from task %s' % task_id) - else: - self.logger.LogOutput('Result downloaded for task %s' % task_id) - return status - - def Crosfleet_Run(self, label, benchmark, test_args, profiler_args): - """Run the test via crosfleet..""" - options = [] - if label.board: - options.append('-board=%s' % label.board) - if label.build: - options.append('-image=%s' % label.build) - # TODO: now only put toolchain pool here, user need to be able to specify - # which pool to use. Need to request feature to not use this option at all. - options.append('-pool=toolchain') - - args_list = self.GenTestArgs(benchmark, test_args, profiler_args) - options.append('-test-args=%s' % pipes.quote(' '.join(args_list))) - - dimensions = [] - for dut in label.remote: - dimensions.append('-dim dut_name:%s' % dut.rstrip('.cros')) - - command = (('%s create-test %s %s %s') % \ - (CROSFLEET_PATH, ' '.join(dimensions), ' '.join(options), - benchmark.suite if - (benchmark.suite == 'telemetry_Crosperf' or - benchmark.suite == 'crosperf_Wrapper') - else benchmark.test_name)) - - if self.log_level != 'verbose': - self.logger.LogOutput('Starting crosfleet test.') - self.logger.LogOutput('CMD: %s' % command) - ret_tup = self._ce.RunCommandWOutput(command, command_terminator=self._ct) - - if ret_tup[0] != 0: - self.logger.LogOutput('Crosfleet test not created successfully.') - return ret_tup - - # Std output of the command will look like: - # Created request at https://ci.chromium.org/../cros_test_platform/b12345 - # We want to parse it and get the id number of the task, which is the - # number in the very end of the link address. - task_id = ret_tup[1].strip().split('b')[-1] - - command = ('crosfleet wait-task %s' % task_id) - if self.log_level != 'verbose': - self.logger.LogOutput('Waiting for crosfleet test to finish.') - self.logger.LogOutput('CMD: %s' % command) - - ret_tup = self._ce.RunCommandWOutput(command, command_terminator=self._ct) - - # The output of `wait-task` command will be a combination of verbose and a - # json format result in the end. The json result looks like this: - # {"task-result": - # {"name":"Test Platform Invocation", - # "state":"", "failure":false, "success":true, - # "task-run-id":"12345", - # "task-run-url":"https://ci.chromium.org/.../cros_test_platform/b12345", - # "task-logs-url":"" - # }, - # "stdout":"", - # "child-results": - # [{"name":"graphics_WebGLAquarium", - # "state":"", "failure":false, "success":true, "task-run-id":"", - # "task-run-url":"https://chromeos-swarming.appspot.com/task?id=1234", - # "task-logs-url":"https://stainless.corp.google.com/1234/"} - # ] - # } - # We need the task id of the child-results to download result. 
-    output = json.loads(ret_tup[1].split('\n')[-1])
-    output = output['child-results'][0]
-    if output['success']:
-      task_id = output['task-run-url'].split('=')[-1]
-      if self.DownloadResult(label, task_id) == 0:
-        result_dir = '\nResults placed in tmp/swarming-%s\n' % task_id
-        return (ret_tup[0], result_dir, ret_tup[2])
-    return ret_tup
-
-  def CommandTerminator(self):
-    return self._ct
-
-  def Terminate(self):
-    self._ct.Terminate()
+    def Crosfleet_Run(self, label, benchmark, test_args, profiler_args):
+        """Run the test via crosfleet."""
+        options = []
+        if label.board:
+            options.append("-board=%s" % label.board)
+        if label.build:
+            options.append("-image=%s" % label.build)
+        # TODO: Only the toolchain pool is used for now; the user needs to be
+        # able to specify which pool to use. Need to request a feature to not
+        # use this option at all.
+        options.append("-pool=toolchain")
+
+        args_list = self.GenTestArgs(benchmark, test_args, profiler_args)
+        options.append("-test-args=%s" % pipes.quote(" ".join(args_list)))
+
+        dimensions = []
+        for dut in label.remote:
+            dimensions.append("-dim dut_name:%s" % dut.rstrip(".cros"))
+
+        command = ("%s create-test %s %s %s") % (
+            CROSFLEET_PATH,
+            " ".join(dimensions),
+            " ".join(options),
+            benchmark.suite
+            if (
+                benchmark.suite == "telemetry_Crosperf"
+                or benchmark.suite == "crosperf_Wrapper"
+            )
+            else benchmark.test_name,
+        )
+
+        if self.log_level != "verbose":
+            self.logger.LogOutput("Starting crosfleet test.")
+            self.logger.LogOutput("CMD: %s" % command)
+        ret_tup = self._ce.RunCommandWOutput(
+            command, command_terminator=self._ct
+        )
+
+        if ret_tup[0] != 0:
+            self.logger.LogOutput("Crosfleet test not created successfully.")
+            return ret_tup
+
+        # The stdout of the command will look like:
+        # Created request at https://ci.chromium.org/../cros_test_platform/b12345
+        # We want to parse it and get the id number of the task, which is the
+        # number at the very end of the link address.
+        task_id = ret_tup[1].strip().split("b")[-1]
+
+        command = "crosfleet wait-task %s" % task_id
+        if self.log_level != "verbose":
+            self.logger.LogOutput("Waiting for crosfleet test to finish.")
+            self.logger.LogOutput("CMD: %s" % command)
+
+        ret_tup = self._ce.RunCommandWOutput(
+            command, command_terminator=self._ct
+        )
+
+        # The output of the `wait-task` command will be a combination of
+        # verbose logging and a JSON-format result at the end. The json result
+        # looks like this:
+        # {"task-result":
+        #   {"name":"Test Platform Invocation",
+        #    "state":"", "failure":false, "success":true,
+        #    "task-run-id":"12345",
+        #    "task-run-url":"https://ci.chromium.org/.../cros_test_platform/b12345",
+        #    "task-logs-url":""
+        #   },
+        #  "stdout":"",
+        #  "child-results":
+        #    [{"name":"graphics_WebGLAquarium",
+        #      "state":"", "failure":false, "success":true, "task-run-id":"",
+        #      "task-run-url":"https://chromeos-swarming.appspot.com/task?id=1234",
+        #      "task-logs-url":"https://stainless.corp.google.com/1234/"}
+        #    ]
+        # }
+        # We need the task id of the child-results to download the result.
+ output = json.loads(ret_tup[1].split("\n")[-1]) + output = output["child-results"][0] + if output["success"]: + task_id = output["task-run-url"].split("=")[-1] + if self.DownloadResult(label, task_id) == 0: + result_dir = "\nResults placed in tmp/swarming-%s\n" % task_id + return (ret_tup[0], result_dir, ret_tup[2]) + return ret_tup + + def CommandTerminator(self): + return self._ct + + def Terminate(self): + self._ct.Terminate() class MockSuiteRunner(object): - """Mock suite runner for test.""" + """Mock suite runner for test.""" - def __init__(self): - self._true = True + def __init__(self): + self._true = True - def Run(self, *_args): - if self._true: - return [0, '', ''] - else: - return [0, '', ''] + def Run(self, *_args): + if self._true: + return [0, "", ""] + else: + return [0, "", ""] diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py index b080c91e..a97e1638 100755 --- a/crosperf/suite_runner_unittest.py +++ b/crosperf/suite_runner_unittest.py @@ -10,295 +10,394 @@ from __future__ import print_function import json - import unittest import unittest.mock as mock -import suite_runner -import label - from benchmark import Benchmark - from cros_utils import command_executer from cros_utils import logger +import label from machine_manager import MockCrosMachine +import suite_runner class SuiteRunnerTest(unittest.TestCase): - """Class of SuiteRunner test.""" - mock_json = mock.Mock(spec=json) - mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) - mock_cmd_term = mock.Mock(spec=command_executer.CommandTerminator) - mock_logger = mock.Mock(spec=logger.Logger) - mock_label = label.MockLabel('lumpy', 'build', 'lumpy_chromeos_image', '', '', - '/tmp/chromeos', 'lumpy', - ['lumpy1.cros', 'lumpy.cros2'], '', '', False, - 'average', 'gcc', False, '') - telemetry_crosperf_bench = Benchmark( - 'b1_test', # name - 'octane', # test_name - '', # test_args - 3, # iterations - False, # rm_chroot_tmp - 'record -e cycles', # perf_args - 'telemetry_Crosperf', # suite - True) # show_all_results - - crosperf_wrapper_bench = Benchmark( - 'b2_test', # name - 'webgl', # test_name - '', # test_args - 3, # iterations - False, # rm_chroot_tmp - '', # perf_args - 'crosperf_Wrapper') # suite - - tast_bench = Benchmark( - 'b3_test', # name - 'platform.ReportDiskUsage', # test_name - '', # test_args - 1, # iterations - False, # rm_chroot_tmp - '', # perf_args - 'tast') # suite - - def __init__(self, *args, **kwargs): - super(SuiteRunnerTest, self).__init__(*args, **kwargs) - self.crosfleet_run_args = [] - self.test_that_args = [] - self.tast_args = [] - self.call_crosfleet_run = False - self.call_test_that_run = False - self.call_tast_run = False - - def setUp(self): - self.runner = suite_runner.SuiteRunner({}, self.mock_logger, 'verbose', - self.mock_cmd_exec, - self.mock_cmd_term) - - def test_get_profiler_args(self): - input_str = ("--profiler=custom_perf --profiler_args='perf_options" - '="record -a -e cycles,instructions"\'') - output_str = ("profiler=custom_perf profiler_args='record -a -e " - "cycles,instructions'") - res = suite_runner.GetProfilerArgs(input_str) - self.assertEqual(res, output_str) - - def test_get_dut_config_args(self): - dut_config = {'enable_aslr': False, 'top_interval': 1.0} - output_str = ('dut_config=' - "'" - '{"enable_aslr": ' - 'false, "top_interval": 1.0}' - "'" - '') - res = suite_runner.GetDutConfigArgs(dut_config) - self.assertEqual(res, output_str) - - def test_run(self): - - def reset(): - self.test_that_args = [] - 
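# [Editor's sketch, not part of the patch] The dut_config serialisation
# pinned down by test_get_dut_config_args above, sketched as a one-liner.
# GetDutConfigArgs itself lives in suite_runner.py and is not part of this
# hunk, so treat this as an inference from the expected string, not the
# actual implementation.
import json

def gen_dut_config_args(dut_config):
    return "dut_config='%s'" % json.dumps(dut_config)

assert gen_dut_config_args({"enable_aslr": False, "top_interval": 1.0}) == (
    'dut_config=\'{"enable_aslr": false, "top_interval": 1.0}\''
)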
self.crosfleet_run_args = [] - self.tast_args = [] - self.call_test_that_run = False - self.call_crosfleet_run = False - self.call_tast_run = False - - def FakeCrosfleetRun(test_label, benchmark, test_args, profiler_args): - self.crosfleet_run_args = [ - test_label, benchmark, test_args, profiler_args - ] - self.call_crosfleet_run = True - return 'Ran FakeCrosfleetRun' - - def FakeTestThatRun(machine, test_label, benchmark, test_args, - profiler_args): - self.test_that_args = [ - machine, test_label, benchmark, test_args, profiler_args - ] - self.call_test_that_run = True - return 'Ran FakeTestThatRun' - - def FakeTastRun(machine, test_label, benchmark): - self.tast_args = [machine, test_label, benchmark] - self.call_tast_run = True - return 'Ran FakeTastRun' - - self.runner.Crosfleet_Run = FakeCrosfleetRun - self.runner.Test_That_Run = FakeTestThatRun - self.runner.Tast_Run = FakeTastRun - - self.runner.dut_config['enable_aslr'] = False - self.runner.dut_config['cooldown_time'] = 0 - self.runner.dut_config['governor'] = 'fake_governor' - self.runner.dut_config['cpu_freq_pct'] = 65 - self.runner.dut_config['intel_pstate'] = 'no_hwp' - machine = 'fake_machine' - cros_machine = MockCrosMachine(machine, self.mock_label.chromeos_root, - self.mock_logger) - test_args = '' - profiler_args = '' - - # Test crosfleet run for telemetry_Crosperf and crosperf_Wrapper benchmarks. - self.mock_label.crosfleet = True - reset() - self.runner.Run(cros_machine, self.mock_label, self.crosperf_wrapper_bench, - test_args, profiler_args) - self.assertTrue(self.call_crosfleet_run) - self.assertFalse(self.call_test_that_run) - self.assertEqual(self.crosfleet_run_args, - [self.mock_label, self.crosperf_wrapper_bench, '', '']) - - reset() - self.runner.Run(cros_machine, self.mock_label, - self.telemetry_crosperf_bench, test_args, profiler_args) - self.assertTrue(self.call_crosfleet_run) - self.assertFalse(self.call_test_that_run) - self.assertEqual(self.crosfleet_run_args, - [self.mock_label, self.telemetry_crosperf_bench, '', '']) - - # Test test_that run for telemetry_Crosperf and crosperf_Wrapper benchmarks. - self.mock_label.crosfleet = False - reset() - self.runner.Run(cros_machine, self.mock_label, self.crosperf_wrapper_bench, - test_args, profiler_args) - self.assertTrue(self.call_test_that_run) - self.assertFalse(self.call_crosfleet_run) - self.assertEqual( - self.test_that_args, - ['fake_machine', self.mock_label, self.crosperf_wrapper_bench, '', '']) - - reset() - self.runner.Run(cros_machine, self.mock_label, - self.telemetry_crosperf_bench, test_args, profiler_args) - self.assertTrue(self.call_test_that_run) - self.assertFalse(self.call_crosfleet_run) - self.assertEqual(self.test_that_args, [ - 'fake_machine', self.mock_label, self.telemetry_crosperf_bench, '', '' - ]) - - # Test tast run for tast benchmarks. 
- reset() - self.runner.Run(cros_machine, self.mock_label, self.tast_bench, '', '') - self.assertTrue(self.call_tast_run) - self.assertFalse(self.call_test_that_run) - self.assertFalse(self.call_crosfleet_run) - self.assertEqual(self.tast_args, - ['fake_machine', self.mock_label, self.tast_bench]) - - def test_gen_test_args(self): - test_args = '--iterations=2' - perf_args = 'record -a -e cycles' - - # Test crosperf_Wrapper benchmarks arg list generation - args_list = ["test_args='--iterations=2'", "dut_config='{}'", 'test=webgl'] - res = self.runner.GenTestArgs(self.crosperf_wrapper_bench, test_args, '') - self.assertCountEqual(res, args_list) - - # Test telemetry_Crosperf benchmarks arg list generation - args_list = [ - "test_args='--iterations=2'", "dut_config='{}'", 'test=octane', - 'run_local=False' - ] - args_list.append(suite_runner.GetProfilerArgs(perf_args)) - res = self.runner.GenTestArgs(self.telemetry_crosperf_bench, test_args, - perf_args) - self.assertCountEqual(res, args_list) - - @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand') - @mock.patch.object(command_executer.CommandExecuter, - 'ChrootRunCommandWOutput') - def test_tast_run(self, mock_chroot_runcmd, mock_cros_runcmd): - mock_chroot_runcmd.return_value = 0 - self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd - self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd - res = self.runner.Tast_Run('lumpy1.cros', self.mock_label, self.tast_bench) - self.assertEqual(mock_cros_runcmd.call_count, 1) - self.assertEqual(mock_chroot_runcmd.call_count, 1) - self.assertEqual(res, 0) - self.assertEqual(mock_cros_runcmd.call_args_list[0][0], - ('rm -rf /usr/local/autotest/results/*',)) - args_list = mock_chroot_runcmd.call_args_list[0][0] - args_dict = mock_chroot_runcmd.call_args_list[0][1] - self.assertEqual(len(args_list), 2) - self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term) - - @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand') - @mock.patch.object(command_executer.CommandExecuter, - 'ChrootRunCommandWOutput') - @mock.patch.object(logger.Logger, 'LogFatal') - def test_test_that_run(self, mock_log_fatal, mock_chroot_runcmd, - mock_cros_runcmd): - mock_log_fatal.side_effect = SystemExit() - self.runner.logger.LogFatal = mock_log_fatal - # Test crosperf_Wrapper benchmarks cannot take perf_args - raised_exception = False - try: - self.runner.Test_That_Run('lumpy1.cros', self.mock_label, - self.crosperf_wrapper_bench, '', - 'record -a -e cycles') - except SystemExit: - raised_exception = True - self.assertTrue(raised_exception) - - mock_chroot_runcmd.return_value = 0 - self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd - self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd - res = self.runner.Test_That_Run('lumpy1.cros', self.mock_label, - self.crosperf_wrapper_bench, - '--iterations=2', '') - self.assertEqual(mock_cros_runcmd.call_count, 1) - self.assertEqual(mock_chroot_runcmd.call_count, 1) - self.assertEqual(res, 0) - self.assertEqual(mock_cros_runcmd.call_args_list[0][0], - ('rm -rf /usr/local/autotest/results/*',)) - args_list = mock_chroot_runcmd.call_args_list[0][0] - args_dict = mock_chroot_runcmd.call_args_list[0][1] - self.assertEqual(len(args_list), 2) - self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term) - - @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput') - @mock.patch.object(json, 'loads') - def test_crosfleet_run_client(self, mock_json_loads, mock_runcmd): - - def FakeDownloadResult(l, 
task_id): - if l and task_id: - self.assertEqual(task_id, '12345') - return 0 - - mock_runcmd.return_value = ( - 0, - 'Created Swarming task https://swarming/task/b12345', - '', + """Class of SuiteRunner test.""" + + mock_json = mock.Mock(spec=json) + mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter) + mock_cmd_term = mock.Mock(spec=command_executer.CommandTerminator) + mock_logger = mock.Mock(spec=logger.Logger) + mock_label = label.MockLabel( + "lumpy", + "build", + "lumpy_chromeos_image", + "", + "", + "/tmp/chromeos", + "lumpy", + ["lumpy1.cros", "lumpy.cros2"], + "", + "", + False, + "average", + "gcc", + False, + "", + ) + telemetry_crosperf_bench = Benchmark( + "b1_test", # name + "octane", # test_name + "", # test_args + 3, # iterations + False, # rm_chroot_tmp + "record -e cycles", # perf_args + "telemetry_Crosperf", # suite + True, + ) # show_all_results + + crosperf_wrapper_bench = Benchmark( + "b2_test", # name + "webgl", # test_name + "", # test_args + 3, # iterations + False, # rm_chroot_tmp + "", # perf_args + "crosperf_Wrapper", + ) # suite + + tast_bench = Benchmark( + "b3_test", # name + "platform.ReportDiskUsage", # test_name + "", # test_args + 1, # iterations + False, # rm_chroot_tmp + "", # perf_args + "tast", + ) # suite + + def __init__(self, *args, **kwargs): + super(SuiteRunnerTest, self).__init__(*args, **kwargs) + self.crosfleet_run_args = [] + self.test_that_args = [] + self.tast_args = [] + self.call_crosfleet_run = False + self.call_test_that_run = False + self.call_tast_run = False + + def setUp(self): + self.runner = suite_runner.SuiteRunner( + {}, + self.mock_logger, + "verbose", + self.mock_cmd_exec, + self.mock_cmd_term, + ) + + def test_get_profiler_args(self): + input_str = ( + "--profiler=custom_perf --profiler_args='perf_options" + '="record -a -e cycles,instructions"\'' + ) + output_str = ( + "profiler=custom_perf profiler_args='record -a -e " + "cycles,instructions'" + ) + res = suite_runner.GetProfilerArgs(input_str) + self.assertEqual(res, output_str) + + def test_get_dut_config_args(self): + dut_config = {"enable_aslr": False, "top_interval": 1.0} + output_str = ( + "dut_config=" + "'" + '{"enable_aslr": ' + 'false, "top_interval": 1.0}' + "'" + "" + ) + res = suite_runner.GetDutConfigArgs(dut_config) + self.assertEqual(res, output_str) + + def test_run(self): + def reset(): + self.test_that_args = [] + self.crosfleet_run_args = [] + self.tast_args = [] + self.call_test_that_run = False + self.call_crosfleet_run = False + self.call_tast_run = False + + def FakeCrosfleetRun(test_label, benchmark, test_args, profiler_args): + self.crosfleet_run_args = [ + test_label, + benchmark, + test_args, + profiler_args, + ] + self.call_crosfleet_run = True + return "Ran FakeCrosfleetRun" + + def FakeTestThatRun( + machine, test_label, benchmark, test_args, profiler_args + ): + self.test_that_args = [ + machine, + test_label, + benchmark, + test_args, + profiler_args, + ] + self.call_test_that_run = True + return "Ran FakeTestThatRun" + + def FakeTastRun(machine, test_label, benchmark): + self.tast_args = [machine, test_label, benchmark] + self.call_tast_run = True + return "Ran FakeTastRun" + + self.runner.Crosfleet_Run = FakeCrosfleetRun + self.runner.Test_That_Run = FakeTestThatRun + self.runner.Tast_Run = FakeTastRun + + self.runner.dut_config["enable_aslr"] = False + self.runner.dut_config["cooldown_time"] = 0 + self.runner.dut_config["governor"] = "fake_governor" + self.runner.dut_config["cpu_freq_pct"] = 65 + 
self.runner.dut_config["intel_pstate"] = "no_hwp" + machine = "fake_machine" + cros_machine = MockCrosMachine( + machine, self.mock_label.chromeos_root, self.mock_logger + ) + test_args = "" + profiler_args = "" + + # Test crosfleet run for telemetry_Crosperf and crosperf_Wrapper benchmarks. + self.mock_label.crosfleet = True + reset() + self.runner.Run( + cros_machine, + self.mock_label, + self.crosperf_wrapper_bench, + test_args, + profiler_args, + ) + self.assertTrue(self.call_crosfleet_run) + self.assertFalse(self.call_test_that_run) + self.assertEqual( + self.crosfleet_run_args, + [self.mock_label, self.crosperf_wrapper_bench, "", ""], + ) + + reset() + self.runner.Run( + cros_machine, + self.mock_label, + self.telemetry_crosperf_bench, + test_args, + profiler_args, + ) + self.assertTrue(self.call_crosfleet_run) + self.assertFalse(self.call_test_that_run) + self.assertEqual( + self.crosfleet_run_args, + [self.mock_label, self.telemetry_crosperf_bench, "", ""], + ) + + # Test test_that run for telemetry_Crosperf and crosperf_Wrapper benchmarks. + self.mock_label.crosfleet = False + reset() + self.runner.Run( + cros_machine, + self.mock_label, + self.crosperf_wrapper_bench, + test_args, + profiler_args, + ) + self.assertTrue(self.call_test_that_run) + self.assertFalse(self.call_crosfleet_run) + self.assertEqual( + self.test_that_args, + [ + "fake_machine", + self.mock_label, + self.crosperf_wrapper_bench, + "", + "", + ], + ) + + reset() + self.runner.Run( + cros_machine, + self.mock_label, + self.telemetry_crosperf_bench, + test_args, + profiler_args, + ) + self.assertTrue(self.call_test_that_run) + self.assertFalse(self.call_crosfleet_run) + self.assertEqual( + self.test_that_args, + [ + "fake_machine", + self.mock_label, + self.telemetry_crosperf_bench, + "", + "", + ], + ) + + # Test tast run for tast benchmarks. 
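# [Editor's sketch, not part of the patch] The dispatch behaviour that the
# assertions in test_run pin down, reconstructed as a sketch. The real body
# of SuiteRunner.Run is not part of this patch, so both the branch order and
# the argument tuples below are inferences from the fakes above.
def run_dispatch(label, benchmark, machine, test_args, profiler_args):
    if benchmark.suite == "tast":
        return "Tast_Run", (machine, label, benchmark)
    if label.crosfleet:
        return "Crosfleet_Run", (label, benchmark, test_args, profiler_args)
    return "Test_That_Run", (machine, label, benchmark, test_args, profiler_args)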
+ reset() + self.runner.Run(cros_machine, self.mock_label, self.tast_bench, "", "") + self.assertTrue(self.call_tast_run) + self.assertFalse(self.call_test_that_run) + self.assertFalse(self.call_crosfleet_run) + self.assertEqual( + self.tast_args, ["fake_machine", self.mock_label, self.tast_bench] + ) + + def test_gen_test_args(self): + test_args = "--iterations=2" + perf_args = "record -a -e cycles" + + # Test crosperf_Wrapper benchmarks arg list generation + args_list = [ + "test_args='--iterations=2'", + "dut_config='{}'", + "test=webgl", + ] + res = self.runner.GenTestArgs( + self.crosperf_wrapper_bench, test_args, "" + ) + self.assertCountEqual(res, args_list) + + # Test telemetry_Crosperf benchmarks arg list generation + args_list = [ + "test_args='--iterations=2'", + "dut_config='{}'", + "test=octane", + "run_local=False", + ] + args_list.append(suite_runner.GetProfilerArgs(perf_args)) + res = self.runner.GenTestArgs( + self.telemetry_crosperf_bench, test_args, perf_args + ) + self.assertCountEqual(res, args_list) + + @mock.patch.object(command_executer.CommandExecuter, "CrosRunCommand") + @mock.patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" + ) + def test_tast_run(self, mock_chroot_runcmd, mock_cros_runcmd): + mock_chroot_runcmd.return_value = 0 + self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd + self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd + res = self.runner.Tast_Run( + "lumpy1.cros", self.mock_label, self.tast_bench + ) + self.assertEqual(mock_cros_runcmd.call_count, 1) + self.assertEqual(mock_chroot_runcmd.call_count, 1) + self.assertEqual(res, 0) + self.assertEqual( + mock_cros_runcmd.call_args_list[0][0], + ("rm -rf /usr/local/autotest/results/*",), + ) + args_list = mock_chroot_runcmd.call_args_list[0][0] + args_dict = mock_chroot_runcmd.call_args_list[0][1] + self.assertEqual(len(args_list), 2) + self.assertEqual(args_dict["command_terminator"], self.mock_cmd_term) + + @mock.patch.object(command_executer.CommandExecuter, "CrosRunCommand") + @mock.patch.object( + command_executer.CommandExecuter, "ChrootRunCommandWOutput" ) - self.mock_cmd_exec.RunCommandWOutput = mock_runcmd - - mock_json_loads.return_value = { - 'child-results': [{ - 'success': True, - 'task-run-url': 'https://swarming/task?id=12345' - }] - } - self.mock_json.loads = mock_json_loads - - self.mock_label.crosfleet = True - self.runner.DownloadResult = FakeDownloadResult - res = self.runner.Crosfleet_Run(self.mock_label, - self.crosperf_wrapper_bench, '', '') - ret_tup = (0, '\nResults placed in tmp/swarming-12345\n', '') - self.assertEqual(res, ret_tup) - self.assertEqual(mock_runcmd.call_count, 2) - - args_list = mock_runcmd.call_args_list[0][0] - args_dict = mock_runcmd.call_args_list[0][1] - self.assertEqual(len(args_list), 1) - self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term) - - args_list = mock_runcmd.call_args_list[1][0] - self.assertEqual(args_list[0], ('crosfleet wait-task 12345')) - self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term) - - -if __name__ == '__main__': - unittest.main() + @mock.patch.object(logger.Logger, "LogFatal") + def test_test_that_run( + self, mock_log_fatal, mock_chroot_runcmd, mock_cros_runcmd + ): + mock_log_fatal.side_effect = SystemExit() + self.runner.logger.LogFatal = mock_log_fatal + # Test crosperf_Wrapper benchmarks cannot take perf_args + raised_exception = False + try: + self.runner.Test_That_Run( + "lumpy1.cros", + self.mock_label, + self.crosperf_wrapper_bench, + "", + 
"record -a -e cycles", + ) + except SystemExit: + raised_exception = True + self.assertTrue(raised_exception) + + mock_chroot_runcmd.return_value = 0 + self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd + self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd + res = self.runner.Test_That_Run( + "lumpy1.cros", + self.mock_label, + self.crosperf_wrapper_bench, + "--iterations=2", + "", + ) + self.assertEqual(mock_cros_runcmd.call_count, 1) + self.assertEqual(mock_chroot_runcmd.call_count, 1) + self.assertEqual(res, 0) + self.assertEqual( + mock_cros_runcmd.call_args_list[0][0], + ("rm -rf /usr/local/autotest/results/*",), + ) + args_list = mock_chroot_runcmd.call_args_list[0][0] + args_dict = mock_chroot_runcmd.call_args_list[0][1] + self.assertEqual(len(args_list), 2) + self.assertEqual(args_dict["command_terminator"], self.mock_cmd_term) + + @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput") + @mock.patch.object(json, "loads") + def test_crosfleet_run_client(self, mock_json_loads, mock_runcmd): + def FakeDownloadResult(l, task_id): + if l and task_id: + self.assertEqual(task_id, "12345") + return 0 + + mock_runcmd.return_value = ( + 0, + "Created Swarming task https://swarming/task/b12345", + "", + ) + self.mock_cmd_exec.RunCommandWOutput = mock_runcmd + + mock_json_loads.return_value = { + "child-results": [ + { + "success": True, + "task-run-url": "https://swarming/task?id=12345", + } + ] + } + self.mock_json.loads = mock_json_loads + + self.mock_label.crosfleet = True + self.runner.DownloadResult = FakeDownloadResult + res = self.runner.Crosfleet_Run( + self.mock_label, self.crosperf_wrapper_bench, "", "" + ) + ret_tup = (0, "\nResults placed in tmp/swarming-12345\n", "") + self.assertEqual(res, ret_tup) + self.assertEqual(mock_runcmd.call_count, 2) + + args_list = mock_runcmd.call_args_list[0][0] + args_dict = mock_runcmd.call_args_list[0][1] + self.assertEqual(len(args_list), 1) + self.assertEqual(args_dict["command_terminator"], self.mock_cmd_term) + + args_list = mock_runcmd.call_args_list[1][0] + self.assertEqual(args_list[0], ("crosfleet wait-task 12345")) + self.assertEqual(args_dict["command_terminator"], self.mock_cmd_term) + + +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/test_flag.py b/crosperf/test_flag.py index 0b061f95..62679197 100644 --- a/crosperf/test_flag.py +++ b/crosperf/test_flag.py @@ -9,8 +9,8 @@ is_test = [False] def SetTestMode(flag): - is_test[0] = flag + is_test[0] = flag def GetTestMode(): - return is_test[0] + return is_test[0] diff --git a/crosperf/translate_xbuddy.py b/crosperf/translate_xbuddy.py index 9af95e63..eb28ecae 100755 --- a/crosperf/translate_xbuddy.py +++ b/crosperf/translate_xbuddy.py @@ -11,29 +11,33 @@ from __future__ import print_function import os import sys -if '/mnt/host/source/src/third_party/toolchain-utils/crosperf' in sys.path: - dev_path = os.path.expanduser('~/trunk/chromite/lib/xbuddy') - sys.path.append(dev_path) + +if "/mnt/host/source/src/third_party/toolchain-utils/crosperf" in sys.path: + dev_path = os.path.expanduser("~/trunk/chromite/lib/xbuddy") + sys.path.append(dev_path) else: - print('This script can only be run from inside a ChromeOS chroot. Please ' - 'enter your chroot, go to ~/src/third_party/toolchain-utils/crosperf' - ' and try again.') - sys.exit(0) + print( + "This script can only be run from inside a ChromeOS chroot. Please " + "enter your chroot, go to ~/src/third_party/toolchain-utils/crosperf" + " and try again." 
+ ) + sys.exit(0) # pylint: disable=import-error,wrong-import-position import xbuddy def Main(xbuddy_string): - if not os.path.exists('./xbuddy_config.ini'): - config_path = os.path.expanduser( - '~/trunk/chromite/lib/xbuddy/xbuddy_config.ini') - os.symlink(config_path, './xbuddy_config.ini') - x = xbuddy.XBuddy(manage_builds=False, static_dir='/tmp/devserver/static') - build_id = x.Translate(os.path.split(xbuddy_string)) - return build_id - - -if __name__ == '__main__': - print(Main(sys.argv[1])) - sys.exit(0) + if not os.path.exists("./xbuddy_config.ini"): + config_path = os.path.expanduser( + "~/trunk/chromite/lib/xbuddy/xbuddy_config.ini" + ) + os.symlink(config_path, "./xbuddy_config.ini") + x = xbuddy.XBuddy(manage_builds=False, static_dir="/tmp/devserver/static") + build_id = x.Translate(os.path.split(xbuddy_string)) + return build_id + + +if __name__ == "__main__": + print(Main(sys.argv[1])) + sys.exit(0) diff --git a/cwp/cr-os/fetch_gn_descs.py b/cwp/cr-os/fetch_gn_descs.py index e1b50cdf..220511bf 100755 --- a/cwp/cr-os/fetch_gn_descs.py +++ b/cwp/cr-os/fetch_gn_descs.py @@ -31,165 +31,182 @@ import tempfile def _find_chromium_root(search_from): - """Finds the chromium root directory from `search_from`.""" - current = search_from - while current != '/': - if os.path.isfile(os.path.join(current, '.gclient')): - return current - current = os.path.dirname(current) - raise ValueError( - "%s doesn't appear to be a Chromium subdirectory" % search_from) + """Finds the chromium root directory from `search_from`.""" + current = search_from + while current != "/": + if os.path.isfile(os.path.join(current, ".gclient")): + return current + current = os.path.dirname(current) + raise ValueError( + "%s doesn't appear to be a Chromium subdirectory" % search_from + ) def _create_gn_args_for(arch): - """Creates a `gn args` listing for the given architecture.""" - # FIXME(gbiv): is_chromeos_device = True would be nice to support, as well. - # Requires playing nicely with SimpleChrome though, and this should be "close - # enough" for now. - return '\n'.join(( - 'target_os = "chromeos"', - 'target_cpu = "%s"' % arch, - 'is_official_build = true', - 'is_chrome_branded = true', - )) + """Creates a `gn args` listing for the given architecture.""" + # FIXME(gbiv): is_chromeos_device = True would be nice to support, as well. + # Requires playing nicely with SimpleChrome though, and this should be "close + # enough" for now. + return "\n".join( + ( + 'target_os = "chromeos"', + 'target_cpu = "%s"' % arch, + "is_official_build = true", + "is_chrome_branded = true", + ) + ) def _parse_gn_desc_output(output): - """Parses the output of `gn desc --format=json`. + """Parses the output of `gn desc --format=json`. - Args: - output: a seekable file containing the JSON output of `gn desc`. + Args: + output: a seekable file containing the JSON output of `gn desc`. - Returns: - A tuple of (warnings, gn_desc_json). - """ - warnings = [] - desc_json = None - while True: - start_pos = output.tell() - next_line = next(output, None) - if next_line is None: - raise ValueError('No JSON found in the given gn file') + Returns: + A tuple of (warnings, gn_desc_json). 
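# [Editor's sketch, not part of the patch] A usage illustration for
# _parse_gn_desc_output (assumes fetch_gn_descs is importable, as in the
# unit tests later in this patch): leading non-JSON lines come back joined
# as warnings, and the stream is rewound so the JSON document itself can be
# read with a single json.load().
import io
import fetch_gn_descs

warnings, desc_json = fetch_gn_descs._parse_gn_desc_output(
    io.StringIO('warning: "{" is bad\n{"bar": "baz"}')
)
assert warnings == 'warning: "{" is bad'
assert desc_json == {"bar": "baz"}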
+ """ + warnings = [] + desc_json = None + while True: + start_pos = output.tell() + next_line = next(output, None) + if next_line is None: + raise ValueError("No JSON found in the given gn file") - if next_line.lstrip().startswith('{'): - output.seek(start_pos) - desc_json = json.load(output) - break + if next_line.lstrip().startswith("{"): + output.seek(start_pos) + desc_json = json.load(output) + break - warnings.append(next_line) + warnings.append(next_line) - return ''.join(warnings).strip(), desc_json + return "".join(warnings).strip(), desc_json def _run_gn_desc(in_dir, gn_args): - logging.info('Running `gn gen`...') - subprocess.check_call(['gn', 'gen', '.', '--args=' + gn_args], cwd=in_dir) + logging.info("Running `gn gen`...") + subprocess.check_call(["gn", "gen", ".", "--args=" + gn_args], cwd=in_dir) + + logging.info("Running `gn desc`...") + with tempfile.TemporaryFile(mode="r+", encoding="utf-8") as f: + gn_command = ["gn", "desc", "--format=json", ".", "//*:*"] + exit_code = subprocess.call(gn_command, stdout=f, cwd=in_dir) + f.seek(0) + if exit_code: + logging.error("gn failed; stdout:\n%s", f.read()) + raise subprocess.CalledProcessError(exit_code, gn_command) + warnings, result = _parse_gn_desc_output(f) + + if warnings: + logging.warning( + "Encountered warning(s) running `gn desc`:\n%s", warnings + ) + return result - logging.info('Running `gn desc`...') - with tempfile.TemporaryFile(mode='r+', encoding='utf-8') as f: - gn_command = ['gn', 'desc', '--format=json', '.', '//*:*'] - exit_code = subprocess.call(gn_command, stdout=f, cwd=in_dir) - f.seek(0) - if exit_code: - logging.error('gn failed; stdout:\n%s', f.read()) - raise subprocess.CalledProcessError(exit_code, gn_command) - warnings, result = _parse_gn_desc_output(f) - if warnings: - logging.warning('Encountered warning(s) running `gn desc`:\n%s', warnings) - return result +def _fix_result(rename_out, out_dir, chromium_root, gn_desc): + """Performs postprocessing on `gn desc` JSON.""" + result = {} + rel_out = "//" + os.path.relpath( + out_dir, os.path.join(chromium_root, "src") + ) + rename_out = rename_out if rename_out.endswith("/") else rename_out + "/" -def _fix_result(rename_out, out_dir, chromium_root, gn_desc): - """Performs postprocessing on `gn desc` JSON.""" - result = {} - - rel_out = '//' + os.path.relpath(out_dir, os.path.join(chromium_root, 'src')) - rename_out = rename_out if rename_out.endswith('/') else rename_out + '/' - - def fix_source_file(f): - if not f.startswith(rel_out): - return f - return rename_out + f[len(rel_out) + 1:] - - for target, info in gn_desc.items(): - sources = info.get('sources') - configs = info.get('configs') - if not sources or not configs: - continue - - result[target] = { - 'configs': configs, - 'sources': [fix_source_file(f) for f in sources], - } + def fix_source_file(f): + if not f.startswith(rel_out): + return f + return rename_out + f[len(rel_out) + 1 :] + + for target, info in gn_desc.items(): + sources = info.get("sources") + configs = info.get("configs") + if not sources or not configs: + continue + + result[target] = { + "configs": configs, + "sources": [fix_source_file(f) for f in sources], + } - return result + return result def main(args): - known_arches = [ - 'arm', - 'arm64', - 'x64', - 'x86', - ] - - parser = argparse.ArgumentParser( - description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - 'arch', - nargs='+', - help='Architecture(s) to fetch `gn desc`s for. 
' - 'Supported ones are %s' % known_arches) - parser.add_argument( - '--output', required=True, help='File to write results to.') - parser.add_argument( - '--chromium_out_dir', - required=True, - help='Chromium out/ directory for us to use. This directory will ' - 'be clobbered by this script.') - parser.add_argument( - '--rename_out', - default='//out', - help='Directory to rename files in --chromium_out_dir to. ' - 'Default: %(default)s') - opts = parser.parse_args(args) - - logging.basicConfig( - format='%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s', - level=logging.INFO, - ) - - arches = opts.arch - rename_out = opts.rename_out - for arch in arches: - if arch not in known_arches: - parser.error( - 'unknown architecture: %s; try one of %s' % (arch, known_arches)) - - results_file = os.path.realpath(opts.output) - out_dir = os.path.realpath(opts.chromium_out_dir) - chromium_root = _find_chromium_root(out_dir) - - os.makedirs(out_dir, exist_ok=True) - results = {} - for arch in arches: - logging.info('Getting `gn` desc for %s...', arch) - - results[arch] = _fix_result( - rename_out, out_dir, chromium_root, - _run_gn_desc( - in_dir=out_dir, - gn_args=_create_gn_args_for(arch), - )) - - os.makedirs(os.path.dirname(results_file), exist_ok=True) - - results_intermed = results_file + '.tmp' - with open(results_intermed, 'w', encoding='utf-8') as f: - json.dump(results, f) - os.rename(results_intermed, results_file) - - -if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) + known_arches = [ + "arm", + "arm64", + "x64", + "x86", + ] + + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "arch", + nargs="+", + help="Architecture(s) to fetch `gn desc`s for. " + "Supported ones are %s" % known_arches, + ) + parser.add_argument( + "--output", required=True, help="File to write results to." + ) + parser.add_argument( + "--chromium_out_dir", + required=True, + help="Chromium out/ directory for us to use. This directory will " + "be clobbered by this script.", + ) + parser.add_argument( + "--rename_out", + default="//out", + help="Directory to rename files in --chromium_out_dir to. 
" + "Default: %(default)s", + ) + opts = parser.parse_args(args) + + logging.basicConfig( + format="%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s", + level=logging.INFO, + ) + + arches = opts.arch + rename_out = opts.rename_out + for arch in arches: + if arch not in known_arches: + parser.error( + "unknown architecture: %s; try one of %s" % (arch, known_arches) + ) + + results_file = os.path.realpath(opts.output) + out_dir = os.path.realpath(opts.chromium_out_dir) + chromium_root = _find_chromium_root(out_dir) + + os.makedirs(out_dir, exist_ok=True) + results = {} + for arch in arches: + logging.info("Getting `gn` desc for %s...", arch) + + results[arch] = _fix_result( + rename_out, + out_dir, + chromium_root, + _run_gn_desc( + in_dir=out_dir, + gn_args=_create_gn_args_for(arch), + ), + ) + + os.makedirs(os.path.dirname(results_file), exist_ok=True) + + results_intermed = results_file + ".tmp" + with open(results_intermed, "w", encoding="utf-8") as f: + json.dump(results, f) + os.rename(results_intermed, results_file) + + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/cwp/cr-os/fetch_gn_descs_test.py b/cwp/cr-os/fetch_gn_descs_test.py index 02941792..f9a9cf93 100755 --- a/cwp/cr-os/fetch_gn_descs_test.py +++ b/cwp/cr-os/fetch_gn_descs_test.py @@ -17,93 +17,108 @@ import fetch_gn_descs class Test(unittest.TestCase): - """Tests for fetch_gn_descs.""" - - def test_fix_result_removes_uninteresting_items(self): - items = { - '//uninteresting:a': {}, - '//uninteresting:b': { - 'sources': ['whee'], - }, - '//uninteresting:c': { - 'configs': ['whee'], - }, - '//uninteresting:d': { - 'sources': [], - 'configs': [], - }, - '//interesting:a': { - 'sources': ['a'], - 'configs': ['b'], - }, - '//interesting:b': { - 'sources': ['d'], - 'configs': ['c'], - }, - } - - expected_items = { - '//interesting:a': items['//interesting:a'], - '//interesting:b': items['//interesting:b'], - } - - self.assertDictEqual( - fetch_gn_descs._fix_result('/', '/', '/', items), expected_items) - - def test_fix_result_translates_paths_in_out_dir(self): - items = { - '//interesting:a': { - 'sources': ['//out_dir/foo', '//out_dir'], - 'configs': ['b'], - }, - } - - expected_items = { - '//interesting:a': { - 'sources': ['//out_translated/foo', '//out_translated/'], - 'configs': ['b'], - }, - } - - self.assertDictEqual( - fetch_gn_descs._fix_result( - rename_out='//out_translated', - out_dir='/chromium/src/out_dir', - chromium_root='/chromium', - gn_desc=items, - ), - expected_items, - ) - - def test_gn_desc_output_parsing_skips_pre_json_warnings(self): - gn_desc = io.StringIO('\n'.join(( - 'foo', - 'warning: "{" is bad', - '{"bar": "baz",', - ' "qux": true}', - ))) - - warnings, desc_json = fetch_gn_descs._parse_gn_desc_output(gn_desc) - self.assertEqual(warnings, '\n'.join(( - 'foo', - 'warning: "{" is bad', - ))) - self.assertEqual(desc_json, { - 'bar': 'baz', - 'qux': True, - }) - - def test_gn_desc_output_parsing_issues_no_warnings_if_none_are_present(self): - gn_desc = io.StringIO('{"bar": "baz"}') - warnings, desc_json = fetch_gn_descs._parse_gn_desc_output(gn_desc) - self.assertEqual(warnings, '') - self.assertEqual(desc_json, {'bar': 'baz'}) - - gn_desc = io.StringIO('\n \n\t\n{"bar": "baz"}') - warnings, desc_json = fetch_gn_descs._parse_gn_desc_output(gn_desc) - self.assertEqual(warnings, '') - self.assertEqual(desc_json, {'bar': 'baz'}) - - -if __name__ == '__main__': - unittest.main() + """Tests for fetch_gn_descs.""" + + def 
test_fix_result_removes_uninteresting_items(self): + items = { + "//uninteresting:a": {}, + "//uninteresting:b": { + "sources": ["whee"], + }, + "//uninteresting:c": { + "configs": ["whee"], + }, + "//uninteresting:d": { + "sources": [], + "configs": [], + }, + "//interesting:a": { + "sources": ["a"], + "configs": ["b"], + }, + "//interesting:b": { + "sources": ["d"], + "configs": ["c"], + }, + } + + expected_items = { + "//interesting:a": items["//interesting:a"], + "//interesting:b": items["//interesting:b"], + } + + self.assertDictEqual( + fetch_gn_descs._fix_result("/", "/", "/", items), expected_items + ) + + def test_fix_result_translates_paths_in_out_dir(self): + items = { + "//interesting:a": { + "sources": ["//out_dir/foo", "//out_dir"], + "configs": ["b"], + }, + } + + expected_items = { + "//interesting:a": { + "sources": ["//out_translated/foo", "//out_translated/"], + "configs": ["b"], + }, + } + + self.assertDictEqual( + fetch_gn_descs._fix_result( + rename_out="//out_translated", + out_dir="/chromium/src/out_dir", + chromium_root="/chromium", + gn_desc=items, + ), + expected_items, + ) + + def test_gn_desc_output_parsing_skips_pre_json_warnings(self): + gn_desc = io.StringIO( + "\n".join( + ( + "foo", + 'warning: "{" is bad', + '{"bar": "baz",', + ' "qux": true}', + ) + ) + ) + + warnings, desc_json = fetch_gn_descs._parse_gn_desc_output(gn_desc) + self.assertEqual( + warnings, + "\n".join( + ( + "foo", + 'warning: "{" is bad', + ) + ), + ) + self.assertEqual( + desc_json, + { + "bar": "baz", + "qux": True, + }, + ) + + def test_gn_desc_output_parsing_issues_no_warnings_if_none_are_present( + self, + ): + gn_desc = io.StringIO('{"bar": "baz"}') + warnings, desc_json = fetch_gn_descs._parse_gn_desc_output(gn_desc) + self.assertEqual(warnings, "") + self.assertEqual(desc_json, {"bar": "baz"}) + + gn_desc = io.StringIO('\n \n\t\n{"bar": "baz"}') + warnings, desc_json = fetch_gn_descs._parse_gn_desc_output(gn_desc) + self.assertEqual(warnings, "") + self.assertEqual(desc_json, {"bar": "baz"}) + + +if __name__ == "__main__": + unittest.main() diff --git a/debug_info_test/allowlist.py b/debug_info_test/allowlist.py index 9205b37b..945c0440 100644 --- a/debug_info_test/allowlist.py +++ b/debug_info_test/allowlist.py @@ -19,50 +19,50 @@ import re # The performance bottleneck of this script is readelf. Unless this becomes # slower than readelf, don't waste time here. def is_allowlisted(list_name, pattern): - """Check whether the given pattern is specified in the allowlist. + """Check whether the given pattern is specified in the allowlist. - Args: - list_name: name of the allowlist. - pattern: the target string. + Args: + list_name: name of the allowlist. + pattern: the target string. - Returns: - True if matched otherwise False. - """ - return pattern and allowlists[list_name].match(pattern) + Returns: + True if matched otherwise False. + """ + return pattern and allowlists[list_name].match(pattern) def prepare_allowlist(patterns): - """Join and compile the re patterns. + """Join and compile the re patterns. - Args: - patterns: regex patterns. + Args: + patterns: regex patterns. - Returns: - A compiled re object. - """ - return re.compile('|'.join(patterns)) + Returns: + A compiled re object. + """ + return re.compile("|".join(patterns)) def load_allowlists(dirname): - """Load allowlists under dirname. - - An allowlist ends with .allowlist. - - Args: - dirname: path to the dir. - - Returns: - A dictionary of 'filename' -> allowlist matcher. 
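# [Editor's sketch, not part of the patch] What prepare_allowlist and
# is_allowlisted boil down to, as a self-contained illustration; the two
# patterns here are fabricated for illustration only.
import re

matcher = re.compile("|".join([r"/usr/lib/.*\.so", r".*/chrome\.debug"]))
assert matcher.match("/usr/lib/libfoo.so")
assert not matcher.match("/bin/bash")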
- """ - wlist = {} - for fn in glob.glob(os.path.join(dirname, '*.allowlist')): - key = os.path.splitext(os.path.basename(fn))[0] - with open(fn, 'r', encoding='utf-8') as f: - patterns = f.read().splitlines() - patterns = [l for l in patterns if l != ''] - patterns = [l for l in patterns if l[0] != '#'] - wlist[key] = prepare_allowlist(patterns) - return wlist + """Load allowlists under dirname. + + An allowlist ends with .allowlist. + + Args: + dirname: path to the dir. + + Returns: + A dictionary of 'filename' -> allowlist matcher. + """ + wlist = {} + for fn in glob.glob(os.path.join(dirname, "*.allowlist")): + key = os.path.splitext(os.path.basename(fn))[0] + with open(fn, "r", encoding="utf-8") as f: + patterns = f.read().splitlines() + patterns = [l for l in patterns if l != ""] + patterns = [l for l in patterns if l[0] != "#"] + wlist[key] = prepare_allowlist(patterns) + return wlist allowlists = load_allowlists(os.path.dirname(__file__)) diff --git a/debug_info_test/check_cus.py b/debug_info_test/check_cus.py index a83a9bc4..df2c4275 100644 --- a/debug_info_test/check_cus.py +++ b/debug_info_test/check_cus.py @@ -12,64 +12,68 @@ import subprocess import check_ngcc + cu_checks = [check_ngcc.not_by_gcc] def check_compile_unit(dso_path, producer, comp_path): - """check all compiler flags used to build the compile unit. + """check all compiler flags used to build the compile unit. - Args: - dso_path: path to the elf/dso. - producer: DW_AT_producer contains the compiler command line. - comp_path: DW_AT_comp_dir + DW_AT_name. + Args: + dso_path: path to the elf/dso. + producer: DW_AT_producer contains the compiler command line. + comp_path: DW_AT_comp_dir + DW_AT_name. - Returns: - A set of failed tests. - """ - failed = set() - for c in cu_checks: - if not c(dso_path, producer, comp_path): - failed.add(c.__module__) + Returns: + A set of failed tests. + """ + failed = set() + for c in cu_checks: + if not c(dso_path, producer, comp_path): + failed.add(c.__module__) - return failed + return failed def check_compile_units(dso_path): - """check all compile units in the given dso. - - Args: - dso_path: path to the dso. - - Returns: - True if everything looks fine otherwise False. - """ - - failed = set() - producer = '' - comp_path = '' - - readelf = subprocess.Popen( - ['llvm-dwarfdump', '--recurse-depth=0', dso_path], - stdout=subprocess.PIPE, - stderr=open(os.devnull, 'w'), - encoding='utf-8') - for l in readelf.stdout: - if 'DW_TAG_compile_unit' in l: - if producer: + """check all compile units in the given dso. + + Args: + dso_path: path to the dso. + + Returns: + True if everything looks fine otherwise False. 
+ """ + + failed = set() + producer = "" + comp_path = "" + + readelf = subprocess.Popen( + ["llvm-dwarfdump", "--recurse-depth=0", dso_path], + stdout=subprocess.PIPE, + stderr=open(os.devnull, "w"), + encoding="utf-8", + ) + for l in readelf.stdout: + if "DW_TAG_compile_unit" in l: + if producer: + failed = failed.union( + check_compile_unit(dso_path, producer, comp_path) + ) + producer = "" + comp_path = "" + elif "DW_AT_producer" in l: + producer = l + elif "DW_AT_name" in l: + comp_path = os.path.join(comp_path, l.split(":")[-1].strip()) + elif "DW_AT_comp_dir" in l: + comp_path = os.path.join(l.split(":")[-1].strip(), comp_path) + if producer: failed = failed.union(check_compile_unit(dso_path, producer, comp_path)) - producer = '' - comp_path = '' - elif 'DW_AT_producer' in l: - producer = l - elif 'DW_AT_name' in l: - comp_path = os.path.join(comp_path, l.split(':')[-1].strip()) - elif 'DW_AT_comp_dir' in l: - comp_path = os.path.join(l.split(':')[-1].strip(), comp_path) - if producer: - failed = failed.union(check_compile_unit(dso_path, producer, comp_path)) - - if failed: - print('%s failed check: %s' % (dso_path, ' '.join(failed))) - return False - - return True + + if failed: + print("%s failed check: %s" % (dso_path, " ".join(failed))) + return False + + return True diff --git a/debug_info_test/check_exist.py b/debug_info_test/check_exist.py index 768d09bd..795cb470 100644 --- a/debug_info_test/check_exist.py +++ b/debug_info_test/check_exist.py @@ -14,89 +14,90 @@ from allowlist import is_allowlisted def check_debug_info(dso_path, readelf_content): - """Check whether debug info section exists in the elf file. + """Check whether debug info section exists in the elf file. - Args: - dso_path: path to the dso. - readelf_content: debug info dumped by command readelf. + Args: + dso_path: path to the dso. + readelf_content: debug info dumped by command readelf. - Returns: - True if debug info section exists, otherwise False. - """ + Returns: + True if debug info section exists, otherwise False. + """ - # Return True if it is allowlisted - if is_allowlisted('exist_debug_info', dso_path): - return True + # Return True if it is allowlisted + if is_allowlisted("exist_debug_info", dso_path): + return True - for l in readelf_content: - if 'debug_info' in l: - return True - return False + for l in readelf_content: + if "debug_info" in l: + return True + return False def check_producer(dso_path, readelf_content): - """Check whether DW_AT_producer exists in each compile unit. - - Args: - dso_path: path to the dso. - readelf_content: debug info dumped by command readelf. - - Returns: - True if DW_AT_producer exists in each compile unit, otherwise False. - Notice: If no compile unit in DSO, also return True. - """ - - # Return True if it is allowlisted - if is_allowlisted('exist_producer', dso_path): - return True - - # Indicate if there is a producer under each cu - cur_producer = False - - first_cu = True - producer_exist = True - - for l in readelf_content: - if 'DW_TAG_compile_unit' in l: - if not first_cu and not cur_producer: + """Check whether DW_AT_producer exists in each compile unit. + + Args: + dso_path: path to the dso. + readelf_content: debug info dumped by command readelf. + + Returns: + True if DW_AT_producer exists in each compile unit, otherwise False. + Notice: If no compile unit in DSO, also return True. 
+ """ + + # Return True if it is allowlisted + if is_allowlisted("exist_producer", dso_path): + return True + + # Indicate if there is a producer under each cu + cur_producer = False + + first_cu = True + producer_exist = True + + for l in readelf_content: + if "DW_TAG_compile_unit" in l: + if not first_cu and not cur_producer: + producer_exist = False + break + first_cu = False + cur_producer = False + elif "DW_AT_producer" in l: + cur_producer = True + + # Check whether last producer of compile unit exists in the elf, + # also return True if no cu in the DSO. + if not first_cu and not cur_producer: producer_exist = False - break - first_cu = False - cur_producer = False - elif 'DW_AT_producer' in l: - cur_producer = True - # Check whether last producer of compile unit exists in the elf, - # also return True if no cu in the DSO. - if not first_cu and not cur_producer: - producer_exist = False - - return producer_exist + return producer_exist def check_exist_all(dso_path): - """check whether intended components exists in the given dso. + """check whether intended components exists in the given dso. - Args: - dso_path: path to the dso. + Args: + dso_path: path to the dso. - Returns: - True if everything looks fine otherwise False. - """ + Returns: + True if everything looks fine otherwise False. + """ - readelf = subprocess.Popen( - ['llvm-dwarfdump', '--recurse-depth=0', dso_path], - stdout=subprocess.PIPE, - stderr=open(os.devnull, 'w'), - encoding='utf-8') - readelf_content = list(readelf.stdout) + readelf = subprocess.Popen( + ["llvm-dwarfdump", "--recurse-depth=0", dso_path], + stdout=subprocess.PIPE, + stderr=open(os.devnull, "w"), + encoding="utf-8", + ) + readelf_content = list(readelf.stdout) - exist_checks = [check_debug_info, check_producer] + exist_checks = [check_debug_info, check_producer] - for e in exist_checks: - if not e(dso_path, readelf_content): - check_failed = e.__module__ + ': ' + e.__name__ - print('%s failed check: %s' % (dso_path, check_failed)) - return False + for e in exist_checks: + if not e(dso_path, readelf_content): + check_failed = e.__module__ + ": " + e.__name__ + print("%s failed check: %s" % (dso_path, check_failed)) + return False - return True + return True diff --git a/debug_info_test/check_icf.py b/debug_info_test/check_icf.py index a79d5e80..5e92ec13 100644 --- a/debug_info_test/check_icf.py +++ b/debug_info_test/check_icf.py @@ -13,41 +13,43 @@ import subprocess def check_identical_code_folding(dso_path): - """check whether chrome was built with identical code folding. - - Args: - dso_path: path to the dso. - - Returns: - False if the dso is chrome and it was not built with icf, - True otherwise. - """ - - if not dso_path.endswith('/chrome.debug'): - return True - - # Run 'nm' on the chrome binary and read the output. - nm = subprocess.Popen(['nm', dso_path], - stdout=subprocess.PIPE, - stderr=open(os.devnull, 'w'), - encoding='utf-8') - nm_output, _ = nm.communicate() - - # Search for addresses of text symbols. - text_addresses = re.findall('^[0-9a-f]+[ ]+[tT] ', nm_output, re.MULTILINE) - - # Calculate number of text symbols in chrome binary. - num_text_addresses = len(text_addresses) - - # Calculate number of unique text symbols in chrome binary. - num_unique_text_addresses = len(set(text_addresses)) - - # Check that the number of duplicate symbols is at least 10,000. 
- # - https://crbug.com/813272#c18 - if num_text_addresses - num_unique_text_addresses >= 10000: - return True - - print('%s was not built with ICF' % dso_path) - print(' num_text_addresses = %d' % num_text_addresses) - print(' num_unique_text_addresses = %d' % num_unique_text_addresses) - return False + """check whether chrome was built with identical code folding. + + Args: + dso_path: path to the dso. + + Returns: + False if the dso is chrome and it was not built with icf, + True otherwise. + """ + + if not dso_path.endswith("/chrome.debug"): + return True + + # Run 'nm' on the chrome binary and read the output. + nm = subprocess.Popen( + ["nm", dso_path], + stdout=subprocess.PIPE, + stderr=open(os.devnull, "w"), + encoding="utf-8", + ) + nm_output, _ = nm.communicate() + + # Search for addresses of text symbols. + text_addresses = re.findall("^[0-9a-f]+[ ]+[tT] ", nm_output, re.MULTILINE) + + # Calculate number of text symbols in chrome binary. + num_text_addresses = len(text_addresses) + + # Calculate number of unique text symbols in chrome binary. + num_unique_text_addresses = len(set(text_addresses)) + + # Check that the number of duplicate symbols is at least 10,000. + # - https://crbug.com/813272#c18 + if num_text_addresses - num_unique_text_addresses >= 10000: + return True + + print("%s was not built with ICF" % dso_path) + print(" num_text_addresses = %d" % num_text_addresses) + print(" num_unique_text_addresses = %d" % num_unique_text_addresses) + return False diff --git a/debug_info_test/check_ngcc.py b/debug_info_test/check_ngcc.py index 4a8241a0..60508691 100644 --- a/debug_info_test/check_ngcc.py +++ b/debug_info_test/check_ngcc.py @@ -11,20 +11,20 @@ from allowlist import is_allowlisted def not_by_gcc(dso_path, producer, comp_path): - """Check whether the compile unit is not built by gcc. + """Check whether the compile unit is not built by gcc. - Args: - dso_path: path to the elf/dso. - producer: DW_AT_producer contains the compiler command line. - comp_path: DW_AT_comp_dir + DW_AT_name. + Args: + dso_path: path to the elf/dso. + producer: DW_AT_producer contains the compiler command line. + comp_path: DW_AT_comp_dir + DW_AT_name. - Returns: - False if compiled by gcc otherwise True. - """ - if is_allowlisted('ngcc_comp_path', comp_path): - return True + Returns: + False if compiled by gcc otherwise True. + """ + if is_allowlisted("ngcc_comp_path", comp_path): + return True - if is_allowlisted('ngcc_dso_path', dso_path): - return True + if is_allowlisted("ngcc_dso_path", dso_path): + return True - return 'GNU C' not in producer + return "GNU C" not in producer diff --git a/debug_info_test/debug_info_test.py b/debug_info_test/debug_info_test.py index 52875b74..ab123e0b 100755 --- a/debug_info_test/debug_info_test.py +++ b/debug_info_test/debug_info_test.py @@ -12,56 +12,60 @@ import os import subprocess import sys -import check_icf import check_cus import check_exist +import check_icf + elf_checks = [ - check_exist.check_exist_all, check_cus.check_compile_units, - check_icf.check_identical_code_folding + check_exist.check_exist_all, + check_cus.check_compile_units, + check_icf.check_identical_code_folding, ] def scanelf(root): - """Find ELFs in root. + """Find ELFs in root. - Args: - root: root dir to start with the search. + Args: + root: root dir to start with the search. - Returns: - Filenames of ELFs in root. 
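# [Editor's sketch, not part of the patch] The core of the not_by_gcc
# predicate from check_ngcc.py above, with the allowlist escape hatches
# stubbed out so only the producer string matters; both producer strings
# below are illustrative.
def not_by_gcc(producer):
    return "GNU C" not in producer

assert not_by_gcc("clang version 15.0.0")
assert not not_by_gcc("GNU C 11.2.0 -O2")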
- """ - p = subprocess.Popen(['scanelf', '-y', '-B', '-F', '%F', '-R', root], - stdout=subprocess.PIPE, - encoding='utf-8') - return [l.strip() for l in p.stdout] + Returns: + Filenames of ELFs in root. + """ + p = subprocess.Popen( + ["scanelf", "-y", "-B", "-F", "%F", "-R", root], + stdout=subprocess.PIPE, + encoding="utf-8", + ) + return [l.strip() for l in p.stdout] def Main(argv): - if len(argv) < 2: - print('usage: %s [file|dir]') - return 1 - - files = [] - cand = argv[1] - if os.path.isfile(cand): - files = [cand] - elif os.path.isdir(cand): - files = scanelf(cand) - else: - print('usage: %s [file|dir]') - return 1 - - failed = False - for f in files: - for c in elf_checks: - if not c(f): - failed = True - - if failed: - return 1 - return 0 - - -if __name__ == '__main__': - sys.exit(Main(sys.argv)) + if len(argv) < 2: + print("usage: %s [file|dir]") + return 1 + + files = [] + cand = argv[1] + if os.path.isfile(cand): + files = [cand] + elif os.path.isdir(cand): + files = scanelf(cand) + else: + print("usage: %s [file|dir]") + return 1 + + failed = False + for f in files: + for c in elf_checks: + if not c(f): + failed = True + + if failed: + return 1 + return 0 + + +if __name__ == "__main__": + sys.exit(Main(sys.argv)) diff --git a/file_lock_machine.py b/file_lock_machine.py index 41650ea1..72d6233a 100755 --- a/file_lock_machine.py +++ b/file_lock_machine.py @@ -9,7 +9,8 @@ from __future__ import division from __future__ import print_function -__author__ = 'asharif@google.com (Ahmad Sharif)' + +__author__ = "asharif@google.com (Ahmad Sharif)" import argparse import datetime @@ -24,7 +25,8 @@ import time from cros_utils import logger -LOCK_SUFFIX = '_check_lock_liveness' + +LOCK_SUFFIX = "_check_lock_liveness" # The locks file directory REQUIRES that 'group' only has read/write # privileges and 'world' has no privileges. 
So the mask must be @@ -33,357 +35,392 @@ LOCK_MASK = 0o27 def FileCheckName(name): - return name + LOCK_SUFFIX + return name + LOCK_SUFFIX def OpenLiveCheck(file_name): - with FileCreationMask(LOCK_MASK): - fd = open(file_name, 'a') - try: - fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) - except IOError as e: - logger.GetLogger().LogError(e) - raise - return fd + with FileCreationMask(LOCK_MASK): + fd = open(file_name, "a") + try: + fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) + except IOError as e: + logger.GetLogger().LogError(e) + raise + return fd class FileCreationMask(object): - """Class for the file creation mask.""" + """Class for the file creation mask.""" - def __init__(self, mask): - self._mask = mask - self._old_mask = None + def __init__(self, mask): + self._mask = mask + self._old_mask = None - def __enter__(self): - self._old_mask = os.umask(self._mask) + def __enter__(self): + self._old_mask = os.umask(self._mask) - def __exit__(self, typ, value, traceback): - os.umask(self._old_mask) + def __exit__(self, typ, value, traceback): + os.umask(self._old_mask) class LockDescription(object): - """The description of the lock.""" + """The description of the lock.""" - def __init__(self, desc=None): - try: - self.owner = desc['owner'] - self.exclusive = desc['exclusive'] - self.counter = desc['counter'] - self.time = desc['time'] - self.reason = desc['reason'] - self.auto = desc['auto'] - except (KeyError, TypeError): - self.owner = '' - self.exclusive = False - self.counter = 0 - self.time = 0 - self.reason = '' - self.auto = False - - def IsLocked(self): - return self.counter or self.exclusive - - def __str__(self): - return ' '.join([ - 'Owner: %s' % self.owner, - 'Exclusive: %s' % self.exclusive, - 'Counter: %s' % self.counter, - 'Time: %s' % self.time, - 'Reason: %s' % self.reason, - 'Auto: %s' % self.auto, - ]) + def __init__(self, desc=None): + try: + self.owner = desc["owner"] + self.exclusive = desc["exclusive"] + self.counter = desc["counter"] + self.time = desc["time"] + self.reason = desc["reason"] + self.auto = desc["auto"] + except (KeyError, TypeError): + self.owner = "" + self.exclusive = False + self.counter = 0 + self.time = 0 + self.reason = "" + self.auto = False + + def IsLocked(self): + return self.counter or self.exclusive + + def __str__(self): + return " ".join( + [ + "Owner: %s" % self.owner, + "Exclusive: %s" % self.exclusive, + "Counter: %s" % self.counter, + "Time: %s" % self.time, + "Reason: %s" % self.reason, + "Auto: %s" % self.auto, + ] + ) class FileLock(object): - """File lock operation class.""" - FILE_OPS = [] - - def __init__(self, lock_filename): - self._filepath = lock_filename - lock_dir = os.path.dirname(lock_filename) - assert os.path.isdir(lock_dir), ("Locks dir: %s doesn't exist!" 
% lock_dir) - self._file = None - self._description = None - - self.exclusive = None - self.auto = None - self.reason = None - self.time = None - self.owner = None - - def getDescription(self): - return self._description - - def getFilePath(self): - return self._filepath - - def setDescription(self, desc): - self._description = desc - - @classmethod - def AsString(cls, file_locks): - stringify_fmt = '%-30s %-15s %-4s %-4s %-15s %-40s %-4s' - header = stringify_fmt % ('machine', 'owner', 'excl', 'ctr', 'elapsed', - 'reason', 'auto') - lock_strings = [] - for file_lock in file_locks: - - elapsed_time = datetime.timedelta( - seconds=int(time.time() - file_lock.getDescription().time)) - elapsed_time = '%s ago' % elapsed_time - lock_strings.append( - stringify_fmt % (os.path.basename(file_lock.getFilePath), - file_lock.getDescription().owner, - file_lock.getDescription().exclusive, - file_lock.getDescription().counter, elapsed_time, - file_lock.getDescription().reason, - file_lock.getDescription().auto)) - table = '\n'.join(lock_strings) - return '\n'.join([header, table]) - - @classmethod - def ListLock(cls, pattern, locks_dir): - if not locks_dir: - locks_dir = Machine.LOCKS_DIR - full_pattern = os.path.join(locks_dir, pattern) - file_locks = [] - for lock_filename in glob.glob(full_pattern): - if LOCK_SUFFIX in lock_filename: - continue - file_lock = FileLock(lock_filename) - with file_lock as lock: - if lock.IsLocked(): - file_locks.append(file_lock) - logger.GetLogger().LogOutput('\n%s' % cls.AsString(file_locks)) - - def __enter__(self): - with FileCreationMask(LOCK_MASK): - try: - self._file = open(self._filepath, 'a+') - self._file.seek(0, os.SEEK_SET) - - if fcntl.flock(self._file.fileno(), fcntl.LOCK_EX) == -1: - raise IOError('flock(%s, LOCK_EX) failed!' % self._filepath) - - try: - desc = json.load(self._file) - except (EOFError, ValueError): - desc = None - self._description = LockDescription(desc) - - if self._description.exclusive and self._description.auto: - locked_byself = False - for fd in self.FILE_OPS: - if fd.name == FileCheckName(self._filepath): - locked_byself = True - break - if not locked_byself: - try: - fp = OpenLiveCheck(FileCheckName(self._filepath)) - except IOError: - pass - else: - self._description = LockDescription() - fcntl.lockf(fp, fcntl.LOCK_UN) - fp.close() + """File lock operation class.""" + + FILE_OPS = [] + + def __init__(self, lock_filename): + self._filepath = lock_filename + lock_dir = os.path.dirname(lock_filename) + assert os.path.isdir(lock_dir), ( + "Locks dir: %s doesn't exist!" % lock_dir + ) + self._file = None + self._description = None + + self.exclusive = None + self.auto = None + self.reason = None + self.time = None + self.owner = None + + def getDescription(self): return self._description - # Check this differently? 
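# [Editor's sketch, not part of the patch] The core acquire pattern used by
# FileLock.__enter__ above, reduced to a freestanding illustration. The path
# argument is hypothetical; the real class additionally keeps a JSON-encoded
# LockDescription in the same file.
import fcntl
import os

def hold_exclusive(path):
    old_mask = os.umask(0o27)  # mirror LOCK_MASK: restrict group/world access
    try:
        fd = open(path, "a+")
    finally:
        os.umask(old_mask)
    fcntl.flock(fd.fileno(), fcntl.LOCK_EX)  # blocks until acquired
    return fd  # keep open while working; close to release the lock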
- except IOError as ex: - logger.GetLogger().LogError(ex) - return None - - def __exit__(self, typ, value, traceback): - self._file.truncate(0) - self._file.write(json.dumps(self._description.__dict__, skipkeys=True)) - self._file.close() - def __str__(self): - return self.AsString([self]) + def getFilePath(self): + return self._filepath + + def setDescription(self, desc): + self._description = desc + + @classmethod + def AsString(cls, file_locks): + stringify_fmt = "%-30s %-15s %-4s %-4s %-15s %-40s %-4s" + header = stringify_fmt % ( + "machine", + "owner", + "excl", + "ctr", + "elapsed", + "reason", + "auto", + ) + lock_strings = [] + for file_lock in file_locks: + + elapsed_time = datetime.timedelta( + seconds=int(time.time() - file_lock.getDescription().time) + ) + elapsed_time = "%s ago" % elapsed_time + lock_strings.append( + stringify_fmt + % ( + os.path.basename(file_lock.getFilePath), + file_lock.getDescription().owner, + file_lock.getDescription().exclusive, + file_lock.getDescription().counter, + elapsed_time, + file_lock.getDescription().reason, + file_lock.getDescription().auto, + ) + ) + table = "\n".join(lock_strings) + return "\n".join([header, table]) + + @classmethod + def ListLock(cls, pattern, locks_dir): + if not locks_dir: + locks_dir = Machine.LOCKS_DIR + full_pattern = os.path.join(locks_dir, pattern) + file_locks = [] + for lock_filename in glob.glob(full_pattern): + if LOCK_SUFFIX in lock_filename: + continue + file_lock = FileLock(lock_filename) + with file_lock as lock: + if lock.IsLocked(): + file_locks.append(file_lock) + logger.GetLogger().LogOutput("\n%s" % cls.AsString(file_locks)) + + def __enter__(self): + with FileCreationMask(LOCK_MASK): + try: + self._file = open(self._filepath, "a+") + self._file.seek(0, os.SEEK_SET) + + if fcntl.flock(self._file.fileno(), fcntl.LOCK_EX) == -1: + raise IOError("flock(%s, LOCK_EX) failed!" % self._filepath) + + try: + desc = json.load(self._file) + except (EOFError, ValueError): + desc = None + self._description = LockDescription(desc) + + if self._description.exclusive and self._description.auto: + locked_byself = False + for fd in self.FILE_OPS: + if fd.name == FileCheckName(self._filepath): + locked_byself = True + break + if not locked_byself: + try: + fp = OpenLiveCheck(FileCheckName(self._filepath)) + except IOError: + pass + else: + self._description = LockDescription() + fcntl.lockf(fp, fcntl.LOCK_UN) + fp.close() + return self._description + # Check this differently? + except IOError as ex: + logger.GetLogger().LogError(ex) + return None + + def __exit__(self, typ, value, traceback): + self._file.truncate(0) + self._file.write(json.dumps(self._description.__dict__, skipkeys=True)) + self._file.close() + + def __str__(self): + return self.AsString([self]) class Lock(object): - """Lock class""" - - def __init__(self, lock_file, auto=True): - self._to_lock = os.path.basename(lock_file) - self._lock_file = lock_file - self._logger = logger.GetLogger() - self._auto = auto - - def NonBlockingLock(self, exclusive, reason=''): - with FileLock(self._lock_file) as lock: - if lock.exclusive: - self._logger.LogError( - 'Exclusive lock already acquired by %s. 
Reason: %s' % (lock.owner, - lock.reason)) - return False - - if exclusive: - if lock.counter: - self._logger.LogError('Shared lock already acquired') - return False - lock_file_check = FileCheckName(self._lock_file) - fd = OpenLiveCheck(lock_file_check) - FileLock.FILE_OPS.append(fd) - - lock.exclusive = True - lock.reason = reason - lock.owner = getpass.getuser() - lock.time = time.time() - lock.auto = self._auto - else: - lock.counter += 1 - self._logger.LogOutput('Successfully locked: %s' % self._to_lock) - return True - - def Unlock(self, exclusive, force=False): - with FileLock(self._lock_file) as lock: - if not lock.IsLocked(): - self._logger.LogWarning("Can't unlock unlocked machine!") + """Lock class""" + + def __init__(self, lock_file, auto=True): + self._to_lock = os.path.basename(lock_file) + self._lock_file = lock_file + self._logger = logger.GetLogger() + self._auto = auto + + def NonBlockingLock(self, exclusive, reason=""): + with FileLock(self._lock_file) as lock: + if lock.exclusive: + self._logger.LogError( + "Exclusive lock already acquired by %s. Reason: %s" + % (lock.owner, lock.reason) + ) + return False + + if exclusive: + if lock.counter: + self._logger.LogError("Shared lock already acquired") + return False + lock_file_check = FileCheckName(self._lock_file) + fd = OpenLiveCheck(lock_file_check) + FileLock.FILE_OPS.append(fd) + + lock.exclusive = True + lock.reason = reason + lock.owner = getpass.getuser() + lock.time = time.time() + lock.auto = self._auto + else: + lock.counter += 1 + self._logger.LogOutput("Successfully locked: %s" % self._to_lock) return True - if lock.exclusive != exclusive: - self._logger.LogError('shared locks must be unlocked with --shared') - return False - - if lock.exclusive: - if lock.owner != getpass.getuser() and not force: - self._logger.LogError("%s can't unlock lock owned by: %s" % - (getpass.getuser(), lock.owner)) - return False - if lock.auto != self._auto: - self._logger.LogError("Can't unlock lock with different -a" - ' parameter.') - return False - lock.exclusive = False - lock.reason = '' - lock.owner = '' - - if self._auto: - del_list = [ - i for i in FileLock.FILE_OPS - if i.name == FileCheckName(self._lock_file) - ] - for i in del_list: - FileLock.FILE_OPS.remove(i) - for f in del_list: - fcntl.lockf(f, fcntl.LOCK_UN) - f.close() - del del_list - os.remove(FileCheckName(self._lock_file)) - - else: - lock.counter -= 1 - return True + def Unlock(self, exclusive, force=False): + with FileLock(self._lock_file) as lock: + if not lock.IsLocked(): + self._logger.LogWarning("Can't unlock unlocked machine!") + return True + + if lock.exclusive != exclusive: + self._logger.LogError( + "shared locks must be unlocked with --shared" + ) + return False + + if lock.exclusive: + if lock.owner != getpass.getuser() and not force: + self._logger.LogError( + "%s can't unlock lock owned by: %s" + % (getpass.getuser(), lock.owner) + ) + return False + if lock.auto != self._auto: + self._logger.LogError( + "Can't unlock lock with different -a" " parameter." 
+ ) + return False + lock.exclusive = False + lock.reason = "" + lock.owner = "" + + if self._auto: + del_list = [ + i + for i in FileLock.FILE_OPS + if i.name == FileCheckName(self._lock_file) + ] + for i in del_list: + FileLock.FILE_OPS.remove(i) + for f in del_list: + fcntl.lockf(f, fcntl.LOCK_UN) + f.close() + del del_list + os.remove(FileCheckName(self._lock_file)) + + else: + lock.counter -= 1 + return True class Machine(object): - """Machine class""" + """Machine class""" - LOCKS_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/locks' + LOCKS_DIR = "/google/data/rw/users/mo/mobiletc-prebuild/locks" - def __init__(self, name, locks_dir=LOCKS_DIR, auto=True): - self._name = name - self._auto = auto - try: - self._full_name = socket.gethostbyaddr(name)[0] - except socket.error: - self._full_name = self._name - self._full_name = os.path.join(locks_dir, self._full_name) - - def Lock(self, exclusive=False, reason=''): - lock = Lock(self._full_name, self._auto) - return lock.NonBlockingLock(exclusive, reason) - - def TryLock(self, timeout=300, exclusive=False, reason=''): - locked = False - sleep = timeout / 10 - while True: - locked = self.Lock(exclusive, reason) - if locked or timeout < 0: - break - print('Lock not acquired for {0}, wait {1} seconds ...'.format( - self._name, sleep)) - time.sleep(sleep) - timeout -= sleep - return locked - - def Unlock(self, exclusive=False, ignore_ownership=False): - lock = Lock(self._full_name, self._auto) - return lock.Unlock(exclusive, ignore_ownership) + def __init__(self, name, locks_dir=LOCKS_DIR, auto=True): + self._name = name + self._auto = auto + try: + self._full_name = socket.gethostbyaddr(name)[0] + except socket.error: + self._full_name = self._name + self._full_name = os.path.join(locks_dir, self._full_name) + + def Lock(self, exclusive=False, reason=""): + lock = Lock(self._full_name, self._auto) + return lock.NonBlockingLock(exclusive, reason) + + def TryLock(self, timeout=300, exclusive=False, reason=""): + locked = False + sleep = timeout / 10 + while True: + locked = self.Lock(exclusive, reason) + if locked or timeout < 0: + break + print( + "Lock not acquired for {0}, wait {1} seconds ...".format( + self._name, sleep + ) + ) + time.sleep(sleep) + timeout -= sleep + return locked + + def Unlock(self, exclusive=False, ignore_ownership=False): + lock = Lock(self._full_name, self._auto) + return lock.Unlock(exclusive, ignore_ownership) def Main(argv): - """The main function.""" - - parser = argparse.ArgumentParser() - parser.add_argument( - '-r', '--reason', dest='reason', default='', help='The lock reason.') - parser.add_argument( - '-u', - '--unlock', - dest='unlock', - action='store_true', - default=False, - help='Use this to unlock.') - parser.add_argument( - '-l', - '--list_locks', - dest='list_locks', - action='store_true', - default=False, - help='Use this to list locks.') - parser.add_argument( - '-f', - '--ignore_ownership', - dest='ignore_ownership', - action='store_true', - default=False, - help="Use this to force unlock on a lock you don't own.") - parser.add_argument( - '-s', - '--shared', - dest='shared', - action='store_true', - default=False, - help='Use this for a shared (non-exclusive) lock.') - parser.add_argument( - '-d', - '--dir', - dest='locks_dir', - action='store', - default=Machine.LOCKS_DIR, - help='Use this to set different locks_dir') - parser.add_argument('args', nargs='*', help='Machine arg.') - - options = parser.parse_args(argv) - - options.locks_dir = os.path.abspath(options.locks_dir) - exclusive = 
not options.shared - - if not options.list_locks and len(options.args) != 2: - logger.GetLogger().LogError( - 'Either --list_locks or a machine arg is needed.') - return 1 - - if len(options.args) > 1: - machine = Machine(options.args[1], options.locks_dir, auto=False) - else: - machine = None - - if options.list_locks: - FileLock.ListLock('*', options.locks_dir) - retval = True - elif options.unlock: - retval = machine.Unlock(exclusive, options.ignore_ownership) - else: - retval = machine.Lock(exclusive, options.reason) - - if retval: - return 0 - else: - return 1 - - -if __name__ == '__main__': - sys.exit(Main(sys.argv[1:])) + """The main function.""" + + parser = argparse.ArgumentParser() + parser.add_argument( + "-r", "--reason", dest="reason", default="", help="The lock reason." + ) + parser.add_argument( + "-u", + "--unlock", + dest="unlock", + action="store_true", + default=False, + help="Use this to unlock.", + ) + parser.add_argument( + "-l", + "--list_locks", + dest="list_locks", + action="store_true", + default=False, + help="Use this to list locks.", + ) + parser.add_argument( + "-f", + "--ignore_ownership", + dest="ignore_ownership", + action="store_true", + default=False, + help="Use this to force unlock on a lock you don't own.", + ) + parser.add_argument( + "-s", + "--shared", + dest="shared", + action="store_true", + default=False, + help="Use this for a shared (non-exclusive) lock.", + ) + parser.add_argument( + "-d", + "--dir", + dest="locks_dir", + action="store", + default=Machine.LOCKS_DIR, + help="Use this to set different locks_dir", + ) + parser.add_argument("args", nargs="*", help="Machine arg.") + + options = parser.parse_args(argv) + + options.locks_dir = os.path.abspath(options.locks_dir) + exclusive = not options.shared + + if not options.list_locks and len(options.args) != 2: + logger.GetLogger().LogError( + "Either --list_locks or a machine arg is needed." + ) + return 1 + + if len(options.args) > 1: + machine = Machine(options.args[1], options.locks_dir, auto=False) + else: + machine = None + + if options.list_locks: + FileLock.ListLock("*", options.locks_dir) + retval = True + elif options.unlock: + retval = machine.Unlock(exclusive, options.ignore_ownership) + else: + retval = machine.Lock(exclusive, options.reason) + + if retval: + return 0 + else: + return 1 + + +if __name__ == "__main__": + sys.exit(Main(sys.argv[1:])) diff --git a/file_lock_machine_test.py b/file_lock_machine_test.py index d14deaf4..d1189512 100755 --- a/file_lock_machine_test.py +++ b/file_lock_machine_test.py @@ -12,7 +12,8 @@ MachineManagerTest tests MachineManager. 
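
Main() above is a thin argparse wrapper over the Machine API, and the tests in the next diff drive that API directly. A hedged usage sketch, with a placeholder hostname and /tmp as the locks directory (the same defaults the tests use):

    import file_lock_machine

    # Machine(name, locks_dir, auto): auto=True attaches a live-check file
    # so an exclusive lock is reclaimable once the locking process exits.
    mach = file_lock_machine.Machine("lab-host.example", "/tmp", auto=True)
    if mach.Lock(exclusive=True, reason="toolchain bisect"):
        try:
            pass  # ... exclusive access to the machine ...
        finally:
            mach.Unlock(exclusive=True)
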
from __future__ import print_function -__author__ = 'asharif@google.com (Ahmad Sharif)' + +__author__ = "asharif@google.com (Ahmad Sharif)" from multiprocessing import Process import time @@ -22,106 +23,108 @@ import file_lock_machine def LockAndSleep(machine): - file_lock_machine.Machine(machine, '/tmp', auto=True).Lock(exclusive=True) - time.sleep(1) + file_lock_machine.Machine(machine, "/tmp", auto=True).Lock(exclusive=True) + time.sleep(1) class MachineTest(unittest.TestCase): - """Class for testing machine locking.""" - - def setUp(self): - pass - - def testRepeatedUnlock(self): - mach = file_lock_machine.Machine('qqqraymes.mtv', '/tmp') - for _ in range(10): - self.assertTrue(mach.Unlock()) - mach = file_lock_machine.Machine('qqqraymes.mtv', '/tmp', auto=True) - for _ in range(10): - self.assertTrue(mach.Unlock()) - - def testLockUnlock(self): - mach = file_lock_machine.Machine('otter.mtv', '/tmp') - for _ in range(10): - self.assertTrue(mach.Lock(exclusive=True)) - self.assertTrue(mach.Unlock(exclusive=True)) - - mach = file_lock_machine.Machine('otter.mtv', '/tmp', True) - for _ in range(10): - self.assertTrue(mach.Lock(exclusive=True)) - self.assertTrue(mach.Unlock(exclusive=True)) - - def testSharedLock(self): - mach = file_lock_machine.Machine('chrotomation.mtv', '/tmp') - for _ in range(10): - self.assertTrue(mach.Lock(exclusive=False)) - for _ in range(10): - self.assertTrue(mach.Unlock(exclusive=False)) - self.assertTrue(mach.Lock(exclusive=True)) - self.assertTrue(mach.Unlock(exclusive=True)) - - mach = file_lock_machine.Machine('chrotomation.mtv', '/tmp', auto=True) - for _ in range(10): - self.assertTrue(mach.Lock(exclusive=False)) - for _ in range(10): - self.assertTrue(mach.Unlock(exclusive=False)) - self.assertTrue(mach.Lock(exclusive=True)) - self.assertTrue(mach.Unlock(exclusive=True)) - - def testExclusiveLock(self): - mach = file_lock_machine.Machine('atree.mtv', '/tmp') - self.assertTrue(mach.Lock(exclusive=True)) - for _ in range(10): - self.assertFalse(mach.Lock(exclusive=True)) - self.assertFalse(mach.Lock(exclusive=False)) - self.assertTrue(mach.Unlock(exclusive=True)) - - mach = file_lock_machine.Machine('atree.mtv', '/tmp', auto=True) - self.assertTrue(mach.Lock(exclusive=True)) - for _ in range(10): - self.assertFalse(mach.Lock(exclusive=True)) - self.assertFalse(mach.Lock(exclusive=False)) - self.assertTrue(mach.Unlock(exclusive=True)) - - def testExclusiveState(self): - mach = file_lock_machine.Machine('testExclusiveState', '/tmp') - self.assertTrue(mach.Lock(exclusive=True)) - for _ in range(10): - self.assertFalse(mach.Lock(exclusive=False)) - self.assertTrue(mach.Unlock(exclusive=True)) - - mach = file_lock_machine.Machine('testExclusiveState', '/tmp', auto=True) - self.assertTrue(mach.Lock(exclusive=True)) - for _ in range(10): - self.assertFalse(mach.Lock(exclusive=False)) - self.assertTrue(mach.Unlock(exclusive=True)) - - def testAutoLockGone(self): - mach = file_lock_machine.Machine('lockgone', '/tmp', auto=True) - p = Process(target=LockAndSleep, args=('lockgone',)) - p.start() - time.sleep(1.1) - p.join() - self.assertTrue(mach.Lock(exclusive=True)) - - def testAutoLockFromOther(self): - mach = file_lock_machine.Machine('other_lock', '/tmp', auto=True) - p = Process(target=LockAndSleep, args=('other_lock',)) - p.start() - time.sleep(0.5) - self.assertFalse(mach.Lock(exclusive=True)) - p.join() - time.sleep(0.6) - self.assertTrue(mach.Lock(exclusive=True)) - - def testUnlockByOthers(self): - mach = file_lock_machine.Machine('other_unlock', '/tmp', 
auto=True) - p = Process(target=LockAndSleep, args=('other_unlock',)) - p.start() - time.sleep(0.5) - self.assertTrue(mach.Unlock(exclusive=True)) - self.assertTrue(mach.Lock(exclusive=True)) - - -if __name__ == '__main__': - unittest.main() + """Class for testing machine locking.""" + + def setUp(self): + pass + + def testRepeatedUnlock(self): + mach = file_lock_machine.Machine("qqqraymes.mtv", "/tmp") + for _ in range(10): + self.assertTrue(mach.Unlock()) + mach = file_lock_machine.Machine("qqqraymes.mtv", "/tmp", auto=True) + for _ in range(10): + self.assertTrue(mach.Unlock()) + + def testLockUnlock(self): + mach = file_lock_machine.Machine("otter.mtv", "/tmp") + for _ in range(10): + self.assertTrue(mach.Lock(exclusive=True)) + self.assertTrue(mach.Unlock(exclusive=True)) + + mach = file_lock_machine.Machine("otter.mtv", "/tmp", True) + for _ in range(10): + self.assertTrue(mach.Lock(exclusive=True)) + self.assertTrue(mach.Unlock(exclusive=True)) + + def testSharedLock(self): + mach = file_lock_machine.Machine("chrotomation.mtv", "/tmp") + for _ in range(10): + self.assertTrue(mach.Lock(exclusive=False)) + for _ in range(10): + self.assertTrue(mach.Unlock(exclusive=False)) + self.assertTrue(mach.Lock(exclusive=True)) + self.assertTrue(mach.Unlock(exclusive=True)) + + mach = file_lock_machine.Machine("chrotomation.mtv", "/tmp", auto=True) + for _ in range(10): + self.assertTrue(mach.Lock(exclusive=False)) + for _ in range(10): + self.assertTrue(mach.Unlock(exclusive=False)) + self.assertTrue(mach.Lock(exclusive=True)) + self.assertTrue(mach.Unlock(exclusive=True)) + + def testExclusiveLock(self): + mach = file_lock_machine.Machine("atree.mtv", "/tmp") + self.assertTrue(mach.Lock(exclusive=True)) + for _ in range(10): + self.assertFalse(mach.Lock(exclusive=True)) + self.assertFalse(mach.Lock(exclusive=False)) + self.assertTrue(mach.Unlock(exclusive=True)) + + mach = file_lock_machine.Machine("atree.mtv", "/tmp", auto=True) + self.assertTrue(mach.Lock(exclusive=True)) + for _ in range(10): + self.assertFalse(mach.Lock(exclusive=True)) + self.assertFalse(mach.Lock(exclusive=False)) + self.assertTrue(mach.Unlock(exclusive=True)) + + def testExclusiveState(self): + mach = file_lock_machine.Machine("testExclusiveState", "/tmp") + self.assertTrue(mach.Lock(exclusive=True)) + for _ in range(10): + self.assertFalse(mach.Lock(exclusive=False)) + self.assertTrue(mach.Unlock(exclusive=True)) + + mach = file_lock_machine.Machine( + "testExclusiveState", "/tmp", auto=True + ) + self.assertTrue(mach.Lock(exclusive=True)) + for _ in range(10): + self.assertFalse(mach.Lock(exclusive=False)) + self.assertTrue(mach.Unlock(exclusive=True)) + + def testAutoLockGone(self): + mach = file_lock_machine.Machine("lockgone", "/tmp", auto=True) + p = Process(target=LockAndSleep, args=("lockgone",)) + p.start() + time.sleep(1.1) + p.join() + self.assertTrue(mach.Lock(exclusive=True)) + + def testAutoLockFromOther(self): + mach = file_lock_machine.Machine("other_lock", "/tmp", auto=True) + p = Process(target=LockAndSleep, args=("other_lock",)) + p.start() + time.sleep(0.5) + self.assertFalse(mach.Lock(exclusive=True)) + p.join() + time.sleep(0.6) + self.assertTrue(mach.Lock(exclusive=True)) + + def testUnlockByOthers(self): + mach = file_lock_machine.Machine("other_unlock", "/tmp", auto=True) + p = Process(target=LockAndSleep, args=("other_unlock",)) + p.start() + time.sleep(0.5) + self.assertTrue(mach.Unlock(exclusive=True)) + self.assertTrue(mach.Lock(exclusive=True)) + + +if __name__ == "__main__": + 
unittest.main() diff --git a/go/chromeos/setup_chromeos_testing.py b/go/chromeos/setup_chromeos_testing.py index cbb8bc29..53254a99 100755 --- a/go/chromeos/setup_chromeos_testing.py +++ b/go/chromeos/setup_chromeos_testing.py @@ -18,15 +18,15 @@ from cros_utils import command_executer SUCCESS = 0 DEBUG = False -ARCH_DATA = {'x86_64': 'amd64', 'arm32': 'arm', 'arm64': 'arm64'} +ARCH_DATA = {"x86_64": "amd64", "arm32": "arm", "arm64": "arm64"} CROS_TOOLCHAIN_DATA = { - 'x86_64': 'x86_64-cros-linux-gnu', - 'arm32': 'armv7a-cros-linux-gnueabihf', - 'arm64': 'aarch64-cros-linux-gnu' + "x86_64": "x86_64-cros-linux-gnu", + "arm32": "armv7a-cros-linux-gnueabihf", + "arm64": "aarch64-cros-linux-gnu", } -GLIBC_DATA = {'x86_64': 'glibc', 'arm32': 'glibc32', 'arm64': 'glibc'} +GLIBC_DATA = {"x86_64": "glibc", "arm32": "glibc32", "arm64": "glibc"} CONFIG_TEMPLATE = """ Host %s @@ -70,183 +70,214 @@ GOOS="linux" GOARCH="%s" \\ def log(msg): - if DEBUG: - print(msg) + if DEBUG: + print(msg) def WriteFile(file_content, file_name): - with open(file_name, 'w', encoding='utf-8') as out_file: - out_file.write(file_content) + with open(file_name, "w", encoding="utf-8") as out_file: + out_file.write(file_content) def GenerateGoHelperScripts(ce, arm_board, x86_board, chromeos_root): - keys = ['x86_64', 'arm32', 'arm64'] - names = { - 'x86_64': x86_board, - 'arm64': arm_board, - 'arm32': ('%s32' % arm_board) - } - - toolchain_dir = os.path.join(chromeos_root, 'src', 'third_party', - 'toolchain-utils', 'go', 'chromeos') - for k in keys: - name = names[k] - arch = ARCH_DATA[k] - toolchain = CROS_TOOLCHAIN_DATA[k] - glibc = GLIBC_DATA[k] - - base_file = os.path.join(toolchain_dir, ('go_%s' % name)) - base_file_content = BASE_TEMPLATE % (name, arch, arch, toolchain, toolchain, - toolchain) - WriteFile(base_file_content, base_file) - cmd = 'chmod 755 %s' % base_file - ce.RunCommand(cmd) - - exec_file = os.path.join(toolchain_dir, ('go_%s_exec' % name)) - exec_file_content = EXEC_TEMPLATE % (name, arch, glibc, name) - WriteFile(exec_file_content, exec_file) - cmd = 'chmod 755 %s' % exec_file - ce.RunCommand(cmd) - - return 0 - - -def UpdateChrootSshConfig(ce, arm_board, arm_dut, x86_board, x86_dut, - chromeos_root): - log('Entering UpdateChrootSshConfig') - # Copy testing_rsa to .ssh and set file protections properly. - user = getpass.getuser() - ssh_dir = os.path.join(chromeos_root, 'chroot', 'home', user, '.ssh') - dest_file = os.path.join(ssh_dir, 'testing_rsa') - src_file = os.path.join(chromeos_root, 'src', 'scripts', - 'mod_for_test_scripts', 'testing_rsa') - if not os.path.exists(dest_file): - if os.path.exists(src_file): - cmd = 'cp %s %s' % (src_file, dest_file) - ret = ce.RunCommand(cmd) - if ret != SUCCESS: - print('Error executing "%s". Exiting now...' % cmd) - sys.exit(1) - cmd = 'chmod 600 %s' % dest_file - ret = ce.RunCommand(cmd) - if ret != SUCCESS: - print('Error executing %s; may need to re-run this manually.' 
% cmd) + keys = ["x86_64", "arm32", "arm64"] + names = { + "x86_64": x86_board, + "arm64": arm_board, + "arm32": ("%s32" % arm_board), + } + + toolchain_dir = os.path.join( + chromeos_root, "src", "third_party", "toolchain-utils", "go", "chromeos" + ) + for k in keys: + name = names[k] + arch = ARCH_DATA[k] + toolchain = CROS_TOOLCHAIN_DATA[k] + glibc = GLIBC_DATA[k] + + base_file = os.path.join(toolchain_dir, ("go_%s" % name)) + base_file_content = BASE_TEMPLATE % ( + name, + arch, + arch, + toolchain, + toolchain, + toolchain, + ) + WriteFile(base_file_content, base_file) + cmd = "chmod 755 %s" % base_file + ce.RunCommand(cmd) + + exec_file = os.path.join(toolchain_dir, ("go_%s_exec" % name)) + exec_file_content = EXEC_TEMPLATE % (name, arch, glibc, name) + WriteFile(exec_file_content, exec_file) + cmd = "chmod 755 %s" % exec_file + ce.RunCommand(cmd) + + return 0 + + +def UpdateChrootSshConfig( + ce, arm_board, arm_dut, x86_board, x86_dut, chromeos_root +): + log("Entering UpdateChrootSshConfig") + # Copy testing_rsa to .ssh and set file protections properly. + user = getpass.getuser() + ssh_dir = os.path.join(chromeos_root, "chroot", "home", user, ".ssh") + dest_file = os.path.join(ssh_dir, "testing_rsa") + src_file = os.path.join( + chromeos_root, "src", "scripts", "mod_for_test_scripts", "testing_rsa" + ) + if not os.path.exists(dest_file): + if os.path.exists(src_file): + cmd = "cp %s %s" % (src_file, dest_file) + ret = ce.RunCommand(cmd) + if ret != SUCCESS: + print('Error executing "%s". Exiting now...' % cmd) + sys.exit(1) + cmd = "chmod 600 %s" % dest_file + ret = ce.RunCommand(cmd) + if ret != SUCCESS: + print( + "Error executing %s; may need to re-run this manually." + % cmd + ) + else: + print( + "Cannot find %s; you will need to update testing_rsa by hand." + % src_file + ) else: - print('Cannot find %s; you will need to update testing_rsa by hand.' % - src_file) - else: - log('testing_rsa exists already.') + log("testing_rsa exists already.") - # Save ~/.ssh/config file, if not already done. - config_file = os.path.expanduser('~/.ssh/config') - saved_config_file = os.path.join( - os.path.expanduser('~/.ssh'), 'config.save.go-scripts') - if not os.path.exists(saved_config_file): - cmd = 'cp %s %s' % (config_file, saved_config_file) - ret = ce.RunCommand(cmd) - if ret != SUCCESS: - print('Error making save copy of ~/.ssh/config. Exiting...') - sys.exit(1) + # Save ~/.ssh/config file, if not already done. + config_file = os.path.expanduser("~/.ssh/config") + saved_config_file = os.path.join( + os.path.expanduser("~/.ssh"), "config.save.go-scripts" + ) + if not os.path.exists(saved_config_file): + cmd = "cp %s %s" % (config_file, saved_config_file) + ret = ce.RunCommand(cmd) + if ret != SUCCESS: + print("Error making save copy of ~/.ssh/config. 
Exiting...") + sys.exit(1) - # Update ~/.ssh/config file - log('Reading ssh config file') - with open(config_file, 'r') as input_file: - config_lines = input_file.read() + # Update ~/.ssh/config file + log("Reading ssh config file") + with open(config_file, "r") as input_file: + config_lines = input_file.read() - x86_host_config = CONFIG_TEMPLATE % (x86_board, x86_dut) - arm_names = '%s %s32' % (arm_board, arm_board) - arm_host_config = CONFIG_TEMPLATE % (arm_names, arm_dut) + x86_host_config = CONFIG_TEMPLATE % (x86_board, x86_dut) + arm_names = "%s %s32" % (arm_board, arm_board) + arm_host_config = CONFIG_TEMPLATE % (arm_names, arm_dut) - config_lines += x86_host_config - config_lines += arm_host_config + config_lines += x86_host_config + config_lines += arm_host_config - log('Writing ~/.ssh/config') - WriteFile(config_lines, config_file) + log("Writing ~/.ssh/config") + WriteFile(config_lines, config_file) - return 0 + return 0 def CleanUp(ce, x86_board, arm_board, chromeos_root): - # Find and remove go helper scripts - keys = ['x86_64', 'arm32', 'arm64'] - names = { - 'x86_64': x86_board, - 'arm64': arm_board, - 'arm32': ('%s32' % arm_board) - } - - toolchain_dir = os.path.join(chromeos_root, 'src', 'third_party', - 'toolchain-utils', 'go', 'chromeos') - for k in keys: - name = names[k] - base_file = os.path.join(toolchain_dir, ('go_%s' % name)) - exec_file = os.path.join(toolchain_dir, ('go_%s_exec' % name)) - cmd = ('rm -f %s; rm -f %s' % (base_file, exec_file)) - ce.RunCommand(cmd) - - # Restore saved config_file - config_file = os.path.expanduser('~/.ssh/config') - saved_config_file = os.path.join( - os.path.expanduser('~/.ssh'), 'config.save.go-scripts') - if not os.path.exists(saved_config_file): - print('Could not find file: %s; unable to restore ~/.ssh/config .' % - saved_config_file) - else: - cmd = 'mv %s %s' % (saved_config_file, config_file) - ce.RunCommand(cmd) - - return 0 + # Find and remove go helper scripts + keys = ["x86_64", "arm32", "arm64"] + names = { + "x86_64": x86_board, + "arm64": arm_board, + "arm32": ("%s32" % arm_board), + } + + toolchain_dir = os.path.join( + chromeos_root, "src", "third_party", "toolchain-utils", "go", "chromeos" + ) + for k in keys: + name = names[k] + base_file = os.path.join(toolchain_dir, ("go_%s" % name)) + exec_file = os.path.join(toolchain_dir, ("go_%s_exec" % name)) + cmd = "rm -f %s; rm -f %s" % (base_file, exec_file) + ce.RunCommand(cmd) + + # Restore saved config_file + config_file = os.path.expanduser("~/.ssh/config") + saved_config_file = os.path.join( + os.path.expanduser("~/.ssh"), "config.save.go-scripts" + ) + if not os.path.exists(saved_config_file): + print( + "Could not find file: %s; unable to restore ~/.ssh/config ." 
+ % saved_config_file + ) + else: + cmd = "mv %s %s" % (saved_config_file, config_file) + ce.RunCommand(cmd) + + return 0 def Main(argv): - # pylint: disable=global-statement - global DEBUG - - parser = argparse.ArgumentParser() - parser.add_argument('-a', '--arm64_board', dest='arm_board', required=True) - parser.add_argument( - '-b', '--x86_64_board', dest='x86_64_board', required=True) - parser.add_argument( - '-c', '--chromeos_root', dest='chromeos_root', required=True) - parser.add_argument('-x', '--x86_64_dut', dest='x86_64_dut', required=True) - parser.add_argument('-y', '--arm64_dut', dest='arm_dut', required=True) - parser.add_argument( - '-z', '--cleanup', dest='cleanup', default=False, action='store_true') - parser.add_argument( - '-v', '--verbose', dest='verbose', default=False, action='store_true') - - options = parser.parse_args(argv[1:]) - - if options.verbose: - DEBUG = True - - if not os.path.exists(options.chromeos_root): - print('Invalid ChromeOS Root: %s' % options.chromeos_root) - - ce = command_executer.GetCommandExecuter() - all_good = True - for m in (options.x86_64_dut, options.arm_dut): - cmd = 'ping -c 3 %s > /dev/null' % m - ret = ce.RunCommand(cmd) - if ret != SUCCESS: - print('Machine %s is currently not responding to ping.' % m) - all_good = False - - if not all_good: - return 1 - - if not options.cleanup: - UpdateChrootSshConfig(ce, options.arm_board, options.arm_dut, - options.x86_64_board, options.x86_64_dut, - options.chromeos_root) - GenerateGoHelperScripts(ce, options.arm_board, options.x86_64_board, - options.chromeos_root) - else: - CleanUp(ce, options.x86_64_board, options.arm_board, options.chromeos_root) - - return 0 - - -if __name__ == '__main__': - val = Main(sys.argv) - sys.exit(val) + # pylint: disable=global-statement + global DEBUG + + parser = argparse.ArgumentParser() + parser.add_argument("-a", "--arm64_board", dest="arm_board", required=True) + parser.add_argument( + "-b", "--x86_64_board", dest="x86_64_board", required=True + ) + parser.add_argument( + "-c", "--chromeos_root", dest="chromeos_root", required=True + ) + parser.add_argument("-x", "--x86_64_dut", dest="x86_64_dut", required=True) + parser.add_argument("-y", "--arm64_dut", dest="arm_dut", required=True) + parser.add_argument( + "-z", "--cleanup", dest="cleanup", default=False, action="store_true" + ) + parser.add_argument( + "-v", "--verbose", dest="verbose", default=False, action="store_true" + ) + + options = parser.parse_args(argv[1:]) + + if options.verbose: + DEBUG = True + + if not os.path.exists(options.chromeos_root): + print("Invalid ChromeOS Root: %s" % options.chromeos_root) + + ce = command_executer.GetCommandExecuter() + all_good = True + for m in (options.x86_64_dut, options.arm_dut): + cmd = "ping -c 3 %s > /dev/null" % m + ret = ce.RunCommand(cmd) + if ret != SUCCESS: + print("Machine %s is currently not responding to ping." 
% m) + all_good = False + + if not all_good: + return 1 + + if not options.cleanup: + UpdateChrootSshConfig( + ce, + options.arm_board, + options.arm_dut, + options.x86_64_board, + options.x86_64_dut, + options.chromeos_root, + ) + GenerateGoHelperScripts( + ce, options.arm_board, options.x86_64_board, options.chromeos_root + ) + else: + CleanUp( + ce, options.x86_64_board, options.arm_board, options.chromeos_root + ) + + return 0 + + +if __name__ == "__main__": + val = Main(sys.argv) + sys.exit(val) diff --git a/heatmaps/heat_map.py b/heatmaps/heat_map.py index 64067b61..c4e43fdc 100755 --- a/heatmaps/heat_map.py +++ b/heatmaps/heat_map.py @@ -19,167 +19,189 @@ from heatmaps import heatmap_generator def IsARepoRoot(directory): - """Returns True if directory is the root of a repo checkout.""" - return os.path.exists( - os.path.join(os.path.realpath(os.path.expanduser(directory)), '.repo')) + """Returns True if directory is the root of a repo checkout.""" + return os.path.exists( + os.path.join(os.path.realpath(os.path.expanduser(directory)), ".repo") + ) class HeatMapProducer(object): - """Class to produce heat map.""" - - def __init__(self, - chromeos_root, - perf_data, - hugepage, - binary, - title, - logger=None): - self.chromeos_root = os.path.realpath(os.path.expanduser(chromeos_root)) - self.perf_data = os.path.realpath(os.path.expanduser(perf_data)) - self.hugepage = hugepage - self.dir = os.path.dirname(os.path.realpath(__file__)) - self.binary = binary - self.ce = command_executer.GetCommandExecuter() - self.temp_dir = '' - self.temp_perf_inchroot = '' - self.temp_dir_created = False - self.perf_report = '' - self.title = title - self.logger = logger - - def _EnsureFileInChroot(self): - chroot_prefix = os.path.join(self.chromeos_root, 'chroot') - if self.perf_data.startswith(chroot_prefix): - # If the path to perf_data starts with the same chromeos_root, assume - # it's in the chromeos_root so no need for temporary directory and copy. - self.temp_dir = self.perf_data.replace('perf.data', '') - self.temp_perf_inchroot = self.temp_dir.replace(chroot_prefix, '') - - else: - # Otherwise, create a temporary directory and copy perf.data into chroot. 
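
_EnsureFileInChroot encodes a simple path rule: a file already under <chromeos_root>/chroot is visible inside the chroot at the same path minus that prefix, while anything else must first be copied under src/, which the chroot sees as ~/trunk/src. A sketch of the first half of that rule, with illustrative names:

    import os

    def inchroot_path(chromeos_root, host_path):
        # Strip the chroot prefix:
        # '/cros/chroot/tmp/perf.data' -> '/tmp/perf.data'
        chroot_prefix = os.path.join(chromeos_root, "chroot")
        if host_path.startswith(chroot_prefix):
            return host_path[len(chroot_prefix):]
        # Outside the chroot: the caller must copy the file under
        # <chromeos_root>/src, reachable inside as ~/trunk/src.
        raise ValueError("file not visible inside the chroot: %s" % host_path)
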
- self.temp_dir = tempfile.mkdtemp( - prefix=os.path.join(self.chromeos_root, 'src/')) - temp_perf = os.path.join(self.temp_dir, 'perf.data') - shutil.copy2(self.perf_data, temp_perf) - self.temp_perf_inchroot = os.path.join('~/trunk/src', - os.path.basename(self.temp_dir)) - self.temp_dir_created = True - - def _GeneratePerfReport(self): - cmd = ('cd %s && perf report -D -i perf.data > perf_report.txt' % - self.temp_perf_inchroot) - retval = self.ce.ChrootRunCommand(self.chromeos_root, cmd) - if retval: - raise RuntimeError('Failed to generate perf report') - self.perf_report = os.path.join(self.temp_dir, 'perf_report.txt') - - def _GetHeatMap(self, top_n_pages): - generator = heatmap_generator.HeatmapGenerator( - perf_report=self.perf_report, - page_size=4096, - hugepage=self.hugepage, - title=self.title) - generator.draw() - # Analyze top N hottest symbols with the binary, if provided - if self.binary: - generator.analyze(self.binary, top_n_pages) - - def _RemoveFiles(self): - files = [ - 'out.txt', 'inst-histo.txt', 'inst-histo-hp.txt', 'inst-histo-sp.txt' - ] - for f in files: - if os.path.exists(f): - os.remove(f) - - def Run(self, top_n_pages): - try: - self._EnsureFileInChroot() - self._GeneratePerfReport() - self._GetHeatMap(top_n_pages) - finally: - self._RemoveFiles() - msg = ('heat map and time histogram genereated in the current ' - 'directory with name heat_map.png and timeline.png ' - 'accordingly.') - if self.binary: - msg += ('\nThe hottest %d pages inside and outside hugepage ' - 'is symbolized and saved to addr2symbol.txt' % top_n_pages) - if self.logger: - self.logger.LogOutput(msg) - else: - print(msg) + """Class to produce heat map.""" + + def __init__( + self, chromeos_root, perf_data, hugepage, binary, title, logger=None + ): + self.chromeos_root = os.path.realpath(os.path.expanduser(chromeos_root)) + self.perf_data = os.path.realpath(os.path.expanduser(perf_data)) + self.hugepage = hugepage + self.dir = os.path.dirname(os.path.realpath(__file__)) + self.binary = binary + self.ce = command_executer.GetCommandExecuter() + self.temp_dir = "" + self.temp_perf_inchroot = "" + self.temp_dir_created = False + self.perf_report = "" + self.title = title + self.logger = logger + + def _EnsureFileInChroot(self): + chroot_prefix = os.path.join(self.chromeos_root, "chroot") + if self.perf_data.startswith(chroot_prefix): + # If the path to perf_data starts with the same chromeos_root, assume + # it's in the chromeos_root so no need for temporary directory and copy. + self.temp_dir = self.perf_data.replace("perf.data", "") + self.temp_perf_inchroot = self.temp_dir.replace(chroot_prefix, "") + + else: + # Otherwise, create a temporary directory and copy perf.data into chroot. 
+ self.temp_dir = tempfile.mkdtemp( + prefix=os.path.join(self.chromeos_root, "src/") + ) + temp_perf = os.path.join(self.temp_dir, "perf.data") + shutil.copy2(self.perf_data, temp_perf) + self.temp_perf_inchroot = os.path.join( + "~/trunk/src", os.path.basename(self.temp_dir) + ) + self.temp_dir_created = True + + def _GeneratePerfReport(self): + cmd = ( + "cd %s && perf report -D -i perf.data > perf_report.txt" + % self.temp_perf_inchroot + ) + retval = self.ce.ChrootRunCommand(self.chromeos_root, cmd) + if retval: + raise RuntimeError("Failed to generate perf report") + self.perf_report = os.path.join(self.temp_dir, "perf_report.txt") + + def _GetHeatMap(self, top_n_pages): + generator = heatmap_generator.HeatmapGenerator( + perf_report=self.perf_report, + page_size=4096, + hugepage=self.hugepage, + title=self.title, + ) + generator.draw() + # Analyze top N hottest symbols with the binary, if provided + if self.binary: + generator.analyze(self.binary, top_n_pages) + + def _RemoveFiles(self): + files = [ + "out.txt", + "inst-histo.txt", + "inst-histo-hp.txt", + "inst-histo-sp.txt", + ] + for f in files: + if os.path.exists(f): + os.remove(f) + + def Run(self, top_n_pages): + try: + self._EnsureFileInChroot() + self._GeneratePerfReport() + self._GetHeatMap(top_n_pages) + finally: + self._RemoveFiles() + msg = ( + "heat map and time histogram genereated in the current " + "directory with name heat_map.png and timeline.png " + "accordingly." + ) + if self.binary: + msg += ( + "\nThe hottest %d pages inside and outside hugepage " + "is symbolized and saved to addr2symbol.txt" % top_n_pages + ) + if self.logger: + self.logger.LogOutput(msg) + else: + print(msg) def main(argv): - """Parse the options. - - Args: - argv: The options with which this script was invoked. - - Returns: - 0 unless an exception is raised. - """ - parser = argparse.ArgumentParser() - - parser.add_argument( - '--chromeos_root', - dest='chromeos_root', - required=True, - help='ChromeOS root to use for generate heatmaps.') - parser.add_argument( - '--perf_data', - dest='perf_data', - required=True, - help='The raw perf data. Must be collected with -e instructions while ' - 'disabling ASLR.') - parser.add_argument( - '--binary', - dest='binary', - help='The path to the Chrome binary. Only useful if want to print ' - 'symbols on hottest pages', - default=None) - parser.add_argument( - '--top_n', - dest='top_n', - type=int, - default=10, - help='Print out top N hottest pages within/outside huge page range. ' - 'Must be used with --hugepage and --binary. (Default: %(default)s)') - parser.add_argument( - '--title', dest='title', help='Title of the heatmap', default='') - parser.add_argument( - '--hugepage', - dest='hugepage', - help='A range of addresses (start,end) where huge page starts and ends' - ' in text section, separated by a comma.' - ' Used to differentiate regions in heatmap.' - ' Example: --hugepage=0,4096' - ' If not specified, no effect on the heatmap.', - default=None) - - options = parser.parse_args(argv) - - if not IsARepoRoot(options.chromeos_root): - parser.error('%s does not contain .repo dir.' % options.chromeos_root) - - if not os.path.isfile(options.perf_data): - parser.error('Cannot find perf_data: %s.' 
% options.perf_data) - - hugepage_range = None - if options.hugepage: - hugepage_range = options.hugepage.split(',') - if len(hugepage_range) != 2 or \ - int(hugepage_range[0]) > int(hugepage_range[1]): - parser.error('Wrong format of hugepage range: %s' % options.hugepage) - hugepage_range = [int(x) for x in hugepage_range] - - heatmap_producer = HeatMapProducer(options.chromeos_root, options.perf_data, - hugepage_range, options.binary, - options.title) - - heatmap_producer.Run(options.top_n) - - -if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) + """Parse the options. + + Args: + argv: The options with which this script was invoked. + + Returns: + 0 unless an exception is raised. + """ + parser = argparse.ArgumentParser() + + parser.add_argument( + "--chromeos_root", + dest="chromeos_root", + required=True, + help="ChromeOS root to use for generate heatmaps.", + ) + parser.add_argument( + "--perf_data", + dest="perf_data", + required=True, + help="The raw perf data. Must be collected with -e instructions while " + "disabling ASLR.", + ) + parser.add_argument( + "--binary", + dest="binary", + help="The path to the Chrome binary. Only useful if want to print " + "symbols on hottest pages", + default=None, + ) + parser.add_argument( + "--top_n", + dest="top_n", + type=int, + default=10, + help="Print out top N hottest pages within/outside huge page range. " + "Must be used with --hugepage and --binary. (Default: %(default)s)", + ) + parser.add_argument( + "--title", dest="title", help="Title of the heatmap", default="" + ) + parser.add_argument( + "--hugepage", + dest="hugepage", + help="A range of addresses (start,end) where huge page starts and ends" + " in text section, separated by a comma." + " Used to differentiate regions in heatmap." + " Example: --hugepage=0,4096" + " If not specified, no effect on the heatmap.", + default=None, + ) + + options = parser.parse_args(argv) + + if not IsARepoRoot(options.chromeos_root): + parser.error("%s does not contain .repo dir." % options.chromeos_root) + + if not os.path.isfile(options.perf_data): + parser.error("Cannot find perf_data: %s." 
% options.perf_data) + + hugepage_range = None + if options.hugepage: + hugepage_range = options.hugepage.split(",") + if len(hugepage_range) != 2 or int(hugepage_range[0]) > int( + hugepage_range[1] + ): + parser.error( + "Wrong format of hugepage range: %s" % options.hugepage + ) + hugepage_range = [int(x) for x in hugepage_range] + + heatmap_producer = HeatMapProducer( + options.chromeos_root, + options.perf_data, + hugepage_range, + options.binary, + options.title, + ) + + heatmap_producer.Run(options.top_n) + + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/heatmaps/heat_map_test.py b/heatmaps/heat_map_test.py index aabb3cac..0d3ca4e2 100755 --- a/heatmaps/heat_map_test.py +++ b/heatmaps/heat_map_test.py @@ -9,150 +9,172 @@ from __future__ import print_function -import unittest.mock as mock -import unittest - import os +import unittest +import unittest.mock as mock from cros_utils import command_executer - from heatmaps import heat_map from heatmaps import heatmap_generator -def make_heatmap(chromeos_root='/path/to/fake/chromeos_root/', - perf_data='/any_path/perf.data'): - return heat_map.HeatMapProducer(chromeos_root, perf_data, None, None, '') +def make_heatmap( + chromeos_root="/path/to/fake/chromeos_root/", + perf_data="/any_path/perf.data", +): + return heat_map.HeatMapProducer(chromeos_root, perf_data, None, None, "") def fake_mkdtemp(prefix): - """Mock tempfile.mkdtemp() by just create a pathname.""" - return prefix + 'random_dir' + """Mock tempfile.mkdtemp() by just create a pathname.""" + return prefix + "random_dir" def fake_parser_error(_, msg): - """Redirect parser.error() to exception.""" - raise Exception(msg) + """Redirect parser.error() to exception.""" + raise Exception(msg) def fake_generate_perf_report_exception(_): - raise Exception + raise Exception class HeatmapTest(unittest.TestCase): - """All of our tests for heat_map.""" - - # pylint: disable=protected-access - @mock.patch('shutil.copy2') - @mock.patch('tempfile.mkdtemp') - def test_EnsureFileInChrootAlreadyInside(self, mock_mkdtemp, mock_copy): - perf_data_inchroot = ( - '/path/to/fake/chromeos_root/chroot/inchroot_path/perf.data') - heatmap = make_heatmap(perf_data=perf_data_inchroot) - heatmap._EnsureFileInChroot() - self.assertFalse(heatmap.temp_dir_created) - self.assertEqual(heatmap.temp_dir, - '/path/to/fake/chromeos_root/chroot/inchroot_path/') - self.assertEqual(heatmap.temp_perf_inchroot, '/inchroot_path/') - mock_mkdtemp.assert_not_called() - mock_copy.assert_not_called() - - @mock.patch('shutil.copy2') - @mock.patch('tempfile.mkdtemp', fake_mkdtemp) - def test_EnsureFileInChrootOutsideNeedCopy(self, mock_copy): - heatmap = make_heatmap() - heatmap._EnsureFileInChroot() - self.assertTrue(heatmap.temp_dir_created) - self.assertEqual(mock_copy.call_count, 1) - self.assertEqual(heatmap.temp_dir, - '/path/to/fake/chromeos_root/src/random_dir') - self.assertEqual(heatmap.temp_perf_inchroot, '~/trunk/src/random_dir') - - @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand') - def test_GeneratePerfReport(self, mock_ChrootRunCommand): - heatmap = make_heatmap() - heatmap.temp_dir = '/fake/chroot/inchroot_path/' - heatmap.temp_perf_inchroot = '/inchroot_path/' - mock_ChrootRunCommand.return_value = 0 - heatmap._GeneratePerfReport() - cmd = ('cd %s && perf report -D -i perf.data > perf_report.txt' % - heatmap.temp_perf_inchroot) - mock_ChrootRunCommand.assert_called_with(heatmap.chromeos_root, cmd) - self.assertEqual(mock_ChrootRunCommand.call_count, 1) - 
self.assertEqual(heatmap.perf_report, - '/fake/chroot/inchroot_path/perf_report.txt') - - @mock.patch.object(heatmap_generator, 'HeatmapGenerator') - def test_GetHeatMap(self, mock_heatmap_generator): - heatmap = make_heatmap() - heatmap._GetHeatMap(10) - self.assertTrue(mock_heatmap_generator.called) - - @mock.patch.object(heat_map.HeatMapProducer, '_EnsureFileInChroot') - @mock.patch.object(heat_map.HeatMapProducer, '_GeneratePerfReport') - @mock.patch.object(heat_map.HeatMapProducer, '_GetHeatMap') - @mock.patch.object(heat_map.HeatMapProducer, '_RemoveFiles') - def test_Run(self, mock_remove_files, mock_get_heatmap, - mock_generate_perf_report, mock_ensure_file_in_chroot): - heatmap = make_heatmap() - heatmap.Run(10) - mock_ensure_file_in_chroot.assert_called_once_with() - mock_generate_perf_report.assert_called_once_with() - mock_get_heatmap.assert_called_once_with(10) - mock_remove_files.assert_called_once_with() - - @mock.patch.object(heat_map.HeatMapProducer, '_EnsureFileInChroot') - @mock.patch.object( - heat_map.HeatMapProducer, - '_GeneratePerfReport', - new=fake_generate_perf_report_exception) - @mock.patch.object(heat_map.HeatMapProducer, '_GetHeatMap') - @mock.patch.object(heat_map.HeatMapProducer, '_RemoveFiles') - @mock.patch('builtins.print') - def test_Run_with_exception(self, mock_print, mock_remove_files, - mock_get_heatmap, mock_ensure_file_in_chroot): - heatmap = make_heatmap() - with self.assertRaises(Exception): - heatmap.Run(10) - mock_ensure_file_in_chroot.assert_called_once_with() - mock_get_heatmap.assert_not_called() - mock_remove_files.assert_called_once_with() - mock_print.assert_not_called() - - @mock.patch('argparse.ArgumentParser.error', fake_parser_error) - @mock.patch.object(os.path, 'isfile') - @mock.patch.object(heat_map, 'IsARepoRoot') - def test_main_arg_format(self, mock_IsARepoRoot, mock_isfile): - """Test wrong arg format are detected.""" - args = ['--chromeos_root=/fake/chroot/', '--perf_data=/path/to/perf.data'] - - # Test --chromeos_root format - mock_IsARepoRoot.return_value = False - with self.assertRaises(Exception) as msg: - heat_map.main(args) - self.assertIn('does not contain .repo dir.', str(msg.exception)) - - # Test --perf_data format - mock_IsARepoRoot.return_value = True - mock_isfile.return_value = False - with self.assertRaises(Exception) as msg: - heat_map.main(args) - self.assertIn('Cannot find perf_data', str(msg.exception)) - - # Test --hugepage format - mock_isfile.return_value = True - args.append('--hugepage=0') - with self.assertRaises(Exception) as msg: - heat_map.main(args) - self.assertIn('Wrong format of hugepage range', str(msg.exception)) - - # Test --hugepage parse - args[-1] = '--hugepage=0,4096' - heat_map.HeatMapProducer = mock.MagicMock() - heat_map.main(args) - heat_map.HeatMapProducer.assert_called_with( - '/fake/chroot/', '/path/to/perf.data', [0, 4096], None, '') - - -if __name__ == '__main__': - unittest.main() + """All of our tests for heat_map.""" + + # pylint: disable=protected-access + @mock.patch("shutil.copy2") + @mock.patch("tempfile.mkdtemp") + def test_EnsureFileInChrootAlreadyInside(self, mock_mkdtemp, mock_copy): + perf_data_inchroot = ( + "/path/to/fake/chromeos_root/chroot/inchroot_path/perf.data" + ) + heatmap = make_heatmap(perf_data=perf_data_inchroot) + heatmap._EnsureFileInChroot() + self.assertFalse(heatmap.temp_dir_created) + self.assertEqual( + heatmap.temp_dir, + "/path/to/fake/chromeos_root/chroot/inchroot_path/", + ) + self.assertEqual(heatmap.temp_perf_inchroot, "/inchroot_path/") + 
mock_mkdtemp.assert_not_called() + mock_copy.assert_not_called() + + @mock.patch("shutil.copy2") + @mock.patch("tempfile.mkdtemp", fake_mkdtemp) + def test_EnsureFileInChrootOutsideNeedCopy(self, mock_copy): + heatmap = make_heatmap() + heatmap._EnsureFileInChroot() + self.assertTrue(heatmap.temp_dir_created) + self.assertEqual(mock_copy.call_count, 1) + self.assertEqual( + heatmap.temp_dir, "/path/to/fake/chromeos_root/src/random_dir" + ) + self.assertEqual(heatmap.temp_perf_inchroot, "~/trunk/src/random_dir") + + @mock.patch.object(command_executer.CommandExecuter, "ChrootRunCommand") + def test_GeneratePerfReport(self, mock_ChrootRunCommand): + heatmap = make_heatmap() + heatmap.temp_dir = "/fake/chroot/inchroot_path/" + heatmap.temp_perf_inchroot = "/inchroot_path/" + mock_ChrootRunCommand.return_value = 0 + heatmap._GeneratePerfReport() + cmd = ( + "cd %s && perf report -D -i perf.data > perf_report.txt" + % heatmap.temp_perf_inchroot + ) + mock_ChrootRunCommand.assert_called_with(heatmap.chromeos_root, cmd) + self.assertEqual(mock_ChrootRunCommand.call_count, 1) + self.assertEqual( + heatmap.perf_report, "/fake/chroot/inchroot_path/perf_report.txt" + ) + + @mock.patch.object(heatmap_generator, "HeatmapGenerator") + def test_GetHeatMap(self, mock_heatmap_generator): + heatmap = make_heatmap() + heatmap._GetHeatMap(10) + self.assertTrue(mock_heatmap_generator.called) + + @mock.patch.object(heat_map.HeatMapProducer, "_EnsureFileInChroot") + @mock.patch.object(heat_map.HeatMapProducer, "_GeneratePerfReport") + @mock.patch.object(heat_map.HeatMapProducer, "_GetHeatMap") + @mock.patch.object(heat_map.HeatMapProducer, "_RemoveFiles") + def test_Run( + self, + mock_remove_files, + mock_get_heatmap, + mock_generate_perf_report, + mock_ensure_file_in_chroot, + ): + heatmap = make_heatmap() + heatmap.Run(10) + mock_ensure_file_in_chroot.assert_called_once_with() + mock_generate_perf_report.assert_called_once_with() + mock_get_heatmap.assert_called_once_with(10) + mock_remove_files.assert_called_once_with() + + @mock.patch.object(heat_map.HeatMapProducer, "_EnsureFileInChroot") + @mock.patch.object( + heat_map.HeatMapProducer, + "_GeneratePerfReport", + new=fake_generate_perf_report_exception, + ) + @mock.patch.object(heat_map.HeatMapProducer, "_GetHeatMap") + @mock.patch.object(heat_map.HeatMapProducer, "_RemoveFiles") + @mock.patch("builtins.print") + def test_Run_with_exception( + self, + mock_print, + mock_remove_files, + mock_get_heatmap, + mock_ensure_file_in_chroot, + ): + heatmap = make_heatmap() + with self.assertRaises(Exception): + heatmap.Run(10) + mock_ensure_file_in_chroot.assert_called_once_with() + mock_get_heatmap.assert_not_called() + mock_remove_files.assert_called_once_with() + mock_print.assert_not_called() + + @mock.patch("argparse.ArgumentParser.error", fake_parser_error) + @mock.patch.object(os.path, "isfile") + @mock.patch.object(heat_map, "IsARepoRoot") + def test_main_arg_format(self, mock_IsARepoRoot, mock_isfile): + """Test wrong arg format are detected.""" + args = [ + "--chromeos_root=/fake/chroot/", + "--perf_data=/path/to/perf.data", + ] + + # Test --chromeos_root format + mock_IsARepoRoot.return_value = False + with self.assertRaises(Exception) as msg: + heat_map.main(args) + self.assertIn("does not contain .repo dir.", str(msg.exception)) + + # Test --perf_data format + mock_IsARepoRoot.return_value = True + mock_isfile.return_value = False + with self.assertRaises(Exception) as msg: + heat_map.main(args) + self.assertIn("Cannot find perf_data", 
str(msg.exception)) + + # Test --hugepage format + mock_isfile.return_value = True + args.append("--hugepage=0") + with self.assertRaises(Exception) as msg: + heat_map.main(args) + self.assertIn("Wrong format of hugepage range", str(msg.exception)) + + # Test --hugepage parse + args[-1] = "--hugepage=0,4096" + heat_map.HeatMapProducer = mock.MagicMock() + heat_map.main(args) + heat_map.HeatMapProducer.assert_called_with( + "/fake/chroot/", "/path/to/perf.data", [0, 4096], None, "" + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/heatmaps/heatmap_generator.py b/heatmaps/heatmap_generator.py index ad1385f1..c139c364 100644 --- a/heatmaps/heatmap_generator.py +++ b/heatmaps/heatmap_generator.py @@ -13,7 +13,8 @@ performed by another script perf-to-inst-page.sh). It can also analyze the symbol names in hot pages. """ -from __future__ import division, print_function +from __future__ import division +from __future__ import print_function import bisect import collections @@ -23,445 +24,503 @@ import subprocess from cros_utils import command_executer -HugepageRange = collections.namedtuple('HugepageRange', ['start', 'end']) +HugepageRange = collections.namedtuple("HugepageRange", ["start", "end"]) -class MMap(object): - """Class to store mmap information in perf report. - - We assume ASLR is disabled, so MMap for all Chrome is assumed to be - the same. This class deals with the case hugepage creates several - mmaps for Chrome but should be merged together. In these case, we - assume the first MMAP is not affected by the bug and use the MMAP. - """ - - def __init__(self, addr, size, offset): - self.start_address = addr - self.size = size - self.offset = offset - - def __str__(self): - return '(%x, %x, %x)' % (self.start_address, self.size, self.offset) - - def merge(self, mmap): - # This function should not be needed, since we should only have - # one MMAP on Chrome of each process. This function only deals with - # images that is affected by http://crbug.com/931465. - # This function is only checking a few conditions to make sure - # the bug is within our expectation. - - if self.start_address == mmap.start_address: - assert self.size >= mmap.size, \ - 'Original MMAP size(%x) is smaller than the forked process(%x).' % ( - self.size, mmap.size) - # The case that the MMAP is forked from the previous process - # No need to do anything, OR - # The case where hugepage causes a small Chrome mmap. - # In this case, we use the prior MMAP for the whole Chrome - return - - assert self.start_address < mmap.start_address, \ - 'Original MMAP starting address(%x) is larger than the forked' \ - 'process(%x).' % (self.start_address, mmap.start_address) - - assert self.start_address + self.size >= mmap.start_address + mmap.size, \ - 'MMAP of the forked process exceeds the end of original MMAP.' +class MMap(object): + """Class to store mmap information in perf report. + + We assume ASLR is disabled, so MMap for all Chrome is assumed to be + the same. This class deals with the case hugepage creates several + mmaps for Chrome but should be merged together. In these case, we + assume the first MMAP is not affected by the bug and use the MMAP. + """ + + def __init__(self, addr, size, offset): + self.start_address = addr + self.size = size + self.offset = offset + + def __str__(self): + return "(%x, %x, %x)" % (self.start_address, self.size, self.offset) + + def merge(self, mmap): + # This function should not be needed, since we should only have + # one MMAP on Chrome of each process. 
This function only deals with + # images that is affected by http://crbug.com/931465. + + # This function is only checking a few conditions to make sure + # the bug is within our expectation. + + if self.start_address == mmap.start_address: + assert ( + self.size >= mmap.size + ), "Original MMAP size(%x) is smaller than the forked process(%x)." % ( + self.size, + mmap.size, + ) + # The case that the MMAP is forked from the previous process + # No need to do anything, OR + # The case where hugepage causes a small Chrome mmap. + # In this case, we use the prior MMAP for the whole Chrome + return + + assert self.start_address < mmap.start_address, ( + "Original MMAP starting address(%x) is larger than the forked" + "process(%x)." % (self.start_address, mmap.start_address) + ) + + assert ( + self.start_address + self.size >= mmap.start_address + mmap.size + ), "MMAP of the forked process exceeds the end of original MMAP." class HeatmapGenerator(object): - """Class to generate heat map with a perf report, containing mmaps and - - samples. This class contains two interfaces with other modules: - draw() and analyze(). - - draw() draws a heatmap with the sample information given in the perf report - analyze() prints out the symbol names in hottest pages with the given - chrome binary - """ - - def __init__(self, - perf_report, - page_size, - hugepage, - title, - log_level='verbose'): - self.perf_report = perf_report - # Pick 1G as a relatively large number. All addresses less than it will - # be recorded. The actual heatmap will show up to a boundary of the - # largest address in text segment. - self.max_addr = 1024 * 1024 * 1024 - self.ce = command_executer.GetCommandExecuter(log_level=log_level) - self.dir = os.path.dirname(os.path.realpath(__file__)) - with open(perf_report, 'r', encoding='utf-8') as f: - self.perf_report_contents = f.readlines() - # Write histogram results to a text file, in order to use gnu plot to draw - self.hist_temp_output = open('out.txt', 'w', encoding='utf-8') - self.processes = {} - self.deleted_processes = {} - self.count = 0 - if hugepage: - self.hugepage = HugepageRange(start=hugepage[0], end=hugepage[1]) - else: - self.hugepage = None - self.title = title - self.symbol_addresses = [] - self.symbol_names = [] - self.page_size = page_size - - def _parse_perf_sample(self, line): - # In a perf report, generated with -D, a PERF_RECORD_SAMPLE command should - # look like this: TODO: some arguments are unknown - # - # cpuid cycle unknown [unknown]: PERF_RECORD_SAMPLE(IP, 0x2): pid/tid: - # 0xaddr period: period addr: addr - # ... thread: threadname:tid - # ...... dso: process - # - # This is an example: - # 1 136712833349 0x6a558 [0x30]: PERF_RECORD_SAMPLE(IP, 0x2): 5227/5227: - # 0x55555683b810 period: 372151 addr: 0 - # ... thread: chrome:5227 - # ...... dso: /opt/google/chrome/chrome - # - # For this function, the 7th argument (args[6]) after spltting with spaces - # is pid/tid. We use the combination of the two as the pid. - # Also, we add an assertion here to check the tid in the 7th argument( - # args[6]) and the 15th argument(arg[14]) are the same - # - # The function returns the ((pid,tid), address) pair if the sampling - # is on Chrome. Otherwise, return (None, None) pair. 
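
The comment block above pins down the whitespace-split field positions the parser depends on. A worked check against the quoted sample record (0-indexed after the split):

    line = ("1 136712833349 0x6a558 [0x30]: PERF_RECORD_SAMPLE(IP, 0x2): "
            "5227/5227: 0x55555683b810 period: 372151 addr: 0")
    args = line.split(" ")
    pid, tid = args[6][:-1].split("/")  # args[6] == "5227/5227:"
    addr = int(args[7], base=16)        # args[7] == "0x55555683b810"
    assert (pid, tid) == ("5227", "5227") and addr == 0x55555683B810
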
- - if 'thread: chrome' not in line or \ - 'dso: /opt/google/chrome/chrome' not in line: - return None, None - args = line.split(' ') - pid_raw = args[6].split('/') - assert pid_raw[1][:-1] == args[14].split(':')[1][:-1], \ - 'TID in %s of sample is not the same: %s/%s' % ( - line[:-1], pid_raw[1][:-1], args[14].split(':')[1][:-1]) - key = (int(pid_raw[0]), int(pid_raw[1][:-1])) - address = int(args[7], base=16) - return key, address - - def _parse_perf_record(self, line): - # In a perf report, generated with -D, a PERF_RECORD_MMAP2 command should - # look like this: TODO: some arguments are unknown - # - # cpuid cycle unknown [unknown]: PERF_RECORD_MMAP2 pid/tid: - # [0xaddr(0xlength) @ pageoffset maj:min ino ino_generation]: - # permission process - # - # This is an example. - # 2 136690556823 0xa6898 [0x80]: PERF_RECORD_MMAP2 5227/5227: - # [0x555556496000(0x8d1b000) @ 0xf42000 b3:03 92844 1892514370]: - # r-xp /opt/google/chrome/chrome - # - # For this function, the 6th argument (args[5]) after spltting with spaces - # is pid/tid. We use the combination of the two as the pid. - # The 7th argument (args[6]) is the [0xaddr(0xlength). We can peel the - # string to get the address and size of the mmap. - # The 9th argument (args[8]) is the page offset. - # The function returns the ((pid,tid), mmap) pair if the mmap is for Chrome - # is on Chrome. Otherwise, return (None, None) pair. - - if 'chrome/chrome' not in line: - return None, None - args = line.split(' ') - pid_raw = args[5].split('/') - assert pid_raw[0] == pid_raw[1][:-1], \ - 'PID in %s of mmap is not the same: %s/%s' % ( - line[:-1], pid_raw[0], pid_raw[1]) - pid = (int(pid_raw[0]), int(pid_raw[1][:-1])) - address_raw = args[6].split('(') - start_address = int(address_raw[0][1:], base=16) - size = int(address_raw[1][:-1], base=16) - offset = int(args[8], base=16) - # Return an mmap object instead of only starting address, - # in case there are many mmaps for the sample PID - return pid, MMap(start_address, size, offset) - - def _parse_pair_event(self, arg): - # This function is called by the _parse_* functions that has a pattern of - # pids like: (pid:tid):(pid:tid), i.e. - # PERF_RECORD_FORK and PERF_RECORD_COMM - _, remain = arg.split('(', 1) - pid1, remain = remain.split(':', 1) - pid2, remain = remain.split(')', 1) - _, remain = remain.split('(', 1) - pid3, remain = remain.split(':', 1) - pid4, remain = remain.split(')', 1) - return (int(pid1), int(pid2)), (int(pid3), int(pid4)) - - def _process_perf_record(self, line): - # This function calls _parse_perf_record() to get information from - # PERF_RECORD_MMAP2. It records the mmap object for each pid (a pair of - # pid,tid), into a dictionary. - pid, mmap = self._parse_perf_record(line) - if pid is None: - # PID = None meaning the mmap is not for chrome - return - if pid in self.processes: - # This should never happen for a correct profiling result, as we - # should only have one MMAP for Chrome for each process. - # If it happens, see http://crbug.com/931465 - self.processes[pid].merge(mmap) - else: - self.processes[pid] = mmap - - def _process_perf_fork(self, line): - # In a perf report, generated with -D, a PERF_RECORD_FORK command should - # look like this: - # - # cpuid cycle unknown [unknown]: - # PERF_RECORD_FORK(pid_to:tid_to):(pid_from:tid_from) - # - # This is an example. 
- # 0 0 0x22a8 [0x38]: PERF_RECORD_FORK(1:1):(0:0) - # - # In this function, we need to peel the information of pid:tid pairs - # So we get the last argument and send it to function _parse_pair_event() - # for analysis. - # We use (pid, tid) as the pid. - args = line.split(' ') - pid_to, pid_from = self._parse_pair_event(args[-1]) - if pid_from in self.processes: - assert pid_to not in self.processes - self.processes[pid_to] = MMap(self.processes[pid_from].start_address, - self.processes[pid_from].size, - self.processes[pid_from].offset) - - def _process_perf_exit(self, line): - # In a perf report, generated with -D, a PERF_RECORD_EXIT command should - # look like this: - # - # cpuid cycle unknown [unknown]: - # PERF_RECORD_EXIT(pid1:tid1):(pid2:tid2) - # - # This is an example. - # 1 136082505621 0x30810 [0x38]: PERF_RECORD_EXIT(3851:3851):(3851:3851) - # - # In this function, we need to peel the information of pid:tid pairs - # So we get the last argument and send it to function _parse_pair_event() - # for analysis. - # We use (pid, tid) as the pid. - args = line.split(' ') - pid_to, pid_from = self._parse_pair_event(args[-1]) - assert pid_to == pid_from, '(%d, %d) (%d, %d)' % (pid_to[0], pid_to[1], - pid_from[0], pid_from[1]) - if pid_to in self.processes: - # Don't delete the process yet - self.deleted_processes[pid_from] = self.processes[pid_from] - - def _process_perf_sample(self, line): - # This function calls _parse_perf_sample() to get information from - # the perf report. - # It needs to check the starting address of allocated mmap from - # the dictionary (self.processes) to calculate the offset within - # the text section of the sampling. - # The offset is calculated into pages (4KB or 2MB) and writes into - # out.txt together with the total counts, which will be used to - # calculate histogram. - pid, addr = self._parse_perf_sample(line) - if pid is None: - return - - assert pid in self.processes and pid not in self.deleted_processes, \ - 'PID %d not found mmap and not forked from another process' - - start_address = self.processes[pid].start_address - address = addr - start_address - assert address >= 0 and \ - 'addresses accessed in PERF_RECORD_SAMPLE should be larger than' \ - ' the starting address of Chrome' - if address < self.max_addr: - self.count += 1 - line = '%d/%d: %d %d' % (pid[0], pid[1], self.count, - address // self.page_size * self.page_size) - if self.hugepage: - if self.hugepage.start <= address < self.hugepage.end: - line += ' hugepage' + """Class to generate heat map with a perf report, containing mmaps and + + samples. This class contains two interfaces with other modules: + draw() and analyze(). + + draw() draws a heatmap with the sample information given in the perf report + analyze() prints out the symbol names in hottest pages with the given + chrome binary + """ + + def __init__( + self, perf_report, page_size, hugepage, title, log_level="verbose" + ): + self.perf_report = perf_report + # Pick 1G as a relatively large number. All addresses less than it will + # be recorded. The actual heatmap will show up to a boundary of the + # largest address in text segment. 
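
Aside on the hugepage argument consumed a few lines below: it arrives as a [start, end] pair (parsed from --hugepage=start,end by heat_map.py) and is wrapped in the HugepageRange namedtuple defined at module scope. An illustrative sketch:

    import collections

    # Same definition as at module scope above.
    HugepageRange = collections.namedtuple("HugepageRange", ["start", "end"])

    hugepage = [0, 8192]  # e.g. from --hugepage=0,8192
    hp = HugepageRange(start=hugepage[0], end=hugepage[1])
    assert hp.start <= 4096 < hp.end  # a sample at offset 4096 falls inside
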
+ self.max_addr = 1024 * 1024 * 1024 + self.ce = command_executer.GetCommandExecuter(log_level=log_level) + self.dir = os.path.dirname(os.path.realpath(__file__)) + with open(perf_report, "r", encoding="utf-8") as f: + self.perf_report_contents = f.readlines() + # Write histogram results to a text file, in order to use gnu plot to draw + self.hist_temp_output = open("out.txt", "w", encoding="utf-8") + self.processes = {} + self.deleted_processes = {} + self.count = 0 + if hugepage: + self.hugepage = HugepageRange(start=hugepage[0], end=hugepage[1]) + else: + self.hugepage = None + self.title = title + self.symbol_addresses = [] + self.symbol_names = [] + self.page_size = page_size + + def _parse_perf_sample(self, line): + # In a perf report, generated with -D, a PERF_RECORD_SAMPLE command should + # look like this: TODO: some arguments are unknown + # + # cpuid cycle unknown [unknown]: PERF_RECORD_SAMPLE(IP, 0x2): pid/tid: + # 0xaddr period: period addr: addr + # ... thread: threadname:tid + # ...... dso: process + # + # This is an example: + # 1 136712833349 0x6a558 [0x30]: PERF_RECORD_SAMPLE(IP, 0x2): 5227/5227: + # 0x55555683b810 period: 372151 addr: 0 + # ... thread: chrome:5227 + # ...... dso: /opt/google/chrome/chrome + # + # For this function, the 7th argument (args[6]) after spltting with spaces + # is pid/tid. We use the combination of the two as the pid. + # Also, we add an assertion here to check the tid in the 7th argument( + # args[6]) and the 15th argument(arg[14]) are the same + # + # The function returns the ((pid,tid), address) pair if the sampling + # is on Chrome. Otherwise, return (None, None) pair. + + if ( + "thread: chrome" not in line + or "dso: /opt/google/chrome/chrome" not in line + ): + return None, None + args = line.split(" ") + pid_raw = args[6].split("/") + assert ( + pid_raw[1][:-1] == args[14].split(":")[1][:-1] + ), "TID in %s of sample is not the same: %s/%s" % ( + line[:-1], + pid_raw[1][:-1], + args[14].split(":")[1][:-1], + ) + key = (int(pid_raw[0]), int(pid_raw[1][:-1])) + address = int(args[7], base=16) + return key, address + + def _parse_perf_record(self, line): + # In a perf report, generated with -D, a PERF_RECORD_MMAP2 command should + # look like this: TODO: some arguments are unknown + # + # cpuid cycle unknown [unknown]: PERF_RECORD_MMAP2 pid/tid: + # [0xaddr(0xlength) @ pageoffset maj:min ino ino_generation]: + # permission process + # + # This is an example. + # 2 136690556823 0xa6898 [0x80]: PERF_RECORD_MMAP2 5227/5227: + # [0x555556496000(0x8d1b000) @ 0xf42000 b3:03 92844 1892514370]: + # r-xp /opt/google/chrome/chrome + # + # For this function, the 6th argument (args[5]) after spltting with spaces + # is pid/tid. We use the combination of the two as the pid. + # The 7th argument (args[6]) is the [0xaddr(0xlength). We can peel the + # string to get the address and size of the mmap. + # The 9th argument (args[8]) is the page offset. + # The function returns the ((pid,tid), mmap) pair if the mmap is for Chrome + # is on Chrome. Otherwise, return (None, None) pair. 
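
The same unpacking can be exercised standalone; this sketch mirrors the arithmetic in the code that follows, with the example record copied from the comment above:

    line = (
        "2 136690556823 0xa6898 [0x80]: PERF_RECORD_MMAP2 5227/5227: "
        "[0x555556496000(0x8d1b000) @ 0xf42000 b3:03 92844 1892514370]: "
        "r-xp /opt/google/chrome/chrome"
    )
    args = line.split(" ")
    address_raw = args[6].split("(")  # "[0xaddr(0xlength)" splits in two
    start_address = int(address_raw[0][1:], base=16)  # drop leading "["
    size = int(address_raw[1][:-1], base=16)  # drop trailing ")"
    offset = int(args[8], base=16)
    assert start_address == 0x555556496000
    assert size == 0x8D1B000
    assert offset == 0xF42000
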
+ + if "chrome/chrome" not in line: + return None, None + args = line.split(" ") + pid_raw = args[5].split("/") + assert ( + pid_raw[0] == pid_raw[1][:-1] + ), "PID in %s of mmap is not the same: %s/%s" % ( + line[:-1], + pid_raw[0], + pid_raw[1], + ) + pid = (int(pid_raw[0]), int(pid_raw[1][:-1])) + address_raw = args[6].split("(") + start_address = int(address_raw[0][1:], base=16) + size = int(address_raw[1][:-1], base=16) + offset = int(args[8], base=16) + # Return an mmap object instead of only starting address, + # in case there are many mmaps for the sample PID + return pid, MMap(start_address, size, offset) + + def _parse_pair_event(self, arg): + # This function is called by the _parse_* functions that has a pattern of + # pids like: (pid:tid):(pid:tid), i.e. + # PERF_RECORD_FORK and PERF_RECORD_COMM + _, remain = arg.split("(", 1) + pid1, remain = remain.split(":", 1) + pid2, remain = remain.split(")", 1) + _, remain = remain.split("(", 1) + pid3, remain = remain.split(":", 1) + pid4, remain = remain.split(")", 1) + return (int(pid1), int(pid2)), (int(pid3), int(pid4)) + + def _process_perf_record(self, line): + # This function calls _parse_perf_record() to get information from + # PERF_RECORD_MMAP2. It records the mmap object for each pid (a pair of + # pid,tid), into a dictionary. + pid, mmap = self._parse_perf_record(line) + if pid is None: + # PID = None meaning the mmap is not for chrome + return + if pid in self.processes: + # This should never happen for a correct profiling result, as we + # should only have one MMAP for Chrome for each process. + # If it happens, see http://crbug.com/931465 + self.processes[pid].merge(mmap) else: - line += ' smallpage' - print(line, file=self.hist_temp_output) - - def _read_perf_report(self): - # Serve as main function to read perf report, generated by -D - lines = iter(self.perf_report_contents) - for line in lines: - if 'PERF_RECORD_MMAP' in line: - self._process_perf_record(line) - elif 'PERF_RECORD_FORK' in line: - self._process_perf_fork(line) - elif 'PERF_RECORD_EXIT' in line: - self._process_perf_exit(line) - elif 'PERF_RECORD_SAMPLE' in line: - # Perf sample is multi-line - self._process_perf_sample(line + next(lines) + next(lines)) - self.hist_temp_output.close() - - def _draw_heat_map(self): - # Calls a script (perf-to-inst-page.sh) to calculate histogram - # of results written in out.txt and also generate pngs for - # heat maps. - heatmap_script = os.path.join(self.dir, 'perf-to-inst-page.sh') - if self.hugepage: - hp_arg = 'hugepage' - else: - hp_arg = 'none' - - cmd = '{0} {1} {2}'.format(heatmap_script, pipes.quote(self.title), hp_arg) - retval = self.ce.RunCommand(cmd) - if retval: - raise RuntimeError('Failed to run script to generate heatmap') - - def _restore_histogram(self): - # When hugepage is used, there are two files inst-histo-{hp,sp}.txt - # So we need to read in all the files. - names = [x for x in os.listdir('.') if 'inst-histo' in x and '.txt' in x] - hist = {} - for n in names: - with open(n, encoding='utf-8') as f: - for l in f.readlines(): - num, addr = l.strip().split(' ') - assert int(addr) not in hist - hist[int(addr)] = int(num) - return hist - - def _read_symbols_from_binary(self, binary): - # FIXME: We are using nm to read symbol names from Chrome binary - # for now. Can we get perf to hand us symbol names, instead of - # using nm in the future? - # - # Get all the symbols (and their starting addresses) that fall into - # the page. 
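
For context on the nm-based symbolization used here (and in the reformatted copy below): each useful nm line carries an address, a type letter, and a name, as the comments that follow spell out. A minimal unpacking sketch using the example symbol quoted in those comments:

    l = "0000000001129da0 t Builtins_LdaNamedPropertyHandler"
    addr_raw, symbol_type, name = l.strip().split(" ")
    addr = int(addr_raw, base=16)
    # Only 't'/'T' (text section) symbols are kept by the parsing code.
    assert symbol_type in ("t", "T")
    assert name == "Builtins_LdaNamedPropertyHandler"
    assert addr == 0x1129DA0
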
Will be used to print out information of hot pages - # Each line shows the information of a symbol: - # [symbol value (0xaddr)] [symbol type] [symbol name] - # For some symbols, the [symbol name] field might be missing. - # e.g. - # 0000000001129da0 t Builtins_LdaNamedPropertyHandler - - # Generate a list of symbols from nm tool and check each line - # to extract symbols names - text_section_start = 0 - for l in subprocess.check_output(['nm', '-n', binary]).split('\n'): - args = l.strip().split(' ') - if len(args) < 3: - # No name field - continue - addr_raw, symbol_type, name = args - addr = int(addr_raw, base=16) - if 't' not in symbol_type and 'T' not in symbol_type: - # Filter out symbols not in text sections - continue - if not self.symbol_addresses: - # The first symbol in text sections - text_section_start = addr - self.symbol_addresses.append(0) - self.symbol_names.append(name) - else: - assert text_section_start != 0, \ - 'The starting address of text section has not been found' - if addr == self.symbol_addresses[-1]: - # if the same address has multiple symbols, put them together - # and separate symbol names with '/' - self.symbol_names[-1] += '/' + name + self.processes[pid] = mmap + + def _process_perf_fork(self, line): + # In a perf report, generated with -D, a PERF_RECORD_FORK command should + # look like this: + # + # cpuid cycle unknown [unknown]: + # PERF_RECORD_FORK(pid_to:tid_to):(pid_from:tid_from) + # + # This is an example. + # 0 0 0x22a8 [0x38]: PERF_RECORD_FORK(1:1):(0:0) + # + # In this function, we need to peel the information of pid:tid pairs + # So we get the last argument and send it to function _parse_pair_event() + # for analysis. + # We use (pid, tid) as the pid. + args = line.split(" ") + pid_to, pid_from = self._parse_pair_event(args[-1]) + if pid_from in self.processes: + assert pid_to not in self.processes + self.processes[pid_to] = MMap( + self.processes[pid_from].start_address, + self.processes[pid_from].size, + self.processes[pid_from].offset, + ) + + def _process_perf_exit(self, line): + # In a perf report, generated with -D, a PERF_RECORD_EXIT command should + # look like this: + # + # cpuid cycle unknown [unknown]: + # PERF_RECORD_EXIT(pid1:tid1):(pid2:tid2) + # + # This is an example. + # 1 136082505621 0x30810 [0x38]: PERF_RECORD_EXIT(3851:3851):(3851:3851) + # + # In this function, we need to peel the information of pid:tid pairs + # So we get the last argument and send it to function _parse_pair_event() + # for analysis. + # We use (pid, tid) as the pid. + args = line.split(" ") + pid_to, pid_from = self._parse_pair_event(args[-1]) + assert pid_to == pid_from, "(%d, %d) (%d, %d)" % ( + pid_to[0], + pid_to[1], + pid_from[0], + pid_from[1], + ) + if pid_to in self.processes: + # Don't delete the process yet + self.deleted_processes[pid_from] = self.processes[pid_from] + + def _process_perf_sample(self, line): + # This function calls _parse_perf_sample() to get information from + # the perf report. + # It needs to check the starting address of allocated mmap from + # the dictionary (self.processes) to calculate the offset within + # the text section of the sampling. + # The offset is calculated into pages (4KB or 2MB) and writes into + # out.txt together with the total counts, which will be used to + # calculate histogram. 
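
The page bucketing described above reduces each sample to the start of its page; a small worked example with illustrative addresses:

    page_size = 4096
    start_address = 0xABCD000  # text-section start from the MMAP record
    addr = 0xABCE123  # sampled address (illustrative)
    offset = addr - start_address  # offset inside Chrome's text section
    page_offset = offset // page_size * page_size
    assert offset == 0x1123
    assert page_offset == 4096  # the sample lands in the second 4KB page
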
+ pid, addr = self._parse_perf_sample(line) + if pid is None: + return + + assert ( + pid in self.processes and pid not in self.deleted_processes + ), "PID %d not found mmap and not forked from another process" + + start_address = self.processes[pid].start_address + address = addr - start_address + assert ( + address >= 0 + and "addresses accessed in PERF_RECORD_SAMPLE should be larger than" + " the starting address of Chrome" + ) + if address < self.max_addr: + self.count += 1 + line = "%d/%d: %d %d" % ( + pid[0], + pid[1], + self.count, + address // self.page_size * self.page_size, + ) + if self.hugepage: + if self.hugepage.start <= address < self.hugepage.end: + line += " hugepage" + else: + line += " smallpage" + print(line, file=self.hist_temp_output) + + def _read_perf_report(self): + # Serve as main function to read perf report, generated by -D + lines = iter(self.perf_report_contents) + for line in lines: + if "PERF_RECORD_MMAP" in line: + self._process_perf_record(line) + elif "PERF_RECORD_FORK" in line: + self._process_perf_fork(line) + elif "PERF_RECORD_EXIT" in line: + self._process_perf_exit(line) + elif "PERF_RECORD_SAMPLE" in line: + # Perf sample is multi-line + self._process_perf_sample(line + next(lines) + next(lines)) + self.hist_temp_output.close() + + def _draw_heat_map(self): + # Calls a script (perf-to-inst-page.sh) to calculate histogram + # of results written in out.txt and also generate pngs for + # heat maps. + heatmap_script = os.path.join(self.dir, "perf-to-inst-page.sh") + if self.hugepage: + hp_arg = "hugepage" else: - # The output of nm -n command is already sorted by address - # Insert to the end will result in a sorted array for bisect - self.symbol_addresses.append(addr - text_section_start) - self.symbol_names.append(name) - - def _map_addr_to_symbol(self, addr): - # Find out the symbol name - assert self.symbol_addresses - index = bisect.bisect(self.symbol_addresses, addr) - assert 0 < index <= len(self.symbol_names), \ - 'Failed to find an index (%d) in the list (len=%d)' % ( - index, len(self.symbol_names)) - return self.symbol_names[index - 1] - - def _print_symbols_in_hot_pages(self, fp, pages_to_show): - # Print symbols in all the pages of interest - for page_num, sample_num in pages_to_show: - print( - '----------------------------------------------------------', file=fp) - print( - 'Page Offset: %d MB, Count: %d' % (page_num // 1024 // 1024, - sample_num), - file=fp) - - symbol_counts = collections.Counter() - # Read Sample File and find out the occurance of symbols in the page - lines = iter(self.perf_report_contents) - for line in lines: - if 'PERF_RECORD_SAMPLE' in line: - pid, addr = self._parse_perf_sample(line + next(lines) + next(lines)) - if pid is None: - # The sampling is not on Chrome - continue - if addr // self.page_size != ( - self.processes[pid].start_address + page_num) // self.page_size: - # Sampling not in the current page - continue - - name = self._map_addr_to_symbol(addr - - self.processes[pid].start_address) - assert name, 'Failed to find symbol name of addr %x' % addr - symbol_counts[name] += 1 - - assert sum(symbol_counts.values()) == sample_num, \ - 'Symbol name matching missing for some addresses: %d vs %d' % ( - sum(symbol_counts.values()), sample_num) - - # Print out the symbol names sorted by the number of samples in - # the page - for name, count in sorted( - symbol_counts.items(), key=lambda kv: kv[1], reverse=True): - if count == 0: - break - print('> %s : %d' % (name, count), file=fp) - print('\n\n', file=fp) - - 
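
_map_addr_to_symbol above relies on the sorted-address invariant plus bisect; a self-contained sketch of that lookup with a toy symbol table (illustrative names, not real Chrome symbols):

    import bisect

    # Parallel lists as _read_symbols_from_binary builds them:
    # addresses sorted ascending, names aligned by index.
    symbol_addresses = [0x0, 0x100, 0x2000]
    symbol_names = ["func_a", "func_b", "func_c"]

    def map_addr_to_symbol(addr):
        # bisect gives the insertion point; the covering symbol is the
        # last one starting at or before addr.
        index = bisect.bisect(symbol_addresses, addr)
        return symbol_names[index - 1]

    assert map_addr_to_symbol(0x150) == "func_b"
    assert map_addr_to_symbol(0x2000) == "func_c"
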
def draw(self): - # First read perf report to process information and save histogram - # into a text file - self._read_perf_report() - # Then use gnu plot to draw heat map - self._draw_heat_map() - - def analyze(self, binary, top_n): - # Read histogram from histo.txt - hist = self._restore_histogram() - # Sort the pages in histogram - sorted_hist = sorted(hist.items(), key=lambda value: value[1], reverse=True) - - # Generate symbolizations - self._read_symbols_from_binary(binary) - - # Write hottest pages - with open('addr2symbol.txt', 'w', encoding='utf-8') as fp: - if self.hugepage: - # Print hugepage region first - print( - 'Hugepage top %d hot pages (%d MB - %d MB):' % - (top_n, self.hugepage.start // 1024 // 1024, - self.hugepage.end // 1024 // 1024), - file=fp) - pages_to_print = [(k, v) - for k, v in sorted_hist - if self.hugepage.start <= k < self.hugepage.end - ][:top_n] - self._print_symbols_in_hot_pages(fp, pages_to_print) - print('==========================================', file=fp) - print('Top %d hot pages landed outside of hugepage:' % top_n, file=fp) - # Then print outside pages - pages_to_print = [(k, v) - for k, v in sorted_hist - if k < self.hugepage.start or k >= self.hugepage.end - ][:top_n] - self._print_symbols_in_hot_pages(fp, pages_to_print) - else: - # Print top_n hottest pages. - pages_to_print = sorted_hist[:top_n] - self._print_symbols_in_hot_pages(fp, pages_to_print) + hp_arg = "none" + + cmd = "{0} {1} {2}".format( + heatmap_script, pipes.quote(self.title), hp_arg + ) + retval = self.ce.RunCommand(cmd) + if retval: + raise RuntimeError("Failed to run script to generate heatmap") + + def _restore_histogram(self): + # When hugepage is used, there are two files inst-histo-{hp,sp}.txt + # So we need to read in all the files. + names = [ + x for x in os.listdir(".") if "inst-histo" in x and ".txt" in x + ] + hist = {} + for n in names: + with open(n, encoding="utf-8") as f: + for l in f.readlines(): + num, addr = l.strip().split(" ") + assert int(addr) not in hist + hist[int(addr)] = int(num) + return hist + + def _read_symbols_from_binary(self, binary): + # FIXME: We are using nm to read symbol names from Chrome binary + # for now. Can we get perf to hand us symbol names, instead of + # using nm in the future? + # + # Get all the symbols (and their starting addresses) that fall into + # the page. Will be used to print out information of hot pages + # Each line shows the information of a symbol: + # [symbol value (0xaddr)] [symbol type] [symbol name] + # For some symbols, the [symbol name] field might be missing. + # e.g. 
+ # 0000000001129da0 t Builtins_LdaNamedPropertyHandler + + # Generate a list of symbols from nm tool and check each line + # to extract symbols names + text_section_start = 0 + for l in subprocess.check_output(["nm", "-n", binary]).split("\n"): + args = l.strip().split(" ") + if len(args) < 3: + # No name field + continue + addr_raw, symbol_type, name = args + addr = int(addr_raw, base=16) + if "t" not in symbol_type and "T" not in symbol_type: + # Filter out symbols not in text sections + continue + if not self.symbol_addresses: + # The first symbol in text sections + text_section_start = addr + self.symbol_addresses.append(0) + self.symbol_names.append(name) + else: + assert ( + text_section_start != 0 + ), "The starting address of text section has not been found" + if addr == self.symbol_addresses[-1]: + # if the same address has multiple symbols, put them together + # and separate symbol names with '/' + self.symbol_names[-1] += "/" + name + else: + # The output of nm -n command is already sorted by address + # Insert to the end will result in a sorted array for bisect + self.symbol_addresses.append(addr - text_section_start) + self.symbol_names.append(name) + + def _map_addr_to_symbol(self, addr): + # Find out the symbol name + assert self.symbol_addresses + index = bisect.bisect(self.symbol_addresses, addr) + assert ( + 0 < index <= len(self.symbol_names) + ), "Failed to find an index (%d) in the list (len=%d)" % ( + index, + len(self.symbol_names), + ) + return self.symbol_names[index - 1] + + def _print_symbols_in_hot_pages(self, fp, pages_to_show): + # Print symbols in all the pages of interest + for page_num, sample_num in pages_to_show: + print( + "----------------------------------------------------------", + file=fp, + ) + print( + "Page Offset: %d MB, Count: %d" + % (page_num // 1024 // 1024, sample_num), + file=fp, + ) + + symbol_counts = collections.Counter() + # Read Sample File and find out the occurance of symbols in the page + lines = iter(self.perf_report_contents) + for line in lines: + if "PERF_RECORD_SAMPLE" in line: + pid, addr = self._parse_perf_sample( + line + next(lines) + next(lines) + ) + if pid is None: + # The sampling is not on Chrome + continue + if ( + addr // self.page_size + != (self.processes[pid].start_address + page_num) + // self.page_size + ): + # Sampling not in the current page + continue + + name = self._map_addr_to_symbol( + addr - self.processes[pid].start_address + ) + assert name, "Failed to find symbol name of addr %x" % addr + symbol_counts[name] += 1 + + assert ( + sum(symbol_counts.values()) == sample_num + ), "Symbol name matching missing for some addresses: %d vs %d" % ( + sum(symbol_counts.values()), + sample_num, + ) + + # Print out the symbol names sorted by the number of samples in + # the page + for name, count in sorted( + symbol_counts.items(), key=lambda kv: kv[1], reverse=True + ): + if count == 0: + break + print("> %s : %d" % (name, count), file=fp) + print("\n\n", file=fp) + + def draw(self): + # First read perf report to process information and save histogram + # into a text file + self._read_perf_report() + # Then use gnu plot to draw heat map + self._draw_heat_map() + + def analyze(self, binary, top_n): + # Read histogram from histo.txt + hist = self._restore_histogram() + # Sort the pages in histogram + sorted_hist = sorted( + hist.items(), key=lambda value: value[1], reverse=True + ) + + # Generate symbolizations + self._read_symbols_from_binary(binary) + + # Write hottest pages + with open("addr2symbol.txt", "w", 
encoding="utf-8") as fp: + if self.hugepage: + # Print hugepage region first + print( + "Hugepage top %d hot pages (%d MB - %d MB):" + % ( + top_n, + self.hugepage.start // 1024 // 1024, + self.hugepage.end // 1024 // 1024, + ), + file=fp, + ) + pages_to_print = [ + (k, v) + for k, v in sorted_hist + if self.hugepage.start <= k < self.hugepage.end + ][:top_n] + self._print_symbols_in_hot_pages(fp, pages_to_print) + print("==========================================", file=fp) + print( + "Top %d hot pages landed outside of hugepage:" % top_n, + file=fp, + ) + # Then print outside pages + pages_to_print = [ + (k, v) + for k, v in sorted_hist + if k < self.hugepage.start or k >= self.hugepage.end + ][:top_n] + self._print_symbols_in_hot_pages(fp, pages_to_print) + else: + # Print top_n hottest pages. + pages_to_print = sorted_hist[:top_n] + self._print_symbols_in_hot_pages(fp, pages_to_print) diff --git a/heatmaps/heatmap_generator_test.py b/heatmaps/heatmap_generator_test.py index 0838ffc4..4afc9351 100755 --- a/heatmaps/heatmap_generator_test.py +++ b/heatmaps/heatmap_generator_test.py @@ -6,311 +6,323 @@ """Tests for heatmap_generator.py.""" -from __future__ import division, print_function - -import unittest.mock as mock -import unittest +from __future__ import division +from __future__ import print_function import os +import unittest +import unittest.mock as mock from heatmaps import heatmap_generator def _write_perf_mmap(pid, tid, addr, size, fp): - print( - '0 0 0 0 PERF_RECORD_MMAP2 %d/%d: ' - '[%x(%x) @ 0x0 0:0 0 0] ' - 'r-xp /opt/google/chrome/chrome\n' % (pid, tid, addr, size), - file=fp) + print( + "0 0 0 0 PERF_RECORD_MMAP2 %d/%d: " + "[%x(%x) @ 0x0 0:0 0 0] " + "r-xp /opt/google/chrome/chrome\n" % (pid, tid, addr, size), + file=fp, + ) def _write_perf_fork(pid_from, tid_from, pid_to, tid_to, fp): - print( - '0 0 0 0 PERF_RECORD_FORK(%d:%d):(%d:%d)\n' % (pid_to, tid_to, pid_from, - tid_from), - file=fp) + print( + "0 0 0 0 PERF_RECORD_FORK(%d:%d):(%d:%d)\n" + % (pid_to, tid_to, pid_from, tid_from), + file=fp, + ) def _write_perf_exit(pid_from, tid_from, pid_to, tid_to, fp): - print( - '0 0 0 0 PERF_RECORD_EXIT(%d:%d):(%d:%d)\n' % (pid_to, tid_to, pid_from, - tid_from), - file=fp) + print( + "0 0 0 0 PERF_RECORD_EXIT(%d:%d):(%d:%d)\n" + % (pid_to, tid_to, pid_from, tid_from), + file=fp, + ) def _write_perf_sample(pid, tid, addr, fp): - print( - '0 0 0 0 PERF_RECORD_SAMPLE(IP, 0x2): ' - '%d/%d: %x period: 100000 addr: 0' % (pid, tid, addr), - file=fp) - print(' ... thread: chrome:%d' % tid, file=fp) - print(' ...... dso: /opt/google/chrome/chrome\n', file=fp) + print( + "0 0 0 0 PERF_RECORD_SAMPLE(IP, 0x2): " + "%d/%d: %x period: 100000 addr: 0" % (pid, tid, addr), + file=fp, + ) + print(" ... thread: chrome:%d" % tid, file=fp) + print(" ...... 
dso: /opt/google/chrome/chrome\n", file=fp) def _heatmap(file_name, page_size=4096, hugepage=None, analyze=False, top_n=10): - generator = heatmap_generator.HeatmapGenerator( - file_name, page_size, hugepage, '', - log_level='none') # Don't log to stdout - generator.draw() - if analyze: - generator.analyze('/path/to/chrome', top_n) + generator = heatmap_generator.HeatmapGenerator( + file_name, page_size, hugepage, "", log_level="none" + ) # Don't log to stdout + generator.draw() + if analyze: + generator.analyze("/path/to/chrome", top_n) def _cleanup(file_name): - files = [ - file_name, 'out.txt', 'inst-histo.txt', 'inst-histo-hp.txt', - 'inst-histo-sp.txt', 'heat_map.png', 'timeline.png', 'addr2symbol.txt' - ] - for f in files: - if os.path.exists(f): - os.remove(f) + files = [ + file_name, + "out.txt", + "inst-histo.txt", + "inst-histo-hp.txt", + "inst-histo-sp.txt", + "heat_map.png", + "timeline.png", + "addr2symbol.txt", + ] + for f in files: + if os.path.exists(f): + os.remove(f) class HeatmapGeneratorDrawTests(unittest.TestCase): - """All of our tests for heatmap_generator.draw() and related.""" - - def test_with_one_mmap_one_sample(self): - """Tests one perf record and one sample.""" - fname = 'test.txt' - with open(fname, 'w') as f: - _write_perf_mmap(101, 101, 0xABCD000, 0x100, f) - _write_perf_sample(101, 101, 0xABCD101, f) - self.addCleanup(_cleanup, fname) - _heatmap(fname) - self.assertIn('out.txt', os.listdir('.')) - with open('out.txt') as f: - lines = f.readlines() - self.assertEqual(len(lines), 1) - self.assertIn('101/101: 1 0', lines[0]) - - def test_with_one_mmap_multiple_samples(self): - """Tests one perf record and three samples.""" - fname = 'test.txt' - with open(fname, 'w') as f: - _write_perf_mmap(101, 101, 0xABCD000, 0x100, f) - _write_perf_sample(101, 101, 0xABCD101, f) - _write_perf_sample(101, 101, 0xABCD102, f) - _write_perf_sample(101, 101, 0xABCE102, f) - self.addCleanup(_cleanup, fname) - _heatmap(fname) - self.assertIn('out.txt', os.listdir('.')) - with open('out.txt') as f: - lines = f.readlines() - self.assertEqual(len(lines), 3) - self.assertIn('101/101: 1 0', lines[0]) - self.assertIn('101/101: 2 0', lines[1]) - self.assertIn('101/101: 3 4096', lines[2]) - - def test_with_fork_and_exit(self): - """Tests perf fork and perf exit.""" - fname = 'test_fork.txt' - with open(fname, 'w') as f: - _write_perf_mmap(101, 101, 0xABCD000, 0x100, f) - _write_perf_fork(101, 101, 202, 202, f) - _write_perf_sample(101, 101, 0xABCD101, f) - _write_perf_sample(202, 202, 0xABCE101, f) - _write_perf_exit(202, 202, 202, 202, f) - self.addCleanup(_cleanup, fname) - _heatmap(fname) - self.assertIn('out.txt', os.listdir('.')) - with open('out.txt') as f: - lines = f.readlines() - self.assertEqual(len(lines), 2) - self.assertIn('101/101: 1 0', lines[0]) - self.assertIn('202/202: 2 4096', lines[1]) - - def test_hugepage_creates_two_chrome_mmaps(self): - """Test two chrome mmaps for the same process.""" - fname = 'test_hugepage.txt' - with open(fname, 'w') as f: - _write_perf_mmap(101, 101, 0xABCD000, 0x1000, f) - _write_perf_fork(101, 101, 202, 202, f) - _write_perf_mmap(202, 202, 0xABCD000, 0x100, f) - _write_perf_mmap(202, 202, 0xABCD300, 0xD00, f) - _write_perf_sample(101, 101, 0xABCD102, f) - _write_perf_sample(202, 202, 0xABCD102, f) - self.addCleanup(_cleanup, fname) - _heatmap(fname) - self.assertIn('out.txt', os.listdir('.')) - with open('out.txt') as f: - lines = f.readlines() - self.assertEqual(len(lines), 2) - self.assertIn('101/101: 1 0', lines[0]) - 
self.assertIn('202/202: 2 0', lines[1]) - - def test_hugepage_creates_two_chrome_mmaps_fail(self): - """Test two chrome mmaps for the same process.""" - fname = 'test_hugepage.txt' - # Cases where first_mmap.size < second_mmap.size - with open(fname, 'w') as f: - _write_perf_mmap(101, 101, 0xABCD000, 0x1000, f) - _write_perf_fork(101, 101, 202, 202, f) - _write_perf_mmap(202, 202, 0xABCD000, 0x10000, f) - self.addCleanup(_cleanup, fname) - with self.assertRaises(AssertionError) as msg: - _heatmap(fname) - self.assertIn('Original MMAP size', str(msg.exception)) - - # Cases where first_mmap.address > second_mmap.address - with open(fname, 'w') as f: - _write_perf_mmap(101, 101, 0xABCD000, 0x1000, f) - _write_perf_fork(101, 101, 202, 202, f) - _write_perf_mmap(202, 202, 0xABCC000, 0x10000, f) - with self.assertRaises(AssertionError) as msg: - _heatmap(fname) - self.assertIn('Original MMAP starting address', str(msg.exception)) - - # Cases where first_mmap.address + size < - # second_mmap.address + second_mmap.size - with open(fname, 'w') as f: - _write_perf_mmap(101, 101, 0xABCD000, 0x1000, f) - _write_perf_fork(101, 101, 202, 202, f) - _write_perf_mmap(202, 202, 0xABCD100, 0x10000, f) - with self.assertRaises(AssertionError) as msg: - _heatmap(fname) - self.assertIn('exceeds the end of original MMAP', str(msg.exception)) - - def test_histogram(self): - """Tests if the tool can generate correct histogram. - - In the tool, histogram is generated from statistics - of perf samples (saved to out.txt). The histogram is - generated by perf-to-inst-page.sh and saved to - inst-histo.txt. It will be used to draw heat maps. - """ - fname = 'test_histo.txt' - with open(fname, 'w') as f: - _write_perf_mmap(101, 101, 0xABCD000, 0x100, f) - for i in range(100): - _write_perf_sample(101, 101, 0xABCD000 + i, f) - _write_perf_sample(101, 101, 0xABCE000 + i, f) - _write_perf_sample(101, 101, 0xABFD000 + i, f) - _write_perf_sample(101, 101, 0xAFCD000 + i, f) - self.addCleanup(_cleanup, fname) - _heatmap(fname) - self.assertIn('inst-histo.txt', os.listdir('.')) - with open('inst-histo.txt') as f: - lines = f.readlines() - self.assertEqual(len(lines), 4) - self.assertIn('100 0', lines[0]) - self.assertIn('100 4096', lines[1]) - self.assertIn('100 196608', lines[2]) - self.assertIn('100 4194304', lines[3]) - - def test_histogram_two_mb_page(self): - """Tests handling of 2MB page.""" - fname = 'test_histo.txt' - with open(fname, 'w') as f: - _write_perf_mmap(101, 101, 0xABCD000, 0x100, f) - for i in range(100): - _write_perf_sample(101, 101, 0xABCD000 + i, f) - _write_perf_sample(101, 101, 0xABCE000 + i, f) - _write_perf_sample(101, 101, 0xABFD000 + i, f) - _write_perf_sample(101, 101, 0xAFCD000 + i, f) - self.addCleanup(_cleanup, fname) - _heatmap(fname, page_size=2 * 1024 * 1024) - self.assertIn('inst-histo.txt', os.listdir('.')) - with open('inst-histo.txt') as f: - lines = f.readlines() - self.assertEqual(len(lines), 2) - self.assertIn('300 0', lines[0]) - self.assertIn('100 4194304', lines[1]) - - def test_histogram_in_and_out_hugepage(self): - """Tests handling the case of separating samples in and out huge page.""" - fname = 'test_histo.txt' - with open(fname, 'w') as f: - _write_perf_mmap(101, 101, 0xABCD000, 0x100, f) - for i in range(100): - _write_perf_sample(101, 101, 0xABCD000 + i, f) - _write_perf_sample(101, 101, 0xABCE000 + i, f) - _write_perf_sample(101, 101, 0xABFD000 + i, f) - _write_perf_sample(101, 101, 0xAFCD000 + i, f) - self.addCleanup(_cleanup, fname) - _heatmap(fname, hugepage=[0, 8192]) - 
file_list = os.listdir('.') - self.assertNotIn('inst-histo.txt', file_list) - self.assertIn('inst-histo-hp.txt', file_list) - self.assertIn('inst-histo-sp.txt', file_list) - with open('inst-histo-hp.txt') as f: - lines = f.readlines() - self.assertEqual(len(lines), 2) - self.assertIn('100 0', lines[0]) - self.assertIn('100 4096', lines[1]) - with open('inst-histo-sp.txt') as f: - lines = f.readlines() - self.assertEqual(len(lines), 2) - self.assertIn('100 196608', lines[0]) - self.assertIn('100 4194304', lines[1]) + """All of our tests for heatmap_generator.draw() and related.""" + + def test_with_one_mmap_one_sample(self): + """Tests one perf record and one sample.""" + fname = "test.txt" + with open(fname, "w") as f: + _write_perf_mmap(101, 101, 0xABCD000, 0x100, f) + _write_perf_sample(101, 101, 0xABCD101, f) + self.addCleanup(_cleanup, fname) + _heatmap(fname) + self.assertIn("out.txt", os.listdir(".")) + with open("out.txt") as f: + lines = f.readlines() + self.assertEqual(len(lines), 1) + self.assertIn("101/101: 1 0", lines[0]) + + def test_with_one_mmap_multiple_samples(self): + """Tests one perf record and three samples.""" + fname = "test.txt" + with open(fname, "w") as f: + _write_perf_mmap(101, 101, 0xABCD000, 0x100, f) + _write_perf_sample(101, 101, 0xABCD101, f) + _write_perf_sample(101, 101, 0xABCD102, f) + _write_perf_sample(101, 101, 0xABCE102, f) + self.addCleanup(_cleanup, fname) + _heatmap(fname) + self.assertIn("out.txt", os.listdir(".")) + with open("out.txt") as f: + lines = f.readlines() + self.assertEqual(len(lines), 3) + self.assertIn("101/101: 1 0", lines[0]) + self.assertIn("101/101: 2 0", lines[1]) + self.assertIn("101/101: 3 4096", lines[2]) + + def test_with_fork_and_exit(self): + """Tests perf fork and perf exit.""" + fname = "test_fork.txt" + with open(fname, "w") as f: + _write_perf_mmap(101, 101, 0xABCD000, 0x100, f) + _write_perf_fork(101, 101, 202, 202, f) + _write_perf_sample(101, 101, 0xABCD101, f) + _write_perf_sample(202, 202, 0xABCE101, f) + _write_perf_exit(202, 202, 202, 202, f) + self.addCleanup(_cleanup, fname) + _heatmap(fname) + self.assertIn("out.txt", os.listdir(".")) + with open("out.txt") as f: + lines = f.readlines() + self.assertEqual(len(lines), 2) + self.assertIn("101/101: 1 0", lines[0]) + self.assertIn("202/202: 2 4096", lines[1]) + + def test_hugepage_creates_two_chrome_mmaps(self): + """Test two chrome mmaps for the same process.""" + fname = "test_hugepage.txt" + with open(fname, "w") as f: + _write_perf_mmap(101, 101, 0xABCD000, 0x1000, f) + _write_perf_fork(101, 101, 202, 202, f) + _write_perf_mmap(202, 202, 0xABCD000, 0x100, f) + _write_perf_mmap(202, 202, 0xABCD300, 0xD00, f) + _write_perf_sample(101, 101, 0xABCD102, f) + _write_perf_sample(202, 202, 0xABCD102, f) + self.addCleanup(_cleanup, fname) + _heatmap(fname) + self.assertIn("out.txt", os.listdir(".")) + with open("out.txt") as f: + lines = f.readlines() + self.assertEqual(len(lines), 2) + self.assertIn("101/101: 1 0", lines[0]) + self.assertIn("202/202: 2 0", lines[1]) + + def test_hugepage_creates_two_chrome_mmaps_fail(self): + """Test two chrome mmaps for the same process.""" + fname = "test_hugepage.txt" + # Cases where first_mmap.size < second_mmap.size + with open(fname, "w") as f: + _write_perf_mmap(101, 101, 0xABCD000, 0x1000, f) + _write_perf_fork(101, 101, 202, 202, f) + _write_perf_mmap(202, 202, 0xABCD000, 0x10000, f) + self.addCleanup(_cleanup, fname) + with self.assertRaises(AssertionError) as msg: + _heatmap(fname) + self.assertIn("Original MMAP size", 
str(msg.exception)) + + # Cases where first_mmap.address > second_mmap.address + with open(fname, "w") as f: + _write_perf_mmap(101, 101, 0xABCD000, 0x1000, f) + _write_perf_fork(101, 101, 202, 202, f) + _write_perf_mmap(202, 202, 0xABCC000, 0x10000, f) + with self.assertRaises(AssertionError) as msg: + _heatmap(fname) + self.assertIn("Original MMAP starting address", str(msg.exception)) + + # Cases where first_mmap.address + size < + # second_mmap.address + second_mmap.size + with open(fname, "w") as f: + _write_perf_mmap(101, 101, 0xABCD000, 0x1000, f) + _write_perf_fork(101, 101, 202, 202, f) + _write_perf_mmap(202, 202, 0xABCD100, 0x10000, f) + with self.assertRaises(AssertionError) as msg: + _heatmap(fname) + self.assertIn("exceeds the end of original MMAP", str(msg.exception)) + + def test_histogram(self): + """Tests if the tool can generate correct histogram. + + In the tool, histogram is generated from statistics + of perf samples (saved to out.txt). The histogram is + generated by perf-to-inst-page.sh and saved to + inst-histo.txt. It will be used to draw heat maps. + """ + fname = "test_histo.txt" + with open(fname, "w") as f: + _write_perf_mmap(101, 101, 0xABCD000, 0x100, f) + for i in range(100): + _write_perf_sample(101, 101, 0xABCD000 + i, f) + _write_perf_sample(101, 101, 0xABCE000 + i, f) + _write_perf_sample(101, 101, 0xABFD000 + i, f) + _write_perf_sample(101, 101, 0xAFCD000 + i, f) + self.addCleanup(_cleanup, fname) + _heatmap(fname) + self.assertIn("inst-histo.txt", os.listdir(".")) + with open("inst-histo.txt") as f: + lines = f.readlines() + self.assertEqual(len(lines), 4) + self.assertIn("100 0", lines[0]) + self.assertIn("100 4096", lines[1]) + self.assertIn("100 196608", lines[2]) + self.assertIn("100 4194304", lines[3]) + + def test_histogram_two_mb_page(self): + """Tests handling of 2MB page.""" + fname = "test_histo.txt" + with open(fname, "w") as f: + _write_perf_mmap(101, 101, 0xABCD000, 0x100, f) + for i in range(100): + _write_perf_sample(101, 101, 0xABCD000 + i, f) + _write_perf_sample(101, 101, 0xABCE000 + i, f) + _write_perf_sample(101, 101, 0xABFD000 + i, f) + _write_perf_sample(101, 101, 0xAFCD000 + i, f) + self.addCleanup(_cleanup, fname) + _heatmap(fname, page_size=2 * 1024 * 1024) + self.assertIn("inst-histo.txt", os.listdir(".")) + with open("inst-histo.txt") as f: + lines = f.readlines() + self.assertEqual(len(lines), 2) + self.assertIn("300 0", lines[0]) + self.assertIn("100 4194304", lines[1]) + + def test_histogram_in_and_out_hugepage(self): + """Tests handling the case of separating samples in and out huge page.""" + fname = "test_histo.txt" + with open(fname, "w") as f: + _write_perf_mmap(101, 101, 0xABCD000, 0x100, f) + for i in range(100): + _write_perf_sample(101, 101, 0xABCD000 + i, f) + _write_perf_sample(101, 101, 0xABCE000 + i, f) + _write_perf_sample(101, 101, 0xABFD000 + i, f) + _write_perf_sample(101, 101, 0xAFCD000 + i, f) + self.addCleanup(_cleanup, fname) + _heatmap(fname, hugepage=[0, 8192]) + file_list = os.listdir(".") + self.assertNotIn("inst-histo.txt", file_list) + self.assertIn("inst-histo-hp.txt", file_list) + self.assertIn("inst-histo-sp.txt", file_list) + with open("inst-histo-hp.txt") as f: + lines = f.readlines() + self.assertEqual(len(lines), 2) + self.assertIn("100 0", lines[0]) + self.assertIn("100 4096", lines[1]) + with open("inst-histo-sp.txt") as f: + lines = f.readlines() + self.assertEqual(len(lines), 2) + self.assertIn("100 196608", lines[0]) + self.assertIn("100 4194304", lines[1]) class 
HeatmapGeneratorAnalyzeTests(unittest.TestCase): - """All of our tests for heatmap_generator.analyze() and related.""" - - def setUp(self): - # Use the same perf report for testing - self.fname = 'test_histo.txt' - with open(self.fname, 'w') as f: - _write_perf_mmap(101, 101, 0xABCD000, 0x100, f) - for i in range(10): - _write_perf_sample(101, 101, 0xABCD000 + i, f) - _write_perf_sample(101, 101, 0xABCE000 + i, f) - _write_perf_sample(101, 101, 0xABFD000 + i, f) - self.nm = ('000000000abcd000 t Func1@Page1\n' - '000000000abcd001 t Func2@Page1\n' - '000000000abcd0a0 t Func3@Page1andFunc1@Page2\n' - '000000000abce010 t Func2@Page2\n' - '000000000abfd000 t Func1@Page3\n') - - def tearDown(self): - _cleanup(self.fname) - - @mock.patch('subprocess.check_output') - def test_analyze_hot_pages_with_hp_top(self, mock_nm): - """Test if the analyze() can print the top page with hugepage.""" - mock_nm.return_value = self.nm - _heatmap(self.fname, hugepage=[0, 8192], analyze=True, top_n=1) - file_list = os.listdir('.') - self.assertIn('addr2symbol.txt', file_list) - with open('addr2symbol.txt') as f: - contents = f.read() - self.assertIn('Func2@Page1 : 9', contents) - self.assertIn('Func1@Page1 : 1', contents) - self.assertIn('Func1@Page3 : 10', contents) - # Only displaying one page in hugepage - self.assertNotIn('Func3@Page1andFunc1@Page2 : 10', contents) - - @mock.patch('subprocess.check_output') - def test_analyze_hot_pages_without_hp_top(self, mock_nm): - """Test if the analyze() can print the top page without hugepage.""" - mock_nm.return_value = self.nm - _heatmap(self.fname, analyze=True, top_n=1) - file_list = os.listdir('.') - self.assertIn('addr2symbol.txt', file_list) - with open('addr2symbol.txt') as f: - contents = f.read() - self.assertIn('Func2@Page1 : 9', contents) - self.assertIn('Func1@Page1 : 1', contents) - # Only displaying one page - self.assertNotIn('Func3@Page1andFunc1@Page2 : 10', contents) - self.assertNotIn('Func1@Page3 : 10', contents) - - @mock.patch('subprocess.check_output') - def test_analyze_hot_pages_with_hp_top10(self, mock_nm): - """Test if the analyze() can print with default top 10.""" - mock_nm.return_value = self.nm - _heatmap(self.fname, analyze=True) - # Make sure nm command is called correctly. 
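
The stubbing pattern these tests rely on can be reproduced standalone; a minimal sketch of patching subprocess.check_output the way the decorators below do (the nm line and binary path are illustrative):

    import subprocess
    import unittest.mock as mock

    fake_nm = "000000000abcd000 t Func1@Page1\n"
    with mock.patch("subprocess.check_output", return_value=fake_nm) as m:
        out = subprocess.check_output(["nm", "-n", "/path/to/chrome"])
        m.assert_called_with(["nm", "-n", "/path/to/chrome"])
    assert out == fake_nm
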
- mock_nm.assert_called_with(['nm', '-n', '/path/to/chrome']) - file_list = os.listdir('.') - self.assertIn('addr2symbol.txt', file_list) - with open('addr2symbol.txt') as f: - contents = f.read() - self.assertIn('Func2@Page1 : 9', contents) - self.assertIn('Func1@Page1 : 1', contents) - self.assertIn('Func3@Page1andFunc1@Page2 : 10', contents) - self.assertIn('Func1@Page3 : 10', contents) - - -if __name__ == '__main__': - unittest.main() + """All of our tests for heatmap_generator.analyze() and related.""" + + def setUp(self): + # Use the same perf report for testing + self.fname = "test_histo.txt" + with open(self.fname, "w") as f: + _write_perf_mmap(101, 101, 0xABCD000, 0x100, f) + for i in range(10): + _write_perf_sample(101, 101, 0xABCD000 + i, f) + _write_perf_sample(101, 101, 0xABCE000 + i, f) + _write_perf_sample(101, 101, 0xABFD000 + i, f) + self.nm = ( + "000000000abcd000 t Func1@Page1\n" + "000000000abcd001 t Func2@Page1\n" + "000000000abcd0a0 t Func3@Page1andFunc1@Page2\n" + "000000000abce010 t Func2@Page2\n" + "000000000abfd000 t Func1@Page3\n" + ) + + def tearDown(self): + _cleanup(self.fname) + + @mock.patch("subprocess.check_output") + def test_analyze_hot_pages_with_hp_top(self, mock_nm): + """Test if the analyze() can print the top page with hugepage.""" + mock_nm.return_value = self.nm + _heatmap(self.fname, hugepage=[0, 8192], analyze=True, top_n=1) + file_list = os.listdir(".") + self.assertIn("addr2symbol.txt", file_list) + with open("addr2symbol.txt") as f: + contents = f.read() + self.assertIn("Func2@Page1 : 9", contents) + self.assertIn("Func1@Page1 : 1", contents) + self.assertIn("Func1@Page3 : 10", contents) + # Only displaying one page in hugepage + self.assertNotIn("Func3@Page1andFunc1@Page2 : 10", contents) + + @mock.patch("subprocess.check_output") + def test_analyze_hot_pages_without_hp_top(self, mock_nm): + """Test if the analyze() can print the top page without hugepage.""" + mock_nm.return_value = self.nm + _heatmap(self.fname, analyze=True, top_n=1) + file_list = os.listdir(".") + self.assertIn("addr2symbol.txt", file_list) + with open("addr2symbol.txt") as f: + contents = f.read() + self.assertIn("Func2@Page1 : 9", contents) + self.assertIn("Func1@Page1 : 1", contents) + # Only displaying one page + self.assertNotIn("Func3@Page1andFunc1@Page2 : 10", contents) + self.assertNotIn("Func1@Page3 : 10", contents) + + @mock.patch("subprocess.check_output") + def test_analyze_hot_pages_with_hp_top10(self, mock_nm): + """Test if the analyze() can print with default top 10.""" + mock_nm.return_value = self.nm + _heatmap(self.fname, analyze=True) + # Make sure nm command is called correctly. + mock_nm.assert_called_with(["nm", "-n", "/path/to/chrome"]) + file_list = os.listdir(".") + self.assertIn("addr2symbol.txt", file_list) + with open("addr2symbol.txt") as f: + contents = f.read() + self.assertIn("Func2@Page1 : 9", contents) + self.assertIn("Func1@Page1 : 1", contents) + self.assertIn("Func3@Page1andFunc1@Page2 : 10", contents) + self.assertIn("Func1@Page3 : 10", contents) + + +if __name__ == "__main__": + unittest.main() diff --git a/image_chromeos.py b/image_chromeos.py index 64a6a81e..150c7de0 100755 --- a/image_chromeos.py +++ b/image_chromeos.py @@ -12,7 +12,8 @@ This script images a remote ChromeOS device with a specific image." 
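
Before the implementation, a sketch of how this script is driven; the flags are defined in DoImage() further below, and the host and paths here are purely illustrative:

    # Hypothetical invocation of this script:
    #   ./image_chromeos.py --chromeos_root=~/chromeos \
    #       --remote=172.16.0.10 \
    #       --image=/tmp/peppy-release/R67-1235.0.0/chromiumos_test_image.bin
    #
    # Programmatically, DoImage() consumes the same argv list:
    argv = [
        "image_chromeos.py",
        "--chromeos_root=/home/user/chromeos",
        "--remote=172.16.0.10",
        "--image=/tmp/peppy-release/R67-1235.0.0/chromiumos_test_image.bin",
    ]
    # DoImage(argv)  # would lock the remote, flash it, and verify checksums
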
from __future__ import print_function -__author__ = 'asharif@google.com (Ahmad Sharif)' + +__author__ = "asharif@google.com (Ahmad Sharif)" import argparse import filecmp @@ -31,462 +32,539 @@ from cros_utils import logger from cros_utils import misc from cros_utils.file_utils import FileUtils -checksum_file = '/usr/local/osimage_checksum_file' -lock_file = '/tmp/image_chromeos_lock/image_chromeos_lock' + +checksum_file = "/usr/local/osimage_checksum_file" +lock_file = "/tmp/image_chromeos_lock/image_chromeos_lock" def Usage(parser, message): - print('ERROR: %s' % message) - parser.print_help() - sys.exit(0) + print("ERROR: %s" % message) + parser.print_help() + sys.exit(0) def CheckForCrosFlash(chromeos_root, remote, log_level): - cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) + cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) - # Check to see if remote machine has cherrypy, ctypes - command = "python -c 'import cherrypy, ctypes'" - ret = cmd_executer.CrosRunCommand( - command, chromeos_root=chromeos_root, machine=remote) - logger.GetLogger().LogFatalIf( - ret == 255, 'Failed ssh to %s (for checking cherrypy)' % remote) - logger.GetLogger().LogFatalIf( - ret != 0, "Failed to find cherrypy or ctypes on remote '{}', " - 'cros flash cannot work.'.format(remote)) + # Check to see if remote machine has cherrypy, ctypes + command = "python -c 'import cherrypy, ctypes'" + ret = cmd_executer.CrosRunCommand( + command, chromeos_root=chromeos_root, machine=remote + ) + logger.GetLogger().LogFatalIf( + ret == 255, "Failed ssh to %s (for checking cherrypy)" % remote + ) + logger.GetLogger().LogFatalIf( + ret != 0, + "Failed to find cherrypy or ctypes on remote '{}', " + "cros flash cannot work.".format(remote), + ) def DisableCrosBeeps(chromeos_root, remote, log_level): - """Disable annoying chromebooks beeps after reboots.""" - cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) + """Disable annoying chromebooks beeps after reboots.""" + cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) - command = '/usr/share/vboot/bin/set_gbb_flags.sh 0x1' - logger.GetLogger().LogOutput('Trying to disable beeping.') + command = "/usr/share/vboot/bin/set_gbb_flags.sh 0x1" + logger.GetLogger().LogOutput("Trying to disable beeping.") - ret, o, _ = cmd_executer.CrosRunCommandWOutput( - command, chromeos_root=chromeos_root, machine=remote) - if ret != 0: - logger.GetLogger().LogOutput(o) - logger.GetLogger().LogOutput('Failed to disable beeps.') + ret, o, _ = cmd_executer.CrosRunCommandWOutput( + command, chromeos_root=chromeos_root, machine=remote + ) + if ret != 0: + logger.GetLogger().LogOutput(o) + logger.GetLogger().LogOutput("Failed to disable beeps.") def FindChromeOSImage(image_file, chromeos_root): - """Find path for ChromeOS image inside chroot. - - This function could be called with image paths that are either inside - or outside the chroot. In either case the path needs to be translated - to an real/absolute path inside the chroot. - Example input paths: - /usr/local/google/home/uname/chromeos/chroot/tmp/my-test-images/image - ~/trunk/src/build/images/board/latest/image - /tmp/peppy-release/R67-1235.0.0/image - - Corresponding example output paths: - /tmp/my-test-images/image - /home/uname/trunk/src/build/images/board/latest/image - /tmp/peppy-release/R67-1235.0,0/image - """ - - # Get the name of the user, for "/home/<user>" part of the path. 
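
The three cases in the docstring reduce to prefix handling; a standalone sketch that mirrors the logic implemented below (simplified to startswith checks, with an illustrative user name):

    real_chroot_dir = "/home/uname/chromeos/chroot"
    real_chromeos_root = "/home/uname/chromeos"
    whoami = "uname"

    def to_chroot_path(image_file):
        if image_file.startswith(real_chroot_dir):
            # Case 1: already under the chroot dir; drop that prefix.
            return image_file[len(real_chroot_dir):]
        if image_file.startswith(real_chromeos_root):
            # Case 2: under the chromeos root; remap to /home/<user>/trunk.
            tail = image_file[len(real_chromeos_root):]
            return "/home/%s/trunk%s" % (whoami, tail)
        # Case 3: assume the path is already chroot-internal.
        return image_file

    assert to_chroot_path("/home/uname/chromeos/chroot/tmp/img") == "/tmp/img"
    assert (to_chroot_path("/home/uname/chromeos/src/build/img")
            == "/home/uname/trunk/src/build/img")
    assert to_chroot_path("/tmp/peppy-release/img") == "/tmp/peppy-release/img"
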
- whoami = getpass.getuser() - # Get the full path for the chroot dir, including 'chroot' - real_chroot_dir = os.path.join(os.path.realpath(chromeos_root), 'chroot') - # Get the full path for the chromeos root, excluding 'chroot' - real_chromeos_root = os.path.realpath(chromeos_root) - - # If path name starts with real_chroot_dir, remove that piece, but assume - # the rest of the path is correct. - if image_file.find(real_chroot_dir) != -1: - chroot_image = image_file[len(real_chroot_dir):] - # If path name starts with chromeos_root, excluding 'chroot', replace the - # chromeos_root with the prefix: '/home/<username>/trunk'. - elif image_file.find(real_chromeos_root) != -1: - chroot_image = image_file[len(real_chromeos_root):] - chroot_image = '/home/%s/trunk%s' % (whoami, chroot_image) - # Else assume the path is already internal, so leave it alone. - else: - chroot_image = image_file - - return chroot_image - - -def DoImage(argv): - """Image ChromeOS.""" - - parser = argparse.ArgumentParser() - parser.add_argument( - '-c', - '--chromeos_root', - dest='chromeos_root', - help='Target directory for ChromeOS installation.') - parser.add_argument('-r', '--remote', dest='remote', help='Target device.') - parser.add_argument('-i', '--image', dest='image', help='Image binary file.') - parser.add_argument( - '-b', '--board', dest='board', help='Target board override.') - parser.add_argument( - '-f', - '--force', - dest='force', - action='store_true', - default=False, - help='Force an image even if it is non-test.') - parser.add_argument( - '-n', - '--no_lock', - dest='no_lock', - default=False, - action='store_true', - help='Do not attempt to lock remote before imaging. ' - 'This option should only be used in cases where the ' - 'exclusive lock has already been acquired (e.g. in ' - 'a script that calls this one).') - parser.add_argument( - '-l', - '--logging_level', - dest='log_level', - default='verbose', - help='Amount of logging to be used. 
Valid levels are ' - "'quiet', 'average', and 'verbose'.") - parser.add_argument('-a', '--image_args', dest='image_args') - - options = parser.parse_args(argv[1:]) - - if not options.log_level in command_executer.LOG_LEVEL: - Usage(parser, "--logging_level must be 'quiet', 'average' or 'verbose'") - else: - log_level = options.log_level - - # Common initializations - cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) - l = logger.GetLogger() - - if options.chromeos_root is None: - Usage(parser, '--chromeos_root must be set') - - if options.remote is None: - Usage(parser, '--remote must be set') - - options.chromeos_root = os.path.expanduser(options.chromeos_root) - - if options.board is None: - board = cmd_executer.CrosLearnBoard(options.chromeos_root, options.remote) - else: - board = options.board - - if options.image is None: - images_dir = misc.GetImageDir(options.chromeos_root, board) - image = os.path.join(images_dir, 'latest', 'chromiumos_test_image.bin') - if not os.path.exists(image): - image = os.path.join(images_dir, 'latest', 'chromiumos_image.bin') - is_xbuddy_image = False - else: - image = options.image - is_xbuddy_image = image.startswith('xbuddy://') - if not is_xbuddy_image: - image = os.path.expanduser(image) - - if not is_xbuddy_image: - image = os.path.realpath(image) - - if not os.path.exists(image) and not is_xbuddy_image: - Usage(parser, 'Image file: ' + image + ' does not exist!') - - try: - should_unlock = False - if not options.no_lock: - try: - _ = locks.AcquireLock( - list(options.remote.split()), options.chromeos_root) - should_unlock = True - except Exception as e: - raise RuntimeError('Error acquiring machine: %s' % str(e)) - - reimage = False - local_image = False - if not is_xbuddy_image: - local_image = True - image_checksum = FileUtils().Md5File(image, log_level=log_level) + """Find path for ChromeOS image inside chroot. + + This function could be called with image paths that are either inside + or outside the chroot. In either case the path needs to be translated + to an real/absolute path inside the chroot. + Example input paths: + /usr/local/google/home/uname/chromeos/chroot/tmp/my-test-images/image + ~/trunk/src/build/images/board/latest/image + /tmp/peppy-release/R67-1235.0.0/image + + Corresponding example output paths: + /tmp/my-test-images/image + /home/uname/trunk/src/build/images/board/latest/image + /tmp/peppy-release/R67-1235.0,0/image + """ + + # Get the name of the user, for "/home/<user>" part of the path. + whoami = getpass.getuser() + # Get the full path for the chroot dir, including 'chroot' + real_chroot_dir = os.path.join(os.path.realpath(chromeos_root), "chroot") + # Get the full path for the chromeos root, excluding 'chroot' + real_chromeos_root = os.path.realpath(chromeos_root) + + # If path name starts with real_chroot_dir, remove that piece, but assume + # the rest of the path is correct. + if image_file.find(real_chroot_dir) != -1: + chroot_image = image_file[len(real_chroot_dir) :] + # If path name starts with chromeos_root, excluding 'chroot', replace the + # chromeos_root with the prefix: '/home/<username>/trunk'. + elif image_file.find(real_chromeos_root) != -1: + chroot_image = image_file[len(real_chromeos_root) :] + chroot_image = "/home/%s/trunk%s" % (whoami, chroot_image) + # Else assume the path is already internal, so leave it alone. 
+ else: + chroot_image = image_file - command = 'cat ' + checksum_file - ret, device_checksum, _ = cmd_executer.CrosRunCommandWOutput( - command, chromeos_root=options.chromeos_root, machine=options.remote) + return chroot_image - device_checksum = device_checksum.strip() - image_checksum = str(image_checksum) - l.LogOutput('Image checksum: ' + image_checksum) - l.LogOutput('Device checksum: ' + device_checksum) +def DoImage(argv): + """Image ChromeOS.""" + + parser = argparse.ArgumentParser() + parser.add_argument( + "-c", + "--chromeos_root", + dest="chromeos_root", + help="Target directory for ChromeOS installation.", + ) + parser.add_argument("-r", "--remote", dest="remote", help="Target device.") + parser.add_argument( + "-i", "--image", dest="image", help="Image binary file." + ) + parser.add_argument( + "-b", "--board", dest="board", help="Target board override." + ) + parser.add_argument( + "-f", + "--force", + dest="force", + action="store_true", + default=False, + help="Force an image even if it is non-test.", + ) + parser.add_argument( + "-n", + "--no_lock", + dest="no_lock", + default=False, + action="store_true", + help="Do not attempt to lock remote before imaging. " + "This option should only be used in cases where the " + "exclusive lock has already been acquired (e.g. in " + "a script that calls this one).", + ) + parser.add_argument( + "-l", + "--logging_level", + dest="log_level", + default="verbose", + help="Amount of logging to be used. Valid levels are " + "'quiet', 'average', and 'verbose'.", + ) + parser.add_argument("-a", "--image_args", dest="image_args") + + options = parser.parse_args(argv[1:]) + + if not options.log_level in command_executer.LOG_LEVEL: + Usage(parser, "--logging_level must be 'quiet', 'average' or 'verbose'") + else: + log_level = options.log_level - if image_checksum != device_checksum: - [found, located_image] = LocateOrCopyImage( - options.chromeos_root, image, board=board) + # Common initializations + cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) + l = logger.GetLogger() - reimage = True - l.LogOutput('Checksums do not match. Re-imaging...') + if options.chromeos_root is None: + Usage(parser, "--chromeos_root must be set") - chroot_image = FindChromeOSImage(located_image, options.chromeos_root) + if options.remote is None: + Usage(parser, "--remote must be set") - is_test_image = IsImageModdedForTest(options.chromeos_root, - chroot_image, log_level) + options.chromeos_root = os.path.expanduser(options.chromeos_root) - if not is_test_image and not options.force: - logger.GetLogger().LogFatal('Have to pass --force to image a ' - 'non-test image!') + if options.board is None: + board = cmd_executer.CrosLearnBoard( + options.chromeos_root, options.remote + ) else: - reimage = True - found = True - l.LogOutput('Using non-local image; Re-imaging...') - - if reimage: - # If the device has /tmp mounted as noexec, image_to_live.sh can fail. - command = 'mount -o remount,rw,exec /tmp' - cmd_executer.CrosRunCommand( - command, chromeos_root=options.chromeos_root, machine=options.remote) - - # Check to see if cros flash will work for the remote machine. - CheckForCrosFlash(options.chromeos_root, options.remote, log_level) - - # Disable the annoying chromebook beeps after reboot. 
- DisableCrosBeeps(options.chromeos_root, options.remote, log_level) - - cros_flash_args = [ - 'cros', 'flash', - '--board=%s' % board, '--clobber-stateful', options.remote - ] - if local_image: - cros_flash_args.append(chroot_image) - else: - cros_flash_args.append(image) - - command = ' '.join(cros_flash_args) - - # Workaround for crosbug.com/35684. - os.chmod(misc.GetChromeOSKeyFile(options.chromeos_root), 0o600) - - if log_level == 'average': - cmd_executer.SetLogLevel('verbose') - retries = 0 - while True: - if log_level == 'quiet': - l.LogOutput('CMD : %s' % command) - ret = cmd_executer.ChrootRunCommand( - options.chromeos_root, command, command_timeout=1800) - if ret == 0 or retries >= 2: - break - retries += 1 - if log_level == 'quiet': - l.LogOutput('Imaging failed. Retry # %d.' % retries) - - if log_level == 'average': - cmd_executer.SetLogLevel(log_level) - - logger.GetLogger().LogFatalIf(ret, 'Image command failed') - - # Unfortunately cros_image_to_target.py sometimes returns early when the - # machine isn't fully up yet. - ret = EnsureMachineUp(options.chromeos_root, options.remote, log_level) - - # If this is a non-local image, then the ret returned from - # EnsureMachineUp is the one that will be returned by this function; - # in that case, make sure the value in 'ret' is appropriate. - if not local_image and ret: - ret = 0 - else: - ret = 1 - - if local_image: - if log_level == 'average': - l.LogOutput('Verifying image.') - command = 'echo %s > %s && chmod -w %s' % (image_checksum, - checksum_file, checksum_file) - ret = cmd_executer.CrosRunCommand( - command, - chromeos_root=options.chromeos_root, - machine=options.remote) - logger.GetLogger().LogFatalIf(ret, 'Writing checksum failed.') - - successfully_imaged = VerifyChromeChecksum( - options.chromeos_root, chroot_image, options.remote, log_level) - logger.GetLogger().LogFatalIf(not successfully_imaged, - 'Image verification failed!') - TryRemountPartitionAsRW(options.chromeos_root, options.remote, - log_level) - - if not found: - temp_dir = os.path.dirname(located_image) - l.LogOutput('Deleting temp image dir: %s' % temp_dir) - shutil.rmtree(temp_dir) - l.LogOutput('Image updated.') + board = options.board + + if options.image is None: + images_dir = misc.GetImageDir(options.chromeos_root, board) + image = os.path.join(images_dir, "latest", "chromiumos_test_image.bin") + if not os.path.exists(image): + image = os.path.join(images_dir, "latest", "chromiumos_image.bin") + is_xbuddy_image = False else: - l.LogOutput('Checksums match, skip image update and reboot.') - command = 'reboot && exit' - _ = cmd_executer.CrosRunCommand( - command, chromeos_root=options.chromeos_root, machine=options.remote) - # Wait 30s after reboot. 
- time.sleep(30) - - finally: - if should_unlock: - locks.ReleaseLock(list(options.remote.split()), options.chromeos_root) + image = options.image + is_xbuddy_image = image.startswith("xbuddy://") + if not is_xbuddy_image: + image = os.path.expanduser(image) - return ret + if not is_xbuddy_image: + image = os.path.realpath(image) + + if not os.path.exists(image) and not is_xbuddy_image: + Usage(parser, "Image file: " + image + " does not exist!") + + try: + should_unlock = False + if not options.no_lock: + try: + _ = locks.AcquireLock( + list(options.remote.split()), options.chromeos_root + ) + should_unlock = True + except Exception as e: + raise RuntimeError("Error acquiring machine: %s" % str(e)) + + reimage = False + local_image = False + if not is_xbuddy_image: + local_image = True + image_checksum = FileUtils().Md5File(image, log_level=log_level) + + command = "cat " + checksum_file + ret, device_checksum, _ = cmd_executer.CrosRunCommandWOutput( + command, + chromeos_root=options.chromeos_root, + machine=options.remote, + ) + + device_checksum = device_checksum.strip() + image_checksum = str(image_checksum) + + l.LogOutput("Image checksum: " + image_checksum) + l.LogOutput("Device checksum: " + device_checksum) + + if image_checksum != device_checksum: + [found, located_image] = LocateOrCopyImage( + options.chromeos_root, image, board=board + ) + + reimage = True + l.LogOutput("Checksums do not match. Re-imaging...") + + chroot_image = FindChromeOSImage( + located_image, options.chromeos_root + ) + + is_test_image = IsImageModdedForTest( + options.chromeos_root, chroot_image, log_level + ) + + if not is_test_image and not options.force: + logger.GetLogger().LogFatal( + "Have to pass --force to image a " "non-test image!" + ) + else: + reimage = True + found = True + l.LogOutput("Using non-local image; Re-imaging...") + + if reimage: + # If the device has /tmp mounted as noexec, image_to_live.sh can fail. + command = "mount -o remount,rw,exec /tmp" + cmd_executer.CrosRunCommand( + command, + chromeos_root=options.chromeos_root, + machine=options.remote, + ) + + # Check to see if cros flash will work for the remote machine. + CheckForCrosFlash(options.chromeos_root, options.remote, log_level) + + # Disable the annoying chromebook beeps after reboot. + DisableCrosBeeps(options.chromeos_root, options.remote, log_level) + + cros_flash_args = [ + "cros", + "flash", + "--board=%s" % board, + "--clobber-stateful", + options.remote, + ] + if local_image: + cros_flash_args.append(chroot_image) + else: + cros_flash_args.append(image) + + command = " ".join(cros_flash_args) + + # Workaround for crosbug.com/35684. + os.chmod(misc.GetChromeOSKeyFile(options.chromeos_root), 0o600) + + if log_level == "average": + cmd_executer.SetLogLevel("verbose") + retries = 0 + while True: + if log_level == "quiet": + l.LogOutput("CMD : %s" % command) + ret = cmd_executer.ChrootRunCommand( + options.chromeos_root, command, command_timeout=1800 + ) + if ret == 0 or retries >= 2: + break + retries += 1 + if log_level == "quiet": + l.LogOutput("Imaging failed. Retry # %d." % retries) + + if log_level == "average": + cmd_executer.SetLogLevel(log_level) + + logger.GetLogger().LogFatalIf(ret, "Image command failed") + + # Unfortunately cros_image_to_target.py sometimes returns early when the + # machine isn't fully up yet. 
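+            # EnsureMachineUp (defined below) polls the device with a trivial
+            # command until it responds or its 600-second timeout expires.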
+ ret = EnsureMachineUp( + options.chromeos_root, options.remote, log_level + ) + + # If this is a non-local image, then the ret returned from + # EnsureMachineUp is the one that will be returned by this function; + # in that case, make sure the value in 'ret' is appropriate. + if not local_image and ret: + ret = 0 + else: + ret = 1 + + if local_image: + if log_level == "average": + l.LogOutput("Verifying image.") + command = "echo %s > %s && chmod -w %s" % ( + image_checksum, + checksum_file, + checksum_file, + ) + ret = cmd_executer.CrosRunCommand( + command, + chromeos_root=options.chromeos_root, + machine=options.remote, + ) + logger.GetLogger().LogFatalIf(ret, "Writing checksum failed.") + + successfully_imaged = VerifyChromeChecksum( + options.chromeos_root, + chroot_image, + options.remote, + log_level, + ) + logger.GetLogger().LogFatalIf( + not successfully_imaged, "Image verification failed!" + ) + TryRemountPartitionAsRW( + options.chromeos_root, options.remote, log_level + ) + + if not found: + temp_dir = os.path.dirname(located_image) + l.LogOutput("Deleting temp image dir: %s" % temp_dir) + shutil.rmtree(temp_dir) + l.LogOutput("Image updated.") + else: + l.LogOutput("Checksums match, skip image update and reboot.") + command = "reboot && exit" + _ = cmd_executer.CrosRunCommand( + command, + chromeos_root=options.chromeos_root, + machine=options.remote, + ) + # Wait 30s after reboot. + time.sleep(30) + + finally: + if should_unlock: + locks.ReleaseLock( + list(options.remote.split()), options.chromeos_root + ) + + return ret def LocateOrCopyImage(chromeos_root, image, board=None): - l = logger.GetLogger() - if board is None: - board_glob = '*' - else: - board_glob = board - - chromeos_root_realpath = os.path.realpath(chromeos_root) - image = os.path.realpath(image) - - if image.startswith('%s/' % chromeos_root_realpath): - return [True, image] - - # First search within the existing build dirs for any matching files. - images_glob = ( - '%s/src/build/images/%s/*/*.bin' % (chromeos_root_realpath, board_glob)) - images_list = glob.glob(images_glob) - for potential_image in images_list: - if filecmp.cmp(potential_image, image): - l.LogOutput('Found matching image %s in chromeos_root.' % potential_image) - return [True, potential_image] - # We did not find an image. Copy it in the src dir and return the copied - # file. - if board is None: - board = '' - base_dir = ('%s/src/build/images/%s' % (chromeos_root_realpath, board)) - if not os.path.isdir(base_dir): - os.makedirs(base_dir) - temp_dir = tempfile.mkdtemp(prefix='%s/tmp' % base_dir) - new_image = '%s/%s' % (temp_dir, os.path.basename(image)) - l.LogOutput('No matching image found. Copying %s to %s' % (image, new_image)) - shutil.copyfile(image, new_image) - return [False, new_image] + l = logger.GetLogger() + if board is None: + board_glob = "*" + else: + board_glob = board + + chromeos_root_realpath = os.path.realpath(chromeos_root) + image = os.path.realpath(image) + + if image.startswith("%s/" % chromeos_root_realpath): + return [True, image] + + # First search within the existing build dirs for any matching files. + images_glob = "%s/src/build/images/%s/*/*.bin" % ( + chromeos_root_realpath, + board_glob, + ) + images_list = glob.glob(images_glob) + for potential_image in images_list: + if filecmp.cmp(potential_image, image): + l.LogOutput( + "Found matching image %s in chromeos_root." % potential_image + ) + return [True, potential_image] + # We did not find an image. 
Copy it in the src dir and return the copied + # file. + if board is None: + board = "" + base_dir = "%s/src/build/images/%s" % (chromeos_root_realpath, board) + if not os.path.isdir(base_dir): + os.makedirs(base_dir) + temp_dir = tempfile.mkdtemp(prefix="%s/tmp" % base_dir) + new_image = "%s/%s" % (temp_dir, os.path.basename(image)) + l.LogOutput( + "No matching image found. Copying %s to %s" % (image, new_image) + ) + shutil.copyfile(image, new_image) + return [False, new_image] def GetImageMountCommand(image, rootfs_mp, stateful_mp): - image_dir = os.path.dirname(image) - image_file = os.path.basename(image) - mount_command = ('cd /mnt/host/source/src/scripts &&' - './mount_gpt_image.sh --from=%s --image=%s' - ' --safe --read_only' - ' --rootfs_mountpt=%s' - ' --stateful_mountpt=%s' % (image_dir, image_file, rootfs_mp, - stateful_mp)) - return mount_command - - -def MountImage(chromeos_root, - image, - rootfs_mp, - stateful_mp, - log_level, - unmount=False, - extra_commands=''): - cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) - command = GetImageMountCommand(image, rootfs_mp, stateful_mp) - if unmount: - command = '%s --unmount' % command - if extra_commands: - command = '%s ; %s' % (command, extra_commands) - ret, out, _ = cmd_executer.ChrootRunCommandWOutput(chromeos_root, command) - logger.GetLogger().LogFatalIf(ret, 'Mount/unmount command failed!') - return out + image_dir = os.path.dirname(image) + image_file = os.path.basename(image) + mount_command = ( + "cd /mnt/host/source/src/scripts &&" + "./mount_gpt_image.sh --from=%s --image=%s" + " --safe --read_only" + " --rootfs_mountpt=%s" + " --stateful_mountpt=%s" + % (image_dir, image_file, rootfs_mp, stateful_mp) + ) + return mount_command + + +def MountImage( + chromeos_root, + image, + rootfs_mp, + stateful_mp, + log_level, + unmount=False, + extra_commands="", +): + cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) + command = GetImageMountCommand(image, rootfs_mp, stateful_mp) + if unmount: + command = "%s --unmount" % command + if extra_commands: + command = "%s ; %s" % (command, extra_commands) + ret, out, _ = cmd_executer.ChrootRunCommandWOutput(chromeos_root, command) + logger.GetLogger().LogFatalIf(ret, "Mount/unmount command failed!") + return out def IsImageModdedForTest(chromeos_root, image, log_level): - if log_level != 'verbose': - log_level = 'quiet' - command = 'mktemp -d' - cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) - _, rootfs_mp, _ = cmd_executer.ChrootRunCommandWOutput(chromeos_root, command) - _, stateful_mp, _ = cmd_executer.ChrootRunCommandWOutput( - chromeos_root, command) - rootfs_mp = rootfs_mp.strip() - stateful_mp = stateful_mp.strip() - lsb_release_file = os.path.join(rootfs_mp, 'etc/lsb-release') - extra = ('grep CHROMEOS_RELEASE_TRACK %s | grep -i test' % lsb_release_file) - output = MountImage( - chromeos_root, - image, - rootfs_mp, - stateful_mp, - log_level, - extra_commands=extra) - is_test_image = re.search('test', output, re.IGNORECASE) - MountImage( - chromeos_root, image, rootfs_mp, stateful_mp, log_level, unmount=True) - return is_test_image + if log_level != "verbose": + log_level = "quiet" + command = "mktemp -d" + cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) + _, rootfs_mp, _ = cmd_executer.ChrootRunCommandWOutput( + chromeos_root, command + ) + _, stateful_mp, _ = cmd_executer.ChrootRunCommandWOutput( + chromeos_root, command + ) + rootfs_mp = rootfs_mp.strip() + stateful_mp = 
stateful_mp.strip() + lsb_release_file = os.path.join(rootfs_mp, "etc/lsb-release") + extra = "grep CHROMEOS_RELEASE_TRACK %s | grep -i test" % lsb_release_file + output = MountImage( + chromeos_root, + image, + rootfs_mp, + stateful_mp, + log_level, + extra_commands=extra, + ) + is_test_image = re.search("test", output, re.IGNORECASE) + MountImage( + chromeos_root, image, rootfs_mp, stateful_mp, log_level, unmount=True + ) + return is_test_image def VerifyChromeChecksum(chromeos_root, image, remote, log_level): - command = 'mktemp -d' - cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) - _, rootfs_mp, _ = cmd_executer.ChrootRunCommandWOutput(chromeos_root, command) - _, stateful_mp, _ = cmd_executer.ChrootRunCommandWOutput( - chromeos_root, command) - rootfs_mp = rootfs_mp.strip() - stateful_mp = stateful_mp.strip() - chrome_file = '%s/opt/google/chrome/chrome' % rootfs_mp - extra = 'md5sum %s' % chrome_file - out = MountImage( - chromeos_root, - image, - rootfs_mp, - stateful_mp, - log_level, - extra_commands=extra) - image_chrome_checksum = out.strip().split()[0] - MountImage( - chromeos_root, image, rootfs_mp, stateful_mp, log_level, unmount=True) - - command = 'md5sum /opt/google/chrome/chrome' - [_, o, _] = cmd_executer.CrosRunCommandWOutput( - command, chromeos_root=chromeos_root, machine=remote) - device_chrome_checksum = o.split()[0] - return image_chrome_checksum.strip() == device_chrome_checksum.strip() + command = "mktemp -d" + cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) + _, rootfs_mp, _ = cmd_executer.ChrootRunCommandWOutput( + chromeos_root, command + ) + _, stateful_mp, _ = cmd_executer.ChrootRunCommandWOutput( + chromeos_root, command + ) + rootfs_mp = rootfs_mp.strip() + stateful_mp = stateful_mp.strip() + chrome_file = "%s/opt/google/chrome/chrome" % rootfs_mp + extra = "md5sum %s" % chrome_file + out = MountImage( + chromeos_root, + image, + rootfs_mp, + stateful_mp, + log_level, + extra_commands=extra, + ) + image_chrome_checksum = out.strip().split()[0] + MountImage( + chromeos_root, image, rootfs_mp, stateful_mp, log_level, unmount=True + ) + + command = "md5sum /opt/google/chrome/chrome" + [_, o, _] = cmd_executer.CrosRunCommandWOutput( + command, chromeos_root=chromeos_root, machine=remote + ) + device_chrome_checksum = o.split()[0] + return image_chrome_checksum.strip() == device_chrome_checksum.strip() # Remount partition as writable. # TODO: auto-detect if an image is built using --noenable_rootfs_verification. def TryRemountPartitionAsRW(chromeos_root, remote, log_level): - l = logger.GetLogger() - cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) - command = 'sudo mount -o remount,rw /' - ret = cmd_executer.CrosRunCommand(\ - command, chromeos_root=chromeos_root, machine=remote, - terminated_timeout=10) - if ret: - ## Safely ignore. - l.LogWarning('Failed to remount partition as rw, ' - 'probably the image was not built with ' - '"--noenable_rootfs_verification", ' - 'you can safely ignore this.') - else: - l.LogOutput('Re-mounted partition as writable.') + l = logger.GetLogger() + cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) + command = "sudo mount -o remount,rw /" + ret = cmd_executer.CrosRunCommand( + command, + chromeos_root=chromeos_root, + machine=remote, + terminated_timeout=10, + ) + if ret: + ## Safely ignore. 
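+        # (A failed remount is expected when the image was built with rootfs
+        # verification enabled, as the warning below explains.)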
+ l.LogWarning( + "Failed to remount partition as rw, " + "probably the image was not built with " + '"--noenable_rootfs_verification", ' + "you can safely ignore this." + ) + else: + l.LogOutput("Re-mounted partition as writable.") def EnsureMachineUp(chromeos_root, remote, log_level): - l = logger.GetLogger() - cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) - timeout = 600 - magic = 'abcdefghijklmnopqrstuvwxyz' - command = 'echo %s' % magic - start_time = time.time() - while True: - current_time = time.time() - if current_time - start_time > timeout: - l.LogError( - 'Timeout of %ss reached. Machine still not up. Aborting.' % timeout) - return False - ret = cmd_executer.CrosRunCommand( - command, chromeos_root=chromeos_root, machine=remote) - if not ret: - return True + l = logger.GetLogger() + cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) + timeout = 600 + magic = "abcdefghijklmnopqrstuvwxyz" + command = "echo %s" % magic + start_time = time.time() + while True: + current_time = time.time() + if current_time - start_time > timeout: + l.LogError( + "Timeout of %ss reached. Machine still not up. Aborting." + % timeout + ) + return False + ret = cmd_executer.CrosRunCommand( + command, chromeos_root=chromeos_root, machine=remote + ) + if not ret: + return True -if __name__ == '__main__': - retval = DoImage(sys.argv) - sys.exit(retval) +if __name__ == "__main__": + retval = DoImage(sys.argv) + sys.exit(retval) diff --git a/llvm_extra/create_ebuild_file.py b/llvm_extra/create_ebuild_file.py index ec39fde5..d974d50c 100755 --- a/llvm_extra/create_ebuild_file.py +++ b/llvm_extra/create_ebuild_file.py @@ -97,62 +97,62 @@ import sys def process_line(line, text): - # Process the line and append to the text we want to generate. - # Check if line has any patterns that we want to handle. - newline = line.strip() - if newline.startswith('#'): - # Do not process comment lines. - text.append(line) - elif line.startswith('SLOT='): - # Change SLOT to "${PV%%_p[[:digit:]]*}" - SLOT_STRING = 'SLOT="${PV%%_p[[:digit:]]*}"\n' - text.append(SLOT_STRING) - elif line.startswith('IUSE') and 'multitarget' in line: - # Enable multitarget USE flag. - newline = line.replace('multitarget', '+multitarget') - text.append(newline) - elif line.startswith('pkg_setup()'): - # Setup PREFIX. - text.append(line) - text.append('\texport PREFIX="/usr/${PN}/${SLOT}"\n') - elif line.startswith('multilib_src_install_all()'): - text.append(line) - # Do not install any common files. - text.append('\treturn\n') - elif 'epatch ' in line: - # Convert any $PN or ${PN} in epatch files to llvm. - newline = line.replace('$PN', 'llvm') - newline = newline.replace('${PN}', 'llvm') - text.append(newline) - elif 'multilib-minimal_src_install' in line: - # Disable MULTILIB_CHOST_TOOLS and MULTILIB_WRAPPED_HEADERS - text.append('\tMULTILIB_CHOST_TOOLS=()\n') - text.append('\tMULTILIB_WRAPPED_HEADERS=()\n') - text.append(line) - elif 'cmake-utils_src_install' in line: - text.append(line) - # Do not install any wrappers. - text.append('\treturn\n') - else: - text.append(line) + # Process the line and append to the text we want to generate. + # Check if line has any patterns that we want to handle. + newline = line.strip() + if newline.startswith("#"): + # Do not process comment lines. 
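+        # (The raw line, trailing newline included, is copied through as-is.)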
+ text.append(line) + elif line.startswith("SLOT="): + # Change SLOT to "${PV%%_p[[:digit:]]*}" + SLOT_STRING = 'SLOT="${PV%%_p[[:digit:]]*}"\n' + text.append(SLOT_STRING) + elif line.startswith("IUSE") and "multitarget" in line: + # Enable multitarget USE flag. + newline = line.replace("multitarget", "+multitarget") + text.append(newline) + elif line.startswith("pkg_setup()"): + # Setup PREFIX. + text.append(line) + text.append('\texport PREFIX="/usr/${PN}/${SLOT}"\n') + elif line.startswith("multilib_src_install_all()"): + text.append(line) + # Do not install any common files. + text.append("\treturn\n") + elif "epatch " in line: + # Convert any $PN or ${PN} in epatch files to llvm. + newline = line.replace("$PN", "llvm") + newline = newline.replace("${PN}", "llvm") + text.append(newline) + elif "multilib-minimal_src_install" in line: + # Disable MULTILIB_CHOST_TOOLS and MULTILIB_WRAPPED_HEADERS + text.append("\tMULTILIB_CHOST_TOOLS=()\n") + text.append("\tMULTILIB_WRAPPED_HEADERS=()\n") + text.append(line) + elif "cmake-utils_src_install" in line: + text.append(line) + # Do not install any wrappers. + text.append("\treturn\n") + else: + text.append(line) def main(): - if len(sys.argv) != 3: - filename = os.path.basename(__file__) - print('Usage: ', filename, ' <input.ebuild> <output.ebuild>') - return 1 + if len(sys.argv) != 3: + filename = os.path.basename(__file__) + print("Usage: ", filename, " <input.ebuild> <output.ebuild>") + return 1 - text = [] - with open(sys.argv[1], 'r') as infile: - for line in infile: - process_line(line, text) + text = [] + with open(sys.argv[1], "r") as infile: + for line in infile: + process_line(line, text) - with open(sys.argv[2], 'w') as outfile: - outfile.write(''.join(text)) + with open(sys.argv[2], "w") as outfile: + outfile.write("".join(text)) - return 0 + return 0 -if __name__ == '__main__': - sys.exit(main()) +if __name__ == "__main__": + sys.exit(main()) diff --git a/llvm_tools/auto_llvm_bisection.py b/llvm_tools/auto_llvm_bisection.py index 02fb7b93..b9d04d1d 100755 --- a/llvm_tools/auto_llvm_bisection.py +++ b/llvm_tools/auto_llvm_bisection.py @@ -17,10 +17,11 @@ import time import traceback import chroot -from llvm_bisection import BisectionExitStatus import llvm_bisection +from llvm_bisection import BisectionExitStatus import update_tryjob_status + # Used to re-try for 'llvm_bisection.py' to attempt to launch more tryjobs. BISECTION_RETRY_TIME_SECS = 10 * 60 @@ -42,146 +43,167 @@ POLLING_LIMIT_SECS = 18 * 60 * 60 class BuilderStatus(enum.Enum): - """Actual values given via 'cros buildresult'.""" + """Actual values given via 'cros buildresult'.""" - PASS = 'pass' - FAIL = 'fail' - RUNNING = 'running' + PASS = "pass" + FAIL = "fail" + RUNNING = "running" builder_status_mapping = { BuilderStatus.PASS.value: update_tryjob_status.TryjobStatus.GOOD.value, BuilderStatus.FAIL.value: update_tryjob_status.TryjobStatus.BAD.value, - BuilderStatus.RUNNING.value: - update_tryjob_status.TryjobStatus.PENDING.value + BuilderStatus.RUNNING.value: update_tryjob_status.TryjobStatus.PENDING.value, } def GetBuildResult(chroot_path, buildbucket_id): - """Returns the conversion of the result of 'cros buildresult'.""" - - # Calls 'cros buildresult' to get the status of the tryjob. 
- try: - tryjob_json = subprocess.check_output( - [ - 'cros_sdk', '--', 'cros', 'buildresult', '--buildbucket-id', - str(buildbucket_id), '--report', 'json' - ], - cwd=chroot_path, - stderr=subprocess.STDOUT, - encoding='UTF-8', - ) - except subprocess.CalledProcessError as err: - if 'No build found. Perhaps not started' not in err.output: - raise - return None - - tryjob_content = json.loads(tryjob_json) - - build_result = str(tryjob_content['%d' % buildbucket_id]['status']) - - # The string returned by 'cros buildresult' might not be in the mapping. - if build_result not in builder_status_mapping: - raise ValueError('"cros buildresult" return value is invalid: %s' % - build_result) - - return builder_status_mapping[build_result] + """Returns the conversion of the result of 'cros buildresult'.""" + + # Calls 'cros buildresult' to get the status of the tryjob. + try: + tryjob_json = subprocess.check_output( + [ + "cros_sdk", + "--", + "cros", + "buildresult", + "--buildbucket-id", + str(buildbucket_id), + "--report", + "json", + ], + cwd=chroot_path, + stderr=subprocess.STDOUT, + encoding="UTF-8", + ) + except subprocess.CalledProcessError as err: + if "No build found. Perhaps not started" not in err.output: + raise + return None + + tryjob_content = json.loads(tryjob_json) + + build_result = str(tryjob_content["%d" % buildbucket_id]["status"]) + + # The string returned by 'cros buildresult' might not be in the mapping. + if build_result not in builder_status_mapping: + raise ValueError( + '"cros buildresult" return value is invalid: %s' % build_result + ) + + return builder_status_mapping[build_result] def main(): - """Bisects LLVM using the result of `cros buildresult` of each tryjob. - - Raises: - AssertionError: The script was run inside the chroot. - """ + """Bisects LLVM using the result of `cros buildresult` of each tryjob. - chroot.VerifyOutsideChroot() + Raises: + AssertionError: The script was run inside the chroot. + """ - args_output = llvm_bisection.GetCommandLineArgs() + chroot.VerifyOutsideChroot() - if os.path.isfile(args_output.last_tested): - print('Resuming bisection for %s' % args_output.last_tested) - else: - print('Starting a new bisection for %s' % args_output.last_tested) + args_output = llvm_bisection.GetCommandLineArgs() - while True: - # Update the status of existing tryjobs if os.path.isfile(args_output.last_tested): - update_start_time = time.time() - with open(args_output.last_tested) as json_file: - json_dict = json.load(json_file) - while True: - print('\nAttempting to update all tryjobs whose "status" is ' - '"pending":') - print('-' * 40) - - completed = True - for tryjob in json_dict['jobs']: - if tryjob[ - 'status'] == update_tryjob_status.TryjobStatus.PENDING.value: - status = GetBuildResult(args_output.chroot_path, - tryjob['buildbucket_id']) - if status: - tryjob['status'] = status - else: - completed = False - - print('-' * 40) - - # Proceed to the next step if all the existing tryjobs have completed. - if completed: - break - - delta_time = time.time() - update_start_time - - if delta_time > POLLING_LIMIT_SECS: - # Something is wrong with updating the tryjobs's 'status' via - # `cros buildresult` (e.g. network issue, etc.). - sys.exit('Failed to update pending tryjobs.') - - print('-' * 40) - print('Sleeping for %d minutes.' % (POLL_RETRY_TIME_SECS // 60)) - time.sleep(POLL_RETRY_TIME_SECS) - - # There should always be update from the tryjobs launched in the - # last iteration. 
-      temp_filename = '%s.new' % args_output.last_tested
-      with open(temp_filename, 'w') as temp_file:
-        json.dump(json_dict, temp_file, indent=4, separators=(',', ': '))
-      os.rename(temp_filename, args_output.last_tested)
-
-    # Launch more tryjobs.
-    for cur_try in range(1, BISECTION_ATTEMPTS + 1):
-      try:
-        print('\nAttempting to launch more tryjobs if possible:')
-        print('-' * 40)
-
-        bisection_ret = llvm_bisection.main(args_output)
-
-        print('-' * 40)
-
-        # Stop if the bisection has completed.
-        if bisection_ret == BisectionExitStatus.BISECTION_COMPLETE.value:
-          sys.exit(0)
-
-        # Successfully launched more tryjobs.
-        break
-      except Exception:
-        traceback.print_exc()
-
-        print('-' * 40)
-
-        # Exceeded the number of times to launch more tryjobs.
-        if cur_try == BISECTION_ATTEMPTS:
-          sys.exit('Unable to continue bisection.')
-
-        num_retries_left = BISECTION_ATTEMPTS - cur_try
-
-        print('Retries left to continue bisection %d.' % num_retries_left)
-
-        print('Sleeping for %d minutes.' % (BISECTION_RETRY_TIME_SECS // 60))
-        time.sleep(BISECTION_RETRY_TIME_SECS)
-
-
-if __name__ == '__main__':
-  main()
+        print("Resuming bisection for %s" % args_output.last_tested)
+    else:
+        print("Starting a new bisection for %s" % args_output.last_tested)
+
+    while True:
+        # Update the status of existing tryjobs
+        if os.path.isfile(args_output.last_tested):
+            update_start_time = time.time()
+            with open(args_output.last_tested) as json_file:
+                json_dict = json.load(json_file)
+            while True:
+                print(
+                    '\nAttempting to update all tryjobs whose "status" is '
+                    '"pending":'
+                )
+                print("-" * 40)
+
+                completed = True
+                for tryjob in json_dict["jobs"]:
+                    if (
+                        tryjob["status"]
+                        == update_tryjob_status.TryjobStatus.PENDING.value
+                    ):
+                        status = GetBuildResult(
+                            args_output.chroot_path, tryjob["buildbucket_id"]
+                        )
+                        if status:
+                            tryjob["status"] = status
+                        else:
+                            completed = False
+
+                print("-" * 40)
+
+                # Proceed to the next step if all the existing tryjobs have completed.
+                if completed:
+                    break
+
+                delta_time = time.time() - update_start_time
+
+                if delta_time > POLLING_LIMIT_SECS:
+                    # Something is wrong with updating the tryjobs' 'status' via
+                    # `cros buildresult` (e.g. network issue, etc.).
+                    sys.exit("Failed to update pending tryjobs.")
+
+                print("-" * 40)
+                print("Sleeping for %d minutes." % (POLL_RETRY_TIME_SECS // 60))
+                time.sleep(POLL_RETRY_TIME_SECS)
+
+            # There should always be an update from the tryjobs launched in the
+            # last iteration.
+            temp_filename = "%s.new" % args_output.last_tested
+            with open(temp_filename, "w") as temp_file:
+                json.dump(
+                    json_dict, temp_file, indent=4, separators=(",", ": ")
+                )
+            os.rename(temp_filename, args_output.last_tested)
+
+        # Launch more tryjobs.
+        for cur_try in range(1, BISECTION_ATTEMPTS + 1):
+            try:
+                print("\nAttempting to launch more tryjobs if possible:")
+                print("-" * 40)
+
+                bisection_ret = llvm_bisection.main(args_output)
+
+                print("-" * 40)
+
+                # Stop if the bisection has completed.
+                if (
+                    bisection_ret
+                    == BisectionExitStatus.BISECTION_COMPLETE.value
+                ):
+                    sys.exit(0)
+
+                # Successfully launched more tryjobs.
+                break
+            except Exception:
+                traceback.print_exc()
+
+                print("-" * 40)
+
+                # Exceeded the number of times to launch more tryjobs.
+                if cur_try == BISECTION_ATTEMPTS:
+                    sys.exit("Unable to continue bisection.")
+
+                num_retries_left = BISECTION_ATTEMPTS - cur_try
+
+                print(
+                    "Retries left to continue bisection %d." % num_retries_left
+                )
+
+                print(
+                    "Sleeping for %d minutes."
+ % (BISECTION_RETRY_TIME_SECS // 60) + ) + time.sleep(BISECTION_RETRY_TIME_SECS) + + +if __name__ == "__main__": + main() diff --git a/llvm_tools/auto_llvm_bisection_unittest.py b/llvm_tools/auto_llvm_bisection_unittest.py index b134aa50..9d0654cf 100755 --- a/llvm_tools/auto_llvm_bisection_unittest.py +++ b/llvm_tools/auto_llvm_bisection_unittest.py @@ -24,227 +24,268 @@ import update_tryjob_status class AutoLLVMBisectionTest(unittest.TestCase): - """Unittests for auto bisection of LLVM.""" - - @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True) - @mock.patch.object(llvm_bisection, - 'GetCommandLineArgs', - return_value=test_helpers.ArgsOutputTest()) - @mock.patch.object(time, 'sleep') - @mock.patch.object(traceback, 'print_exc') - @mock.patch.object(llvm_bisection, 'main') - @mock.patch.object(os.path, 'isfile') - @mock.patch.object(auto_llvm_bisection, 'open') - @mock.patch.object(json, 'load') - @mock.patch.object(auto_llvm_bisection, 'GetBuildResult') - @mock.patch.object(os, 'rename') - def testAutoLLVMBisectionPassed( - self, - # pylint: disable=unused-argument - mock_rename, - mock_get_build_result, - mock_json_load, - # pylint: disable=unused-argument - mock_open, - mock_isfile, - mock_llvm_bisection, - mock_traceback, - mock_sleep, - mock_get_args, - mock_outside_chroot): - - mock_isfile.side_effect = [False, False, True, True] - mock_llvm_bisection.side_effect = [ - 0, - ValueError('Failed to launch more tryjobs.'), - llvm_bisection.BisectionExitStatus.BISECTION_COMPLETE.value - ] - mock_json_load.return_value = { - 'start': - 369410, - 'end': - 369420, - 'jobs': [{ - 'buildbucket_id': 12345, - 'rev': 369411, - 'status': update_tryjob_status.TryjobStatus.PENDING.value, - }] - } - mock_get_build_result.return_value = ( - update_tryjob_status.TryjobStatus.GOOD.value) - - # Verify the excpetion is raised when successfully found the bad revision. - # Uses `sys.exit(0)` to indicate success. - with self.assertRaises(SystemExit) as err: - auto_llvm_bisection.main() - - self.assertEqual(err.exception.code, 0) - - mock_outside_chroot.assert_called_once() - mock_get_args.assert_called_once() - self.assertEqual(mock_isfile.call_count, 3) - self.assertEqual(mock_llvm_bisection.call_count, 3) - mock_traceback.assert_called_once() - mock_sleep.assert_called_once() - - @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True) - @mock.patch.object(time, 'sleep') - @mock.patch.object(traceback, 'print_exc') - @mock.patch.object(llvm_bisection, 'main') - @mock.patch.object(os.path, 'isfile') - @mock.patch.object(llvm_bisection, - 'GetCommandLineArgs', - return_value=test_helpers.ArgsOutputTest()) - def testFailedToStartBisection(self, mock_get_args, mock_isfile, - mock_llvm_bisection, mock_traceback, - mock_sleep, mock_outside_chroot): - - mock_isfile.return_value = False - mock_llvm_bisection.side_effect = ValueError( - 'Failed to launch more tryjobs.') - - # Verify the exception is raised when the number of attempts to launched - # more tryjobs is exceeded, so unable to continue - # bisection. 
- with self.assertRaises(SystemExit) as err: - auto_llvm_bisection.main() - - self.assertEqual(err.exception.code, 'Unable to continue bisection.') - - mock_outside_chroot.assert_called_once() - mock_get_args.assert_called_once() - self.assertEqual(mock_isfile.call_count, 2) - self.assertEqual(mock_llvm_bisection.call_count, 3) - self.assertEqual(mock_traceback.call_count, 3) - self.assertEqual(mock_sleep.call_count, 2) - - @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True) - @mock.patch.object(llvm_bisection, - 'GetCommandLineArgs', - return_value=test_helpers.ArgsOutputTest()) - @mock.patch.object(time, 'time') - @mock.patch.object(time, 'sleep') - @mock.patch.object(os.path, 'isfile') - @mock.patch.object(auto_llvm_bisection, 'open') - @mock.patch.object(json, 'load') - @mock.patch.object(auto_llvm_bisection, 'GetBuildResult') - def testFailedToUpdatePendingTryJobs( - self, - mock_get_build_result, - mock_json_load, - # pylint: disable=unused-argument - mock_open, - mock_isfile, - mock_sleep, - mock_time, - mock_get_args, - mock_outside_chroot): - - # Simulate behavior of `time.time()` for time passed. - @test_helpers.CallCountsToMockFunctions - def MockTimePassed(call_count): - if call_count < 3: - return call_count - - assert False, 'Called `time.time()` more than expected.' - - mock_isfile.return_value = True - mock_json_load.return_value = { - 'start': - 369410, - 'end': - 369420, - 'jobs': [{ - 'buildbucket_id': 12345, - 'rev': 369411, - 'status': update_tryjob_status.TryjobStatus.PENDING.value, - }] - } - mock_get_build_result.return_value = None - mock_time.side_effect = MockTimePassed - # Reduce the polling limit for the test case to terminate faster. - auto_llvm_bisection.POLLING_LIMIT_SECS = 1 - - # Verify the exception is raised when unable to update tryjobs whose - # 'status' value is 'pending'. - with self.assertRaises(SystemExit) as err: - auto_llvm_bisection.main() - - self.assertEqual(err.exception.code, 'Failed to update pending tryjobs.') - - mock_outside_chroot.assert_called_once() - mock_get_args.assert_called_once() - self.assertEqual(mock_isfile.call_count, 2) - mock_sleep.assert_called_once() - self.assertEqual(mock_time.call_count, 3) - - @mock.patch.object(subprocess, 'check_output') - def testGetBuildResult(self, mock_chroot_command): - buildbucket_id = 192 - status = auto_llvm_bisection.BuilderStatus.PASS.value - tryjob_contents = {buildbucket_id: {'status': status}} - mock_chroot_command.return_value = json.dumps(tryjob_contents) - chroot_path = '/some/path/to/chroot' - - self.assertEqual( - auto_llvm_bisection.GetBuildResult(chroot_path, buildbucket_id), - update_tryjob_status.TryjobStatus.GOOD.value) - - mock_chroot_command.assert_called_once_with( - [ - 'cros_sdk', '--', 'cros', 'buildresult', '--buildbucket-id', - str(buildbucket_id), '--report', 'json' - ], - cwd='/some/path/to/chroot', - stderr=subprocess.STDOUT, - encoding='UTF-8', - ) + """Unittests for auto bisection of LLVM.""" - @mock.patch.object(subprocess, 'check_output') - def testGetBuildResultPassedWithUnstartedTryjob(self, mock_chroot_command): - buildbucket_id = 192 - chroot_path = '/some/path/to/chroot' - mock_chroot_command.side_effect = subprocess.CalledProcessError( - returncode=1, cmd=[], output='No build found. 
Perhaps not started')
-    auto_llvm_bisection.GetBuildResult(chroot_path, buildbucket_id)
-    mock_chroot_command.assert_called_once_with(
-        [
-            'cros_sdk', '--', 'cros', 'buildresult', '--buildbucket-id', '192',
-            '--report', 'json'
-        ],
-        cwd=chroot_path,
-        stderr=subprocess.STDOUT,
-        encoding='UTF-8',
+    @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True)
+    @mock.patch.object(
+        llvm_bisection,
+        "GetCommandLineArgs",
+        return_value=test_helpers.ArgsOutputTest(),
    )
-
-  @mock.patch.object(subprocess, 'check_output')
-  def testGetBuildReusultFailedWithInvalidBuildStatus(self,
                                                      mock_chroot_command):
-    chroot_path = '/some/path/to/chroot'
-    buildbucket_id = 50
-    invalid_build_status = 'querying'
-    tryjob_contents = {buildbucket_id: {'status': invalid_build_status}}
-    mock_chroot_command.return_value = json.dumps(tryjob_contents)
-
-    # Verify the exception is raised when the return value of `cros buildresult`
-    # is not in the `builder_status_mapping`.
-    with self.assertRaises(ValueError) as err:
-      auto_llvm_bisection.GetBuildResult(chroot_path, buildbucket_id)
-
-    self.assertEqual(
-        str(err.exception), '"cros buildresult" return value is invalid: %s' %
-        invalid_build_status)
-
-    mock_chroot_command.assert_called_once_with(
-        [
-            'cros_sdk', '--', 'cros', 'buildresult', '--buildbucket-id',
-            str(buildbucket_id), '--report', 'json'
-        ],
-        cwd=chroot_path,
-        stderr=subprocess.STDOUT,
-        encoding='UTF-8',
+    @mock.patch.object(time, "sleep")
+    @mock.patch.object(traceback, "print_exc")
+    @mock.patch.object(llvm_bisection, "main")
+    @mock.patch.object(os.path, "isfile")
+    @mock.patch.object(auto_llvm_bisection, "open")
+    @mock.patch.object(json, "load")
+    @mock.patch.object(auto_llvm_bisection, "GetBuildResult")
+    @mock.patch.object(os, "rename")
+    def testAutoLLVMBisectionPassed(
+        self,
+        # pylint: disable=unused-argument
+        mock_rename,
+        mock_get_build_result,
+        mock_json_load,
+        # pylint: disable=unused-argument
+        mock_open,
+        mock_isfile,
+        mock_llvm_bisection,
+        mock_traceback,
+        mock_sleep,
+        mock_get_args,
+        mock_outside_chroot,
+    ):
+
+        mock_isfile.side_effect = [False, False, True, True]
+        mock_llvm_bisection.side_effect = [
+            0,
+            ValueError("Failed to launch more tryjobs."),
+            llvm_bisection.BisectionExitStatus.BISECTION_COMPLETE.value,
+        ]
+        mock_json_load.return_value = {
+            "start": 369410,
+            "end": 369420,
+            "jobs": [
+                {
+                    "buildbucket_id": 12345,
+                    "rev": 369411,
+                    "status": update_tryjob_status.TryjobStatus.PENDING.value,
+                }
+            ],
+        }
+        mock_get_build_result.return_value = (
+            update_tryjob_status.TryjobStatus.GOOD.value
+        )
+
+        # Verify the exception is raised when the bad revision is successfully found.
+        # Uses `sys.exit(0)` to indicate success.
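+        # (sys.exit(0) raises SystemExit with a code of 0, which the
+        # assertRaises block below captures and checks.)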
+        with self.assertRaises(SystemExit) as err:
+            auto_llvm_bisection.main()
+
+        self.assertEqual(err.exception.code, 0)
+
+        mock_outside_chroot.assert_called_once()
+        mock_get_args.assert_called_once()
+        self.assertEqual(mock_isfile.call_count, 3)
+        self.assertEqual(mock_llvm_bisection.call_count, 3)
+        mock_traceback.assert_called_once()
+        mock_sleep.assert_called_once()
+
+    @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True)
+    @mock.patch.object(time, "sleep")
+    @mock.patch.object(traceback, "print_exc")
+    @mock.patch.object(llvm_bisection, "main")
+    @mock.patch.object(os.path, "isfile")
+    @mock.patch.object(
+        llvm_bisection,
+        "GetCommandLineArgs",
+        return_value=test_helpers.ArgsOutputTest(),
    )
+    def testFailedToStartBisection(
+        self,
+        mock_get_args,
+        mock_isfile,
+        mock_llvm_bisection,
+        mock_traceback,
+        mock_sleep,
+        mock_outside_chroot,
+    ):
+
+        mock_isfile.return_value = False
+        mock_llvm_bisection.side_effect = ValueError(
+            "Failed to launch more tryjobs."
+        )
+
+        # Verify the exception is raised when the number of attempts to
+        # launch more tryjobs is exceeded, so bisection cannot continue.
+        with self.assertRaises(SystemExit) as err:
+            auto_llvm_bisection.main()
+
+        self.assertEqual(err.exception.code, "Unable to continue bisection.")
+
+        mock_outside_chroot.assert_called_once()
+        mock_get_args.assert_called_once()
+        self.assertEqual(mock_isfile.call_count, 2)
+        self.assertEqual(mock_llvm_bisection.call_count, 3)
+        self.assertEqual(mock_traceback.call_count, 3)
+        self.assertEqual(mock_sleep.call_count, 2)
+
+    @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True)
+    @mock.patch.object(
+        llvm_bisection,
+        "GetCommandLineArgs",
+        return_value=test_helpers.ArgsOutputTest(),
+    )
+    @mock.patch.object(time, "time")
+    @mock.patch.object(time, "sleep")
+    @mock.patch.object(os.path, "isfile")
+    @mock.patch.object(auto_llvm_bisection, "open")
+    @mock.patch.object(json, "load")
+    @mock.patch.object(auto_llvm_bisection, "GetBuildResult")
+    def testFailedToUpdatePendingTryJobs(
+        self,
+        mock_get_build_result,
+        mock_json_load,
+        # pylint: disable=unused-argument
+        mock_open,
+        mock_isfile,
+        mock_sleep,
+        mock_time,
+        mock_get_args,
+        mock_outside_chroot,
+    ):
+
+        # Simulate behavior of `time.time()` for time passed.
+        @test_helpers.CallCountsToMockFunctions
+        def MockTimePassed(call_count):
+            if call_count < 3:
+                return call_count
+
+            assert False, "Called `time.time()` more than expected."
+
+        mock_isfile.return_value = True
+        mock_json_load.return_value = {
+            "start": 369410,
+            "end": 369420,
+            "jobs": [
+                {
+                    "buildbucket_id": 12345,
+                    "rev": 369411,
+                    "status": update_tryjob_status.TryjobStatus.PENDING.value,
+                }
+            ],
+        }
+        mock_get_build_result.return_value = None
+        mock_time.side_effect = MockTimePassed
+        # Reduce the polling limit for the test case to terminate faster.
+        auto_llvm_bisection.POLLING_LIMIT_SECS = 1
+
+        # Verify the exception is raised when unable to update tryjobs whose
+        # 'status' value is 'pending'.
+        with self.assertRaises(SystemExit) as err:
+            auto_llvm_bisection.main()
+
+        self.assertEqual(
+            err.exception.code, "Failed to update pending tryjobs."
+        )
+
+        mock_outside_chroot.assert_called_once()
+        mock_get_args.assert_called_once()
+        self.assertEqual(mock_isfile.call_count, 2)
+        mock_sleep.assert_called_once()
+        self.assertEqual(mock_time.call_count, 3)
+
+    @mock.patch.object(subprocess, "check_output")
+    def testGetBuildResult(self, mock_chroot_command):
+        buildbucket_id = 192
+        status = auto_llvm_bisection.BuilderStatus.PASS.value
+        tryjob_contents = {buildbucket_id: {"status": status}}
+        mock_chroot_command.return_value = json.dumps(tryjob_contents)
+        chroot_path = "/some/path/to/chroot"
+
+        self.assertEqual(
+            auto_llvm_bisection.GetBuildResult(chroot_path, buildbucket_id),
+            update_tryjob_status.TryjobStatus.GOOD.value,
+        )
+
+        mock_chroot_command.assert_called_once_with(
+            [
+                "cros_sdk",
+                "--",
+                "cros",
+                "buildresult",
+                "--buildbucket-id",
+                str(buildbucket_id),
+                "--report",
+                "json",
+            ],
+            cwd="/some/path/to/chroot",
+            stderr=subprocess.STDOUT,
+            encoding="UTF-8",
+        )
+
+    @mock.patch.object(subprocess, "check_output")
+    def testGetBuildResultPassedWithUnstartedTryjob(self, mock_chroot_command):
+        buildbucket_id = 192
+        chroot_path = "/some/path/to/chroot"
+        mock_chroot_command.side_effect = subprocess.CalledProcessError(
+            returncode=1, cmd=[], output="No build found. Perhaps not started"
+        )
+        auto_llvm_bisection.GetBuildResult(chroot_path, buildbucket_id)
+        mock_chroot_command.assert_called_once_with(
+            [
+                "cros_sdk",
+                "--",
+                "cros",
+                "buildresult",
+                "--buildbucket-id",
+                "192",
+                "--report",
+                "json",
+            ],
+            cwd=chroot_path,
+            stderr=subprocess.STDOUT,
+            encoding="UTF-8",
+        )
+
+    @mock.patch.object(subprocess, "check_output")
+    def testGetBuildResultFailedWithInvalidBuildStatus(
+        self, mock_chroot_command
+    ):
+        chroot_path = "/some/path/to/chroot"
+        buildbucket_id = 50
+        invalid_build_status = "querying"
+        tryjob_contents = {buildbucket_id: {"status": invalid_build_status}}
+        mock_chroot_command.return_value = json.dumps(tryjob_contents)
+
+        # Verify the exception is raised when the return value of `cros buildresult`
+        # is not in the `builder_status_mapping`.
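+        # ('querying' is deliberately absent from builder_status_mapping, so
+        # GetBuildResult is expected to raise ValueError here.)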
+ with self.assertRaises(ValueError) as err: + auto_llvm_bisection.GetBuildResult(chroot_path, buildbucket_id) + + self.assertEqual( + str(err.exception), + '"cros buildresult" return value is invalid: %s' + % invalid_build_status, + ) + + mock_chroot_command.assert_called_once_with( + [ + "cros_sdk", + "--", + "cros", + "buildresult", + "--buildbucket-id", + str(buildbucket_id), + "--report", + "json", + ], + cwd=chroot_path, + stderr=subprocess.STDOUT, + encoding="UTF-8", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/llvm_tools/bisect_clang_crashes.py b/llvm_tools/bisect_clang_crashes.py index 9a50f0f5..65aadabd 100755 --- a/llvm_tools/bisect_clang_crashes.py +++ b/llvm_tools/bisect_clang_crashes.py @@ -21,119 +21,137 @@ import chroot def get_artifacts(pattern): - results = subprocess.check_output(['gsutil.py', 'ls', pattern], - stderr=subprocess.STDOUT, - encoding='utf-8') - return sorted(l.strip() for l in results.splitlines()) + results = subprocess.check_output( + ["gsutil.py", "ls", pattern], stderr=subprocess.STDOUT, encoding="utf-8" + ) + return sorted(l.strip() for l in results.splitlines()) def get_crash_reproducers(working_dir): - results = [] - for src in [ - f for f in glob.glob('%s/*.c*' % working_dir) - if f.split('.')[-1] in ['c', 'cc', 'cpp'] - ]: - script = '.'.join(src.split('.')[:-1]) + '.sh' - if not os.path.exists(script): - logging.warning('could not find the matching script of %s', src) - else: - results.append((src, script)) - return results - - -def submit_crash_to_forcey(forcey: str, temporary_directory: str, - buildbucket_id: str, url: str) -> None: - dest_dir = os.path.join(temporary_directory, buildbucket_id) - dest_file = os.path.join(dest_dir, os.path.basename(url)) - logging.info('Downloading and submitting %r...', url) - subprocess.check_output(['gsutil.py', 'cp', url, dest_file], - stderr=subprocess.STDOUT) - subprocess.check_output(['tar', '-xJf', dest_file], cwd=dest_dir) - for src, script in get_crash_reproducers(dest_dir): - subprocess.check_output([ - forcey, 'reduce', '-wait=false', '-note', - '%s:%s' % (url, src), '-sh_file', script, '-src_file', src - ]) + results = [] + for src in [ + f + for f in glob.glob("%s/*.c*" % working_dir) + if f.split(".")[-1] in ["c", "cc", "cpp"] + ]: + script = ".".join(src.split(".")[:-1]) + ".sh" + if not os.path.exists(script): + logging.warning("could not find the matching script of %s", src) + else: + results.append((src, script)) + return results + + +def submit_crash_to_forcey( + forcey: str, temporary_directory: str, buildbucket_id: str, url: str +) -> None: + dest_dir = os.path.join(temporary_directory, buildbucket_id) + dest_file = os.path.join(dest_dir, os.path.basename(url)) + logging.info("Downloading and submitting %r...", url) + subprocess.check_output( + ["gsutil.py", "cp", url, dest_file], stderr=subprocess.STDOUT + ) + subprocess.check_output(["tar", "-xJf", dest_file], cwd=dest_dir) + for src, script in get_crash_reproducers(dest_dir): + subprocess.check_output( + [ + forcey, + "reduce", + "-wait=false", + "-note", + "%s:%s" % (url, src), + "-sh_file", + script, + "-src_file", + src, + ] + ) def main(argv): - chroot.VerifyOutsideChroot() - logging.basicConfig( - format='%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s', - level=logging.INFO, - ) - cur_dir = os.path.dirname(os.path.abspath(__file__)) - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument('--4c', - dest='forcey', - required=True, - help='Path to a 4c client binary') - 
parser.add_argument('--state_file', - default=os.path.join(cur_dir, 'chromeos-state.json'), - help='The path to the state file.') - parser.add_argument( - '--nocleanup', - action='store_false', - dest='cleanup', - help='Keep temporary files created after the script finishes.') - opts = parser.parse_args(argv) - - state_file = os.path.abspath(opts.state_file) - os.makedirs(os.path.dirname(state_file), exist_ok=True) - temporary_directory = '/tmp/bisect_clang_crashes' - os.makedirs(temporary_directory, exist_ok=True) - urls = get_artifacts( - 'gs://chromeos-toolchain-artifacts/clang-crash-diagnoses' - '/**/*clang_crash_diagnoses.tar.xz') - logging.info('%d crash URLs found', len(urls)) - - visited = {} - if os.path.exists(state_file): - buildbucket_ids = {url.split('/')[-2] for url in urls} - with open(state_file, encoding='utf-8') as f: - data = json.load(f) - visited = {k: v for k, v in data.items() if k in buildbucket_ids} - logging.info('Successfully loaded %d previously-submitted crashes', - len(visited)) - - try: - for url in urls: - splits = url.split('/') - buildbucket_id = splits[-2] - # Skip the builds that has been processed - if buildbucket_id in visited: - continue - submit_crash_to_forcey( - forcey=opts.forcey, - temporary_directory=temporary_directory, - buildbucket_id=buildbucket_id, - url=url, - ) - visited[buildbucket_id] = url - - exception_in_flight = False - except: - exception_in_flight = True - raise - finally: - if exception_in_flight: - # This is best-effort. If the machine powers off or similar, we'll just - # resubmit the same crashes, which is suboptimal, but otherwise - # acceptable. - logging.error('Something went wrong; attempting to save our work...') - else: - logging.info('Persisting state...') - - tmp_state_file = state_file + '.tmp' - with open(tmp_state_file, 'w', encoding='utf-8') as f: - json.dump(visited, f, indent=2) - os.rename(tmp_state_file, state_file) - - logging.info('State successfully persisted') - - if opts.cleanup: - shutil.rmtree(temporary_directory) - - -if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) + chroot.VerifyOutsideChroot() + logging.basicConfig( + format="%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s", + level=logging.INFO, + ) + cur_dir = os.path.dirname(os.path.abspath(__file__)) + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--4c", dest="forcey", required=True, help="Path to a 4c client binary" + ) + parser.add_argument( + "--state_file", + default=os.path.join(cur_dir, "chromeos-state.json"), + help="The path to the state file.", + ) + parser.add_argument( + "--nocleanup", + action="store_false", + dest="cleanup", + help="Keep temporary files created after the script finishes.", + ) + opts = parser.parse_args(argv) + + state_file = os.path.abspath(opts.state_file) + os.makedirs(os.path.dirname(state_file), exist_ok=True) + temporary_directory = "/tmp/bisect_clang_crashes" + os.makedirs(temporary_directory, exist_ok=True) + urls = get_artifacts( + "gs://chromeos-toolchain-artifacts/clang-crash-diagnoses" + "/**/*clang_crash_diagnoses.tar.xz" + ) + logging.info("%d crash URLs found", len(urls)) + + visited = {} + if os.path.exists(state_file): + buildbucket_ids = {url.split("/")[-2] for url in urls} + with open(state_file, encoding="utf-8") as f: + data = json.load(f) + visited = {k: v for k, v in data.items() if k in buildbucket_ids} + logging.info( + "Successfully loaded %d previously-submitted crashes", len(visited) + ) + + try: + for url in urls: + splits = 
url.split("/")
+            buildbucket_id = splits[-2]
+            # Skip the builds that have been processed
+            if buildbucket_id in visited:
+                continue
+            submit_crash_to_forcey(
+                forcey=opts.forcey,
+                temporary_directory=temporary_directory,
+                buildbucket_id=buildbucket_id,
+                url=url,
+            )
+            visited[buildbucket_id] = url
+
+        exception_in_flight = False
+    except:
+        exception_in_flight = True
+        raise
+    finally:
+        if exception_in_flight:
+            # This is best-effort. If the machine powers off or similar, we'll just
+            # resubmit the same crashes, which is suboptimal, but otherwise
+            # acceptable.
+            logging.error(
+                "Something went wrong; attempting to save our work..."
+            )
+        else:
+            logging.info("Persisting state...")
+
+        tmp_state_file = state_file + ".tmp"
+        with open(tmp_state_file, "w", encoding="utf-8") as f:
+            json.dump(visited, f, indent=2)
+        os.rename(tmp_state_file, state_file)
+
+        logging.info("State successfully persisted")
+
+    if opts.cleanup:
+        shutil.rmtree(temporary_directory)
+
+
+if __name__ == "__main__":
+    sys.exit(main(sys.argv[1:]))
diff --git a/llvm_tools/bisect_clang_crashes_unittest.py b/llvm_tools/bisect_clang_crashes_unittest.py
index 81ee31cd..96a375a0 100755
--- a/llvm_tools/bisect_clang_crashes_unittest.py
+++ b/llvm_tools/bisect_clang_crashes_unittest.py
@@ -17,74 +17,85 @@ import bisect_clang_crashes


 class Test(unittest.TestCase):
-  """Tests for bisect_clang_crashes."""
+    """Tests for bisect_clang_crashes."""

-  class _SilencingFilter(object):
-    """Silences all log messages.
+    class _SilencingFilter(object):
+        """Silences all log messages.

-    Also collects info about log messages that would've been emitted.
-    """
+        Also collects info about log messages that would've been emitted.
+        """

-    def __init__(self):
-      self.messages = []
+        def __init__(self):
+            self.messages = []

-    def filter(self, record):
-      self.messages.append(record.getMessage())
-      return 0
+        def filter(self, record):
+            self.messages.append(record.getMessage())
+            return 0

-  @mock.patch.object(subprocess, 'check_output')
-  def test_get_artifacts(self, mock_gsutil_ls):
-    pattern = 'gs://chromeos-toolchain-artifacts/clang-crash-diagnoses/' \
-              '**/*clang_crash_diagnoses.tar.xz'
-    mock_gsutil_ls.return_value = 'artifact1\nartifact2\nartifact3'
-    results = bisect_clang_crashes.get_artifacts(pattern)
-    self.assertEqual(results, ['artifact1', 'artifact2', 'artifact3'])
-    mock_gsutil_ls.assert_called_once_with(['gsutil.py', 'ls', pattern],
-                                           stderr=subprocess.STDOUT,
-                                           encoding='utf-8')
+    @mock.patch.object(subprocess, "check_output")
+    def test_get_artifacts(self, mock_gsutil_ls):
+        pattern = (
+            "gs://chromeos-toolchain-artifacts/clang-crash-diagnoses/"
+            "**/*clang_crash_diagnoses.tar.xz"
+        )
+        mock_gsutil_ls.return_value = "artifact1\nartifact2\nartifact3"
+        results = bisect_clang_crashes.get_artifacts(pattern)
+        self.assertEqual(results, ["artifact1", "artifact2", "artifact3"])
+        mock_gsutil_ls.assert_called_once_with(
+            ["gsutil.py", "ls", pattern],
+            stderr=subprocess.STDOUT,
+            encoding="utf-8",
+        )

-  @mock.patch.object(os.path, 'exists')
-  @mock.patch.object(glob, 'glob')
-  def test_get_crash_reproducers_succeed(self, mock_file_search,
-                                         mock_file_check):
-    working_dir = 'SomeDirectory'
-    mock_file_search.return_value = ['a.c', 'b.cpp', 'c.cc']
-    mock_file_check.side_effect = [True, True, True]
-    results = bisect_clang_crashes.get_crash_reproducers(working_dir)
-    mock_file_search.assert_called_once_with('%s/*.c*' % working_dir)
-    self.assertEqual(mock_file_check.call_count, 3)
-
self.assertEqual(mock_file_check.call_args_list[0], mock.call('a.sh')) - self.assertEqual(mock_file_check.call_args_list[1], mock.call('b.sh')) - self.assertEqual(mock_file_check.call_args_list[2], mock.call('c.sh')) - self.assertEqual(results, [('a.c', 'a.sh'), ('b.cpp', 'b.sh'), - ('c.cc', 'c.sh')]) + @mock.patch.object(os.path, "exists") + @mock.patch.object(glob, "glob") + def test_get_crash_reproducers_succeed( + self, mock_file_search, mock_file_check + ): + working_dir = "SomeDirectory" + mock_file_search.return_value = ["a.c", "b.cpp", "c.cc"] + mock_file_check.side_effect = [True, True, True] + results = bisect_clang_crashes.get_crash_reproducers(working_dir) + mock_file_search.assert_called_once_with("%s/*.c*" % working_dir) + self.assertEqual(mock_file_check.call_count, 3) + self.assertEqual(mock_file_check.call_args_list[0], mock.call("a.sh")) + self.assertEqual(mock_file_check.call_args_list[1], mock.call("b.sh")) + self.assertEqual(mock_file_check.call_args_list[2], mock.call("c.sh")) + self.assertEqual( + results, [("a.c", "a.sh"), ("b.cpp", "b.sh"), ("c.cc", "c.sh")] + ) - @mock.patch.object(os.path, 'exists') - @mock.patch.object(glob, 'glob') - def test_get_crash_reproducers_no_matching_script(self, mock_file_search, - mock_file_check): - def silence_logging(): - root = logging.getLogger() - filt = self._SilencingFilter() - root.addFilter(filt) - self.addCleanup(root.removeFilter, filt) - return filt + @mock.patch.object(os.path, "exists") + @mock.patch.object(glob, "glob") + def test_get_crash_reproducers_no_matching_script( + self, mock_file_search, mock_file_check + ): + def silence_logging(): + root = logging.getLogger() + filt = self._SilencingFilter() + root.addFilter(filt) + self.addCleanup(root.removeFilter, filt) + return filt - log_filter = silence_logging() - working_dir = 'SomeDirectory' - mock_file_search.return_value = ['a.c', 'b.cpp', 'c.cc'] - mock_file_check.side_effect = [True, False, True] - results = bisect_clang_crashes.get_crash_reproducers(working_dir) - mock_file_search.assert_called_once_with('%s/*.c*' % working_dir) - self.assertEqual(mock_file_check.call_count, 3) - self.assertEqual(mock_file_check.call_args_list[0], mock.call('a.sh')) - self.assertEqual(mock_file_check.call_args_list[1], mock.call('b.sh')) - self.assertEqual(mock_file_check.call_args_list[2], mock.call('c.sh')) - self.assertEqual(results, [('a.c', 'a.sh'), ('c.cc', 'c.sh')]) - self.assertTrue( - any('could not find the matching script of b.cpp' in x - for x in log_filter.messages), log_filter.messages) + log_filter = silence_logging() + working_dir = "SomeDirectory" + mock_file_search.return_value = ["a.c", "b.cpp", "c.cc"] + mock_file_check.side_effect = [True, False, True] + results = bisect_clang_crashes.get_crash_reproducers(working_dir) + mock_file_search.assert_called_once_with("%s/*.c*" % working_dir) + self.assertEqual(mock_file_check.call_count, 3) + self.assertEqual(mock_file_check.call_args_list[0], mock.call("a.sh")) + self.assertEqual(mock_file_check.call_args_list[1], mock.call("b.sh")) + self.assertEqual(mock_file_check.call_args_list[2], mock.call("c.sh")) + self.assertEqual(results, [("a.c", "a.sh"), ("c.cc", "c.sh")]) + self.assertTrue( + any( + "could not find the matching script of b.cpp" in x + for x in log_filter.messages + ), + log_filter.messages, + ) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/llvm_tools/check_clang_diags.py b/llvm_tools/check_clang_diags.py index 69f91823..2509dc3c 100755 --- 
a/llvm_tools/check_clang_diags.py
+++ b/llvm_tools/check_clang_diags.py
@@ -22,78 +22,82 @@ from typing import Dict, List, Tuple
 
 from cros_utils import bugs
 
-_DEFAULT_ASSIGNEE = 'mage'
-_DEFAULT_CCS = ['cjdb@google.com']
+
+_DEFAULT_ASSIGNEE = "mage"
+_DEFAULT_CCS = ["cjdb@google.com"]
 
 
 # FIXME: clang would be cool to check, too? Doesn't seem to have a super stable
 # way of listing all warnings, unfortunately.
 def _build_llvm(llvm_dir: str, build_dir: str):
-  """Builds everything that _collect_available_diagnostics depends on."""
-  targets = ['clang-tidy']
-  # use `-C $llvm_dir` so the failure is easier to handle if llvm_dir DNE.
-  ninja_result = subprocess.run(
-      ['ninja', '-C', build_dir] + targets,
-      check=False,
-  )
-  if not ninja_result.returncode:
-    return
-
-  # Sometimes the directory doesn't exist, sometimes incremental cmake
-  # breaks, sometimes something random happens. Start fresh since that fixes
-  # the issue most of the time.
-  logging.warning('Initial build failed; trying to build from scratch.')
-  shutil.rmtree(build_dir, ignore_errors=True)
-  os.makedirs(build_dir)
-  subprocess.run(
-      [
-          'cmake',
-          '-G',
-          'Ninja',
-          '-DCMAKE_BUILD_TYPE=MinSizeRel',
-          '-DLLVM_USE_LINKER=lld',
-          '-DLLVM_ENABLE_PROJECTS=clang;clang-tools-extra',
-          '-DLLVM_TARGETS_TO_BUILD=X86',
-          f'{os.path.abspath(llvm_dir)}/llvm',
-      ],
-      cwd=build_dir,
-      check=True,
-  )
-  subprocess.run(['ninja'] + targets, check=True, cwd=build_dir)
-
-
-def _collect_available_diagnostics(llvm_dir: str,
-                                   build_dir: str) -> Dict[str, List[str]]:
-  _build_llvm(llvm_dir, build_dir)
-
-  clang_tidy = os.path.join(os.path.abspath(build_dir), 'bin', 'clang-tidy')
-  clang_tidy_checks = subprocess.run(
-      [clang_tidy, '-checks=*', '-list-checks'],
-      # Use cwd='/' to ensure no .clang-tidy files are picked up. It
-      # _shouldn't_ matter, but it's also ~free, so...
-      check=True,
-      cwd='/',
-      stdout=subprocess.PIPE,
-      encoding='utf-8',
-  )
-  clang_tidy_checks_stdout = [
-      x.strip() for x in clang_tidy_checks.stdout.strip().splitlines()
-  ]
-
-  # The first line should always be this, then each line thereafter is a check
-  # name.
-  assert clang_tidy_checks_stdout[0] == 'Enabled checks:', (
-      clang_tidy_checks_stdout)
-  clang_tidy_checks = clang_tidy_checks_stdout[1:]
-  assert not any(check.isspace()
-                 for check in clang_tidy_checks), (clang_tidy_checks)
-  return {'clang-tidy': clang_tidy_checks}
+    """Builds everything that _collect_available_diagnostics depends on."""
+    targets = ["clang-tidy"]
+    # use `-C $build_dir` so the failure is easier to handle if build_dir DNE.
+    ninja_result = subprocess.run(
+        ["ninja", "-C", build_dir] + targets,
+        check=False,
+    )
+    if not ninja_result.returncode:
+        return
+
+    # Sometimes the directory doesn't exist, sometimes incremental cmake
+    # breaks, sometimes something random happens. Start fresh since that fixes
+    # the issue most of the time.
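+    # The fallback below reconfigures from scratch: `cmake -G Ninja` with a
+    # MinSizeRel build of clang and clang-tools-extra for X86 only, then a
+    # full `ninja` build of the targets. Both steps run with check=True, so a
+    # second failure raises instead of being retried.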
+ logging.warning("Initial build failed; trying to build from scratch.") + shutil.rmtree(build_dir, ignore_errors=True) + os.makedirs(build_dir) + subprocess.run( + [ + "cmake", + "-G", + "Ninja", + "-DCMAKE_BUILD_TYPE=MinSizeRel", + "-DLLVM_USE_LINKER=lld", + "-DLLVM_ENABLE_PROJECTS=clang;clang-tools-extra", + "-DLLVM_TARGETS_TO_BUILD=X86", + f"{os.path.abspath(llvm_dir)}/llvm", + ], + cwd=build_dir, + check=True, + ) + subprocess.run(["ninja"] + targets, check=True, cwd=build_dir) + + +def _collect_available_diagnostics( + llvm_dir: str, build_dir: str +) -> Dict[str, List[str]]: + _build_llvm(llvm_dir, build_dir) + + clang_tidy = os.path.join(os.path.abspath(build_dir), "bin", "clang-tidy") + clang_tidy_checks = subprocess.run( + [clang_tidy, "-checks=*", "-list-checks"], + # Use cwd='/' to ensure no .clang-tidy files are picked up. It + # _shouldn't_ matter, but it's also ~free, so... + check=True, + cwd="/", + stdout=subprocess.PIPE, + encoding="utf-8", + ) + clang_tidy_checks_stdout = [ + x.strip() for x in clang_tidy_checks.stdout.strip().splitlines() + ] + + # The first line should always be this, then each line thereafter is a check + # name. + assert ( + clang_tidy_checks_stdout[0] == "Enabled checks:" + ), clang_tidy_checks_stdout + clang_tidy_checks = clang_tidy_checks_stdout[1:] + assert not any( + check.isspace() for check in clang_tidy_checks + ), clang_tidy_checks + return {"clang-tidy": clang_tidy_checks} def _process_new_diagnostics( old: Dict[str, List[str]], new: Dict[str, List[str]] ) -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]: - """Determines the set of new diagnostics that we should file bugs for. + """Determines the set of new diagnostics that we should file bugs for. old: The previous state that this function returned as `new_state_file`, or `{}` @@ -102,108 +106,118 @@ def _process_new_diagnostics( Returns a `new_state_file` to pass into this function as `old` in the future, and a dict of diags to file bugs about. - """ - new_diagnostics = {} - new_state_file = {} - for tool, diags in new.items(): - if tool not in old: - logging.info('New tool with diagnostics: %s; pretending none are new', - tool) - new_state_file[tool] = diags - else: - old_diags = set(old[tool]) - newly_added_diags = [x for x in diags if x not in old_diags] - if newly_added_diags: - new_diagnostics[tool] = newly_added_diags - # This specifically tries to make diags sticky: if one is landed, then - # reverted, then relanded, we ignore the reland. This might not be - # desirable? I don't know. - new_state_file[tool] = old[tool] + newly_added_diags - - # Sort things so we have more predictable output. - for v in new_diagnostics.values(): - v.sort() - - return new_state_file, new_diagnostics + """ + new_diagnostics = {} + new_state_file = {} + for tool, diags in new.items(): + if tool not in old: + logging.info( + "New tool with diagnostics: %s; pretending none are new", tool + ) + new_state_file[tool] = diags + else: + old_diags = set(old[tool]) + newly_added_diags = [x for x in diags if x not in old_diags] + if newly_added_diags: + new_diagnostics[tool] = newly_added_diags + # This specifically tries to make diags sticky: if one is landed, then + # reverted, then relanded, we ignore the reland. This might not be + # desirable? I don't know. + new_state_file[tool] = old[tool] + newly_added_diags + + # Sort things so we have more predictable output. 
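+    # As a hypothetical illustration: old={"clang-tidy": ["a"]} and
+    # new={"clang-tidy": ["a", "c", "b"]} would yield
+    # new_diagnostics={"clang-tidy": ["b", "c"]} once the sort below runs,
+    # and new_state_file={"clang-tidy": ["a", "c", "b"]}.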
+ for v in new_diagnostics.values(): + v.sort() + + return new_state_file, new_diagnostics def _file_bugs_for_new_diags(new_diags: Dict[str, List[str]]): - for tool, diags in sorted(new_diags.items()): - for diag in diags: - bugs.CreateNewBug( - component_id=bugs.WellKnownComponents.CrOSToolchainPublic, - title=f'Investigate {tool} check `{diag}`', - body='\n'.join(( - f'It seems that the `{diag}` check was recently added to {tool}.', - "It's probably good to TAL at whether this check would be good", - 'for us to enable in e.g., platform2, or across ChromeOS.', - )), - assignee=_DEFAULT_ASSIGNEE, - cc=_DEFAULT_CCS, - ) + for tool, diags in sorted(new_diags.items()): + for diag in diags: + bugs.CreateNewBug( + component_id=bugs.WellKnownComponents.CrOSToolchainPublic, + title=f"Investigate {tool} check `{diag}`", + body="\n".join( + ( + f"It seems that the `{diag}` check was recently added to {tool}.", + "It's probably good to TAL at whether this check would be good", + "for us to enable in e.g., platform2, or across ChromeOS.", + ) + ), + assignee=_DEFAULT_ASSIGNEE, + cc=_DEFAULT_CCS, + ) def main(argv: List[str]): - logging.basicConfig( - format='>> %(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: ' - '%(message)s', - level=logging.INFO, - ) - - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter, - ) - parser.add_argument('--llvm_dir', - required=True, - help='LLVM directory to check. Required.') - parser.add_argument('--llvm_build_dir', - required=True, - help='Build directory for LLVM. Required & autocreated.') - parser.add_argument( - '--state_file', - required=True, - help='State file to use to suppress duplicate complaints. Required.') - parser.add_argument( - '--dry_run', - action='store_true', - help='Skip filing bugs & writing to the state file; just log ' - 'differences.') - opts = parser.parse_args(argv) - - build_dir = opts.llvm_build_dir - dry_run = opts.dry_run - llvm_dir = opts.llvm_dir - state_file = opts.state_file - - try: - with open(state_file, encoding='utf-8') as f: - prior_diagnostics = json.load(f) - except FileNotFoundError: - # If the state file didn't exist, just create it without complaining this - # time. - prior_diagnostics = {} - - available_diagnostics = _collect_available_diagnostics(llvm_dir, build_dir) - logging.info('Available diagnostics are %s', available_diagnostics) - if available_diagnostics == prior_diagnostics: - logging.info('Current diagnostics are identical to previous ones; quit') - return - - new_state_file, new_diagnostics = _process_new_diagnostics( - prior_diagnostics, available_diagnostics) - logging.info('New diagnostics in existing tool(s): %s', new_diagnostics) - - if dry_run: - logging.info('Skipping new state file writing and bug filing; dry-run ' - 'mode wins') - else: - _file_bugs_for_new_diags(new_diagnostics) - new_state_file_path = state_file + '.new' - with open(new_state_file_path, 'w', encoding='utf-8') as f: - json.dump(new_state_file, f) - os.rename(new_state_file_path, state_file) - - -if __name__ == '__main__': - main(sys.argv[1:]) + logging.basicConfig( + format=">> %(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: " + "%(message)s", + level=logging.INFO, + ) + + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "--llvm_dir", required=True, help="LLVM directory to check. Required." 
+ ) + parser.add_argument( + "--llvm_build_dir", + required=True, + help="Build directory for LLVM. Required & autocreated.", + ) + parser.add_argument( + "--state_file", + required=True, + help="State file to use to suppress duplicate complaints. Required.", + ) + parser.add_argument( + "--dry_run", + action="store_true", + help="Skip filing bugs & writing to the state file; just log " + "differences.", + ) + opts = parser.parse_args(argv) + + build_dir = opts.llvm_build_dir + dry_run = opts.dry_run + llvm_dir = opts.llvm_dir + state_file = opts.state_file + + try: + with open(state_file, encoding="utf-8") as f: + prior_diagnostics = json.load(f) + except FileNotFoundError: + # If the state file didn't exist, just create it without complaining this + # time. + prior_diagnostics = {} + + available_diagnostics = _collect_available_diagnostics(llvm_dir, build_dir) + logging.info("Available diagnostics are %s", available_diagnostics) + if available_diagnostics == prior_diagnostics: + logging.info("Current diagnostics are identical to previous ones; quit") + return + + new_state_file, new_diagnostics = _process_new_diagnostics( + prior_diagnostics, available_diagnostics + ) + logging.info("New diagnostics in existing tool(s): %s", new_diagnostics) + + if dry_run: + logging.info( + "Skipping new state file writing and bug filing; dry-run " + "mode wins" + ) + else: + _file_bugs_for_new_diags(new_diagnostics) + new_state_file_path = state_file + ".new" + with open(new_state_file_path, "w", encoding="utf-8") as f: + json.dump(new_state_file, f) + os.rename(new_state_file_path, state_file) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/llvm_tools/check_clang_diags_test.py b/llvm_tools/check_clang_diags_test.py index 2c404d62..c15716f0 100755 --- a/llvm_tools/check_clang_diags_test.py +++ b/llvm_tools/check_clang_diags_test.py @@ -8,95 +8,103 @@ import unittest from unittest import mock +import check_clang_diags from cros_utils import bugs -import check_clang_diags # pylint: disable=protected-access class Test(unittest.TestCase): - """Test class.""" - - def test_process_new_diagnostics_ignores_new_tools(self): - new_state, new_diags = check_clang_diags._process_new_diagnostics( - old={}, - new={'clang': ['-Wone', '-Wtwo']}, - ) - self.assertEqual(new_state, {'clang': ['-Wone', '-Wtwo']}) - self.assertEqual(new_diags, {}) - - def test_process_new_diagnostics_is_a_nop_when_no_changes(self): - new_state, new_diags = check_clang_diags._process_new_diagnostics( - old={'clang': ['-Wone', '-Wtwo']}, - new={'clang': ['-Wone', '-Wtwo']}, - ) - self.assertEqual(new_state, {'clang': ['-Wone', '-Wtwo']}) - self.assertEqual(new_diags, {}) - - def test_process_new_diagnostics_ignores_removals_and_readds(self): - new_state, new_diags = check_clang_diags._process_new_diagnostics( - old={'clang': ['-Wone', '-Wtwo']}, - new={'clang': ['-Wone']}, - ) - self.assertEqual(new_diags, {}) - new_state, new_diags = check_clang_diags._process_new_diagnostics( - old=new_state, - new={'clang': ['-Wone', '-Wtwo']}, - ) - self.assertEqual(new_state, {'clang': ['-Wone', '-Wtwo']}) - self.assertEqual(new_diags, {}) - - def test_process_new_diagnostics_complains_when_warnings_are_added(self): - new_state, new_diags = check_clang_diags._process_new_diagnostics( - old={'clang': ['-Wone']}, - new={'clang': ['-Wone', '-Wtwo']}, - ) - self.assertEqual(new_state, {'clang': ['-Wone', '-Wtwo']}) - self.assertEqual(new_diags, {'clang': ['-Wtwo']}) - - @mock.patch.object(bugs, 'CreateNewBug') - def 
test_bugs_are_created_as_expected(self, create_new_bug_mock): - check_clang_diags._file_bugs_for_new_diags({ - 'clang': ['-Wone'], - 'clang-tidy': ['bugprone-foo'], - }) - - expected_calls = [ - mock.call( - component_id=bugs.WellKnownComponents.CrOSToolchainPublic, - title='Investigate clang check `-Wone`', - body='\n'.join(( - 'It seems that the `-Wone` check was recently added to clang.', - "It's probably good to TAL at whether this check would be good", - 'for us to enable in e.g., platform2, or across ChromeOS.', - )), - assignee=check_clang_diags._DEFAULT_ASSIGNEE, - cc=check_clang_diags._DEFAULT_CCS, - ), - mock.call( - component_id=bugs.WellKnownComponents.CrOSToolchainPublic, - title='Investigate clang-tidy check `bugprone-foo`', - body='\n'.join(( - 'It seems that the `bugprone-foo` check was recently added to ' - 'clang-tidy.', - "It's probably good to TAL at whether this check would be good", - 'for us to enable in e.g., platform2, or across ChromeOS.', - )), - assignee=check_clang_diags._DEFAULT_ASSIGNEE, - cc=check_clang_diags._DEFAULT_CCS, - ), - ] - - # Don't assertEqual the lists, since the diff is really hard to read for - # that. - for actual, expected in zip(create_new_bug_mock.call_args_list, - expected_calls): - self.assertEqual(actual, expected) - - self.assertEqual(len(create_new_bug_mock.call_args_list), - len(expected_calls)) - - -if __name__ == '__main__': - unittest.main() + """Test class.""" + + def test_process_new_diagnostics_ignores_new_tools(self): + new_state, new_diags = check_clang_diags._process_new_diagnostics( + old={}, + new={"clang": ["-Wone", "-Wtwo"]}, + ) + self.assertEqual(new_state, {"clang": ["-Wone", "-Wtwo"]}) + self.assertEqual(new_diags, {}) + + def test_process_new_diagnostics_is_a_nop_when_no_changes(self): + new_state, new_diags = check_clang_diags._process_new_diagnostics( + old={"clang": ["-Wone", "-Wtwo"]}, + new={"clang": ["-Wone", "-Wtwo"]}, + ) + self.assertEqual(new_state, {"clang": ["-Wone", "-Wtwo"]}) + self.assertEqual(new_diags, {}) + + def test_process_new_diagnostics_ignores_removals_and_readds(self): + new_state, new_diags = check_clang_diags._process_new_diagnostics( + old={"clang": ["-Wone", "-Wtwo"]}, + new={"clang": ["-Wone"]}, + ) + self.assertEqual(new_diags, {}) + new_state, new_diags = check_clang_diags._process_new_diagnostics( + old=new_state, + new={"clang": ["-Wone", "-Wtwo"]}, + ) + self.assertEqual(new_state, {"clang": ["-Wone", "-Wtwo"]}) + self.assertEqual(new_diags, {}) + + def test_process_new_diagnostics_complains_when_warnings_are_added(self): + new_state, new_diags = check_clang_diags._process_new_diagnostics( + old={"clang": ["-Wone"]}, + new={"clang": ["-Wone", "-Wtwo"]}, + ) + self.assertEqual(new_state, {"clang": ["-Wone", "-Wtwo"]}) + self.assertEqual(new_diags, {"clang": ["-Wtwo"]}) + + @mock.patch.object(bugs, "CreateNewBug") + def test_bugs_are_created_as_expected(self, create_new_bug_mock): + check_clang_diags._file_bugs_for_new_diags( + { + "clang": ["-Wone"], + "clang-tidy": ["bugprone-foo"], + } + ) + + expected_calls = [ + mock.call( + component_id=bugs.WellKnownComponents.CrOSToolchainPublic, + title="Investigate clang check `-Wone`", + body="\n".join( + ( + "It seems that the `-Wone` check was recently added to clang.", + "It's probably good to TAL at whether this check would be good", + "for us to enable in e.g., platform2, or across ChromeOS.", + ) + ), + assignee=check_clang_diags._DEFAULT_ASSIGNEE, + cc=check_clang_diags._DEFAULT_CCS, + ), + mock.call( + 
component_id=bugs.WellKnownComponents.CrOSToolchainPublic,
+                title="Investigate clang-tidy check `bugprone-foo`",
+                body="\n".join(
+                    (
+                        "It seems that the `bugprone-foo` check was recently added to "
+                        "clang-tidy.",
+                        "It's probably good to TAL at whether this check would be good",
+                        "for us to enable in e.g., platform2, or across ChromeOS.",
+                    )
+                ),
+                assignee=check_clang_diags._DEFAULT_ASSIGNEE,
+                cc=check_clang_diags._DEFAULT_CCS,
+            ),
+        ]
+
+        # Don't assertEqual the lists, since the diff is really hard to read for
+        # that.
+        for actual, expected in zip(
+            create_new_bug_mock.call_args_list, expected_calls
+        ):
+            self.assertEqual(actual, expected)
+
+        self.assertEqual(
+            len(create_new_bug_mock.call_args_list), len(expected_calls)
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/llvm_tools/chroot.py b/llvm_tools/chroot.py
index 31e26e74..3a3bdde4 100755
--- a/llvm_tools/chroot.py
+++ b/llvm_tools/chroot.py
@@ -8,89 +8,93 @@
 
 from __future__ import print_function
 
+import collections
 import os
 import subprocess
-import collections
 
-CommitContents = collections.namedtuple('CommitContents', ['url', 'cl_number'])
+
+CommitContents = collections.namedtuple("CommitContents", ["url", "cl_number"])
 
 
 def InChroot():
-  """Returns True if currently in the chroot."""
-  return 'CROS_WORKON_SRCROOT' in os.environ
+    """Returns True if currently in the chroot."""
+    return "CROS_WORKON_SRCROOT" in os.environ
 
 
 def VerifyOutsideChroot():
-  """Checks whether the script invoked was executed in the chroot.
+    """Checks whether the script invoked was executed in the chroot.
 
-  Raises:
-    AssertionError: The script was run inside the chroot.
-  """
+    Raises:
+      AssertionError: The script was run inside the chroot.
+    """
 
-  assert not InChroot(), 'Script should be run outside the chroot.'
+    assert not InChroot(), "Script should be run outside the chroot."
 
 
 def GetChrootEbuildPaths(chromeos_root, packages):
-  """Gets the chroot path(s) of the package(s).
+    """Gets the chroot path(s) of the package(s).
 
-  Args:
-    chromeos_root: The absolute path to the chroot to
-    use for executing chroot commands.
-    packages: A list of a package/packages to
-    be used to find their chroot path.
+    Args:
+      chromeos_root: The absolute path to the chroot to
+      use for executing chroot commands.
+      packages: A list of packages to
+      be used to find their chroot paths.
 
-  Returns:
-    A list of chroot paths of the packages' ebuild files.
+    Returns:
+      A list of chroot paths of the packages' ebuild files.
 
-  Raises:
-    ValueError: Failed to get the chroot path of a package.
-  """
+    Raises:
+      ValueError: Failed to get the chroot path of a package.
+    """
 
-  chroot_paths = []
+    chroot_paths = []
 
-  # Find the chroot path for each package's ebuild.
-  for package in packages:
-    chroot_path = subprocess.check_output(
-        ['cros_sdk', '--', 'equery', 'w', package],
-        cwd=chromeos_root,
-        encoding='utf-8')
-    chroot_paths.append(chroot_path.strip())
+    # Find the chroot path for each package's ebuild.
+    for package in packages:
+        chroot_path = subprocess.check_output(
+            ["cros_sdk", "--", "equery", "w", package],
+            cwd=chromeos_root,
+            encoding="utf-8",
+        )
+        chroot_paths.append(chroot_path.strip())
 
-  return chroot_paths
+    return chroot_paths
 
 
 def ConvertChrootPathsToAbsolutePaths(chromeos_root, chroot_paths):
-  """Converts the chroot path(s) to absolute symlink path(s).
+    """Converts the chroot path(s) to absolute symlink path(s).
 
-  Args:
-    chromeos_root: The absolute path to the chroot.
- chroot_paths: A list of chroot paths to convert to absolute paths. + Args: + chromeos_root: The absolute path to the chroot. + chroot_paths: A list of chroot paths to convert to absolute paths. - Returns: - A list of absolute path(s). + Returns: + A list of absolute path(s). - Raises: - ValueError: Invalid prefix for the chroot path or - invalid chroot paths were provided. - """ + Raises: + ValueError: Invalid prefix for the chroot path or + invalid chroot paths were provided. + """ - abs_paths = [] + abs_paths = [] - chroot_prefix = '/mnt/host/source/' + chroot_prefix = "/mnt/host/source/" - # Iterate through the chroot paths. - # - # For each chroot file path, remove '/mnt/host/source/' prefix - # and combine the chroot path with the result and add it to the list. - for chroot_path in chroot_paths: - if not chroot_path.startswith(chroot_prefix): - raise ValueError('Invalid prefix for the chroot path: %s' % chroot_path) + # Iterate through the chroot paths. + # + # For each chroot file path, remove '/mnt/host/source/' prefix + # and combine the chroot path with the result and add it to the list. + for chroot_path in chroot_paths: + if not chroot_path.startswith(chroot_prefix): + raise ValueError( + "Invalid prefix for the chroot path: %s" % chroot_path + ) - rel_path = chroot_path[len(chroot_prefix):] + rel_path = chroot_path[len(chroot_prefix) :] - # combine the chromeos root path + '/src/...' - abs_path = os.path.join(chromeos_root, rel_path) + # combine the chromeos root path + '/src/...' + abs_path = os.path.join(chromeos_root, rel_path) - abs_paths.append(abs_path) + abs_paths.append(abs_path) - return abs_paths + return abs_paths diff --git a/llvm_tools/chroot_unittest.py b/llvm_tools/chroot_unittest.py index 5c665de9..0e7d133c 100755 --- a/llvm_tools/chroot_unittest.py +++ b/llvm_tools/chroot_unittest.py @@ -14,53 +14,61 @@ import unittest.mock as mock import chroot + # These are unittests; protected access is OK to a point. # pylint: disable=protected-access class HelperFunctionsTest(unittest.TestCase): - """Test class for updating LLVM hashes of packages.""" + """Test class for updating LLVM hashes of packages.""" - @mock.patch.object(subprocess, 'check_output') - def testSucceedsToGetChrootEbuildPathForPackage(self, mock_chroot_command): - package_chroot_path = '/chroot/path/to/package.ebuild' + @mock.patch.object(subprocess, "check_output") + def testSucceedsToGetChrootEbuildPathForPackage(self, mock_chroot_command): + package_chroot_path = "/chroot/path/to/package.ebuild" - # Emulate ChrootRunCommandWOutput behavior when a chroot path is found for - # a valid package. - mock_chroot_command.return_value = package_chroot_path + # Emulate ChrootRunCommandWOutput behavior when a chroot path is found for + # a valid package. 
+ mock_chroot_command.return_value = package_chroot_path - chroot_path = '/test/chroot/path' - package_list = ['new-test/package'] + chroot_path = "/test/chroot/path" + package_list = ["new-test/package"] - self.assertEqual(chroot.GetChrootEbuildPaths(chroot_path, package_list), - [package_chroot_path]) + self.assertEqual( + chroot.GetChrootEbuildPaths(chroot_path, package_list), + [package_chroot_path], + ) - mock_chroot_command.assert_called_once() + mock_chroot_command.assert_called_once() - def testFailedToConvertChrootPathWithInvalidPrefix(self): - chroot_path = '/path/to/chroot' - chroot_file_path = '/src/package.ebuild' + def testFailedToConvertChrootPathWithInvalidPrefix(self): + chroot_path = "/path/to/chroot" + chroot_file_path = "/src/package.ebuild" - # Verify the exception is raised when a chroot path does not have the prefix - # '/mnt/host/source/'. - with self.assertRaises(ValueError) as err: - chroot.ConvertChrootPathsToAbsolutePaths(chroot_path, [chroot_file_path]) + # Verify the exception is raised when a chroot path does not have the prefix + # '/mnt/host/source/'. + with self.assertRaises(ValueError) as err: + chroot.ConvertChrootPathsToAbsolutePaths( + chroot_path, [chroot_file_path] + ) - self.assertEqual( - str(err.exception), 'Invalid prefix for the chroot path: ' - '%s' % chroot_file_path) + self.assertEqual( + str(err.exception), + "Invalid prefix for the chroot path: " "%s" % chroot_file_path, + ) - def testSucceedsToConvertChrootPathToAbsolutePath(self): - chroot_path = '/path/to/chroot' - chroot_file_paths = ['/mnt/host/source/src/package.ebuild'] + def testSucceedsToConvertChrootPathToAbsolutePath(self): + chroot_path = "/path/to/chroot" + chroot_file_paths = ["/mnt/host/source/src/package.ebuild"] - expected_abs_path = '/path/to/chroot/src/package.ebuild' + expected_abs_path = "/path/to/chroot/src/package.ebuild" - self.assertEqual( - chroot.ConvertChrootPathsToAbsolutePaths(chroot_path, - chroot_file_paths), - [expected_abs_path]) + self.assertEqual( + chroot.ConvertChrootPathsToAbsolutePaths( + chroot_path, chroot_file_paths + ), + [expected_abs_path], + ) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/llvm_tools/copy_helpers_to_chromiumos_overlay.py b/llvm_tools/copy_helpers_to_chromiumos_overlay.py index ee396316..042b19fa 100755 --- a/llvm_tools/copy_helpers_to_chromiumos_overlay.py +++ b/llvm_tools/copy_helpers_to_chromiumos_overlay.py @@ -20,48 +20,53 @@ import sys def _find_repo_root(script_root): - repo_root = os.path.abspath(os.path.join(script_root, '../../../../')) - if not os.path.isdir(os.path.join(repo_root, '.repo')): - return None - return repo_root + repo_root = os.path.abspath(os.path.join(script_root, "../../../../")) + if not os.path.isdir(os.path.join(repo_root, ".repo")): + return None + return repo_root def main(): - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument( - '--chroot_path', - help="Path to where CrOS' source tree lives. Will autodetect if you're " - 'running this from inside the CrOS source tree.') - args = parser.parse_args() + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--chroot_path", + help="Path to where CrOS' source tree lives. 
Will autodetect if you're " + "running this from inside the CrOS source tree.", + ) + args = parser.parse_args() - my_dir = os.path.abspath(os.path.dirname(__file__)) + my_dir = os.path.abspath(os.path.dirname(__file__)) - repo_root = args.chroot_path - if repo_root is None: - repo_root = _find_repo_root(my_dir) + repo_root = args.chroot_path if repo_root is None: - sys.exit("Couldn't detect the CrOS checkout root; please provide a " - 'value for --chroot_path') + repo_root = _find_repo_root(my_dir) + if repo_root is None: + sys.exit( + "Couldn't detect the CrOS checkout root; please provide a " + "value for --chroot_path" + ) - chromiumos_overlay = os.path.join(repo_root, - 'src/third_party/chromiumos-overlay') + chromiumos_overlay = os.path.join( + repo_root, "src/third_party/chromiumos-overlay" + ) - clone_files = [ - 'failure_modes.py', - 'get_llvm_hash.py', - 'git_llvm_rev.py', - 'patch_manager.py', - 'subprocess_helpers.py', - ] + clone_files = [ + "failure_modes.py", + "get_llvm_hash.py", + "git_llvm_rev.py", + "patch_manager.py", + "subprocess_helpers.py", + ] - filesdir = os.path.join(chromiumos_overlay, - 'sys-devel/llvm/files/patch_manager') - for f in clone_files: - source = os.path.join(my_dir, f) - dest = os.path.join(filesdir, f) - print('%r => %r' % (source, dest)) - shutil.copyfile(source, dest) + filesdir = os.path.join( + chromiumos_overlay, "sys-devel/llvm/files/patch_manager" + ) + for f in clone_files: + source = os.path.join(my_dir, f) + dest = os.path.join(filesdir, f) + print("%r => %r" % (source, dest)) + shutil.copyfile(source, dest) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/llvm_tools/custom_script_example.py b/llvm_tools/custom_script_example.py index 6251b971..4b90e88b 100755 --- a/llvm_tools/custom_script_example.py +++ b/llvm_tools/custom_script_example.py @@ -15,58 +15,61 @@ from update_tryjob_status import TryjobStatus def main(): - """Determines the exit code based off of the contents of the .JSON file.""" - - # Index 1 in 'sys.argv' is the path to the .JSON file which contains - # the contents of the tryjob. - # - # Format of the tryjob contents: - # { - # "status" : [TRYJOB_STATUS], - # "buildbucket_id" : [BUILDBUCKET_ID], - # "extra_cls" : [A_LIST_OF_EXTRA_CLS_PASSED_TO_TRYJOB], - # "url" : [GERRIT_URL], - # "builder" : [TRYJOB_BUILDER_LIST], - # "rev" : [REVISION], - # "link" : [LINK_TO_TRYJOB], - # "options" : [A_LIST_OF_OPTIONS_PASSED_TO_TRYJOB] - # } - abs_path_json_file = sys.argv[1] - - with open(abs_path_json_file) as f: - tryjob_contents = json.load(f) - - CUTOFF_PENDING_REVISION = 369416 - - SKIP_REVISION_CUTOFF_START = 369420 - SKIP_REVISION_CUTOFF_END = 369428 - - if tryjob_contents['status'] == TryjobStatus.PENDING.value: - if tryjob_contents['rev'] <= CUTOFF_PENDING_REVISION: - # Exit code 0 means to set the tryjob 'status' as 'good'. - sys.exit(0) - - # Exit code 124 means to set the tryjob 'status' as 'bad'. - sys.exit(124) - - if tryjob_contents['status'] == TryjobStatus.BAD.value: - # Need to take a closer look at the contents of the tryjob to then decide - # what that tryjob's 'status' value should be. - # - # Since the exit code is not in the mapping, an exception will occur which - # will save the file in the directory of this custom script example. - sys.exit(1) - - if tryjob_contents['status'] == TryjobStatus.SKIP.value: - # Validate that the 'skip value is really set between the cutoffs. 
-    if SKIP_REVISION_CUTOFF_START < tryjob_contents['rev'] < \
-        SKIP_REVISION_CUTOFF_END:
-      # Exit code 125 means to set the tryjob 'status' as 'skip'.
-      sys.exit(125)
-
-    if tryjob_contents['rev'] >= SKIP_REVISION_CUTOFF_END:
-      sys.exit(124)
+    """Determines the exit code based on the contents of the .JSON file."""
-
-if __name__ == '__main__':
-  main()
+    # Index 1 in 'sys.argv' is the path to the .JSON file which contains
+    # the contents of the tryjob.
+    #
+    # Format of the tryjob contents:
+    # {
+    # "status" : [TRYJOB_STATUS],
+    # "buildbucket_id" : [BUILDBUCKET_ID],
+    # "extra_cls" : [A_LIST_OF_EXTRA_CLS_PASSED_TO_TRYJOB],
+    # "url" : [GERRIT_URL],
+    # "builder" : [TRYJOB_BUILDER_LIST],
+    # "rev" : [REVISION],
+    # "link" : [LINK_TO_TRYJOB],
+    # "options" : [A_LIST_OF_OPTIONS_PASSED_TO_TRYJOB]
+    # }
+    abs_path_json_file = sys.argv[1]
+
+    with open(abs_path_json_file) as f:
+        tryjob_contents = json.load(f)
+
+    CUTOFF_PENDING_REVISION = 369416
+
+    SKIP_REVISION_CUTOFF_START = 369420
+    SKIP_REVISION_CUTOFF_END = 369428
+
+    if tryjob_contents["status"] == TryjobStatus.PENDING.value:
+        if tryjob_contents["rev"] <= CUTOFF_PENDING_REVISION:
+            # Exit code 0 means to set the tryjob 'status' as 'good'.
+            sys.exit(0)
+
+        # Exit code 124 means to set the tryjob 'status' as 'bad'.
+        sys.exit(124)
+
+    if tryjob_contents["status"] == TryjobStatus.BAD.value:
+        # Need to take a closer look at the contents of the tryjob to then decide
+        # what that tryjob's 'status' value should be.
+        #
+        # Since the exit code is not in the mapping, an exception will occur which
+        # will save the file in the directory of this custom script example.
+        sys.exit(1)
+
+    if tryjob_contents["status"] == TryjobStatus.SKIP.value:
+        # Validate that the 'skip' value is really set between the cutoffs.
+        if (
+            SKIP_REVISION_CUTOFF_START
+            < tryjob_contents["rev"]
+            < SKIP_REVISION_CUTOFF_END
+        ):
+            # Exit code 125 means to set the tryjob 'status' as 'skip'.
+            sys.exit(125)
+
+        if tryjob_contents["rev"] >= SKIP_REVISION_CUTOFF_END:
+            sys.exit(124)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/llvm_tools/failure_modes.py b/llvm_tools/failure_modes.py
index 13f0a99b..f043b1ec 100644
--- a/llvm_tools/failure_modes.py
+++ b/llvm_tools/failure_modes.py
@@ -11,13 +11,13 @@ import enum
 
 
 class FailureModes(enum.Enum):
-  """Different modes for the patch manager when handling a failed patch."""
+    """Different modes for the patch manager when handling a failed patch."""
 
-  FAIL = 'fail'
-  CONTINUE = 'continue'
-  DISABLE_PATCHES = 'disable_patches'
-  BISECT_PATCHES = 'bisect_patches'
-  REMOVE_PATCHES = 'remove_patches'
+    FAIL = "fail"
+    CONTINUE = "continue"
+    DISABLE_PATCHES = "disable_patches"
+    BISECT_PATCHES = "bisect_patches"
+    REMOVE_PATCHES = "remove_patches"
 
-  # Only used by 'bisect_patches'.
-  INTERNAL_BISECTION = 'internal_bisection'
+    # Only used by 'bisect_patches'.
+ INTERNAL_BISECTION = "internal_bisection" diff --git a/llvm_tools/fetch_cros_sdk_rolls.py b/llvm_tools/fetch_cros_sdk_rolls.py index cf49c3e1..72692b3d 100755 --- a/llvm_tools/fetch_cros_sdk_rolls.py +++ b/llvm_tools/fetch_cros_sdk_rolls.py @@ -14,101 +14,101 @@ import argparse import json import logging import os +from pathlib import Path import shutil import subprocess import sys import tempfile from typing import Dict, List -from pathlib import Path def fetch_all_sdk_manifest_paths() -> List[str]: - """Fetches all paths of SDK manifests; newer = later in the return value.""" - results = subprocess.run( - ['gsutil', 'ls', 'gs://chromiumos-sdk/cros-sdk-20??.*.Manifest'], - check=True, - stdout=subprocess.PIPE, - encoding='utf-8', - ).stdout - # These are named so that sorted order == newest last. - return sorted(x.strip() for x in results.splitlines()) + """Fetches all paths of SDK manifests; newer = later in the return value.""" + results = subprocess.run( + ["gsutil", "ls", "gs://chromiumos-sdk/cros-sdk-20??.*.Manifest"], + check=True, + stdout=subprocess.PIPE, + encoding="utf-8", + ).stdout + # These are named so that sorted order == newest last. + return sorted(x.strip() for x in results.splitlines()) def fetch_manifests_into(into_dir: Path, manifests: List[str]): - # Wrap this in a `try` block because gsutil likes to print to stdout *and* - # stderr even on success, so we silence them & only print on failure. - try: - subprocess.run( - [ - 'gsutil', - '-m', - 'cp', - '-I', - str(into_dir), - ], - check=True, - input='\n'.join(manifests), - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - encoding='utf-8', - ) - except subprocess.CalledProcessError as e: - logging.exception('gsutil failed; output:\n%s', e.stdout) + # Wrap this in a `try` block because gsutil likes to print to stdout *and* + # stderr even on success, so we silence them & only print on failure. + try: + subprocess.run( + [ + "gsutil", + "-m", + "cp", + "-I", + str(into_dir), + ], + check=True, + input="\n".join(manifests), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding="utf-8", + ) + except subprocess.CalledProcessError as e: + logging.exception("gsutil failed; output:\n%s", e.stdout) def load_manifest_versions(manifest: Path) -> Dict[str, str]: - with manifest.open(encoding='utf-8') as f: - raw_versions = json.load(f) + with manifest.open(encoding="utf-8") as f: + raw_versions = json.load(f) - # We get a dict of list of lists of versions and some other metadata, e.g. - # {"foo/bar": [["1.2.3", {}]]} - # Trim out the metadata. - return {k: v[0][0] for k, v in raw_versions['packages'].items()} + # We get a dict of list of lists of versions and some other metadata, e.g. + # {"foo/bar": [["1.2.3", {}]]} + # Trim out the metadata. + return {k: v[0][0] for k, v in raw_versions["packages"].items()} def main(): - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('-d', - '--debug', - action='store_true', - help='Emit debugging output') - parser.add_argument( - '-n', - '--number', - type=int, - default=20, - help='Number of recent manifests to fetch info about. 0 means unlimited.' 
- ) - args = parser.parse_args() - - is_debug = args.debug - logging.basicConfig(level=logging.DEBUG if is_debug else logging.INFO) - - logging.debug('Fetching SDK manifests') - manifest_paths = fetch_all_sdk_manifest_paths() - logging.debug('%d SDK manifests fetched', len(manifest_paths)) - - number = args.number - if number: - manifest_paths = manifest_paths[-number:] - - tempdir = Path(tempfile.mkdtemp(prefix='cros-sdk-rolls')) - try: - logging.debug('Working in tempdir %r', tempdir) - fetch_manifests_into(tempdir, manifest_paths) - - for path in manifest_paths: - basename = os.path.basename(path) - versions = load_manifest_versions(tempdir.joinpath(basename)) - print(f'{basename}: {versions["sys-devel/llvm"]}') - finally: - if is_debug: - logging.debug('Keeping around tempdir %r to aid debugging', tempdir) - else: - shutil.rmtree(tempdir) - - -if __name__ == '__main__': - sys.exit(main()) + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "-d", "--debug", action="store_true", help="Emit debugging output" + ) + parser.add_argument( + "-n", + "--number", + type=int, + default=20, + help="Number of recent manifests to fetch info about. 0 means unlimited.", + ) + args = parser.parse_args() + + is_debug = args.debug + logging.basicConfig(level=logging.DEBUG if is_debug else logging.INFO) + + logging.debug("Fetching SDK manifests") + manifest_paths = fetch_all_sdk_manifest_paths() + logging.debug("%d SDK manifests fetched", len(manifest_paths)) + + number = args.number + if number: + manifest_paths = manifest_paths[-number:] + + tempdir = Path(tempfile.mkdtemp(prefix="cros-sdk-rolls")) + try: + logging.debug("Working in tempdir %r", tempdir) + fetch_manifests_into(tempdir, manifest_paths) + + for path in manifest_paths: + basename = os.path.basename(path) + versions = load_manifest_versions(tempdir.joinpath(basename)) + print(f'{basename}: {versions["sys-devel/llvm"]}') + finally: + if is_debug: + logging.debug("Keeping around tempdir %r to aid debugging", tempdir) + else: + shutil.rmtree(tempdir) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/llvm_tools/get_llvm_hash.py b/llvm_tools/get_llvm_hash.py index d5088079..9c0a5020 100755 --- a/llvm_tools/get_llvm_hash.py +++ b/llvm_tools/get_llvm_hash.py @@ -22,387 +22,422 @@ import git_llvm_rev from subprocess_helpers import check_output from subprocess_helpers import CheckCommand -_LLVM_GIT_URL = ('https://chromium.googlesource.com/external/github.com/llvm' - '/llvm-project') -KNOWN_HASH_SOURCES = {'google3', 'google3-unstable', 'tot'} +_LLVM_GIT_URL = ( + "https://chromium.googlesource.com/external/github.com/llvm" "/llvm-project" +) + +KNOWN_HASH_SOURCES = {"google3", "google3-unstable", "tot"} def GetVersionFrom(src_dir, git_hash): - """Obtain an SVN-style version number based on the LLVM git hash passed in. + """Obtain an SVN-style version number based on the LLVM git hash passed in. - Args: - src_dir: LLVM's source directory. - git_hash: The git hash. + Args: + src_dir: LLVM's source directory. + git_hash: The git hash. - Returns: - An SVN-style version number associated with the git hash. - """ + Returns: + An SVN-style version number associated with the git hash. 
+    """
 
-  version = git_llvm_rev.translate_sha_to_rev(
-      git_llvm_rev.LLVMConfig(remote='origin', dir=src_dir), git_hash)
-  # Note: branches aren't supported
-  assert version.branch == git_llvm_rev.MAIN_BRANCH, version.branch
-  return version.number
+    version = git_llvm_rev.translate_sha_to_rev(
+        git_llvm_rev.LLVMConfig(remote="origin", dir=src_dir), git_hash
+    )
+    # Note: branches aren't supported
+    assert version.branch == git_llvm_rev.MAIN_BRANCH, version.branch
+    return version.number
 
 
 def GetGitHashFrom(src_dir, version):
-  """Finds the commit hash(es) of the LLVM version in the git log history.
+    """Finds the commit hash(es) of the LLVM version in the git log history.
 
-  Args:
-    src_dir: The LLVM source tree.
-    version: The version number.
+    Args:
+      src_dir: The LLVM source tree.
+      version: The version number.
 
-  Returns:
-    A git hash string corresponding to the version number.
+    Returns:
+      A git hash string corresponding to the version number.
 
-  Raises:
-    subprocess.CalledProcessError: Failed to find a git hash.
-  """
+    Raises:
+      subprocess.CalledProcessError: Failed to find a git hash.
+    """
 
-  return git_llvm_rev.translate_rev_to_sha(
-      git_llvm_rev.LLVMConfig(remote='origin', dir=src_dir),
-      git_llvm_rev.Rev(branch=git_llvm_rev.MAIN_BRANCH, number=version))
+    return git_llvm_rev.translate_rev_to_sha(
+        git_llvm_rev.LLVMConfig(remote="origin", dir=src_dir),
+        git_llvm_rev.Rev(branch=git_llvm_rev.MAIN_BRANCH, number=version),
+    )
 
 
 def CheckoutBranch(src_dir, branch):
-  """Checks out and pulls from a branch in a git repo.
+    """Checks out and pulls from a branch in a git repo.
 
-  Args:
-    src_dir: The LLVM source tree.
-    branch: The git branch to checkout in src_dir.
+    Args:
+      src_dir: The LLVM source tree.
+      branch: The git branch to checkout in src_dir.
 
-  Raises:
-    ValueError: Failed to checkout or pull branch version
-  """
-  CheckCommand(['git', '-C', src_dir, 'checkout', branch])
-  CheckCommand(['git', '-C', src_dir, 'pull'])
+    Raises:
+      ValueError: Failed to checkout or pull branch version
+    """
+    CheckCommand(["git", "-C", src_dir, "checkout", branch])
+    CheckCommand(["git", "-C", src_dir, "pull"])
 
 
 def ParseLLVMMajorVersion(cmakelist):
-  """Reads CMakeList.txt file contents for LLVMMajor Version.
+    """Reads CMakeLists.txt file contents for the LLVM major version.
 
-  Args:
-    cmakelist: contents of CMakeList.txt
+    Args:
+      cmakelist: contents of CMakeLists.txt
 
-  Returns:
-    The major version number as a string
+    Returns:
+      The major version number as a string
 
-  Raises:
-    ValueError: The major version cannot be parsed from cmakelist
-  """
-  match = re.search(r'\n\s+set\(LLVM_VERSION_MAJOR (?P<major>\d+)\)',
-                    cmakelist)
-  if not match:
-    raise ValueError('Failed to parse CMakeList for llvm major version')
-  return match.group('major')
+    Raises:
+      ValueError: The major version cannot be parsed from cmakelist
+    """
+    match = re.search(
+        r"\n\s+set\(LLVM_VERSION_MAJOR (?P<major>\d+)\)", cmakelist
+    )
+    if not match:
+        raise ValueError("Failed to parse CMakeList for llvm major version")
+    return match.group("major")
 
 
 @functools.lru_cache(maxsize=1)
 def GetLLVMMajorVersion(git_hash=None):
-  """Reads llvm/CMakeList.txt file contents for LLVMMajor Version.
-
-  Args:
-    git_hash: git hash of llvm version as string or None for top of trunk
-
-  Returns:
-    The major version number as a string
-
-  Raises:
-    ValueError: The major version cannot be parsed from cmakelist or
-      there was a failure to checkout git_hash version
-    FileExistsError: The src directory doe not contain CMakeList.txt
-  """
-  src_dir = GetAndUpdateLLVMProjectInLLVMTools()
-  cmakelists_path = os.path.join(src_dir, 'llvm', 'CMakeLists.txt')
-  if git_hash:
-    CheckCommand(['git', '-C', src_dir, 'checkout', git_hash])
-  try:
-    with open(cmakelists_path) as cmakelists_file:
-      return ParseLLVMMajorVersion(cmakelists_file.read())
-  finally:
+    """Reads llvm/CMakeLists.txt file contents for the LLVM major version.
+
+    Args:
+      git_hash: git hash of llvm version as string or None for top of trunk
+
+    Returns:
+      The major version number as a string
+
+    Raises:
+      ValueError: The major version cannot be parsed from cmakelist or
+        there was a failure to checkout git_hash version
+      FileExistsError: The src directory does not contain CMakeLists.txt
+    """
+    src_dir = GetAndUpdateLLVMProjectInLLVMTools()
+    cmakelists_path = os.path.join(src_dir, "llvm", "CMakeLists.txt")
     if git_hash:
-    CheckoutBranch(src_dir, git_llvm_rev.MAIN_BRANCH)
+        CheckCommand(["git", "-C", src_dir, "checkout", git_hash])
+    try:
+        with open(cmakelists_path) as cmakelists_file:
+            return ParseLLVMMajorVersion(cmakelists_file.read())
+    finally:
+        if git_hash:
+            CheckoutBranch(src_dir, git_llvm_rev.MAIN_BRANCH)
 
 
 @contextlib.contextmanager
 def CreateTempLLVMRepo(temp_dir):
-  """Adds a LLVM worktree to 'temp_dir'.
+    """Adds a LLVM worktree to 'temp_dir'.
 
-  Creating a worktree because the LLVM source tree in
-  '../toolchain-utils/llvm_tools/llvm-project-copy' should not be modified.
+    Creating a worktree because the LLVM source tree in
+    '../toolchain-utils/llvm_tools/llvm-project-copy' should not be modified.
 
-  This is useful for applying patches to a source tree but do not want to modify
-  the actual LLVM source tree in 'llvm-project-copy'.
+    This is useful for applying patches to a source tree without modifying
+    the actual LLVM source tree in 'llvm-project-copy'.
 
-  Args:
-    temp_dir: An absolute path to the temporary directory to put the worktree in
-      (obtained via 'tempfile.mkdtemp()').
+    Args:
+      temp_dir: An absolute path to the temporary directory to put the worktree in
+        (obtained via 'tempfile.mkdtemp()').
 
-  Yields:
-    The absolute path to 'temp_dir'.
+    Yields:
+      The absolute path to 'temp_dir'.
 
-  Raises:
-    subprocess.CalledProcessError: Failed to remove the worktree.
-    ValueError: Failed to add a worktree.
-  """
+    Raises:
+      subprocess.CalledProcessError: Failed to remove the worktree.
+      ValueError: Failed to add a worktree.
+ """ - abs_path_to_llvm_project_dir = GetAndUpdateLLVMProjectInLLVMTools() - CheckCommand([ - 'git', '-C', abs_path_to_llvm_project_dir, 'worktree', 'add', '--detach', - temp_dir, - 'origin/%s' % git_llvm_rev.MAIN_BRANCH - ]) + abs_path_to_llvm_project_dir = GetAndUpdateLLVMProjectInLLVMTools() + CheckCommand( + [ + "git", + "-C", + abs_path_to_llvm_project_dir, + "worktree", + "add", + "--detach", + temp_dir, + "origin/%s" % git_llvm_rev.MAIN_BRANCH, + ] + ) - try: - yield temp_dir - finally: - if os.path.isdir(temp_dir): - check_output([ - 'git', '-C', abs_path_to_llvm_project_dir, 'worktree', 'remove', - '-f', temp_dir - ]) + try: + yield temp_dir + finally: + if os.path.isdir(temp_dir): + check_output( + [ + "git", + "-C", + abs_path_to_llvm_project_dir, + "worktree", + "remove", + "-f", + temp_dir, + ] + ) def GetAndUpdateLLVMProjectInLLVMTools(): - """Gets the absolute path to 'llvm-project-copy' directory in 'llvm_tools'. + """Gets the absolute path to 'llvm-project-copy' directory in 'llvm_tools'. - The intent of this function is to avoid cloning the LLVM repo and then - discarding the contents of the repo. The function will create a directory - in '../toolchain-utils/llvm_tools' called 'llvm-project-copy' if this - directory does not exist yet. If it does not exist, then it will use the - LLVMHash() class to clone the LLVM repo into 'llvm-project-copy'. Otherwise, - it will clean the contents of that directory and then fetch from the chromium - LLVM mirror. In either case, this function will return the absolute path to - 'llvm-project-copy' directory. + The intent of this function is to avoid cloning the LLVM repo and then + discarding the contents of the repo. The function will create a directory + in '../toolchain-utils/llvm_tools' called 'llvm-project-copy' if this + directory does not exist yet. If it does not exist, then it will use the + LLVMHash() class to clone the LLVM repo into 'llvm-project-copy'. Otherwise, + it will clean the contents of that directory and then fetch from the chromium + LLVM mirror. In either case, this function will return the absolute path to + 'llvm-project-copy' directory. - Returns: - Absolute path to 'llvm-project-copy' directory in 'llvm_tools' + Returns: + Absolute path to 'llvm-project-copy' directory in 'llvm_tools' - Raises: - ValueError: LLVM repo (in 'llvm-project-copy' dir.) has changes or failed to - checkout to main or failed to fetch from chromium mirror of LLVM. - """ + Raises: + ValueError: LLVM repo (in 'llvm-project-copy' dir.) has changes or failed to + checkout to main or failed to fetch from chromium mirror of LLVM. + """ - abs_path_to_llvm_tools_dir = os.path.dirname(os.path.abspath(__file__)) + abs_path_to_llvm_tools_dir = os.path.dirname(os.path.abspath(__file__)) - abs_path_to_llvm_project_dir = os.path.join(abs_path_to_llvm_tools_dir, - 'llvm-project-copy') + abs_path_to_llvm_project_dir = os.path.join( + abs_path_to_llvm_tools_dir, "llvm-project-copy" + ) - if not os.path.isdir(abs_path_to_llvm_project_dir): - print((f'Checking out LLVM to {abs_path_to_llvm_project_dir}\n' - 'so that we can map between commit hashes and revision numbers.\n' - 'This may take a while, but only has to be done once.'), - file=sys.stderr) - os.mkdir(abs_path_to_llvm_project_dir) + if not os.path.isdir(abs_path_to_llvm_project_dir): + print( + ( + f"Checking out LLVM to {abs_path_to_llvm_project_dir}\n" + "so that we can map between commit hashes and revision numbers.\n" + "This may take a while, but only has to be done once." 
+ ), + file=sys.stderr, + ) + os.mkdir(abs_path_to_llvm_project_dir) - LLVMHash().CloneLLVMRepo(abs_path_to_llvm_project_dir) - else: - # `git status` has a '-s'/'--short' option that shortens the output. - # With the '-s' option, if no changes were made to the LLVM repo, then the - # output (assigned to 'repo_status') would be empty. - repo_status = check_output( - ['git', '-C', abs_path_to_llvm_project_dir, 'status', '-s']) + LLVMHash().CloneLLVMRepo(abs_path_to_llvm_project_dir) + else: + # `git status` has a '-s'/'--short' option that shortens the output. + # With the '-s' option, if no changes were made to the LLVM repo, then the + # output (assigned to 'repo_status') would be empty. + repo_status = check_output( + ["git", "-C", abs_path_to_llvm_project_dir, "status", "-s"] + ) - if repo_status.rstrip(): - raise ValueError('LLVM repo in %s has changes, please remove.' % - abs_path_to_llvm_project_dir) + if repo_status.rstrip(): + raise ValueError( + "LLVM repo in %s has changes, please remove." + % abs_path_to_llvm_project_dir + ) - CheckoutBranch(abs_path_to_llvm_project_dir, git_llvm_rev.MAIN_BRANCH) + CheckoutBranch(abs_path_to_llvm_project_dir, git_llvm_rev.MAIN_BRANCH) - return abs_path_to_llvm_project_dir + return abs_path_to_llvm_project_dir def GetGoogle3LLVMVersion(stable): - """Gets the latest google3 LLVM version. + """Gets the latest google3 LLVM version. - Args: - stable: boolean, use the stable version or the unstable version + Args: + stable: boolean, use the stable version or the unstable version - Returns: - The latest LLVM SVN version as an integer. + Returns: + The latest LLVM SVN version as an integer. - Raises: - subprocess.CalledProcessError: An invalid path has been provided to the - `cat` command. - """ + Raises: + subprocess.CalledProcessError: An invalid path has been provided to the + `cat` command. + """ - subdir = 'stable' if stable else 'llvm_unstable' + subdir = "stable" if stable else "llvm_unstable" - # Cmd to get latest google3 LLVM version. - cmd = [ - 'cat', - os.path.join('/google/src/head/depot/google3/third_party/crosstool/v18', - subdir, 'installs/llvm/git_origin_rev_id') - ] + # Cmd to get latest google3 LLVM version. + cmd = [ + "cat", + os.path.join( + "/google/src/head/depot/google3/third_party/crosstool/v18", + subdir, + "installs/llvm/git_origin_rev_id", + ), + ] - # Get latest version. - git_hash = check_output(cmd) + # Get latest version. + git_hash = check_output(cmd) - # Change type to an integer - return GetVersionFrom(GetAndUpdateLLVMProjectInLLVMTools(), - git_hash.rstrip()) + # Change type to an integer + return GetVersionFrom( + GetAndUpdateLLVMProjectInLLVMTools(), git_hash.rstrip() + ) def IsSvnOption(svn_option): - """Validates whether the argument (string) is a git hash option. + """Validates whether the argument (string) is a git hash option. - The argument is used to find the git hash of LLVM. + The argument is used to find the git hash of LLVM. - Args: - svn_option: The option passed in as a command line argument. + Args: + svn_option: The option passed in as a command line argument. - Returns: - lowercase svn_option if it is a known hash source, otherwise the svn_option - as an int + Returns: + lowercase svn_option if it is a known hash source, otherwise the svn_option + as an int - Raises: - ValueError: Invalid svn option provided. - """ + Raises: + ValueError: Invalid svn option provided. 
+ """ - if svn_option.lower() in KNOWN_HASH_SOURCES: - return svn_option.lower() + if svn_option.lower() in KNOWN_HASH_SOURCES: + return svn_option.lower() - try: - svn_version = int(svn_option) + try: + svn_version = int(svn_option) - return svn_version + return svn_version - # Unable to convert argument to an int, so the option is invalid. - # - # Ex: 'one'. - except ValueError: - pass + # Unable to convert argument to an int, so the option is invalid. + # + # Ex: 'one'. + except ValueError: + pass - raise ValueError('Invalid LLVM git hash option provided: %s' % svn_option) + raise ValueError("Invalid LLVM git hash option provided: %s" % svn_option) def GetLLVMHashAndVersionFromSVNOption(svn_option): - """Gets the LLVM hash and LLVM version based off of the svn option. + """Gets the LLVM hash and LLVM version based off of the svn option. - Args: - svn_option: A valid svn option obtained from the command line. - Ex. 'google3', 'tot', or <svn_version> such as 365123. + Args: + svn_option: A valid svn option obtained from the command line. + Ex. 'google3', 'tot', or <svn_version> such as 365123. - Returns: - A tuple that is the LLVM git hash and LLVM version. - """ + Returns: + A tuple that is the LLVM git hash and LLVM version. + """ - new_llvm_hash = LLVMHash() + new_llvm_hash = LLVMHash() - # Determine which LLVM git hash to retrieve. - if svn_option == 'tot': - git_hash = new_llvm_hash.GetTopOfTrunkGitHash() - version = GetVersionFrom(GetAndUpdateLLVMProjectInLLVMTools(), git_hash) - elif isinstance(svn_option, int): - version = svn_option - git_hash = GetGitHashFrom(GetAndUpdateLLVMProjectInLLVMTools(), version) - else: - assert svn_option in ('google3', 'google3-unstable') - version = GetGoogle3LLVMVersion(stable=svn_option == 'google3') + # Determine which LLVM git hash to retrieve. + if svn_option == "tot": + git_hash = new_llvm_hash.GetTopOfTrunkGitHash() + version = GetVersionFrom(GetAndUpdateLLVMProjectInLLVMTools(), git_hash) + elif isinstance(svn_option, int): + version = svn_option + git_hash = GetGitHashFrom(GetAndUpdateLLVMProjectInLLVMTools(), version) + else: + assert svn_option in ("google3", "google3-unstable") + version = GetGoogle3LLVMVersion(stable=svn_option == "google3") - git_hash = GetGitHashFrom(GetAndUpdateLLVMProjectInLLVMTools(), version) + git_hash = GetGitHashFrom(GetAndUpdateLLVMProjectInLLVMTools(), version) - return git_hash, version + return git_hash, version class LLVMHash(object): - """Provides methods to retrieve a LLVM hash.""" + """Provides methods to retrieve a LLVM hash.""" - @staticmethod - @contextlib.contextmanager - def CreateTempDirectory(): - temp_dir = tempfile.mkdtemp() + @staticmethod + @contextlib.contextmanager + def CreateTempDirectory(): + temp_dir = tempfile.mkdtemp() - try: - yield temp_dir - finally: - if os.path.isdir(temp_dir): - shutil.rmtree(temp_dir, ignore_errors=True) + try: + yield temp_dir + finally: + if os.path.isdir(temp_dir): + shutil.rmtree(temp_dir, ignore_errors=True) - def CloneLLVMRepo(self, temp_dir): - """Clones the LLVM repo. + def CloneLLVMRepo(self, temp_dir): + """Clones the LLVM repo. - Args: - temp_dir: The temporary directory to clone the repo to. + Args: + temp_dir: The temporary directory to clone the repo to. - Raises: - ValueError: Failed to clone the LLVM repo. - """ + Raises: + ValueError: Failed to clone the LLVM repo. 
+ """ - clone_cmd = ['git', 'clone', _LLVM_GIT_URL, temp_dir] + clone_cmd = ["git", "clone", _LLVM_GIT_URL, temp_dir] - clone_cmd_obj = subprocess.Popen(clone_cmd, stderr=subprocess.PIPE) - _, stderr = clone_cmd_obj.communicate() + clone_cmd_obj = subprocess.Popen(clone_cmd, stderr=subprocess.PIPE) + _, stderr = clone_cmd_obj.communicate() - if clone_cmd_obj.returncode: - raise ValueError('Failed to clone the LLVM repo: %s' % stderr) + if clone_cmd_obj.returncode: + raise ValueError("Failed to clone the LLVM repo: %s" % stderr) - def GetLLVMHash(self, version): - """Retrieves the LLVM hash corresponding to the LLVM version passed in. + def GetLLVMHash(self, version): + """Retrieves the LLVM hash corresponding to the LLVM version passed in. - Args: - version: The LLVM version to use as a delimiter. + Args: + version: The LLVM version to use as a delimiter. - Returns: - The hash as a string that corresponds to the LLVM version. - """ + Returns: + The hash as a string that corresponds to the LLVM version. + """ - hash_value = GetGitHashFrom(GetAndUpdateLLVMProjectInLLVMTools(), version) - return hash_value + hash_value = GetGitHashFrom( + GetAndUpdateLLVMProjectInLLVMTools(), version + ) + return hash_value - def GetGoogle3LLVMHash(self): - """Retrieves the google3 LLVM hash.""" + def GetGoogle3LLVMHash(self): + """Retrieves the google3 LLVM hash.""" - return self.GetLLVMHash(GetGoogle3LLVMVersion(stable=True)) + return self.GetLLVMHash(GetGoogle3LLVMVersion(stable=True)) - def GetGoogle3UnstableLLVMHash(self): - """Retrieves the LLVM hash of google3's unstable compiler.""" - return self.GetLLVMHash(GetGoogle3LLVMVersion(stable=False)) + def GetGoogle3UnstableLLVMHash(self): + """Retrieves the LLVM hash of google3's unstable compiler.""" + return self.GetLLVMHash(GetGoogle3LLVMVersion(stable=False)) - def GetTopOfTrunkGitHash(self): - """Gets the latest git hash from top of trunk of LLVM.""" + def GetTopOfTrunkGitHash(self): + """Gets the latest git hash from top of trunk of LLVM.""" - path_to_main_branch = 'refs/heads/main' - llvm_tot_git_hash = check_output( - ['git', 'ls-remote', _LLVM_GIT_URL, path_to_main_branch]) - return llvm_tot_git_hash.rstrip().split()[0] + path_to_main_branch = "refs/heads/main" + llvm_tot_git_hash = check_output( + ["git", "ls-remote", _LLVM_GIT_URL, path_to_main_branch] + ) + return llvm_tot_git_hash.rstrip().split()[0] def main(): - """Prints the git hash of LLVM. - - Parses the command line for the optional command line - arguments. - """ - - # Create parser and add optional command-line arguments. - parser = argparse.ArgumentParser(description='Finds the LLVM hash.') - parser.add_argument( - '--llvm_version', - type=IsSvnOption, - required=True, - help='which git hash of LLVM to find. Either a svn revision, or one ' - 'of %s' % sorted(KNOWN_HASH_SOURCES)) - - # Parse command-line arguments. - args_output = parser.parse_args() - - cur_llvm_version = args_output.llvm_version - - new_llvm_hash = LLVMHash() - - if isinstance(cur_llvm_version, int): - # Find the git hash of the specific LLVM version. - print(new_llvm_hash.GetLLVMHash(cur_llvm_version)) - elif cur_llvm_version == 'google3': - print(new_llvm_hash.GetGoogle3LLVMHash()) - elif cur_llvm_version == 'google3-unstable': - print(new_llvm_hash.GetGoogle3UnstableLLVMHash()) - else: - assert cur_llvm_version == 'tot' - print(new_llvm_hash.GetTopOfTrunkGitHash()) - - -if __name__ == '__main__': - main() + """Prints the git hash of LLVM. + + Parses the command line for the optional command line + arguments. 
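+
+    Example:
+        $ get_llvm_hash.py --llvm_version google3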
+ """ + + # Create parser and add optional command-line arguments. + parser = argparse.ArgumentParser(description="Finds the LLVM hash.") + parser.add_argument( + "--llvm_version", + type=IsSvnOption, + required=True, + help="which git hash of LLVM to find. Either a svn revision, or one " + "of %s" % sorted(KNOWN_HASH_SOURCES), + ) + + # Parse command-line arguments. + args_output = parser.parse_args() + + cur_llvm_version = args_output.llvm_version + + new_llvm_hash = LLVMHash() + + if isinstance(cur_llvm_version, int): + # Find the git hash of the specific LLVM version. + print(new_llvm_hash.GetLLVMHash(cur_llvm_version)) + elif cur_llvm_version == "google3": + print(new_llvm_hash.GetGoogle3LLVMHash()) + elif cur_llvm_version == "google3-unstable": + print(new_llvm_hash.GetGoogle3UnstableLLVMHash()) + else: + assert cur_llvm_version == "tot" + print(new_llvm_hash.GetTopOfTrunkGitHash()) + + +if __name__ == "__main__": + main() diff --git a/llvm_tools/get_llvm_hash_unittest.py b/llvm_tools/get_llvm_hash_unittest.py index 7f3ad17a..32fb5b53 100755 --- a/llvm_tools/get_llvm_hash_unittest.py +++ b/llvm_tools/get_llvm_hash_unittest.py @@ -15,124 +15,148 @@ import unittest.mock as mock import get_llvm_hash from get_llvm_hash import LLVMHash + # We grab protected stuff from get_llvm_hash. That's OK. # pylint: disable=protected-access def MakeMockPopen(return_code): - def MockPopen(*_args, **_kwargs): - result = mock.MagicMock() - result.returncode = return_code + def MockPopen(*_args, **_kwargs): + result = mock.MagicMock() + result.returncode = return_code - communicate_result = result.communicate.return_value - # Communicate returns stdout, stderr. - communicate_result.__iter__.return_value = (None, 'some stderr') - return result + communicate_result = result.communicate.return_value + # Communicate returns stdout, stderr. 
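+        # Assigning to __iter__ makes the MagicMock iterable, so callers can
+        # unpack `_, stderr = popen.communicate()` just like a real Popen.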
+ communicate_result.__iter__.return_value = (None, "some stderr") + return result - return MockPopen + return MockPopen class TestGetLLVMHash(unittest.TestCase): - """The LLVMHash test class.""" - - @mock.patch.object(subprocess, 'Popen') - def testCloneRepoSucceedsWhenGitSucceeds(self, popen_mock): - popen_mock.side_effect = MakeMockPopen(return_code=0) - llvm_hash = LLVMHash() - - into_tempdir = '/tmp/tmpTest' - llvm_hash.CloneLLVMRepo(into_tempdir) - popen_mock.assert_called_with( - ['git', 'clone', get_llvm_hash._LLVM_GIT_URL, into_tempdir], - stderr=subprocess.PIPE) - - @mock.patch.object(subprocess, 'Popen') - def testCloneRepoFailsWhenGitFails(self, popen_mock): - popen_mock.side_effect = MakeMockPopen(return_code=1) - - with self.assertRaises(ValueError) as err: - LLVMHash().CloneLLVMRepo('/tmp/tmp1') - - self.assertIn('Failed to clone', str(err.exception.args)) - self.assertIn('some stderr', str(err.exception.args)) - - @mock.patch.object(get_llvm_hash, 'GetGitHashFrom') - def testGetGitHashWorks(self, mock_get_git_hash): - mock_get_git_hash.return_value = 'a13testhash2' - - self.assertEqual(get_llvm_hash.GetGitHashFrom('/tmp/tmpTest', 100), - 'a13testhash2') - - mock_get_git_hash.assert_called_once() - - @mock.patch.object(LLVMHash, 'GetLLVMHash') - @mock.patch.object(get_llvm_hash, 'GetGoogle3LLVMVersion') - def testReturnGoogle3LLVMHash(self, mock_google3_llvm_version, - mock_get_llvm_hash): - mock_get_llvm_hash.return_value = 'a13testhash3' - mock_google3_llvm_version.return_value = 1000 - self.assertEqual(LLVMHash().GetGoogle3LLVMHash(), 'a13testhash3') - mock_get_llvm_hash.assert_called_once_with(1000) - - @mock.patch.object(LLVMHash, 'GetLLVMHash') - @mock.patch.object(get_llvm_hash, 'GetGoogle3LLVMVersion') - def testReturnGoogle3UnstableLLVMHash(self, mock_google3_llvm_version, - mock_get_llvm_hash): - mock_get_llvm_hash.return_value = 'a13testhash3' - mock_google3_llvm_version.return_value = 1000 - self.assertEqual(LLVMHash().GetGoogle3UnstableLLVMHash(), 'a13testhash3') - mock_get_llvm_hash.assert_called_once_with(1000) - - @mock.patch.object(subprocess, 'check_output') - def testSuccessfullyGetGitHashFromToTOfLLVM(self, mock_check_output): - mock_check_output.return_value = 'a123testhash1 path/to/main\n' - self.assertEqual(LLVMHash().GetTopOfTrunkGitHash(), 'a123testhash1') - mock_check_output.assert_called_once() - - @mock.patch.object(subprocess, 'Popen') - def testCheckoutBranch(self, mock_popen): - mock_popen.return_value = mock.MagicMock(communicate=lambda: (None, None), - returncode=0) - get_llvm_hash.CheckoutBranch('fake/src_dir', 'fake_branch') - self.assertEqual( - mock_popen.call_args_list[0][0], - (['git', '-C', 'fake/src_dir', 'checkout', 'fake_branch'], )) - self.assertEqual(mock_popen.call_args_list[1][0], - (['git', '-C', 'fake/src_dir', 'pull'], )) - - def testParseLLVMMajorVersion(self): - cmakelist_42 = ('set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)\n' - 'if(NOT DEFINED LLVM_VERSION_MAJOR)\n' - ' set(LLVM_VERSION_MAJOR 42)\n' - 'endif()') - self.assertEqual(get_llvm_hash.ParseLLVMMajorVersion(cmakelist_42), '42') - - def testParseLLVMMajorVersionInvalid(self): - invalid_cmakelist = 'invalid cmakelist.txt contents' - with self.assertRaises(ValueError): - get_llvm_hash.ParseLLVMMajorVersion(invalid_cmakelist) - - @mock.patch.object(get_llvm_hash, 'GetAndUpdateLLVMProjectInLLVMTools') - @mock.patch.object(get_llvm_hash, 'ParseLLVMMajorVersion') - @mock.patch.object(get_llvm_hash, 'CheckCommand') - @mock.patch.object(get_llvm_hash, 'CheckoutBranch') - 
@mock.patch('get_llvm_hash.open', - mock.mock_open(read_data='mock contents'), - create=True) - def testGetLLVMMajorVersion(self, mock_checkout_branch, mock_git_checkout, - mock_major_version, mock_llvm_project_path): - mock_llvm_project_path.return_value = 'path/to/llvm-project' - mock_major_version.return_value = '1234' - self.assertEqual(get_llvm_hash.GetLLVMMajorVersion('314159265'), '1234') - # Second call should be memoized - self.assertEqual(get_llvm_hash.GetLLVMMajorVersion('314159265'), '1234') - mock_llvm_project_path.assert_called_once() - mock_major_version.assert_called_with('mock contents') - mock_git_checkout.assert_called_once_with( - ['git', '-C', 'path/to/llvm-project', 'checkout', '314159265']) - mock_checkout_branch.assert_called_once_with('path/to/llvm-project', - 'main') - - -if __name__ == '__main__': - unittest.main() + """The LLVMHash test class.""" + + @mock.patch.object(subprocess, "Popen") + def testCloneRepoSucceedsWhenGitSucceeds(self, popen_mock): + popen_mock.side_effect = MakeMockPopen(return_code=0) + llvm_hash = LLVMHash() + + into_tempdir = "/tmp/tmpTest" + llvm_hash.CloneLLVMRepo(into_tempdir) + popen_mock.assert_called_with( + ["git", "clone", get_llvm_hash._LLVM_GIT_URL, into_tempdir], + stderr=subprocess.PIPE, + ) + + @mock.patch.object(subprocess, "Popen") + def testCloneRepoFailsWhenGitFails(self, popen_mock): + popen_mock.side_effect = MakeMockPopen(return_code=1) + + with self.assertRaises(ValueError) as err: + LLVMHash().CloneLLVMRepo("/tmp/tmp1") + + self.assertIn("Failed to clone", str(err.exception.args)) + self.assertIn("some stderr", str(err.exception.args)) + + @mock.patch.object(get_llvm_hash, "GetGitHashFrom") + def testGetGitHashWorks(self, mock_get_git_hash): + mock_get_git_hash.return_value = "a13testhash2" + + self.assertEqual( + get_llvm_hash.GetGitHashFrom("/tmp/tmpTest", 100), "a13testhash2" + ) + + mock_get_git_hash.assert_called_once() + + @mock.patch.object(LLVMHash, "GetLLVMHash") + @mock.patch.object(get_llvm_hash, "GetGoogle3LLVMVersion") + def testReturnGoogle3LLVMHash( + self, mock_google3_llvm_version, mock_get_llvm_hash + ): + mock_get_llvm_hash.return_value = "a13testhash3" + mock_google3_llvm_version.return_value = 1000 + self.assertEqual(LLVMHash().GetGoogle3LLVMHash(), "a13testhash3") + mock_get_llvm_hash.assert_called_once_with(1000) + + @mock.patch.object(LLVMHash, "GetLLVMHash") + @mock.patch.object(get_llvm_hash, "GetGoogle3LLVMVersion") + def testReturnGoogle3UnstableLLVMHash( + self, mock_google3_llvm_version, mock_get_llvm_hash + ): + mock_get_llvm_hash.return_value = "a13testhash3" + mock_google3_llvm_version.return_value = 1000 + self.assertEqual( + LLVMHash().GetGoogle3UnstableLLVMHash(), "a13testhash3" + ) + mock_get_llvm_hash.assert_called_once_with(1000) + + @mock.patch.object(subprocess, "check_output") + def testSuccessfullyGetGitHashFromToTOfLLVM(self, mock_check_output): + mock_check_output.return_value = "a123testhash1 path/to/main\n" + self.assertEqual(LLVMHash().GetTopOfTrunkGitHash(), "a123testhash1") + mock_check_output.assert_called_once() + + @mock.patch.object(subprocess, "Popen") + def testCheckoutBranch(self, mock_popen): + mock_popen.return_value = mock.MagicMock( + communicate=lambda: (None, None), returncode=0 + ) + get_llvm_hash.CheckoutBranch("fake/src_dir", "fake_branch") + self.assertEqual( + mock_popen.call_args_list[0][0], + (["git", "-C", "fake/src_dir", "checkout", "fake_branch"],), + ) + self.assertEqual( + mock_popen.call_args_list[1][0], + (["git", "-C", "fake/src_dir", 
"pull"],), + ) + + def testParseLLVMMajorVersion(self): + cmakelist_42 = ( + "set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)\n" + "if(NOT DEFINED LLVM_VERSION_MAJOR)\n" + " set(LLVM_VERSION_MAJOR 42)\n" + "endif()" + ) + self.assertEqual( + get_llvm_hash.ParseLLVMMajorVersion(cmakelist_42), "42" + ) + + def testParseLLVMMajorVersionInvalid(self): + invalid_cmakelist = "invalid cmakelist.txt contents" + with self.assertRaises(ValueError): + get_llvm_hash.ParseLLVMMajorVersion(invalid_cmakelist) + + @mock.patch.object(get_llvm_hash, "GetAndUpdateLLVMProjectInLLVMTools") + @mock.patch.object(get_llvm_hash, "ParseLLVMMajorVersion") + @mock.patch.object(get_llvm_hash, "CheckCommand") + @mock.patch.object(get_llvm_hash, "CheckoutBranch") + @mock.patch( + "get_llvm_hash.open", + mock.mock_open(read_data="mock contents"), + create=True, + ) + def testGetLLVMMajorVersion( + self, + mock_checkout_branch, + mock_git_checkout, + mock_major_version, + mock_llvm_project_path, + ): + mock_llvm_project_path.return_value = "path/to/llvm-project" + mock_major_version.return_value = "1234" + self.assertEqual(get_llvm_hash.GetLLVMMajorVersion("314159265"), "1234") + # Second call should be memoized + self.assertEqual(get_llvm_hash.GetLLVMMajorVersion("314159265"), "1234") + mock_llvm_project_path.assert_called_once() + mock_major_version.assert_called_with("mock contents") + mock_git_checkout.assert_called_once_with( + ["git", "-C", "path/to/llvm-project", "checkout", "314159265"] + ) + mock_checkout_branch.assert_called_once_with( + "path/to/llvm-project", "main" + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/llvm_tools/get_upstream_patch.py b/llvm_tools/get_upstream_patch.py index b5b61153..d882fdc7 100755 --- a/llvm_tools/get_upstream_patch.py +++ b/llvm_tools/get_upstream_patch.py @@ -34,483 +34,567 @@ Example Usage: class CherrypickError(ValueError): - """A ValueError that highlights the cherry-pick has been seen before""" + """A ValueError that highlights the cherry-pick has been seen before""" class CherrypickVersionError(ValueError): - """A ValueError that highlights the cherry-pick is before the start_sha""" + """A ValueError that highlights the cherry-pick is before the start_sha""" class PatchApplicationError(ValueError): - """A ValueError indicating that a test patch application was unsuccessful""" - - -def validate_patch_application(llvm_dir: Path, svn_version: int, - patches_json_fp: Path, patch_props): - - start_sha = get_llvm_hash.GetGitHashFrom(llvm_dir, svn_version) - subprocess.run(['git', '-C', llvm_dir, 'checkout', start_sha], check=True) - - predecessor_apply_results = patch_utils.apply_all_from_json( - svn_version, llvm_dir, patches_json_fp, continue_on_failure=True) - - if predecessor_apply_results.failed_patches: - logging.error('Failed to apply patches from PATCHES.json:') - for p in predecessor_apply_results.failed_patches: - logging.error(f'Patch title: {p.title()}') - raise PatchApplicationError('Failed to apply patch from PATCHES.json') - - patch_entry = patch_utils.PatchEntry.from_dict(patches_json_fp.parent, - patch_props) - test_apply_result = patch_entry.test_apply(Path(llvm_dir)) - - if not test_apply_result: - logging.error('Could not apply requested patch') - logging.error(test_apply_result.failure_info()) - raise PatchApplicationError( - f'Failed to apply patch: {patch_props["metadata"]["title"]}') - - -def add_patch(patches_json_path: str, patches_dir: str, - relative_patches_dir: str, start_version: git_llvm_rev.Rev, - llvm_dir: str, rev: 
t.Union[git_llvm_rev.Rev, str], sha: str,
-              package: str, platforms: t.List[str]):
-  """Gets the start and end intervals in 'json_file'.
-
-  Args:
-    patches_json_path: The absolute path to PATCHES.json.
-    patches_dir: The aboslute path to the directory patches are in.
-    relative_patches_dir: The relative path to PATCHES.json.
-    start_version: The base LLVM revision this patch applies to.
-    llvm_dir: The path to LLVM checkout.
-    rev: An LLVM revision (git_llvm_rev.Rev) for a cherrypicking, or a
-      differential revision (str) otherwise.
-    sha: The LLVM git sha that corresponds to the patch. For differential
-      revisions, the git sha from the local commit created by 'arc patch'
-      is used.
-    package: The LLVM project name this patch applies to.
-    platforms: List of platforms this patch applies to.
-
-  Raises:
-    CherrypickError: A ValueError that highlights the cherry-pick has been
-      seen before.
-    CherrypickRangeError: A ValueError that's raised when the given patch
-      is from before the start_sha.
-  """
-
-  is_cherrypick = isinstance(rev, git_llvm_rev.Rev)
-  if is_cherrypick:
-    file_name = f'{sha}.patch'
-  else:
-    file_name = f'{rev}.patch'
-  rel_patch_path = os.path.join(relative_patches_dir, file_name)
-
-  # Check that we haven't grabbed a patch range that's nonsensical.
-  end_vers = rev.number if isinstance(rev, git_llvm_rev.Rev) else None
-  if end_vers is not None and end_vers <= start_version.number:
-    raise CherrypickVersionError(
-        f'`until` version {end_vers} is earlier or equal to'
-        f' `from` version {start_version.number} for patch'
-        f' {rel_patch_path}')
-
-  with open(patches_json_path, encoding='utf-8') as f:
-    patches_json = json.load(f)
-
-  for p in patches_json:
-    rel_path = p['rel_patch_path']
-    if rel_path == rel_patch_path:
-      raise CherrypickError(
-          f'Patch at {rel_path} already exists in PATCHES.json')
+    """A ValueError indicating that a test patch application was unsuccessful"""
+
+
+def validate_patch_application(
+    llvm_dir: Path, svn_version: int, patches_json_fp: Path, patch_props
+):
+
+    start_sha = get_llvm_hash.GetGitHashFrom(llvm_dir, svn_version)
+    subprocess.run(["git", "-C", llvm_dir, "checkout", start_sha], check=True)
+
+    predecessor_apply_results = patch_utils.apply_all_from_json(
+        svn_version, llvm_dir, patches_json_fp, continue_on_failure=True
+    )
+
+    if predecessor_apply_results.failed_patches:
+        logging.error("Failed to apply patches from PATCHES.json:")
+        for p in predecessor_apply_results.failed_patches:
+            logging.error(f"Patch title: {p.title()}")
+        raise PatchApplicationError("Failed to apply patch from PATCHES.json")
+
+    patch_entry = patch_utils.PatchEntry.from_dict(
+        patches_json_fp.parent, patch_props
+    )
+    test_apply_result = patch_entry.test_apply(Path(llvm_dir))
+
+    if not test_apply_result:
+        logging.error("Could not apply requested patch")
+        logging.error(test_apply_result.failure_info())
+        raise PatchApplicationError(
+            f'Failed to apply patch: {patch_props["metadata"]["title"]}'
+        )
+
+
+def add_patch(
+    patches_json_path: str,
+    patches_dir: str,
+    relative_patches_dir: str,
+    start_version: git_llvm_rev.Rev,
+    llvm_dir: str,
+    rev: t.Union[git_llvm_rev.Rev, str],
+    sha: str,
+    package: str,
+    platforms: t.List[str],
+):
+    """Adds a patch and its metadata entry to PATCHES.json.
+
+    Args:
+        patches_json_path: The absolute path to PATCHES.json.
+        patches_dir: The absolute path to the directory patches are in.
+        relative_patches_dir: The patches directory, relative to PATCHES.json.
+        start_version: The base LLVM revision this patch applies to.
+        llvm_dir: The path to LLVM checkout.
+        rev: An LLVM revision (git_llvm_rev.Rev) for a cherry-pick, or a
+            differential revision (str) otherwise.
+        sha: The LLVM git sha that corresponds to the patch. For differential
+            revisions, the git sha from the local commit created by 'arc patch'
+            is used.
+        package: The LLVM project name this patch applies to.
+        platforms: List of platforms this patch applies to.
+
+    Raises:
+        CherrypickError: A ValueError that highlights the cherry-pick has been
+            seen before.
+        CherrypickVersionError: A ValueError that's raised when the given patch
+            is from before the start_sha.
+    """
+
+    is_cherrypick = isinstance(rev, git_llvm_rev.Rev)
     if is_cherrypick:
-    if sha in rel_path:
-      logging.warning(
-          'Similarly-named patch already exists in PATCHES.json: %r',
-          rel_path)
-
-  with open(os.path.join(patches_dir, file_name), 'wb') as f:
-    cmd = ['git', 'show', sha]
-    # Only apply the part of the patch that belongs to this package, expect
-    # LLVM. This is because some packages are built with LLVM ebuild on X86 but
-    # not on the other architectures. e.g. compiler-rt. Therefore always apply
-    # the entire patch to LLVM ebuild as a workaround.
-    if package != 'llvm':
-      cmd.append(package_to_project(package))
-    subprocess.check_call(cmd, stdout=f, cwd=llvm_dir)
-
-    commit_subject = subprocess.check_output(
-        ['git', 'log', '-n1', '--format=%s', sha],
-        cwd=llvm_dir,
-        encoding='utf-8')
-    patch_props = {
-        'rel_patch_path': rel_patch_path,
-        'metadata': {
-            'title': commit_subject.strip(),
-            'info': [],
-        },
-        'platforms': sorted(platforms),
-        'version_range': {
-            'from': start_version.number,
-            'until': end_vers,
-        },
-    }
-
-    with patch_utils.git_clean_context(Path(llvm_dir)):
-      validate_patch_application(Path(llvm_dir), start_version.number,
-                                 Path(patches_json_path), patch_props)
-
-    patches_json.append(patch_props)
-
-    temp_file = patches_json_path + '.tmp'
-    with open(temp_file, 'w', encoding='utf-8') as f:
-      json.dump(patches_json,
-                f,
-                indent=4,
-                separators=(',', ': '),
-                sort_keys=True)
-      f.write('\n')
-    os.rename(temp_file, patches_json_path)
+        file_name = f"{sha}.patch"
+    else:
+        file_name = f"{rev}.patch"
+    rel_patch_path = os.path.join(relative_patches_dir, file_name)
+
+    # Check that we haven't grabbed a patch range that's nonsensical.
+    end_vers = rev.number if isinstance(rev, git_llvm_rev.Rev) else None
+    if end_vers is not None and end_vers <= start_version.number:
+        raise CherrypickVersionError(
+            f"`until` version {end_vers} is earlier or equal to"
+            f" `from` version {start_version.number} for patch"
+            f" {rel_patch_path}"
+        )
+
+    with open(patches_json_path, encoding="utf-8") as f:
+        patches_json = json.load(f)
+
+    for p in patches_json:
+        rel_path = p["rel_patch_path"]
+        if rel_path == rel_patch_path:
+            raise CherrypickError(
+                f"Patch at {rel_path} already exists in PATCHES.json"
+            )
+        if is_cherrypick:
+            if sha in rel_path:
+                logging.warning(
+                    "Similarly-named patch already exists in PATCHES.json: %r",
+                    rel_path,
+                )
+
+    with open(os.path.join(patches_dir, file_name), "wb") as f:
+        cmd = ["git", "show", sha]
+        # Only apply the part of the patch that belongs to this package, except
+        # LLVM. This is because some packages are built with LLVM ebuild on X86
+        # but not on the other architectures. e.g. compiler-rt. Therefore always
+        # apply the entire patch to LLVM ebuild as a workaround.
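+        # For example, for compiler-rt the command becomes
+        # `git show <sha> compiler-rt`, which limits the patch to files under
+        # the compiler-rt/ project directory.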
+ if package != "llvm": + cmd.append(package_to_project(package)) + subprocess.check_call(cmd, stdout=f, cwd=llvm_dir) + + commit_subject = subprocess.check_output( + ["git", "log", "-n1", "--format=%s", sha], + cwd=llvm_dir, + encoding="utf-8", + ) + patch_props = { + "rel_patch_path": rel_patch_path, + "metadata": { + "title": commit_subject.strip(), + "info": [], + }, + "platforms": sorted(platforms), + "version_range": { + "from": start_version.number, + "until": end_vers, + }, + } + + with patch_utils.git_clean_context(Path(llvm_dir)): + validate_patch_application( + Path(llvm_dir), + start_version.number, + Path(patches_json_path), + patch_props, + ) + + patches_json.append(patch_props) + + temp_file = patches_json_path + ".tmp" + with open(temp_file, "w", encoding="utf-8") as f: + json.dump( + patches_json, f, indent=4, separators=(",", ": "), sort_keys=True + ) + f.write("\n") + os.rename(temp_file, patches_json_path) def parse_ebuild_for_assignment(ebuild_path: str, var_name: str) -> str: - # '_pre' filters the LLVM 9.0 ebuild, which we never want to target, from - # this list. - candidates = [ - x for x in os.listdir(ebuild_path) - if x.endswith('.ebuild') and '_pre' in x - ] - - if not candidates: - raise ValueError('No ebuilds found under %r' % ebuild_path) - - ebuild = os.path.join(ebuild_path, max(candidates)) - with open(ebuild, encoding='utf-8') as f: - var_name_eq = var_name + '=' - for orig_line in f: - if not orig_line.startswith(var_name_eq): - continue - - # We shouldn't see much variety here, so do the simplest thing possible. - line = orig_line[len(var_name_eq):] - # Remove comments - line = line.split('#')[0] - # Remove quotes - line = shlex.split(line) - if len(line) != 1: - raise ValueError('Expected exactly one quoted value in %r' % orig_line) - return line[0].strip() - - raise ValueError('No %s= line found in %r' % (var_name, ebuild)) + # '_pre' filters the LLVM 9.0 ebuild, which we never want to target, from + # this list. + candidates = [ + x + for x in os.listdir(ebuild_path) + if x.endswith(".ebuild") and "_pre" in x + ] + + if not candidates: + raise ValueError("No ebuilds found under %r" % ebuild_path) + + ebuild = os.path.join(ebuild_path, max(candidates)) + with open(ebuild, encoding="utf-8") as f: + var_name_eq = var_name + "=" + for orig_line in f: + if not orig_line.startswith(var_name_eq): + continue + + # We shouldn't see much variety here, so do the simplest thing possible. + line = orig_line[len(var_name_eq) :] + # Remove comments + line = line.split("#")[0] + # Remove quotes + line = shlex.split(line) + if len(line) != 1: + raise ValueError( + "Expected exactly one quoted value in %r" % orig_line + ) + return line[0].strip() + + raise ValueError("No %s= line found in %r" % (var_name, ebuild)) # Resolves a git ref (or similar) to a LLVM SHA. 
def resolve_llvm_ref(llvm_dir: str, sha: str) -> str:
-  return subprocess.check_output(
-      ['git', 'rev-parse', sha],
-      encoding='utf-8',
-      cwd=llvm_dir,
-  ).strip()
+    return subprocess.check_output(
+        ["git", "rev-parse", sha],
+        encoding="utf-8",
+        cwd=llvm_dir,
+    ).strip()


# Get the package name of an LLVM project
def project_to_package(project: str) -> str:
-  if project == 'libunwind':
-    return 'llvm-libunwind'
-  return project
+    if project == "libunwind":
+        return "llvm-libunwind"
+    return project


# Get the LLVM project name of a package
def package_to_project(package: str) -> str:
-  if package == 'llvm-libunwind':
-    return 'libunwind'
-  return package
+    if package == "llvm-libunwind":
+        return "libunwind"
+    return package


# Get the LLVM projects changed in the specified sha
def get_package_names(sha: str, llvm_dir: str) -> list:
-  paths = subprocess.check_output(
-      ['git', 'show', '--name-only', '--format=', sha],
-      cwd=llvm_dir,
-      encoding='utf-8').splitlines()
-  # Some LLVM projects are built by LLVM ebuild on X86, so always apply the
-  # patch to LLVM ebuild
-  packages = {'llvm'}
-  # Detect if there are more packages to apply the patch to
-  for path in paths:
-    package = project_to_package(path.split('/')[0])
-    if package in ('compiler-rt', 'libcxx', 'libcxxabi', 'llvm-libunwind'):
-      packages.add(package)
-  packages = list(sorted(packages))
-  return packages
-
-
-def create_patch_for_packages(packages: t.List[str], symlinks: t.List[str],
-                              start_rev: git_llvm_rev.Rev,
-                              rev: t.Union[git_llvm_rev.Rev, str], sha: str,
-                              llvm_dir: str, platforms: t.List[str]):
-  """Create a patch and add its metadata for each package"""
-  for package, symlink in zip(packages, symlinks):
-    symlink_dir = os.path.dirname(symlink)
-    patches_json_path = os.path.join(symlink_dir, 'files/PATCHES.json')
-    relative_patches_dir = 'cherry' if package == 'llvm' else ''
-    patches_dir = os.path.join(symlink_dir, 'files', relative_patches_dir)
-    logging.info('Getting %s (%s) into %s', rev, sha, package)
-    add_patch(patches_json_path,
-              patches_dir,
-              relative_patches_dir,
-              start_rev,
-              llvm_dir,
-              rev,
-              sha,
-              package,
-              platforms=platforms)
-
-
-def make_cl(symlinks_to_uprev: t.List[str], llvm_symlink_dir: str, branch: str,
-            commit_messages: t.List[str], reviewers: t.Optional[t.List[str]],
-            cc: t.Optional[t.List[str]]):
-  symlinks_to_uprev = sorted(set(symlinks_to_uprev))
-  for symlink in symlinks_to_uprev:
-    update_chromeos_llvm_hash.UprevEbuildSymlink(symlink)
-    subprocess.check_output(['git', 'add', '--all'],
-                            cwd=os.path.dirname(symlink))
-  git.UploadChanges(llvm_symlink_dir, branch, commit_messages, reviewers, cc)
-  git.DeleteBranch(llvm_symlink_dir, branch)
+    paths = subprocess.check_output(
+        ["git", "show", "--name-only", "--format=", sha],
+        cwd=llvm_dir,
+        encoding="utf-8",
+    ).splitlines()
+    # Some LLVM projects are built by LLVM ebuild on X86, so always apply the
+    # patch to LLVM ebuild
+    packages = {"llvm"}
+    # Detect if there are more packages to apply the patch to
+    for path in paths:
+        package = project_to_package(path.split("/")[0])
+        if package in ("compiler-rt", "libcxx", "libcxxabi", "llvm-libunwind"):
+            packages.add(package)
+    packages = list(sorted(packages))
+    return packages


+def create_patch_for_packages(
+    packages: t.List[str],
+    symlinks: t.List[str],
+    start_rev: git_llvm_rev.Rev,
+    rev: t.Union[git_llvm_rev.Rev, str],
+    sha: str,
+    llvm_dir: str,
+    platforms: t.List[str],
+):
+    """Create a patch and add its metadata for each package"""
+    for package, symlink in zip(packages, 
symlinks): + symlink_dir = os.path.dirname(symlink) + patches_json_path = os.path.join(symlink_dir, "files/PATCHES.json") + relative_patches_dir = "cherry" if package == "llvm" else "" + patches_dir = os.path.join(symlink_dir, "files", relative_patches_dir) + logging.info("Getting %s (%s) into %s", rev, sha, package) + add_patch( + patches_json_path, + patches_dir, + relative_patches_dir, + start_rev, + llvm_dir, + rev, + sha, + package, + platforms=platforms, + ) + + +def make_cl( + symlinks_to_uprev: t.List[str], + llvm_symlink_dir: str, + branch: str, + commit_messages: t.List[str], + reviewers: t.Optional[t.List[str]], + cc: t.Optional[t.List[str]], +): + symlinks_to_uprev = sorted(set(symlinks_to_uprev)) + for symlink in symlinks_to_uprev: + update_chromeos_llvm_hash.UprevEbuildSymlink(symlink) + subprocess.check_output( + ["git", "add", "--all"], cwd=os.path.dirname(symlink) + ) + git.UploadChanges(llvm_symlink_dir, branch, commit_messages, reviewers, cc) + git.DeleteBranch(llvm_symlink_dir, branch) def resolve_symbolic_sha(start_sha: str, llvm_symlink_dir: str) -> str: - if start_sha == 'llvm': - return parse_ebuild_for_assignment(llvm_symlink_dir, 'LLVM_HASH') + if start_sha == "llvm": + return parse_ebuild_for_assignment(llvm_symlink_dir, "LLVM_HASH") - if start_sha == 'llvm-next': - return parse_ebuild_for_assignment(llvm_symlink_dir, 'LLVM_NEXT_HASH') + if start_sha == "llvm-next": + return parse_ebuild_for_assignment(llvm_symlink_dir, "LLVM_NEXT_HASH") - return start_sha + return start_sha def find_patches_and_make_cl( - chroot_path: str, patches: t.List[str], start_rev: git_llvm_rev.Rev, - llvm_config: git_llvm_rev.LLVMConfig, llvm_symlink_dir: str, - create_cl: bool, skip_dependencies: bool, - reviewers: t.Optional[t.List[str]], cc: t.Optional[t.List[str]], - platforms: t.List[str]): - - converted_patches = [ - _convert_patch(llvm_config, skip_dependencies, p) for p in patches - ] - potential_duplicates = _get_duplicate_shas(converted_patches) - if potential_duplicates: - err_msg = '\n'.join(f'{a.patch} == {b.patch}' - for a, b in potential_duplicates) - raise RuntimeError(f'Found Duplicate SHAs:\n{err_msg}') - - # CL Related variables, only used if `create_cl` - symlinks_to_uprev = [] - commit_messages = [ - 'llvm: get patches from upstream\n', - ] - branch = f'get-upstream-{datetime.now().strftime("%Y%m%d%H%M%S%f")}' - - if create_cl: - git.CreateBranch(llvm_symlink_dir, branch) - - for parsed_patch in converted_patches: - # Find out the llvm projects changed in this commit - packages = get_package_names(parsed_patch.sha, llvm_config.dir) - # Find out the ebuild symlinks of the corresponding ChromeOS packages - symlinks = chroot.GetChrootEbuildPaths(chroot_path, [ - 'sys-devel/llvm' if package == 'llvm' else 'sys-libs/' + package - for package in packages - ]) - symlinks = chroot.ConvertChrootPathsToAbsolutePaths(chroot_path, symlinks) - # Create a local patch for all the affected llvm projects - create_patch_for_packages(packages, - symlinks, - start_rev, - parsed_patch.rev, - parsed_patch.sha, - llvm_config.dir, - platforms=platforms) - if create_cl: - symlinks_to_uprev.extend(symlinks) + chroot_path: str, + patches: t.List[str], + start_rev: git_llvm_rev.Rev, + llvm_config: git_llvm_rev.LLVMConfig, + llvm_symlink_dir: str, + create_cl: bool, + skip_dependencies: bool, + reviewers: t.Optional[t.List[str]], + cc: t.Optional[t.List[str]], + platforms: t.List[str], +): + + converted_patches = [ + _convert_patch(llvm_config, skip_dependencies, p) for p in patches + ] + 
potential_duplicates = _get_duplicate_shas(converted_patches) + if potential_duplicates: + err_msg = "\n".join( + f"{a.patch} == {b.patch}" for a, b in potential_duplicates + ) + raise RuntimeError(f"Found Duplicate SHAs:\n{err_msg}") + + # CL Related variables, only used if `create_cl` + symlinks_to_uprev = [] + commit_messages = [ + "llvm: get patches from upstream\n", + ] + branch = f'get-upstream-{datetime.now().strftime("%Y%m%d%H%M%S%f")}' - commit_messages.extend([ - parsed_patch.git_msg(), - subprocess.check_output( - ['git', 'log', '-n1', '--oneline', parsed_patch.sha], - cwd=llvm_config.dir, - encoding='utf-8') - ]) - - if parsed_patch.is_differential: - subprocess.check_output(['git', 'reset', '--hard', 'HEAD^'], - cwd=llvm_config.dir) + if create_cl: + git.CreateBranch(llvm_symlink_dir, branch) + + for parsed_patch in converted_patches: + # Find out the llvm projects changed in this commit + packages = get_package_names(parsed_patch.sha, llvm_config.dir) + # Find out the ebuild symlinks of the corresponding ChromeOS packages + symlinks = chroot.GetChrootEbuildPaths( + chroot_path, + [ + "sys-devel/llvm" if package == "llvm" else "sys-libs/" + package + for package in packages + ], + ) + symlinks = chroot.ConvertChrootPathsToAbsolutePaths( + chroot_path, symlinks + ) + # Create a local patch for all the affected llvm projects + create_patch_for_packages( + packages, + symlinks, + start_rev, + parsed_patch.rev, + parsed_patch.sha, + llvm_config.dir, + platforms=platforms, + ) + if create_cl: + symlinks_to_uprev.extend(symlinks) + + commit_messages.extend( + [ + parsed_patch.git_msg(), + subprocess.check_output( + ["git", "log", "-n1", "--oneline", parsed_patch.sha], + cwd=llvm_config.dir, + encoding="utf-8", + ), + ] + ) + + if parsed_patch.is_differential: + subprocess.check_output( + ["git", "reset", "--hard", "HEAD^"], cwd=llvm_config.dir + ) - if create_cl: - make_cl(symlinks_to_uprev, llvm_symlink_dir, branch, commit_messages, - reviewers, cc) + if create_cl: + make_cl( + symlinks_to_uprev, + llvm_symlink_dir, + branch, + commit_messages, + reviewers, + cc, + ) @dataclasses.dataclass(frozen=True) class ParsedPatch: - """Class to keep track of bundled patch info.""" - patch: str - sha: str - is_differential: bool - rev: t.Union[git_llvm_rev.Rev, str] - - def git_msg(self) -> str: - if self.is_differential: - return f'\n\nreviews.llvm.org/{self.patch}\n' - return f'\n\nreviews.llvm.org/rG{self.sha}\n' - - -def _convert_patch(llvm_config: git_llvm_rev.LLVMConfig, - skip_dependencies: bool, patch: str) -> ParsedPatch: - """Extract git revision info from a patch. - - Args: - llvm_config: LLVM configuration object. - skip_dependencies: Pass --skip-dependecies for to `arc` - patch: A single patch referent string. - - Returns: - A [ParsedPatch] object. - """ - - # git hash should only have lower-case letters - is_differential = patch.startswith('D') - if is_differential: - subprocess.check_output( - [ - 'arc', 'patch', '--nobranch', - '--skip-dependencies' if skip_dependencies else '--revision', patch - ], - cwd=llvm_config.dir, + """Class to keep track of bundled patch info.""" + + patch: str + sha: str + is_differential: bool + rev: t.Union[git_llvm_rev.Rev, str] + + def git_msg(self) -> str: + if self.is_differential: + return f"\n\nreviews.llvm.org/{self.patch}\n" + return f"\n\nreviews.llvm.org/rG{self.sha}\n" + + +def _convert_patch( + llvm_config: git_llvm_rev.LLVMConfig, skip_dependencies: bool, patch: str +) -> ParsedPatch: + """Extract git revision info from a patch. 
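+
+    A patch referent is either an upstream git SHA or ref (a cherry-pick),
+    or a Phabricator differential name such as 'D1234'.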
+
+    Args:
+        llvm_config: LLVM configuration object.
+        skip_dependencies: Pass --skip-dependencies to `arc`
+        patch: A single patch referent string.
+
+    Returns:
+        A [ParsedPatch] object.
+    """
+
+    # git hash should only have lower-case letters
+    is_differential = patch.startswith("D")
+    if is_differential:
+        subprocess.check_output(
+            [
+                "arc",
+                "patch",
+                "--nobranch",
+                "--skip-dependencies" if skip_dependencies else "--revision",
+                patch,
+            ],
+            cwd=llvm_config.dir,
+        )
+        sha = resolve_llvm_ref(llvm_config.dir, "HEAD")
+        rev = patch
+    else:
+        sha = resolve_llvm_ref(llvm_config.dir, patch)
+        rev = git_llvm_rev.translate_sha_to_rev(llvm_config, sha)
+    return ParsedPatch(
+        patch=patch, sha=sha, rev=rev, is_differential=is_differential
+    )
-    sha = resolve_llvm_ref(llvm_config.dir, 'HEAD')
-    rev = patch
-  else:
-    sha = resolve_llvm_ref(llvm_config.dir, patch)
-    rev = git_llvm_rev.translate_sha_to_rev(llvm_config, sha)
-  return ParsedPatch(patch=patch,
-                     sha=sha,
-                     rev=rev,
-                     is_differential=is_differential)


def _get_duplicate_shas(
-    patches: t.List[ParsedPatch]) -> t.List[t.Tuple[ParsedPatch, ParsedPatch]]:
-  """Return a list of Patches which have duplicate SHA's"""
-  return [(left, right) for i, left in enumerate(patches)
-          for right in patches[i + 1:] if left.sha == right.sha]
-
-
-def get_from_upstream(chroot_path: str,
-                      create_cl: bool,
-                      start_sha: str,
-                      patches: t.List[str],
-                      platforms: t.List[str],
-                      skip_dependencies: bool = False,
-                      reviewers: t.List[str] = None,
-                      cc: t.List[str] = None):
-  llvm_symlink = chroot.ConvertChrootPathsToAbsolutePaths(
-      chroot_path, chroot.GetChrootEbuildPaths(chroot_path,
-                                               ['sys-devel/llvm']))[0]
-  llvm_symlink_dir = os.path.dirname(llvm_symlink)
-
-  git_status = subprocess.check_output(['git', 'status', '-s'],
-                                       cwd=llvm_symlink_dir,
-                                       encoding='utf-8')
-
-  if git_status:
-    error_path = os.path.dirname(os.path.dirname(llvm_symlink_dir))
-    raise ValueError(f'Uncommited changes detected in {error_path}')
-
-  start_sha = resolve_symbolic_sha(start_sha, llvm_symlink_dir)
-  logging.info('Base llvm hash == %s', start_sha)
-
-  llvm_config = git_llvm_rev.LLVMConfig(
-      remote='origin', dir=get_llvm_hash.GetAndUpdateLLVMProjectInLLVMTools())
-  start_sha = resolve_llvm_ref(llvm_config.dir, start_sha)
-
-  find_patches_and_make_cl(chroot_path=chroot_path,
-                           patches=patches,
-                           platforms=platforms,
-                           start_rev=git_llvm_rev.translate_sha_to_rev(
-                               llvm_config, start_sha),
-                           llvm_config=llvm_config,
-                           llvm_symlink_dir=llvm_symlink_dir,
-                           create_cl=create_cl,
-                           skip_dependencies=skip_dependencies,
-                           reviewers=reviewers,
-                           cc=cc)
-  logging.info('Complete.')
+    patches: t.List[ParsedPatch],
+) -> t.List[t.Tuple[ParsedPatch, ParsedPatch]]:
+    """Return a list of Patches which have duplicate SHAs"""
+    return [
+        (left, right)
+        for i, left in enumerate(patches)
+        for right in patches[i + 1 :]
+        if left.sha == right.sha
+    ]
+
+
+def get_from_upstream(
+    chroot_path: str,
+    create_cl: bool,
+    start_sha: str,
+    patches: t.List[str],
+    platforms: t.List[str],
+    skip_dependencies: bool = False,
+    reviewers: t.List[str] = None,
+    cc: t.List[str] = None,
+):
+    llvm_symlink = chroot.ConvertChrootPathsToAbsolutePaths(
+        chroot_path,
+        chroot.GetChrootEbuildPaths(chroot_path, ["sys-devel/llvm"]),
+    )[0]
+    llvm_symlink_dir = os.path.dirname(llvm_symlink)
+
+    git_status = subprocess.check_output(
+        ["git", "status", "-s"], cwd=llvm_symlink_dir, encoding="utf-8"
+    )
+
+    if git_status:
+        error_path = os.path.dirname(os.path.dirname(llvm_symlink_dir))
+        raise ValueError(f"Uncommitted changes detected in {error_path}")
+
+    start_sha = resolve_symbolic_sha(start_sha, llvm_symlink_dir)
+    logging.info("Base llvm hash == %s", start_sha)
+
+    llvm_config = git_llvm_rev.LLVMConfig(
+        remote="origin", dir=get_llvm_hash.GetAndUpdateLLVMProjectInLLVMTools()
+    )
+    start_sha = resolve_llvm_ref(llvm_config.dir, start_sha)
+
+    find_patches_and_make_cl(
+        chroot_path=chroot_path,
+        patches=patches,
+        platforms=platforms,
+        start_rev=git_llvm_rev.translate_sha_to_rev(llvm_config, start_sha),
+        llvm_config=llvm_config,
+        llvm_symlink_dir=llvm_symlink_dir,
+        create_cl=create_cl,
+        skip_dependencies=skip_dependencies,
+        reviewers=reviewers,
+        cc=cc,
+    )
+    logging.info("Complete.")


def main():
-  chroot.VerifyOutsideChroot()
-  logging.basicConfig(
-      format='%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s',
-      level=logging.INFO,
-  )
-
-  parser = argparse.ArgumentParser(
-      description=__doc__,
-      formatter_class=argparse.RawDescriptionHelpFormatter,
-      epilog=__DOC_EPILOGUE)
-  parser.add_argument('--chroot_path',
-                      default=os.path.join(os.path.expanduser('~'),
-                                           'chromiumos'),
-                      help='the path to the chroot (default: %(default)s)')
-  parser.add_argument(
-      '--start_sha',
-      default='llvm-next',
-      help='LLVM SHA that the patch should start applying at. You can specify '
-      '"llvm" or "llvm-next", as well. Defaults to %(default)s.')
-  parser.add_argument('--sha',
-                      action='append',
-                      default=[],
-                      help='The LLVM git SHA to cherry-pick.')
-  parser.add_argument(
-      '--differential',
-      action='append',
-      default=[],
-      help='The LLVM differential revision to apply. Example: D1234.'
-      ' Cannot be used for changes already merged upstream; use --sha'
-      ' instead for those.')
-  parser.add_argument(
-      '--platform',
-      action='append',
-      required=True,
-      help='Apply this patch to the give platform. Common options include '
-      '"chromiumos" and "android". Can be specified multiple times to '
-      'apply to multiple platforms')
-  parser.add_argument('--create_cl',
-                      action='store_true',
-                      help='Automatically create a CL if specified')
-  parser.add_argument(
-      '--skip_dependencies',
-      action='store_true',
-      help="Skips a LLVM differential revision's dependencies. Only valid "
-      'when --differential appears exactly once.')
-  args = parser.parse_args()
-
-  if not (args.sha or args.differential):
-    parser.error('--sha or --differential required')
-
-  if args.skip_dependencies and len(args.differential) != 1:
-    parser.error("--skip_dependencies is only valid when there's exactly one "
-                 'supplied differential')
-
-  get_from_upstream(
-      chroot_path=args.chroot_path,
-      create_cl=args.create_cl,
-      start_sha=args.start_sha,
-      patches=args.sha + args.differential,
-      skip_dependencies=args.skip_dependencies,
-      platforms=args.platform,
-  )
-
-
-if __name__ == '__main__':
-  sys.exit(main())
+    chroot.VerifyOutsideChroot()
+    logging.basicConfig(
+        format="%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s",
+        level=logging.INFO,
+    )
+
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog=__DOC_EPILOGUE,
+    )
+    parser.add_argument(
+        "--chroot_path",
+        default=os.path.join(os.path.expanduser("~"), "chromiumos"),
+        help="the path to the chroot (default: %(default)s)",
+    )
+    parser.add_argument(
+        "--start_sha",
+        default="llvm-next",
+        help="LLVM SHA that the patch should start applying at. You can specify "
+        '"llvm" or "llvm-next", as well. Defaults to %(default)s.',
+    )
+    parser.add_argument(
+        "--sha",
+        action="append",
+        default=[],
+        help="The LLVM git SHA to cherry-pick.",
+    )
+    parser.add_argument(
+        "--differential",
+        action="append",
+        default=[],
+        help="The LLVM differential revision to apply. Example: D1234."
+        " Cannot be used for changes already merged upstream; use --sha"
+        " instead for those.",
+    )
+    parser.add_argument(
+        "--platform",
+        action="append",
+        required=True,
+        help="Apply this patch to the given platform. Common options include "
+        '"chromiumos" and "android". Can be specified multiple times to '
+        "apply to multiple platforms",
+    )
+    parser.add_argument(
+        "--create_cl",
+        action="store_true",
+        help="Automatically create a CL if specified",
+    )
+    parser.add_argument(
+        "--skip_dependencies",
+        action="store_true",
+        help="Skips a LLVM differential revision's dependencies. Only valid "
+        "when --differential appears exactly once.",
+    )
+    args = parser.parse_args()
+
+    if not (args.sha or args.differential):
+        parser.error("--sha or --differential required")
+
+    if args.skip_dependencies and len(args.differential) != 1:
+        parser.error(
+            "--skip_dependencies is only valid when there's exactly one "
+            "supplied differential"
+        )
+
+    get_from_upstream(
+        chroot_path=args.chroot_path,
+        create_cl=args.create_cl,
+        start_sha=args.start_sha,
+        patches=args.sha + args.differential,
+        skip_dependencies=args.skip_dependencies,
+        platforms=args.platform,
+    )
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/llvm_tools/git.py b/llvm_tools/git.py
index ef22c7d4..0fe4cb63 100755
--- a/llvm_tools/git.py
+++ b/llvm_tools/git.py
@@ -14,122 +14,126 @@ import re
 import subprocess
 import tempfile
 
-CommitContents = collections.namedtuple('CommitContents', ['url', 'cl_number'])
+
+CommitContents = collections.namedtuple("CommitContents", ["url", "cl_number"])


def InChroot():
-  """Returns True if currently in the chroot."""
-  return 'CROS_WORKON_SRCROOT' in os.environ
+    """Returns True if currently in the chroot."""
+    return "CROS_WORKON_SRCROOT" in os.environ


def VerifyOutsideChroot():
-  """Checks whether the script invoked was executed in the chroot.
+    """Checks that the script was run outside the chroot.
 
-  Raises:
-    AssertionError: The script was run inside the chroot.
-  """
+    Raises:
+        AssertionError: The script was run inside the chroot.
+    """
 
-  assert not InChroot(), 'Script should be run outside the chroot.'
+    assert not InChroot(), "Script should be run outside the chroot."


def CreateBranch(repo, branch):
-  """Creates a branch in the given repo.
+    """Creates a branch in the given repo.
 
-  Args:
-    repo: The absolute path to the repo.
-    branch: The name of the branch to create.
+    Args:
+        repo: The absolute path to the repo.
+        branch: The name of the branch to create.
 
-  Raises:
-    ValueError: Failed to create a repo in that directory.
-  """
+    Raises:
+        ValueError: An invalid directory path was provided.
+    """
 
-  if not os.path.isdir(repo):
-    raise ValueError('Invalid directory path provided: %s' % repo)
+    if not os.path.isdir(repo):
+        raise ValueError("Invalid directory path provided: %s" % repo)
 
-  subprocess.check_output(['git', '-C', repo, 'reset', 'HEAD', '--hard'])
+    subprocess.check_output(["git", "-C", repo, "reset", "HEAD", "--hard"])
 
-  subprocess.check_output(['repo', 'start', branch], cwd=repo)
+    subprocess.check_output(["repo", "start", branch], cwd=repo)


def DeleteBranch(repo, branch):
-  """Deletes a branch in the given repo.
+    """Deletes a branch in the given repo.
-  Args:
-    repo: The absolute path of the repo.
-    branch: The name of the branch to delete.
+    Args:
+        repo: The absolute path of the repo.
+        branch: The name of the branch to delete.
 
-  Raises:
-    ValueError: Failed to delete the repo in that directory.
-  """
+    Raises:
+        ValueError: An invalid directory path was provided.
+    """
 
-  if not os.path.isdir(repo):
-    raise ValueError('Invalid directory path provided: %s' % repo)
+    if not os.path.isdir(repo):
+        raise ValueError("Invalid directory path provided: %s" % repo)
 
-  subprocess.check_output(['git', '-C', repo, 'checkout', 'cros/main'])
+    subprocess.check_output(["git", "-C", repo, "checkout", "cros/main"])
 
-  subprocess.check_output(['git', '-C', repo, 'reset', 'HEAD', '--hard'])
+    subprocess.check_output(["git", "-C", repo, "reset", "HEAD", "--hard"])
 
-  subprocess.check_output(['git', '-C', repo, 'branch', '-D', branch])
+    subprocess.check_output(["git", "-C", repo, "branch", "-D", branch])


def UploadChanges(repo, branch, commit_messages, reviewers=None, cc=None):
-  """Uploads the changes in the specifed branch of the given repo for review.
-
-  Args:
-    repo: The absolute path to the repo where changes were made.
-    branch: The name of the branch to upload.
-    commit_messages: A string of commit message(s) (i.e. '[message]'
-      of the changes made.
-    reviewers: A list of reviewers to add to the CL.
-    cc: A list of contributors to CC about the CL.
-
-  Returns:
-    A nametuple that has two (key, value) pairs, where the first pair is the
-    Gerrit commit URL and the second pair is the change list number.
-
-  Raises:
-    ValueError: Failed to create a commit or failed to upload the
-      changes for review.
-  """
-
-  if not os.path.isdir(repo):
-    raise ValueError('Invalid path provided: %s' % repo)
-
-  # Create a git commit.
-  with tempfile.NamedTemporaryFile(mode='w+t') as f:
-    f.write('\n'.join(commit_messages))
-    f.flush()
-
-    subprocess.check_output(['git', 'commit', '-F', f.name], cwd=repo)
-
-  # Upload the changes for review.
-  git_args = [
-      'repo',
-      'upload',
-      '--yes',
-      f'--reviewers={",".join(reviewers)}' if reviewers else '--ne',
-      '--no-verify',
-      f'--br={branch}',
-  ]
-
-  if cc:
-    git_args.append(f'--cc={",".join(cc)}')
-
-  out = subprocess.check_output(
-      git_args,
-      stderr=subprocess.STDOUT,
-      cwd=repo,
-      encoding='utf-8',
-  )
-
-  print(out)
-
-  found_url = re.search(
-      r'https://chromium-review.googlesource.com/c/'
-      r'chromiumos/overlays/chromiumos-overlay/\+/([0-9]+)', out.rstrip())
-
-  if not found_url:
-    raise ValueError('Failed to find change list URL.')
-
-  return CommitContents(url=found_url.group(0),
-                        cl_number=int(found_url.group(1)))
+    """Uploads the changes in the specified branch of the given repo for review.
+
+    Args:
+        repo: The absolute path to the repo where changes were made.
+        branch: The name of the branch to upload.
+        commit_messages: A string of commit message(s) (i.e. '[message]')
+            of the changes made.
+        reviewers: A list of reviewers to add to the CL.
+        cc: A list of contributors to CC about the CL.
+
+    Returns:
+        A namedtuple that has two (key, value) pairs, where the first pair is
+        the Gerrit commit URL and the second pair is the change list number.
+
+    Raises:
+        ValueError: Failed to create a commit or failed to upload the
+            changes for review.
+    """
+
+    if not os.path.isdir(repo):
+        raise ValueError("Invalid path provided: %s" % repo)
+
+    # Create a git commit.
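+    # Using `git commit -F <file>` with a temporary file keeps multi-line
+    # commit messages intact without any shell quoting concerns.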
+    with tempfile.NamedTemporaryFile(mode="w+t") as f:
+        f.write("\n".join(commit_messages))
+        f.flush()
+
+        subprocess.check_output(["git", "commit", "-F", f.name], cwd=repo)
+
+    # Upload the changes for review.
+    git_args = [
+        "repo",
+        "upload",
+        "--yes",
+        f'--reviewers={",".join(reviewers)}' if reviewers else "--ne",
+        "--no-verify",
+        f"--br={branch}",
+    ]
+
+    if cc:
+        git_args.append(f'--cc={",".join(cc)}')
+
+    out = subprocess.check_output(
+        git_args,
+        stderr=subprocess.STDOUT,
+        cwd=repo,
+        encoding="utf-8",
+    )
+
+    print(out)
+
+    found_url = re.search(
+        r"https://chromium-review.googlesource.com/c/"
+        r"chromiumos/overlays/chromiumos-overlay/\+/([0-9]+)",
+        out.rstrip(),
+    )
+
+    if not found_url:
+        raise ValueError("Failed to find change list URL.")
+
+    return CommitContents(
+        url=found_url.group(0), cl_number=int(found_url.group(1))
+    )
diff --git a/llvm_tools/git_llvm_rev.py b/llvm_tools/git_llvm_rev.py
index 3f752210..283a3920 100755
--- a/llvm_tools/git_llvm_rev.py
+++ b/llvm_tools/git_llvm_rev.py
@@ -18,7 +18,8 @@ import subprocess
 import sys
 import typing as t
 
-MAIN_BRANCH = 'main'
+
+MAIN_BRANCH = "main"
 
 # Note that after base_llvm_sha, we reach The Wild West(TM) of commits.
 # So reasonable input that could break us includes:
@@ -33,350 +34,375 @@ MAIN_BRANCH = 'main'
 # While saddening, this is something we should probably try to handle
 # reasonably.
 base_llvm_revision = 375505
-base_llvm_sha = '186155b89c2d2a2f62337081e3ca15f676c9434b'
+base_llvm_sha = "186155b89c2d2a2f62337081e3ca15f676c9434b"
 
 # Represents an LLVM git checkout:
 #  - |dir| is the directory of the LLVM checkout
 #  - |remote| is the name of the LLVM remote. Generally it's "origin".
-LLVMConfig = t.NamedTuple('LLVMConfig', (('remote', str), ('dir', str)))
+LLVMConfig = t.NamedTuple("LLVMConfig", (("remote", str), ("dir", str)))


-class Rev(t.NamedTuple('Rev', (('branch', str), ('number', int)))):
-  """Represents a LLVM 'revision', a shorthand identifies a LLVM commit."""
+class Rev(t.NamedTuple("Rev", (("branch", str), ("number", int)))):
+    """Represents an LLVM 'revision', a shorthand for an LLVM commit."""
 
-  @staticmethod
-  def parse(rev: str) -> 'Rev':
-    """Parses a Rev from the given string.
+    @staticmethod
+    def parse(rev: str) -> "Rev":
+        """Parses a Rev from the given string.
 
-    Raises a ValueError on a failed parse.
-    """
-    # Revs are parsed into (${branch_name}, r${commits_since_base_commit})
-    # pairs.
-    #
-    # We support r${commits_since_base_commit} as shorthand for
-    # (main, r${commits_since_base_commit}).
+        Raises a ValueError on a failed parse.
+        """
+        # Revs are parsed into (${branch_name}, r${commits_since_base_commit})
+        # pairs.
+        #
+        # We support r${commits_since_base_commit} as shorthand for
+        # (main, r${commits_since_base_commit}).
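+        #
+        # For example, 'r375505' parses to Rev('main', 375505), and
+        # '(release/10.x, r12345)' parses to Rev('release/10.x', 12345).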
+ if rev.startswith("r"): + branch_name = MAIN_BRANCH + rev_string = rev[1:] + else: + match = re.match(r"\((.+), r(\d+)\)", rev) + if not match: + raise ValueError("%r isn't a valid revision" % rev) - branch_name, rev_string = match.groups() + branch_name, rev_string = match.groups() - return Rev(branch=branch_name, number=int(rev_string)) + return Rev(branch=branch_name, number=int(rev_string)) - def __str__(self) -> str: - branch_name, number = self - if branch_name == MAIN_BRANCH: - return 'r%d' % number - return '(%s, r%d)' % (branch_name, number) + def __str__(self) -> str: + branch_name, number = self + if branch_name == MAIN_BRANCH: + return "r%d" % number + return "(%s, r%d)" % (branch_name, number) def is_git_sha(xs: str) -> bool: - """Returns whether the given string looks like a valid git commit SHA.""" - return len(xs) > 6 and len(xs) <= 40 and all( - x.isdigit() or 'a' <= x.lower() <= 'f' for x in xs) + """Returns whether the given string looks like a valid git commit SHA.""" + return ( + len(xs) > 6 + and len(xs) <= 40 + and all(x.isdigit() or "a" <= x.lower() <= "f" for x in xs) + ) def check_output(command: t.List[str], cwd: str) -> str: - """Shorthand for subprocess.check_output. Auto-decodes any stdout.""" - result = subprocess.run( - command, - cwd=cwd, - check=True, - stdin=subprocess.DEVNULL, - stdout=subprocess.PIPE, - encoding='utf-8', - ) - return result.stdout - - -def translate_prebase_sha_to_rev_number(llvm_config: LLVMConfig, - sha: str) -> int: - """Translates a sha to a revision number (e.g., "llvm-svn: 1234"). - - This function assumes that the given SHA is an ancestor of |base_llvm_sha|. - """ - commit_message = check_output( - ['git', 'log', '-n1', '--format=%B', sha], - cwd=llvm_config.dir, - ) - last_line = commit_message.strip().splitlines()[-1] - svn_match = re.match(r'^llvm-svn: (\d+)$', last_line) - - if not svn_match: - raise ValueError( - f"No llvm-svn line found for {sha}, which... shouldn't happen?") - - return int(svn_match.group(1)) + """Shorthand for subprocess.check_output. Auto-decodes any stdout.""" + result = subprocess.run( + command, + cwd=cwd, + check=True, + stdin=subprocess.DEVNULL, + stdout=subprocess.PIPE, + encoding="utf-8", + ) + return result.stdout -def translate_sha_to_rev(llvm_config: LLVMConfig, sha_or_ref: str) -> Rev: - """Translates a sha or git ref to a Rev.""" +def translate_prebase_sha_to_rev_number( + llvm_config: LLVMConfig, sha: str +) -> int: + """Translates a sha to a revision number (e.g., "llvm-svn: 1234"). - if is_git_sha(sha_or_ref): - sha = sha_or_ref - else: - sha = check_output( - ['git', 'rev-parse', sha_or_ref], + This function assumes that the given SHA is an ancestor of |base_llvm_sha|. + """ + commit_message = check_output( + ["git", "log", "-n1", "--format=%B", sha], cwd=llvm_config.dir, ) - sha = sha.strip() + last_line = commit_message.strip().splitlines()[-1] + svn_match = re.match(r"^llvm-svn: (\d+)$", last_line) - merge_base = check_output( - ['git', 'merge-base', base_llvm_sha, sha], - cwd=llvm_config.dir, - ) - merge_base = merge_base.strip() + if not svn_match: + raise ValueError( + f"No llvm-svn line found for {sha}, which... shouldn't happen?" 
+ ) - if merge_base == base_llvm_sha: - result = check_output( + return int(svn_match.group(1)) + + +def translate_sha_to_rev(llvm_config: LLVMConfig, sha_or_ref: str) -> Rev: + """Translates a sha or git ref to a Rev.""" + + if is_git_sha(sha_or_ref): + sha = sha_or_ref + else: + sha = check_output( + ["git", "rev-parse", sha_or_ref], + cwd=llvm_config.dir, + ) + sha = sha.strip() + + merge_base = check_output( + ["git", "merge-base", base_llvm_sha, sha], + cwd=llvm_config.dir, + ) + merge_base = merge_base.strip() + + if merge_base == base_llvm_sha: + result = check_output( + [ + "git", + "rev-list", + "--count", + "--first-parent", + f"{base_llvm_sha}..{sha}", + ], + cwd=llvm_config.dir, + ) + count = int(result.strip()) + return Rev(branch=MAIN_BRANCH, number=count + base_llvm_revision) + + # Otherwise, either: + # - |merge_base| is |sha| (we have a guaranteed llvm-svn number on |sha|) + # - |merge_base| is neither (we have a guaranteed llvm-svn number on + # |merge_base|, but not |sha|) + merge_base_number = translate_prebase_sha_to_rev_number( + llvm_config, merge_base + ) + if merge_base == sha: + return Rev(branch=MAIN_BRANCH, number=merge_base_number) + + distance_from_base = check_output( [ - 'git', - 'rev-list', - '--count', - '--first-parent', - f'{base_llvm_sha}..{sha}', + "git", + "rev-list", + "--count", + "--first-parent", + f"{merge_base}..{sha}", ], cwd=llvm_config.dir, ) - count = int(result.strip()) - return Rev(branch=MAIN_BRANCH, number=count + base_llvm_revision) - - # Otherwise, either: - # - |merge_base| is |sha| (we have a guaranteed llvm-svn number on |sha|) - # - |merge_base| is neither (we have a guaranteed llvm-svn number on - # |merge_base|, but not |sha|) - merge_base_number = translate_prebase_sha_to_rev_number( - llvm_config, merge_base) - if merge_base == sha: - return Rev(branch=MAIN_BRANCH, number=merge_base_number) - - distance_from_base = check_output( - [ - 'git', - 'rev-list', - '--count', - '--first-parent', - f'{merge_base}..{sha}', - ], - cwd=llvm_config.dir, - ) - - revision_number = merge_base_number + int(distance_from_base.strip()) - branches_containing = check_output( - ['git', 'branch', '-r', '--contains', sha], - cwd=llvm_config.dir, - ) - - candidates = [] - - prefix = llvm_config.remote + '/' - for branch in branches_containing.splitlines(): - branch = branch.strip() - if branch.startswith(prefix): - candidates.append(branch[len(prefix):]) - - if not candidates: - raise ValueError( - f'No viable branches found from {llvm_config.remote} with {sha}') - - # It seems that some `origin/release/.*` branches have - # `origin/upstream/release/.*` equivalents, which is... awkward to deal with. - # Prefer the latter, since that seems to have newer commits than the former. - # Technically n^2, but len(elements) should be like, tens in the worst case. - candidates = [x for x in candidates if f'upstream/{x}' not in candidates] - if len(candidates) != 1: - raise ValueError( - f'Ambiguity: multiple branches from {llvm_config.remote} have {sha}: ' - f'{sorted(candidates)}') - - return Rev(branch=candidates[0], number=revision_number) - - -def parse_git_commit_messages(stream: t.Iterable[str], - separator: str) -> t.Iterable[t.Tuple[str, str]]: - """Parses a stream of git log messages. - - These are expected to be in the format: - - 40 character sha - commit - message - body - separator - 40 character sha - commit - message - body - separator - """ - - lines = iter(stream) - while True: - # Looks like a potential bug in pylint? 
crbug.com/1041148 - # pylint: disable=stop-iteration-return - sha = next(lines, None) - if sha is None: - return - - sha = sha.strip() - assert is_git_sha(sha), f'Invalid git SHA: {sha}' - - message = [] - for line in lines: - if line.strip() == separator: - break - message.append(line) - - yield sha, ''.join(message) + + revision_number = merge_base_number + int(distance_from_base.strip()) + branches_containing = check_output( + ["git", "branch", "-r", "--contains", sha], + cwd=llvm_config.dir, + ) + + candidates = [] + + prefix = llvm_config.remote + "/" + for branch in branches_containing.splitlines(): + branch = branch.strip() + if branch.startswith(prefix): + candidates.append(branch[len(prefix) :]) + + if not candidates: + raise ValueError( + f"No viable branches found from {llvm_config.remote} with {sha}" + ) + + # It seems that some `origin/release/.*` branches have + # `origin/upstream/release/.*` equivalents, which is... awkward to deal with. + # Prefer the latter, since that seems to have newer commits than the former. + # Technically n^2, but len(elements) should be like, tens in the worst case. + candidates = [x for x in candidates if f"upstream/{x}" not in candidates] + if len(candidates) != 1: + raise ValueError( + f"Ambiguity: multiple branches from {llvm_config.remote} have {sha}: " + f"{sorted(candidates)}" + ) + + return Rev(branch=candidates[0], number=revision_number) + + +def parse_git_commit_messages( + stream: t.Iterable[str], separator: str +) -> t.Iterable[t.Tuple[str, str]]: + """Parses a stream of git log messages. + + These are expected to be in the format: + + 40 character sha + commit + message + body + separator + 40 character sha + commit + message + body + separator + """ + + lines = iter(stream) + while True: + # Looks like a potential bug in pylint? crbug.com/1041148 + # pylint: disable=stop-iteration-return + sha = next(lines, None) + if sha is None: + return + + sha = sha.strip() + assert is_git_sha(sha), f"Invalid git SHA: {sha}" + + message = [] + for line in lines: + if line.strip() == separator: + break + message.append(line) + + yield sha, "".join(message) def translate_prebase_rev_to_sha(llvm_config: LLVMConfig, rev: Rev) -> str: - """Translates a Rev to a SHA. - - This function assumes that the given rev refers to a commit that's an - ancestor of |base_llvm_sha|. - """ - # Because reverts may include reverted commit messages, we can't just |-n1| - # and pick that. - separator = '>!' * 80 - looking_for = f'llvm-svn: {rev.number}' - - git_command = [ - 'git', 'log', '--grep', f'^{looking_for}$', - f'--format=%H%n%B{separator}', base_llvm_sha - ] - - subp = subprocess.Popen( - git_command, - cwd=llvm_config.dir, - stdin=subprocess.DEVNULL, - stdout=subprocess.PIPE, - encoding='utf-8', - ) - - with subp: - for sha, message in parse_git_commit_messages(subp.stdout, separator): - last_line = message.splitlines()[-1] - if last_line.strip() == looking_for: - subp.terminate() - return sha - - if subp.returncode: - raise subprocess.CalledProcessError(subp.returncode, git_command) - raise ValueError(f'No commit with revision {rev} found') + """Translates a Rev to a SHA. + + This function assumes that the given rev refers to a commit that's an + ancestor of |base_llvm_sha|. + """ + # Because reverts may include reverted commit messages, we can't just |-n1| + # and pick that. + separator = ">!" 
* 80 + looking_for = f"llvm-svn: {rev.number}" + + git_command = [ + "git", + "log", + "--grep", + f"^{looking_for}$", + f"--format=%H%n%B{separator}", + base_llvm_sha, + ] + + subp = subprocess.Popen( + git_command, + cwd=llvm_config.dir, + stdin=subprocess.DEVNULL, + stdout=subprocess.PIPE, + encoding="utf-8", + ) + + with subp: + for sha, message in parse_git_commit_messages(subp.stdout, separator): + last_line = message.splitlines()[-1] + if last_line.strip() == looking_for: + subp.terminate() + return sha + + if subp.returncode: + raise subprocess.CalledProcessError(subp.returncode, git_command) + raise ValueError(f"No commit with revision {rev} found") def translate_rev_to_sha(llvm_config: LLVMConfig, rev: Rev) -> str: - """Translates a Rev to a SHA. - - Raises a ValueError if the given Rev doesn't exist in the given config. - """ - branch, number = rev - - if branch == MAIN_BRANCH: - if number < base_llvm_revision: - return translate_prebase_rev_to_sha(llvm_config, rev) - base_sha = base_llvm_sha - base_revision_number = base_llvm_revision - else: - base_sha = check_output( - ['git', 'merge-base', base_llvm_sha, f'{llvm_config.remote}/{branch}'], + """Translates a Rev to a SHA. + + Raises a ValueError if the given Rev doesn't exist in the given config. + """ + branch, number = rev + + if branch == MAIN_BRANCH: + if number < base_llvm_revision: + return translate_prebase_rev_to_sha(llvm_config, rev) + base_sha = base_llvm_sha + base_revision_number = base_llvm_revision + else: + base_sha = check_output( + [ + "git", + "merge-base", + base_llvm_sha, + f"{llvm_config.remote}/{branch}", + ], + cwd=llvm_config.dir, + ) + base_sha = base_sha.strip() + if base_sha == base_llvm_sha: + base_revision_number = base_llvm_revision + else: + base_revision_number = translate_prebase_sha_to_rev_number( + llvm_config, base_sha + ) + + # Alternatively, we could |git log --format=%H|, but git is *super* fast + # about rev walking/counting locally compared to long |log|s, so we walk back + # twice. + head = check_output( + ["git", "rev-parse", f"{llvm_config.remote}/{branch}"], + cwd=llvm_config.dir, + ) + branch_head_sha = head.strip() + + commit_number = number - base_revision_number + revs_between_str = check_output( + [ + "git", + "rev-list", + "--count", + "--first-parent", + f"{base_sha}..{branch_head_sha}", + ], + cwd=llvm_config.dir, + ) + revs_between = int(revs_between_str.strip()) + + commits_behind_head = revs_between - commit_number + if commits_behind_head < 0: + raise ValueError( + f"Revision {rev} is past {llvm_config.remote}/{branch}. Try updating " + "your tree?" + ) + + result = check_output( + ["git", "rev-parse", f"{branch_head_sha}~{commits_behind_head}"], cwd=llvm_config.dir, ) - base_sha = base_sha.strip() - if base_sha == base_llvm_sha: - base_revision_number = base_llvm_revision - else: - base_revision_number = translate_prebase_sha_to_rev_number( - llvm_config, base_sha) - - # Alternatively, we could |git log --format=%H|, but git is *super* fast - # about rev walking/counting locally compared to long |log|s, so we walk back - # twice. 
- head = check_output( - ['git', 'rev-parse', f'{llvm_config.remote}/{branch}'], - cwd=llvm_config.dir, - ) - branch_head_sha = head.strip() - - commit_number = number - base_revision_number - revs_between_str = check_output( - [ - 'git', - 'rev-list', - '--count', - '--first-parent', - f'{base_sha}..{branch_head_sha}', - ], - cwd=llvm_config.dir, - ) - revs_between = int(revs_between_str.strip()) - - commits_behind_head = revs_between - commit_number - if commits_behind_head < 0: - raise ValueError( - f'Revision {rev} is past {llvm_config.remote}/{branch}. Try updating ' - 'your tree?') - - result = check_output( - ['git', 'rev-parse', f'{branch_head_sha}~{commits_behind_head}'], - cwd=llvm_config.dir, - ) - - return result.strip() - - -def find_root_llvm_dir(root_dir: str = '.') -> str: - """Finds the root of an LLVM directory starting at |root_dir|. - - Raises a subprocess.CalledProcessError if no git directory is found. - """ - result = check_output( - ['git', 'rev-parse', '--show-toplevel'], - cwd=root_dir, - ) - return result.strip() + + return result.strip() + + +def find_root_llvm_dir(root_dir: str = ".") -> str: + """Finds the root of an LLVM directory starting at |root_dir|. + + Raises a subprocess.CalledProcessError if no git directory is found. + """ + result = check_output( + ["git", "rev-parse", "--show-toplevel"], + cwd=root_dir, + ) + return result.strip() def main(argv: t.List[str]) -> None: - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument( - '--llvm_dir', - help='LLVM directory to consult for git history, etc. Autodetected ' - 'if cwd is inside of an LLVM tree') - parser.add_argument( - '--upstream', - default='origin', - help="LLVM upstream's remote name. Defaults to %(default)s.") - sha_or_rev = parser.add_mutually_exclusive_group(required=True) - sha_or_rev.add_argument('--sha', - help='A git SHA (or ref) to convert to a rev') - sha_or_rev.add_argument('--rev', help='A rev to convert into a sha') - opts = parser.parse_args(argv) - - llvm_dir = opts.llvm_dir - if llvm_dir is None: - try: - llvm_dir = find_root_llvm_dir() - except subprocess.CalledProcessError: - parser.error("Couldn't autodetect an LLVM tree; please use --llvm_dir") - - config = LLVMConfig( - remote=opts.upstream, - dir=opts.llvm_dir or find_root_llvm_dir(), - ) - - if opts.sha: - rev = translate_sha_to_rev(config, opts.sha) - print(rev) - else: - sha = translate_rev_to_sha(config, Rev.parse(opts.rev)) - print(sha) - - -if __name__ == '__main__': - main(sys.argv[1:]) + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--llvm_dir", + help="LLVM directory to consult for git history, etc. Autodetected " + "if cwd is inside of an LLVM tree", + ) + parser.add_argument( + "--upstream", + default="origin", + help="LLVM upstream's remote name. 
Defaults to %(default)s.",
+    )
+    sha_or_rev = parser.add_mutually_exclusive_group(required=True)
+    sha_or_rev.add_argument(
+        "--sha", help="A git SHA (or ref) to convert to a rev"
+    )
+    sha_or_rev.add_argument("--rev", help="A rev to convert into a sha")
+    opts = parser.parse_args(argv)
+
+    llvm_dir = opts.llvm_dir
+    if llvm_dir is None:
+        try:
+            llvm_dir = find_root_llvm_dir()
+        except subprocess.CalledProcessError:
+            parser.error(
+                "Couldn't autodetect an LLVM tree; please use --llvm_dir"
+            )
+
+    config = LLVMConfig(
+        remote=opts.upstream,
+        dir=llvm_dir,
+    )
+
+    if opts.sha:
+        rev = translate_sha_to_rev(config, opts.sha)
+        print(rev)
+    else:
+        sha = translate_rev_to_sha(config, Rev.parse(opts.rev))
+        print(sha)
+
+
+if __name__ == "__main__":
+    main(sys.argv[1:])
diff --git a/llvm_tools/git_llvm_rev_test.py b/llvm_tools/git_llvm_rev_test.py
index 31d45544..e47a2ee6 100755
--- a/llvm_tools/git_llvm_rev_test.py
+++ b/llvm_tools/git_llvm_rev_test.py
@@ -9,122 +9,143 @@ import unittest
 
 import git_llvm_rev
-import llvm_project
 from git_llvm_rev import MAIN_BRANCH
+import llvm_project
 
 
 def get_llvm_config() -> git_llvm_rev.LLVMConfig:
-  return git_llvm_rev.LLVMConfig(dir=llvm_project.get_location(),
-                                 remote='origin')
+    return git_llvm_rev.LLVMConfig(
+        dir=llvm_project.get_location(), remote="origin"
+    )
 
 
 class Test(unittest.TestCase):
-  """Test cases for git_llvm_rev."""
-
-  def rev_to_sha_with_round_trip(self, rev: git_llvm_rev.Rev) -> str:
-    config = get_llvm_config()
-    sha = git_llvm_rev.translate_rev_to_sha(config, rev)
-    roundtrip_rev = git_llvm_rev.translate_sha_to_rev(config, sha)
-    self.assertEqual(roundtrip_rev, rev)
-    return sha
-
-  def test_sha_to_rev_on_base_sha_works(self) -> None:
-    sha = self.rev_to_sha_with_round_trip(
-        git_llvm_rev.Rev(branch=MAIN_BRANCH,
-                         number=git_llvm_rev.base_llvm_revision))
-    self.assertEqual(sha, git_llvm_rev.base_llvm_sha)
-
-  def test_sha_to_rev_prior_to_base_rev_works(self) -> None:
-    sha = self.rev_to_sha_with_round_trip(
-        git_llvm_rev.Rev(branch=MAIN_BRANCH, number=375000))
-    self.assertEqual(sha, '2f6da767f13b8fd81f840c211d405fea32ac9db7')
-
-  def test_sha_to_rev_after_base_rev_works(self) -> None:
-    sha = self.rev_to_sha_with_round_trip(
-        git_llvm_rev.Rev(branch=MAIN_BRANCH, number=375506))
-    self.assertEqual(sha, '3bf7fddeb05655d9baed4cc69e13535c677ed1dd')
-
-  def test_llvm_svn_parsing_runs_ignore_reverts(self) -> None:
-    # This commit has a revert that mentions the reverted llvm-svn in the
-    # commit message.
-
-    # Commit which performed the revert
-    sha = self.rev_to_sha_with_round_trip(
-        git_llvm_rev.Rev(branch=MAIN_BRANCH, number=374895))
-    self.assertEqual(sha, '1731fc88d1fa1fa55edd056db73a339b415dd5d6')
-
-    # Commit that was reverted
-    sha = self.rev_to_sha_with_round_trip(
-        git_llvm_rev.Rev(branch=MAIN_BRANCH, number=374841))
-    self.assertEqual(sha, '2a1386c81de504b5bda44fbecf3f7b4cdfd748fc')
-
-  def test_imaginary_revs_raise(self) -> None:
-    with self.assertRaises(ValueError) as r:
-      git_llvm_rev.translate_rev_to_sha(
-          get_llvm_config(),
-          git_llvm_rev.Rev(branch=MAIN_BRANCH, number=9999999))
-
-    self.assertIn('Try updating your tree?', str(r.exception))
-
-  def test_merge_commits_count_as_one_commit_crbug1041079(self) -> None:
-    # This CL merged _a lot_ of commits in. Verify a few hand-computed
-    # properties about it.
- merge_sha_rev_number = 4496 + git_llvm_rev.base_llvm_revision - sha = self.rev_to_sha_with_round_trip( - git_llvm_rev.Rev(branch=MAIN_BRANCH, number=merge_sha_rev_number)) - self.assertEqual(sha, '0f0d0ed1c78f1a80139a1f2133fad5284691a121') - - sha = self.rev_to_sha_with_round_trip( - git_llvm_rev.Rev(branch=MAIN_BRANCH, number=merge_sha_rev_number - 1)) - self.assertEqual(sha, '6f635f90929da9545dd696071a829a1a42f84b30') - - sha = self.rev_to_sha_with_round_trip( - git_llvm_rev.Rev(branch=MAIN_BRANCH, number=merge_sha_rev_number + 1)) - self.assertEqual(sha, '199700a5cfeedf227619f966aa3125cef18bc958') - - # NOTE: The below tests have _zz_ in their name as an optimization. Iterating - # on a quick test is painful when these larger tests come before it and take - # 7secs to run. Python's unittest module guarantees tests are run in - # alphabetical order by their method name, so... - # - # If you're wondering, the slow part is `git branch -r --contains`. I imagine - # it's going to be very cold code, so I'm not inclined to optimize it much. - - def test_zz_branch_revs_work_after_merge_points_and_svn_cutoff(self) -> None: - # Arbitrary 9.x commit without an attached llvm-svn: value. - sha = self.rev_to_sha_with_round_trip( - git_llvm_rev.Rev(branch='upstream/release/9.x', number=366670)) - self.assertEqual(sha, '4e858e4ac00b59f064da4e1f7e276916e7d296aa') - - def test_zz_branch_revs_work_at_merge_points(self) -> None: - rev_number = 366426 - backing_sha = 'c89a3d78f43d81b9cff7b9248772ddf14d21b749' - - sha = self.rev_to_sha_with_round_trip( - git_llvm_rev.Rev(branch=MAIN_BRANCH, number=rev_number)) - self.assertEqual(sha, backing_sha) - - # Note that this won't round-trip: since this commit is on the main - # branch, we'll pick main for this. That's fine. - sha = git_llvm_rev.translate_rev_to_sha( - get_llvm_config(), - git_llvm_rev.Rev(branch='upstream/release/9.x', number=rev_number)) - self.assertEqual(sha, backing_sha) - - def test_zz_branch_revs_work_after_merge_points(self) -> None: - # Picking the commit on the 9.x branch after the merge-base for that + - # main. Note that this is where llvm-svn numbers should diverge from - # ours, and are therefore untrustworthy. The commit for this *does* have a - # different `llvm-svn:` string than we should have. 
- sha = self.rev_to_sha_with_round_trip( - git_llvm_rev.Rev(branch='upstream/release/9.x', number=366427)) - self.assertEqual(sha, '2cf681a11aea459b50d712abc7136f7129e4d57f') + """Test cases for git_llvm_rev.""" + + def rev_to_sha_with_round_trip(self, rev: git_llvm_rev.Rev) -> str: + config = get_llvm_config() + sha = git_llvm_rev.translate_rev_to_sha(config, rev) + roundtrip_rev = git_llvm_rev.translate_sha_to_rev(config, sha) + self.assertEqual(roundtrip_rev, rev) + return sha + + def test_sha_to_rev_on_base_sha_works(self) -> None: + sha = self.rev_to_sha_with_round_trip( + git_llvm_rev.Rev( + branch=MAIN_BRANCH, number=git_llvm_rev.base_llvm_revision + ) + ) + self.assertEqual(sha, git_llvm_rev.base_llvm_sha) + + def test_sha_to_rev_prior_to_base_rev_works(self) -> None: + sha = self.rev_to_sha_with_round_trip( + git_llvm_rev.Rev(branch=MAIN_BRANCH, number=375000) + ) + self.assertEqual(sha, "2f6da767f13b8fd81f840c211d405fea32ac9db7") + + def test_sha_to_rev_after_base_rev_works(self) -> None: + sha = self.rev_to_sha_with_round_trip( + git_llvm_rev.Rev(branch=MAIN_BRANCH, number=375506) + ) + self.assertEqual(sha, "3bf7fddeb05655d9baed4cc69e13535c677ed1dd") + + def test_llvm_svn_parsing_runs_ignore_reverts(self) -> None: + # This commit has a revert that mentions the reverted llvm-svn in the + # commit message. + + # Commit which performed the revert + sha = self.rev_to_sha_with_round_trip( + git_llvm_rev.Rev(branch=MAIN_BRANCH, number=374895) + ) + self.assertEqual(sha, "1731fc88d1fa1fa55edd056db73a339b415dd5d6") + + # Commit that was reverted + sha = self.rev_to_sha_with_round_trip( + git_llvm_rev.Rev(branch=MAIN_BRANCH, number=374841) + ) + self.assertEqual(sha, "2a1386c81de504b5bda44fbecf3f7b4cdfd748fc") + + def test_imaginary_revs_raise(self) -> None: + with self.assertRaises(ValueError) as r: + git_llvm_rev.translate_rev_to_sha( + get_llvm_config(), + git_llvm_rev.Rev(branch=MAIN_BRANCH, number=9999999), + ) + + self.assertIn("Try updating your tree?", str(r.exception)) + + def test_merge_commits_count_as_one_commit_crbug1041079(self) -> None: + # This CL merged _a lot_ of commits in. Verify a few hand-computed + # properties about it. + merge_sha_rev_number = 4496 + git_llvm_rev.base_llvm_revision + sha = self.rev_to_sha_with_round_trip( + git_llvm_rev.Rev(branch=MAIN_BRANCH, number=merge_sha_rev_number) + ) + self.assertEqual(sha, "0f0d0ed1c78f1a80139a1f2133fad5284691a121") + + sha = self.rev_to_sha_with_round_trip( + git_llvm_rev.Rev( + branch=MAIN_BRANCH, number=merge_sha_rev_number - 1 + ) + ) + self.assertEqual(sha, "6f635f90929da9545dd696071a829a1a42f84b30") + + sha = self.rev_to_sha_with_round_trip( + git_llvm_rev.Rev( + branch=MAIN_BRANCH, number=merge_sha_rev_number + 1 + ) + ) + self.assertEqual(sha, "199700a5cfeedf227619f966aa3125cef18bc958") + + # NOTE: The below tests have _zz_ in their name as an optimization. Iterating + # on a quick test is painful when these larger tests come before it and take + # 7secs to run. Python's unittest module guarantees tests are run in + # alphabetical order by their method name, so... + # + # If you're wondering, the slow part is `git branch -r --contains`. I imagine + # it's going to be very cold code, so I'm not inclined to optimize it much. + + def test_zz_branch_revs_work_after_merge_points_and_svn_cutoff( + self, + ) -> None: + # Arbitrary 9.x commit without an attached llvm-svn: value. 
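+        # Past the llvm-svn cutoff, a branch rev is resolved structurally:
+        # translate_rev_to_sha takes the llvm-svn number of the branch's
+        # merge-base with main, then walks first-parent commits along the
+        # branch, so Rev(branch, N) names the commit N - merge_base_number
+        # first-parent steps past that merge-base.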
+ sha = self.rev_to_sha_with_round_trip( + git_llvm_rev.Rev(branch="upstream/release/9.x", number=366670) + ) + self.assertEqual(sha, "4e858e4ac00b59f064da4e1f7e276916e7d296aa") + + def test_zz_branch_revs_work_at_merge_points(self) -> None: + rev_number = 366426 + backing_sha = "c89a3d78f43d81b9cff7b9248772ddf14d21b749" + + sha = self.rev_to_sha_with_round_trip( + git_llvm_rev.Rev(branch=MAIN_BRANCH, number=rev_number) + ) + self.assertEqual(sha, backing_sha) + + # Note that this won't round-trip: since this commit is on the main + # branch, we'll pick main for this. That's fine. + sha = git_llvm_rev.translate_rev_to_sha( + get_llvm_config(), + git_llvm_rev.Rev(branch="upstream/release/9.x", number=rev_number), + ) + self.assertEqual(sha, backing_sha) + + def test_zz_branch_revs_work_after_merge_points(self) -> None: + # Picking the commit on the 9.x branch after the merge-base for that + + # main. Note that this is where llvm-svn numbers should diverge from + # ours, and are therefore untrustworthy. The commit for this *does* have a + # different `llvm-svn:` string than we should have. + sha = self.rev_to_sha_with_round_trip( + git_llvm_rev.Rev(branch="upstream/release/9.x", number=366427) + ) + self.assertEqual(sha, "2cf681a11aea459b50d712abc7136f7129e4d57f") # FIXME: When release/10.x happens, it may be nice to have a test-case # generally covering that, since it's the first branch that we have to travel # back to the base commit for. -if __name__ == '__main__': - llvm_project.ensure_up_to_date() - unittest.main() +if __name__ == "__main__": + llvm_project.ensure_up_to_date() + unittest.main() diff --git a/llvm_tools/git_unittest.py b/llvm_tools/git_unittest.py index 18fb60e8..8e75100f 100755 --- a/llvm_tools/git_unittest.py +++ b/llvm_tools/git_unittest.py @@ -16,127 +16,148 @@ import unittest.mock as mock import git + # These are unittests; protected access is OK to a point. # pylint: disable=protected-access class HelperFunctionsTest(unittest.TestCase): - """Test class for updating LLVM hashes of packages.""" - - @mock.patch.object(os.path, 'isdir', return_value=False) - def testFailedToCreateBranchForInvalidDirectoryPath(self, mock_isdir): - path_to_repo = '/invalid/path/to/repo' - branch = 'branch-name' - - # Verify the exception is raised when provided an invalid directory path. - with self.assertRaises(ValueError) as err: - git.CreateBranch(path_to_repo, branch) - - self.assertEqual(str(err.exception), - 'Invalid directory path provided: %s' % path_to_repo) - - mock_isdir.assert_called_once() - - @mock.patch.object(os.path, 'isdir', return_value=True) - @mock.patch.object(subprocess, 'check_output', return_value=None) - def testSuccessfullyCreatedBranch(self, mock_command_output, mock_isdir): - path_to_repo = '/path/to/repo' - branch = 'branch-name' - - git.CreateBranch(path_to_repo, branch) - - mock_isdir.assert_called_once_with(path_to_repo) - - self.assertEqual(mock_command_output.call_count, 2) - - @mock.patch.object(os.path, 'isdir', return_value=False) - def testFailedToDeleteBranchForInvalidDirectoryPath(self, mock_isdir): - path_to_repo = '/invalid/path/to/repo' - branch = 'branch-name' - - # Verify the exception is raised on an invalid repo path. 
-    with self.assertRaises(ValueError) as err:
-      git.DeleteBranch(path_to_repo, branch)
-
-    self.assertEqual(str(err.exception),
-                     'Invalid directory path provided: %s' % path_to_repo)
-
-    mock_isdir.assert_called_once()
-
-  @mock.patch.object(os.path, 'isdir', return_value=True)
-  @mock.patch.object(subprocess, 'check_output', return_value=None)
-  def testSuccessfullyDeletedBranch(self, mock_command_output, mock_isdir):
-    path_to_repo = '/valid/path/to/repo'
-    branch = 'branch-name'
-
-    git.DeleteBranch(path_to_repo, branch)
-
-    mock_isdir.assert_called_once_with(path_to_repo)
-
-    self.assertEqual(mock_command_output.call_count, 3)
-
-  @mock.patch.object(os.path, 'isdir', return_value=False)
-  def testFailedToUploadChangesForInvalidDirectoryPath(self, mock_isdir):
-    path_to_repo = '/some/path/to/repo'
-    branch = 'update-LLVM_NEXT_HASH-a123testhash3'
-    commit_messages = ['Test message']
-
-    # Verify exception is raised when on an invalid repo path.
-    with self.assertRaises(ValueError) as err:
-      git.UploadChanges(path_to_repo, branch, commit_messages)
-
-    self.assertEqual(str(err.exception),
-                     'Invalid path provided: %s' % path_to_repo)
-
-    mock_isdir.assert_called_once()
-
-  @mock.patch.object(os.path, 'isdir', return_value=True)
-  @mock.patch.object(subprocess, 'check_output')
-  @mock.patch.object(tempfile, 'NamedTemporaryFile')
-  def testSuccessfullyUploadedChangesForReview(self, mock_tempfile,
-                                               mock_commands, mock_isdir):
-
-    path_to_repo = '/some/path/to/repo'
-    branch = 'branch-name'
-    commit_messages = ['Test message']
-    mock_tempfile.return_value.__enter__.return_value.name = 'tmp'
-
-    # A test CL generated by `repo upload`.
-    mock_commands.side_effect = [
-        None,
-        ('remote: https://chromium-review.googlesource.'
-         'com/c/chromiumos/overlays/chromiumos-overlay/'
-         '+/193147 Fix stdout')
-    ]
-    change_list = git.UploadChanges(path_to_repo, branch, commit_messages)
-
-    self.assertEqual(change_list.cl_number, 193147)
-
-    mock_isdir.assert_called_once_with(path_to_repo)
-
-    expected_command = [
-        'git', 'commit', '-F',
-        mock_tempfile.return_value.__enter__.return_value.name
-    ]
-    self.assertEqual(mock_commands.call_args_list[0],
-                     mock.call(expected_command, cwd=path_to_repo))
-
-    expected_cmd = [
-        'repo', 'upload', '--yes', '--ne', '--no-verify',
-        '--br=%s' % branch
-    ]
-    self.assertEqual(
-        mock_commands.call_args_list[1],
-        mock.call(expected_cmd,
-                  stderr=subprocess.STDOUT,
-                  cwd=path_to_repo,
-                  encoding='utf-8'))
-
-    self.assertEqual(
-        change_list.url,
-        'https://chromium-review.googlesource.com/c/chromiumos/overlays/'
-        'chromiumos-overlay/+/193147')
+    """Test class for git helper functions."""
 
+    @mock.patch.object(os.path, "isdir", return_value=False)
+    def testFailedToCreateBranchForInvalidDirectoryPath(self, mock_isdir):
+        path_to_repo = "/invalid/path/to/repo"
+        branch = "branch-name"
 
-if __name__ == '__main__':
-  unittest.main()
+        # Verify the exception is raised when provided an invalid directory path.
+        with self.assertRaises(ValueError) as err:
+            git.CreateBranch(path_to_repo, branch)
+
+        self.assertEqual(
+            str(err.exception),
+            "Invalid directory path provided: %s" % path_to_repo,
+        )
+
+        mock_isdir.assert_called_once()
+
+    @mock.patch.object(os.path, "isdir", return_value=True)
+    @mock.patch.object(subprocess, "check_output", return_value=None)
+    def testSuccessfullyCreatedBranch(self, mock_command_output, mock_isdir):
+        path_to_repo = "/path/to/repo"
+        branch = "branch-name"
+
+        git.CreateBranch(path_to_repo, branch)
+
+        mock_isdir.assert_called_once_with(path_to_repo)
+
+        self.assertEqual(mock_command_output.call_count, 2)
+
+    @mock.patch.object(os.path, "isdir", return_value=False)
+    def testFailedToDeleteBranchForInvalidDirectoryPath(self, mock_isdir):
+        path_to_repo = "/invalid/path/to/repo"
+        branch = "branch-name"
+
+        # Verify the exception is raised on an invalid repo path.
+        with self.assertRaises(ValueError) as err:
+            git.DeleteBranch(path_to_repo, branch)
+
+        self.assertEqual(
+            str(err.exception),
+            "Invalid directory path provided: %s" % path_to_repo,
+        )
+
+        mock_isdir.assert_called_once()
+
+    @mock.patch.object(os.path, "isdir", return_value=True)
+    @mock.patch.object(subprocess, "check_output", return_value=None)
+    def testSuccessfullyDeletedBranch(self, mock_command_output, mock_isdir):
+        path_to_repo = "/valid/path/to/repo"
+        branch = "branch-name"
+
+        git.DeleteBranch(path_to_repo, branch)
+
+        mock_isdir.assert_called_once_with(path_to_repo)
+
+        self.assertEqual(mock_command_output.call_count, 3)
+
+    @mock.patch.object(os.path, "isdir", return_value=False)
+    def testFailedToUploadChangesForInvalidDirectoryPath(self, mock_isdir):
+        path_to_repo = "/some/path/to/repo"
+        branch = "update-LLVM_NEXT_HASH-a123testhash3"
+        commit_messages = ["Test message"]
+
+        # Verify the exception is raised on an invalid repo path.
+        with self.assertRaises(ValueError) as err:
+            git.UploadChanges(path_to_repo, branch, commit_messages)
+
+        self.assertEqual(
+            str(err.exception), "Invalid path provided: %s" % path_to_repo
+        )
+
+        mock_isdir.assert_called_once()
+
+    @mock.patch.object(os.path, "isdir", return_value=True)
+    @mock.patch.object(subprocess, "check_output")
+    @mock.patch.object(tempfile, "NamedTemporaryFile")
+    def testSuccessfullyUploadedChangesForReview(
+        self, mock_tempfile, mock_commands, mock_isdir
+    ):
+
+        path_to_repo = "/some/path/to/repo"
+        branch = "branch-name"
+        commit_messages = ["Test message"]
+        mock_tempfile.return_value.__enter__.return_value.name = "tmp"
+
+        # A test CL generated by `repo upload`.
+        mock_commands.side_effect = [
+            None,
+            (
+                "remote: https://chromium-review.googlesource."
+ "com/c/chromiumos/overlays/chromiumos-overlay/" + "+/193147 Fix stdout" + ), + ] + change_list = git.UploadChanges(path_to_repo, branch, commit_messages) + + self.assertEqual(change_list.cl_number, 193147) + + mock_isdir.assert_called_once_with(path_to_repo) + + expected_command = [ + "git", + "commit", + "-F", + mock_tempfile.return_value.__enter__.return_value.name, + ] + self.assertEqual( + mock_commands.call_args_list[0], + mock.call(expected_command, cwd=path_to_repo), + ) + + expected_cmd = [ + "repo", + "upload", + "--yes", + "--ne", + "--no-verify", + "--br=%s" % branch, + ] + self.assertEqual( + mock_commands.call_args_list[1], + mock.call( + expected_cmd, + stderr=subprocess.STDOUT, + cwd=path_to_repo, + encoding="utf-8", + ), + ) + + self.assertEqual( + change_list.url, + "https://chromium-review.googlesource.com/c/chromiumos/overlays/" + "chromiumos-overlay/+/193147", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/llvm_tools/llvm_bisection.py b/llvm_tools/llvm_bisection.py index 3f1dde73..f268bfb1 100755 --- a/llvm_tools/llvm_bisection.py +++ b/llvm_tools/llvm_bisection.py @@ -25,352 +25,442 @@ import update_tryjob_status class BisectionExitStatus(enum.Enum): - """Exit code when performing bisection.""" + """Exit code when performing bisection.""" - # Means that there are no more revisions available to bisect. - BISECTION_COMPLETE = 126 + # Means that there are no more revisions available to bisect. + BISECTION_COMPLETE = 126 def GetCommandLineArgs(): - """Parses the command line for the command line arguments.""" - - # Default path to the chroot if a path is not specified. - cros_root = os.path.expanduser('~') - cros_root = os.path.join(cros_root, 'chromiumos') - - # Create parser and add optional command-line arguments. - parser = argparse.ArgumentParser( - description='Bisects LLVM via tracking a JSON file.') - - # Add argument for other change lists that want to run alongside the tryjob - # which has a change list of updating a package's git hash. - parser.add_argument( - '--parallel', - type=int, - default=3, - help='How many tryjobs to create between the last good version and ' - 'the first bad version (default: %(default)s)') - - # Add argument for the good LLVM revision for bisection. - parser.add_argument('--start_rev', - required=True, - type=int, - help='The good revision for the bisection.') - - # Add argument for the bad LLVM revision for bisection. - parser.add_argument('--end_rev', - required=True, - type=int, - help='The bad revision for the bisection.') - - # Add argument for the absolute path to the file that contains information on - # the previous tested svn version. - parser.add_argument( - '--last_tested', - required=True, - help='the absolute path to the file that contains the tryjobs') - - # Add argument for the absolute path to the LLVM source tree. - parser.add_argument( - '--src_path', - help='the path to the LLVM source tree to use (used for retrieving the ' - 'git hash of each version between the last good version and first bad ' - 'version)') - - # Add argument for other change lists that want to run alongside the tryjob - # which has a change list of updating a package's git hash. - parser.add_argument( - '--extra_change_lists', - type=int, - nargs='+', - help='change lists that would like to be run alongside the change list ' - 'of updating the packages') - - # Add argument for custom options for the tryjob. 
-  parser.add_argument('--options',
-                      required=False,
-                      nargs='+',
-                      help='options to use for the tryjob testing')
-
-  # Add argument for the builder to use for the tryjob.
-  parser.add_argument('--builder',
-                      required=True,
-                      help='builder to use for the tryjob testing')
-
-  # Add argument for the description of the tryjob.
-  parser.add_argument('--description',
-                      required=False,
-                      nargs='+',
-                      help='the description of the tryjob')
-
-  # Add argument for a specific chroot path.
-  parser.add_argument('--chroot_path',
-                      default=cros_root,
-                      help='the path to the chroot (default: %(default)s)')
-
-  # Add argument for whether to display command contents to `stdout`.
-  parser.add_argument('--verbose',
-                      action='store_true',
-                      help='display contents of a command to the terminal '
-                      '(default: %(default)s)')
-
-  # Add argument for whether to display command contents to `stdout`.
-  parser.add_argument('--nocleanup',
-                      action='store_false',
-                      dest='cleanup',
-                      help='Abandon CLs created for bisectoin')
-
-  args_output = parser.parse_args()
-
-  assert args_output.start_rev < args_output.end_rev, (
-      'Start revision %d is >= end revision %d' %
-      (args_output.start_rev, args_output.end_rev))
-
-  if args_output.last_tested and not args_output.last_tested.endswith('.json'):
-    raise ValueError('Filed provided %s does not end in ".json"' %
-                     args_output.last_tested)
-
-  return args_output
+    """Parses the command line for the command line arguments."""
+
+    # Default path to the chroot if a path is not specified.
+    cros_root = os.path.expanduser("~")
+    cros_root = os.path.join(cros_root, "chromiumos")
+
+    # Create parser and add optional command-line arguments.
+    parser = argparse.ArgumentParser(
+        description="Bisects LLVM via tracking a JSON file."
+    )
+
+    # Add argument for how many tryjobs to create between the last good
+    # version and the first bad version.
+    parser.add_argument(
+        "--parallel",
+        type=int,
+        default=3,
+        help="How many tryjobs to create between the last good version and "
+        "the first bad version (default: %(default)s)",
+    )
+
+    # Add argument for the good LLVM revision for bisection.
+    parser.add_argument(
+        "--start_rev",
+        required=True,
+        type=int,
+        help="The good revision for the bisection.",
+    )
+
+    # Add argument for the bad LLVM revision for bisection.
+    parser.add_argument(
+        "--end_rev",
+        required=True,
+        type=int,
+        help="The bad revision for the bisection.",
+    )
+
+    # Add argument for the absolute path to the file that contains information on
+    # the previously tested svn version.
+    parser.add_argument(
+        "--last_tested",
+        required=True,
+        help="the absolute path to the file that contains the tryjobs",
+    )
+
+    # Add argument for the absolute path to the LLVM source tree.
+    parser.add_argument(
+        "--src_path",
+        help="the path to the LLVM source tree to use (used for retrieving the "
+        "git hash of each version between the last good version and first bad "
+        "version)",
+    )
+
+    # Add argument for other change lists that want to run alongside the tryjob
+    # which has a change list of updating a package's git hash.
+    parser.add_argument(
+        "--extra_change_lists",
+        type=int,
+        nargs="+",
+        help="change lists that would like to be run alongside the change list "
+        "of updating the packages",
+    )
+
+    # Add argument for custom options for the tryjob.
+    parser.add_argument(
+        "--options",
+        required=False,
+        nargs="+",
+        help="options to use for the tryjob testing",
+    )
+
+    # Add argument for the builder to use for the tryjob.
+    parser.add_argument(
+        "--builder", required=True, help="builder to use for the tryjob testing"
+    )
+
+    # Add argument for the description of the tryjob.
+    parser.add_argument(
+        "--description",
+        required=False,
+        nargs="+",
+        help="the description of the tryjob",
+    )
+
+    # Add argument for a specific chroot path.
+    parser.add_argument(
+        "--chroot_path",
+        default=cros_root,
+        help="the path to the chroot (default: %(default)s)",
+    )
+
+    # Add argument for whether to display command contents to `stdout`.
+    parser.add_argument(
+        "--verbose",
+        action="store_true",
+        help="display contents of a command to the terminal "
+        "(default: %(default)s)",
+    )
+
+    # Add argument for whether to abandon the CLs created for bisection.
+    parser.add_argument(
+        "--nocleanup",
+        action="store_false",
+        dest="cleanup",
+        help="Abandon CLs created for bisection",
+    )
+
+    args_output = parser.parse_args()
+
+    assert (
+        args_output.start_rev < args_output.end_rev
+    ), "Start revision %d is >= end revision %d" % (
+        args_output.start_rev,
+        args_output.end_rev,
+    )
+
+    if args_output.last_tested and not args_output.last_tested.endswith(
+        ".json"
+    ):
+        raise ValueError(
+            'File provided %s does not end in ".json"'
+            % args_output.last_tested
+        )
+
+    return args_output
 
 
 def GetRemainingRange(start, end, tryjobs):
-  """Gets the start and end intervals in 'json_file'.
-
-  Args:
-    start: The start version of the bisection provided via the command line.
-    end: The end version of the bisection provided via the command line.
-    tryjobs: A list of tryjobs where each element is in the following format:
-      [
-          {[TRYJOB_INFORMATION]},
-          {[TRYJOB_INFORMATION]},
-          ...,
-          {[TRYJOB_INFORMATION]}
-      ]
-
-  Returns:
-    The new start version and end version for bisection, a set of revisions
-    that are 'pending' and a set of revisions that are to be skipped.
-
-  Raises:
-    ValueError: The value for 'status' is missing or there is a mismatch
-    between 'start' and 'end' compared to the 'start' and 'end' in the JSON
-    file.
-    AssertionError: The new start version is >= than the new end version.
-  """
-
-  if not tryjobs:
-    return start, end, {}, {}
-
-  # Verify that each tryjob has a value for the 'status' key.
-  for cur_tryjob_dict in tryjobs:
-    if not cur_tryjob_dict.get('status', None):
-      raise ValueError('"status" is missing or has no value, please '
-                       'go to %s and update it' % cur_tryjob_dict['link'])
-
-  all_bad_revisions = [end]
-  all_bad_revisions.extend(
-      cur_tryjob['rev'] for cur_tryjob in tryjobs
-      if cur_tryjob['status'] == update_tryjob_status.TryjobStatus.BAD.value)
-
-  # The minimum value for the 'bad' field in the tryjobs is the new end
-  # version.
-  bad_rev = min(all_bad_revisions)
-
-  all_good_revisions = [start]
-  all_good_revisions.extend(
-      cur_tryjob['rev'] for cur_tryjob in tryjobs
-      if cur_tryjob['status'] == update_tryjob_status.TryjobStatus.GOOD.value)
-
-  # The maximum value for the 'good' field in the tryjobs is the new start
-  # version.
-  good_rev = max(all_good_revisions)
-
-  # The good version should always be strictly less than the bad version;
-  # otherwise, bisection is broken.
-  assert good_rev < bad_rev, ('Bisection is broken because %d (good) is >= '
-                              '%d (bad)' % (good_rev, bad_rev))
-
-  # Find all revisions that are 'pending' within 'good_rev' and 'bad_rev'.
-  #
-  # NOTE: The intent is to not launch tryjobs between 'good_rev' and 'bad_rev'
-  # that have already been launched (this set is used when constructing the
-  # list of revisions to launch tryjobs for).
-  pending_revisions = {
-      tryjob['rev']
-      for tryjob in tryjobs
-      if tryjob['status'] == update_tryjob_status.TryjobStatus.PENDING.value
-      and good_rev < tryjob['rev'] < bad_rev
-  }
-
-  # Find all revisions that are to be skipped within 'good_rev' and 'bad_rev'.
-  #
-  # NOTE: The intent is to not launch tryjobs between 'good_rev' and 'bad_rev'
-  # that have already been marked as 'skip' (this set is used when constructing
-  # the list of revisions to launch tryjobs for).
-  skip_revisions = {
-      tryjob['rev']
-      for tryjob in tryjobs
-      if tryjob['status'] == update_tryjob_status.TryjobStatus.SKIP.value
-      and good_rev < tryjob['rev'] < bad_rev
-  }
-
-  return good_rev, bad_rev, pending_revisions, skip_revisions
-
-
-def GetCommitsBetween(start, end, parallel, src_path, pending_revisions,
-                      skip_revisions):
-  """Determines the revisions between start and end."""
-
-  with get_llvm_hash.LLVMHash().CreateTempDirectory() as temp_dir:
-    # We have guaranteed contiguous revision numbers after this,
-    # and that guarnatee simplifies things considerably, so we don't
-    # support anything before it.
-    assert start >= git_llvm_rev.base_llvm_revision, f'{start} was too long ago'
-
-    with get_llvm_hash.CreateTempLLVMRepo(temp_dir) as new_repo:
-      if not src_path:
-        src_path = new_repo
-      index_step = (end - (start + 1)) // (parallel + 1)
-      if not index_step:
-        index_step = 1
-      revisions = [
-          rev for rev in range(start + 1, end, index_step)
-          if rev not in pending_revisions and rev not in skip_revisions
+    """Gets the start and end intervals in 'json_file'.
+
+    Args:
+      start: The start version of the bisection provided via the command line.
+      end: The end version of the bisection provided via the command line.
+      tryjobs: A list of tryjobs where each element is in the following format:
+        [
+            {[TRYJOB_INFORMATION]},
+            {[TRYJOB_INFORMATION]},
+            ...,
+            {[TRYJOB_INFORMATION]}
        ]
-      git_hashes = [
-          get_llvm_hash.GetGitHashFrom(src_path, rev) for rev in revisions
-      ]
-      return revisions, git_hashes
-
-
-def Bisect(revisions, git_hashes, bisect_state, last_tested, update_packages,
-           chroot_path, patch_metadata_file, extra_change_lists, options,
-           builder, verbose):
-  """Adds tryjobs and updates the status file with the new tryjobs."""
-  try:
-    for svn_revision, git_hash in zip(revisions, git_hashes):
-      tryjob_dict = modify_a_tryjob.AddTryjob(update_packages, git_hash,
-                                              svn_revision, chroot_path,
-                                              patch_metadata_file,
-                                              extra_change_lists, options,
-                                              builder, verbose, svn_revision)
-
-      bisect_state['jobs'].append(tryjob_dict)
-  finally:
-    # Do not want to lose progress if there is an exception.
-    if last_tested:
-      new_file = '%s.new' % last_tested
-      with open(new_file, 'w') as json_file:
-        json.dump(bisect_state, json_file, indent=4, separators=(',', ': '))
-
-      os.rename(new_file, last_tested)
+
+    Returns:
+      The new start version and end version for bisection, a set of revisions
+      that are 'pending' and a set of revisions that are to be skipped.
+
+    Raises:
+      ValueError: The value for 'status' is missing or there is a mismatch
+      between 'start' and 'end' compared to the 'start' and 'end' in the JSON
+      file.
+      AssertionError: The new start version is >= the new end version.
+    """
+
+    if not tryjobs:
+        return start, end, {}, {}
+
+    # Verify that each tryjob has a value for the 'status' key.
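+    # A tryjob with no 'status' has not been triaged yet, so the remaining
+    # range cannot be narrowed; fail fast and point the user at the tryjob's
+    # link.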
+    for cur_tryjob_dict in tryjobs:
+        if not cur_tryjob_dict.get("status", None):
+            raise ValueError(
+                '"status" is missing or has no value, please '
+                "go to %s and update it" % cur_tryjob_dict["link"]
+            )
+
+    all_bad_revisions = [end]
+    all_bad_revisions.extend(
+        cur_tryjob["rev"]
+        for cur_tryjob in tryjobs
+        if cur_tryjob["status"] == update_tryjob_status.TryjobStatus.BAD.value
+    )
+
+    # The minimum value for the 'bad' field in the tryjobs is the new end
+    # version.
+    bad_rev = min(all_bad_revisions)
+
+    all_good_revisions = [start]
+    all_good_revisions.extend(
+        cur_tryjob["rev"]
+        for cur_tryjob in tryjobs
+        if cur_tryjob["status"] == update_tryjob_status.TryjobStatus.GOOD.value
+    )
+
+    # The maximum value for the 'good' field in the tryjobs is the new start
+    # version.
+    good_rev = max(all_good_revisions)
+
+    # The good version should always be strictly less than the bad version;
+    # otherwise, bisection is broken.
+    assert (
+        good_rev < bad_rev
+    ), "Bisection is broken because %d (good) is >= " "%d (bad)" % (
+        good_rev,
+        bad_rev,
+    )
+
+    # Find all revisions that are 'pending' within 'good_rev' and 'bad_rev'.
+    #
+    # NOTE: The intent is to not launch tryjobs between 'good_rev' and 'bad_rev'
+    # that have already been launched (this set is used when constructing the
+    # list of revisions to launch tryjobs for).
+    pending_revisions = {
+        tryjob["rev"]
+        for tryjob in tryjobs
+        if tryjob["status"] == update_tryjob_status.TryjobStatus.PENDING.value
+        and good_rev < tryjob["rev"] < bad_rev
+    }
+
+    # Find all revisions that are to be skipped within 'good_rev' and 'bad_rev'.
+    #
+    # NOTE: The intent is to not launch tryjobs between 'good_rev' and 'bad_rev'
+    # that have already been marked as 'skip' (this set is used when constructing
+    # the list of revisions to launch tryjobs for).
+    skip_revisions = {
+        tryjob["rev"]
+        for tryjob in tryjobs
+        if tryjob["status"] == update_tryjob_status.TryjobStatus.SKIP.value
+        and good_rev < tryjob["rev"] < bad_rev
+    }
+
+    return good_rev, bad_rev, pending_revisions, skip_revisions
+
+
+def GetCommitsBetween(
+    start, end, parallel, src_path, pending_revisions, skip_revisions
+):
+    """Determines the revisions between start and end."""
+
+    with get_llvm_hash.LLVMHash().CreateTempDirectory() as temp_dir:
+        # We have guaranteed contiguous revision numbers after this,
+        # and that guarantee simplifies things considerably, so we don't
+        # support anything before it.
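+        # With contiguous numbers, the sampling below is simple arithmetic.
+        # For illustration: bisecting start=N, end=N+10 with parallel=3 gives
+        # index_step = (N + 10 - (N + 1)) // (3 + 1) == 2, so candidate
+        # tryjobs land on N+1, N+3, N+5, N+7, and N+9, minus any revisions
+        # that are already pending or skipped.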
+ assert ( + start >= git_llvm_rev.base_llvm_revision + ), f"{start} was too long ago" + + with get_llvm_hash.CreateTempLLVMRepo(temp_dir) as new_repo: + if not src_path: + src_path = new_repo + index_step = (end - (start + 1)) // (parallel + 1) + if not index_step: + index_step = 1 + revisions = [ + rev + for rev in range(start + 1, end, index_step) + if rev not in pending_revisions and rev not in skip_revisions + ] + git_hashes = [ + get_llvm_hash.GetGitHashFrom(src_path, rev) for rev in revisions + ] + return revisions, git_hashes + + +def Bisect( + revisions, + git_hashes, + bisect_state, + last_tested, + update_packages, + chroot_path, + patch_metadata_file, + extra_change_lists, + options, + builder, + verbose, +): + """Adds tryjobs and updates the status file with the new tryjobs.""" + + try: + for svn_revision, git_hash in zip(revisions, git_hashes): + tryjob_dict = modify_a_tryjob.AddTryjob( + update_packages, + git_hash, + svn_revision, + chroot_path, + patch_metadata_file, + extra_change_lists, + options, + builder, + verbose, + svn_revision, + ) + + bisect_state["jobs"].append(tryjob_dict) + finally: + # Do not want to lose progress if there is an exception. + if last_tested: + new_file = "%s.new" % last_tested + with open(new_file, "w") as json_file: + json.dump( + bisect_state, json_file, indent=4, separators=(",", ": ") + ) + + os.rename(new_file, last_tested) def LoadStatusFile(last_tested, start, end): - """Loads the status file for bisection.""" - - try: - with open(last_tested) as f: - return json.load(f) - except IOError as err: - if err.errno != errno.ENOENT: - raise + """Loads the status file for bisection.""" - return {'start': start, 'end': end, 'jobs': []} - - -def main(args_output): - """Bisects LLVM commits. - - Raises: - AssertionError: The script was run inside the chroot. - """ - - chroot.VerifyOutsideChroot() - patch_metadata_file = 'PATCHES.json' - start = args_output.start_rev - end = args_output.end_rev - - bisect_state = LoadStatusFile(args_output.last_tested, start, end) - if start != bisect_state['start'] or end != bisect_state['end']: - raise ValueError( - f'The start {start} or the end {end} version provided is ' - f'different than "start" {bisect_state["start"]} or "end" ' - f'{bisect_state["end"]} in the .JSON file') - - # Pending and skipped revisions are between 'start_rev' and 'end_rev'. - start_rev, end_rev, pending_revs, skip_revs = GetRemainingRange( - start, end, bisect_state['jobs']) - - revisions, git_hashes = GetCommitsBetween(start_rev, end_rev, - args_output.parallel, - args_output.src_path, pending_revs, - skip_revs) - - # No more revisions between 'start_rev' and 'end_rev', so - # bisection is complete. - # - # This is determined by finding all valid revisions between 'start_rev' - # and 'end_rev' and that are NOT in the 'pending' and 'skipped' set. - if not revisions: - if pending_revs: - # Some tryjobs are not finished which may change the actual bad - # commit/revision when those tryjobs are finished. 
- no_revisions_message = (f'No revisions between start {start_rev} ' - f'and end {end_rev} to create tryjobs\n') - - if pending_revs: - no_revisions_message += ('The following tryjobs are pending:\n' + - '\n'.join(str(rev) - for rev in pending_revs) + '\n') - - if skip_revs: - no_revisions_message += ('The following tryjobs were skipped:\n' + - '\n'.join(str(rev) - for rev in skip_revs) + '\n') - - raise ValueError(no_revisions_message) - - print(f'Finished bisecting for {args_output.last_tested}') - if args_output.src_path: - bad_llvm_hash = get_llvm_hash.GetGitHashFrom(args_output.src_path, - end_rev) - else: - bad_llvm_hash = get_llvm_hash.LLVMHash().GetLLVMHash(end_rev) - print(f'The bad revision is {end_rev} and its commit hash is ' - f'{bad_llvm_hash}') - if skip_revs: - skip_revs_message = ('\nThe following revisions were skipped:\n' + - '\n'.join(str(rev) for rev in skip_revs)) - print(skip_revs_message) - - if args_output.cleanup: - # Abandon all the CLs created for bisection - gerrit = os.path.join(args_output.chroot_path, 'chromite/bin/gerrit') - for build in bisect_state['jobs']: - try: - subprocess.check_output( - [gerrit, 'abandon', str(build['cl'])], - stderr=subprocess.STDOUT, - encoding='utf-8') - except subprocess.CalledProcessError as err: - # the CL may have been abandoned - if 'chromite.lib.gob_util.GOBError' not in err.output: + try: + with open(last_tested) as f: + return json.load(f) + except IOError as err: + if err.errno != errno.ENOENT: raise - return BisectionExitStatus.BISECTION_COMPLETE.value + return {"start": start, "end": end, "jobs": []} - for rev in revisions: - if update_tryjob_status.FindTryjobIndex(rev, - bisect_state['jobs']) is not None: - raise ValueError(f'Revision {rev} exists already in "jobs"') - Bisect(revisions, git_hashes, bisect_state, args_output.last_tested, - update_chromeos_llvm_hash.DEFAULT_PACKAGES, args_output.chroot_path, - patch_metadata_file, args_output.extra_change_lists, - args_output.options, args_output.builder, args_output.verbose) - - -if __name__ == '__main__': - sys.exit(main(GetCommandLineArgs())) +def main(args_output): + """Bisects LLVM commits. + + Raises: + AssertionError: The script was run inside the chroot. + """ + + chroot.VerifyOutsideChroot() + patch_metadata_file = "PATCHES.json" + start = args_output.start_rev + end = args_output.end_rev + + bisect_state = LoadStatusFile(args_output.last_tested, start, end) + if start != bisect_state["start"] or end != bisect_state["end"]: + raise ValueError( + f"The start {start} or the end {end} version provided is " + f'different than "start" {bisect_state["start"]} or "end" ' + f'{bisect_state["end"]} in the .JSON file' + ) + + # Pending and skipped revisions are between 'start_rev' and 'end_rev'. + start_rev, end_rev, pending_revs, skip_revs = GetRemainingRange( + start, end, bisect_state["jobs"] + ) + + revisions, git_hashes = GetCommitsBetween( + start_rev, + end_rev, + args_output.parallel, + args_output.src_path, + pending_revs, + skip_revs, + ) + + # No more revisions between 'start_rev' and 'end_rev', so + # bisection is complete. + # + # This is determined by finding all valid revisions between 'start_rev' + # and 'end_rev' and that are NOT in the 'pending' and 'skipped' set. + if not revisions: + if pending_revs: + # Some tryjobs are not finished which may change the actual bad + # commit/revision when those tryjobs are finished. 
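+            # Rather than declaring 'end_rev' to be the first bad revision,
+            # raise and list the pending (and skipped) revisions so they can
+            # be triaged first.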
+ no_revisions_message = ( + f"No revisions between start {start_rev} " + f"and end {end_rev} to create tryjobs\n" + ) + + if pending_revs: + no_revisions_message += ( + "The following tryjobs are pending:\n" + + "\n".join(str(rev) for rev in pending_revs) + + "\n" + ) + + if skip_revs: + no_revisions_message += ( + "The following tryjobs were skipped:\n" + + "\n".join(str(rev) for rev in skip_revs) + + "\n" + ) + + raise ValueError(no_revisions_message) + + print(f"Finished bisecting for {args_output.last_tested}") + if args_output.src_path: + bad_llvm_hash = get_llvm_hash.GetGitHashFrom( + args_output.src_path, end_rev + ) + else: + bad_llvm_hash = get_llvm_hash.LLVMHash().GetLLVMHash(end_rev) + print( + f"The bad revision is {end_rev} and its commit hash is " + f"{bad_llvm_hash}" + ) + if skip_revs: + skip_revs_message = ( + "\nThe following revisions were skipped:\n" + + "\n".join(str(rev) for rev in skip_revs) + ) + print(skip_revs_message) + + if args_output.cleanup: + # Abandon all the CLs created for bisection + gerrit = os.path.join( + args_output.chroot_path, "chromite/bin/gerrit" + ) + for build in bisect_state["jobs"]: + try: + subprocess.check_output( + [gerrit, "abandon", str(build["cl"])], + stderr=subprocess.STDOUT, + encoding="utf-8", + ) + except subprocess.CalledProcessError as err: + # the CL may have been abandoned + if "chromite.lib.gob_util.GOBError" not in err.output: + raise + + return BisectionExitStatus.BISECTION_COMPLETE.value + + for rev in revisions: + if ( + update_tryjob_status.FindTryjobIndex(rev, bisect_state["jobs"]) + is not None + ): + raise ValueError(f'Revision {rev} exists already in "jobs"') + + Bisect( + revisions, + git_hashes, + bisect_state, + args_output.last_tested, + update_chromeos_llvm_hash.DEFAULT_PACKAGES, + args_output.chroot_path, + patch_metadata_file, + args_output.extra_change_lists, + args_output.options, + args_output.builder, + args_output.verbose, + ) + + +if __name__ == "__main__": + sys.exit(main(GetCommandLineArgs())) diff --git a/llvm_tools/llvm_bisection_unittest.py b/llvm_tools/llvm_bisection_unittest.py index 06807ecb..0dfdef54 100755 --- a/llvm_tools/llvm_bisection_unittest.py +++ b/llvm_tools/llvm_bisection_unittest.py @@ -25,489 +25,562 @@ import test_helpers class LLVMBisectionTest(unittest.TestCase): - """Unittests for LLVM bisection.""" - - def testGetRemainingRangePassed(self): - start = 100 - end = 150 - - test_tryjobs = [{ - 'rev': 110, - 'status': 'good', - 'link': 'https://some_tryjob_1_url.com' - }, { - 'rev': 120, - 'status': 'good', - 'link': 'https://some_tryjob_2_url.com' - }, { - 'rev': 130, - 'status': 'pending', - 'link': 'https://some_tryjob_3_url.com' - }, { - 'rev': 135, - 'status': 'skip', - 'link': 'https://some_tryjob_4_url.com' - }, { - 'rev': 140, - 'status': 'bad', - 'link': 'https://some_tryjob_5_url.com' - }] - - # Tuple consists of the new good revision, the new bad revision, a set of - # 'pending' revisions, and a set of 'skip' revisions. 
- expected_revisions_tuple = 120, 140, {130}, {135} - - self.assertEqual( - llvm_bisection.GetRemainingRange(start, end, test_tryjobs), - expected_revisions_tuple) - - def testGetRemainingRangeFailedWithMissingStatus(self): - start = 100 - end = 150 - - test_tryjobs = [{ - 'rev': 105, - 'status': 'good', - 'link': 'https://some_tryjob_1_url.com' - }, { - 'rev': 120, - 'status': None, - 'link': 'https://some_tryjob_2_url.com' - }, { - 'rev': 140, - 'status': 'bad', - 'link': 'https://some_tryjob_3_url.com' - }] - - with self.assertRaises(ValueError) as err: - llvm_bisection.GetRemainingRange(start, end, test_tryjobs) - - error_message = ('"status" is missing or has no value, please ' - 'go to %s and update it' % test_tryjobs[1]['link']) - self.assertEqual(str(err.exception), error_message) - - def testGetRemainingRangeFailedWithInvalidRange(self): - start = 100 - end = 150 - - test_tryjobs = [{ - 'rev': 110, - 'status': 'bad', - 'link': 'https://some_tryjob_1_url.com' - }, { - 'rev': 125, - 'status': 'skip', - 'link': 'https://some_tryjob_2_url.com' - }, { - 'rev': 140, - 'status': 'good', - 'link': 'https://some_tryjob_3_url.com' - }] - - with self.assertRaises(AssertionError) as err: - llvm_bisection.GetRemainingRange(start, end, test_tryjobs) - - expected_error_message = ('Bisection is broken because %d (good) is >= ' - '%d (bad)' % - (test_tryjobs[2]['rev'], test_tryjobs[0]['rev'])) - - self.assertEqual(str(err.exception), expected_error_message) - - @mock.patch.object(get_llvm_hash, 'GetGitHashFrom') - def testGetCommitsBetweenPassed(self, mock_get_git_hash): - start = git_llvm_rev.base_llvm_revision - end = start + 10 - test_pending_revisions = {start + 7} - test_skip_revisions = { - start + 1, start + 2, start + 4, start + 8, start + 9 - } - parallel = 3 - abs_path_to_src = '/abs/path/to/src' - - revs = ['a123testhash3', 'a123testhash5'] - mock_get_git_hash.side_effect = revs - - git_hashes = [ - git_llvm_rev.base_llvm_revision + 3, - git_llvm_rev.base_llvm_revision + 5 - ] - - self.assertEqual( - llvm_bisection.GetCommitsBetween(start, end, parallel, abs_path_to_src, - test_pending_revisions, - test_skip_revisions), - (git_hashes, revs)) - - def testLoadStatusFilePassedWithExistingFile(self): - start = 100 - end = 150 - - test_bisect_state = {'start': start, 'end': end, 'jobs': []} - - # Simulate that the status file exists. - with test_helpers.CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - test_helpers.WritePrettyJsonFile(test_bisect_state, f) - - self.assertEqual( - llvm_bisection.LoadStatusFile(temp_json_file, start, end), - test_bisect_state) - - def testLoadStatusFilePassedWithoutExistingFile(self): - start = 200 - end = 250 - - expected_bisect_state = {'start': start, 'end': end, 'jobs': []} - - last_tested = '/abs/path/to/file_that_does_not_exist.json' - - self.assertEqual(llvm_bisection.LoadStatusFile(last_tested, start, end), - expected_bisect_state) - - @mock.patch.object(modify_a_tryjob, 'AddTryjob') - def testBisectPassed(self, mock_add_tryjob): - - git_hash_list = ['a123testhash1', 'a123testhash2', 'a123testhash3'] - revisions_list = [102, 104, 106] - - # Simulate behavior of `AddTryjob()` when successfully launched a tryjob for - # the updated packages. 
- @test_helpers.CallCountsToMockFunctions - def MockAddTryjob(call_count, _packages, _git_hash, _revision, - _chroot_path, _patch_file, _extra_cls, _options, - _builder, _verbose, _svn_revision): - - if call_count < 2: - return {'rev': revisions_list[call_count], 'status': 'pending'} - - # Simulate an exception happened along the way when updating the - # packages' `LLVM_NEXT_HASH`. - if call_count == 2: - raise ValueError('Unable to launch tryjob') - - assert False, 'Called `AddTryjob()` more than expected.' - - # Use the test function to simulate `AddTryjob()`. - mock_add_tryjob.side_effect = MockAddTryjob - - start = 100 - end = 110 - - bisection_contents = {'start': start, 'end': end, 'jobs': []} - - args_output = test_helpers.ArgsOutputTest() - - packages = ['sys-devel/llvm'] - patch_file = '/abs/path/to/PATCHES.json' - - # Create a temporary .JSON file to simulate a status file for bisection. - with test_helpers.CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - test_helpers.WritePrettyJsonFile(bisection_contents, f) - - # Verify that the status file is updated when an exception happened when - # attempting to launch a revision (i.e. progress is not lost). - with self.assertRaises(ValueError) as err: - llvm_bisection.Bisect(revisions_list, git_hash_list, - bisection_contents, temp_json_file, packages, - args_output.chroot_path, patch_file, - args_output.extra_change_lists, - args_output.options, args_output.builders, - args_output.verbose) - - expected_bisection_contents = { - 'start': - start, - 'end': - end, - 'jobs': [{ - 'rev': revisions_list[0], - 'status': 'pending' - }, { - 'rev': revisions_list[1], - 'status': 'pending' - }] - } - - # Verify that the launched tryjobs were added to the status file when - # an exception happened. 
- with open(temp_json_file) as f: - json_contents = json.load(f) - - self.assertEqual(json_contents, expected_bisection_contents) - - self.assertEqual(str(err.exception), 'Unable to launch tryjob') - - self.assertEqual(mock_add_tryjob.call_count, 3) - - @mock.patch.object(subprocess, 'check_output', return_value=None) - @mock.patch.object(get_llvm_hash.LLVMHash, - 'GetLLVMHash', - return_value='a123testhash4') - @mock.patch.object(llvm_bisection, 'GetCommitsBetween') - @mock.patch.object(llvm_bisection, 'GetRemainingRange') - @mock.patch.object(llvm_bisection, 'LoadStatusFile') - @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True) - def testMainPassed(self, mock_outside_chroot, mock_load_status_file, - mock_get_range, mock_get_revision_and_hash_list, - _mock_get_bad_llvm_hash, mock_abandon_cl): - - start = 500 - end = 502 - cl = 1 - - bisect_state = { - 'start': start, - 'end': end, - 'jobs': [{ - 'rev': 501, - 'status': 'bad', - 'cl': cl - }] - } - - skip_revisions = {501} - pending_revisions = {} - - mock_load_status_file.return_value = bisect_state - - mock_get_range.return_value = (start, end, pending_revisions, - skip_revisions) - - mock_get_revision_and_hash_list.return_value = [], [] - - args_output = test_helpers.ArgsOutputTest() - args_output.start_rev = start - args_output.end_rev = end - args_output.parallel = 3 - args_output.src_path = None - args_output.chroot_path = 'somepath' - args_output.cleanup = True - - self.assertEqual( - llvm_bisection.main(args_output), - llvm_bisection.BisectionExitStatus.BISECTION_COMPLETE.value) - - mock_outside_chroot.assert_called_once() - - mock_load_status_file.assert_called_once() - - mock_get_range.assert_called_once() - - mock_get_revision_and_hash_list.assert_called_once() - - mock_abandon_cl.assert_called_once() - self.assertEqual( - mock_abandon_cl.call_args, - mock.call( - [ - os.path.join(args_output.chroot_path, 'chromite/bin/gerrit'), - 'abandon', - str(cl), - ], - stderr=subprocess.STDOUT, - encoding='utf-8', - )) - - @mock.patch.object(llvm_bisection, 'LoadStatusFile') - @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True) - def testMainFailedWithInvalidRange(self, mock_outside_chroot, - mock_load_status_file): - - start = 500 - end = 502 - - bisect_state = { - 'start': start - 1, - 'end': end, - } - - mock_load_status_file.return_value = bisect_state - - args_output = test_helpers.ArgsOutputTest() - args_output.start_rev = start - args_output.end_rev = end - args_output.parallel = 3 - args_output.src_path = None + """Unittests for LLVM bisection.""" + + def testGetRemainingRangePassed(self): + start = 100 + end = 150 + + test_tryjobs = [ + { + "rev": 110, + "status": "good", + "link": "https://some_tryjob_1_url.com", + }, + { + "rev": 120, + "status": "good", + "link": "https://some_tryjob_2_url.com", + }, + { + "rev": 130, + "status": "pending", + "link": "https://some_tryjob_3_url.com", + }, + { + "rev": 135, + "status": "skip", + "link": "https://some_tryjob_4_url.com", + }, + { + "rev": 140, + "status": "bad", + "link": "https://some_tryjob_5_url.com", + }, + ] + + # Tuple consists of the new good revision, the new bad revision, a set of + # 'pending' revisions, and a set of 'skip' revisions. 
+ expected_revisions_tuple = 120, 140, {130}, {135} + + self.assertEqual( + llvm_bisection.GetRemainingRange(start, end, test_tryjobs), + expected_revisions_tuple, + ) + + def testGetRemainingRangeFailedWithMissingStatus(self): + start = 100 + end = 150 + + test_tryjobs = [ + { + "rev": 105, + "status": "good", + "link": "https://some_tryjob_1_url.com", + }, + { + "rev": 120, + "status": None, + "link": "https://some_tryjob_2_url.com", + }, + { + "rev": 140, + "status": "bad", + "link": "https://some_tryjob_3_url.com", + }, + ] + + with self.assertRaises(ValueError) as err: + llvm_bisection.GetRemainingRange(start, end, test_tryjobs) + + error_message = ( + '"status" is missing or has no value, please ' + "go to %s and update it" % test_tryjobs[1]["link"] + ) + self.assertEqual(str(err.exception), error_message) + + def testGetRemainingRangeFailedWithInvalidRange(self): + start = 100 + end = 150 + + test_tryjobs = [ + { + "rev": 110, + "status": "bad", + "link": "https://some_tryjob_1_url.com", + }, + { + "rev": 125, + "status": "skip", + "link": "https://some_tryjob_2_url.com", + }, + { + "rev": 140, + "status": "good", + "link": "https://some_tryjob_3_url.com", + }, + ] + + with self.assertRaises(AssertionError) as err: + llvm_bisection.GetRemainingRange(start, end, test_tryjobs) + + expected_error_message = ( + "Bisection is broken because %d (good) is >= " + "%d (bad)" % (test_tryjobs[2]["rev"], test_tryjobs[0]["rev"]) + ) + + self.assertEqual(str(err.exception), expected_error_message) + + @mock.patch.object(get_llvm_hash, "GetGitHashFrom") + def testGetCommitsBetweenPassed(self, mock_get_git_hash): + start = git_llvm_rev.base_llvm_revision + end = start + 10 + test_pending_revisions = {start + 7} + test_skip_revisions = { + start + 1, + start + 2, + start + 4, + start + 8, + start + 9, + } + parallel = 3 + abs_path_to_src = "/abs/path/to/src" + + revs = ["a123testhash3", "a123testhash5"] + mock_get_git_hash.side_effect = revs + + git_hashes = [ + git_llvm_rev.base_llvm_revision + 3, + git_llvm_rev.base_llvm_revision + 5, + ] + + self.assertEqual( + llvm_bisection.GetCommitsBetween( + start, + end, + parallel, + abs_path_to_src, + test_pending_revisions, + test_skip_revisions, + ), + (git_hashes, revs), + ) + + def testLoadStatusFilePassedWithExistingFile(self): + start = 100 + end = 150 + + test_bisect_state = {"start": start, "end": end, "jobs": []} + + # Simulate that the status file exists. + with test_helpers.CreateTemporaryJsonFile() as temp_json_file: + with open(temp_json_file, "w") as f: + test_helpers.WritePrettyJsonFile(test_bisect_state, f) + + self.assertEqual( + llvm_bisection.LoadStatusFile(temp_json_file, start, end), + test_bisect_state, + ) + + def testLoadStatusFilePassedWithoutExistingFile(self): + start = 200 + end = 250 + + expected_bisect_state = {"start": start, "end": end, "jobs": []} + + last_tested = "/abs/path/to/file_that_does_not_exist.json" + + self.assertEqual( + llvm_bisection.LoadStatusFile(last_tested, start, end), + expected_bisect_state, + ) + + @mock.patch.object(modify_a_tryjob, "AddTryjob") + def testBisectPassed(self, mock_add_tryjob): + + git_hash_list = ["a123testhash1", "a123testhash2", "a123testhash3"] + revisions_list = [102, 104, 106] + + # Simulate behavior of `AddTryjob()` when successfully launched a tryjob for + # the updated packages. 
+ @test_helpers.CallCountsToMockFunctions + def MockAddTryjob( + call_count, + _packages, + _git_hash, + _revision, + _chroot_path, + _patch_file, + _extra_cls, + _options, + _builder, + _verbose, + _svn_revision, + ): + + if call_count < 2: + return {"rev": revisions_list[call_count], "status": "pending"} + + # Simulate an exception happened along the way when updating the + # packages' `LLVM_NEXT_HASH`. + if call_count == 2: + raise ValueError("Unable to launch tryjob") + + assert False, "Called `AddTryjob()` more than expected." + + # Use the test function to simulate `AddTryjob()`. + mock_add_tryjob.side_effect = MockAddTryjob + + start = 100 + end = 110 + + bisection_contents = {"start": start, "end": end, "jobs": []} + + args_output = test_helpers.ArgsOutputTest() + + packages = ["sys-devel/llvm"] + patch_file = "/abs/path/to/PATCHES.json" + + # Create a temporary .JSON file to simulate a status file for bisection. + with test_helpers.CreateTemporaryJsonFile() as temp_json_file: + with open(temp_json_file, "w") as f: + test_helpers.WritePrettyJsonFile(bisection_contents, f) + + # Verify that the status file is updated when an exception happened when + # attempting to launch a revision (i.e. progress is not lost). + with self.assertRaises(ValueError) as err: + llvm_bisection.Bisect( + revisions_list, + git_hash_list, + bisection_contents, + temp_json_file, + packages, + args_output.chroot_path, + patch_file, + args_output.extra_change_lists, + args_output.options, + args_output.builders, + args_output.verbose, + ) + + expected_bisection_contents = { + "start": start, + "end": end, + "jobs": [ + {"rev": revisions_list[0], "status": "pending"}, + {"rev": revisions_list[1], "status": "pending"}, + ], + } + + # Verify that the launched tryjobs were added to the status file when + # an exception happened. 
+ with open(temp_json_file) as f: + json_contents = json.load(f) + + self.assertEqual(json_contents, expected_bisection_contents) + + self.assertEqual(str(err.exception), "Unable to launch tryjob") + + self.assertEqual(mock_add_tryjob.call_count, 3) + + @mock.patch.object(subprocess, "check_output", return_value=None) + @mock.patch.object( + get_llvm_hash.LLVMHash, "GetLLVMHash", return_value="a123testhash4" + ) + @mock.patch.object(llvm_bisection, "GetCommitsBetween") + @mock.patch.object(llvm_bisection, "GetRemainingRange") + @mock.patch.object(llvm_bisection, "LoadStatusFile") + @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True) + def testMainPassed( + self, + mock_outside_chroot, + mock_load_status_file, + mock_get_range, + mock_get_revision_and_hash_list, + _mock_get_bad_llvm_hash, + mock_abandon_cl, + ): + + start = 500 + end = 502 + cl = 1 + + bisect_state = { + "start": start, + "end": end, + "jobs": [{"rev": 501, "status": "bad", "cl": cl}], + } + + skip_revisions = {501} + pending_revisions = {} + + mock_load_status_file.return_value = bisect_state + + mock_get_range.return_value = ( + start, + end, + pending_revisions, + skip_revisions, + ) + + mock_get_revision_and_hash_list.return_value = [], [] + + args_output = test_helpers.ArgsOutputTest() + args_output.start_rev = start + args_output.end_rev = end + args_output.parallel = 3 + args_output.src_path = None + args_output.chroot_path = "somepath" + args_output.cleanup = True + + self.assertEqual( + llvm_bisection.main(args_output), + llvm_bisection.BisectionExitStatus.BISECTION_COMPLETE.value, + ) + + mock_outside_chroot.assert_called_once() + + mock_load_status_file.assert_called_once() + + mock_get_range.assert_called_once() + + mock_get_revision_and_hash_list.assert_called_once() + + mock_abandon_cl.assert_called_once() + self.assertEqual( + mock_abandon_cl.call_args, + mock.call( + [ + os.path.join( + args_output.chroot_path, "chromite/bin/gerrit" + ), + "abandon", + str(cl), + ], + stderr=subprocess.STDOUT, + encoding="utf-8", + ), + ) + + @mock.patch.object(llvm_bisection, "LoadStatusFile") + @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True) + def testMainFailedWithInvalidRange( + self, mock_outside_chroot, mock_load_status_file + ): + + start = 500 + end = 502 + + bisect_state = { + "start": start - 1, + "end": end, + } + + mock_load_status_file.return_value = bisect_state + + args_output = test_helpers.ArgsOutputTest() + args_output.start_rev = start + args_output.end_rev = end + args_output.parallel = 3 + args_output.src_path = None + + with self.assertRaises(ValueError) as err: + llvm_bisection.main(args_output) + + error_message = ( + f"The start {start} or the end {end} version provided is " + f'different than "start" {bisect_state["start"]} or "end" ' + f'{bisect_state["end"]} in the .JSON file' + ) + + self.assertEqual(str(err.exception), error_message) + + mock_outside_chroot.assert_called_once() + + mock_load_status_file.assert_called_once() + + @mock.patch.object(llvm_bisection, "GetCommitsBetween") + @mock.patch.object(llvm_bisection, "GetRemainingRange") + @mock.patch.object(llvm_bisection, "LoadStatusFile") + @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True) + def testMainFailedWithPendingBuilds( + self, + mock_outside_chroot, + mock_load_status_file, + mock_get_range, + mock_get_revision_and_hash_list, + ): + + start = 500 + end = 502 + rev = 501 + + bisect_state = { + "start": start, + "end": end, + "jobs": [{"rev": rev, "status": "pending"}], + } + + 
skip_revisions = {} + pending_revisions = {rev} + + mock_load_status_file.return_value = bisect_state + + mock_get_range.return_value = ( + start, + end, + pending_revisions, + skip_revisions, + ) + + mock_get_revision_and_hash_list.return_value = [], [] + + args_output = test_helpers.ArgsOutputTest() + args_output.start_rev = start + args_output.end_rev = end + args_output.parallel = 3 + args_output.src_path = None + + with self.assertRaises(ValueError) as err: + llvm_bisection.main(args_output) + + error_message = ( + f"No revisions between start {start} and end {end} to " + "create tryjobs\nThe following tryjobs are pending:\n" + f"{rev}\n" + ) + + self.assertEqual(str(err.exception), error_message) + + mock_outside_chroot.assert_called_once() + + mock_load_status_file.assert_called_once() + + mock_get_range.assert_called_once() + + mock_get_revision_and_hash_list.assert_called_once() + + @mock.patch.object(llvm_bisection, "GetCommitsBetween") + @mock.patch.object(llvm_bisection, "GetRemainingRange") + @mock.patch.object(llvm_bisection, "LoadStatusFile") + @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True) + def testMainFailedWithDuplicateBuilds( + self, + mock_outside_chroot, + mock_load_status_file, + mock_get_range, + mock_get_revision_and_hash_list, + ): + + start = 500 + end = 502 + rev = 501 + git_hash = "a123testhash1" + + bisect_state = { + "start": start, + "end": end, + "jobs": [{"rev": rev, "status": "pending"}], + } - with self.assertRaises(ValueError) as err: - llvm_bisection.main(args_output) + skip_revisions = {} + pending_revisions = {rev} - error_message = ( - f'The start {start} or the end {end} version provided is ' - f'different than "start" {bisect_state["start"]} or "end" ' - f'{bisect_state["end"]} in the .JSON file') - - self.assertEqual(str(err.exception), error_message) - - mock_outside_chroot.assert_called_once() - - mock_load_status_file.assert_called_once() + mock_load_status_file.return_value = bisect_state + + mock_get_range.return_value = ( + start, + end, + pending_revisions, + skip_revisions, + ) - @mock.patch.object(llvm_bisection, 'GetCommitsBetween') - @mock.patch.object(llvm_bisection, 'GetRemainingRange') - @mock.patch.object(llvm_bisection, 'LoadStatusFile') - @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True) - def testMainFailedWithPendingBuilds(self, mock_outside_chroot, - mock_load_status_file, mock_get_range, - mock_get_revision_and_hash_list): + mock_get_revision_and_hash_list.return_value = [rev], [git_hash] + + args_output = test_helpers.ArgsOutputTest() + args_output.start_rev = start + args_output.end_rev = end + args_output.parallel = 3 + args_output.src_path = None + + with self.assertRaises(ValueError) as err: + llvm_bisection.main(args_output) - start = 500 - end = 502 - rev = 501 + error_message = 'Revision %d exists already in "jobs"' % rev + self.assertEqual(str(err.exception), error_message) - bisect_state = { - 'start': start, - 'end': end, - 'jobs': [{ - 'rev': rev, - 'status': 'pending' - }] - } + mock_outside_chroot.assert_called_once() - skip_revisions = {} - pending_revisions = {rev} + mock_load_status_file.assert_called_once() - mock_load_status_file.return_value = bisect_state + mock_get_range.assert_called_once() - mock_get_range.return_value = (start, end, pending_revisions, - skip_revisions) + mock_get_revision_and_hash_list.assert_called_once() - mock_get_revision_and_hash_list.return_value = [], [] - - args_output = test_helpers.ArgsOutputTest() - args_output.start_rev = start - 
args_output.end_rev = end - args_output.parallel = 3 - args_output.src_path = None + @mock.patch.object(subprocess, "check_output", return_value=None) + @mock.patch.object( + get_llvm_hash.LLVMHash, "GetLLVMHash", return_value="a123testhash4" + ) + @mock.patch.object(llvm_bisection, "GetCommitsBetween") + @mock.patch.object(llvm_bisection, "GetRemainingRange") + @mock.patch.object(llvm_bisection, "LoadStatusFile") + @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True) + def testMainFailedToAbandonCL( + self, + mock_outside_chroot, + mock_load_status_file, + mock_get_range, + mock_get_revision_and_hash_list, + _mock_get_bad_llvm_hash, + mock_abandon_cl, + ): - with self.assertRaises(ValueError) as err: - llvm_bisection.main(args_output) + start = 500 + end = 502 - error_message = (f'No revisions between start {start} and end {end} to ' - 'create tryjobs\nThe following tryjobs are pending:\n' - f'{rev}\n') + bisect_state = { + "start": start, + "end": end, + "jobs": [{"rev": 501, "status": "bad", "cl": 0}], + } - self.assertEqual(str(err.exception), error_message) + skip_revisions = {501} + pending_revisions = {} - mock_outside_chroot.assert_called_once() + mock_load_status_file.return_value = bisect_state - mock_load_status_file.assert_called_once() + mock_get_range.return_value = ( + start, + end, + pending_revisions, + skip_revisions, + ) - mock_get_range.assert_called_once() - - mock_get_revision_and_hash_list.assert_called_once() - - @mock.patch.object(llvm_bisection, 'GetCommitsBetween') - @mock.patch.object(llvm_bisection, 'GetRemainingRange') - @mock.patch.object(llvm_bisection, 'LoadStatusFile') - @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True) - def testMainFailedWithDuplicateBuilds(self, mock_outside_chroot, - mock_load_status_file, mock_get_range, - mock_get_revision_and_hash_list): + mock_get_revision_and_hash_list.return_value = ([], []) - start = 500 - end = 502 - rev = 501 - git_hash = 'a123testhash1' + error_message = "Error message." 
+ mock_abandon_cl.side_effect = subprocess.CalledProcessError( + returncode=1, cmd=[], output=error_message + ) + + args_output = test_helpers.ArgsOutputTest() + args_output.start_rev = start + args_output.end_rev = end + args_output.parallel = 3 + args_output.src_path = None + args_output.cleanup = True - bisect_state = { - 'start': start, - 'end': end, - 'jobs': [{ - 'rev': rev, - 'status': 'pending' - }] - } + with self.assertRaises(subprocess.CalledProcessError) as err: + llvm_bisection.main(args_output) - skip_revisions = {} - pending_revisions = {rev} + self.assertEqual(err.exception.output, error_message) - mock_load_status_file.return_value = bisect_state + mock_outside_chroot.assert_called_once() - mock_get_range.return_value = (start, end, pending_revisions, - skip_revisions) + mock_load_status_file.assert_called_once() - mock_get_revision_and_hash_list.return_value = [rev], [git_hash] + mock_get_range.assert_called_once() - args_output = test_helpers.ArgsOutputTest() - args_output.start_rev = start - args_output.end_rev = end - args_output.parallel = 3 - args_output.src_path = None - with self.assertRaises(ValueError) as err: - llvm_bisection.main(args_output) - - error_message = ('Revision %d exists already in "jobs"' % rev) - self.assertEqual(str(err.exception), error_message) - - mock_outside_chroot.assert_called_once() - - mock_load_status_file.assert_called_once() - - mock_get_range.assert_called_once() - - mock_get_revision_and_hash_list.assert_called_once() - - @mock.patch.object(subprocess, 'check_output', return_value=None) - @mock.patch.object(get_llvm_hash.LLVMHash, - 'GetLLVMHash', - return_value='a123testhash4') - @mock.patch.object(llvm_bisection, 'GetCommitsBetween') - @mock.patch.object(llvm_bisection, 'GetRemainingRange') - @mock.patch.object(llvm_bisection, 'LoadStatusFile') - @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True) - def testMainFailedToAbandonCL(self, mock_outside_chroot, - mock_load_status_file, mock_get_range, - mock_get_revision_and_hash_list, - _mock_get_bad_llvm_hash, mock_abandon_cl): - - start = 500 - end = 502 - - bisect_state = { - 'start': start, - 'end': end, - 'jobs': [{ - 'rev': 501, - 'status': 'bad', - 'cl': 0 - }] - } - - skip_revisions = {501} - pending_revisions = {} - - mock_load_status_file.return_value = bisect_state - - mock_get_range.return_value = (start, end, pending_revisions, - skip_revisions) - - mock_get_revision_and_hash_list.return_value = ([], []) - - error_message = 'Error message.' 
- mock_abandon_cl.side_effect = subprocess.CalledProcessError( - returncode=1, cmd=[], output=error_message) - - args_output = test_helpers.ArgsOutputTest() - args_output.start_rev = start - args_output.end_rev = end - args_output.parallel = 3 - args_output.src_path = None - args_output.cleanup = True - - with self.assertRaises(subprocess.CalledProcessError) as err: - llvm_bisection.main(args_output) - - self.assertEqual(err.exception.output, error_message) - - mock_outside_chroot.assert_called_once() - - mock_load_status_file.assert_called_once() - - mock_get_range.assert_called_once() - - -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/llvm_tools/llvm_project.py b/llvm_tools/llvm_project.py index 3dba9ffe..85b4a0c2 100644 --- a/llvm_tools/llvm_project.py +++ b/llvm_tools/llvm_project.py @@ -17,48 +17,59 @@ import git_llvm_rev def get_location() -> str: - """Gets the absolute path for llvm-project-copy.""" - my_dir = os.path.dirname(os.path.abspath(__file__)) - return os.path.join(my_dir, 'llvm-project-copy') + """Gets the absolute path for llvm-project-copy.""" + my_dir = os.path.dirname(os.path.abspath(__file__)) + return os.path.join(my_dir, "llvm-project-copy") def ensure_up_to_date(): - """Ensures that llvm-project-copy is checked out and semi-up-to-date.""" + """Ensures that llvm-project-copy is checked out and semi-up-to-date.""" - checkout = get_location() - if not os.path.isdir(checkout): - print('No llvm-project exists locally; syncing it. This takes a while.', - file=sys.stderr) - actual_checkout = get_llvm_hash.GetAndUpdateLLVMProjectInLLVMTools() - assert checkout == actual_checkout, '%s != %s' % (actual_checkout, - checkout) + checkout = get_location() + if not os.path.isdir(checkout): + print( + "No llvm-project exists locally; syncing it. This takes a while.", + file=sys.stderr, + ) + actual_checkout = get_llvm_hash.GetAndUpdateLLVMProjectInLLVMTools() + assert checkout == actual_checkout, "%s != %s" % ( + actual_checkout, + checkout, + ) - commit_timestamp = subprocess.check_output( - [ - 'git', 'log', '-n1', '--format=%ct', - 'origin/' + git_llvm_rev.MAIN_BRANCH - ], - cwd=checkout, - encoding='utf-8', - ) + commit_timestamp = subprocess.check_output( + [ + "git", + "log", + "-n1", + "--format=%ct", + "origin/" + git_llvm_rev.MAIN_BRANCH, + ], + cwd=checkout, + encoding="utf-8", + ) - commit_time = datetime.datetime.fromtimestamp(int(commit_timestamp.strip())) - now = datetime.datetime.now() + commit_time = datetime.datetime.fromtimestamp(int(commit_timestamp.strip())) + now = datetime.datetime.now() - time_since_last_commit = now - commit_time + time_since_last_commit = now - commit_time - # Arbitrary, but if it's been more than 2d since we've seen a commit, it's - # probably best to bring us up-to-date. - if time_since_last_commit <= datetime.timedelta(days=2): - return + # Arbitrary, but if it's been more than 2d since we've seen a commit, it's + # probably best to bring us up-to-date. 
+ if time_since_last_commit <= datetime.timedelta(days=2): + return - print('%d days have elapsed since the last commit to %s; auto-syncing' % - (time_since_last_commit.days, checkout), - file=sys.stderr) + print( + "%d days have elapsed since the last commit to %s; auto-syncing" + % (time_since_last_commit.days, checkout), + file=sys.stderr, + ) - result = subprocess.run(['git', 'fetch', 'origin'], - check=False, - cwd=checkout) - if result.returncode: - print('Sync failed somehow; hoping that things are fresh enough, then...', - file=sys.stderr) + result = subprocess.run( + ["git", "fetch", "origin"], check=False, cwd=checkout + ) + if result.returncode: + print( + "Sync failed somehow; hoping that things are fresh enough, then...", + file=sys.stderr, + ) diff --git a/llvm_tools/modify_a_tryjob.py b/llvm_tools/modify_a_tryjob.py index 53f783ba..6ef12008 100755 --- a/llvm_tools/modify_a_tryjob.py +++ b/llvm_tools/modify_a_tryjob.py @@ -23,274 +23,360 @@ import update_tryjob_status class ModifyTryjob(enum.Enum): - """Options to modify a tryjob.""" + """Options to modify a tryjob.""" - REMOVE = 'remove' - RELAUNCH = 'relaunch' - ADD = 'add' + REMOVE = "remove" + RELAUNCH = "relaunch" + ADD = "add" def GetCommandLineArgs(): - """Parses the command line for the command line arguments.""" - - # Default path to the chroot if a path is not specified. - cros_root = os.path.expanduser('~') - cros_root = os.path.join(cros_root, 'chromiumos') - - # Create parser and add optional command-line arguments. - parser = argparse.ArgumentParser( - description='Removes, relaunches, or adds a tryjob.') - - # Add argument for the JSON file to use for the update of a tryjob. - parser.add_argument( - '--status_file', - required=True, - help='The absolute path to the JSON file that contains the tryjobs used ' - 'for bisecting LLVM.') - - # Add argument that determines what action to take on the revision specified. - parser.add_argument( - '--modify_tryjob', - required=True, - choices=[modify_tryjob.value for modify_tryjob in ModifyTryjob], - help='What action to perform on the tryjob.') - - # Add argument that determines which revision to search for in the list of - # tryjobs. - parser.add_argument('--revision', - required=True, - type=int, - help='The revision to either remove or relaunch.') - - # Add argument for other change lists that want to run alongside the tryjob. - parser.add_argument( - '--extra_change_lists', - type=int, - nargs='+', - help='change lists that would like to be run alongside the change list ' - 'of updating the packages') - - # Add argument for custom options for the tryjob. - parser.add_argument('--options', - required=False, - nargs='+', - help='options to use for the tryjob testing') - - # Add argument for the builder to use for the tryjob. - parser.add_argument('--builder', - help='builder to use for the tryjob testing') - - # Add argument for a specific chroot path. - parser.add_argument('--chroot_path', - default=cros_root, - help='the path to the chroot (default: %(default)s)') - - # Add argument for whether to display command contents to `stdout`. 
- parser.add_argument('--verbose', - action='store_true', - help='display contents of a command to the terminal ' - '(default: %(default)s)') - - args_output = parser.parse_args() - - if (not os.path.isfile(args_output.status_file) - or not args_output.status_file.endswith('.json')): - raise ValueError('File does not exist or does not ending in ".json" ' - ': %s' % args_output.status_file) - - if (args_output.modify_tryjob == ModifyTryjob.ADD.value - and not args_output.builder): - raise ValueError('A builder is required for adding a tryjob.') - elif (args_output.modify_tryjob != ModifyTryjob.ADD.value - and args_output.builder): - raise ValueError('Specifying a builder is only available when adding a ' - 'tryjob.') - - return args_output - - -def GetCLAfterUpdatingPackages(packages, git_hash, svn_version, chroot_path, - patch_metadata_file, svn_option): - """Updates the packages' LLVM_NEXT.""" - - change_list = update_chromeos_llvm_hash.UpdatePackages( - packages=packages, - manifest_packages=[], - llvm_variant=update_chromeos_llvm_hash.LLVMVariant.next, - git_hash=git_hash, - svn_version=svn_version, - chroot_path=chroot_path, - mode=failure_modes.FailureModes.DISABLE_PATCHES, - git_hash_source=svn_option, - extra_commit_msg=None) - - print('\nSuccessfully updated packages to %d' % svn_version) - print('Gerrit URL: %s' % change_list.url) - print('Change list number: %d' % change_list.cl_number) - - return change_list - - -def CreateNewTryjobEntryForBisection(cl, extra_cls, options, builder, - chroot_path, cl_url, revision): - """Submits a tryjob and adds additional information.""" - - # Get the tryjob results after submitting the tryjob. - # Format of 'tryjob_results': - # [ - # { - # 'link' : [TRYJOB_LINK], - # 'buildbucket_id' : [BUILDBUCKET_ID], - # 'extra_cls' : [EXTRA_CLS_LIST], - # 'options' : [EXTRA_OPTIONS_LIST], - # 'builder' : [BUILDER_AS_A_LIST] - # } - # ] - tryjob_results = update_packages_and_run_tests.RunTryJobs( - cl, extra_cls, options, [builder], chroot_path) - print('\nTryjob:') - print(tryjob_results[0]) - - # Add necessary information about the tryjob. - tryjob_results[0]['url'] = cl_url - tryjob_results[0]['rev'] = revision - tryjob_results[0]['status'] = update_tryjob_status.TryjobStatus.PENDING.value - tryjob_results[0]['cl'] = cl - - return tryjob_results[0] - - -def AddTryjob(packages, git_hash, revision, chroot_path, patch_metadata_file, - extra_cls, options, builder, verbose, svn_option): - """Submits a tryjob.""" - - update_chromeos_llvm_hash.verbose = verbose - - change_list = GetCLAfterUpdatingPackages(packages, git_hash, revision, - chroot_path, patch_metadata_file, - svn_option) - - tryjob_dict = CreateNewTryjobEntryForBisection(change_list.cl_number, - extra_cls, options, builder, - chroot_path, change_list.url, - revision) - - return tryjob_dict - - -def PerformTryjobModification(revision, modify_tryjob, status_file, extra_cls, - options, builder, chroot_path, verbose): - """Removes, relaunches, or adds a tryjob. - - Args: - revision: The revision associated with the tryjob. - modify_tryjob: What action to take on the tryjob. - Ex: ModifyTryjob.REMOVE, ModifyTryjob.RELAUNCH, ModifyTryjob.ADD - status_file: The .JSON file that contains the tryjobs. - extra_cls: Extra change lists to be run alongside tryjob - options: Extra options to pass into 'cros tryjob'. - builder: The builder to use for 'cros tryjob'. - chroot_path: The absolute path to the chroot (used by 'cros tryjob' when - relaunching a tryjob). 
- verbose: Determines whether to print the contents of a command to `stdout`. - """ - - # Format of 'bisect_contents': - # { - # 'start': [START_REVISION_OF_BISECTION] - # 'end': [END_REVISION_OF_BISECTION] - # 'jobs' : [ - # {[TRYJOB_INFORMATION]}, - # {[TRYJOB_INFORMATION]}, - # ..., - # {[TRYJOB_INFORMATION]} - # ] - # } - with open(status_file) as tryjobs: - bisect_contents = json.load(tryjobs) - - if not bisect_contents['jobs'] and modify_tryjob != ModifyTryjob.ADD: - sys.exit('No tryjobs in %s' % status_file) - - tryjob_index = update_tryjob_status.FindTryjobIndex(revision, - bisect_contents['jobs']) - - # 'FindTryjobIndex()' returns None if the tryjob was not found. - if tryjob_index is None and modify_tryjob != ModifyTryjob.ADD: - raise ValueError('Unable to find tryjob for %d in %s' % - (revision, status_file)) - - # Determine the action to take based off of 'modify_tryjob'. - if modify_tryjob == ModifyTryjob.REMOVE: - del bisect_contents['jobs'][tryjob_index] - - print('Successfully deleted the tryjob of revision %d' % revision) - elif modify_tryjob == ModifyTryjob.RELAUNCH: - # Need to update the tryjob link and buildbucket ID. + """Parses the command line for the command line arguments.""" + + # Default path to the chroot if a path is not specified. + cros_root = os.path.expanduser("~") + cros_root = os.path.join(cros_root, "chromiumos") + + # Create parser and add optional command-line arguments. + parser = argparse.ArgumentParser( + description="Removes, relaunches, or adds a tryjob." + ) + + # Add argument for the JSON file to use for the update of a tryjob. + parser.add_argument( + "--status_file", + required=True, + help="The absolute path to the JSON file that contains the tryjobs used " + "for bisecting LLVM.", + ) + + # Add argument that determines what action to take on the revision specified. + parser.add_argument( + "--modify_tryjob", + required=True, + choices=[modify_tryjob.value for modify_tryjob in ModifyTryjob], + help="What action to perform on the tryjob.", + ) + + # Add argument that determines which revision to search for in the list of + # tryjobs. + parser.add_argument( + "--revision", + required=True, + type=int, + help="The revision to either remove or relaunch.", + ) + + # Add argument for other change lists that want to run alongside the tryjob. + parser.add_argument( + "--extra_change_lists", + type=int, + nargs="+", + help="change lists that would like to be run alongside the change list " + "of updating the packages", + ) + + # Add argument for custom options for the tryjob. + parser.add_argument( + "--options", + required=False, + nargs="+", + help="options to use for the tryjob testing", + ) + + # Add argument for the builder to use for the tryjob. + parser.add_argument( + "--builder", help="builder to use for the tryjob testing" + ) + + # Add argument for a specific chroot path. + parser.add_argument( + "--chroot_path", + default=cros_root, + help="the path to the chroot (default: %(default)s)", + ) + + # Add argument for whether to display command contents to `stdout`. 
+    parser.add_argument(
+        "--verbose",
+        action="store_true",
+        help="display contents of a command to the terminal "
+        "(default: %(default)s)",
+    )
+
+    args_output = parser.parse_args()
+
+    if not os.path.isfile(
+        args_output.status_file
+    ) or not args_output.status_file.endswith(".json"):
+        raise ValueError(
+            'File does not exist or does not end in ".json" '
+            ": %s" % args_output.status_file
+        )
+
+    if (
+        args_output.modify_tryjob == ModifyTryjob.ADD.value
+        and not args_output.builder
+    ):
+        raise ValueError("A builder is required for adding a tryjob.")
+    elif (
+        args_output.modify_tryjob != ModifyTryjob.ADD.value
+        and args_output.builder
+    ):
+        raise ValueError(
+            "Specifying a builder is only available when adding a " "tryjob."
+        )
+
+    return args_output
+
+
+def GetCLAfterUpdatingPackages(
+    packages,
+    git_hash,
+    svn_version,
+    chroot_path,
+    patch_metadata_file,
+    svn_option,
+):
+    """Updates the packages' LLVM_NEXT."""
+
+    change_list = update_chromeos_llvm_hash.UpdatePackages(
+        packages=packages,
+        manifest_packages=[],
+        llvm_variant=update_chromeos_llvm_hash.LLVMVariant.next,
+        git_hash=git_hash,
+        svn_version=svn_version,
+        chroot_path=chroot_path,
+        mode=failure_modes.FailureModes.DISABLE_PATCHES,
+        git_hash_source=svn_option,
+        extra_commit_msg=None,
+    )
+
+    print("\nSuccessfully updated packages to %d" % svn_version)
+    print("Gerrit URL: %s" % change_list.url)
+    print("Change list number: %d" % change_list.cl_number)
+
+    return change_list
+
+
+def CreateNewTryjobEntryForBisection(
+    cl, extra_cls, options, builder, chroot_path, cl_url, revision
+):
+    """Submits a tryjob and adds additional information."""
+
+    # Get the tryjob results after submitting the tryjob.
+    # Format of 'tryjob_results':
+    # [
+    #   {
+    #     'link' : [TRYJOB_LINK],
+    #     'buildbucket_id' : [BUILDBUCKET_ID],
+    #     'extra_cls' : [EXTRA_CLS_LIST],
+    #     'options' : [EXTRA_OPTIONS_LIST],
+    #     'builder' : [BUILDER_AS_A_LIST]
+    #   }
+    # ]
     tryjob_results = update_packages_and_run_tests.RunTryJobs(
-        bisect_contents['jobs'][tryjob_index]['cl'],
-        bisect_contents['jobs'][tryjob_index]['extra_cls'],
-        bisect_contents['jobs'][tryjob_index]['options'],
-        bisect_contents['jobs'][tryjob_index]['builder'], chroot_path)
-
-    bisect_contents['jobs'][tryjob_index][
-        'status'] = update_tryjob_status.TryjobStatus.PENDING.value
-    bisect_contents['jobs'][tryjob_index]['link'] = tryjob_results[0]['link']
-    bisect_contents['jobs'][tryjob_index]['buildbucket_id'] = tryjob_results[
-        0]['buildbucket_id']
-
-    print('Successfully relaunched the tryjob for revision %d and updated '
-          'the tryjob link to %s' % (revision, tryjob_results[0]['link']))
-  elif modify_tryjob == ModifyTryjob.ADD:
-    # Tryjob exists already.
-    if tryjob_index is not None:
-      raise ValueError('Tryjob already exists (index is %d) in %s.' %
-                       (tryjob_index, status_file))
-
-    # Make sure the revision is within the bounds of the start and end of the
-    # bisection.
- elif bisect_contents['start'] < revision < bisect_contents['end']: - - patch_metadata_file = 'PATCHES.json' - - git_hash, revision = get_llvm_hash.GetLLVMHashAndVersionFromSVNOption( - revision) - - tryjob_dict = AddTryjob(update_chromeos_llvm_hash.DEFAULT_PACKAGES, - git_hash, revision, chroot_path, - patch_metadata_file, extra_cls, options, builder, - verbose, revision) - - bisect_contents['jobs'].append(tryjob_dict) - - print('Successfully added tryjob of revision %d' % revision) + cl, extra_cls, options, [builder], chroot_path + ) + print("\nTryjob:") + print(tryjob_results[0]) + + # Add necessary information about the tryjob. + tryjob_results[0]["url"] = cl_url + tryjob_results[0]["rev"] = revision + tryjob_results[0][ + "status" + ] = update_tryjob_status.TryjobStatus.PENDING.value + tryjob_results[0]["cl"] = cl + + return tryjob_results[0] + + +def AddTryjob( + packages, + git_hash, + revision, + chroot_path, + patch_metadata_file, + extra_cls, + options, + builder, + verbose, + svn_option, +): + """Submits a tryjob.""" + + update_chromeos_llvm_hash.verbose = verbose + + change_list = GetCLAfterUpdatingPackages( + packages, + git_hash, + revision, + chroot_path, + patch_metadata_file, + svn_option, + ) + + tryjob_dict = CreateNewTryjobEntryForBisection( + change_list.cl_number, + extra_cls, + options, + builder, + chroot_path, + change_list.url, + revision, + ) + + return tryjob_dict + + +def PerformTryjobModification( + revision, + modify_tryjob, + status_file, + extra_cls, + options, + builder, + chroot_path, + verbose, +): + """Removes, relaunches, or adds a tryjob. + + Args: + revision: The revision associated with the tryjob. + modify_tryjob: What action to take on the tryjob. + Ex: ModifyTryjob.REMOVE, ModifyTryjob.RELAUNCH, ModifyTryjob.ADD + status_file: The .JSON file that contains the tryjobs. + extra_cls: Extra change lists to be run alongside tryjob + options: Extra options to pass into 'cros tryjob'. + builder: The builder to use for 'cros tryjob'. + chroot_path: The absolute path to the chroot (used by 'cros tryjob' when + relaunching a tryjob). + verbose: Determines whether to print the contents of a command to `stdout`. + """ + + # Format of 'bisect_contents': + # { + # 'start': [START_REVISION_OF_BISECTION] + # 'end': [END_REVISION_OF_BISECTION] + # 'jobs' : [ + # {[TRYJOB_INFORMATION]}, + # {[TRYJOB_INFORMATION]}, + # ..., + # {[TRYJOB_INFORMATION]} + # ] + # } + with open(status_file) as tryjobs: + bisect_contents = json.load(tryjobs) + + if not bisect_contents["jobs"] and modify_tryjob != ModifyTryjob.ADD: + sys.exit("No tryjobs in %s" % status_file) + + tryjob_index = update_tryjob_status.FindTryjobIndex( + revision, bisect_contents["jobs"] + ) + + # 'FindTryjobIndex()' returns None if the tryjob was not found. + if tryjob_index is None and modify_tryjob != ModifyTryjob.ADD: + raise ValueError( + "Unable to find tryjob for %d in %s" % (revision, status_file) + ) + + # Determine the action to take based off of 'modify_tryjob'. + if modify_tryjob == ModifyTryjob.REMOVE: + del bisect_contents["jobs"][tryjob_index] + + print("Successfully deleted the tryjob of revision %d" % revision) + elif modify_tryjob == ModifyTryjob.RELAUNCH: + # Need to update the tryjob link and buildbucket ID. 
+ tryjob_results = update_packages_and_run_tests.RunTryJobs( + bisect_contents["jobs"][tryjob_index]["cl"], + bisect_contents["jobs"][tryjob_index]["extra_cls"], + bisect_contents["jobs"][tryjob_index]["options"], + bisect_contents["jobs"][tryjob_index]["builder"], + chroot_path, + ) + + bisect_contents["jobs"][tryjob_index][ + "status" + ] = update_tryjob_status.TryjobStatus.PENDING.value + bisect_contents["jobs"][tryjob_index]["link"] = tryjob_results[0][ + "link" + ] + bisect_contents["jobs"][tryjob_index][ + "buildbucket_id" + ] = tryjob_results[0]["buildbucket_id"] + + print( + "Successfully relaunched the tryjob for revision %d and updated " + "the tryjob link to %s" % (revision, tryjob_results[0]["link"]) + ) + elif modify_tryjob == ModifyTryjob.ADD: + # Tryjob exists already. + if tryjob_index is not None: + raise ValueError( + "Tryjob already exists (index is %d) in %s." + % (tryjob_index, status_file) + ) + + # Make sure the revision is within the bounds of the start and end of the + # bisection. + elif bisect_contents["start"] < revision < bisect_contents["end"]: + + patch_metadata_file = "PATCHES.json" + + ( + git_hash, + revision, + ) = get_llvm_hash.GetLLVMHashAndVersionFromSVNOption(revision) + + tryjob_dict = AddTryjob( + update_chromeos_llvm_hash.DEFAULT_PACKAGES, + git_hash, + revision, + chroot_path, + patch_metadata_file, + extra_cls, + options, + builder, + verbose, + revision, + ) + + bisect_contents["jobs"].append(tryjob_dict) + + print("Successfully added tryjob of revision %d" % revision) + else: + raise ValueError("Failed to add tryjob to %s" % status_file) else: - raise ValueError('Failed to add tryjob to %s' % status_file) - else: - raise ValueError('Invalid "modify_tryjob" option provided: %s' % - modify_tryjob) + raise ValueError( + 'Invalid "modify_tryjob" option provided: %s' % modify_tryjob + ) - with open(status_file, 'w') as update_tryjobs: - json.dump(bisect_contents, - update_tryjobs, - indent=4, - separators=(',', ': ')) + with open(status_file, "w") as update_tryjobs: + json.dump( + bisect_contents, update_tryjobs, indent=4, separators=(",", ": ") + ) def main(): - """Removes, relaunches, or adds a tryjob.""" + """Removes, relaunches, or adds a tryjob.""" - chroot.VerifyOutsideChroot() + chroot.VerifyOutsideChroot() - args_output = GetCommandLineArgs() + args_output = GetCommandLineArgs() - PerformTryjobModification(args_output.revision, - ModifyTryjob(args_output.modify_tryjob), - args_output.status_file, - args_output.extra_change_lists, - args_output.options, args_output.builder, - args_output.chroot_path, args_output.verbose) + PerformTryjobModification( + args_output.revision, + ModifyTryjob(args_output.modify_tryjob), + args_output.status_file, + args_output.extra_change_lists, + args_output.options, + args_output.builder, + args_output.chroot_path, + args_output.verbose, + ) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/llvm_tools/modify_a_tryjob_unittest.py b/llvm_tools/modify_a_tryjob_unittest.py index e01506e8..38ebccad 100755 --- a/llvm_tools/modify_a_tryjob_unittest.py +++ b/llvm_tools/modify_a_tryjob_unittest.py @@ -20,389 +20,435 @@ import update_tryjob_status class ModifyATryjobTest(unittest.TestCase): - """Unittests for modifying a tryjob.""" - - def testNoTryjobsInStatusFile(self): - bisect_test_contents = {'start': 369410, 'end': 369420, 'jobs': []} - - # Create a temporary .JSON file to simulate a .JSON file that has bisection - # contents. 
- with test_helpers.CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - test_helpers.WritePrettyJsonFile(bisect_test_contents, f) - - revision_to_modify = 369411 - - args_output = test_helpers.ArgsOutputTest() - args_output.builders = None - args_output.options = None - - # Verify the exception is raised there are no tryjobs in the status file - # and the mode is not to 'add' a tryjob. - with self.assertRaises(SystemExit) as err: - modify_a_tryjob.PerformTryjobModification( - revision_to_modify, modify_a_tryjob.ModifyTryjob.REMOVE, - temp_json_file, args_output.extra_change_lists, - args_output.options, args_output.builders, args_output.chroot_path, - args_output.verbose) - - self.assertEqual(str(err.exception), 'No tryjobs in %s' % temp_json_file) - - # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob - # was not found. - @mock.patch.object(update_tryjob_status, - 'FindTryjobIndex', - return_value=None) - def testNoTryjobIndexFound(self, mock_find_tryjob_index): - bisect_test_contents = { - 'start': 369410, - 'end': 369420, - 'jobs': [{ - 'rev': 369411, - 'status': 'pending', - 'buildbucket_id': 1200 - }] - } - - # Create a temporary .JSON file to simulate a .JSON file that has bisection - # contents. - with test_helpers.CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - test_helpers.WritePrettyJsonFile(bisect_test_contents, f) - - revision_to_modify = 369412 - - args_output = test_helpers.ArgsOutputTest() - args_output.builders = None - args_output.options = None - - # Verify the exception is raised when the index of the tryjob was not - # found in the status file and the mode is not to 'add' a tryjob. - with self.assertRaises(ValueError) as err: - modify_a_tryjob.PerformTryjobModification( - revision_to_modify, modify_a_tryjob.ModifyTryjob.REMOVE, - temp_json_file, args_output.extra_change_lists, - args_output.options, args_output.builders, args_output.chroot_path, - args_output.verbose) - - self.assertEqual( - str(err.exception), 'Unable to find tryjob for %d in %s' % - (revision_to_modify, temp_json_file)) - - mock_find_tryjob_index.assert_called_once() - - # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob - # was found. - @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0) - def testSuccessfullyRemovedTryjobInStatusFile(self, mock_find_tryjob_index): - bisect_test_contents = { - 'start': 369410, - 'end': 369420, - 'jobs': [{ - 'rev': 369414, - 'status': 'pending', - 'buildbucket_id': 1200 - }] - } - - # Create a temporary .JSON file to simulate a .JSON file that has bisection - # contents. - with test_helpers.CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - test_helpers.WritePrettyJsonFile(bisect_test_contents, f) - - revision_to_modify = 369414 - - args_output = test_helpers.ArgsOutputTest() - args_output.builders = None - args_output.options = None - - modify_a_tryjob.PerformTryjobModification( - revision_to_modify, modify_a_tryjob.ModifyTryjob.REMOVE, - temp_json_file, args_output.extra_change_lists, args_output.options, - args_output.builders, args_output.chroot_path, args_output.verbose) - - # Verify that the tryjob was removed from the status file. 
-      with open(temp_json_file) as status_file:
-        bisect_contents = json.load(status_file)
-
-      expected_file_contents = {'start': 369410, 'end': 369420, 'jobs': []}
-
-      self.assertDictEqual(bisect_contents, expected_file_contents)
-
-    mock_find_tryjob_index.assert_called_once()
-
-  # Simulate the behavior of `RunTryJobs()` when successfully submitted a
-  # tryjob.
-  @mock.patch.object(update_packages_and_run_tests, 'RunTryJobs')
-  # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob
-  # was found.
-  @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0)
-  def testSuccessfullyRelaunchedTryjob(self, mock_find_tryjob_index,
-                                       mock_run_tryjob):
-
-    bisect_test_contents = {
-        'start':
-            369410,
-        'end':
-            369420,
-        'jobs': [{
-            'rev': 369411,
-            'status': 'bad',
-            'link': 'https://some_tryjob_link.com',
-            'buildbucket_id': 1200,
-            'cl': 123,
-            'extra_cls': None,
-            'options': None,
-            'builder': ['some-builder-tryjob']
-        }]
-    }
-
-    tryjob_result = [{
-        'link': 'https://some_new_tryjob_link.com',
-        'buildbucket_id': 20
-    }]
-
-    mock_run_tryjob.return_value = tryjob_result
-
-    # Create a temporary .JSON file to simulate a .JSON file that has bisection
-    # contents.
-    with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
-      with open(temp_json_file, 'w') as f:
-        test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
-
-      revision_to_modify = 369411
-
-      args_output = test_helpers.ArgsOutputTest()
-      args_output.builders = None
-      args_output.options = None
-
-      modify_a_tryjob.PerformTryjobModification(
-          revision_to_modify, modify_a_tryjob.ModifyTryjob.RELAUNCH,
-          temp_json_file, args_output.extra_change_lists, args_output.options,
-          args_output.builders, args_output.chroot_path, args_output.verbose)
-
-      # Verify that the tryjob's information was updated after submtting the
-      # tryjob.
-      with open(temp_json_file) as status_file:
-        bisect_contents = json.load(status_file)
-
-      expected_file_contents = {
-          'start':
-              369410,
-          'end':
-              369420,
-          'jobs': [{
-              'rev': 369411,
-              'status': 'pending',
-              'link': 'https://some_new_tryjob_link.com',
-              'buildbucket_id': 20,
-              'cl': 123,
-              'extra_cls': None,
-              'options': None,
-              'builder': ['some-builder-tryjob']
-          }]
+    """Unittests for modifying a tryjob."""
+
+    def testNoTryjobsInStatusFile(self):
+        bisect_test_contents = {"start": 369410, "end": 369420, "jobs": []}
+
+        # Create a temporary .JSON file to simulate a .JSON file that has bisection
+        # contents.
+        with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+            with open(temp_json_file, "w") as f:
+                test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
+
+            revision_to_modify = 369411
+
+            args_output = test_helpers.ArgsOutputTest()
+            args_output.builders = None
+            args_output.options = None
+
+            # Verify the exception is raised when there are no tryjobs in the
+            # status file and the mode is not to 'add' a tryjob.
+            with self.assertRaises(SystemExit) as err:
+                modify_a_tryjob.PerformTryjobModification(
+                    revision_to_modify,
+                    modify_a_tryjob.ModifyTryjob.REMOVE,
+                    temp_json_file,
+                    args_output.extra_change_lists,
+                    args_output.options,
+                    args_output.builders,
+                    args_output.chroot_path,
+                    args_output.verbose,
+                )
+
+            self.assertEqual(
+                str(err.exception), "No tryjobs in %s" % temp_json_file
+            )
+
+    # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob
+    # was not found.
+ @mock.patch.object( + update_tryjob_status, "FindTryjobIndex", return_value=None + ) + def testNoTryjobIndexFound(self, mock_find_tryjob_index): + bisect_test_contents = { + "start": 369410, + "end": 369420, + "jobs": [ + {"rev": 369411, "status": "pending", "buildbucket_id": 1200} + ], } - self.assertDictEqual(bisect_contents, expected_file_contents) - - mock_find_tryjob_index.assert_called_once() - - mock_run_tryjob.assert_called_once() - - # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob - # was found. - @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0) - def testAddingTryjobThatAlreadyExists(self, mock_find_tryjob_index): - bisect_test_contents = { - 'start': 369410, - 'end': 369420, - 'jobs': [{ - 'rev': 369411, - 'status': 'bad', - 'builder': ['some-builder'] - }] - } - - # Create a temporary .JSON file to simulate a .JSON file that has bisection - # contents. - with test_helpers.CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - test_helpers.WritePrettyJsonFile(bisect_test_contents, f) - - revision_to_add = 369411 - - # Index of the tryjob in 'jobs' list. - tryjob_index = 0 - - args_output = test_helpers.ArgsOutputTest() - args_output.options = None - - # Verify the exception is raised when the tryjob that is going to added - # already exists in the status file (found its index). - with self.assertRaises(ValueError) as err: - modify_a_tryjob.PerformTryjobModification( - revision_to_add, modify_a_tryjob.ModifyTryjob.ADD, temp_json_file, - args_output.extra_change_lists, args_output.options, - args_output.builders, args_output.chroot_path, args_output.verbose) - - self.assertEqual( - str(err.exception), 'Tryjob already exists (index is %d) in %s.' % - (tryjob_index, temp_json_file)) - - mock_find_tryjob_index.assert_called_once() - - # Simulate the behavior of `FindTryjobIndex()` when the tryjob was not found. - @mock.patch.object(update_tryjob_status, - 'FindTryjobIndex', - return_value=None) - def testSuccessfullyDidNotAddTryjobOutsideOfBisectionBounds( - self, mock_find_tryjob_index): - - bisect_test_contents = { - 'start': 369410, - 'end': 369420, - 'jobs': [{ - 'rev': 369411, - 'status': 'bad' - }] - } - - # Create a temporary .JSON file to simulate a .JSON file that has bisection - # contents. - with test_helpers.CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - test_helpers.WritePrettyJsonFile(bisect_test_contents, f) - - # Add a revision that is outside of 'start' and 'end'. - revision_to_add = 369450 - - args_output = test_helpers.ArgsOutputTest() - args_output.options = None - - # Verify the exception is raised when adding a tryjob that does not exist - # and is not within 'start' and 'end'. - with self.assertRaises(ValueError) as err: - modify_a_tryjob.PerformTryjobModification( - revision_to_add, modify_a_tryjob.ModifyTryjob.ADD, temp_json_file, - args_output.extra_change_lists, args_output.options, - args_output.builders, args_output.chroot_path, args_output.verbose) - - self.assertEqual(str(err.exception), - 'Failed to add tryjob to %s' % temp_json_file) - - mock_find_tryjob_index.assert_called_once() - - # Simulate the behavior of `AddTryjob()` when successfully submitted the - # tryjob and constructed the tryjob information (a dictionary). - @mock.patch.object(modify_a_tryjob, 'AddTryjob') - # Simulate the behavior of `GetLLVMHashAndVersionFromSVNOption()` when - # successfully retrieved the git hash of the revision to launch a tryjob for. 
- @mock.patch.object(get_llvm_hash, - 'GetLLVMHashAndVersionFromSVNOption', - return_value=('a123testhash1', 369418)) - # Simulate the behavior of `FindTryjobIndex()` when the tryjob was not found. - @mock.patch.object(update_tryjob_status, - 'FindTryjobIndex', - return_value=None) - def testSuccessfullyAddedTryjob(self, mock_find_tryjob_index, - mock_get_llvm_hash, mock_add_tryjob): - - bisect_test_contents = { - 'start': 369410, - 'end': 369420, - 'jobs': [{ - 'rev': 369411, - 'status': 'bad' - }] - } - - # Create a temporary .JSON file to simulate a .JSON file that has bisection - # contents. - with test_helpers.CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - test_helpers.WritePrettyJsonFile(bisect_test_contents, f) - - # Add a revision that is outside of 'start' and 'end'. - revision_to_add = 369418 - - args_output = test_helpers.ArgsOutputTest() - args_output.options = None - - new_tryjob_info = { - 'rev': revision_to_add, - 'status': 'pending', - 'options': args_output.options, - 'extra_cls': args_output.extra_change_lists, - 'builder': args_output.builders - } - - mock_add_tryjob.return_value = new_tryjob_info - - modify_a_tryjob.PerformTryjobModification( - revision_to_add, modify_a_tryjob.ModifyTryjob.ADD, temp_json_file, - args_output.extra_change_lists, args_output.options, - args_output.builders, args_output.chroot_path, args_output.verbose) - - # Verify that the tryjob was added to the status file. - with open(temp_json_file) as status_file: - bisect_contents = json.load(status_file) - - expected_file_contents = { - 'start': 369410, - 'end': 369420, - 'jobs': [{ - 'rev': 369411, - 'status': 'bad' - }, new_tryjob_info] + # Create a temporary .JSON file to simulate a .JSON file that has bisection + # contents. + with test_helpers.CreateTemporaryJsonFile() as temp_json_file: + with open(temp_json_file, "w") as f: + test_helpers.WritePrettyJsonFile(bisect_test_contents, f) + + revision_to_modify = 369412 + + args_output = test_helpers.ArgsOutputTest() + args_output.builders = None + args_output.options = None + + # Verify the exception is raised when the index of the tryjob was not + # found in the status file and the mode is not to 'add' a tryjob. + with self.assertRaises(ValueError) as err: + modify_a_tryjob.PerformTryjobModification( + revision_to_modify, + modify_a_tryjob.ModifyTryjob.REMOVE, + temp_json_file, + args_output.extra_change_lists, + args_output.options, + args_output.builders, + args_output.chroot_path, + args_output.verbose, + ) + + self.assertEqual( + str(err.exception), + "Unable to find tryjob for %d in %s" + % (revision_to_modify, temp_json_file), + ) + + mock_find_tryjob_index.assert_called_once() + + # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob + # was found. + @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0) + def testSuccessfullyRemovedTryjobInStatusFile(self, mock_find_tryjob_index): + bisect_test_contents = { + "start": 369410, + "end": 369420, + "jobs": [ + {"rev": 369414, "status": "pending", "buildbucket_id": 1200} + ], } - self.assertDictEqual(bisect_contents, expected_file_contents) - - mock_find_tryjob_index.assert_called_once() - - mock_get_llvm_hash.assert_called_once_with(revision_to_add) - - mock_add_tryjob.assert_called_once() - - # Simulate the behavior of `FindTryjobIndex()` when the tryjob was found. 
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0) - def testModifyATryjobOptionDoesNotExist(self, mock_find_tryjob_index): - bisect_test_contents = { - 'start': 369410, - 'end': 369420, - 'jobs': [{ - 'rev': 369414, - 'status': 'bad' - }] - } - - # Create a temporary .JSON file to simulate a .JSON file that has bisection - # contents. - with test_helpers.CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - test_helpers.WritePrettyJsonFile(bisect_test_contents, f) - - # Add a revision that is outside of 'start' and 'end'. - revision_to_modify = 369414 - - args_output = test_helpers.ArgsOutputTest() - args_output.builders = None - args_output.options = None + # Create a temporary .JSON file to simulate a .JSON file that has bisection + # contents. + with test_helpers.CreateTemporaryJsonFile() as temp_json_file: + with open(temp_json_file, "w") as f: + test_helpers.WritePrettyJsonFile(bisect_test_contents, f) + + revision_to_modify = 369414 + + args_output = test_helpers.ArgsOutputTest() + args_output.builders = None + args_output.options = None + + modify_a_tryjob.PerformTryjobModification( + revision_to_modify, + modify_a_tryjob.ModifyTryjob.REMOVE, + temp_json_file, + args_output.extra_change_lists, + args_output.options, + args_output.builders, + args_output.chroot_path, + args_output.verbose, + ) + + # Verify that the tryjob was removed from the status file. + with open(temp_json_file) as status_file: + bisect_contents = json.load(status_file) + + expected_file_contents = { + "start": 369410, + "end": 369420, + "jobs": [], + } + + self.assertDictEqual(bisect_contents, expected_file_contents) + + mock_find_tryjob_index.assert_called_once() + + # Simulate the behavior of `RunTryJobs()` when successfully submitted a + # tryjob. + @mock.patch.object(update_packages_and_run_tests, "RunTryJobs") + # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob + # was found. + @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0) + def testSuccessfullyRelaunchedTryjob( + self, mock_find_tryjob_index, mock_run_tryjob + ): + + bisect_test_contents = { + "start": 369410, + "end": 369420, + "jobs": [ + { + "rev": 369411, + "status": "bad", + "link": "https://some_tryjob_link.com", + "buildbucket_id": 1200, + "cl": 123, + "extra_cls": None, + "options": None, + "builder": ["some-builder-tryjob"], + } + ], + } - # Verify the exception is raised when the modify a tryjob option does not - # exist. - with self.assertRaises(ValueError) as err: - modify_a_tryjob.PerformTryjobModification( - revision_to_modify, 'remove_link', temp_json_file, - args_output.extra_change_lists, args_output.options, - args_output.builders, args_output.chroot_path, args_output.verbose) + tryjob_result = [ + {"link": "https://some_new_tryjob_link.com", "buildbucket_id": 20} + ] + + mock_run_tryjob.return_value = tryjob_result + + # Create a temporary .JSON file to simulate a .JSON file that has bisection + # contents. 
+        with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+            with open(temp_json_file, "w") as f:
+                test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
+
+            revision_to_modify = 369411
+
+            args_output = test_helpers.ArgsOutputTest()
+            args_output.builders = None
+            args_output.options = None
+
+            modify_a_tryjob.PerformTryjobModification(
+                revision_to_modify,
+                modify_a_tryjob.ModifyTryjob.RELAUNCH,
+                temp_json_file,
+                args_output.extra_change_lists,
+                args_output.options,
+                args_output.builders,
+                args_output.chroot_path,
+                args_output.verbose,
+            )
+
+            # Verify that the tryjob's information was updated after submitting
+            # the tryjob.
+            with open(temp_json_file) as status_file:
+                bisect_contents = json.load(status_file)
+
+            expected_file_contents = {
+                "start": 369410,
+                "end": 369420,
+                "jobs": [
+                    {
+                        "rev": 369411,
+                        "status": "pending",
+                        "link": "https://some_new_tryjob_link.com",
+                        "buildbucket_id": 20,
+                        "cl": 123,
+                        "extra_cls": None,
+                        "options": None,
+                        "builder": ["some-builder-tryjob"],
+                    }
+                ],
+            }
+
+            self.assertDictEqual(bisect_contents, expected_file_contents)
+
+            mock_find_tryjob_index.assert_called_once()
+
+            mock_run_tryjob.assert_called_once()
+
+    # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob
+    # was found.
+    @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0)
+    def testAddingTryjobThatAlreadyExists(self, mock_find_tryjob_index):
+        bisect_test_contents = {
+            "start": 369410,
+            "end": 369420,
+            "jobs": [
+                {"rev": 369411, "status": "bad", "builder": ["some-builder"]}
+            ],
+        }

-    # Create a temporary .JSON file to simulate a .JSON file that has bisection
-    # contents.
+        # Create a temporary .JSON file to simulate a .JSON file that has bisection
+        # contents.
+        with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+            with open(temp_json_file, "w") as f:
+                test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
+
+            revision_to_add = 369411
+
+            # Index of the tryjob in 'jobs' list.
+            tryjob_index = 0
+
+            args_output = test_helpers.ArgsOutputTest()
+            args_output.options = None
+
+            # Verify the exception is raised when the tryjob that is going to be
+            # added already exists in the status file (found its index).
+            with self.assertRaises(ValueError) as err:
+                modify_a_tryjob.PerformTryjobModification(
+                    revision_to_add,
+                    modify_a_tryjob.ModifyTryjob.ADD,
+                    temp_json_file,
+                    args_output.extra_change_lists,
+                    args_output.options,
+                    args_output.builders,
+                    args_output.chroot_path,
+                    args_output.verbose,
+                )
+
+            self.assertEqual(
+                str(err.exception),
+                "Tryjob already exists (index is %d) in %s."
+                % (tryjob_index, temp_json_file),
+            )
+
+            mock_find_tryjob_index.assert_called_once()
+
+    # Simulate the behavior of `FindTryjobIndex()` when the tryjob was not found.
+    @mock.patch.object(
+        update_tryjob_status, "FindTryjobIndex", return_value=None
+    )
+    def testSuccessfullyDidNotAddTryjobOutsideOfBisectionBounds(
+        self, mock_find_tryjob_index
+    ):
+
+        bisect_test_contents = {
+            "start": 369410,
+            "end": 369420,
+            "jobs": [{"rev": 369411, "status": "bad"}],
+        }

-    # Create a temporary .JSON file to simulate a .JSON file that has bisection
-    # contents.
+        # Create a temporary .JSON file to simulate a .JSON file that has bisection
+        # contents.
+        with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+            with open(temp_json_file, "w") as f:
+                test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
+
+            # Add a revision that is outside of 'start' and 'end'.
+            revision_to_add = 369450
+
+            args_output = test_helpers.ArgsOutputTest()
+            args_output.options = None
+
+            # Verify the exception is raised when adding a tryjob that does not exist
+            # and is not within 'start' and 'end'.
+            with self.assertRaises(ValueError) as err:
+                modify_a_tryjob.PerformTryjobModification(
+                    revision_to_add,
+                    modify_a_tryjob.ModifyTryjob.ADD,
+                    temp_json_file,
+                    args_output.extra_change_lists,
+                    args_output.options,
+                    args_output.builders,
+                    args_output.chroot_path,
+                    args_output.verbose,
+                )
+
+            self.assertEqual(
+                str(err.exception),
+                "Failed to add tryjob to %s" % temp_json_file,
+            )
+
+            mock_find_tryjob_index.assert_called_once()
+
+    # Simulate the behavior of `AddTryjob()` when successfully submitted the
+    # tryjob and constructed the tryjob information (a dictionary).
+    @mock.patch.object(modify_a_tryjob, "AddTryjob")
+    # Simulate the behavior of `GetLLVMHashAndVersionFromSVNOption()` when
+    # successfully retrieved the git hash of the revision to launch a tryjob for.
+    @mock.patch.object(
+        get_llvm_hash,
+        "GetLLVMHashAndVersionFromSVNOption",
+        return_value=("a123testhash1", 369418),
+    )
+    # Simulate the behavior of `FindTryjobIndex()` when the tryjob was not found.
+    @mock.patch.object(
+        update_tryjob_status, "FindTryjobIndex", return_value=None
+    )
+    def testSuccessfullyAddedTryjob(
+        self, mock_find_tryjob_index, mock_get_llvm_hash, mock_add_tryjob
+    ):
+
+        bisect_test_contents = {
+            "start": 369410,
+            "end": 369420,
+            "jobs": [{"rev": 369411, "status": "bad"}],
+        }
+        # Create a temporary .JSON file to simulate a .JSON file that has bisection
+        # contents.
+        with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+            with open(temp_json_file, "w") as f:
+                test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
+
+            # Add a revision that is within 'start' and 'end'.
+            revision_to_add = 369418
+
+            args_output = test_helpers.ArgsOutputTest()
+            args_output.options = None
+
+            new_tryjob_info = {
+                "rev": revision_to_add,
+                "status": "pending",
+                "options": args_output.options,
+                "extra_cls": args_output.extra_change_lists,
+                "builder": args_output.builders,
+            }
+
+            mock_add_tryjob.return_value = new_tryjob_info
+
+            modify_a_tryjob.PerformTryjobModification(
+                revision_to_add,
+                modify_a_tryjob.ModifyTryjob.ADD,
+                temp_json_file,
+                args_output.extra_change_lists,
+                args_output.options,
+                args_output.builders,
+                args_output.chroot_path,
+                args_output.verbose,
+            )
+
+            # Verify that the tryjob was added to the status file.
+            with open(temp_json_file) as status_file:
+                bisect_contents = json.load(status_file)
+
+            expected_file_contents = {
+                "start": 369410,
+                "end": 369420,
+                "jobs": [{"rev": 369411, "status": "bad"}, new_tryjob_info],
+            }
+
+            self.assertDictEqual(bisect_contents, expected_file_contents)
+
+            mock_find_tryjob_index.assert_called_once()
+
+            mock_get_llvm_hash.assert_called_once_with(revision_to_add)
+
+            mock_add_tryjob.assert_called_once()
+
+    # Simulate the behavior of `FindTryjobIndex()` when the tryjob was found.
+    @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0)
+    def testModifyATryjobOptionDoesNotExist(self, mock_find_tryjob_index):
+        bisect_test_contents = {
+            "start": 369410,
+            "end": 369420,
+            "jobs": [{"rev": 369414, "status": "bad"}],
+        }

-if __name__ == '__main__':
-  unittest.main()
+        # Create a temporary .JSON file to simulate a .JSON file that has bisection
+        # contents.
+        with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+            with open(temp_json_file, "w") as f:
+                test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
+
+            # Use a revision that is within 'start' and 'end'.
+            revision_to_modify = 369414
+
+            args_output = test_helpers.ArgsOutputTest()
+            args_output.builders = None
+            args_output.options = None
+
+            # Verify the exception is raised when the 'modify_tryjob' option does
+            # not exist.
+            with self.assertRaises(ValueError) as err:
+                modify_a_tryjob.PerformTryjobModification(
+                    revision_to_modify,
+                    "remove_link",
+                    temp_json_file,
+                    args_output.extra_change_lists,
+                    args_output.options,
+                    args_output.builders,
+                    args_output.chroot_path,
+                    args_output.verbose,
+                )
+
+            self.assertEqual(
+                str(err.exception),
+                'Invalid "modify_tryjob" option provided: remove_link',
+            )
+
+            mock_find_tryjob_index.assert_called_once()
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/llvm_tools/nightly_revert_checker.py b/llvm_tools/nightly_revert_checker.py
index 842d9c92..17b1c40f 100755
--- a/llvm_tools/nightly_revert_checker.py
+++ b/llvm_tools/nightly_revert_checker.py
@@ -24,383 +24,462 @@ import typing as t
 import cros_utils.email_sender as email_sender
 import cros_utils.tiny_render as tiny_render
-
 import get_llvm_hash
 import get_upstream_patch
 import git_llvm_rev
 import revert_checker
+
 State = t.Any
 
 
-def _find_interesting_android_shas(android_llvm_toolchain_dir: str
-                                   ) -> t.List[t.Tuple[str, str]]:
-  llvm_project = os.path.join(android_llvm_toolchain_dir,
-                              'toolchain/llvm-project')
-
-  def get_llvm_merge_base(branch: str) -> str:
-    head_sha = subprocess.check_output(
-        ['git', 'rev-parse', branch],
-        cwd=llvm_project,
-        encoding='utf-8',
-    ).strip()
-    merge_base = subprocess.check_output(
-        ['git', 'merge-base', branch, 'aosp/upstream-main'],
-        cwd=llvm_project,
-        encoding='utf-8',
-    ).strip()
-    logging.info('Merge-base for %s (HEAD == %s) and upstream-main is %s',
-                 branch, head_sha, merge_base)
-    return merge_base
-
-  main_legacy = get_llvm_merge_base('aosp/master-legacy')  # nocheck
-  testing_upstream = get_llvm_merge_base('aosp/testing-upstream')
-  result = [('main-legacy', main_legacy)]
-
-  # If these are the same SHA, there's no point in tracking both.
-  if main_legacy != testing_upstream:
-    result.append(('testing-upstream', testing_upstream))
-  else:
-    logging.info('main-legacy and testing-upstream are identical; ignoring '
-                 'the latter.')
-  return result
-
-
-def _parse_llvm_ebuild_for_shas(ebuild_file: io.TextIOWrapper
-                                ) -> t.List[t.Tuple[str, str]]:
-  def parse_ebuild_assignment(line: str) -> str:
-    no_comments = line.split('#')[0]
-    no_assign = no_comments.split('=', 1)[1].strip()
-    assert no_assign.startswith('"') and no_assign.endswith('"'), no_assign
-    return no_assign[1:-1]
-
-  llvm_hash, llvm_next_hash = None, None
-  for line in ebuild_file:
-    if line.startswith('LLVM_HASH='):
-      llvm_hash = parse_ebuild_assignment(line)
-      if llvm_next_hash:
-        break
-    if line.startswith('LLVM_NEXT_HASH'):
-      llvm_next_hash = parse_ebuild_assignment(line)
-      if llvm_hash:
-        break
-  if not llvm_next_hash or not llvm_hash:
-    raise ValueError('Failed to detect SHAs for llvm/llvm_next. Got: '
-                     'llvm=%s; llvm_next=%s' % (llvm_hash, llvm_next_hash))
-
-  results = [('llvm', llvm_hash)]
-  if llvm_next_hash != llvm_hash:
-    results.append(('llvm-next', llvm_next_hash))
-  return results
-
-
-def _find_interesting_chromeos_shas(chromeos_base: str
-                                    ) -> t.List[t.Tuple[str, str]]:
-  llvm_dir = os.path.join(chromeos_base,
-                          'src/third_party/chromiumos-overlay/sys-devel/llvm')
-  candidate_ebuilds = [
-      os.path.join(llvm_dir, x) for x in os.listdir(llvm_dir)
-      if '_pre' in x and not os.path.islink(os.path.join(llvm_dir, x))
-  ]
-
-  if len(candidate_ebuilds) != 1:
-    raise ValueError('Expected exactly one llvm ebuild candidate; got %s' %
-                     pprint.pformat(candidate_ebuilds))
-
-  with open(candidate_ebuilds[0], encoding='utf-8') as f:
-    return _parse_llvm_ebuild_for_shas(f)
-
-
-_Email = t.NamedTuple('_Email', [
-    ('subject', str),
-    ('body', tiny_render.Piece),
-])
+def _find_interesting_android_shas(
+    android_llvm_toolchain_dir: str,
+) -> t.List[t.Tuple[str, str]]:
+    llvm_project = os.path.join(
+        android_llvm_toolchain_dir, "toolchain/llvm-project"
+    )
+
+    def get_llvm_merge_base(branch: str) -> str:
+        head_sha = subprocess.check_output(
+            ["git", "rev-parse", branch],
+            cwd=llvm_project,
+            encoding="utf-8",
+        ).strip()
+        merge_base = subprocess.check_output(
+            ["git", "merge-base", branch, "aosp/upstream-main"],
+            cwd=llvm_project,
+            encoding="utf-8",
+        ).strip()
+        logging.info(
+            "Merge-base for %s (HEAD == %s) and upstream-main is %s",
+            branch,
+            head_sha,
+            merge_base,
+        )
+        return merge_base
+
+    main_legacy = get_llvm_merge_base("aosp/master-legacy")  # nocheck
+    testing_upstream = get_llvm_merge_base("aosp/testing-upstream")
+    result = [("main-legacy", main_legacy)]
+
+    # If these are the same SHA, there's no point in tracking both.
+    if main_legacy != testing_upstream:
+        result.append(("testing-upstream", testing_upstream))
+    else:
+        logging.info(
+            "main-legacy and testing-upstream are identical; ignoring "
+            "the latter."
+        )
+    return result
+
+
+def _parse_llvm_ebuild_for_shas(
+    ebuild_file: io.TextIOWrapper,
+) -> t.List[t.Tuple[str, str]]:
+    def parse_ebuild_assignment(line: str) -> str:
+        no_comments = line.split("#")[0]
+        no_assign = no_comments.split("=", 1)[1].strip()
+        assert no_assign.startswith('"') and no_assign.endswith('"'), no_assign
+        return no_assign[1:-1]
+
+    llvm_hash, llvm_next_hash = None, None
+    for line in ebuild_file:
+        if line.startswith("LLVM_HASH="):
+            llvm_hash = parse_ebuild_assignment(line)
+            if llvm_next_hash:
+                break
+        if line.startswith("LLVM_NEXT_HASH"):
+            llvm_next_hash = parse_ebuild_assignment(line)
+            if llvm_hash:
+                break
+    if not llvm_next_hash or not llvm_hash:
+        raise ValueError(
+            "Failed to detect SHAs for llvm/llvm_next. Got: "
+            "llvm=%s; llvm_next=%s" % (llvm_hash, llvm_next_hash)
+        )
+
+    results = [("llvm", llvm_hash)]
+    if llvm_next_hash != llvm_hash:
+        results.append(("llvm-next", llvm_next_hash))
+    return results
+
+
+def _find_interesting_chromeos_shas(
+    chromeos_base: str,
+) -> t.List[t.Tuple[str, str]]:
+    llvm_dir = os.path.join(
+        chromeos_base, "src/third_party/chromiumos-overlay/sys-devel/llvm"
+    )
+    candidate_ebuilds = [
+        os.path.join(llvm_dir, x)
+        for x in os.listdir(llvm_dir)
+        if "_pre" in x and not os.path.islink(os.path.join(llvm_dir, x))
+    ]
+
+    if len(candidate_ebuilds) != 1:
+        raise ValueError(
+            "Expected exactly one llvm ebuild candidate; got %s"
+            % pprint.pformat(candidate_ebuilds)
+        )
+
+    with open(candidate_ebuilds[0], encoding="utf-8") as f:
+        return _parse_llvm_ebuild_for_shas(f)
+
+
+_Email = t.NamedTuple(
+    "_Email",
+    [
+        ("subject", str),
+        ("body", tiny_render.Piece),
+    ],
+)
 
 
 def _generate_revert_email(
-    repository_name: str, friendly_name: str, sha: str,
+    repository_name: str,
+    friendly_name: str,
+    sha: str,
     prettify_sha: t.Callable[[str], tiny_render.Piece],
     get_sha_description: t.Callable[[str], tiny_render.Piece],
-    new_reverts: t.List[revert_checker.Revert]) -> _Email:
-  email_pieces = [
-      'It looks like there may be %s across %s (' % (
-          'a new revert' if len(new_reverts) == 1 else 'new reverts',
-          friendly_name,
-      ),
-      prettify_sha(sha),
-      ').',
-      tiny_render.line_break,
-      tiny_render.line_break,
-      'That is:' if len(new_reverts) == 1 else 'These are:',
-  ]
-
-  revert_listing = []
-  for revert in sorted(new_reverts, key=lambda r: r.sha):
-    revert_listing.append([
-        prettify_sha(revert.sha),
-        ' (appears to revert ',
-        prettify_sha(revert.reverted_sha),
-        '): ',
-        get_sha_description(revert.sha),
-    ])
-
-  email_pieces.append(tiny_render.UnorderedList(items=revert_listing))
-  email_pieces += [
-      tiny_render.line_break,
-      'PTAL and consider reverting them locally.',
-  ]
-  return _Email(
-      subject='[revert-checker/%s] new %s discovered across %s' % (
-          repository_name,
-          'revert' if len(new_reverts) == 1 else 'reverts',
-          friendly_name,
-      ),
-      body=email_pieces,
-  )
+    new_reverts: t.List[revert_checker.Revert],
+) -> _Email:
+    email_pieces = [
+        "It looks like there may be %s across %s ("
+        % (
+            "a new revert" if len(new_reverts) == 1 else "new reverts",
+            friendly_name,
+        ),
+        prettify_sha(sha),
+        ").",
+        tiny_render.line_break,
+        tiny_render.line_break,
+        "That is:" if len(new_reverts) == 1 else "These are:",
+    ]
+
+    revert_listing = []
+    for revert in sorted(new_reverts, key=lambda r: r.sha):
+        revert_listing.append(
+            [
+                prettify_sha(revert.sha),
+                " (appears to revert ",
+                prettify_sha(revert.reverted_sha),
+                "): ",
+                get_sha_description(revert.sha),
+            ]
+        )
+
+    email_pieces.append(tiny_render.UnorderedList(items=revert_listing))
+    email_pieces += [
+        tiny_render.line_break,
+        "PTAL and consider reverting them locally.",
+    ]
+    return _Email(
+        subject="[revert-checker/%s] new %s discovered across %s"
+        % (
+            repository_name,
+            "revert" if len(new_reverts) == 1 else "reverts",
+            friendly_name,
+        ),
+        body=email_pieces,
+    )
 
 
 _EmailRecipients = t.NamedTuple(
-    '_EmailRecipients',
+    "_EmailRecipients",
     [
-        ('well_known', t.List[str]),
-        ('direct', t.List[str]),
+        ("well_known", t.List[str]),
+        ("direct", t.List[str]),
     ],
 )
 
 
 def _send_revert_email(recipients: _EmailRecipients, email: _Email) -> None:
-  email_sender.EmailSender().SendX20Email(
-      subject=email.subject,
-      identifier='revert-checker',
-      well_known_recipients=recipients.well_known,
-      direct_recipients=['gbiv@google.com'] + recipients.direct,
-      text_body=tiny_render.render_text_pieces(email.body),
-      html_body=tiny_render.render_html_pieces(email.body),
-  )
+    email_sender.EmailSender().SendX20Email(
+        subject=email.subject,
+        identifier="revert-checker",
+        well_known_recipients=recipients.well_known,
+        direct_recipients=["gbiv@google.com"] + recipients.direct,
+        text_body=tiny_render.render_text_pieces(email.body),
+        html_body=tiny_render.render_html_pieces(email.body),
+    )
 
 
 def _write_state(state_file: str, new_state: State) -> None:
-  try:
-    tmp_file = state_file + '.new'
-    with open(tmp_file, 'w', encoding='utf-8') as f:
-      json.dump(new_state, f, sort_keys=True, indent=2, separators=(',', ': '))
-    os.rename(tmp_file, state_file)
-  except:
     try:
-      os.remove(tmp_file)
-    except FileNotFoundError:
-      pass
-    raise
+        tmp_file = state_file + ".new"
+        with open(tmp_file, "w", encoding="utf-8") as f:
+            json.dump(
+                new_state, f, sort_keys=True, indent=2, separators=(",", ": ")
+            )
+        os.rename(tmp_file, state_file)
+    except:
+        try:
+            os.remove(tmp_file)
+        except FileNotFoundError:
+            pass
+        raise
 
 
 def _read_state(state_file: str) -> State:
-  try:
-    with open(state_file) as f:
-      return json.load(f)
-  except FileNotFoundError:
-    logging.info('No state file found at %r; starting with an empty slate',
-                 state_file)
-    return {}
-
-
-def find_shas(llvm_dir: str, interesting_shas: t.List[t.Tuple[str, str]],
-              state: State, new_state: State):
-  for friendly_name, sha in interesting_shas:
-    logging.info('Finding reverts across %s (%s)', friendly_name, sha)
-    all_reverts = revert_checker.find_reverts(llvm_dir,
-                                              sha,
-                                              root='origin/' +
-                                              git_llvm_rev.MAIN_BRANCH)
-    logging.info('Detected the following revert(s) across %s:\n%s',
-                 friendly_name, pprint.pformat(all_reverts))
-
-    new_state[sha] = [r.sha for r in all_reverts]
-
-    if sha not in state:
-      logging.info('SHA %s is new to me', sha)
-      existing_reverts = set()
-    else:
-      existing_reverts = set(state[sha])
-
-    new_reverts = [r for r in all_reverts if r.sha not in existing_reverts]
-    if not new_reverts:
-      logging.info('...All of which have been reported.')
-      continue
-
-    yield (friendly_name, sha, new_reverts)
-
-
-def do_cherrypick(chroot_path: str, llvm_dir: str,
-                  interesting_shas: t.List[t.Tuple[str, str]], state: State,
-                  reviewers: t.List[str], cc: t.List[str]) -> State:
-  new_state: State = {}
-  seen: t.Set[str] = set()
-  for friendly_name, _sha, reverts in find_shas(llvm_dir, interesting_shas,
-                                                state, new_state):
-    if friendly_name in seen:
-      continue
-    seen.add(friendly_name)
-    for sha, reverted_sha in reverts:
-      try:
-        # We upload reverts for all platforms by default, since there's no
-        # real reason for them to be CrOS-specific.
-        get_upstream_patch.get_from_upstream(chroot_path=chroot_path,
-                                             create_cl=True,
-                                             start_sha=reverted_sha,
-                                             patches=[sha],
-                                             reviewers=reviewers,
-                                             cc=cc,
-                                             platforms=())
-      except get_upstream_patch.CherrypickError as e:
-        logging.info('%s, skipping...', str(e))
-  return new_state
-
-
-def do_email(is_dry_run: bool, llvm_dir: str, repository: str,
-             interesting_shas: t.List[t.Tuple[str, str]], state: State,
-             recipients: _EmailRecipients) -> State:
-  def prettify_sha(sha: str) -> tiny_render.Piece:
-    rev = get_llvm_hash.GetVersionFrom(llvm_dir, sha)
-
-    # 12 is arbitrary, but should be unambiguous enough.
- short_sha = sha[:12] - return tiny_render.Switch( - text=f'r{rev} ({short_sha})', - html=tiny_render.Link(href='https://reviews.llvm.org/rG' + sha, - inner='r' + str(rev)), + try: + with open(state_file) as f: + return json.load(f) + except FileNotFoundError: + logging.info( + "No state file found at %r; starting with an empty slate", + state_file, + ) + return {} + + +def find_shas( + llvm_dir: str, + interesting_shas: t.List[t.Tuple[str, str]], + state: State, + new_state: State, +): + for friendly_name, sha in interesting_shas: + logging.info("Finding reverts across %s (%s)", friendly_name, sha) + all_reverts = revert_checker.find_reverts( + llvm_dir, sha, root="origin/" + git_llvm_rev.MAIN_BRANCH + ) + logging.info( + "Detected the following revert(s) across %s:\n%s", + friendly_name, + pprint.pformat(all_reverts), + ) + + new_state[sha] = [r.sha for r in all_reverts] + + if sha not in state: + logging.info("SHA %s is new to me", sha) + existing_reverts = set() + else: + existing_reverts = set(state[sha]) + + new_reverts = [r for r in all_reverts if r.sha not in existing_reverts] + if not new_reverts: + logging.info("...All of which have been reported.") + continue + + yield (friendly_name, sha, new_reverts) + + +def do_cherrypick( + chroot_path: str, + llvm_dir: str, + interesting_shas: t.List[t.Tuple[str, str]], + state: State, + reviewers: t.List[str], + cc: t.List[str], +) -> State: + new_state: State = {} + seen: t.Set[str] = set() + for friendly_name, _sha, reverts in find_shas( + llvm_dir, interesting_shas, state, new_state + ): + if friendly_name in seen: + continue + seen.add(friendly_name) + for sha, reverted_sha in reverts: + try: + # We upload reverts for all platforms by default, since there's no + # real reason for them to be CrOS-specific. + get_upstream_patch.get_from_upstream( + chroot_path=chroot_path, + create_cl=True, + start_sha=reverted_sha, + patches=[sha], + reviewers=reviewers, + cc=cc, + platforms=(), + ) + except get_upstream_patch.CherrypickError as e: + logging.info("%s, skipping...", str(e)) + return new_state + + +def do_email( + is_dry_run: bool, + llvm_dir: str, + repository: str, + interesting_shas: t.List[t.Tuple[str, str]], + state: State, + recipients: _EmailRecipients, +) -> State: + def prettify_sha(sha: str) -> tiny_render.Piece: + rev = get_llvm_hash.GetVersionFrom(llvm_dir, sha) + + # 12 is arbitrary, but should be unambiguous enough. 
+ short_sha = sha[:12] + return tiny_render.Switch( + text=f"r{rev} ({short_sha})", + html=tiny_render.Link( + href="https://reviews.llvm.org/rG" + sha, inner="r" + str(rev) + ), + ) + + def get_sha_description(sha: str) -> tiny_render.Piece: + return subprocess.check_output( + ["git", "log", "-n1", "--format=%s", sha], + cwd=llvm_dir, + encoding="utf-8", + ).strip() + + new_state: State = {} + for friendly_name, sha, new_reverts in find_shas( + llvm_dir, interesting_shas, state, new_state + ): + email = _generate_revert_email( + repository, + friendly_name, + sha, + prettify_sha, + get_sha_description, + new_reverts, + ) + if is_dry_run: + logging.info( + "Would send email:\nSubject: %s\nBody:\n%s\n", + email.subject, + tiny_render.render_text_pieces(email.body), + ) + else: + logging.info("Sending email with subject %r...", email.subject) + _send_revert_email(recipients, email) + logging.info("Email sent.") + return new_state + + +def parse_args(argv: t.List[str]) -> t.Any: + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "action", + choices=["cherry-pick", "email", "dry-run"], + help="Automatically cherry-pick upstream reverts, send an email, or " + "write to stdout.", + ) + parser.add_argument( + "--state_file", required=True, help="File to store persistent state in." + ) + parser.add_argument( + "--llvm_dir", required=True, help="Up-to-date LLVM directory to use." + ) + parser.add_argument("--debug", action="store_true") + parser.add_argument( + "--reviewers", + type=str, + nargs="*", + help="Requests reviews from REVIEWERS. All REVIEWERS must have existing " + "accounts.", + ) + parser.add_argument( + "--cc", + type=str, + nargs="*", + help="CCs the CL to the recipients. All recipients must have existing " + "accounts.", ) - def get_sha_description(sha: str) -> tiny_render.Piece: - return subprocess.check_output( - ['git', 'log', '-n1', '--format=%s', sha], - cwd=llvm_dir, - encoding='utf-8', - ).strip() - - new_state: State = {} - for friendly_name, sha, new_reverts in find_shas(llvm_dir, interesting_shas, - state, new_state): - email = _generate_revert_email(repository, friendly_name, sha, - prettify_sha, get_sha_description, - new_reverts) - if is_dry_run: - logging.info('Would send email:\nSubject: %s\nBody:\n%s\n', - email.subject, tiny_render.render_text_pieces(email.body)) - else: - logging.info('Sending email with subject %r...', email.subject) - _send_revert_email(recipients, email) - logging.info('Email sent.') - return new_state + subparsers = parser.add_subparsers(dest="repository") + subparsers.required = True + chromeos_subparser = subparsers.add_parser("chromeos") + chromeos_subparser.add_argument( + "--chromeos_dir", + required=True, + help="Up-to-date CrOS directory to use.", + ) -def parse_args(argv: t.List[str]) -> t.Any: - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - 'action', - choices=['cherry-pick', 'email', 'dry-run'], - help='Automatically cherry-pick upstream reverts, send an email, or ' - 'write to stdout.') - parser.add_argument('--state_file', - required=True, - help='File to store persistent state in.') - parser.add_argument('--llvm_dir', - required=True, - help='Up-to-date LLVM directory to use.') - parser.add_argument('--debug', action='store_true') - parser.add_argument( - '--reviewers', - type=str, - nargs='*', - help='Requests reviews from REVIEWERS. 
All REVIEWERS must have existing ' - 'accounts.') - parser.add_argument( - '--cc', - type=str, - nargs='*', - help='CCs the CL to the recipients. All recipients must have existing ' - 'accounts.') - - subparsers = parser.add_subparsers(dest='repository') - subparsers.required = True - - chromeos_subparser = subparsers.add_parser('chromeos') - chromeos_subparser.add_argument('--chromeos_dir', - required=True, - help='Up-to-date CrOS directory to use.') - - android_subparser = subparsers.add_parser('android') - android_subparser.add_argument( - '--android_llvm_toolchain_dir', - required=True, - help='Up-to-date android-llvm-toolchain directory to use.') - - return parser.parse_args(argv) - - -def find_chroot(opts: t.Any, reviewers: t.List[str], cc: t.List[str] - ) -> t.Tuple[str, t.List[t.Tuple[str, str]], _EmailRecipients]: - recipients = reviewers + cc - if opts.repository == 'chromeos': - chroot_path = opts.chromeos_dir - return (chroot_path, _find_interesting_chromeos_shas(chroot_path), - _EmailRecipients(well_known=['mage'], direct=recipients)) - elif opts.repository == 'android': - if opts.action == 'cherry-pick': - raise RuntimeError( - "android doesn't currently support automatic cherry-picking.") - - chroot_path = opts.android_llvm_toolchain_dir - return (chroot_path, _find_interesting_android_shas(chroot_path), - _EmailRecipients(well_known=[], - direct=['android-llvm-dev@google.com'] + - recipients)) - else: - raise ValueError(f'Unknown repository {opts.repository}') + android_subparser = subparsers.add_parser("android") + android_subparser.add_argument( + "--android_llvm_toolchain_dir", + required=True, + help="Up-to-date android-llvm-toolchain directory to use.", + ) + + return parser.parse_args(argv) + + +def find_chroot( + opts: t.Any, reviewers: t.List[str], cc: t.List[str] +) -> t.Tuple[str, t.List[t.Tuple[str, str]], _EmailRecipients]: + recipients = reviewers + cc + if opts.repository == "chromeos": + chroot_path = opts.chromeos_dir + return ( + chroot_path, + _find_interesting_chromeos_shas(chroot_path), + _EmailRecipients(well_known=["mage"], direct=recipients), + ) + elif opts.repository == "android": + if opts.action == "cherry-pick": + raise RuntimeError( + "android doesn't currently support automatic cherry-picking." + ) + + chroot_path = opts.android_llvm_toolchain_dir + return ( + chroot_path, + _find_interesting_android_shas(chroot_path), + _EmailRecipients( + well_known=[], + direct=["android-llvm-dev@google.com"] + recipients, + ), + ) + else: + raise ValueError(f"Unknown repository {opts.repository}") def main(argv: t.List[str]) -> int: - opts = parse_args(argv) - - logging.basicConfig( - format='%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s', - level=logging.DEBUG if opts.debug else logging.INFO, - ) - - action = opts.action - llvm_dir = opts.llvm_dir - repository = opts.repository - state_file = opts.state_file - reviewers = opts.reviewers if opts.reviewers else [] - cc = opts.cc if opts.cc else [] - - chroot_path, interesting_shas, recipients = find_chroot(opts, reviewers, cc) - logging.info('Interesting SHAs were %r', interesting_shas) - - state = _read_state(state_file) - logging.info('Loaded state\n%s', pprint.pformat(state)) - - # We want to be as free of obvious side-effects as possible in case something - # above breaks. Hence, action as late as possible. 
- if action == 'cherry-pick': - new_state = do_cherrypick(chroot_path=chroot_path, - llvm_dir=llvm_dir, - interesting_shas=interesting_shas, - state=state, - reviewers=reviewers, - cc=cc) - else: - new_state = do_email(is_dry_run=action == 'dry-run', - llvm_dir=llvm_dir, - repository=repository, - interesting_shas=interesting_shas, - state=state, - recipients=recipients) - - _write_state(state_file, new_state) - return 0 - - -if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) + opts = parse_args(argv) + + logging.basicConfig( + format="%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s", + level=logging.DEBUG if opts.debug else logging.INFO, + ) + + action = opts.action + llvm_dir = opts.llvm_dir + repository = opts.repository + state_file = opts.state_file + reviewers = opts.reviewers if opts.reviewers else [] + cc = opts.cc if opts.cc else [] + + chroot_path, interesting_shas, recipients = find_chroot(opts, reviewers, cc) + logging.info("Interesting SHAs were %r", interesting_shas) + + state = _read_state(state_file) + logging.info("Loaded state\n%s", pprint.pformat(state)) + + # We want to be as free of obvious side-effects as possible in case something + # above breaks. Hence, action as late as possible. + if action == "cherry-pick": + new_state = do_cherrypick( + chroot_path=chroot_path, + llvm_dir=llvm_dir, + interesting_shas=interesting_shas, + state=state, + reviewers=reviewers, + cc=cc, + ) + else: + new_state = do_email( + is_dry_run=action == "dry-run", + llvm_dir=llvm_dir, + repository=repository, + interesting_shas=interesting_shas, + state=state, + recipients=recipients, + ) + + _write_state(state_file, new_state) + return 0 + + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/llvm_tools/nightly_revert_checker_test.py b/llvm_tools/nightly_revert_checker_test.py index 5e077f93..2064cf96 100755 --- a/llvm_tools/nightly_revert_checker_test.py +++ b/llvm_tools/nightly_revert_checker_test.py @@ -17,179 +17,207 @@ import get_upstream_patch import nightly_revert_checker import revert_checker + # pylint: disable=protected-access class Test(unittest.TestCase): - """Tests for nightly_revert_checker.""" - - def test_email_rendering_works_for_singular_revert(self): - def prettify_sha(sha: str) -> tiny_render.Piece: - return 'pretty_' + sha - - def get_sha_description(sha: str) -> tiny_render.Piece: - return 'subject_' + sha - - email = nightly_revert_checker._generate_revert_email( - repository_name='${repo}', - friendly_name='${name}', - sha='${sha}', - prettify_sha=prettify_sha, - get_sha_description=get_sha_description, - new_reverts=[ - revert_checker.Revert(sha='${revert_sha}', - reverted_sha='${reverted_sha}') - ]) - - expected_email = nightly_revert_checker._Email( - subject='[revert-checker/${repo}] new revert discovered across ${name}', - body=[ - 'It looks like there may be a new revert across ${name} (', - 'pretty_${sha}', - ').', - tiny_render.line_break, - tiny_render.line_break, - 'That is:', - tiny_render.UnorderedList([[ - 'pretty_${revert_sha}', - ' (appears to revert ', - 'pretty_${reverted_sha}', - '): ', - 'subject_${revert_sha}', - ]]), - tiny_render.line_break, - 'PTAL and consider reverting them locally.', - ]) - - self.assertEqual(email, expected_email) - - def test_email_rendering_works_for_multiple_reverts(self): - def prettify_sha(sha: str) -> tiny_render.Piece: - return 'pretty_' + sha - - def get_sha_description(sha: str) -> tiny_render.Piece: - return 'subject_' + sha - - email = 
nightly_revert_checker._generate_revert_email( - repository_name='${repo}', - friendly_name='${name}', - sha='${sha}', - prettify_sha=prettify_sha, - get_sha_description=get_sha_description, - new_reverts=[ - revert_checker.Revert(sha='${revert_sha1}', - reverted_sha='${reverted_sha1}'), - revert_checker.Revert(sha='${revert_sha2}', - reverted_sha='${reverted_sha2}'), - # Keep this out-of-order to check that we sort based on SHAs - revert_checker.Revert(sha='${revert_sha0}', - reverted_sha='${reverted_sha0}'), - ]) - - expected_email = nightly_revert_checker._Email( - subject='[revert-checker/${repo}] new reverts discovered across ' - '${name}', - body=[ - 'It looks like there may be new reverts across ${name} (', - 'pretty_${sha}', - ').', - tiny_render.line_break, - tiny_render.line_break, - 'These are:', - tiny_render.UnorderedList([ - [ - 'pretty_${revert_sha0}', - ' (appears to revert ', - 'pretty_${reverted_sha0}', - '): ', - 'subject_${revert_sha0}', - ], - [ - 'pretty_${revert_sha1}', - ' (appears to revert ', - 'pretty_${reverted_sha1}', - '): ', - 'subject_${revert_sha1}', - ], - [ - 'pretty_${revert_sha2}', - ' (appears to revert ', - 'pretty_${reverted_sha2}', - '): ', - 'subject_${revert_sha2}', - ], - ]), - tiny_render.line_break, - 'PTAL and consider reverting them locally.', - ]) - - self.assertEqual(email, expected_email) - - def test_llvm_ebuild_parsing_appears_to_function(self): - llvm_ebuild = io.StringIO('\n'.join(( - 'foo', - '#LLVM_HASH="123"', - 'LLVM_HASH="123" # comment', - 'LLVM_NEXT_HASH="456"', - ))) - - shas = nightly_revert_checker._parse_llvm_ebuild_for_shas(llvm_ebuild) - self.assertEqual(shas, [ - ('llvm', '123'), - ('llvm-next', '456'), - ]) - - def test_llvm_ebuild_parsing_fails_if_both_hashes_arent_present(self): - bad_bodies = [ - '', - 'LLVM_HASH="123" # comment', - 'LLVM_NEXT_HASH="123" # comment', - 'LLVM_NEXT_HASH="123" # comment\n#LLVM_HASH="123"', - ] - - for bad in bad_bodies: - with self.assertRaises(ValueError) as e: - nightly_revert_checker._parse_llvm_ebuild_for_shas(io.StringIO(bad)) - - self.assertIn('Failed to detect SHAs', str(e.exception)) - - @patch('revert_checker.find_reverts') - @patch('get_upstream_patch.get_from_upstream') - def test_do_cherrypick_is_called(self, do_cherrypick, find_reverts): - find_reverts.return_value = [ - revert_checker.Revert('12345abcdef', 'fedcba54321') - ] - nightly_revert_checker.do_cherrypick(chroot_path='/path/to/chroot', - llvm_dir='/path/to/llvm', - interesting_shas=[('12345abcdef', - 'fedcba54321')], - state={}, - reviewers=['meow@chromium.org'], - cc=['purr@chromium.org']) - - do_cherrypick.assert_called_once() - find_reverts.assert_called_once() - - @patch('revert_checker.find_reverts') - @patch('get_upstream_patch.get_from_upstream') - def test_do_cherrypick_handles_cherrypick_error(self, do_cherrypick, - find_reverts): - find_reverts.return_value = [ - revert_checker.Revert('12345abcdef', 'fedcba54321') - ] - do_cherrypick.side_effect = get_upstream_patch.CherrypickError( - 'Patch at 12345abcdef already exists in PATCHES.json') - nightly_revert_checker.do_cherrypick(chroot_path='/path/to/chroot', - llvm_dir='/path/to/llvm', - interesting_shas=[('12345abcdef', - 'fedcba54321')], - state={}, - reviewers=['meow@chromium.org'], - cc=['purr@chromium.org']) - - do_cherrypick.assert_called_once() - find_reverts.assert_called_once() - - -if __name__ == '__main__': - unittest.main() + """Tests for nightly_revert_checker.""" + + def test_email_rendering_works_for_singular_revert(self): + def 
prettify_sha(sha: str) -> tiny_render.Piece: + return "pretty_" + sha + + def get_sha_description(sha: str) -> tiny_render.Piece: + return "subject_" + sha + + email = nightly_revert_checker._generate_revert_email( + repository_name="${repo}", + friendly_name="${name}", + sha="${sha}", + prettify_sha=prettify_sha, + get_sha_description=get_sha_description, + new_reverts=[ + revert_checker.Revert( + sha="${revert_sha}", reverted_sha="${reverted_sha}" + ) + ], + ) + + expected_email = nightly_revert_checker._Email( + subject="[revert-checker/${repo}] new revert discovered across ${name}", + body=[ + "It looks like there may be a new revert across ${name} (", + "pretty_${sha}", + ").", + tiny_render.line_break, + tiny_render.line_break, + "That is:", + tiny_render.UnorderedList( + [ + [ + "pretty_${revert_sha}", + " (appears to revert ", + "pretty_${reverted_sha}", + "): ", + "subject_${revert_sha}", + ] + ] + ), + tiny_render.line_break, + "PTAL and consider reverting them locally.", + ], + ) + + self.assertEqual(email, expected_email) + + def test_email_rendering_works_for_multiple_reverts(self): + def prettify_sha(sha: str) -> tiny_render.Piece: + return "pretty_" + sha + + def get_sha_description(sha: str) -> tiny_render.Piece: + return "subject_" + sha + + email = nightly_revert_checker._generate_revert_email( + repository_name="${repo}", + friendly_name="${name}", + sha="${sha}", + prettify_sha=prettify_sha, + get_sha_description=get_sha_description, + new_reverts=[ + revert_checker.Revert( + sha="${revert_sha1}", reverted_sha="${reverted_sha1}" + ), + revert_checker.Revert( + sha="${revert_sha2}", reverted_sha="${reverted_sha2}" + ), + # Keep this out-of-order to check that we sort based on SHAs + revert_checker.Revert( + sha="${revert_sha0}", reverted_sha="${reverted_sha0}" + ), + ], + ) + + expected_email = nightly_revert_checker._Email( + subject="[revert-checker/${repo}] new reverts discovered across " + "${name}", + body=[ + "It looks like there may be new reverts across ${name} (", + "pretty_${sha}", + ").", + tiny_render.line_break, + tiny_render.line_break, + "These are:", + tiny_render.UnorderedList( + [ + [ + "pretty_${revert_sha0}", + " (appears to revert ", + "pretty_${reverted_sha0}", + "): ", + "subject_${revert_sha0}", + ], + [ + "pretty_${revert_sha1}", + " (appears to revert ", + "pretty_${reverted_sha1}", + "): ", + "subject_${revert_sha1}", + ], + [ + "pretty_${revert_sha2}", + " (appears to revert ", + "pretty_${reverted_sha2}", + "): ", + "subject_${revert_sha2}", + ], + ] + ), + tiny_render.line_break, + "PTAL and consider reverting them locally.", + ], + ) + + self.assertEqual(email, expected_email) + + def test_llvm_ebuild_parsing_appears_to_function(self): + llvm_ebuild = io.StringIO( + "\n".join( + ( + "foo", + '#LLVM_HASH="123"', + 'LLVM_HASH="123" # comment', + 'LLVM_NEXT_HASH="456"', + ) + ) + ) + + shas = nightly_revert_checker._parse_llvm_ebuild_for_shas(llvm_ebuild) + self.assertEqual( + shas, + [ + ("llvm", "123"), + ("llvm-next", "456"), + ], + ) + + def test_llvm_ebuild_parsing_fails_if_both_hashes_arent_present(self): + bad_bodies = [ + "", + 'LLVM_HASH="123" # comment', + 'LLVM_NEXT_HASH="123" # comment', + 'LLVM_NEXT_HASH="123" # comment\n#LLVM_HASH="123"', + ] + + for bad in bad_bodies: + with self.assertRaises(ValueError) as e: + nightly_revert_checker._parse_llvm_ebuild_for_shas( + io.StringIO(bad) + ) + + self.assertIn("Failed to detect SHAs", str(e.exception)) + + @patch("revert_checker.find_reverts") + 
@patch("get_upstream_patch.get_from_upstream") + def test_do_cherrypick_is_called(self, do_cherrypick, find_reverts): + find_reverts.return_value = [ + revert_checker.Revert("12345abcdef", "fedcba54321") + ] + nightly_revert_checker.do_cherrypick( + chroot_path="/path/to/chroot", + llvm_dir="/path/to/llvm", + interesting_shas=[("12345abcdef", "fedcba54321")], + state={}, + reviewers=["meow@chromium.org"], + cc=["purr@chromium.org"], + ) + + do_cherrypick.assert_called_once() + find_reverts.assert_called_once() + + @patch("revert_checker.find_reverts") + @patch("get_upstream_patch.get_from_upstream") + def test_do_cherrypick_handles_cherrypick_error( + self, do_cherrypick, find_reverts + ): + find_reverts.return_value = [ + revert_checker.Revert("12345abcdef", "fedcba54321") + ] + do_cherrypick.side_effect = get_upstream_patch.CherrypickError( + "Patch at 12345abcdef already exists in PATCHES.json" + ) + nightly_revert_checker.do_cherrypick( + chroot_path="/path/to/chroot", + llvm_dir="/path/to/llvm", + interesting_shas=[("12345abcdef", "fedcba54321")], + state={}, + reviewers=["meow@chromium.org"], + cc=["purr@chromium.org"], + ) + + do_cherrypick.assert_called_once() + find_reverts.assert_called_once() + + +if __name__ == "__main__": + unittest.main() diff --git a/llvm_tools/patch_manager.py b/llvm_tools/patch_manager.py index 2893d611..d71c3888 100755 --- a/llvm_tools/patch_manager.py +++ b/llvm_tools/patch_manager.py @@ -20,359 +20,389 @@ from subprocess_helpers import check_output class GitBisectionCode(enum.IntEnum): - """Git bisection exit codes. + """Git bisection exit codes. - Used when patch_manager.py is in the bisection mode, - as we need to return in what way we should handle - certain patch failures. - """ - GOOD = 0 - """All patches applied successfully.""" - BAD = 1 - """The tested patch failed to apply.""" - SKIP = 125 + Used when patch_manager.py is in the bisection mode, + as we need to return in what way we should handle + certain patch failures. + """ + + GOOD = 0 + """All patches applied successfully.""" + BAD = 1 + """The tested patch failed to apply.""" + SKIP = 125 def GetCommandLineArgs(sys_argv: Optional[List[str]]): - """Get the required arguments from the command line.""" - - # Create parser and add optional command-line arguments. - parser = argparse.ArgumentParser(description='A manager for patches.') - - # Add argument for the LLVM version to use for patch management. - parser.add_argument( - '--svn_version', - type=int, - help='the LLVM svn version to use for patch management (determines ' - 'whether a patch is applicable). Required when not bisecting.') - - # Add argument for the patch metadata file that is in $FILESDIR. - parser.add_argument( - '--patch_metadata_file', - required=True, - type=Path, - help='the absolute path to the .json file in "$FILESDIR/" of the ' - 'package which has all the patches and their metadata if applicable') - - # Add argument for the absolute path to the unpacked sources. - parser.add_argument('--src_path', - required=True, - type=Path, - help='the absolute path to the unpacked LLVM sources') - - # Add argument for the mode of the patch manager when handling failing - # applicable patches. - parser.add_argument( - '--failure_mode', - default=FailureModes.FAIL, - type=FailureModes, - help='the mode of the patch manager when handling failed patches ' - '(default: %(default)s)') - parser.add_argument( - '--test_patch', - default='', - help='The rel_patch_path of the patch we want to bisect the ' - 'application of. 
Not used in other modes.') - - # Parse the command line. - return parser.parse_args(sys_argv) + """Get the required arguments from the command line.""" + + # Create parser and add optional command-line arguments. + parser = argparse.ArgumentParser(description="A manager for patches.") + + # Add argument for the LLVM version to use for patch management. + parser.add_argument( + "--svn_version", + type=int, + help="the LLVM svn version to use for patch management (determines " + "whether a patch is applicable). Required when not bisecting.", + ) + + # Add argument for the patch metadata file that is in $FILESDIR. + parser.add_argument( + "--patch_metadata_file", + required=True, + type=Path, + help='the absolute path to the .json file in "$FILESDIR/" of the ' + "package which has all the patches and their metadata if applicable", + ) + + # Add argument for the absolute path to the unpacked sources. + parser.add_argument( + "--src_path", + required=True, + type=Path, + help="the absolute path to the unpacked LLVM sources", + ) + + # Add argument for the mode of the patch manager when handling failing + # applicable patches. + parser.add_argument( + "--failure_mode", + default=FailureModes.FAIL, + type=FailureModes, + help="the mode of the patch manager when handling failed patches " + "(default: %(default)s)", + ) + parser.add_argument( + "--test_patch", + default="", + help="The rel_patch_path of the patch we want to bisect the " + "application of. Not used in other modes.", + ) + + # Parse the command line. + return parser.parse_args(sys_argv) def GetHEADSVNVersion(src_path): - """Gets the SVN version of HEAD in the src tree.""" + """Gets the SVN version of HEAD in the src tree.""" - cmd = ['git', '-C', src_path, 'rev-parse', 'HEAD'] + cmd = ["git", "-C", src_path, "rev-parse", "HEAD"] - git_hash = check_output(cmd) + git_hash = check_output(cmd) - version = get_llvm_hash.GetVersionFrom(src_path, git_hash.rstrip()) + version = get_llvm_hash.GetVersionFrom(src_path, git_hash.rstrip()) - return version + return version def _WriteJsonChanges(patches: List[Dict[str, Any]], file_io: IO[str]): - """Write JSON changes to file, does not acquire new file lock.""" - json.dump(patches, file_io, indent=4, separators=(',', ': ')) - # Need to add a newline as json.dump omits it. - file_io.write('\n') + """Write JSON changes to file, does not acquire new file lock.""" + json.dump(patches, file_io, indent=4, separators=(",", ": ")) + # Need to add a newline as json.dump omits it. + file_io.write("\n") def GetCommitHashesForBisection(src_path, good_svn_version, bad_svn_version): - """Gets the good and bad commit hashes required by `git bisect start`.""" - - bad_commit_hash = get_llvm_hash.GetGitHashFrom(src_path, bad_svn_version) - - good_commit_hash = get_llvm_hash.GetGitHashFrom(src_path, good_svn_version) - - return good_commit_hash, bad_commit_hash - - -def RemoveOldPatches(svn_version: int, llvm_src_dir: Path, - patches_json_fp: Path): - """Remove patches that don't and will never apply for the future. - - Patches are determined to be "old" via the "is_old" method for - each patch entry. - - Args: - svn_version: LLVM SVN version. - llvm_src_dir: LLVM source directory. - patches_json_fp: Location to edit patches on. 
- """ - with patches_json_fp.open(encoding='utf-8') as f: - patches_list = json.load(f) - patch_entries = (patch_utils.PatchEntry.from_dict(llvm_src_dir, elem) - for elem in patches_list) - oldness = [(entry, entry.is_old(svn_version)) for entry in patch_entries] - filtered_entries = [entry.to_dict() for entry, old in oldness if not old] - with patch_utils.atomic_write(patches_json_fp, encoding='utf-8') as f: - _WriteJsonChanges(filtered_entries, f) - removed_entries = [entry for entry, old in oldness if old] - plural_patches = 'patch' if len(removed_entries) == 1 else 'patches' - print(f'Removed {len(removed_entries)} old {plural_patches}:') - for r in removed_entries: - print(f'- {r.rel_patch_path}: {r.title()}') - - -def UpdateVersionRanges(svn_version: int, llvm_src_dir: Path, - patches_json_fp: Path): - """Reduce the version ranges of failing patches. - - Patches which fail to apply will have their 'version_range.until' - field reduced to the passed in svn_version. - - Modifies the contents of patches_json_fp. - - Ars: - svn_version: LLVM revision number. - llvm_src_dir: llvm-project directory path. - patches_json_fp: Filepath to the PATCHES.json file. - """ - with patches_json_fp.open(encoding='utf-8') as f: - patch_entries = patch_utils.json_to_patch_entries( - patches_json_fp.parent, - f, - ) - modified_entries = UpdateVersionRangesWithEntries(svn_version, llvm_src_dir, - patch_entries) - with patch_utils.atomic_write(patches_json_fp, encoding='utf-8') as f: - _WriteJsonChanges([p.to_dict() for p in patch_entries], f) - for entry in modified_entries: - print(f'Stopped applying {entry.rel_patch_path} ({entry.title()}) ' - f'for r{svn_version}') + """Gets the good and bad commit hashes required by `git bisect start`.""" + bad_commit_hash = get_llvm_hash.GetGitHashFrom(src_path, bad_svn_version) -def UpdateVersionRangesWithEntries( - svn_version: int, llvm_src_dir: Path, - patch_entries: Iterable[patch_utils.PatchEntry] -) -> List[patch_utils.PatchEntry]: - """Test-able helper for UpdateVersionRanges. + good_commit_hash = get_llvm_hash.GetGitHashFrom(src_path, good_svn_version) - Args: - svn_version: LLVM revision number. - llvm_src_dir: llvm-project directory path. - patch_entries: PatchEntry objects to modify. + return good_commit_hash, bad_commit_hash - Returns: - A list of PatchEntry objects which were modified. - Post: - Modifies patch_entries in place. - """ - modified_entries: List[patch_utils.PatchEntry] = [] - with patch_utils.git_clean_context(llvm_src_dir): - for pe in patch_entries: - test_result = pe.test_apply(llvm_src_dir) - if not test_result: - if pe.version_range is None: - pe.version_range = {} - pe.version_range['until'] = svn_version - modified_entries.append(pe) - else: - # We have to actually apply the patch so that future patches - # will stack properly. - if not pe.apply(llvm_src_dir).succeeded: - raise RuntimeError('Could not apply patch that dry ran successfully') - return modified_entries - - -def CheckPatchApplies(svn_version: int, llvm_src_dir: Path, - patches_json_fp: Path, - rel_patch_path: str) -> GitBisectionCode: - """Check that a given patch with the rel_patch_path applies in the stack. - - This is used in the bisection mode of the patch manager. It's similiar - to ApplyAllFromJson, but differs in that the patch with rel_patch_path - will attempt to apply regardless of its version range, as we're trying - to identify the SVN version - - Args: - svn_version: SVN version to test at. - llvm_src_dir: llvm-project source code diroctory (with a .git). 
-    patches_json_fp: PATCHES.json filepath.
-    rel_patch_path: Relative patch path of the patch we want to check. If
-      patches before this patch fail to apply, then the revision is skipped.
-  """
-  with patches_json_fp.open(encoding='utf-8') as f:
-    patch_entries = patch_utils.json_to_patch_entries(
-        patches_json_fp.parent,
-        f,
+def RemoveOldPatches(
+    svn_version: int, llvm_src_dir: Path, patches_json_fp: Path
+):
+    """Remove patches that don't and will never apply for the future.
+
+    Patches are determined to be "old" via the "is_old" method for
+    each patch entry.
+
+    Args:
+        svn_version: LLVM SVN version.
+        llvm_src_dir: LLVM source directory.
+        patches_json_fp: Location to edit patches on.
+    """
+    with patches_json_fp.open(encoding="utf-8") as f:
+        patches_list = json.load(f)
+    patch_entries = (
+        patch_utils.PatchEntry.from_dict(llvm_src_dir, elem)
+        for elem in patches_list
     )
+    oldness = [(entry, entry.is_old(svn_version)) for entry in patch_entries]
+    filtered_entries = [entry.to_dict() for entry, old in oldness if not old]
+    with patch_utils.atomic_write(patches_json_fp, encoding="utf-8") as f:
+        _WriteJsonChanges(filtered_entries, f)
+    removed_entries = [entry for entry, old in oldness if old]
+    plural_patches = "patch" if len(removed_entries) == 1 else "patches"
+    print(f"Removed {len(removed_entries)} old {plural_patches}:")
+    for r in removed_entries:
+        print(f"- {r.rel_patch_path}: {r.title()}")
+
+
+def UpdateVersionRanges(
+    svn_version: int, llvm_src_dir: Path, patches_json_fp: Path
+):
+    """Reduce the version ranges of failing patches.
+
+    Patches which fail to apply will have their 'version_range.until'
+    field reduced to the passed in svn_version.
+
+    Modifies the contents of patches_json_fp.
+
+    Args:
+        svn_version: LLVM revision number.
+        llvm_src_dir: llvm-project directory path.
+        patches_json_fp: Filepath to the PATCHES.json file.
+    """
+    with patches_json_fp.open(encoding="utf-8") as f:
+        patch_entries = patch_utils.json_to_patch_entries(
+            patches_json_fp.parent,
+            f,
+        )
+    modified_entries = UpdateVersionRangesWithEntries(
+        svn_version, llvm_src_dir, patch_entries
+    )
+    with patch_utils.atomic_write(patches_json_fp, encoding="utf-8") as f:
+        _WriteJsonChanges([p.to_dict() for p in patch_entries], f)
+    for entry in modified_entries:
+        print(
+            f"Stopped applying {entry.rel_patch_path} ({entry.title()}) "
+            f"for r{svn_version}"
+        )
+
+
+def UpdateVersionRangesWithEntries(
+    svn_version: int,
+    llvm_src_dir: Path,
+    patch_entries: Iterable[patch_utils.PatchEntry],
+) -> List[patch_utils.PatchEntry]:
+    """Testable helper for UpdateVersionRanges.
+
+    Args:
+        svn_version: LLVM revision number.
+        llvm_src_dir: llvm-project directory path.
+        patch_entries: PatchEntry objects to modify.
+
+    Returns:
+        A list of PatchEntry objects which were modified.
+
+    Post:
+        Modifies patch_entries in place.
+    """
+    modified_entries: List[patch_utils.PatchEntry] = []
+    with patch_utils.git_clean_context(llvm_src_dir):
+        for pe in patch_entries:
+            test_result = pe.test_apply(llvm_src_dir)
+            if not test_result:
+                if pe.version_range is None:
+                    pe.version_range = {}
+                pe.version_range["until"] = svn_version
+                modified_entries.append(pe)
+            else:
+                # We have to actually apply the patch so that future patches
+                # will stack properly.
+                if not pe.apply(llvm_src_dir).succeeded:
+                    raise RuntimeError(
+                        "Could not apply patch that dry ran successfully"
+                    )
+    return modified_entries
+
+
+def CheckPatchApplies(
+    svn_version: int,
+    llvm_src_dir: Path,
+    patches_json_fp: Path,
+    rel_patch_path: str,
+) -> GitBisectionCode:
+    """Check that a given patch with the rel_patch_path applies in the stack.
+
+    This is used in the bisection mode of the patch manager. It's similar
+    to ApplyAllFromJson, but differs in that the patch with rel_patch_path
+    will attempt to apply regardless of its version range, as we're trying
+    to identify the SVN version.
+
+    Args:
+        svn_version: SVN version to test at.
+        llvm_src_dir: llvm-project source code directory (with a .git).
+        patches_json_fp: PATCHES.json filepath.
+        rel_patch_path: Relative patch path of the patch we want to check. If
+            patches before this patch fail to apply, then the revision is skipped.
+    """
+    with patches_json_fp.open(encoding="utf-8") as f:
+        patch_entries = patch_utils.json_to_patch_entries(
+            patches_json_fp.parent,
+            f,
+        )
+    with patch_utils.git_clean_context(llvm_src_dir):
+        success, _, failed_patches = ApplyPatchAndPrior(
+            svn_version,
+            llvm_src_dir,
+            patch_entries,
+            rel_patch_path,
+        )
+    if success:
+        # Everything is good, patch applied successfully.
+        print(f"SUCCEEDED applying {rel_patch_path} @ r{svn_version}")
+        return GitBisectionCode.GOOD
+    if failed_patches and failed_patches[-1].rel_patch_path == rel_patch_path:
+        # We attempted to apply this patch, but it failed.
+        print(f"FAILED to apply {rel_patch_path} @ r{svn_version}")
+        return GitBisectionCode.BAD
+    # Didn't attempt to apply the patch, but failed regardless.
+    # Skip this revision.
+    print(f"SKIPPED {rel_patch_path} @ r{svn_version} due to prior failures")
+    return GitBisectionCode.SKIP
 
 
 def ApplyPatchAndPrior(
-    svn_version: int, src_dir: Path,
-    patch_entries: Iterable[patch_utils.PatchEntry], rel_patch_path: str
+    svn_version: int,
+    src_dir: Path,
+    patch_entries: Iterable[patch_utils.PatchEntry],
+    rel_patch_path: str,
 ) -> Tuple[bool, List[patch_utils.PatchEntry], List[patch_utils.PatchEntry]]:
-  """Apply a patch, and all patches that apply before it in the patch stack.
-
-  Patches which did not attempt to apply (because their version range didn't
-  match and they weren't the patch of interest) do not appear in the output.
-
-  Probably shouldn't be called from outside of CheckPatchApplies, as it modifies
-  the source dir contents.
-
-  Returns:
-    A tuple where:
-    [0]: Did the patch of interest succeed in applying?
-    [1]: List of applied patches, potentially containing the patch of interest.
-    [2]: List of failing patches, potentially containing the patch of interest.
-  """
-  failed_patches = []
-  applied_patches = []
-  # We have to apply every patch up to the one we care about,
-  # as patches can stack.
- for pe in patch_entries: - is_patch_of_interest = pe.rel_patch_path == rel_patch_path - applied, failed_hunks = patch_utils.apply_single_patch_entry( - svn_version, src_dir, pe, ignore_version_range=is_patch_of_interest) - meant_to_apply = bool(failed_hunks) or is_patch_of_interest - if is_patch_of_interest: - if applied: - # We applied the patch we wanted to, we can stop. - applied_patches.append(pe) - return True, applied_patches, failed_patches - else: - # We failed the patch we cared about, we can stop. - failed_patches.append(pe) - return False, applied_patches, failed_patches - else: - if applied: - applied_patches.append(pe) - elif meant_to_apply: - # Broke before we reached the patch we cared about. Stop. - failed_patches.append(pe) - return False, applied_patches, failed_patches - raise ValueError(f'Did not find patch {rel_patch_path}. ' - 'Does it exist?') + """Apply a patch, and all patches that apply before it in the patch stack. + + Patches which did not attempt to apply (because their version range didn't + match and they weren't the patch of interest) do not appear in the output. + + Probably shouldn't be called from outside of CheckPatchApplies, as it modifies + the source dir contents. + + Returns: + A tuple where: + [0]: Did the patch of interest succeed in applying? + [1]: List of applied patches, potentially containing the patch of interest. + [2]: List of failing patches, potentially containing the patch of interest. + """ + failed_patches = [] + applied_patches = [] + # We have to apply every patch up to the one we care about, + # as patches can stack. + for pe in patch_entries: + is_patch_of_interest = pe.rel_patch_path == rel_patch_path + applied, failed_hunks = patch_utils.apply_single_patch_entry( + svn_version, src_dir, pe, ignore_version_range=is_patch_of_interest + ) + meant_to_apply = bool(failed_hunks) or is_patch_of_interest + if is_patch_of_interest: + if applied: + # We applied the patch we wanted to, we can stop. + applied_patches.append(pe) + return True, applied_patches, failed_patches + else: + # We failed the patch we cared about, we can stop. + failed_patches.append(pe) + return False, applied_patches, failed_patches + else: + if applied: + applied_patches.append(pe) + elif meant_to_apply: + # Broke before we reached the patch we cared about. Stop. + failed_patches.append(pe) + return False, applied_patches, failed_patches + raise ValueError(f"Did not find patch {rel_patch_path}. " "Does it exist?") def PrintPatchResults(patch_info: patch_utils.PatchInfo): - """Prints the results of handling the patches of a package. + """Prints the results of handling the patches of a package. - Args: - patch_info: A dataclass that has information on the patches. - """ + Args: + patch_info: A dataclass that has information on the patches. 
+ """ - def _fmt(patches): - return (str(pe.patch_path()) for pe in patches) + def _fmt(patches): + return (str(pe.patch_path()) for pe in patches) - if patch_info.applied_patches: - print('\nThe following patches applied successfully:') - print('\n'.join(_fmt(patch_info.applied_patches))) + if patch_info.applied_patches: + print("\nThe following patches applied successfully:") + print("\n".join(_fmt(patch_info.applied_patches))) - if patch_info.failed_patches: - print('\nThe following patches failed to apply:') - print('\n'.join(_fmt(patch_info.failed_patches))) + if patch_info.failed_patches: + print("\nThe following patches failed to apply:") + print("\n".join(_fmt(patch_info.failed_patches))) - if patch_info.non_applicable_patches: - print('\nThe following patches were not applicable:') - print('\n'.join(_fmt(patch_info.non_applicable_patches))) + if patch_info.non_applicable_patches: + print("\nThe following patches were not applicable:") + print("\n".join(_fmt(patch_info.non_applicable_patches))) - if patch_info.modified_metadata: - print('\nThe patch metadata file %s has been modified' % - os.path.basename(patch_info.modified_metadata)) + if patch_info.modified_metadata: + print( + "\nThe patch metadata file %s has been modified" + % os.path.basename(patch_info.modified_metadata) + ) - if patch_info.disabled_patches: - print('\nThe following patches were disabled:') - print('\n'.join(_fmt(patch_info.disabled_patches))) + if patch_info.disabled_patches: + print("\nThe following patches were disabled:") + print("\n".join(_fmt(patch_info.disabled_patches))) - if patch_info.removed_patches: - print('\nThe following patches were removed from the patch metadata file:') - for cur_patch_path in patch_info.removed_patches: - print('%s' % os.path.basename(cur_patch_path)) + if patch_info.removed_patches: + print( + "\nThe following patches were removed from the patch metadata file:" + ) + for cur_patch_path in patch_info.removed_patches: + print("%s" % os.path.basename(cur_patch_path)) def main(sys_argv: List[str]): - """Applies patches to the source tree and takes action on a failed patch.""" - - args_output = GetCommandLineArgs(sys_argv) - - llvm_src_dir = Path(args_output.src_path) - if not llvm_src_dir.is_dir(): - raise ValueError(f'--src_path arg {llvm_src_dir} is not a directory') - patches_json_fp = Path(args_output.patch_metadata_file) - if not patches_json_fp.is_file(): - raise ValueError('--patch_metadata_file arg ' - f'{patches_json_fp} is not a file') - - def _apply_all(args): - if args.svn_version is None: - raise ValueError('--svn_version must be set when applying patches') - result = patch_utils.apply_all_from_json( - svn_version=args.svn_version, - llvm_src_dir=llvm_src_dir, - patches_json_fp=patches_json_fp, - continue_on_failure=args.failure_mode == FailureModes.CONTINUE) - PrintPatchResults(result) - - def _remove(args): - RemoveOldPatches(args.svn_version, llvm_src_dir, patches_json_fp) - - def _disable(args): - UpdateVersionRanges(args.svn_version, llvm_src_dir, patches_json_fp) - - def _test_single(args): - if not args.test_patch: - raise ValueError('Running with bisect_patches requires the ' - '--test_patch flag.') - svn_version = GetHEADSVNVersion(llvm_src_dir) - error_code = CheckPatchApplies(svn_version, llvm_src_dir, patches_json_fp, - args.test_patch) - # Since this is for bisection, we want to exit with the - # GitBisectionCode enum. 
- sys.exit(int(error_code)) - - dispatch_table = { - FailureModes.FAIL: _apply_all, - FailureModes.CONTINUE: _apply_all, - FailureModes.REMOVE_PATCHES: _remove, - FailureModes.DISABLE_PATCHES: _disable, - FailureModes.BISECT_PATCHES: _test_single, - } - - if args_output.failure_mode in dispatch_table: - dispatch_table[args_output.failure_mode](args_output) - - -if __name__ == '__main__': - main(sys.argv[1:]) + """Applies patches to the source tree and takes action on a failed patch.""" + + args_output = GetCommandLineArgs(sys_argv) + + llvm_src_dir = Path(args_output.src_path) + if not llvm_src_dir.is_dir(): + raise ValueError(f"--src_path arg {llvm_src_dir} is not a directory") + patches_json_fp = Path(args_output.patch_metadata_file) + if not patches_json_fp.is_file(): + raise ValueError( + "--patch_metadata_file arg " f"{patches_json_fp} is not a file" + ) + + def _apply_all(args): + if args.svn_version is None: + raise ValueError("--svn_version must be set when applying patches") + result = patch_utils.apply_all_from_json( + svn_version=args.svn_version, + llvm_src_dir=llvm_src_dir, + patches_json_fp=patches_json_fp, + continue_on_failure=args.failure_mode == FailureModes.CONTINUE, + ) + PrintPatchResults(result) + + def _remove(args): + RemoveOldPatches(args.svn_version, llvm_src_dir, patches_json_fp) + + def _disable(args): + UpdateVersionRanges(args.svn_version, llvm_src_dir, patches_json_fp) + + def _test_single(args): + if not args.test_patch: + raise ValueError( + "Running with bisect_patches requires the " "--test_patch flag." + ) + svn_version = GetHEADSVNVersion(llvm_src_dir) + error_code = CheckPatchApplies( + svn_version, llvm_src_dir, patches_json_fp, args.test_patch + ) + # Since this is for bisection, we want to exit with the + # GitBisectionCode enum. + sys.exit(int(error_code)) + + dispatch_table = { + FailureModes.FAIL: _apply_all, + FailureModes.CONTINUE: _apply_all, + FailureModes.REMOVE_PATCHES: _remove, + FailureModes.DISABLE_PATCHES: _disable, + FailureModes.BISECT_PATCHES: _test_single, + } + + if args_output.failure_mode in dispatch_table: + dispatch_table[args_output.failure_mode](args_output) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/llvm_tools/patch_manager_unittest.py b/llvm_tools/patch_manager_unittest.py index 238fd781..444156a5 100755 --- a/llvm_tools/patch_manager_unittest.py +++ b/llvm_tools/patch_manager_unittest.py @@ -17,250 +17,289 @@ import patch_utils class PatchManagerTest(unittest.TestCase): - """Test class when handling patches of packages.""" + """Test class when handling patches of packages.""" - # Simulate behavior of 'os.path.isdir()' when the path is not a directory. - @mock.patch.object(Path, 'is_dir', return_value=False) - def testInvalidDirectoryPassedAsCommandLineArgument(self, mock_isdir): - src_dir = '/some/path/that/is/not/a/directory' - patch_metadata_file = '/some/path/that/is/not/a/file' + # Simulate behavior of 'os.path.isdir()' when the path is not a directory. + @mock.patch.object(Path, "is_dir", return_value=False) + def testInvalidDirectoryPassedAsCommandLineArgument(self, mock_isdir): + src_dir = "/some/path/that/is/not/a/directory" + patch_metadata_file = "/some/path/that/is/not/a/file" - # Verify the exception is raised when the command line argument for - # '--filesdir_path' or '--src_path' is not a directory. 
-    with self.assertRaises(ValueError):
-      patch_manager.main([
-          '--src_path', src_dir, '--patch_metadata_file', patch_metadata_file
-      ])
-    mock_isdir.assert_called_once()
+        # Verify the exception is raised when the command line argument for
+        # '--filesdir_path' or '--src_path' is not a directory.
+        with self.assertRaises(ValueError):
+            patch_manager.main(
+                [
+                    "--src_path",
+                    src_dir,
+                    "--patch_metadata_file",
+                    patch_metadata_file,
+                ]
+            )
+        mock_isdir.assert_called_once()

-  # Simulate behavior of 'os.path.isfile()' when the patch metadata file is does
-  # not exist.
-  @mock.patch.object(Path, 'is_file', return_value=False)
-  def testInvalidPathToPatchMetadataFilePassedAsCommandLineArgument(
-      self, mock_isfile):
-    src_dir = '/some/path/that/is/not/a/directory'
-    patch_metadata_file = '/some/path/that/is/not/a/file'
+    # Simulate behavior of 'os.path.isfile()' when the patch metadata file
+    # does not exist.
+    @mock.patch.object(Path, "is_file", return_value=False)
+    def testInvalidPathToPatchMetadataFilePassedAsCommandLineArgument(
+        self, mock_isfile
+    ):
+        src_dir = "/some/path/that/is/not/a/directory"
+        patch_metadata_file = "/some/path/that/is/not/a/file"

-    # Verify the exception is raised when the command line argument for
-    # '--filesdir_path' or '--src_path' is not a directory.
-    with mock.patch.object(Path, 'is_dir', return_value=True):
-      with self.assertRaises(ValueError):
-        patch_manager.main([
-          '--src_path', src_dir, '--patch_metadata_file', patch_metadata_file
-        ])
-      mock_isfile.assert_called_once()
+        # Verify the exception is raised when the command line argument for
+        # '--patch_metadata_file' is not a file.
+        with mock.patch.object(Path, "is_dir", return_value=True):
+            with self.assertRaises(ValueError):
+                patch_manager.main(
+                    [
+                        "--src_path",
+                        src_dir,
+                        "--patch_metadata_file",
+                        patch_metadata_file,
+                    ]
+                )
+            mock_isfile.assert_called_once()

-  @mock.patch('builtins.print')
-  def testRemoveOldPatches(self, _):
-    """Can remove old patches from PATCHES.json."""
-    one_patch_dict = {
-        'metadata': {
-            'title': '[some label] hello world',
-        },
-        'platforms': [
-            'chromiumos',
-        ],
-        'rel_patch_path': 'x/y/z',
-        'version_range': {
-            'from': 4,
-            'until': 5,
+    @mock.patch("builtins.print")
+    def testRemoveOldPatches(self, _):
+        """Can remove old patches from PATCHES.json."""
+        one_patch_dict = {
+            "metadata": {
+                "title": "[some label] hello world",
+            },
+            "platforms": [
+                "chromiumos",
+            ],
+            "rel_patch_path": "x/y/z",
+            "version_range": {
+                "from": 4,
+                "until": 5,
+            },
+        }
-    }
-    patches = [
-        one_patch_dict,
-        {
-            **one_patch_dict, 'version_range': {
-                'until': None
-            }
-        },
-        {
-            **one_patch_dict, 'version_range': {
-                'from': 100
-            }
-        },
-        {
-            **one_patch_dict, 'version_range': {
-                'until': 8
-            }
-        },
-    ]
-    cases = [
-        (0, lambda x: self.assertEqual(len(x), 4)),
-        (6, lambda x: self.assertEqual(len(x), 3)),
-        (8, lambda x: self.assertEqual(len(x), 2)),
-        (1000, lambda x: self.assertEqual(len(x), 2)),
-    ]
+        patches = [
+            one_patch_dict,
+            {**one_patch_dict, "version_range": {"until": None}},
+            {**one_patch_dict, "version_range": {"from": 100}},
+            {**one_patch_dict, "version_range": {"until": 8}},
+        ]
+        cases = [
+            (0, lambda x: self.assertEqual(len(x), 4)),
+            (6, lambda x: self.assertEqual(len(x), 3)),
+            (8, lambda x: self.assertEqual(len(x), 2)),
+            (1000, lambda x: self.assertEqual(len(x), 2)),
+        ]

-    def _t(dirname: str, svn_version: int, assertion_f: Callable):
-      json_filepath = Path(dirname) / 'PATCHES.json'
-      with json_filepath.open('w', encoding='utf-8') as f:
-        json.dump(patches, f)
-      patch_manager.RemoveOldPatches(svn_version, Path(), json_filepath)
-      with json_filepath.open('r', encoding='utf-8') as f:
-        result = json.load(f)
-      assertion_f(result)
+        def _t(dirname: str, svn_version: int, assertion_f: Callable):
+            json_filepath = Path(dirname) / "PATCHES.json"
+            with json_filepath.open("w", encoding="utf-8") as f:
+                json.dump(patches, f)
+            patch_manager.RemoveOldPatches(svn_version, Path(), json_filepath)
+            with json_filepath.open("r", encoding="utf-8") as f:
+                result = json.load(f)
+            assertion_f(result)

-    with tempfile.TemporaryDirectory(
-        prefix='patch_manager_unittest') as dirname:
-      for r, a in cases:
-        _t(dirname, r, a)
+        with tempfile.TemporaryDirectory(
+            prefix="patch_manager_unittest"
+        ) as dirname:
+            for r, a in cases:
+                _t(dirname, r, a)

-  @mock.patch('builtins.print')
-  @mock.patch.object(patch_utils, 'git_clean_context')
-  def testCheckPatchApplies(self, _, mock_git_clean_context):
-    """Tests whether we can apply a single patch for a given svn_version."""
-    mock_git_clean_context.return_value = mock.MagicMock()
-    with tempfile.TemporaryDirectory(
-        prefix='patch_manager_unittest') as dirname:
-      dirpath = Path(dirname)
-      patch_entries = [
-          patch_utils.PatchEntry(dirpath,
-                                 metadata=None,
-                                 platforms=[],
-                                 rel_patch_path='another.patch',
-                                 version_range={
-                                     'from': 9,
-                                     'until': 20,
-                                 }),
-          patch_utils.PatchEntry(dirpath,
-                                 metadata=None,
-                                 platforms=['chromiumos'],
-                                 rel_patch_path='example.patch',
-                                 version_range={
-                                     'from': 1,
-                                     'until': 10,
-                                 }),
-          patch_utils.PatchEntry(dirpath,
-                                 metadata=None,
-                                 platforms=['chromiumos'],
-                                 rel_patch_path='patch_after.patch',
-                                 version_range={
-                                     'from': 1,
-                                     'until': 5,
-                                 })
-      ]
-      patches_path = dirpath / 'PATCHES.json'
-      with patch_utils.atomic_write(patches_path, encoding='utf-8') as f:
-        json.dump([pe.to_dict() for pe in patch_entries], f)
+    @mock.patch("builtins.print")
+    @mock.patch.object(patch_utils, "git_clean_context")
+    def testCheckPatchApplies(self, _, mock_git_clean_context):
+        """Tests whether we can apply a single patch for a given svn_version."""
+        mock_git_clean_context.return_value = mock.MagicMock()
+        with tempfile.TemporaryDirectory(
+            prefix="patch_manager_unittest"
+        ) as dirname:
+            dirpath = Path(dirname)
+            patch_entries = [
+                patch_utils.PatchEntry(
+                    dirpath,
+                    metadata=None,
+                    platforms=[],
+                    rel_patch_path="another.patch",
+                    version_range={
+                        "from": 9,
+                        "until": 20,
+                    },
+                ),
+                patch_utils.PatchEntry(
+                    dirpath,
+                    metadata=None,
+                    platforms=["chromiumos"],
+                    rel_patch_path="example.patch",
+                    version_range={
+                        "from": 1,
+                        "until": 10,
+                    },
+                ),
+                patch_utils.PatchEntry(
+                    dirpath,
+                    metadata=None,
+                    platforms=["chromiumos"],
+                    rel_patch_path="patch_after.patch",
+                    version_range={
+                        "from": 1,
+                        "until": 5,
+                    },
+                ),
+            ]
+            patches_path = dirpath / "PATCHES.json"
+            with patch_utils.atomic_write(patches_path, encoding="utf-8") as f:
+                json.dump([pe.to_dict() for pe in patch_entries], f)

-    def _harness1(version: int, return_value: patch_utils.PatchResult,
-                  expected: patch_manager.GitBisectionCode):
-      with mock.patch.object(
-          patch_utils.PatchEntry,
-          'apply',
-          return_value=return_value,
-      ) as m:
-        result = patch_manager.CheckPatchApplies(
-            version,
-            dirpath,
-            patches_path,
-            'example.patch',
-        )
-        self.assertEqual(result, expected)
-        m.assert_called()
+            def _harness1(
+                version: int,
+                return_value: patch_utils.PatchResult,
+                expected: patch_manager.GitBisectionCode,
+            ):
+                with mock.patch.object(
+                    patch_utils.PatchEntry,
+                    "apply",
return_value=return_value, + ) as m: + result = patch_manager.CheckPatchApplies( + version, + dirpath, + patches_path, + "example.patch", + ) + self.assertEqual(result, expected) + m.assert_called() - _harness1(1, patch_utils.PatchResult(True, {}), - patch_manager.GitBisectionCode.GOOD) - _harness1(2, patch_utils.PatchResult(True, {}), - patch_manager.GitBisectionCode.GOOD) - _harness1(2, patch_utils.PatchResult(False, {}), - patch_manager.GitBisectionCode.BAD) - _harness1(11, patch_utils.PatchResult(False, {}), - patch_manager.GitBisectionCode.BAD) + _harness1( + 1, + patch_utils.PatchResult(True, {}), + patch_manager.GitBisectionCode.GOOD, + ) + _harness1( + 2, + patch_utils.PatchResult(True, {}), + patch_manager.GitBisectionCode.GOOD, + ) + _harness1( + 2, + patch_utils.PatchResult(False, {}), + patch_manager.GitBisectionCode.BAD, + ) + _harness1( + 11, + patch_utils.PatchResult(False, {}), + patch_manager.GitBisectionCode.BAD, + ) - def _harness2(version: int, application_func: Callable, - expected: patch_manager.GitBisectionCode): - with mock.patch.object( - patch_utils, - 'apply_single_patch_entry', - application_func, - ): - result = patch_manager.CheckPatchApplies( - version, - dirpath, - patches_path, - 'example.patch', - ) - self.assertEqual(result, expected) + def _harness2( + version: int, + application_func: Callable, + expected: patch_manager.GitBisectionCode, + ): + with mock.patch.object( + patch_utils, + "apply_single_patch_entry", + application_func, + ): + result = patch_manager.CheckPatchApplies( + version, + dirpath, + patches_path, + "example.patch", + ) + self.assertEqual(result, expected) - # Check patch can apply and fail with good return codes. - def _apply_patch_entry_mock1(v, _, patch_entry, **__): - return patch_entry.can_patch_version(v), None + # Check patch can apply and fail with good return codes. + def _apply_patch_entry_mock1(v, _, patch_entry, **__): + return patch_entry.can_patch_version(v), None - _harness2( - 1, - _apply_patch_entry_mock1, - patch_manager.GitBisectionCode.GOOD, - ) - _harness2( - 11, - _apply_patch_entry_mock1, - patch_manager.GitBisectionCode.BAD, - ) + _harness2( + 1, + _apply_patch_entry_mock1, + patch_manager.GitBisectionCode.GOOD, + ) + _harness2( + 11, + _apply_patch_entry_mock1, + patch_manager.GitBisectionCode.BAD, + ) - # Early exit check, shouldn't apply later failing patch. - def _apply_patch_entry_mock2(v, _, patch_entry, **__): - if (patch_entry.can_patch_version(v) - and patch_entry.rel_patch_path == 'patch_after.patch'): - return False, {'filename': mock.Mock()} - return True, None + # Early exit check, shouldn't apply later failing patch. + def _apply_patch_entry_mock2(v, _, patch_entry, **__): + if ( + patch_entry.can_patch_version(v) + and patch_entry.rel_patch_path == "patch_after.patch" + ): + return False, {"filename": mock.Mock()} + return True, None - _harness2( - 1, - _apply_patch_entry_mock2, - patch_manager.GitBisectionCode.GOOD, - ) + _harness2( + 1, + _apply_patch_entry_mock2, + patch_manager.GitBisectionCode.GOOD, + ) - # Skip check, should exit early on the first patch. - def _apply_patch_entry_mock3(v, _, patch_entry, **__): - if (patch_entry.can_patch_version(v) - and patch_entry.rel_patch_path == 'another.patch'): - return False, {'filename': mock.Mock()} - return True, None + # Skip check, should exit early on the first patch. 
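+            # (Sketch of the exit-code mapping assumed here, following
+            # `git bisect run` conventions: GOOD -> 0, BAD -> 1, SKIP -> 125;
+            # the concrete values are defined by patch_manager.GitBisectionCode.
+            # The mock below makes a patch *before* example.patch fail, which
+            # should yield SKIP.)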
+ def _apply_patch_entry_mock3(v, _, patch_entry, **__): + if ( + patch_entry.can_patch_version(v) + and patch_entry.rel_patch_path == "another.patch" + ): + return False, {"filename": mock.Mock()} + return True, None - _harness2( - 9, - _apply_patch_entry_mock3, - patch_manager.GitBisectionCode.SKIP, - ) + _harness2( + 9, + _apply_patch_entry_mock3, + patch_manager.GitBisectionCode.SKIP, + ) - @mock.patch('patch_utils.git_clean_context', mock.MagicMock) - def testUpdateVersionRanges(self): - """Test the UpdateVersionRanges function.""" - with tempfile.TemporaryDirectory( - prefix='patch_manager_unittest') as dirname: - dirpath = Path(dirname) - patches = [ - patch_utils.PatchEntry(workdir=dirpath, - rel_patch_path='x.patch', - metadata=None, - platforms=None, - version_range={ - 'from': 0, - 'until': 2, - }), - patch_utils.PatchEntry(workdir=dirpath, - rel_patch_path='y.patch', - metadata=None, - platforms=None, - version_range={ - 'from': 0, - 'until': 2, - }), - ] - patches[0].apply = mock.MagicMock(return_value=patch_utils.PatchResult( - succeeded=False, failed_hunks={'a/b/c': []})) - patches[1].apply = mock.MagicMock(return_value=patch_utils.PatchResult( - succeeded=True)) - results = patch_manager.UpdateVersionRangesWithEntries( - 1, dirpath, patches) - # We should only have updated the version_range of the first patch, - # as that one failed to apply. - self.assertEqual(len(results), 1) - self.assertEqual(results[0].version_range, {'from': 0, 'until': 1}) - self.assertEqual(patches[0].version_range, {'from': 0, 'until': 1}) - self.assertEqual(patches[1].version_range, {'from': 0, 'until': 2}) + @mock.patch("patch_utils.git_clean_context", mock.MagicMock) + def testUpdateVersionRanges(self): + """Test the UpdateVersionRanges function.""" + with tempfile.TemporaryDirectory( + prefix="patch_manager_unittest" + ) as dirname: + dirpath = Path(dirname) + patches = [ + patch_utils.PatchEntry( + workdir=dirpath, + rel_patch_path="x.patch", + metadata=None, + platforms=None, + version_range={ + "from": 0, + "until": 2, + }, + ), + patch_utils.PatchEntry( + workdir=dirpath, + rel_patch_path="y.patch", + metadata=None, + platforms=None, + version_range={ + "from": 0, + "until": 2, + }, + ), + ] + patches[0].apply = mock.MagicMock( + return_value=patch_utils.PatchResult( + succeeded=False, failed_hunks={"a/b/c": []} + ) + ) + patches[1].apply = mock.MagicMock( + return_value=patch_utils.PatchResult(succeeded=True) + ) + results = patch_manager.UpdateVersionRangesWithEntries( + 1, dirpath, patches + ) + # We should only have updated the version_range of the first patch, + # as that one failed to apply. 
+ self.assertEqual(len(results), 1) + self.assertEqual(results[0].version_range, {"from": 0, "until": 1}) + self.assertEqual(patches[0].version_range, {"from": 0, "until": 1}) + self.assertEqual(patches[1].version_range, {"from": 0, "until": 2}) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/llvm_tools/patch_utils.py b/llvm_tools/patch_utils.py index 4c602027..846b379a 100644 --- a/llvm_tools/patch_utils.py +++ b/llvm_tools/patch_utils.py @@ -15,413 +15,444 @@ import sys from typing import Any, Dict, IO, List, Optional, Tuple, Union -CHECKED_FILE_RE = re.compile(r'^checking file\s+(.*)$') -HUNK_FAILED_RE = re.compile(r'^Hunk #(\d+) FAILED at.*') -HUNK_HEADER_RE = re.compile(r'^@@\s+-(\d+),(\d+)\s+\+(\d+),(\d+)\s+@@') -HUNK_END_RE = re.compile(r'^--\s*$') -PATCH_SUBFILE_HEADER_RE = re.compile(r'^\+\+\+ [ab]/(.*)$') +CHECKED_FILE_RE = re.compile(r"^checking file\s+(.*)$") +HUNK_FAILED_RE = re.compile(r"^Hunk #(\d+) FAILED at.*") +HUNK_HEADER_RE = re.compile(r"^@@\s+-(\d+),(\d+)\s+\+(\d+),(\d+)\s+@@") +HUNK_END_RE = re.compile(r"^--\s*$") +PATCH_SUBFILE_HEADER_RE = re.compile(r"^\+\+\+ [ab]/(.*)$") @contextlib.contextmanager -def atomic_write(fp: Union[Path, str], mode='w', *args, **kwargs): - """Write to a filepath atomically. - - This works by a temp file swap, created with a .tmp suffix in - the same directory briefly until being renamed to the desired - filepath. - - Args: - fp: Filepath to open. - mode: File mode; can be 'w', 'wb'. Default 'w'. - *args: Passed to Path.open as nargs. - **kwargs: Passed to Path.open as kwargs. - - Raises: - ValueError when the mode is invalid. - """ - if isinstance(fp, str): - fp = Path(fp) - if mode not in ('w', 'wb'): - raise ValueError(f'mode {mode} not accepted') - temp_fp = fp.with_suffix(fp.suffix + '.tmp') - try: - with temp_fp.open(mode, *args, **kwargs) as f: - yield f - except: - if temp_fp.is_file(): - temp_fp.unlink() - raise - temp_fp.rename(fp) +def atomic_write(fp: Union[Path, str], mode="w", *args, **kwargs): + """Write to a filepath atomically. + + This works by a temp file swap, created with a .tmp suffix in + the same directory briefly until being renamed to the desired + filepath. + + Args: + fp: Filepath to open. + mode: File mode; can be 'w', 'wb'. Default 'w'. + *args: Passed to Path.open as nargs. + **kwargs: Passed to Path.open as kwargs. + + Raises: + ValueError when the mode is invalid. + """ + if isinstance(fp, str): + fp = Path(fp) + if mode not in ("w", "wb"): + raise ValueError(f"mode {mode} not accepted") + temp_fp = fp.with_suffix(fp.suffix + ".tmp") + try: + with temp_fp.open(mode, *args, **kwargs) as f: + yield f + except: + if temp_fp.is_file(): + temp_fp.unlink() + raise + temp_fp.rename(fp) @dataclasses.dataclass class Hunk: - """Represents a patch Hunk.""" - hunk_id: int - """Hunk ID for the current file.""" - orig_start: int - orig_hunk_len: int - patch_start: int - patch_hunk_len: int - patch_hunk_lineno_begin: int - patch_hunk_lineno_end: Optional[int] + """Represents a patch Hunk.""" + + hunk_id: int + """Hunk ID for the current file.""" + orig_start: int + orig_hunk_len: int + patch_start: int + patch_hunk_len: int + patch_hunk_lineno_begin: int + patch_hunk_lineno_end: Optional[int] def parse_patch_stream(patch_stream: IO[str]) -> Dict[str, List[Hunk]]: - """Parse a patch file-like into Hunks. - - Args: - patch_stream: A IO stream formatted like a git patch file. 
-
-  Returns:
-    A dictionary mapping filenames to lists of Hunks present
-    in the patch stream.
-  """
-
-  current_filepath = None
-  current_hunk_id = 0
-  current_hunk = None
-  out = collections.defaultdict(list)
-  for lineno, line in enumerate(patch_stream.readlines()):
-    subfile_header = PATCH_SUBFILE_HEADER_RE.match(line)
-    if subfile_header:
-      current_filepath = subfile_header.group(1)
-      if not current_filepath:
-        raise RuntimeError('Could not get file header in patch stream')
-      # Need to reset the hunk id, as it's per-file.
-      current_hunk_id = 0
-      continue
-    hunk_header = HUNK_HEADER_RE.match(line)
-    if hunk_header:
-      if not current_filepath:
-        raise RuntimeError('Parsed hunk before file header in patch stream')
-      if current_hunk:
-        # Already parsing a hunk
-        current_hunk.patch_hunk_lineno_end = lineno
-      current_hunk_id += 1
-      current_hunk = Hunk(hunk_id=current_hunk_id,
-                          orig_start=int(hunk_header.group(1)),
-                          orig_hunk_len=int(hunk_header.group(2)),
-                          patch_start=int(hunk_header.group(3)),
-                          patch_hunk_len=int(hunk_header.group(4)),
-                          patch_hunk_lineno_begin=lineno + 1,
-                          patch_hunk_lineno_end=None)
-      out[current_filepath].append(current_hunk)
-      continue
-    if current_hunk and HUNK_END_RE.match(line):
-      current_hunk.patch_hunk_lineno_end = lineno
-  return out
+    """Parse a patch file-like into Hunks.
+
+    Args:
+        patch_stream: An IO stream formatted like a git patch file.
+
+    Returns:
+        A dictionary mapping filenames to lists of Hunks present
+        in the patch stream.
+    """
+
+    current_filepath = None
+    current_hunk_id = 0
+    current_hunk = None
+    out = collections.defaultdict(list)
+    for lineno, line in enumerate(patch_stream.readlines()):
+        subfile_header = PATCH_SUBFILE_HEADER_RE.match(line)
+        if subfile_header:
+            current_filepath = subfile_header.group(1)
+            if not current_filepath:
+                raise RuntimeError("Could not get file header in patch stream")
+            # Need to reset the hunk id, as it's per-file.
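+            # (Illustrative: a "+++ b/<filepath>" header line begins a new
+            # file section, so hunk numbering restarts for that file.)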
+ current_hunk_id = 0 + continue + hunk_header = HUNK_HEADER_RE.match(line) + if hunk_header: + if not current_filepath: + raise RuntimeError( + "Parsed hunk before file header in patch stream" + ) + if current_hunk: + # Already parsing a hunk + current_hunk.patch_hunk_lineno_end = lineno + current_hunk_id += 1 + current_hunk = Hunk( + hunk_id=current_hunk_id, + orig_start=int(hunk_header.group(1)), + orig_hunk_len=int(hunk_header.group(2)), + patch_start=int(hunk_header.group(3)), + patch_hunk_len=int(hunk_header.group(4)), + patch_hunk_lineno_begin=lineno + 1, + patch_hunk_lineno_end=None, + ) + out[current_filepath].append(current_hunk) + continue + if current_hunk and HUNK_END_RE.match(line): + current_hunk.patch_hunk_lineno_end = lineno + return out def parse_failed_patch_output(text: str) -> Dict[str, List[int]]: - current_file = None - failed_hunks = collections.defaultdict(list) - for eline in text.split('\n'): - checked_file_match = CHECKED_FILE_RE.match(eline) - if checked_file_match: - current_file = checked_file_match.group(1) - continue - failed_match = HUNK_FAILED_RE.match(eline) - if failed_match: - if not current_file: - raise ValueError('Input stream was not parsable') - hunk_id = int(failed_match.group(1)) - failed_hunks[current_file].append(hunk_id) - return failed_hunks + current_file = None + failed_hunks = collections.defaultdict(list) + for eline in text.split("\n"): + checked_file_match = CHECKED_FILE_RE.match(eline) + if checked_file_match: + current_file = checked_file_match.group(1) + continue + failed_match = HUNK_FAILED_RE.match(eline) + if failed_match: + if not current_file: + raise ValueError("Input stream was not parsable") + hunk_id = int(failed_match.group(1)) + failed_hunks[current_file].append(hunk_id) + return failed_hunks @dataclasses.dataclass(frozen=True) class PatchResult: - """Result of a patch application.""" - succeeded: bool - failed_hunks: Dict[str, List[Hunk]] = dataclasses.field(default_factory=dict) + """Result of a patch application.""" - def __bool__(self): - return self.succeeded + succeeded: bool + failed_hunks: Dict[str, List[Hunk]] = dataclasses.field( + default_factory=dict + ) - def failure_info(self) -> str: - if self.succeeded: - return '' - s = '' - for file, hunks in self.failed_hunks.items(): - s += f'{file}:\n' - for h in hunks: - s += f'Lines {h.orig_start} to {h.orig_start + h.orig_hunk_len}\n' - s += '--------------------\n' - return s + def __bool__(self): + return self.succeeded + + def failure_info(self) -> str: + if self.succeeded: + return "" + s = "" + for file, hunks in self.failed_hunks.items(): + s += f"{file}:\n" + for h in hunks: + s += f"Lines {h.orig_start} to {h.orig_start + h.orig_hunk_len}\n" + s += "--------------------\n" + return s @dataclasses.dataclass class PatchEntry: - """Object mapping of an entry of PATCHES.json.""" - workdir: Path - """Storage location for the patches.""" - metadata: Optional[Dict[str, Any]] - platforms: Optional[List[str]] - rel_patch_path: str - version_range: Optional[Dict[str, Optional[int]]] - _parsed_hunks = None - - def __post_init__(self): - if not self.workdir.is_dir(): - raise ValueError(f'workdir {self.workdir} is not a directory') - - @classmethod - def from_dict(cls, workdir: Path, data: Dict[str, Any]): - """Instatiate from a dictionary. - - Dictionary must have at least the following key: - - { - 'rel_patch_path': '<relative patch path to workdir>', - } - - Returns: - A new PatchEntry. 
-    """
-    return cls(workdir, data.get('metadata'), data.get('platforms'),
-               data['rel_patch_path'], data.get('version_range'))
-
-  def to_dict(self) -> Dict[str, Any]:
-    out = {
-        'metadata': self.metadata,
-        'rel_patch_path': self.rel_patch_path,
-        'version_range': self.version_range,
-    }
-    if self.platforms:
-      # To match patch_sync, only serialized when
-      # non-empty and non-null.
-      out['platforms'] = sorted(self.platforms)
-    return out
-
-  def parsed_hunks(self) -> Dict[str, List[Hunk]]:
-    # Minor caching here because IO is slow.
-    if not self._parsed_hunks:
-      with self.patch_path().open(encoding='utf-8') as f:
-        self._parsed_hunks = parse_patch_stream(f)
-    return self._parsed_hunks
-
-  def patch_path(self) -> Path:
-    return self.workdir / self.rel_patch_path
-
-  def can_patch_version(self, svn_version: int) -> bool:
-    """Is this patch meant to apply to `svn_version`?"""
-    # Sometimes the key is there, but it's set to None.
-    if not self.version_range:
-      return True
-    from_v = self.version_range.get('from') or 0
-    until_v = self.version_range.get('until')
-    if until_v is None:
-      until_v = sys.maxsize
-    return from_v <= svn_version < until_v
-
-  def is_old(self, svn_version: int) -> bool:
-    """Is this patch old compared to `svn_version`?"""
-    if not self.version_range:
-      return False
-    until_v = self.version_range.get('until')
-    # Sometimes the key is there, but it's set to None.
-    if until_v is None:
-      until_v = sys.maxsize
-    return svn_version >= until_v
-
-  def apply(self,
-            root_dir: Path,
-            extra_args: Optional[List[str]] = None) -> PatchResult:
-    """Apply a patch to a given directory."""
-    if not extra_args:
-      extra_args = []
-    # Cmd to apply a patch in the src unpack path.
-    abs_patch_path = self.patch_path().absolute()
-    if not abs_patch_path.is_file():
-      raise RuntimeError(f'Cannot apply: patch {abs_patch_path} is not a file')
-    cmd = [
-        'patch',
-        '-d',
-        root_dir.absolute(),
-        '-f',
-        '-p1',
-        '--no-backup-if-mismatch',
-        '-i',
-        abs_patch_path,
-    ] + extra_args
-    try:
-      subprocess.run(cmd, encoding='utf-8', check=True, stdout=subprocess.PIPE)
-    except subprocess.CalledProcessError as e:
-      parsed_hunks = self.parsed_hunks()
-      failed_hunks_id_dict = parse_failed_patch_output(e.stdout)
-      failed_hunks = {}
-      for path, failed_hunk_ids in failed_hunks_id_dict.items():
-        hunks_for_file = parsed_hunks[path]
-        failed_hunks[path] = [
-            hunk for hunk in hunks_for_file if hunk.hunk_id in failed_hunk_ids
-        ]
-      return PatchResult(succeeded=False, failed_hunks=failed_hunks)
-    return PatchResult(succeeded=True)
-
-  def test_apply(self, root_dir: Path) -> PatchResult:
-    """Dry run applying a patch to a given directory."""
-    return self.apply(root_dir, ['--dry-run'])
-
-  def title(self) -> str:
-    if not self.metadata:
-      return ''
-    return self.metadata.get('title', '')
+    """Object mapping of an entry of PATCHES.json."""
+
+    workdir: Path
+    """Storage location for the patches."""
+    metadata: Optional[Dict[str, Any]]
+    platforms: Optional[List[str]]
+    rel_patch_path: str
+    version_range: Optional[Dict[str, Optional[int]]]
+    _parsed_hunks = None
+
+    def __post_init__(self):
+        if not self.workdir.is_dir():
+            raise ValueError(f"workdir {self.workdir} is not a directory")
+
+    @classmethod
+    def from_dict(cls, workdir: Path, data: Dict[str, Any]):
+        """Instantiate from a dictionary.
+
+        Dictionary must have at least the following key:
+
+        {
+            'rel_patch_path': '<relative patch path to workdir>',
+        }
+
+        Returns:
+            A new PatchEntry.
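+
+        Example (illustrative values only; workdir must be an existing
+        directory):
+
+            PatchEntry.from_dict(Path("/path/to/patches"), {
+                "rel_patch_path": "cherry/fix.patch",
+                "version_range": {"from": 4, "until": 9},
+            })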
+ """ + return cls( + workdir, + data.get("metadata"), + data.get("platforms"), + data["rel_patch_path"], + data.get("version_range"), + ) + + def to_dict(self) -> Dict[str, Any]: + out = { + "metadata": self.metadata, + "rel_patch_path": self.rel_patch_path, + "version_range": self.version_range, + } + if self.platforms: + # To match patch_sync, only serialized when + # non-empty and non-null. + out["platforms"] = sorted(self.platforms) + return out + + def parsed_hunks(self) -> Dict[str, List[Hunk]]: + # Minor caching here because IO is slow. + if not self._parsed_hunks: + with self.patch_path().open(encoding="utf-8") as f: + self._parsed_hunks = parse_patch_stream(f) + return self._parsed_hunks + + def patch_path(self) -> Path: + return self.workdir / self.rel_patch_path + + def can_patch_version(self, svn_version: int) -> bool: + """Is this patch meant to apply to `svn_version`?""" + # Sometimes the key is there, but it's set to None. + if not self.version_range: + return True + from_v = self.version_range.get("from") or 0 + until_v = self.version_range.get("until") + if until_v is None: + until_v = sys.maxsize + return from_v <= svn_version < until_v + + def is_old(self, svn_version: int) -> bool: + """Is this patch old compared to `svn_version`?""" + if not self.version_range: + return False + until_v = self.version_range.get("until") + # Sometimes the key is there, but it's set to None. + if until_v is None: + until_v = sys.maxsize + return svn_version >= until_v + + def apply( + self, root_dir: Path, extra_args: Optional[List[str]] = None + ) -> PatchResult: + """Apply a patch to a given directory.""" + if not extra_args: + extra_args = [] + # Cmd to apply a patch in the src unpack path. + abs_patch_path = self.patch_path().absolute() + if not abs_patch_path.is_file(): + raise RuntimeError( + f"Cannot apply: patch {abs_patch_path} is not a file" + ) + cmd = [ + "patch", + "-d", + root_dir.absolute(), + "-f", + "-p1", + "--no-backup-if-mismatch", + "-i", + abs_patch_path, + ] + extra_args + try: + subprocess.run( + cmd, encoding="utf-8", check=True, stdout=subprocess.PIPE + ) + except subprocess.CalledProcessError as e: + parsed_hunks = self.parsed_hunks() + failed_hunks_id_dict = parse_failed_patch_output(e.stdout) + failed_hunks = {} + for path, failed_hunk_ids in failed_hunks_id_dict.items(): + hunks_for_file = parsed_hunks[path] + failed_hunks[path] = [ + hunk + for hunk in hunks_for_file + if hunk.hunk_id in failed_hunk_ids + ] + return PatchResult(succeeded=False, failed_hunks=failed_hunks) + return PatchResult(succeeded=True) + + def test_apply(self, root_dir: Path) -> PatchResult: + """Dry run applying a patch to a given directory.""" + return self.apply(root_dir, ["--dry-run"]) + + def title(self) -> str: + if not self.metadata: + return "" + return self.metadata.get("title", "") @dataclasses.dataclass(frozen=True) class PatchInfo: - """Holds info for a round of patch applications.""" - # str types are legacy. Patch lists should - # probably be PatchEntries, - applied_patches: List[PatchEntry] - failed_patches: List[PatchEntry] - # Can be deleted once legacy code is removed. - non_applicable_patches: List[str] - # Can be deleted once legacy code is removed. - disabled_patches: List[str] - # Can be deleted once legacy code is removed. - removed_patches: List[str] - # Can be deleted once legacy code is removed. 
- modified_metadata: Optional[str] - - def _asdict(self): - return dataclasses.asdict(self) + """Holds info for a round of patch applications.""" + + # str types are legacy. Patch lists should + # probably be PatchEntries, + applied_patches: List[PatchEntry] + failed_patches: List[PatchEntry] + # Can be deleted once legacy code is removed. + non_applicable_patches: List[str] + # Can be deleted once legacy code is removed. + disabled_patches: List[str] + # Can be deleted once legacy code is removed. + removed_patches: List[str] + # Can be deleted once legacy code is removed. + modified_metadata: Optional[str] + + def _asdict(self): + return dataclasses.asdict(self) def json_to_patch_entries(workdir: Path, json_fd: IO[str]) -> List[PatchEntry]: - """Convert a json IO object to List[PatchEntry]. + """Convert a json IO object to List[PatchEntry]. - Examples: - >>> f = open('PATCHES.json') - >>> patch_entries = json_to_patch_entries(Path(), f) - """ - return [PatchEntry.from_dict(workdir, d) for d in json.load(json_fd)] + Examples: + >>> f = open('PATCHES.json') + >>> patch_entries = json_to_patch_entries(Path(), f) + """ + return [PatchEntry.from_dict(workdir, d) for d in json.load(json_fd)] def _print_failed_patch(pe: PatchEntry, failed_hunks: Dict[str, List[Hunk]]): - """Print information about a single failing PatchEntry. - - Args: - pe: A PatchEntry that failed. - failed_hunks: Hunks for pe which failed as dict: - filepath: [Hunk...] - """ - print(f'Could not apply {pe.rel_patch_path}: {pe.title()}', file=sys.stderr) - for fp, hunks in failed_hunks.items(): - print(f'{fp}:', file=sys.stderr) - for h in hunks: - print( - f'- {pe.rel_patch_path} ' - f'l:{h.patch_hunk_lineno_begin}...{h.patch_hunk_lineno_end}', - file=sys.stderr) - - -def apply_all_from_json(svn_version: int, - llvm_src_dir: Path, - patches_json_fp: Path, - continue_on_failure: bool = False) -> PatchInfo: - """Attempt to apply some patches to a given LLVM source tree. - - This relies on a PATCHES.json file to be the primary way - the patches are applied. - - Args: - svn_version: LLVM Subversion revision to patch. - llvm_src_dir: llvm-project root-level source directory to patch. - patches_json_fp: Filepath to the PATCHES.json file. - continue_on_failure: Skip any patches which failed to apply, - rather than throw an Exception. - """ - with patches_json_fp.open(encoding='utf-8') as f: - patches = json_to_patch_entries(patches_json_fp.parent, f) - skipped_patches = [] - failed_patches = [] - applied_patches = [] - for pe in patches: - applied, failed_hunks = apply_single_patch_entry(svn_version, llvm_src_dir, - pe) - if applied: - applied_patches.append(pe) - continue - if failed_hunks is not None: - if continue_on_failure: - failed_patches.append(pe) - continue - else: - _print_failed_patch(pe, failed_hunks) - raise RuntimeError('failed to apply patch ' - f'{pe.patch_path()}: {pe.title()}') - # Didn't apply, didn't fail, it was skipped. - skipped_patches.append(pe) - return PatchInfo( - non_applicable_patches=skipped_patches, - applied_patches=applied_patches, - failed_patches=failed_patches, - disabled_patches=[], - removed_patches=[], - modified_metadata=None, - ) + """Print information about a single failing PatchEntry. + + Args: + pe: A PatchEntry that failed. + failed_hunks: Hunks for pe which failed as dict: + filepath: [Hunk...] 
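+
+    (Illustrative shape: {"some/file.cpp": [Hunk(...), ...]}; the
+    filename is hypothetical.)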
+    """
+    print(f"Could not apply {pe.rel_patch_path}: {pe.title()}", file=sys.stderr)
+    for fp, hunks in failed_hunks.items():
+        print(f"{fp}:", file=sys.stderr)
+        for h in hunks:
+            print(
+                f"- {pe.rel_patch_path} "
+                f"l:{h.patch_hunk_lineno_begin}...{h.patch_hunk_lineno_end}",
+                file=sys.stderr,
+            )
+
+
+def apply_all_from_json(
+    svn_version: int,
+    llvm_src_dir: Path,
+    patches_json_fp: Path,
+    continue_on_failure: bool = False,
+) -> PatchInfo:
+    """Attempt to apply some patches to a given LLVM source tree.
+
+    This relies on a PATCHES.json file to be the primary way
+    the patches are applied.
+
+    Args:
+        svn_version: LLVM Subversion revision to patch.
+        llvm_src_dir: llvm-project root-level source directory to patch.
+        patches_json_fp: Filepath to the PATCHES.json file.
+        continue_on_failure: Skip any patches which failed to apply,
+            rather than throw an Exception.
+    """
+    with patches_json_fp.open(encoding="utf-8") as f:
+        patches = json_to_patch_entries(patches_json_fp.parent, f)
+    skipped_patches = []
+    failed_patches = []
+    applied_patches = []
+    for pe in patches:
+        applied, failed_hunks = apply_single_patch_entry(
+            svn_version, llvm_src_dir, pe
+        )
+        if applied:
+            applied_patches.append(pe)
+            continue
+        if failed_hunks is not None:
+            if continue_on_failure:
+                failed_patches.append(pe)
+                continue
+            else:
+                _print_failed_patch(pe, failed_hunks)
+                raise RuntimeError(
+                    "failed to apply patch " f"{pe.patch_path()}: {pe.title()}"
+                )
+        # Didn't apply, didn't fail, it was skipped.
+        skipped_patches.append(pe)
+    return PatchInfo(
+        non_applicable_patches=skipped_patches,
+        applied_patches=applied_patches,
+        failed_patches=failed_patches,
+        disabled_patches=[],
+        removed_patches=[],
+        modified_metadata=None,
+    )


 def apply_single_patch_entry(
     svn_version: int,
     llvm_src_dir: Path,
     pe: PatchEntry,
-    ignore_version_range: bool = False
+    ignore_version_range: bool = False,
 ) -> Tuple[bool, Optional[Dict[str, List[Hunk]]]]:
-  """Try to apply a single PatchEntry object.
-
-  Returns:
-    Tuple where the first element indicates whether the patch applied,
-    and the second element is a faild hunk mapping from file name to lists of
-    hunks (if the patch didn't apply).
-  """
-  # Don't apply patches outside of the version range.
-  if not ignore_version_range and not pe.can_patch_version(svn_version):
-    return False, None
-  # Test first to avoid making changes.
-  test_application = pe.test_apply(llvm_src_dir)
-  if not test_application:
-    return False, test_application.failed_hunks
-  # Now actually make changes.
-  application_result = pe.apply(llvm_src_dir)
-  if not application_result:
-    # This should be very rare/impossible.
-    return False, application_result.failed_hunks
-  return True, None
+    """Try to apply a single PatchEntry object.
+
+    Returns:
+        Tuple where the first element indicates whether the patch applied,
+        and the second element is a failed hunk mapping from file name to lists of
+        hunks (if the patch didn't apply).
+    """
+    # Don't apply patches outside of the version range.
+    if not ignore_version_range and not pe.can_patch_version(svn_version):
+        return False, None
+    # Test first to avoid making changes.
+    test_application = pe.test_apply(llvm_src_dir)
+    if not test_application:
+        return False, test_application.failed_hunks
+    # Now actually make changes.
+    application_result = pe.apply(llvm_src_dir)
+    if not application_result:
+        # This should be very rare/impossible.
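+        # (The dry run in test_apply just succeeded, so a real apply
+        # failing here would mean the tree changed between the two calls.)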
+ return False, application_result.failed_hunks + return True, None def is_git_dirty(git_root_dir: Path) -> bool: - """Return whether the given git directory has uncommitted changes.""" - if not git_root_dir.is_dir(): - raise ValueError(f'git_root_dir {git_root_dir} is not a directory') - cmd = ['git', 'ls-files', '-m', '--other', '--exclude-standard'] - return (subprocess.run(cmd, - stdout=subprocess.PIPE, - check=True, - cwd=git_root_dir, - encoding='utf-8').stdout != '') + """Return whether the given git directory has uncommitted changes.""" + if not git_root_dir.is_dir(): + raise ValueError(f"git_root_dir {git_root_dir} is not a directory") + cmd = ["git", "ls-files", "-m", "--other", "--exclude-standard"] + return ( + subprocess.run( + cmd, + stdout=subprocess.PIPE, + check=True, + cwd=git_root_dir, + encoding="utf-8", + ).stdout + != "" + ) def clean_src_tree(src_path): - """Cleans the source tree of the changes made in 'src_path'.""" + """Cleans the source tree of the changes made in 'src_path'.""" - reset_src_tree_cmd = ['git', '-C', src_path, 'reset', 'HEAD', '--hard'] + reset_src_tree_cmd = ["git", "-C", src_path, "reset", "HEAD", "--hard"] - subprocess.run(reset_src_tree_cmd, check=True) + subprocess.run(reset_src_tree_cmd, check=True) - clean_src_tree_cmd = ['git', '-C', src_path, 'clean', '-fd'] + clean_src_tree_cmd = ["git", "-C", src_path, "clean", "-fd"] - subprocess.run(clean_src_tree_cmd, check=True) + subprocess.run(clean_src_tree_cmd, check=True) @contextlib.contextmanager def git_clean_context(git_root_dir: Path): - """Cleans up a git directory when the context exits.""" - if is_git_dirty(git_root_dir): - raise RuntimeError('Cannot setup clean context; git_root_dir is dirty') - try: - yield - finally: - clean_src_tree(git_root_dir) + """Cleans up a git directory when the context exits.""" + if is_git_dirty(git_root_dir): + raise RuntimeError("Cannot setup clean context; git_root_dir is dirty") + try: + yield + finally: + clean_src_tree(git_root_dir) diff --git a/llvm_tools/patch_utils_unittest.py b/llvm_tools/patch_utils_unittest.py index 04541ae0..54c38763 100755 --- a/llvm_tools/patch_utils_unittest.py +++ b/llvm_tools/patch_utils_unittest.py @@ -16,87 +16,90 @@ import patch_utils as pu class TestPatchUtils(unittest.TestCase): - """Test the patch_utils.""" - - def test_atomic_write(self): - """Test that atomic write safely writes.""" - prior_contents = 'This is a test written by patch_utils_unittest.py\n' - new_contents = 'I am a test written by patch_utils_unittest.py\n' - with tempfile.TemporaryDirectory(prefix='patch_utils_unittest') as dirname: - dirpath = Path(dirname) - filepath = dirpath / 'test_atomic_write.txt' - with filepath.open('w', encoding='utf-8') as f: - f.write(prior_contents) - - def _t(): - with pu.atomic_write(filepath, encoding='utf-8') as f: - f.write(new_contents) - raise Exception('Expected failure') - - self.assertRaises(Exception, _t) - with filepath.open(encoding='utf-8') as f: - lines = f.readlines() - self.assertEqual(lines[0], prior_contents) - with pu.atomic_write(filepath, encoding='utf-8') as f: - f.write(new_contents) - with filepath.open(encoding='utf-8') as f: - lines = f.readlines() - self.assertEqual(lines[0], new_contents) - - def test_from_to_dict(self): - """Test to and from dict conversion.""" - d = TestPatchUtils._default_json_dict() - d['metadata'] = { - 'title': 'hello world', - 'info': [], - 'other_extra_info': { - 'extra_flags': [], + """Test the patch_utils.""" + + def test_atomic_write(self): + """Test that atomic 
write safely writes.""" + prior_contents = "This is a test written by patch_utils_unittest.py\n" + new_contents = "I am a test written by patch_utils_unittest.py\n" + with tempfile.TemporaryDirectory( + prefix="patch_utils_unittest" + ) as dirname: + dirpath = Path(dirname) + filepath = dirpath / "test_atomic_write.txt" + with filepath.open("w", encoding="utf-8") as f: + f.write(prior_contents) + + def _t(): + with pu.atomic_write(filepath, encoding="utf-8") as f: + f.write(new_contents) + raise Exception("Expected failure") + + self.assertRaises(Exception, _t) + with filepath.open(encoding="utf-8") as f: + lines = f.readlines() + self.assertEqual(lines[0], prior_contents) + with pu.atomic_write(filepath, encoding="utf-8") as f: + f.write(new_contents) + with filepath.open(encoding="utf-8") as f: + lines = f.readlines() + self.assertEqual(lines[0], new_contents) + + def test_from_to_dict(self): + """Test to and from dict conversion.""" + d = TestPatchUtils._default_json_dict() + d["metadata"] = { + "title": "hello world", + "info": [], + "other_extra_info": { + "extra_flags": [], + }, } - } - e = pu.PatchEntry.from_dict(TestPatchUtils._mock_dir(), d) - self.assertEqual(d, e.to_dict()) - - def test_patch_path(self): - """Test that we can get the full path from a PatchEntry.""" - d = TestPatchUtils._default_json_dict() - with mock.patch.object(Path, 'is_dir', return_value=True): - entry = pu.PatchEntry.from_dict(Path('/home/dir'), d) - self.assertEqual(entry.patch_path(), - Path('/home/dir') / d['rel_patch_path']) - - def test_can_patch_version(self): - """Test that patch application based on version is correct.""" - base_dict = TestPatchUtils._default_json_dict() - workdir = TestPatchUtils._mock_dir() - e1 = pu.PatchEntry.from_dict(workdir, base_dict) - self.assertFalse(e1.can_patch_version(3)) - self.assertTrue(e1.can_patch_version(4)) - self.assertTrue(e1.can_patch_version(5)) - self.assertFalse(e1.can_patch_version(9)) - base_dict['version_range'] = {'until': 9} - e2 = pu.PatchEntry.from_dict(workdir, base_dict) - self.assertTrue(e2.can_patch_version(0)) - self.assertTrue(e2.can_patch_version(5)) - self.assertFalse(e2.can_patch_version(9)) - base_dict['version_range'] = {'from': 4} - e3 = pu.PatchEntry.from_dict(workdir, base_dict) - self.assertFalse(e3.can_patch_version(3)) - self.assertTrue(e3.can_patch_version(5)) - self.assertTrue(e3.can_patch_version(1 << 31)) - base_dict['version_range'] = {'from': 4, 'until': None} - e4 = pu.PatchEntry.from_dict(workdir, base_dict) - self.assertFalse(e4.can_patch_version(3)) - self.assertTrue(e4.can_patch_version(5)) - self.assertTrue(e4.can_patch_version(1 << 31)) - base_dict['version_range'] = {'from': None, 'until': 9} - e5 = pu.PatchEntry.from_dict(workdir, base_dict) - self.assertTrue(e5.can_patch_version(0)) - self.assertTrue(e5.can_patch_version(5)) - self.assertFalse(e5.can_patch_version(9)) - - def test_can_parse_from_json(self): - """Test that patches be loaded from json.""" - json = """ + e = pu.PatchEntry.from_dict(TestPatchUtils._mock_dir(), d) + self.assertEqual(d, e.to_dict()) + + def test_patch_path(self): + """Test that we can get the full path from a PatchEntry.""" + d = TestPatchUtils._default_json_dict() + with mock.patch.object(Path, "is_dir", return_value=True): + entry = pu.PatchEntry.from_dict(Path("/home/dir"), d) + self.assertEqual( + entry.patch_path(), Path("/home/dir") / d["rel_patch_path"] + ) + + def test_can_patch_version(self): + """Test that patch application based on version is correct.""" + base_dict = 
TestPatchUtils._default_json_dict()
+        workdir = TestPatchUtils._mock_dir()
+        e1 = pu.PatchEntry.from_dict(workdir, base_dict)
+        self.assertFalse(e1.can_patch_version(3))
+        self.assertTrue(e1.can_patch_version(4))
+        self.assertTrue(e1.can_patch_version(5))
+        self.assertFalse(e1.can_patch_version(9))
+        base_dict["version_range"] = {"until": 9}
+        e2 = pu.PatchEntry.from_dict(workdir, base_dict)
+        self.assertTrue(e2.can_patch_version(0))
+        self.assertTrue(e2.can_patch_version(5))
+        self.assertFalse(e2.can_patch_version(9))
+        base_dict["version_range"] = {"from": 4}
+        e3 = pu.PatchEntry.from_dict(workdir, base_dict)
+        self.assertFalse(e3.can_patch_version(3))
+        self.assertTrue(e3.can_patch_version(5))
+        self.assertTrue(e3.can_patch_version(1 << 31))
+        base_dict["version_range"] = {"from": 4, "until": None}
+        e4 = pu.PatchEntry.from_dict(workdir, base_dict)
+        self.assertFalse(e4.can_patch_version(3))
+        self.assertTrue(e4.can_patch_version(5))
+        self.assertTrue(e4.can_patch_version(1 << 31))
+        base_dict["version_range"] = {"from": None, "until": 9}
+        e5 = pu.PatchEntry.from_dict(workdir, base_dict)
+        self.assertTrue(e5.can_patch_version(0))
+        self.assertTrue(e5.can_patch_version(5))
+        self.assertFalse(e5.can_patch_version(9))
+
+    def test_can_parse_from_json(self):
+        """Test that patches can be loaded from json."""
+        json = """
[
  {
    "metadata": {},
@@ -118,51 +121,56 @@ class TestPatchUtils(unittest.TestCase):
  }
]
"""
-    result = pu.json_to_patch_entries(Path(), io.StringIO(json))
-    self.assertEqual(len(result), 4)
-
-  def test_parsed_hunks(self):
-    """Test that we can parse patch file hunks."""
-    m = mock.mock_open(read_data=_EXAMPLE_PATCH)
-
-    def mocked_open(self, *args, **kwargs):
-      return m(self, *args, **kwargs)
-
-    with mock.patch.object(Path, 'open', mocked_open):
-      e = pu.PatchEntry.from_dict(TestPatchUtils._mock_dir(),
-                                  TestPatchUtils._default_json_dict())
-      hunk_dict = e.parsed_hunks()
-
-    m.assert_called()
-    filename1 = 'clang/lib/Driver/ToolChains/Clang.cpp'
-    filename2 = 'llvm/lib/Passes/PassBuilder.cpp'
-    self.assertEqual(set(hunk_dict.keys()), {filename1, filename2})
-    hunk_list1 = hunk_dict[filename1]
-    hunk_list2 = hunk_dict[filename2]
-    self.assertEqual(len(hunk_list1), 1)
-    self.assertEqual(len(hunk_list2), 2)
-
-  def test_apply_when_patch_nonexistent(self):
-    """Test that we error out when we try to apply a non-existent patch."""
-    src_dir = TestPatchUtils._mock_dir('somewhere/llvm-project')
-    patch_dir = TestPatchUtils._mock_dir()
-    e = pu.PatchEntry.from_dict(patch_dir, TestPatchUtils._default_json_dict())
-    with mock.patch('subprocess.run', mock.MagicMock()):
-      self.assertRaises(RuntimeError, lambda: e.apply(src_dir))
-
-  def test_apply_success(self):
-    """Test that we can call apply."""
-    src_dir = TestPatchUtils._mock_dir('somewhere/llvm-project')
-    patch_dir = TestPatchUtils._mock_dir()
-    e = pu.PatchEntry.from_dict(patch_dir, TestPatchUtils._default_json_dict())
-    with mock.patch('pathlib.Path.is_file', return_value=True):
-      with mock.patch('subprocess.run', mock.MagicMock()):
-        result = e.apply(src_dir)
-    self.assertTrue(result.succeeded)
-
-  def test_parse_failed_patch_output(self):
-    """Test that we can call parse `patch` output."""
-    fixture = """
+        result = pu.json_to_patch_entries(Path(), io.StringIO(json))
+        self.assertEqual(len(result), 4)
+
+    def test_parsed_hunks(self):
+        """Test that we can parse patch file hunks."""
+        m = mock.mock_open(read_data=_EXAMPLE_PATCH)
+
+        def mocked_open(self, *args, **kwargs):
+            return m(self, *args, **kwargs)
+
+        with mock.patch.object(Path, "open", mocked_open):
+            e = pu.PatchEntry.from_dict(
+                TestPatchUtils._mock_dir(), TestPatchUtils._default_json_dict()
+            )
+            hunk_dict = e.parsed_hunks()
+
+        m.assert_called()
+        filename1 = "clang/lib/Driver/ToolChains/Clang.cpp"
+        filename2 = "llvm/lib/Passes/PassBuilder.cpp"
+        self.assertEqual(set(hunk_dict.keys()), {filename1, filename2})
+        hunk_list1 = hunk_dict[filename1]
+        hunk_list2 = hunk_dict[filename2]
+        self.assertEqual(len(hunk_list1), 1)
+        self.assertEqual(len(hunk_list2), 2)
+
+    def test_apply_when_patch_nonexistent(self):
+        """Test that we error out when we try to apply a non-existent patch."""
+        src_dir = TestPatchUtils._mock_dir("somewhere/llvm-project")
+        patch_dir = TestPatchUtils._mock_dir()
+        e = pu.PatchEntry.from_dict(
+            patch_dir, TestPatchUtils._default_json_dict()
+        )
+        with mock.patch("subprocess.run", mock.MagicMock()):
+            self.assertRaises(RuntimeError, lambda: e.apply(src_dir))
+
+    def test_apply_success(self):
+        """Test that we can call apply."""
+        src_dir = TestPatchUtils._mock_dir("somewhere/llvm-project")
+        patch_dir = TestPatchUtils._mock_dir()
+        e = pu.PatchEntry.from_dict(
+            patch_dir, TestPatchUtils._default_json_dict()
+        )
+        with mock.patch("pathlib.Path.is_file", return_value=True):
+            with mock.patch("subprocess.run", mock.MagicMock()):
+                result = e.apply(src_dir)
+        self.assertTrue(result.succeeded)
+
+    def test_parse_failed_patch_output(self):
+        """Test that we can parse `patch` output."""
+        fixture = """
checking file a/b/c.cpp
Hunk #1 SUCCEEDED at 96 with fuzz 1.
Hunk #12 FAILED at 77.
@@ -172,59 +180,63 @@ Hunk #4 FAILED at 30.
checking file works.cpp
Hunk #1 SUCCEEDED at 96 with fuzz 1.
"""
-    result = pu.parse_failed_patch_output(fixture)
-    self.assertEqual(result['a/b/c.cpp'], [12, 42])
-    self.assertEqual(result['x/y/z.h'], [4])
-    self.assertNotIn('works.cpp', result)
-
-  def test_is_git_dirty(self):
-    """Test if a git directory has uncommitted changes."""
-    with tempfile.TemporaryDirectory(prefix='patch_utils_unittest') as dirname:
-      dirpath = Path(dirname)
-
-      def _run_h(cmd):
-        subprocess.run(cmd,
-                       cwd=dirpath,
-                       stdout=subprocess.DEVNULL,
-                       stderr=subprocess.DEVNULL,
-                       check=True)
-
-      _run_h(['git', 'init'])
-      self.assertFalse(pu.is_git_dirty(dirpath))
-      test_file = dirpath / 'test_file'
-      test_file.touch()
-      self.assertTrue(pu.is_git_dirty(dirpath))
-      _run_h(['git', 'add', '.'])
-      _run_h(['git', 'commit', '-m', 'test'])
-      self.assertFalse(pu.is_git_dirty(dirpath))
-      test_file.touch()
-      self.assertFalse(pu.is_git_dirty(dirpath))
-      with test_file.open('w', encoding='utf-8'):
-        test_file.write_text('abc')
-      self.assertTrue(pu.is_git_dirty(dirpath))
-
-  @staticmethod
-  def _default_json_dict():
-    return {
-        'metadata': {
-            'title': 'hello world',
-        },
-        'platforms': ['a'],
-        'rel_patch_path': 'x/y/z',
-        'version_range': {
-            'from': 4,
-            'until': 9,
+        result = pu.parse_failed_patch_output(fixture)
+        self.assertEqual(result["a/b/c.cpp"], [12, 42])
+        self.assertEqual(result["x/y/z.h"], [4])
+        self.assertNotIn("works.cpp", result)
+
+    def test_is_git_dirty(self):
+        """Test if a git directory has uncommitted changes."""
+        with tempfile.TemporaryDirectory(
+            prefix="patch_utils_unittest"
+        ) as dirname:
+            dirpath = Path(dirname)
+
+            def _run_h(cmd):
+                subprocess.run(
+                    cmd,
+                    cwd=dirpath,
+                    stdout=subprocess.DEVNULL,
+                    stderr=subprocess.DEVNULL,
+                    check=True,
+                )
+
+            _run_h(["git", "init"])
+            self.assertFalse(pu.is_git_dirty(dirpath))
+            test_file = dirpath / "test_file"
+            test_file.touch()
self.assertTrue(pu.is_git_dirty(dirpath)) + _run_h(["git", "add", "."]) + _run_h(["git", "commit", "-m", "test"]) + self.assertFalse(pu.is_git_dirty(dirpath)) + test_file.touch() + self.assertFalse(pu.is_git_dirty(dirpath)) + with test_file.open("w", encoding="utf-8"): + test_file.write_text("abc") + self.assertTrue(pu.is_git_dirty(dirpath)) + + @staticmethod + def _default_json_dict(): + return { + "metadata": { + "title": "hello world", + }, + "platforms": ["a"], + "rel_patch_path": "x/y/z", + "version_range": { + "from": 4, + "until": 9, + }, } - } - @staticmethod - def _mock_dir(path: str = 'a/b/c'): - workdir = Path(path) - workdir = mock.MagicMock(workdir) - workdir.is_dir = lambda: True - workdir.joinpath = lambda x: Path(path).joinpath(x) - workdir.__truediv__ = lambda self, x: self.joinpath(x) - return workdir + @staticmethod + def _mock_dir(path: str = "a/b/c"): + workdir = Path(path) + workdir = mock.MagicMock(workdir) + workdir.is_dir = lambda: True + workdir.joinpath = lambda x: Path(path).joinpath(x) + workdir.__truediv__ = lambda self, x: self.joinpath(x) + return workdir _EXAMPLE_PATCH = """ @@ -272,5 +284,5 @@ index c5fd68299eb..4c6e15eeeb9 100644 // FIXME: It would seem like these should come first in the optimization """ -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/llvm_tools/revert_checker.py b/llvm_tools/revert_checker.py index 2a0ab861..17914ba8 100755 --- a/llvm_tools/revert_checker.py +++ b/llvm_tools/revert_checker.py @@ -1,12 +1,12 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -#===----------------------------------------------------------------------===## +# ===----------------------------------------------------------------------===## # # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # -#===----------------------------------------------------------------------===## +# ===----------------------------------------------------------------------===## # # !!!!!!!!!!!! NOTE !!!!!!!!!!!! # This is copied directly from upstream LLVM. Please make any changes upstream, @@ -51,9 +51,10 @@ import logging import re import subprocess import sys -from typing import Generator, List, NamedTuple, Iterable +from typing import Generator, Iterable, List, NamedTuple -assert sys.version_info >= (3, 6), 'Only Python 3.6+ is supported.' + +assert sys.version_info >= (3, 6), "Only Python 3.6+ is supported." # People are creative with their reverts, and heuristics are a bit difficult. # Like 90% of of reverts have "This reverts commit ${full_sha}". @@ -65,213 +66,256 @@ assert sys.version_info >= (3, 6), 'Only Python 3.6+ is supported.' 
def _try_parse_reverts_from_commit_message(commit_message: str) -> List[str]: - if not commit_message: - return [] + if not commit_message: + return [] - results = re.findall(r'This reverts commit ([a-f0-9]{40})\b', commit_message) + results = re.findall( + r"This reverts commit ([a-f0-9]{40})\b", commit_message + ) - first_line = commit_message.splitlines()[0] - initial_revert = re.match(r'Revert ([a-f0-9]{6,}) "', first_line) - if initial_revert: - results.append(initial_revert.group(1)) - return results + first_line = commit_message.splitlines()[0] + initial_revert = re.match(r'Revert ([a-f0-9]{6,}) "', first_line) + if initial_revert: + results.append(initial_revert.group(1)) + return results def _stream_stdout(command: List[str]) -> Generator[str, None, None]: - with subprocess.Popen( - command, stdout=subprocess.PIPE, encoding='utf-8', errors='replace') as p: - assert p.stdout is not None # for mypy's happiness. - yield from p.stdout + with subprocess.Popen( + command, stdout=subprocess.PIPE, encoding="utf-8", errors="replace" + ) as p: + assert p.stdout is not None # for mypy's happiness. + yield from p.stdout def _resolve_sha(git_dir: str, sha: str) -> str: - if len(sha) == 40: - return sha - - return subprocess.check_output( - ['git', '-C', git_dir, 'rev-parse', sha], - encoding='utf-8', - stderr=subprocess.DEVNULL, - ).strip() - - -_LogEntry = NamedTuple('_LogEntry', [ - ('sha', str), - ('commit_message', str), -]) - - -def _log_stream(git_dir: str, root_sha: str, - end_at_sha: str) -> Iterable[_LogEntry]: - sep = 50 * '<>' - log_command = [ - 'git', - '-C', - git_dir, - 'log', - '^' + end_at_sha, - root_sha, - '--format=' + sep + '%n%H%n%B%n', - ] - - stdout_stream = iter(_stream_stdout(log_command)) - - # Find the next separator line. If there's nothing to log, it may not exist. - # It might not be the first line if git feels complainy. - found_commit_header = False - for line in stdout_stream: - if line.rstrip() == sep: - found_commit_header = True - break - - while found_commit_header: - sha = next(stdout_stream, None) - assert sha is not None, 'git died?' - sha = sha.rstrip() - - commit_message = [] - + if len(sha) == 40: + return sha + + return subprocess.check_output( + ["git", "-C", git_dir, "rev-parse", sha], + encoding="utf-8", + stderr=subprocess.DEVNULL, + ).strip() + + +_LogEntry = NamedTuple( + "_LogEntry", + [ + ("sha", str), + ("commit_message", str), + ], +) + + +def _log_stream( + git_dir: str, root_sha: str, end_at_sha: str +) -> Iterable[_LogEntry]: + sep = 50 * "<>" + log_command = [ + "git", + "-C", + git_dir, + "log", + "^" + end_at_sha, + root_sha, + "--format=" + sep + "%n%H%n%B%n", + ] + + stdout_stream = iter(_stream_stdout(log_command)) + + # Find the next separator line. If there's nothing to log, it may not exist. + # It might not be the first line if git feels complainy. found_commit_header = False for line in stdout_stream: - line = line.rstrip() - if line.rstrip() == sep: - found_commit_header = True - break - commit_message.append(line) + if line.rstrip() == sep: + found_commit_header = True + break + + while found_commit_header: + sha = next(stdout_stream, None) + assert sha is not None, "git died?" 
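The parser above recognizes the two common revert conventions: the full 40-character SHA that `git revert` writes into the body ("This reverts commit <sha>"), and the abbreviated SHA in a leading `Revert <sha> "..."` subject line. A self-contained illustration using the same two regexes (the commit text is invented):

    import re

    # Same two patterns as _try_parse_reverts_from_commit_message above.
    msg = 'Revert abc1234 "Add foo"\n\nThis reverts commit ' + "a" * 40 + "."
    full = re.findall(r"This reverts commit ([a-f0-9]{40})\b", msg)
    short = re.match(r'Revert ([a-f0-9]{6,}) "', msg.splitlines()[0])
    print(full + [short.group(1)])  # ['aaa...aaa', 'abc1234']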
+ sha = sha.rstrip() + + commit_message = [] + + found_commit_header = False + for line in stdout_stream: + line = line.rstrip() + if line.rstrip() == sep: + found_commit_header = True + break + commit_message.append(line) - yield _LogEntry(sha, '\n'.join(commit_message).rstrip()) + yield _LogEntry(sha, "\n".join(commit_message).rstrip()) def _shas_between(git_dir: str, base_ref: str, head_ref: str) -> Iterable[str]: - rev_list = [ - 'git', - '-C', - git_dir, - 'rev-list', - '--first-parent', - f'{base_ref}..{head_ref}', - ] - return (x.strip() for x in _stream_stdout(rev_list)) + rev_list = [ + "git", + "-C", + git_dir, + "rev-list", + "--first-parent", + f"{base_ref}..{head_ref}", + ] + return (x.strip() for x in _stream_stdout(rev_list)) def _rev_parse(git_dir: str, ref: str) -> str: - return subprocess.check_output( - ['git', '-C', git_dir, 'rev-parse', ref], - encoding='utf-8', - ).strip() + return subprocess.check_output( + ["git", "-C", git_dir, "rev-parse", ref], + encoding="utf-8", + ).strip() -Revert = NamedTuple('Revert', [ - ('sha', str), - ('reverted_sha', str), -]) +Revert = NamedTuple( + "Revert", + [ + ("sha", str), + ("reverted_sha", str), + ], +) def _find_common_parent_commit(git_dir: str, ref_a: str, ref_b: str) -> str: - """Finds the closest common parent commit between `ref_a` and `ref_b`.""" - return subprocess.check_output( - ['git', '-C', git_dir, 'merge-base', ref_a, ref_b], - encoding='utf-8', - ).strip() + """Finds the closest common parent commit between `ref_a` and `ref_b`.""" + return subprocess.check_output( + ["git", "-C", git_dir, "merge-base", ref_a, ref_b], + encoding="utf-8", + ).strip() def find_reverts(git_dir: str, across_ref: str, root: str) -> List[Revert]: - """Finds reverts across `across_ref` in `git_dir`, starting from `root`. - - These reverts are returned in order of oldest reverts first. 
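`_log_stream` frames every commit with a long `<>` separator so a single `git log` pipe can be split back into (sha, message) records without guessing where free-form commit bodies end. A toy run of the same framing over an in-memory list (the input lines are invented; no git required):

    sep = 50 * "<>"
    fake_log = [sep, "deadbeef", "Subject", "", "Body text",
                sep, "cafebabe", "Another subject"]

    records = []
    stream = iter(fake_log)
    for line in stream:  # skip everything before the first separator
        if line.rstrip() == sep:
            break
    while True:
        sha = next(stream, None)
        if sha is None:
            break
        message = []
        saw_sep = False
        for line in stream:
            if line.rstrip() == sep:
                saw_sep = True
                break
            message.append(line)
        records.append((sha, "\n".join(message).rstrip()))
        if not saw_sep:
            break
    print(records)
    # [('deadbeef', 'Subject\n\nBody text'), ('cafebabe', 'Another subject')]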
- """ - across_sha = _rev_parse(git_dir, across_ref) - root_sha = _rev_parse(git_dir, root) - - common_ancestor = _find_common_parent_commit(git_dir, across_sha, root_sha) - if common_ancestor != across_sha: - raise ValueError(f"{across_sha} isn't an ancestor of {root_sha} " - '(common ancestor: {common_ancestor})') - - intermediate_commits = set(_shas_between(git_dir, across_sha, root_sha)) - assert across_sha not in intermediate_commits - - logging.debug('%d commits appear between %s and %s', - len(intermediate_commits), across_sha, root_sha) - - all_reverts = [] - for sha, commit_message in _log_stream(git_dir, root_sha, across_sha): - reverts = _try_parse_reverts_from_commit_message(commit_message) - if not reverts: - continue - - resolved_reverts = sorted(set(_resolve_sha(git_dir, x) for x in reverts)) - for reverted_sha in resolved_reverts: - if reverted_sha in intermediate_commits: - logging.debug('Commit %s reverts %s, which happened after %s', sha, - reverted_sha, across_sha) - continue - - try: - object_type = subprocess.check_output( - ['git', '-C', git_dir, 'cat-file', '-t', reverted_sha], - encoding='utf-8', - stderr=subprocess.DEVNULL, - ).strip() - except subprocess.CalledProcessError: - logging.warning( - 'Failed to resolve reverted object %s (claimed to be reverted ' - 'by sha %s)', reverted_sha, sha) - continue - - if object_type == 'commit': - all_reverts.append(Revert(sha, reverted_sha)) - continue - - logging.error("%s claims to revert %s -- which isn't a commit -- %s", sha, - object_type, reverted_sha) - - # Since `all_reverts` contains reverts in log order (e.g., newer comes before - # older), we need to reverse this to keep with our guarantee of older = - # earlier in the result. - all_reverts.reverse() - return all_reverts + """Finds reverts across `across_ref` in `git_dir`, starting from `root`. + + These reverts are returned in order of oldest reverts first. 
+ """ + across_sha = _rev_parse(git_dir, across_ref) + root_sha = _rev_parse(git_dir, root) + + common_ancestor = _find_common_parent_commit(git_dir, across_sha, root_sha) + if common_ancestor != across_sha: + raise ValueError( + f"{across_sha} isn't an ancestor of {root_sha} " + "(common ancestor: {common_ancestor})" + ) + + intermediate_commits = set(_shas_between(git_dir, across_sha, root_sha)) + assert across_sha not in intermediate_commits + + logging.debug( + "%d commits appear between %s and %s", + len(intermediate_commits), + across_sha, + root_sha, + ) + + all_reverts = [] + for sha, commit_message in _log_stream(git_dir, root_sha, across_sha): + reverts = _try_parse_reverts_from_commit_message(commit_message) + if not reverts: + continue + + resolved_reverts = sorted( + set(_resolve_sha(git_dir, x) for x in reverts) + ) + for reverted_sha in resolved_reverts: + if reverted_sha in intermediate_commits: + logging.debug( + "Commit %s reverts %s, which happened after %s", + sha, + reverted_sha, + across_sha, + ) + continue + + try: + object_type = subprocess.check_output( + ["git", "-C", git_dir, "cat-file", "-t", reverted_sha], + encoding="utf-8", + stderr=subprocess.DEVNULL, + ).strip() + except subprocess.CalledProcessError: + logging.warning( + "Failed to resolve reverted object %s (claimed to be reverted " + "by sha %s)", + reverted_sha, + sha, + ) + continue + + if object_type == "commit": + all_reverts.append(Revert(sha, reverted_sha)) + continue + + logging.error( + "%s claims to revert %s -- which isn't a commit -- %s", + sha, + object_type, + reverted_sha, + ) + + # Since `all_reverts` contains reverts in log order (e.g., newer comes before + # older), we need to reverse this to keep with our guarantee of older = + # earlier in the result. + all_reverts.reverse() + return all_reverts def _main() -> None: - parser = argparse.ArgumentParser( - description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - 'base_ref', help='Git ref or sha to check for reverts around.') - parser.add_argument( - '-C', '--git_dir', default='.', help='Git directory to use.') - parser.add_argument( - 'root', nargs='+', help='Root(s) to search for commits from.') - parser.add_argument('--debug', action='store_true') - parser.add_argument( - '-u', '--review_url', action='store_true', - help='Format SHAs as llvm review URLs') - opts = parser.parse_args() - - logging.basicConfig( - format='%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s', - level=logging.DEBUG if opts.debug else logging.INFO, - ) - - # `root`s can have related history, so we want to filter duplicate commits - # out. The overwhelmingly common case is also to have one root, and it's way - # easier to reason about output that comes in an order that's meaningful to - # git. 
- seen_reverts = set() - all_reverts = [] - for root in opts.root: - for revert in find_reverts(opts.git_dir, opts.base_ref, root): - if revert not in seen_reverts: - seen_reverts.add(revert) - all_reverts.append(revert) - - for revert in all_reverts: - sha_fmt = (f'https://reviews.llvm.org/rG{revert.sha}' - if opts.review_url else revert.sha) - reverted_sha_fmt = (f'https://reviews.llvm.org/rG{revert.reverted_sha}' - if opts.review_url else revert.reverted_sha) - print(f'{sha_fmt} claims to revert {reverted_sha_fmt}') - - -if __name__ == '__main__': - _main() + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "base_ref", help="Git ref or sha to check for reverts around." + ) + parser.add_argument( + "-C", "--git_dir", default=".", help="Git directory to use." + ) + parser.add_argument( + "root", nargs="+", help="Root(s) to search for commits from." + ) + parser.add_argument("--debug", action="store_true") + parser.add_argument( + "-u", + "--review_url", + action="store_true", + help="Format SHAs as llvm review URLs", + ) + opts = parser.parse_args() + + logging.basicConfig( + format="%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s", + level=logging.DEBUG if opts.debug else logging.INFO, + ) + + # `root`s can have related history, so we want to filter duplicate commits + # out. The overwhelmingly common case is also to have one root, and it's way + # easier to reason about output that comes in an order that's meaningful to + # git. + seen_reverts = set() + all_reverts = [] + for root in opts.root: + for revert in find_reverts(opts.git_dir, opts.base_ref, root): + if revert not in seen_reverts: + seen_reverts.add(revert) + all_reverts.append(revert) + + for revert in all_reverts: + sha_fmt = ( + f"https://reviews.llvm.org/rG{revert.sha}" + if opts.review_url + else revert.sha + ) + reverted_sha_fmt = ( + f"https://reviews.llvm.org/rG{revert.reverted_sha}" + if opts.review_url + else revert.reverted_sha + ) + print(f"{sha_fmt} claims to revert {reverted_sha_fmt}") + + +if __name__ == "__main__": + _main() diff --git a/llvm_tools/subprocess_helpers.py b/llvm_tools/subprocess_helpers.py index ac36ea66..d4f545d2 100644 --- a/llvm_tools/subprocess_helpers.py +++ b/llvm_tools/subprocess_helpers.py @@ -11,51 +11,50 @@ import subprocess def CheckCommand(cmd): - """Executes the command using Popen().""" + """Executes the command using Popen().""" - cmd_obj = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - encoding='UTF-8') + cmd_obj = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="UTF-8" + ) - stdout, _ = cmd_obj.communicate() + stdout, _ = cmd_obj.communicate() - if cmd_obj.returncode: - print(stdout) - raise subprocess.CalledProcessError(cmd_obj.returncode, cmd) + if cmd_obj.returncode: + print(stdout) + raise subprocess.CalledProcessError(cmd_obj.returncode, cmd) def check_output(cmd, cwd=None): - """Wrapper for pre-python3 subprocess.check_output().""" + """Wrapper for pre-python3 subprocess.check_output().""" - return subprocess.check_output(cmd, encoding='UTF-8', cwd=cwd) + return subprocess.check_output(cmd, encoding="UTF-8", cwd=cwd) def check_call(cmd, cwd=None): - """Wrapper for pre-python3 subprocess.check_call().""" + """Wrapper for pre-python3 subprocess.check_call().""" - subprocess.check_call(cmd, encoding='UTF-8', cwd=cwd) + subprocess.check_call(cmd, encoding="UTF-8", cwd=cwd) # FIXME: CTRL+C does not work 
when executing a command inside the chroot via # `cros_sdk`. def ChrootRunCommand(chroot_path, cmd, verbose=False): - """Runs the command inside the chroot.""" + """Runs the command inside the chroot.""" - exec_chroot_cmd = ['cros_sdk', '--'] - exec_chroot_cmd.extend(cmd) + exec_chroot_cmd = ["cros_sdk", "--"] + exec_chroot_cmd.extend(cmd) - return ExecCommandAndCaptureOutput(exec_chroot_cmd, - cwd=chroot_path, - verbose=verbose) + return ExecCommandAndCaptureOutput( + exec_chroot_cmd, cwd=chroot_path, verbose=verbose + ) def ExecCommandAndCaptureOutput(cmd, cwd=None, verbose=False): - """Executes the command and prints to stdout if possible.""" + """Executes the command and prints to stdout if possible.""" - out = check_output(cmd, cwd=cwd).rstrip() + out = check_output(cmd, cwd=cwd).rstrip() - if verbose and out: - print(out) + if verbose and out: + print(out) - return out + return out diff --git a/llvm_tools/test_helpers.py b/llvm_tools/test_helpers.py index f9748e2a..2391a48c 100644 --- a/llvm_tools/test_helpers.py +++ b/llvm_tools/test_helpers.py @@ -8,82 +8,82 @@ from __future__ import print_function from contextlib import contextmanager -from tempfile import mkstemp import json import os +from tempfile import mkstemp class ArgsOutputTest(object): - """Testing class to simulate a argument parser object.""" + """Testing class to simulate a argument parser object.""" - def __init__(self, svn_option='google3'): - self.chroot_path = '/abs/path/to/chroot' - self.last_tested = '/abs/path/to/last_tested_file.json' - self.llvm_version = svn_option - self.verbose = False - self.extra_change_lists = None - self.options = ['latest-toolchain'] - self.builders = ['some-builder'] + def __init__(self, svn_option="google3"): + self.chroot_path = "/abs/path/to/chroot" + self.last_tested = "/abs/path/to/last_tested_file.json" + self.llvm_version = svn_option + self.verbose = False + self.extra_change_lists = None + self.options = ["latest-toolchain"] + self.builders = ["some-builder"] # FIXME: Migrate modules with similar helper to use this module. def CallCountsToMockFunctions(mock_function): - """A decorator that passes a call count to the function it decorates. + """A decorator that passes a call count to the function it decorates. - Examples: - @CallCountsToMockFunctions - def foo(call_count): - return call_count - ... - ... - [foo(), foo(), foo()] - [0, 1, 2] - """ + Examples: + @CallCountsToMockFunctions + def foo(call_count): + return call_count + ... + ... + [foo(), foo(), foo()] + [0, 1, 2] + """ - counter = [0] + counter = [0] - def Result(*args, **kwargs): - # For some values of `counter`, the mock function would simulate raising - # an exception, so let the test case catch the exception via - # `unittest.TestCase.assertRaises()` and to also handle recursive functions. - prev_counter = counter[0] - counter[0] += 1 + def Result(*args, **kwargs): + # For some values of `counter`, the mock function would simulate raising + # an exception, so let the test case catch the exception via + # `unittest.TestCase.assertRaises()` and to also handle recursive functions. + prev_counter = counter[0] + counter[0] += 1 - ret_value = mock_function(prev_counter, *args, **kwargs) + ret_value = mock_function(prev_counter, *args, **kwargs) - return ret_value + return ret_value - return Result + return Result def WritePrettyJsonFile(file_name, json_object): - """Writes the contents of the file to the json object. + """Writes the contents of the file to the json object. 
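`ChrootRunCommand` simply prepends `cros_sdk --` to the command and captures its output via `ExecCommandAndCaptureOutput`. A usage sketch with an illustrative chroot path and command:

    import subprocess_helpers  # assumes this module is importable

    out = subprocess_helpers.ChrootRunCommand(
        "/path/to/chromiumos",             # placeholder chroot path
        ["equery", "w", "sys-devel/llvm"],
        verbose=True,
    )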
- Args: - file_name: The file that has contents to be used for the json object. - json_object: The json object to write to. - """ + Args: + file_name: The file that has contents to be used for the json object. + json_object: The json object to write to. + """ - json.dump(file_name, json_object, indent=4, separators=(',', ': ')) + json.dump(file_name, json_object, indent=4, separators=(",", ": ")) def CreateTemporaryJsonFile(): - """Makes a temporary .json file.""" + """Makes a temporary .json file.""" - return CreateTemporaryFile(suffix='.json') + return CreateTemporaryFile(suffix=".json") @contextmanager -def CreateTemporaryFile(suffix=''): - """Makes a temporary file.""" +def CreateTemporaryFile(suffix=""): + """Makes a temporary file.""" - fd, temp_file_path = mkstemp(suffix=suffix) + fd, temp_file_path = mkstemp(suffix=suffix) - os.close(fd) + os.close(fd) - try: - yield temp_file_path + try: + yield temp_file_path - finally: - if os.path.isfile(temp_file_path): - os.remove(temp_file_path) + finally: + if os.path.isfile(temp_file_path): + os.remove(temp_file_path) diff --git a/llvm_tools/update_chromeos_llvm_hash.py b/llvm_tools/update_chromeos_llvm_hash.py index 3a2ce2cf..366e233f 100755 --- a/llvm_tools/update_chromeos_llvm_hash.py +++ b/llvm_tools/update_chromeos_llvm_hash.py @@ -28,22 +28,22 @@ import subprocess_helpers DEFAULT_PACKAGES = [ - 'dev-util/lldb-server', - 'sys-devel/llvm', - 'sys-libs/compiler-rt', - 'sys-libs/libcxx', - 'sys-libs/llvm-libunwind', + "dev-util/lldb-server", + "sys-devel/llvm", + "sys-libs/compiler-rt", + "sys-libs/libcxx", + "sys-libs/llvm-libunwind", ] -DEFAULT_MANIFEST_PACKAGES = ['sys-devel/llvm'] +DEFAULT_MANIFEST_PACKAGES = ["sys-devel/llvm"] # Specify which LLVM hash to update class LLVMVariant(enum.Enum): - """Represent the LLVM hash in an ebuild file to update.""" + """Represent the LLVM hash in an ebuild file to update.""" - current = 'LLVM_HASH' - next = 'LLVM_NEXT_HASH' + current = "LLVM_HASH" + next = "LLVM_NEXT_HASH" # If set to `True`, then the contents of `stdout` after executing a command will @@ -52,662 +52,733 @@ verbose = False def defaultCrosRoot() -> Path: - """Get default location of chroot_path. + """Get default location of chroot_path. - The logic assumes that the cros_root is ~/chromiumos, unless llvm_tools is - inside of a CrOS checkout, in which case that checkout should be used. + The logic assumes that the cros_root is ~/chromiumos, unless llvm_tools is + inside of a CrOS checkout, in which case that checkout should be used. - Returns: - The best guess location for the cros checkout. - """ - llvm_tools_path = os.path.realpath(os.path.dirname(__file__)) - if llvm_tools_path.endswith('src/third_party/toolchain-utils/llvm_tools'): - return Path(llvm_tools_path).parent.parent.parent.parent - return Path.home() / 'chromiumos' + Returns: + The best guess location for the cros checkout. + """ + llvm_tools_path = os.path.realpath(os.path.dirname(__file__)) + if llvm_tools_path.endswith("src/third_party/toolchain-utils/llvm_tools"): + return Path(llvm_tools_path).parent.parent.parent.parent + return Path.home() / "chromiumos" def GetCommandLineArgs(): - """Parses the command line for the optional command line arguments. - - Returns: - The log level to use when retrieving the LLVM hash or google3 LLVM version, - the chroot path to use for executing chroot commands, - a list of a package or packages to update their LLVM next hash, - and the LLVM version to use when retrieving the LLVM hash. 
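Note that, despite its parameter names, `WritePrettyJsonFile` passes its first argument as the object and its second as the file handle, matching `json.dump`. A small round-trip using the helpers above (the dictionary content is invented):

    import json

    import test_helpers  # assumes this module is importable

    with test_helpers.CreateTemporaryJsonFile() as json_path:
        with open(json_path, "w", encoding="utf-8") as f:
            # First argument is the object to serialize, second the file.
            test_helpers.WritePrettyJsonFile({"example": True}, f)
        with open(json_path, encoding="utf-8") as f:
            print(json.load(f))  # {'example': True}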
- """ - - # Create parser and add optional command-line arguments. - parser = argparse.ArgumentParser( - description="Updates the build's hash for llvm-next.") - - # Add argument for a specific chroot path. - parser.add_argument('--chroot_path', - type=Path, - default=defaultCrosRoot(), - help='the path to the chroot (default: %(default)s)') - - # Add argument for specific builds to uprev and update their llvm-next hash. - parser.add_argument( - '--update_packages', - default=','.join(DEFAULT_PACKAGES), - help='Comma-separated ebuilds to update llvm-next hash for ' - '(default: %(default)s)') - - parser.add_argument('--manifest_packages', - default=','.join(DEFAULT_MANIFEST_PACKAGES), - help='Comma-separated ebuilds to update manifests for ' - '(default: %(default)s)') - - # Add argument for whether to display command contents to `stdout`. - parser.add_argument('--verbose', - action='store_true', - help='display contents of a command to the terminal ' - '(default: %(default)s)') - - # Add argument for the LLVM hash to update - parser.add_argument( - '--is_llvm_next', - action='store_true', - help='which llvm hash to update. If specified, update LLVM_NEXT_HASH. ' - 'Otherwise, update LLVM_HASH') - - # Add argument for the LLVM version to use. - parser.add_argument( - '--llvm_version', - type=get_llvm_hash.IsSvnOption, - required=True, - help='which git hash to use. Either a svn revision, or one ' - f'of {sorted(get_llvm_hash.KNOWN_HASH_SOURCES)}') - - # Add argument for the mode of the patch management when handling patches. - parser.add_argument( - '--failure_mode', - default=failure_modes.FailureModes.FAIL.value, - choices=[ - failure_modes.FailureModes.FAIL.value, - failure_modes.FailureModes.CONTINUE.value, - failure_modes.FailureModes.DISABLE_PATCHES.value, - failure_modes.FailureModes.REMOVE_PATCHES.value - ], - help='the mode of the patch manager when handling failed patches ' - '(default: %(default)s)') - - # Add argument for the patch metadata file. - parser.add_argument( - '--patch_metadata_file', - default='PATCHES.json', - help='the .json file that has all the patches and their ' - 'metadata if applicable (default: PATCHES.json inside $FILESDIR)') - - # Parse the command line. - args_output = parser.parse_args() - - # FIXME: We shouldn't be using globals here, but until we fix it, make pylint - # stop complaining about it. - # pylint: disable=global-statement - global verbose - - verbose = args_output.verbose - - return args_output + """Parses the command line for the optional command line arguments. + + Returns: + The log level to use when retrieving the LLVM hash or google3 LLVM version, + the chroot path to use for executing chroot commands, + a list of a package or packages to update their LLVM next hash, + and the LLVM version to use when retrieving the LLVM hash. + """ + + # Create parser and add optional command-line arguments. + parser = argparse.ArgumentParser( + description="Updates the build's hash for llvm-next." + ) + + # Add argument for a specific chroot path. + parser.add_argument( + "--chroot_path", + type=Path, + default=defaultCrosRoot(), + help="the path to the chroot (default: %(default)s)", + ) + + # Add argument for specific builds to uprev and update their llvm-next hash. 
+ parser.add_argument( + "--update_packages", + default=",".join(DEFAULT_PACKAGES), + help="Comma-separated ebuilds to update llvm-next hash for " + "(default: %(default)s)", + ) + + parser.add_argument( + "--manifest_packages", + default=",".join(DEFAULT_MANIFEST_PACKAGES), + help="Comma-separated ebuilds to update manifests for " + "(default: %(default)s)", + ) + + # Add argument for whether to display command contents to `stdout`. + parser.add_argument( + "--verbose", + action="store_true", + help="display contents of a command to the terminal " + "(default: %(default)s)", + ) + + # Add argument for the LLVM hash to update + parser.add_argument( + "--is_llvm_next", + action="store_true", + help="which llvm hash to update. If specified, update LLVM_NEXT_HASH. " + "Otherwise, update LLVM_HASH", + ) + + # Add argument for the LLVM version to use. + parser.add_argument( + "--llvm_version", + type=get_llvm_hash.IsSvnOption, + required=True, + help="which git hash to use. Either a svn revision, or one " + f"of {sorted(get_llvm_hash.KNOWN_HASH_SOURCES)}", + ) + + # Add argument for the mode of the patch management when handling patches. + parser.add_argument( + "--failure_mode", + default=failure_modes.FailureModes.FAIL.value, + choices=[ + failure_modes.FailureModes.FAIL.value, + failure_modes.FailureModes.CONTINUE.value, + failure_modes.FailureModes.DISABLE_PATCHES.value, + failure_modes.FailureModes.REMOVE_PATCHES.value, + ], + help="the mode of the patch manager when handling failed patches " + "(default: %(default)s)", + ) + + # Add argument for the patch metadata file. + parser.add_argument( + "--patch_metadata_file", + default="PATCHES.json", + help="the .json file that has all the patches and their " + "metadata if applicable (default: PATCHES.json inside $FILESDIR)", + ) + + # Parse the command line. + args_output = parser.parse_args() + + # FIXME: We shouldn't be using globals here, but until we fix it, make pylint + # stop complaining about it. + # pylint: disable=global-statement + global verbose + + verbose = args_output.verbose + + return args_output def GetEbuildPathsFromSymLinkPaths(symlinks): - """Reads the symlink(s) to get the ebuild path(s) to the package(s). + """Reads the symlink(s) to get the ebuild path(s) to the package(s). - Args: - symlinks: A list of absolute path symlink/symlinks that point - to the package's ebuild. + Args: + symlinks: A list of absolute path symlink/symlinks that point + to the package's ebuild. - Returns: - A dictionary where the key is the absolute path of the symlink and the value - is the absolute path to the ebuild that was read from the symlink. + Returns: + A dictionary where the key is the absolute path of the symlink and the value + is the absolute path to the ebuild that was read from the symlink. - Raises: - ValueError: Invalid symlink(s) were provided. - """ + Raises: + ValueError: Invalid symlink(s) were provided. + """ - # A dictionary that holds: - # key: absolute symlink path - # value: absolute ebuild path - resolved_paths = {} + # A dictionary that holds: + # key: absolute symlink path + # value: absolute ebuild path + resolved_paths = {} - # Iterate through each symlink. - # - # For each symlink, check that it is a valid symlink, - # and then construct the ebuild path, and - # then add the ebuild path to the dict. - for cur_symlink in symlinks: - if not os.path.islink(cur_symlink): - raise ValueError(f'Invalid symlink provided: {cur_symlink}') + # Iterate through each symlink. 
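With the parser above, a typical invocation from outside the chroot looks like the following; the flag values are illustrative, and `--llvm_version` also accepts a plain SVN revision number:

    import subprocess

    # Example only: package list and version source are placeholders.
    subprocess.run(
        [
            "./update_chromeos_llvm_hash.py",
            "--llvm_version", "tot",
            "--update_packages", "sys-devel/llvm,sys-libs/compiler-rt",
            "--verbose",
        ],
        check=True,
    )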
+ # + # For each symlink, check that it is a valid symlink, + # and then construct the ebuild path, and + # then add the ebuild path to the dict. + for cur_symlink in symlinks: + if not os.path.islink(cur_symlink): + raise ValueError(f"Invalid symlink provided: {cur_symlink}") - # Construct the absolute path to the ebuild. - ebuild_path = os.path.realpath(cur_symlink) + # Construct the absolute path to the ebuild. + ebuild_path = os.path.realpath(cur_symlink) - if cur_symlink not in resolved_paths: - resolved_paths[cur_symlink] = ebuild_path + if cur_symlink not in resolved_paths: + resolved_paths[cur_symlink] = ebuild_path - return resolved_paths + return resolved_paths def UpdateEbuildLLVMHash(ebuild_path, llvm_variant, git_hash, svn_version): - """Updates the LLVM hash in the ebuild. + """Updates the LLVM hash in the ebuild. - The build changes are staged for commit in the temporary repo. + The build changes are staged for commit in the temporary repo. - Args: - ebuild_path: The absolute path to the ebuild. - llvm_variant: Which LLVM hash to update. - git_hash: The new git hash. - svn_version: The SVN-style revision number of git_hash. + Args: + ebuild_path: The absolute path to the ebuild. + llvm_variant: Which LLVM hash to update. + git_hash: The new git hash. + svn_version: The SVN-style revision number of git_hash. - Raises: - ValueError: Invalid ebuild path provided or failed to stage the commit - of the changes or failed to update the LLVM hash. - """ + Raises: + ValueError: Invalid ebuild path provided or failed to stage the commit + of the changes or failed to update the LLVM hash. + """ - # Iterate through each ebuild. - # - # For each ebuild, read the file in - # advance and then create a temporary file - # that gets updated with the new LLVM hash - # and revision number and then the ebuild file - # gets updated to the temporary file. + # Iterate through each ebuild. + # + # For each ebuild, read the file in + # advance and then create a temporary file + # that gets updated with the new LLVM hash + # and revision number and then the ebuild file + # gets updated to the temporary file. - if not os.path.isfile(ebuild_path): - raise ValueError(f'Invalid ebuild path provided: {ebuild_path}') + if not os.path.isfile(ebuild_path): + raise ValueError(f"Invalid ebuild path provided: {ebuild_path}") - temp_ebuild_file = f'{ebuild_path}.temp' + temp_ebuild_file = f"{ebuild_path}.temp" - with open(ebuild_path) as ebuild_file: - # write updates to a temporary file in case of interrupts - with open(temp_ebuild_file, 'w') as temp_file: - for cur_line in ReplaceLLVMHash(ebuild_file, llvm_variant, git_hash, - svn_version): - temp_file.write(cur_line) - os.rename(temp_ebuild_file, ebuild_path) + with open(ebuild_path) as ebuild_file: + # write updates to a temporary file in case of interrupts + with open(temp_ebuild_file, "w") as temp_file: + for cur_line in ReplaceLLVMHash( + ebuild_file, llvm_variant, git_hash, svn_version + ): + temp_file.write(cur_line) + os.rename(temp_ebuild_file, ebuild_path) - # Get the path to the parent directory. - parent_dir = os.path.dirname(ebuild_path) + # Get the path to the parent directory. + parent_dir = os.path.dirname(ebuild_path) - # Stage the changes. - subprocess.check_output(['git', '-C', parent_dir, 'add', ebuild_path]) + # Stage the changes. + subprocess.check_output(["git", "-C", parent_dir, "add", ebuild_path]) def ReplaceLLVMHash(ebuild_lines, llvm_variant, git_hash, svn_version): - """Updates the LLVM git hash. + """Updates the LLVM git hash. 
- Args: - ebuild_lines: The contents of the ebuild file. - llvm_variant: The LLVM hash to update. - git_hash: The new git hash. - svn_version: The SVN-style revision number of git_hash. + Args: + ebuild_lines: The contents of the ebuild file. + llvm_variant: The LLVM hash to update. + git_hash: The new git hash. + svn_version: The SVN-style revision number of git_hash. - Yields: - lines of the modified ebuild file - """ - is_updated = False - llvm_regex = re.compile('^' + re.escape(llvm_variant.value) + - '=\"[a-z0-9]+\"') - for cur_line in ebuild_lines: - if not is_updated and llvm_regex.search(cur_line): - # Update the git hash and revision number. - cur_line = f'{llvm_variant.value}=\"{git_hash}\" # r{svn_version}\n' + Yields: + lines of the modified ebuild file + """ + is_updated = False + llvm_regex = re.compile( + "^" + re.escape(llvm_variant.value) + '="[a-z0-9]+"' + ) + for cur_line in ebuild_lines: + if not is_updated and llvm_regex.search(cur_line): + # Update the git hash and revision number. + cur_line = f'{llvm_variant.value}="{git_hash}" # r{svn_version}\n' - is_updated = True + is_updated = True - yield cur_line + yield cur_line - if not is_updated: - raise ValueError(f'Failed to update {llvm_variant.value}') + if not is_updated: + raise ValueError(f"Failed to update {llvm_variant.value}") def UprevEbuildSymlink(symlink): - """Uprevs the symlink's revision number. + """Uprevs the symlink's revision number. - Increases the revision number by 1 and stages the change in - the temporary repo. + Increases the revision number by 1 and stages the change in + the temporary repo. - Args: - symlink: The absolute path of an ebuild symlink. + Args: + symlink: The absolute path of an ebuild symlink. - Raises: - ValueError: Failed to uprev the symlink or failed to stage the changes. - """ + Raises: + ValueError: Failed to uprev the symlink or failed to stage the changes. + """ - if not os.path.islink(symlink): - raise ValueError(f'Invalid symlink provided: {symlink}') + if not os.path.islink(symlink): + raise ValueError(f"Invalid symlink provided: {symlink}") - new_symlink, is_changed = re.subn( - r'r([0-9]+).ebuild', - lambda match: 'r%s.ebuild' % str(int(match.group(1)) + 1), - symlink, - count=1) + new_symlink, is_changed = re.subn( + r"r([0-9]+).ebuild", + lambda match: "r%s.ebuild" % str(int(match.group(1)) + 1), + symlink, + count=1, + ) - if not is_changed: - raise ValueError('Failed to uprev the symlink.') + if not is_changed: + raise ValueError("Failed to uprev the symlink.") - # rename the symlink - subprocess.check_output( - ['git', '-C', - os.path.dirname(symlink), 'mv', symlink, new_symlink]) + # rename the symlink + subprocess.check_output( + ["git", "-C", os.path.dirname(symlink), "mv", symlink, new_symlink] + ) def UprevEbuildToVersion(symlink, svn_version, git_hash): - """Uprevs the ebuild's revision number. - - Increases the revision number by 1 and stages the change in - the temporary repo. - - Args: - symlink: The absolute path of an ebuild symlink. - svn_version: The SVN-style revision number of git_hash. - git_hash: The new git hash. - - Raises: - ValueError: Failed to uprev the ebuild or failed to stage the changes. 
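Concretely, `ReplaceLLVMHash` rewrites exactly one assignment line and appends the revision as a comment. The same transformation, inlined as a standalone demo with invented hashes:

    import re

    def replace_hash(lines, var, git_hash, svn_version):
        # Mirrors the rewrite performed by ReplaceLLVMHash above.
        llvm_regex = re.compile("^" + re.escape(var) + '="[a-z0-9]+"')
        done = False
        for line in lines:
            if not done and llvm_regex.search(line):
                line = f'{var}="{git_hash}" # r{svn_version}\n'
                done = True
            yield line

    ebuild = ['EAPI=7\n', 'LLVM_HASH="abc123" # r1000\n']
    print("".join(replace_hash(ebuild, "LLVM_HASH", "def456", 2000)), end="")
    # EAPI=7
    # LLVM_HASH="def456" # r2000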
- AssertionError: No llvm version provided for an LLVM uprev - """ - - if not os.path.islink(symlink): - raise ValueError(f'Invalid symlink provided: {symlink}') - - ebuild = os.path.realpath(symlink) - llvm_major_version = get_llvm_hash.GetLLVMMajorVersion(git_hash) - # llvm - package = os.path.basename(os.path.dirname(symlink)) - if not package: - raise ValueError('Tried to uprev an unknown package') - if package == 'llvm': - new_ebuild, is_changed = re.subn( - r'(\d+)\.(\d+)_pre([0-9]+)_p([0-9]+)', - '%s.\\2_pre%s_p%s' % (llvm_major_version, svn_version, - datetime.datetime.today().strftime('%Y%m%d')), - ebuild, - count=1) - # any other package - else: - new_ebuild, is_changed = re.subn(r'(\d+)\.(\d+)_pre([0-9]+)', - '%s.\\2_pre%s' % - (llvm_major_version, svn_version), - ebuild, - count=1) - - if not is_changed: # failed to increment the revision number - raise ValueError('Failed to uprev the ebuild.') - - symlink_dir = os.path.dirname(symlink) - - # Rename the ebuild - subprocess.check_output(['git', '-C', symlink_dir, 'mv', ebuild, new_ebuild]) - - # Create a symlink of the renamed ebuild - new_symlink = new_ebuild[:-len('.ebuild')] + '-r1.ebuild' - subprocess.check_output(['ln', '-s', '-r', new_ebuild, new_symlink]) - - if not os.path.islink(new_symlink): - raise ValueError(f'Invalid symlink name: {new_ebuild[:-len(".ebuild")]}') - - subprocess.check_output(['git', '-C', symlink_dir, 'add', new_symlink]) - - # Remove the old symlink - subprocess.check_output(['git', '-C', symlink_dir, 'rm', symlink]) - - -def CreatePathDictionaryFromPackages(chroot_path, update_packages): - """Creates a symlink and ebuild path pair dictionary from the packages. - - Args: - chroot_path: The absolute path to the chroot. - update_packages: The filtered packages to be updated. - - Returns: - A dictionary where the key is the absolute path to the symlink - of the package and the value is the absolute path to the ebuild of - the package. - """ - - # Construct a list containing the chroot file paths of the package(s). - chroot_file_paths = chroot.GetChrootEbuildPaths(chroot_path, update_packages) - - # Construct a list containing the symlink(s) of the package(s). - symlink_file_paths = chroot.ConvertChrootPathsToAbsolutePaths( - chroot_path, chroot_file_paths) - - # Create a dictionary where the key is the absolute path of the symlink to - # the package and the value is the absolute path to the ebuild of the package. - return GetEbuildPathsFromSymLinkPaths(symlink_file_paths) - - -def RemovePatchesFromFilesDir(patches): - """Removes the patches from $FILESDIR of a package. + """Uprevs the ebuild's revision number. + + Increases the revision number by 1 and stages the change in + the temporary repo. + + Args: + symlink: The absolute path of an ebuild symlink. + svn_version: The SVN-style revision number of git_hash. + git_hash: The new git hash. + + Raises: + ValueError: Failed to uprev the ebuild or failed to stage the changes. 
+ AssertionError: No llvm version provided for an LLVM uprev + """ + + if not os.path.islink(symlink): + raise ValueError(f"Invalid symlink provided: {symlink}") + + ebuild = os.path.realpath(symlink) + llvm_major_version = get_llvm_hash.GetLLVMMajorVersion(git_hash) + # llvm + package = os.path.basename(os.path.dirname(symlink)) + if not package: + raise ValueError("Tried to uprev an unknown package") + if package == "llvm": + new_ebuild, is_changed = re.subn( + r"(\d+)\.(\d+)_pre([0-9]+)_p([0-9]+)", + "%s.\\2_pre%s_p%s" + % ( + llvm_major_version, + svn_version, + datetime.datetime.today().strftime("%Y%m%d"), + ), + ebuild, + count=1, + ) + # any other package + else: + new_ebuild, is_changed = re.subn( + r"(\d+)\.(\d+)_pre([0-9]+)", + "%s.\\2_pre%s" % (llvm_major_version, svn_version), + ebuild, + count=1, + ) - Args: - patches: A list of absolute pathes of patches to remove + if not is_changed: # failed to increment the revision number + raise ValueError("Failed to uprev the ebuild.") - Raises: - ValueError: Failed to remove a patch in $FILESDIR. - """ + symlink_dir = os.path.dirname(symlink) - for patch in patches: + # Rename the ebuild subprocess.check_output( - ['git', '-C', os.path.dirname(patch), 'rm', '-f', patch]) + ["git", "-C", symlink_dir, "mv", ebuild, new_ebuild] + ) + # Create a symlink of the renamed ebuild + new_symlink = new_ebuild[: -len(".ebuild")] + "-r1.ebuild" + subprocess.check_output(["ln", "-s", "-r", new_ebuild, new_symlink]) -def StagePatchMetadataFileForCommit(patch_metadata_file_path): - """Stages the updated patch metadata file for commit. + if not os.path.islink(new_symlink): + raise ValueError( + f'Invalid symlink name: {new_ebuild[:-len(".ebuild")]}' + ) - Args: - patch_metadata_file_path: The absolute path to the patch metadata file. + subprocess.check_output(["git", "-C", symlink_dir, "add", new_symlink]) - Raises: - ValueError: Failed to stage the patch metadata file for commit or invalid - patch metadata file. - """ + # Remove the old symlink + subprocess.check_output(["git", "-C", symlink_dir, "rm", symlink]) - if not os.path.isfile(patch_metadata_file_path): - raise ValueError( - f'Invalid patch metadata file provided: {patch_metadata_file_path}') - # Cmd to stage the patch metadata file for commit. - subprocess.check_output([ - 'git', '-C', - os.path.dirname(patch_metadata_file_path), 'add', - patch_metadata_file_path - ]) +def CreatePathDictionaryFromPackages(chroot_path, update_packages): + """Creates a symlink and ebuild path pair dictionary from the packages. + Args: + chroot_path: The absolute path to the chroot. + update_packages: The filtered packages to be updated. -def StagePackagesPatchResultsForCommit(package_info_dict, commit_messages): - """Stages the patch results of the packages to the commit message. + Returns: + A dictionary where the key is the absolute path to the symlink + of the package and the value is the absolute path to the ebuild of + the package. + """ - Args: - package_info_dict: A dictionary where the key is the package name and the - value is a dictionary that contains information about the patches of the - package (key). - commit_messages: The commit message that has the updated ebuilds and - upreving information. + # Construct a list containing the chroot file paths of the package(s). + chroot_file_paths = chroot.GetChrootEbuildPaths( + chroot_path, update_packages + ) - Returns: - commit_messages with new additions - """ + # Construct a list containing the symlink(s) of the package(s). 
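The two `re.subn` patterns above yield renames like the following; the version numbers are invented, and only `llvm` ebuilds carry the trailing `_p<datestamp>` component:

    import datetime
    import re

    llvm_major_version, svn_version = "15", 433403  # example values
    datestamp = datetime.datetime.today().strftime("%Y%m%d")

    old = "llvm-14.0_pre427968_p20211019.ebuild"
    new, _ = re.subn(
        r"(\d+)\.(\d+)_pre([0-9]+)_p([0-9]+)",
        "%s.\\2_pre%s_p%s" % (llvm_major_version, svn_version, datestamp),
        old,
        count=1,
    )
    print(new)  # e.g. llvm-15.0_pre433403_p20220304.ebuild

    old = "compiler-rt-14.0_pre427968.ebuild"
    new, _ = re.subn(
        r"(\d+)\.(\d+)_pre([0-9]+)",
        "%s.\\2_pre%s" % (llvm_major_version, svn_version),
        old,
        count=1,
    )
    print(new)  # compiler-rt-15.0_pre433403.ebuild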
+ symlink_file_paths = chroot.ConvertChrootPathsToAbsolutePaths( + chroot_path, chroot_file_paths + ) - # For each package, check if any patches for that package have - # changed, if so, add which patches have changed to the commit - # message. - for package_name, patch_info_dict in package_info_dict.items(): - if (patch_info_dict['disabled_patches'] - or patch_info_dict['removed_patches'] - or patch_info_dict['modified_metadata']): - cur_package_header = f'\nFor the package {package_name}:' - commit_messages.append(cur_package_header) + # Create a dictionary where the key is the absolute path of the symlink to + # the package and the value is the absolute path to the ebuild of the package. + return GetEbuildPathsFromSymLinkPaths(symlink_file_paths) - # Add to the commit message that the patch metadata file was modified. - if patch_info_dict['modified_metadata']: - patch_metadata_path = patch_info_dict['modified_metadata'] - metadata_file_name = os.path.basename(patch_metadata_path) - commit_messages.append( - f'The patch metadata file {metadata_file_name} was modified') - StagePatchMetadataFileForCommit(patch_metadata_path) +def RemovePatchesFromFilesDir(patches): + """Removes the patches from $FILESDIR of a package. - # Add each disabled patch to the commit message. - if patch_info_dict['disabled_patches']: - commit_messages.append('The following patches were disabled:') + Args: + patches: A list of absolute pathes of patches to remove - for patch_path in patch_info_dict['disabled_patches']: - commit_messages.append(os.path.basename(patch_path)) + Raises: + ValueError: Failed to remove a patch in $FILESDIR. + """ - # Add each removed patch to the commit message. - if patch_info_dict['removed_patches']: - commit_messages.append('The following patches were removed:') + for patch in patches: + subprocess.check_output( + ["git", "-C", os.path.dirname(patch), "rm", "-f", patch] + ) - for patch_path in patch_info_dict['removed_patches']: - commit_messages.append(os.path.basename(patch_path)) - RemovePatchesFromFilesDir(patch_info_dict['removed_patches']) +def StagePatchMetadataFileForCommit(patch_metadata_file_path): + """Stages the updated patch metadata file for commit. - return commit_messages + Args: + patch_metadata_file_path: The absolute path to the patch metadata file. + Raises: + ValueError: Failed to stage the patch metadata file for commit or invalid + patch metadata file. + """ -def UpdateManifests(packages: List[str], chroot_path: Path): - """Updates manifest files for packages. - - Args: - packages: A list of packages to update manifests for. - chroot_path: The absolute path to the chroot. - - Raises: - CalledProcessError: ebuild failed to update manifest. - """ - manifest_ebuilds = chroot.GetChrootEbuildPaths(chroot_path, packages) - for ebuild_path in manifest_ebuilds: - subprocess_helpers.ChrootRunCommand(chroot_path, - ['ebuild', ebuild_path, 'manifest']) - - -def UpdatePackages(packages, manifest_packages: List[str], llvm_variant, - git_hash, svn_version, chroot_path: Path, mode, - git_hash_source, extra_commit_msg): - """Updates an LLVM hash and uprevs the ebuild of the packages. - - A temporary repo is created for the changes. The changes are - then uploaded for review. - - Args: - packages: A list of all the packages that are going to be updated. - manifest_packages: A list of packages to update manifests for. - llvm_variant: The LLVM hash to update. - git_hash: The new git hash. - svn_version: The SVN-style revision number of git_hash. 
- chroot_path: The absolute path to the chroot. - mode: The mode of the patch manager when handling an applicable patch - that failed to apply. - Ex. 'FailureModes.FAIL' - git_hash_source: The source of which git hash to use based off of. - Ex. 'google3', 'tot', or <version> such as 365123 - extra_commit_msg: extra test to append to the commit message. - - Returns: - A nametuple that has two (key, value) pairs, where the first pair is the - Gerrit commit URL and the second pair is the change list number. - """ - - # Construct a dictionary where the key is the absolute path of the symlink to - # the package and the value is the absolute path to the ebuild of the package. - paths_dict = CreatePathDictionaryFromPackages(chroot_path, packages) - - repo_path = os.path.dirname(next(iter(paths_dict.values()))) - - branch = 'update-' + llvm_variant.value + '-' + git_hash - - git.CreateBranch(repo_path, branch) - - try: - commit_message_header = 'llvm' - if llvm_variant == LLVMVariant.next: - commit_message_header = 'llvm-next' - if git_hash_source in get_llvm_hash.KNOWN_HASH_SOURCES: - commit_message_header += ( - f'/{git_hash_source}: upgrade to {git_hash} (r{svn_version})') - else: - commit_message_header += (f': upgrade to {git_hash} (r{svn_version})') + if not os.path.isfile(patch_metadata_file_path): + raise ValueError( + f"Invalid patch metadata file provided: {patch_metadata_file_path}" + ) - commit_lines = [ - commit_message_header + '\n', - 'The following packages have been updated:', - ] + # Cmd to stage the patch metadata file for commit. + subprocess.check_output( + [ + "git", + "-C", + os.path.dirname(patch_metadata_file_path), + "add", + patch_metadata_file_path, + ] + ) - # Holds the list of packages that are updating. - packages = [] - # Iterate through the dictionary. - # - # For each iteration: - # 1) Update the ebuild's LLVM hash. - # 2) Uprev the ebuild (symlink). - # 3) Add the modified package to the commit message. - for symlink_path, ebuild_path in paths_dict.items(): - path_to_ebuild_dir = os.path.dirname(ebuild_path) +def StagePackagesPatchResultsForCommit(package_info_dict, commit_messages): + """Stages the patch results of the packages to the commit message. + + Args: + package_info_dict: A dictionary where the key is the package name and the + value is a dictionary that contains information about the patches of the + package (key). + commit_messages: The commit message that has the updated ebuilds and + upreving information. - UpdateEbuildLLVMHash(ebuild_path, llvm_variant, git_hash, svn_version) + Returns: + commit_messages with new additions + """ - if llvm_variant == LLVMVariant.current: - UprevEbuildToVersion(symlink_path, svn_version, git_hash) - else: - UprevEbuildSymlink(symlink_path) + # For each package, check if any patches for that package have + # changed, if so, add which patches have changed to the commit + # message. + for package_name, patch_info_dict in package_info_dict.items(): + if ( + patch_info_dict["disabled_patches"] + or patch_info_dict["removed_patches"] + or patch_info_dict["modified_metadata"] + ): + cur_package_header = f"\nFor the package {package_name}:" + commit_messages.append(cur_package_header) - cur_dir_name = os.path.basename(path_to_ebuild_dir) - parent_dir_name = os.path.basename(os.path.dirname(path_to_ebuild_dir)) + # Add to the commit message that the patch metadata file was modified. 
+ if patch_info_dict["modified_metadata"]: + patch_metadata_path = patch_info_dict["modified_metadata"] + metadata_file_name = os.path.basename(patch_metadata_path) + commit_messages.append( + f"The patch metadata file {metadata_file_name} was modified" + ) - packages.append(f'{parent_dir_name}/{cur_dir_name}') - commit_lines.append(f'{parent_dir_name}/{cur_dir_name}') + StagePatchMetadataFileForCommit(patch_metadata_path) - if manifest_packages: - UpdateManifests(manifest_packages, chroot_path) - commit_lines.append('Updated manifest for:') - commit_lines.extend(manifest_packages) + # Add each disabled patch to the commit message. + if patch_info_dict["disabled_patches"]: + commit_messages.append("The following patches were disabled:") - EnsurePackageMaskContains(chroot_path, git_hash) + for patch_path in patch_info_dict["disabled_patches"]: + commit_messages.append(os.path.basename(patch_path)) - # Handle the patches for each package. - package_info_dict = UpdatePackagesPatchMetadataFile( - chroot_path, svn_version, packages, mode) + # Add each removed patch to the commit message. + if patch_info_dict["removed_patches"]: + commit_messages.append("The following patches were removed:") - # Update the commit message if changes were made to a package's patches. - commit_lines = StagePackagesPatchResultsForCommit(package_info_dict, - commit_lines) + for patch_path in patch_info_dict["removed_patches"]: + commit_messages.append(os.path.basename(patch_path)) - if extra_commit_msg: - commit_lines.append(extra_commit_msg) + RemovePatchesFromFilesDir(patch_info_dict["removed_patches"]) - change_list = git.UploadChanges(repo_path, branch, commit_lines) + return commit_messages - finally: - git.DeleteBranch(repo_path, branch) - return change_list +def UpdateManifests(packages: List[str], chroot_path: Path): + """Updates manifest files for packages. + + Args: + packages: A list of packages to update manifests for. + chroot_path: The absolute path to the chroot. + + Raises: + CalledProcessError: ebuild failed to update manifest. + """ + manifest_ebuilds = chroot.GetChrootEbuildPaths(chroot_path, packages) + for ebuild_path in manifest_ebuilds: + subprocess_helpers.ChrootRunCommand( + chroot_path, ["ebuild", ebuild_path, "manifest"] + ) + + +def UpdatePackages( + packages, + manifest_packages: List[str], + llvm_variant, + git_hash, + svn_version, + chroot_path: Path, + mode, + git_hash_source, + extra_commit_msg, +): + """Updates an LLVM hash and uprevs the ebuild of the packages. + + A temporary repo is created for the changes. The changes are + then uploaded for review. + + Args: + packages: A list of all the packages that are going to be updated. + manifest_packages: A list of packages to update manifests for. + llvm_variant: The LLVM hash to update. + git_hash: The new git hash. + svn_version: The SVN-style revision number of git_hash. + chroot_path: The absolute path to the chroot. + mode: The mode of the patch manager when handling an applicable patch + that failed to apply. + Ex. 'FailureModes.FAIL' + git_hash_source: The source of which git hash to use based off of. + Ex. 'google3', 'tot', or <version> such as 365123 + extra_commit_msg: extra test to append to the commit message. + + Returns: + A nametuple that has two (key, value) pairs, where the first pair is the + Gerrit commit URL and the second pair is the change list number. 
+ """ + + # Construct a dictionary where the key is the absolute path of the symlink to + # the package and the value is the absolute path to the ebuild of the package. + paths_dict = CreatePathDictionaryFromPackages(chroot_path, packages) + + repo_path = os.path.dirname(next(iter(paths_dict.values()))) + + branch = "update-" + llvm_variant.value + "-" + git_hash + + git.CreateBranch(repo_path, branch) + + try: + commit_message_header = "llvm" + if llvm_variant == LLVMVariant.next: + commit_message_header = "llvm-next" + if git_hash_source in get_llvm_hash.KNOWN_HASH_SOURCES: + commit_message_header += ( + f"/{git_hash_source}: upgrade to {git_hash} (r{svn_version})" + ) + else: + commit_message_header += f": upgrade to {git_hash} (r{svn_version})" + + commit_lines = [ + commit_message_header + "\n", + "The following packages have been updated:", + ] + + # Holds the list of packages that are updating. + packages = [] + + # Iterate through the dictionary. + # + # For each iteration: + # 1) Update the ebuild's LLVM hash. + # 2) Uprev the ebuild (symlink). + # 3) Add the modified package to the commit message. + for symlink_path, ebuild_path in paths_dict.items(): + path_to_ebuild_dir = os.path.dirname(ebuild_path) + + UpdateEbuildLLVMHash( + ebuild_path, llvm_variant, git_hash, svn_version + ) + + if llvm_variant == LLVMVariant.current: + UprevEbuildToVersion(symlink_path, svn_version, git_hash) + else: + UprevEbuildSymlink(symlink_path) + + cur_dir_name = os.path.basename(path_to_ebuild_dir) + parent_dir_name = os.path.basename( + os.path.dirname(path_to_ebuild_dir) + ) + + packages.append(f"{parent_dir_name}/{cur_dir_name}") + commit_lines.append(f"{parent_dir_name}/{cur_dir_name}") + + if manifest_packages: + UpdateManifests(manifest_packages, chroot_path) + commit_lines.append("Updated manifest for:") + commit_lines.extend(manifest_packages) + + EnsurePackageMaskContains(chroot_path, git_hash) + + # Handle the patches for each package. + package_info_dict = UpdatePackagesPatchMetadataFile( + chroot_path, svn_version, packages, mode + ) + + # Update the commit message if changes were made to a package's patches. + commit_lines = StagePackagesPatchResultsForCommit( + package_info_dict, commit_lines + ) + + if extra_commit_msg: + commit_lines.append(extra_commit_msg) + + change_list = git.UploadChanges(repo_path, branch, commit_lines) + + finally: + git.DeleteBranch(repo_path, branch) + + return change_list def EnsurePackageMaskContains(chroot_path, git_hash): - """Adds the major version of llvm to package.mask if it's not already present. + """Adds the major version of llvm to package.mask if it's not already present. - Args: - chroot_path: The absolute path to the chroot. - git_hash: The new git hash. + Args: + chroot_path: The absolute path to the chroot. + git_hash: The new git hash. 
- Raises: - FileExistsError: package.mask not found in ../../chromiumos-overlay - """ + Raises: + FileExistsError: package.mask not found in ../../chromiumos-overlay + """ - llvm_major_version = get_llvm_hash.GetLLVMMajorVersion(git_hash) + llvm_major_version = get_llvm_hash.GetLLVMMajorVersion(git_hash) - overlay_dir = os.path.join(chroot_path, 'src/third_party/chromiumos-overlay') - mask_path = os.path.join(overlay_dir, - 'profiles/targets/chromeos/package.mask') - with open(mask_path, 'r+') as mask_file: - mask_contents = mask_file.read() - expected_line = f'=sys-devel/llvm-{llvm_major_version}.0_pre*\n' - if expected_line not in mask_contents: - mask_file.write(expected_line) + overlay_dir = os.path.join( + chroot_path, "src/third_party/chromiumos-overlay" + ) + mask_path = os.path.join( + overlay_dir, "profiles/targets/chromeos/package.mask" + ) + with open(mask_path, "r+") as mask_file: + mask_contents = mask_file.read() + expected_line = f"=sys-devel/llvm-{llvm_major_version}.0_pre*\n" + if expected_line not in mask_contents: + mask_file.write(expected_line) - subprocess.check_output(['git', '-C', overlay_dir, 'add', mask_path]) + subprocess.check_output(["git", "-C", overlay_dir, "add", mask_path]) def UpdatePackagesPatchMetadataFile( - chroot_path: Path, svn_version: int, packages: List[str], - mode: failure_modes.FailureModes) -> Dict[str, patch_utils.PatchInfo]: - """Updates the packages metadata file. - - Args: - chroot_path: The absolute path to the chroot. - svn_version: The version to use for patch management. - packages: All the packages to update their patch metadata file. - mode: The mode for the patch manager to use when an applicable patch - fails to apply. - Ex: 'FailureModes.FAIL' - - Returns: - A dictionary where the key is the package name and the value is a dictionary - that has information on the patches. - """ - - # A dictionary where the key is the package name and the value is a dictionary - # that has information on the patches. - package_info = {} - - llvm_hash = get_llvm_hash.LLVMHash() - - with llvm_hash.CreateTempDirectory() as temp_dir: - with get_llvm_hash.CreateTempLLVMRepo(temp_dir) as dirname: - # Ensure that 'svn_version' exists in the chromiumum mirror of LLVM by - # finding its corresponding git hash. - git_hash = get_llvm_hash.GetGitHashFrom(dirname, svn_version) - move_head_cmd = ['git', '-C', dirname, 'checkout', git_hash, '-q'] - subprocess.run(move_head_cmd, stdout=subprocess.DEVNULL, check=True) - - for cur_package in packages: - # Get the absolute path to $FILESDIR of the package. 
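The mask update above is append-if-absent. The same logic against an in-memory file, with an invented major version:

    import io

    def ensure_mask_contains(mask_file, llvm_major_version):
        # Mirrors EnsurePackageMaskContains: append the pin only if missing.
        contents = mask_file.read()
        expected_line = f"=sys-devel/llvm-{llvm_major_version}.0_pre*\n"
        if expected_line not in contents:
            mask_file.write(expected_line)

    buf = io.StringIO("=sys-devel/llvm-14.0_pre*\n")
    ensure_mask_contains(buf, "15")
    print(buf.getvalue(), end="")
    # =sys-devel/llvm-14.0_pre*
    # =sys-devel/llvm-15.0_pre*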
- chroot_ebuild_str = subprocess_helpers.ChrootRunCommand( - chroot_path, ['equery', 'w', cur_package]).strip() - if not chroot_ebuild_str: - raise RuntimeError(f'could not find ebuild for {cur_package}') - chroot_ebuild_path = Path( - chroot.ConvertChrootPathsToAbsolutePaths(chroot_path, - [chroot_ebuild_str])[0]) - patches_json_fp = chroot_ebuild_path.parent / 'files' / 'PATCHES.json' - if not patches_json_fp.is_file(): - raise RuntimeError(f'patches file {patches_json_fp} is not a file') - - src_path = Path(dirname) - with patch_utils.git_clean_context(src_path): - patches_info = patch_utils.apply_all_from_json( - svn_version=svn_version, - llvm_src_dir=src_path, - patches_json_fp=patches_json_fp, - continue_on_failure=mode == failure_modes.FailureModes.CONTINUE, - ) - package_info[cur_package] = patches_info._asdict() - - return package_info + chroot_path: Path, + svn_version: int, + packages: List[str], + mode: failure_modes.FailureModes, +) -> Dict[str, patch_utils.PatchInfo]: + """Updates the packages metadata file. + + Args: + chroot_path: The absolute path to the chroot. + svn_version: The version to use for patch management. + packages: All the packages to update their patch metadata file. + mode: The mode for the patch manager to use when an applicable patch + fails to apply. + Ex: 'FailureModes.FAIL' + + Returns: + A dictionary where the key is the package name and the value is a dictionary + that has information on the patches. + """ + + # A dictionary where the key is the package name and the value is a dictionary + # that has information on the patches. + package_info = {} + + llvm_hash = get_llvm_hash.LLVMHash() + + with llvm_hash.CreateTempDirectory() as temp_dir: + with get_llvm_hash.CreateTempLLVMRepo(temp_dir) as dirname: + # Ensure that 'svn_version' exists in the chromiumum mirror of LLVM by + # finding its corresponding git hash. + git_hash = get_llvm_hash.GetGitHashFrom(dirname, svn_version) + move_head_cmd = ["git", "-C", dirname, "checkout", git_hash, "-q"] + subprocess.run(move_head_cmd, stdout=subprocess.DEVNULL, check=True) + + for cur_package in packages: + # Get the absolute path to $FILESDIR of the package. + chroot_ebuild_str = subprocess_helpers.ChrootRunCommand( + chroot_path, ["equery", "w", cur_package] + ).strip() + if not chroot_ebuild_str: + raise RuntimeError( + f"could not find ebuild for {cur_package}" + ) + chroot_ebuild_path = Path( + chroot.ConvertChrootPathsToAbsolutePaths( + chroot_path, [chroot_ebuild_str] + )[0] + ) + patches_json_fp = ( + chroot_ebuild_path.parent / "files" / "PATCHES.json" + ) + if not patches_json_fp.is_file(): + raise RuntimeError( + f"patches file {patches_json_fp} is not a file" + ) + + src_path = Path(dirname) + with patch_utils.git_clean_context(src_path): + patches_info = patch_utils.apply_all_from_json( + svn_version=svn_version, + llvm_src_dir=src_path, + patches_json_fp=patches_json_fp, + continue_on_failure=mode + == failure_modes.FailureModes.CONTINUE, + ) + package_info[cur_package] = patches_info._asdict() + + return package_info def main(): - """Updates the LLVM next hash for each package. + """Updates the LLVM next hash for each package. - Raises: - AssertionError: The script was run inside the chroot. - """ + Raises: + AssertionError: The script was run inside the chroot. 
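Each package's PATCHES.json is now expected to carry the version_range schema exclusively. The shape consumed here matches `_default_json_dict` in the unit tests; the values below are invented, and the patch manager treats an entry as applicable over the half-open interval from <= svn_version < until:

    patch_entry = {
        "metadata": {"title": "hello world"},
        "platforms": ["a"],
        "rel_patch_path": "x/y/z",
        "version_range": {"from": 4, "until": 9},
    }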
+ """ - chroot.VerifyOutsideChroot() + chroot.VerifyOutsideChroot() - args_output = GetCommandLineArgs() + args_output = GetCommandLineArgs() - llvm_variant = LLVMVariant.current - if args_output.is_llvm_next: - llvm_variant = LLVMVariant.next + llvm_variant = LLVMVariant.current + if args_output.is_llvm_next: + llvm_variant = LLVMVariant.next - git_hash_source = args_output.llvm_version + git_hash_source = args_output.llvm_version - git_hash, svn_version = get_llvm_hash.GetLLVMHashAndVersionFromSVNOption( - git_hash_source) + git_hash, svn_version = get_llvm_hash.GetLLVMHashAndVersionFromSVNOption( + git_hash_source + ) - packages = args_output.update_packages.split(',') - manifest_packages = args_output.manifest_packages.split(',') - change_list = UpdatePackages(packages=packages, - manifest_packages=manifest_packages, - llvm_variant=llvm_variant, - git_hash=git_hash, - svn_version=svn_version, - chroot_path=args_output.chroot_path, - mode=failure_modes.FailureModes( - args_output.failure_mode), - git_hash_source=git_hash_source, - extra_commit_msg=None) + packages = args_output.update_packages.split(",") + manifest_packages = args_output.manifest_packages.split(",") + change_list = UpdatePackages( + packages=packages, + manifest_packages=manifest_packages, + llvm_variant=llvm_variant, + git_hash=git_hash, + svn_version=svn_version, + chroot_path=args_output.chroot_path, + mode=failure_modes.FailureModes(args_output.failure_mode), + git_hash_source=git_hash_source, + extra_commit_msg=None, + ) - print(f'Successfully updated packages to {git_hash} ({svn_version})') - print(f'Gerrit URL: {change_list.url}') - print(f'Change list number: {change_list.cl_number}') + print(f"Successfully updated packages to {git_hash} ({svn_version})") + print(f"Gerrit URL: {change_list.url}") + print(f"Change list number: {change_list.cl_number}") -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/llvm_tools/update_chromeos_llvm_hash_unittest.py b/llvm_tools/update_chromeos_llvm_hash_unittest.py index 9a51b62a..c361334a 100755 --- a/llvm_tools/update_chromeos_llvm_hash_unittest.py +++ b/llvm_tools/update_chromeos_llvm_hash_unittest.py @@ -24,885 +24,1058 @@ import subprocess_helpers import test_helpers import update_chromeos_llvm_hash + # These are unittests; protected access is OK to a point. # pylint: disable=protected-access class UpdateLLVMHashTest(unittest.TestCase): - """Test class for updating LLVM hashes of packages.""" - - @mock.patch.object(os.path, 'realpath') - def testDefaultCrosRootFromCrOSCheckout(self, mock_llvm_tools): - llvm_tools_path = '/path/to/cros/src/third_party/toolchain-utils/llvm_tools' - mock_llvm_tools.return_value = llvm_tools_path - self.assertEqual(update_chromeos_llvm_hash.defaultCrosRoot(), - Path('/path/to/cros')) - - @mock.patch.object(os.path, 'realpath') - def testDefaultCrosRootFromOutsideCrOSCheckout(self, mock_llvm_tools): - mock_llvm_tools.return_value = '~/toolchain-utils/llvm_tools' - self.assertEqual(update_chromeos_llvm_hash.defaultCrosRoot(), - Path.home() / 'chromiumos') - - # Simulate behavior of 'os.path.isfile()' when the ebuild path to a package - # does not exist. 
- @mock.patch.object(os.path, 'isfile', return_value=False) - def testFailedToUpdateLLVMHashForInvalidEbuildPath(self, mock_isfile): - ebuild_path = '/some/path/to/package.ebuild' - llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current - git_hash = 'a123testhash1' - svn_version = 1000 - - # Verify the exception is raised when the ebuild path does not exist. - with self.assertRaises(ValueError) as err: - update_chromeos_llvm_hash.UpdateEbuildLLVMHash(ebuild_path, llvm_variant, - git_hash, svn_version) - - self.assertEqual(str(err.exception), - 'Invalid ebuild path provided: %s' % ebuild_path) - - mock_isfile.assert_called_once() - - # Simulate 'os.path.isfile' behavior on a valid ebuild path. - @mock.patch.object(os.path, 'isfile', return_value=True) - def testFailedToUpdateLLVMHash(self, mock_isfile): - # Create a temporary file to simulate an ebuild file of a package. - with test_helpers.CreateTemporaryJsonFile() as ebuild_file: - with open(ebuild_file, 'w') as f: - f.write('\n'.join([ - 'First line in the ebuild', 'Second line in the ebuild', - 'Last line in the ebuild' - ])) - - llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current - git_hash = 'a123testhash1' - svn_version = 1000 - - # Verify the exception is raised when the ebuild file does not have - # 'LLVM_HASH'. - with self.assertRaises(ValueError) as err: - update_chromeos_llvm_hash.UpdateEbuildLLVMHash(ebuild_file, - llvm_variant, git_hash, - svn_version) - - self.assertEqual(str(err.exception), 'Failed to update LLVM_HASH') - - llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next - - self.assertEqual(mock_isfile.call_count, 2) - - # Simulate 'os.path.isfile' behavior on a valid ebuild path. - @mock.patch.object(os.path, 'isfile', return_value=True) - def testFailedToUpdateLLVMNextHash(self, mock_isfile): - # Create a temporary file to simulate an ebuild file of a package. - with test_helpers.CreateTemporaryJsonFile() as ebuild_file: - with open(ebuild_file, 'w') as f: - f.write('\n'.join([ - 'First line in the ebuild', 'Second line in the ebuild', - 'Last line in the ebuild' - ])) - - llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next - git_hash = 'a123testhash1' - svn_version = 1000 - - # Verify the exception is raised when the ebuild file does not have - # 'LLVM_NEXT_HASH'. - with self.assertRaises(ValueError) as err: - update_chromeos_llvm_hash.UpdateEbuildLLVMHash(ebuild_file, - llvm_variant, git_hash, - svn_version) - - self.assertEqual(str(err.exception), 'Failed to update LLVM_NEXT_HASH') - - self.assertEqual(mock_isfile.call_count, 2) - - @mock.patch.object(os.path, 'isfile', return_value=True) - @mock.patch.object(subprocess, 'check_output', return_value=None) - def testSuccessfullyStageTheEbuildForCommitForLLVMHashUpdate( - self, mock_stage_commit_command, mock_isfile): - - # Create a temporary file to simulate an ebuild file of a package. - with test_helpers.CreateTemporaryJsonFile() as ebuild_file: - # Updates LLVM_HASH to 'git_hash' and revision to - # 'svn_version'. 
- llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current - git_hash = 'a123testhash1' - svn_version = 1000 - - with open(ebuild_file, 'w') as f: - f.write('\n'.join([ - 'First line in the ebuild', 'Second line in the ebuild', - 'LLVM_HASH=\"a12b34c56d78e90\" # r500', 'Last line in the ebuild' - ])) - - update_chromeos_llvm_hash.UpdateEbuildLLVMHash(ebuild_file, llvm_variant, - git_hash, svn_version) - - expected_file_contents = [ - 'First line in the ebuild\n', 'Second line in the ebuild\n', - 'LLVM_HASH=\"a123testhash1\" # r1000\n', 'Last line in the ebuild' - ] - - # Verify the new file contents of the ebuild file match the expected file - # contents. - with open(ebuild_file) as new_file: - file_contents_as_a_list = [cur_line for cur_line in new_file] - self.assertListEqual(file_contents_as_a_list, expected_file_contents) - - self.assertEqual(mock_isfile.call_count, 2) - - mock_stage_commit_command.assert_called_once() - - @mock.patch.object(os.path, 'isfile', return_value=True) - @mock.patch.object(subprocess, 'check_output', return_value=None) - def testSuccessfullyStageTheEbuildForCommitForLLVMNextHashUpdate( - self, mock_stage_commit_command, mock_isfile): - - # Create a temporary file to simulate an ebuild file of a package. - with test_helpers.CreateTemporaryJsonFile() as ebuild_file: - # Updates LLVM_NEXT_HASH to 'git_hash' and revision to - # 'svn_version'. - llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next - git_hash = 'a123testhash1' - svn_version = 1000 - - with open(ebuild_file, 'w') as f: - f.write('\n'.join([ - 'First line in the ebuild', 'Second line in the ebuild', - 'LLVM_NEXT_HASH=\"a12b34c56d78e90\" # r500', - 'Last line in the ebuild' - ])) - - update_chromeos_llvm_hash.UpdateEbuildLLVMHash(ebuild_file, llvm_variant, - git_hash, svn_version) - - expected_file_contents = [ - 'First line in the ebuild\n', 'Second line in the ebuild\n', - 'LLVM_NEXT_HASH=\"a123testhash1\" # r1000\n', - 'Last line in the ebuild' - ] - - # Verify the new file contents of the ebuild file match the expected file - # contents. - with open(ebuild_file) as new_file: - file_contents_as_a_list = [cur_line for cur_line in new_file] - self.assertListEqual(file_contents_as_a_list, expected_file_contents) - - self.assertEqual(mock_isfile.call_count, 2) - - mock_stage_commit_command.assert_called_once() - - @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion') - @mock.patch.object(os.path, 'islink', return_value=False) - def testFailedToUprevEbuildToVersionForInvalidSymlink( - self, mock_islink, mock_llvm_version): - symlink_path = '/path/to/chroot/package/package.ebuild' - svn_version = 1000 - git_hash = 'badf00d' - mock_llvm_version.return_value = '1234' - - # Verify the exception is raised when a invalid symbolic link is passed in. - with self.assertRaises(ValueError) as err: - update_chromeos_llvm_hash.UprevEbuildToVersion(symlink_path, svn_version, - git_hash) - - self.assertEqual(str(err.exception), - 'Invalid symlink provided: %s' % symlink_path) - - mock_islink.assert_called_once() - mock_llvm_version.assert_not_called() - - @mock.patch.object(os.path, 'islink', return_value=False) - def testFailedToUprevEbuildSymlinkForInvalidSymlink(self, mock_islink): - symlink_path = '/path/to/chroot/package/package.ebuild' - - # Verify the exception is raised when a invalid symbolic link is passed in. 
- with self.assertRaises(ValueError) as err: - update_chromeos_llvm_hash.UprevEbuildSymlink(symlink_path) - - self.assertEqual(str(err.exception), - 'Invalid symlink provided: %s' % symlink_path) - - mock_islink.assert_called_once() - - @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion') - # Simulate 'os.path.islink' when a symbolic link is passed in. - @mock.patch.object(os.path, 'islink', return_value=True) - # Simulate 'os.path.realpath' when a symbolic link is passed in. - @mock.patch.object(os.path, 'realpath', return_value=True) - def testFailedToUprevEbuildToVersion(self, mock_realpath, mock_islink, - mock_llvm_version): - symlink_path = '/path/to/chroot/llvm/llvm_pre123_p.ebuild' - mock_realpath.return_value = '/abs/path/to/llvm/llvm_pre123_p.ebuild' - git_hash = 'badf00d' - mock_llvm_version.return_value = '1234' - svn_version = 1000 - - # Verify the exception is raised when the symlink does not match the - # expected pattern - with self.assertRaises(ValueError) as err: - update_chromeos_llvm_hash.UprevEbuildToVersion(symlink_path, svn_version, - git_hash) - - self.assertEqual(str(err.exception), 'Failed to uprev the ebuild.') - - mock_llvm_version.assert_called_once_with(git_hash) - mock_islink.assert_called_once_with(symlink_path) - - # Simulate 'os.path.islink' when a symbolic link is passed in. - @mock.patch.object(os.path, 'islink', return_value=True) - def testFailedToUprevEbuildSymlink(self, mock_islink): - symlink_path = '/path/to/chroot/llvm/llvm_pre123_p.ebuild' - - # Verify the exception is raised when the symlink does not match the - # expected pattern - with self.assertRaises(ValueError) as err: - update_chromeos_llvm_hash.UprevEbuildSymlink(symlink_path) - - self.assertEqual(str(err.exception), 'Failed to uprev the symlink.') - - mock_islink.assert_called_once_with(symlink_path) - - @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion') - @mock.patch.object(os.path, 'islink', return_value=True) - @mock.patch.object(os.path, 'realpath') - @mock.patch.object(subprocess, 'check_output', return_value=None) - def testSuccessfullyUprevEbuildToVersionLLVM(self, mock_command_output, - mock_realpath, mock_islink, - mock_llvm_version): - symlink = '/path/to/llvm/llvm-12.0_pre3_p2-r10.ebuild' - ebuild = '/abs/path/to/llvm/llvm-12.0_pre3_p2.ebuild' - mock_realpath.return_value = ebuild - git_hash = 'badf00d' - mock_llvm_version.return_value = '1234' - svn_version = 1000 - - update_chromeos_llvm_hash.UprevEbuildToVersion(symlink, svn_version, - git_hash) - - mock_llvm_version.assert_called_once_with(git_hash) - - mock_islink.assert_called() - - mock_realpath.assert_called_once_with(symlink) - - mock_command_output.assert_called() - - # Verify commands - symlink_dir = os.path.dirname(symlink) - timestamp = datetime.datetime.today().strftime('%Y%m%d') - new_ebuild = '/abs/path/to/llvm/llvm-1234.0_pre1000_p%s.ebuild' % timestamp - new_symlink = new_ebuild[:-len('.ebuild')] + '-r1.ebuild' - - expected_cmd = ['git', '-C', symlink_dir, 'mv', ebuild, new_ebuild] - self.assertEqual(mock_command_output.call_args_list[0], - mock.call(expected_cmd)) - - expected_cmd = ['ln', '-s', '-r', new_ebuild, new_symlink] - self.assertEqual(mock_command_output.call_args_list[1], - mock.call(expected_cmd)) - - expected_cmd = ['git', '-C', symlink_dir, 'add', new_symlink] - self.assertEqual(mock_command_output.call_args_list[2], - mock.call(expected_cmd)) - - expected_cmd = ['git', '-C', symlink_dir, 'rm', symlink] - self.assertEqual(mock_command_output.call_args_list[3], - 
mock.call(expected_cmd)) - - @mock.patch.object(chroot, - 'GetChrootEbuildPaths', - return_value=['/chroot/path/test.ebuild']) - @mock.patch.object(subprocess, 'check_output', return_value='') - def testManifestUpdate(self, mock_subprocess, mock_ebuild_paths): - manifest_packages = ['sys-devel/llvm'] - chroot_path = '/path/to/chroot' - update_chromeos_llvm_hash.UpdateManifests(manifest_packages, chroot_path) - - args = mock_subprocess.call_args[0][-1] - manifest_cmd = [ - 'cros_sdk', '--', 'ebuild', '/chroot/path/test.ebuild', 'manifest' - ] - self.assertEqual(args, manifest_cmd) - mock_ebuild_paths.assert_called_once() - - @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion') - @mock.patch.object(os.path, 'islink', return_value=True) - @mock.patch.object(os.path, 'realpath') - @mock.patch.object(subprocess, 'check_output', return_value=None) - def testSuccessfullyUprevEbuildToVersionNonLLVM(self, mock_command_output, - mock_realpath, mock_islink, - mock_llvm_version): - symlink = '/abs/path/to/compiler-rt/compiler-rt-12.0_pre314159265-r4.ebuild' - ebuild = '/abs/path/to/compiler-rt/compiler-rt-12.0_pre314159265.ebuild' - mock_realpath.return_value = ebuild - mock_llvm_version.return_value = '1234' - svn_version = 1000 - git_hash = '5678' - - update_chromeos_llvm_hash.UprevEbuildToVersion(symlink, svn_version, - git_hash) - - mock_islink.assert_called() - - mock_realpath.assert_called_once_with(symlink) - - mock_llvm_version.assert_called_once_with(git_hash) - - mock_command_output.assert_called() - - # Verify commands - symlink_dir = os.path.dirname(symlink) - new_ebuild = '/abs/path/to/compiler-rt/compiler-rt-1234.0_pre1000.ebuild' - new_symlink = new_ebuild[:-len('.ebuild')] + '-r1.ebuild' - - expected_cmd = ['git', '-C', symlink_dir, 'mv', ebuild, new_ebuild] - self.assertEqual(mock_command_output.call_args_list[0], - mock.call(expected_cmd)) - - expected_cmd = ['ln', '-s', '-r', new_ebuild, new_symlink] - self.assertEqual(mock_command_output.call_args_list[1], - mock.call(expected_cmd)) - - expected_cmd = ['git', '-C', symlink_dir, 'add', new_symlink] - self.assertEqual(mock_command_output.call_args_list[2], - mock.call(expected_cmd)) - - expected_cmd = ['git', '-C', symlink_dir, 'rm', symlink] - self.assertEqual(mock_command_output.call_args_list[3], - mock.call(expected_cmd)) - - @mock.patch.object(os.path, 'islink', return_value=True) - @mock.patch.object(subprocess, 'check_output', return_value=None) - def testSuccessfullyUprevEbuildSymlink(self, mock_command_output, - mock_islink): - symlink_to_uprev = '/symlink/to/package-r1.ebuild' - - update_chromeos_llvm_hash.UprevEbuildSymlink(symlink_to_uprev) - - mock_islink.assert_called_once_with(symlink_to_uprev) - - mock_command_output.assert_called_once() - - # Simulate behavior of 'os.path.isdir()' when the path to the repo is not a - - # directory. - - @mock.patch.object(chroot, 'GetChrootEbuildPaths') - @mock.patch.object(chroot, 'ConvertChrootPathsToAbsolutePaths') - def testExceptionRaisedWhenCreatingPathDictionaryFromPackages( - self, mock_chroot_paths_to_symlinks, mock_get_chroot_paths): - - chroot_path = '/some/path/to/chroot' - - package_name = 'test-pckg/package' - package_chroot_path = '/some/chroot/path/to/package-r1.ebuild' - - # Test function to simulate 'ConvertChrootPathsToAbsolutePaths' when a - # symlink does not start with the prefix '/mnt/host/source'. 
- def BadPrefixChrootPath(*args): - assert len(args) == 2 - raise ValueError('Invalid prefix for the chroot path: ' - '%s' % package_chroot_path) - - # Simulate 'GetChrootEbuildPaths' when valid packages are passed in. - # - # Returns a list of chroot paths. - mock_get_chroot_paths.return_value = [package_chroot_path] - - # Use test function to simulate 'ConvertChrootPathsToAbsolutePaths' - # behavior. - mock_chroot_paths_to_symlinks.side_effect = BadPrefixChrootPath - - # Verify exception is raised when for an invalid prefix in the symlink. - with self.assertRaises(ValueError) as err: - update_chromeos_llvm_hash.CreatePathDictionaryFromPackages( - chroot_path, [package_name]) - - self.assertEqual( - str(err.exception), 'Invalid prefix for the chroot path: ' - '%s' % package_chroot_path) - - mock_get_chroot_paths.assert_called_once_with(chroot_path, [package_name]) - - mock_chroot_paths_to_symlinks.assert_called_once_with( - chroot_path, [package_chroot_path]) - - @mock.patch.object(chroot, 'GetChrootEbuildPaths') - @mock.patch.object(chroot, 'ConvertChrootPathsToAbsolutePaths') - @mock.patch.object(update_chromeos_llvm_hash, - 'GetEbuildPathsFromSymLinkPaths') - def testSuccessfullyCreatedPathDictionaryFromPackages( - self, mock_ebuild_paths_from_symlink_paths, - mock_chroot_paths_to_symlinks, mock_get_chroot_paths): - - package_chroot_path = '/mnt/host/source/src/path/to/package-r1.ebuild' - - # Simulate 'GetChrootEbuildPaths' when returning a chroot path for a valid - # package. - # - # Returns a list of chroot paths. - mock_get_chroot_paths.return_value = [package_chroot_path] - - package_symlink_path = '/some/path/to/chroot/src/path/to/package-r1.ebuild' - - # Simulate 'ConvertChrootPathsToAbsolutePaths' when returning a symlink to - # a chroot path that points to a package. - # - # Returns a list of symlink file paths. - mock_chroot_paths_to_symlinks.return_value = [package_symlink_path] - - chroot_package_path = '/some/path/to/chroot/src/path/to/package.ebuild' - - # Simulate 'GetEbuildPathsFromSymlinkPaths' when returning a dictionary of - # a symlink that points to an ebuild. - # - # Returns a dictionary of a symlink and ebuild file path pair - # where the key is the absolute path to the symlink of the ebuild file - # and the value is the absolute path to the ebuild file of the package. 
- mock_ebuild_paths_from_symlink_paths.return_value = { - package_symlink_path: chroot_package_path - } - - chroot_path = '/some/path/to/chroot' - package_name = 'test-pckg/package' - - self.assertEqual( - update_chromeos_llvm_hash.CreatePathDictionaryFromPackages( - chroot_path, [package_name]), - {package_symlink_path: chroot_package_path}) - - mock_get_chroot_paths.assert_called_once_with(chroot_path, [package_name]) - - mock_chroot_paths_to_symlinks.assert_called_once_with( - chroot_path, [package_chroot_path]) - - mock_ebuild_paths_from_symlink_paths.assert_called_once_with( - [package_symlink_path]) - - @mock.patch.object(subprocess, 'check_output', return_value=None) - def testSuccessfullyRemovedPatchesFromFilesDir(self, mock_run_cmd): - patches_to_remove_list = [ - '/abs/path/to/filesdir/cherry/fix_output.patch', - '/abs/path/to/filesdir/display_results.patch' - ] - - update_chromeos_llvm_hash.RemovePatchesFromFilesDir(patches_to_remove_list) - - self.assertEqual(mock_run_cmd.call_count, 2) - - @mock.patch.object(os.path, 'isfile', return_value=False) - def testInvalidPatchMetadataFileStagedForCommit(self, mock_isfile): - patch_metadata_path = '/abs/path/to/filesdir/PATCHES' - - # Verify the exception is raised when the absolute path to the patch - # metadata file does not exist or is not a file. - with self.assertRaises(ValueError) as err: - update_chromeos_llvm_hash.StagePatchMetadataFileForCommit( - patch_metadata_path) - - self.assertEqual( - str(err.exception), 'Invalid patch metadata file provided: ' - '%s' % patch_metadata_path) - - mock_isfile.assert_called_once() - - @mock.patch.object(os.path, 'isfile', return_value=True) - @mock.patch.object(subprocess, 'check_output', return_value=None) - def testSuccessfullyStagedPatchMetadataFileForCommit(self, mock_run_cmd, _): - - patch_metadata_path = '/abs/path/to/filesdir/PATCHES.json' - - update_chromeos_llvm_hash.StagePatchMetadataFileForCommit( - patch_metadata_path) - - mock_run_cmd.assert_called_once() - - def testNoPatchResultsForCommit(self): - package_1_patch_info_dict = { - 'applied_patches': ['display_results.patch'], - 'failed_patches': ['fixes_output.patch'], - 'non_applicable_patches': [], - 'disabled_patches': [], - 'removed_patches': [], - 'modified_metadata': None - } - - package_2_patch_info_dict = { - 'applied_patches': ['redirects_stdout.patch', 'fix_display.patch'], - 'failed_patches': [], - 'non_applicable_patches': [], - 'disabled_patches': [], - 'removed_patches': [], - 'modified_metadata': None - } - - test_package_info_dict = { - 'test-packages/package1': package_1_patch_info_dict, - 'test-packages/package2': package_2_patch_info_dict - } - - test_commit_message = ['Updated packages'] - - self.assertListEqual( - update_chromeos_llvm_hash.StagePackagesPatchResultsForCommit( - test_package_info_dict, test_commit_message), test_commit_message) - - @mock.patch.object(update_chromeos_llvm_hash, - 'StagePatchMetadataFileForCommit') - @mock.patch.object(update_chromeos_llvm_hash, 'RemovePatchesFromFilesDir') - def testAddedPatchResultsForCommit(self, mock_remove_patches, - mock_stage_patches_for_commit): - - package_1_patch_info_dict = { - 'applied_patches': [], - 'failed_patches': [], - 'non_applicable_patches': [], - 'disabled_patches': ['fixes_output.patch'], - 'removed_patches': [], - 'modified_metadata': '/abs/path/to/filesdir/PATCHES.json' - } - - package_2_patch_info_dict = { - 'applied_patches': ['fix_display.patch'], - 'failed_patches': [], - 'non_applicable_patches': [], - 'disabled_patches': [], - 
'removed_patches': ['/abs/path/to/filesdir/redirect_stdout.patch'], - 'modified_metadata': '/abs/path/to/filesdir/PATCHES.json' - } - - test_package_info_dict = { - 'test-packages/package1': package_1_patch_info_dict, - 'test-packages/package2': package_2_patch_info_dict - } - - test_commit_message = ['Updated packages'] - - expected_commit_messages = [ - 'Updated packages', '\nFor the package test-packages/package1:', - 'The patch metadata file PATCHES.json was modified', - 'The following patches were disabled:', 'fixes_output.patch', - '\nFor the package test-packages/package2:', - 'The patch metadata file PATCHES.json was modified', - 'The following patches were removed:', 'redirect_stdout.patch' - ] - - self.assertListEqual( - update_chromeos_llvm_hash.StagePackagesPatchResultsForCommit( - test_package_info_dict, test_commit_message), - expected_commit_messages) - - path_to_removed_patch = '/abs/path/to/filesdir/redirect_stdout.patch' - - mock_remove_patches.assert_called_once_with([path_to_removed_patch]) - - self.assertEqual(mock_stage_patches_for_commit.call_count, 2) - - @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion') - @mock.patch.object(update_chromeos_llvm_hash, - 'CreatePathDictionaryFromPackages') - @mock.patch.object(git, 'CreateBranch') - @mock.patch.object(update_chromeos_llvm_hash, 'UpdateEbuildLLVMHash') - @mock.patch.object(update_chromeos_llvm_hash, 'UprevEbuildSymlink') - @mock.patch.object(git, 'UploadChanges') - @mock.patch.object(git, 'DeleteBranch') - @mock.patch.object(os.path, 'realpath') - def testExceptionRaisedWhenUpdatingPackages( - self, mock_realpath, mock_delete_repo, mock_upload_changes, - mock_uprev_symlink, mock_update_llvm_next, mock_create_repo, - mock_create_path_dict, mock_llvm_major_version): - - path_to_package_dir = '/some/path/to/chroot/src/path/to' - abs_path_to_package = os.path.join(path_to_package_dir, 'package.ebuild') - symlink_path_to_package = os.path.join(path_to_package_dir, - 'package-r1.ebuild') - - mock_llvm_major_version.return_value = '1234' - - # Test function to simulate 'CreateBranch' when successfully created the - # branch on a valid repo path. - def SuccessfullyCreateBranchForChanges(_, branch): - self.assertEqual(branch, 'update-LLVM_NEXT_HASH-a123testhash4') - - # Test function to simulate 'UpdateEbuildLLVMHash' when successfully - # updated the ebuild's 'LLVM_NEXT_HASH'. - def SuccessfullyUpdatedLLVMHash(ebuild_path, _, git_hash, svn_version): - self.assertEqual(ebuild_path, abs_path_to_package) - self.assertEqual(git_hash, 'a123testhash4') - self.assertEqual(svn_version, 1000) - - # Test function to simulate 'UprevEbuildSymlink' when the symlink to the - # ebuild does not have a revision number. - def FailedToUprevEbuildSymlink(_): - # Raises a 'ValueError' exception because the symlink did not have have a - # revision number. - raise ValueError('Failed to uprev the ebuild.') - - # Test function to fail on 'UploadChanges' if the function gets called - # when an exception is raised. - def ShouldNotExecuteUploadChanges(*args): - # Test function should not be called (i.e. execution should resume in the - # 'finally' block) because 'UprevEbuildSymlink' raised an - # exception. 
- assert len(args) == 3 - assert False, ('Failed to go to "finally" block ' - 'after the exception was raised.') - - test_package_path_dict = {symlink_path_to_package: abs_path_to_package} - - # Simulate behavior of 'CreatePathDictionaryFromPackages()' when - # successfully created a dictionary where the key is the absolute path to - # the symlink of the package and value is the absolute path to the ebuild of - # the package. - mock_create_path_dict.return_value = test_package_path_dict - - # Use test function to simulate behavior. - mock_create_repo.side_effect = SuccessfullyCreateBranchForChanges - mock_update_llvm_next.side_effect = SuccessfullyUpdatedLLVMHash - mock_uprev_symlink.side_effect = FailedToUprevEbuildSymlink - mock_upload_changes.side_effect = ShouldNotExecuteUploadChanges - mock_realpath.return_value = '/abs/path/to/test-packages/package1.ebuild' - - packages_to_update = ['test-packages/package1'] - llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next - git_hash = 'a123testhash4' - svn_version = 1000 - chroot_path = Path('/some/path/to/chroot') - git_hash_source = 'google3' - branch = 'update-LLVM_NEXT_HASH-a123testhash4' - extra_commit_msg = None - - # Verify exception is raised when an exception is thrown within - # the 'try' block by UprevEbuildSymlink function. - with self.assertRaises(ValueError) as err: - update_chromeos_llvm_hash.UpdatePackages( - packages=packages_to_update, - manifest_packages=[], - llvm_variant=llvm_variant, - git_hash=git_hash, - svn_version=svn_version, - chroot_path=chroot_path, - mode=failure_modes.FailureModes.FAIL, - git_hash_source=git_hash_source, - extra_commit_msg=extra_commit_msg) - - self.assertEqual(str(err.exception), 'Failed to uprev the ebuild.') - - mock_create_path_dict.assert_called_once_with(chroot_path, - packages_to_update) - - mock_create_repo.assert_called_once_with(path_to_package_dir, branch) - - mock_update_llvm_next.assert_called_once_with(abs_path_to_package, - llvm_variant, git_hash, - svn_version) - - mock_uprev_symlink.assert_called_once_with(symlink_path_to_package) - - mock_upload_changes.assert_not_called() - - mock_delete_repo.assert_called_once_with(path_to_package_dir, branch) - - @mock.patch.object(update_chromeos_llvm_hash, 'EnsurePackageMaskContains') - @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion') - @mock.patch.object(update_chromeos_llvm_hash, - 'CreatePathDictionaryFromPackages') - @mock.patch.object(git, 'CreateBranch') - @mock.patch.object(update_chromeos_llvm_hash, 'UpdateEbuildLLVMHash') - @mock.patch.object(update_chromeos_llvm_hash, 'UprevEbuildSymlink') - @mock.patch.object(git, 'UploadChanges') - @mock.patch.object(git, 'DeleteBranch') - @mock.patch.object(update_chromeos_llvm_hash, - 'UpdatePackagesPatchMetadataFile') - @mock.patch.object(update_chromeos_llvm_hash, - 'StagePatchMetadataFileForCommit') - def testSuccessfullyUpdatedPackages( - self, mock_stage_patch_file, mock_update_package_metadata_file, - mock_delete_repo, mock_upload_changes, mock_uprev_symlink, - mock_update_llvm_next, mock_create_repo, mock_create_path_dict, - mock_llvm_version, mock_mask_contains): - - path_to_package_dir = '/some/path/to/chroot/src/path/to' - abs_path_to_package = os.path.join(path_to_package_dir, 'package.ebuild') - symlink_path_to_package = os.path.join(path_to_package_dir, - 'package-r1.ebuild') - - # Test function to simulate 'CreateBranch' when successfully created the - # branch for the changes to be made to the ebuild files. 
- def SuccessfullyCreateBranchForChanges(_, branch): - self.assertEqual(branch, 'update-LLVM_NEXT_HASH-a123testhash5') - - # Test function to simulate 'UploadChanges' after a successfull update of - # 'LLVM_NEXT_HASH" of the ebuild file. - def SuccessfullyUpdatedLLVMHash(ebuild_path, _, git_hash, svn_version): - self.assertEqual(ebuild_path, - '/some/path/to/chroot/src/path/to/package.ebuild') - self.assertEqual(git_hash, 'a123testhash5') - self.assertEqual(svn_version, 1000) - - # Test function to simulate 'UprevEbuildSymlink' when successfully - # incremented the revision number by 1. - def SuccessfullyUprevedEbuildSymlink(symlink_path): - self.assertEqual(symlink_path, - '/some/path/to/chroot/src/path/to/package-r1.ebuild') - - # Test function to simulate 'UpdatePackagesPatchMetadataFile()' when the - # patch results contains a disabled patch in 'disable_patches' mode. - def RetrievedPatchResults(chroot_path, svn_version, packages, mode): - - self.assertEqual(chroot_path, Path('/some/path/to/chroot')) - self.assertEqual(svn_version, 1000) - self.assertListEqual(packages, ['path/to']) - self.assertEqual(mode, failure_modes.FailureModes.DISABLE_PATCHES) - - patch_metadata_file = 'PATCHES.json' - PatchInfo = collections.namedtuple('PatchInfo', [ - 'applied_patches', 'failed_patches', 'non_applicable_patches', - 'disabled_patches', 'removed_patches', 'modified_metadata' - ]) - - package_patch_info = PatchInfo( - applied_patches=['fix_display.patch'], - failed_patches=['fix_stdout.patch'], - non_applicable_patches=[], - disabled_patches=['fix_stdout.patch'], - removed_patches=[], - modified_metadata='/abs/path/to/filesdir/%s' % patch_metadata_file) - - package_info_dict = {'path/to': package_patch_info._asdict()} - - # Returns a dictionary where the key is the package and the value is a - # dictionary that contains information about the package's patch results - # produced by the patch manager. - return package_info_dict - - # Test function to simulate 'UploadChanges()' when successfully created a - # commit for the changes made to the packages and their patches and - # retrieved the change list of the commit. - def SuccessfullyUploadedChanges(*args): - assert len(args) == 3 - commit_url = 'https://some_name/path/to/commit/+/12345' - return git.CommitContents(url=commit_url, cl_number=12345) - - test_package_path_dict = {symlink_path_to_package: abs_path_to_package} - - # Simulate behavior of 'CreatePathDictionaryFromPackages()' when - # successfully created a dictionary where the key is the absolute path to - # the symlink of the package and value is the absolute path to the ebuild of - # the package. - mock_create_path_dict.return_value = test_package_path_dict - - # Use test function to simulate behavior. 
- mock_create_repo.side_effect = SuccessfullyCreateBranchForChanges - mock_update_llvm_next.side_effect = SuccessfullyUpdatedLLVMHash - mock_uprev_symlink.side_effect = SuccessfullyUprevedEbuildSymlink - mock_update_package_metadata_file.side_effect = RetrievedPatchResults - mock_upload_changes.side_effect = SuccessfullyUploadedChanges - mock_llvm_version.return_value = '1234' - mock_mask_contains.reurn_value = None - - packages_to_update = ['test-packages/package1'] - llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next - git_hash = 'a123testhash5' - svn_version = 1000 - chroot_path = Path('/some/path/to/chroot') - git_hash_source = 'tot' - branch = 'update-LLVM_NEXT_HASH-a123testhash5' - extra_commit_msg = '\ncommit-message-end' - - change_list = update_chromeos_llvm_hash.UpdatePackages( - packages=packages_to_update, - manifest_packages=[], - llvm_variant=llvm_variant, - git_hash=git_hash, - svn_version=svn_version, - chroot_path=chroot_path, - mode=failure_modes.FailureModes.DISABLE_PATCHES, - git_hash_source=git_hash_source, - extra_commit_msg=extra_commit_msg) - - self.assertEqual(change_list.url, - 'https://some_name/path/to/commit/+/12345') - - self.assertEqual(change_list.cl_number, 12345) - - mock_create_path_dict.assert_called_once_with(chroot_path, - packages_to_update) - - mock_create_repo.assert_called_once_with(path_to_package_dir, branch) - - mock_update_llvm_next.assert_called_once_with(abs_path_to_package, - llvm_variant, git_hash, - svn_version) - - mock_uprev_symlink.assert_called_once_with(symlink_path_to_package) - - mock_mask_contains.assert_called_once_with(chroot_path, git_hash) - - expected_commit_messages = [ - 'llvm-next/tot: upgrade to a123testhash5 (r1000)\n', - 'The following packages have been updated:', 'path/to', - '\nFor the package path/to:', - 'The patch metadata file PATCHES.json was modified', - 'The following patches were disabled:', 'fix_stdout.patch', - '\ncommit-message-end' - ] - - mock_update_package_metadata_file.assert_called_once() - - mock_stage_patch_file.assert_called_once_with( - '/abs/path/to/filesdir/PATCHES.json') - - mock_upload_changes.assert_called_once_with(path_to_package_dir, branch, - expected_commit_messages) - - mock_delete_repo.assert_called_once_with(path_to_package_dir, branch) - - @mock.patch.object(subprocess, 'check_output', return_value=None) - @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion') - def testEnsurePackageMaskContainsExisting(self, mock_llvm_version, - mock_git_add): - chroot_path = 'absolute/path/to/chroot' - git_hash = 'badf00d' - mock_llvm_version.return_value = '1234' - with mock.patch( - 'update_chromeos_llvm_hash.open', - mock.mock_open(read_data='\n=sys-devel/llvm-1234.0_pre*\n'), - create=True) as mock_file: - update_chromeos_llvm_hash.EnsurePackageMaskContains( - chroot_path, git_hash) - handle = mock_file() - handle.write.assert_not_called() - mock_llvm_version.assert_called_once_with(git_hash) - - overlay_dir = 'absolute/path/to/chroot/src/third_party/chromiumos-overlay' - mask_path = overlay_dir + '/profiles/targets/chromeos/package.mask' - mock_git_add.assert_called_once_with( - ['git', '-C', overlay_dir, 'add', mask_path]) - - @mock.patch.object(subprocess, 'check_output', return_value=None) - @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion') - def testEnsurePackageMaskContainsNotExisting(self, mock_llvm_version, - mock_git_add): - chroot_path = 'absolute/path/to/chroot' - git_hash = 'badf00d' - mock_llvm_version.return_value = '1234' - with 
mock.patch('update_chromeos_llvm_hash.open', - mock.mock_open(read_data='nothing relevant'), - create=True) as mock_file: - update_chromeos_llvm_hash.EnsurePackageMaskContains( - chroot_path, git_hash) - handle = mock_file() - handle.write.assert_called_once_with('=sys-devel/llvm-1234.0_pre*\n') - mock_llvm_version.assert_called_once_with(git_hash) - - overlay_dir = 'absolute/path/to/chroot/src/third_party/chromiumos-overlay' - mask_path = overlay_dir + '/profiles/targets/chromeos/package.mask' - mock_git_add.assert_called_once_with( - ['git', '-C', overlay_dir, 'add', mask_path]) - - -if __name__ == '__main__': - unittest.main() + """Test class for updating LLVM hashes of packages.""" + + @mock.patch.object(os.path, "realpath") + def testDefaultCrosRootFromCrOSCheckout(self, mock_llvm_tools): + llvm_tools_path = ( + "/path/to/cros/src/third_party/toolchain-utils/llvm_tools" + ) + mock_llvm_tools.return_value = llvm_tools_path + self.assertEqual( + update_chromeos_llvm_hash.defaultCrosRoot(), Path("/path/to/cros") + ) + + @mock.patch.object(os.path, "realpath") + def testDefaultCrosRootFromOutsideCrOSCheckout(self, mock_llvm_tools): + mock_llvm_tools.return_value = "~/toolchain-utils/llvm_tools" + self.assertEqual( + update_chromeos_llvm_hash.defaultCrosRoot(), + Path.home() / "chromiumos", + ) + + # Simulate behavior of 'os.path.isfile()' when the ebuild path to a package + # does not exist. + @mock.patch.object(os.path, "isfile", return_value=False) + def testFailedToUpdateLLVMHashForInvalidEbuildPath(self, mock_isfile): + ebuild_path = "/some/path/to/package.ebuild" + llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current + git_hash = "a123testhash1" + svn_version = 1000 + + # Verify the exception is raised when the ebuild path does not exist. + with self.assertRaises(ValueError) as err: + update_chromeos_llvm_hash.UpdateEbuildLLVMHash( + ebuild_path, llvm_variant, git_hash, svn_version + ) + + self.assertEqual( + str(err.exception), "Invalid ebuild path provided: %s" % ebuild_path + ) + + mock_isfile.assert_called_once() + + # Simulate 'os.path.isfile' behavior on a valid ebuild path. + @mock.patch.object(os.path, "isfile", return_value=True) + def testFailedToUpdateLLVMHash(self, mock_isfile): + # Create a temporary file to simulate an ebuild file of a package. + with test_helpers.CreateTemporaryJsonFile() as ebuild_file: + with open(ebuild_file, "w") as f: + f.write( + "\n".join( + [ + "First line in the ebuild", + "Second line in the ebuild", + "Last line in the ebuild", + ] + ) + ) + + llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current + git_hash = "a123testhash1" + svn_version = 1000 + + # Verify the exception is raised when the ebuild file does not have + # 'LLVM_HASH'. + with self.assertRaises(ValueError) as err: + update_chromeos_llvm_hash.UpdateEbuildLLVMHash( + ebuild_file, llvm_variant, git_hash, svn_version + ) + + self.assertEqual(str(err.exception), "Failed to update LLVM_HASH") + + llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next + + self.assertEqual(mock_isfile.call_count, 2) + + # Simulate 'os.path.isfile' behavior on a valid ebuild path. + @mock.patch.object(os.path, "isfile", return_value=True) + def testFailedToUpdateLLVMNextHash(self, mock_isfile): + # Create a temporary file to simulate an ebuild file of a package. 
+ with test_helpers.CreateTemporaryJsonFile() as ebuild_file: + with open(ebuild_file, "w") as f: + f.write( + "\n".join( + [ + "First line in the ebuild", + "Second line in the ebuild", + "Last line in the ebuild", + ] + ) + ) + + llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next + git_hash = "a123testhash1" + svn_version = 1000 + + # Verify the exception is raised when the ebuild file does not have + # 'LLVM_NEXT_HASH'. + with self.assertRaises(ValueError) as err: + update_chromeos_llvm_hash.UpdateEbuildLLVMHash( + ebuild_file, llvm_variant, git_hash, svn_version + ) + + self.assertEqual( + str(err.exception), "Failed to update LLVM_NEXT_HASH" + ) + + self.assertEqual(mock_isfile.call_count, 2) + + @mock.patch.object(os.path, "isfile", return_value=True) + @mock.patch.object(subprocess, "check_output", return_value=None) + def testSuccessfullyStageTheEbuildForCommitForLLVMHashUpdate( + self, mock_stage_commit_command, mock_isfile + ): + + # Create a temporary file to simulate an ebuild file of a package. + with test_helpers.CreateTemporaryJsonFile() as ebuild_file: + # Updates LLVM_HASH to 'git_hash' and revision to + # 'svn_version'. + llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current + git_hash = "a123testhash1" + svn_version = 1000 + + with open(ebuild_file, "w") as f: + f.write( + "\n".join( + [ + "First line in the ebuild", + "Second line in the ebuild", + 'LLVM_HASH="a12b34c56d78e90" # r500', + "Last line in the ebuild", + ] + ) + ) + + update_chromeos_llvm_hash.UpdateEbuildLLVMHash( + ebuild_file, llvm_variant, git_hash, svn_version + ) + + expected_file_contents = [ + "First line in the ebuild\n", + "Second line in the ebuild\n", + 'LLVM_HASH="a123testhash1" # r1000\n', + "Last line in the ebuild", + ] + + # Verify the new file contents of the ebuild file match the expected file + # contents. + with open(ebuild_file) as new_file: + file_contents_as_a_list = [cur_line for cur_line in new_file] + self.assertListEqual( + file_contents_as_a_list, expected_file_contents + ) + + self.assertEqual(mock_isfile.call_count, 2) + + mock_stage_commit_command.assert_called_once() + + @mock.patch.object(os.path, "isfile", return_value=True) + @mock.patch.object(subprocess, "check_output", return_value=None) + def testSuccessfullyStageTheEbuildForCommitForLLVMNextHashUpdate( + self, mock_stage_commit_command, mock_isfile + ): + + # Create a temporary file to simulate an ebuild file of a package. + with test_helpers.CreateTemporaryJsonFile() as ebuild_file: + # Updates LLVM_NEXT_HASH to 'git_hash' and revision to + # 'svn_version'. + llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next + git_hash = "a123testhash1" + svn_version = 1000 + + with open(ebuild_file, "w") as f: + f.write( + "\n".join( + [ + "First line in the ebuild", + "Second line in the ebuild", + 'LLVM_NEXT_HASH="a12b34c56d78e90" # r500', + "Last line in the ebuild", + ] + ) + ) + + update_chromeos_llvm_hash.UpdateEbuildLLVMHash( + ebuild_file, llvm_variant, git_hash, svn_version + ) + + expected_file_contents = [ + "First line in the ebuild\n", + "Second line in the ebuild\n", + 'LLVM_NEXT_HASH="a123testhash1" # r1000\n', + "Last line in the ebuild", + ] + + # Verify the new file contents of the ebuild file match the expected file + # contents. 
+            with open(ebuild_file) as new_file:
+                file_contents_as_a_list = [cur_line for cur_line in new_file]
+                self.assertListEqual(
+                    file_contents_as_a_list, expected_file_contents
+                )
+
+        self.assertEqual(mock_isfile.call_count, 2)
+
+        mock_stage_commit_command.assert_called_once()
+
+    @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion")
+    @mock.patch.object(os.path, "islink", return_value=False)
+    def testFailedToUprevEbuildToVersionForInvalidSymlink(
+        self, mock_islink, mock_llvm_version
+    ):
+        symlink_path = "/path/to/chroot/package/package.ebuild"
+        svn_version = 1000
+        git_hash = "badf00d"
+        mock_llvm_version.return_value = "1234"
+
+        # Verify the exception is raised when an invalid symbolic link is passed in.
+        with self.assertRaises(ValueError) as err:
+            update_chromeos_llvm_hash.UprevEbuildToVersion(
+                symlink_path, svn_version, git_hash
+            )
+
+        self.assertEqual(
+            str(err.exception), "Invalid symlink provided: %s" % symlink_path
+        )
+
+        mock_islink.assert_called_once()
+        mock_llvm_version.assert_not_called()
+
+    @mock.patch.object(os.path, "islink", return_value=False)
+    def testFailedToUprevEbuildSymlinkForInvalidSymlink(self, mock_islink):
+        symlink_path = "/path/to/chroot/package/package.ebuild"
+
+        # Verify the exception is raised when an invalid symbolic link is passed in.
+        with self.assertRaises(ValueError) as err:
+            update_chromeos_llvm_hash.UprevEbuildSymlink(symlink_path)
+
+        self.assertEqual(
+            str(err.exception), "Invalid symlink provided: %s" % symlink_path
+        )
+
+        mock_islink.assert_called_once()
+
+    @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion")
+    # Simulate 'os.path.islink' when a symbolic link is passed in.
+    @mock.patch.object(os.path, "islink", return_value=True)
+    # Simulate 'os.path.realpath' when a symbolic link is passed in.
+    @mock.patch.object(os.path, "realpath", return_value=True)
+    def testFailedToUprevEbuildToVersion(
+        self, mock_realpath, mock_islink, mock_llvm_version
+    ):
+        symlink_path = "/path/to/chroot/llvm/llvm_pre123_p.ebuild"
+        mock_realpath.return_value = "/abs/path/to/llvm/llvm_pre123_p.ebuild"
+        git_hash = "badf00d"
+        mock_llvm_version.return_value = "1234"
+        svn_version = 1000
+
+        # Verify the exception is raised when the symlink does not match the
+        # expected pattern.
+        with self.assertRaises(ValueError) as err:
+            update_chromeos_llvm_hash.UprevEbuildToVersion(
+                symlink_path, svn_version, git_hash
+            )
+
+        self.assertEqual(str(err.exception), "Failed to uprev the ebuild.")
+
+        mock_llvm_version.assert_called_once_with(git_hash)
+        mock_islink.assert_called_once_with(symlink_path)
+
+    # Simulate 'os.path.islink' when a symbolic link is passed in.
+ @mock.patch.object(os.path, "islink", return_value=True) + def testFailedToUprevEbuildSymlink(self, mock_islink): + symlink_path = "/path/to/chroot/llvm/llvm_pre123_p.ebuild" + + # Verify the exception is raised when the symlink does not match the + # expected pattern + with self.assertRaises(ValueError) as err: + update_chromeos_llvm_hash.UprevEbuildSymlink(symlink_path) + + self.assertEqual(str(err.exception), "Failed to uprev the symlink.") + + mock_islink.assert_called_once_with(symlink_path) + + @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion") + @mock.patch.object(os.path, "islink", return_value=True) + @mock.patch.object(os.path, "realpath") + @mock.patch.object(subprocess, "check_output", return_value=None) + def testSuccessfullyUprevEbuildToVersionLLVM( + self, mock_command_output, mock_realpath, mock_islink, mock_llvm_version + ): + symlink = "/path/to/llvm/llvm-12.0_pre3_p2-r10.ebuild" + ebuild = "/abs/path/to/llvm/llvm-12.0_pre3_p2.ebuild" + mock_realpath.return_value = ebuild + git_hash = "badf00d" + mock_llvm_version.return_value = "1234" + svn_version = 1000 + + update_chromeos_llvm_hash.UprevEbuildToVersion( + symlink, svn_version, git_hash + ) + + mock_llvm_version.assert_called_once_with(git_hash) + + mock_islink.assert_called() + + mock_realpath.assert_called_once_with(symlink) + + mock_command_output.assert_called() + + # Verify commands + symlink_dir = os.path.dirname(symlink) + timestamp = datetime.datetime.today().strftime("%Y%m%d") + new_ebuild = ( + "/abs/path/to/llvm/llvm-1234.0_pre1000_p%s.ebuild" % timestamp + ) + new_symlink = new_ebuild[: -len(".ebuild")] + "-r1.ebuild" + + expected_cmd = ["git", "-C", symlink_dir, "mv", ebuild, new_ebuild] + self.assertEqual( + mock_command_output.call_args_list[0], mock.call(expected_cmd) + ) + + expected_cmd = ["ln", "-s", "-r", new_ebuild, new_symlink] + self.assertEqual( + mock_command_output.call_args_list[1], mock.call(expected_cmd) + ) + + expected_cmd = ["git", "-C", symlink_dir, "add", new_symlink] + self.assertEqual( + mock_command_output.call_args_list[2], mock.call(expected_cmd) + ) + + expected_cmd = ["git", "-C", symlink_dir, "rm", symlink] + self.assertEqual( + mock_command_output.call_args_list[3], mock.call(expected_cmd) + ) + + @mock.patch.object( + chroot, + "GetChrootEbuildPaths", + return_value=["/chroot/path/test.ebuild"], + ) + @mock.patch.object(subprocess, "check_output", return_value="") + def testManifestUpdate(self, mock_subprocess, mock_ebuild_paths): + manifest_packages = ["sys-devel/llvm"] + chroot_path = "/path/to/chroot" + update_chromeos_llvm_hash.UpdateManifests( + manifest_packages, chroot_path + ) + + args = mock_subprocess.call_args[0][-1] + manifest_cmd = [ + "cros_sdk", + "--", + "ebuild", + "/chroot/path/test.ebuild", + "manifest", + ] + self.assertEqual(args, manifest_cmd) + mock_ebuild_paths.assert_called_once() + + @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion") + @mock.patch.object(os.path, "islink", return_value=True) + @mock.patch.object(os.path, "realpath") + @mock.patch.object(subprocess, "check_output", return_value=None) + def testSuccessfullyUprevEbuildToVersionNonLLVM( + self, mock_command_output, mock_realpath, mock_islink, mock_llvm_version + ): + symlink = ( + "/abs/path/to/compiler-rt/compiler-rt-12.0_pre314159265-r4.ebuild" + ) + ebuild = "/abs/path/to/compiler-rt/compiler-rt-12.0_pre314159265.ebuild" + mock_realpath.return_value = ebuild + mock_llvm_version.return_value = "1234" + svn_version = 1000 + git_hash = "5678" + + 
update_chromeos_llvm_hash.UprevEbuildToVersion(
+            symlink, svn_version, git_hash
+        )
+
+        mock_islink.assert_called()
+
+        mock_realpath.assert_called_once_with(symlink)
+
+        mock_llvm_version.assert_called_once_with(git_hash)
+
+        mock_command_output.assert_called()
+
+        # Verify commands
+        symlink_dir = os.path.dirname(symlink)
+        new_ebuild = (
+            "/abs/path/to/compiler-rt/compiler-rt-1234.0_pre1000.ebuild"
+        )
+        new_symlink = new_ebuild[: -len(".ebuild")] + "-r1.ebuild"
+
+        expected_cmd = ["git", "-C", symlink_dir, "mv", ebuild, new_ebuild]
+        self.assertEqual(
+            mock_command_output.call_args_list[0], mock.call(expected_cmd)
+        )
+
+        expected_cmd = ["ln", "-s", "-r", new_ebuild, new_symlink]
+        self.assertEqual(
+            mock_command_output.call_args_list[1], mock.call(expected_cmd)
+        )
+
+        expected_cmd = ["git", "-C", symlink_dir, "add", new_symlink]
+        self.assertEqual(
+            mock_command_output.call_args_list[2], mock.call(expected_cmd)
+        )
+
+        expected_cmd = ["git", "-C", symlink_dir, "rm", symlink]
+        self.assertEqual(
+            mock_command_output.call_args_list[3], mock.call(expected_cmd)
+        )
+
+    @mock.patch.object(os.path, "islink", return_value=True)
+    @mock.patch.object(subprocess, "check_output", return_value=None)
+    def testSuccessfullyUprevEbuildSymlink(
+        self, mock_command_output, mock_islink
+    ):
+        symlink_to_uprev = "/symlink/to/package-r1.ebuild"
+
+        update_chromeos_llvm_hash.UprevEbuildSymlink(symlink_to_uprev)
+
+        mock_islink.assert_called_once_with(symlink_to_uprev)
+
+        mock_command_output.assert_called_once()
+
+    # Simulate behavior of 'os.path.isdir()' when the path to the repo is not
+    # a directory.
+    @mock.patch.object(chroot, "GetChrootEbuildPaths")
+    @mock.patch.object(chroot, "ConvertChrootPathsToAbsolutePaths")
+    def testExceptionRaisedWhenCreatingPathDictionaryFromPackages(
+        self, mock_chroot_paths_to_symlinks, mock_get_chroot_paths
+    ):
+
+        chroot_path = "/some/path/to/chroot"
+
+        package_name = "test-pckg/package"
+        package_chroot_path = "/some/chroot/path/to/package-r1.ebuild"
+
+        # Test function to simulate 'ConvertChrootPathsToAbsolutePaths' when a
+        # symlink does not start with the prefix '/mnt/host/source'.
+        def BadPrefixChrootPath(*args):
+            assert len(args) == 2
+            raise ValueError(
+                "Invalid prefix for the chroot path: "
+                "%s" % package_chroot_path
+            )
+
+        # Simulate 'GetChrootEbuildPaths' when valid packages are passed in.
+        #
+        # Returns a list of chroot paths.
+        mock_get_chroot_paths.return_value = [package_chroot_path]
+
+        # Use test function to simulate 'ConvertChrootPathsToAbsolutePaths'
+        # behavior.
+        mock_chroot_paths_to_symlinks.side_effect = BadPrefixChrootPath
+
+        # Verify the exception is raised for an invalid prefix in the symlink.
+ with self.assertRaises(ValueError) as err: + update_chromeos_llvm_hash.CreatePathDictionaryFromPackages( + chroot_path, [package_name] + ) + + self.assertEqual( + str(err.exception), + "Invalid prefix for the chroot path: " "%s" % package_chroot_path, + ) + + mock_get_chroot_paths.assert_called_once_with( + chroot_path, [package_name] + ) + + mock_chroot_paths_to_symlinks.assert_called_once_with( + chroot_path, [package_chroot_path] + ) + + @mock.patch.object(chroot, "GetChrootEbuildPaths") + @mock.patch.object(chroot, "ConvertChrootPathsToAbsolutePaths") + @mock.patch.object( + update_chromeos_llvm_hash, "GetEbuildPathsFromSymLinkPaths" + ) + def testSuccessfullyCreatedPathDictionaryFromPackages( + self, + mock_ebuild_paths_from_symlink_paths, + mock_chroot_paths_to_symlinks, + mock_get_chroot_paths, + ): + + package_chroot_path = "/mnt/host/source/src/path/to/package-r1.ebuild" + + # Simulate 'GetChrootEbuildPaths' when returning a chroot path for a valid + # package. + # + # Returns a list of chroot paths. + mock_get_chroot_paths.return_value = [package_chroot_path] + + package_symlink_path = ( + "/some/path/to/chroot/src/path/to/package-r1.ebuild" + ) + + # Simulate 'ConvertChrootPathsToAbsolutePaths' when returning a symlink to + # a chroot path that points to a package. + # + # Returns a list of symlink file paths. + mock_chroot_paths_to_symlinks.return_value = [package_symlink_path] + + chroot_package_path = "/some/path/to/chroot/src/path/to/package.ebuild" + + # Simulate 'GetEbuildPathsFromSymlinkPaths' when returning a dictionary of + # a symlink that points to an ebuild. + # + # Returns a dictionary of a symlink and ebuild file path pair + # where the key is the absolute path to the symlink of the ebuild file + # and the value is the absolute path to the ebuild file of the package. + mock_ebuild_paths_from_symlink_paths.return_value = { + package_symlink_path: chroot_package_path + } + + chroot_path = "/some/path/to/chroot" + package_name = "test-pckg/package" + + self.assertEqual( + update_chromeos_llvm_hash.CreatePathDictionaryFromPackages( + chroot_path, [package_name] + ), + {package_symlink_path: chroot_package_path}, + ) + + mock_get_chroot_paths.assert_called_once_with( + chroot_path, [package_name] + ) + + mock_chroot_paths_to_symlinks.assert_called_once_with( + chroot_path, [package_chroot_path] + ) + + mock_ebuild_paths_from_symlink_paths.assert_called_once_with( + [package_symlink_path] + ) + + @mock.patch.object(subprocess, "check_output", return_value=None) + def testSuccessfullyRemovedPatchesFromFilesDir(self, mock_run_cmd): + patches_to_remove_list = [ + "/abs/path/to/filesdir/cherry/fix_output.patch", + "/abs/path/to/filesdir/display_results.patch", + ] + + update_chromeos_llvm_hash.RemovePatchesFromFilesDir( + patches_to_remove_list + ) + + self.assertEqual(mock_run_cmd.call_count, 2) + + @mock.patch.object(os.path, "isfile", return_value=False) + def testInvalidPatchMetadataFileStagedForCommit(self, mock_isfile): + patch_metadata_path = "/abs/path/to/filesdir/PATCHES" + + # Verify the exception is raised when the absolute path to the patch + # metadata file does not exist or is not a file. 
+ with self.assertRaises(ValueError) as err: + update_chromeos_llvm_hash.StagePatchMetadataFileForCommit( + patch_metadata_path + ) + + self.assertEqual( + str(err.exception), + "Invalid patch metadata file provided: " "%s" % patch_metadata_path, + ) + + mock_isfile.assert_called_once() + + @mock.patch.object(os.path, "isfile", return_value=True) + @mock.patch.object(subprocess, "check_output", return_value=None) + def testSuccessfullyStagedPatchMetadataFileForCommit(self, mock_run_cmd, _): + + patch_metadata_path = "/abs/path/to/filesdir/PATCHES.json" + + update_chromeos_llvm_hash.StagePatchMetadataFileForCommit( + patch_metadata_path + ) + + mock_run_cmd.assert_called_once() + + def testNoPatchResultsForCommit(self): + package_1_patch_info_dict = { + "applied_patches": ["display_results.patch"], + "failed_patches": ["fixes_output.patch"], + "non_applicable_patches": [], + "disabled_patches": [], + "removed_patches": [], + "modified_metadata": None, + } + + package_2_patch_info_dict = { + "applied_patches": ["redirects_stdout.patch", "fix_display.patch"], + "failed_patches": [], + "non_applicable_patches": [], + "disabled_patches": [], + "removed_patches": [], + "modified_metadata": None, + } + + test_package_info_dict = { + "test-packages/package1": package_1_patch_info_dict, + "test-packages/package2": package_2_patch_info_dict, + } + + test_commit_message = ["Updated packages"] + + self.assertListEqual( + update_chromeos_llvm_hash.StagePackagesPatchResultsForCommit( + test_package_info_dict, test_commit_message + ), + test_commit_message, + ) + + @mock.patch.object( + update_chromeos_llvm_hash, "StagePatchMetadataFileForCommit" + ) + @mock.patch.object(update_chromeos_llvm_hash, "RemovePatchesFromFilesDir") + def testAddedPatchResultsForCommit( + self, mock_remove_patches, mock_stage_patches_for_commit + ): + + package_1_patch_info_dict = { + "applied_patches": [], + "failed_patches": [], + "non_applicable_patches": [], + "disabled_patches": ["fixes_output.patch"], + "removed_patches": [], + "modified_metadata": "/abs/path/to/filesdir/PATCHES.json", + } + + package_2_patch_info_dict = { + "applied_patches": ["fix_display.patch"], + "failed_patches": [], + "non_applicable_patches": [], + "disabled_patches": [], + "removed_patches": ["/abs/path/to/filesdir/redirect_stdout.patch"], + "modified_metadata": "/abs/path/to/filesdir/PATCHES.json", + } + + test_package_info_dict = { + "test-packages/package1": package_1_patch_info_dict, + "test-packages/package2": package_2_patch_info_dict, + } + + test_commit_message = ["Updated packages"] + + expected_commit_messages = [ + "Updated packages", + "\nFor the package test-packages/package1:", + "The patch metadata file PATCHES.json was modified", + "The following patches were disabled:", + "fixes_output.patch", + "\nFor the package test-packages/package2:", + "The patch metadata file PATCHES.json was modified", + "The following patches were removed:", + "redirect_stdout.patch", + ] + + self.assertListEqual( + update_chromeos_llvm_hash.StagePackagesPatchResultsForCommit( + test_package_info_dict, test_commit_message + ), + expected_commit_messages, + ) + + path_to_removed_patch = "/abs/path/to/filesdir/redirect_stdout.patch" + + mock_remove_patches.assert_called_once_with([path_to_removed_patch]) + + self.assertEqual(mock_stage_patches_for_commit.call_count, 2) + + @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion") + @mock.patch.object( + update_chromeos_llvm_hash, "CreatePathDictionaryFromPackages" + ) + @mock.patch.object(git, 
"CreateBranch") + @mock.patch.object(update_chromeos_llvm_hash, "UpdateEbuildLLVMHash") + @mock.patch.object(update_chromeos_llvm_hash, "UprevEbuildSymlink") + @mock.patch.object(git, "UploadChanges") + @mock.patch.object(git, "DeleteBranch") + @mock.patch.object(os.path, "realpath") + def testExceptionRaisedWhenUpdatingPackages( + self, + mock_realpath, + mock_delete_repo, + mock_upload_changes, + mock_uprev_symlink, + mock_update_llvm_next, + mock_create_repo, + mock_create_path_dict, + mock_llvm_major_version, + ): + + path_to_package_dir = "/some/path/to/chroot/src/path/to" + abs_path_to_package = os.path.join( + path_to_package_dir, "package.ebuild" + ) + symlink_path_to_package = os.path.join( + path_to_package_dir, "package-r1.ebuild" + ) + + mock_llvm_major_version.return_value = "1234" + + # Test function to simulate 'CreateBranch' when successfully created the + # branch on a valid repo path. + def SuccessfullyCreateBranchForChanges(_, branch): + self.assertEqual(branch, "update-LLVM_NEXT_HASH-a123testhash4") + + # Test function to simulate 'UpdateEbuildLLVMHash' when successfully + # updated the ebuild's 'LLVM_NEXT_HASH'. + def SuccessfullyUpdatedLLVMHash(ebuild_path, _, git_hash, svn_version): + self.assertEqual(ebuild_path, abs_path_to_package) + self.assertEqual(git_hash, "a123testhash4") + self.assertEqual(svn_version, 1000) + + # Test function to simulate 'UprevEbuildSymlink' when the symlink to the + # ebuild does not have a revision number. + def FailedToUprevEbuildSymlink(_): + # Raises a 'ValueError' exception because the symlink did not have have a + # revision number. + raise ValueError("Failed to uprev the ebuild.") + + # Test function to fail on 'UploadChanges' if the function gets called + # when an exception is raised. + def ShouldNotExecuteUploadChanges(*args): + # Test function should not be called (i.e. execution should resume in the + # 'finally' block) because 'UprevEbuildSymlink' raised an + # exception. + assert len(args) == 3 + assert False, ( + 'Failed to go to "finally" block ' + "after the exception was raised." + ) + + test_package_path_dict = {symlink_path_to_package: abs_path_to_package} + + # Simulate behavior of 'CreatePathDictionaryFromPackages()' when + # successfully created a dictionary where the key is the absolute path to + # the symlink of the package and value is the absolute path to the ebuild of + # the package. + mock_create_path_dict.return_value = test_package_path_dict + + # Use test function to simulate behavior. + mock_create_repo.side_effect = SuccessfullyCreateBranchForChanges + mock_update_llvm_next.side_effect = SuccessfullyUpdatedLLVMHash + mock_uprev_symlink.side_effect = FailedToUprevEbuildSymlink + mock_upload_changes.side_effect = ShouldNotExecuteUploadChanges + mock_realpath.return_value = ( + "/abs/path/to/test-packages/package1.ebuild" + ) + + packages_to_update = ["test-packages/package1"] + llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next + git_hash = "a123testhash4" + svn_version = 1000 + chroot_path = Path("/some/path/to/chroot") + git_hash_source = "google3" + branch = "update-LLVM_NEXT_HASH-a123testhash4" + extra_commit_msg = None + + # Verify exception is raised when an exception is thrown within + # the 'try' block by UprevEbuildSymlink function. 
+ with self.assertRaises(ValueError) as err:
+ update_chromeos_llvm_hash.UpdatePackages(
+ packages=packages_to_update,
+ manifest_packages=[],
+ llvm_variant=llvm_variant,
+ git_hash=git_hash,
+ svn_version=svn_version,
+ chroot_path=chroot_path,
+ mode=failure_modes.FailureModes.FAIL,
+ git_hash_source=git_hash_source,
+ extra_commit_msg=extra_commit_msg,
+ )
+
+ self.assertEqual(str(err.exception), "Failed to uprev the ebuild.")
+
+ mock_create_path_dict.assert_called_once_with(
+ chroot_path, packages_to_update
+ )
+
+ mock_create_repo.assert_called_once_with(path_to_package_dir, branch)
+
+ mock_update_llvm_next.assert_called_once_with(
+ abs_path_to_package, llvm_variant, git_hash, svn_version
+ )
+
+ mock_uprev_symlink.assert_called_once_with(symlink_path_to_package)
+
+ mock_upload_changes.assert_not_called()
+
+ mock_delete_repo.assert_called_once_with(path_to_package_dir, branch)
+
+ @mock.patch.object(update_chromeos_llvm_hash, "EnsurePackageMaskContains")
+ @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion")
+ @mock.patch.object(
+ update_chromeos_llvm_hash, "CreatePathDictionaryFromPackages"
+ )
+ @mock.patch.object(git, "CreateBranch")
+ @mock.patch.object(update_chromeos_llvm_hash, "UpdateEbuildLLVMHash")
+ @mock.patch.object(update_chromeos_llvm_hash, "UprevEbuildSymlink")
+ @mock.patch.object(git, "UploadChanges")
+ @mock.patch.object(git, "DeleteBranch")
+ @mock.patch.object(
+ update_chromeos_llvm_hash, "UpdatePackagesPatchMetadataFile"
+ )
+ @mock.patch.object(
+ update_chromeos_llvm_hash, "StagePatchMetadataFileForCommit"
+ )
+ def testSuccessfullyUpdatedPackages(
+ self,
+ mock_stage_patch_file,
+ mock_update_package_metadata_file,
+ mock_delete_repo,
+ mock_upload_changes,
+ mock_uprev_symlink,
+ mock_update_llvm_next,
+ mock_create_repo,
+ mock_create_path_dict,
+ mock_llvm_version,
+ mock_mask_contains,
+ ):
+
+ path_to_package_dir = "/some/path/to/chroot/src/path/to"
+ abs_path_to_package = os.path.join(
+ path_to_package_dir, "package.ebuild"
+ )
+ symlink_path_to_package = os.path.join(
+ path_to_package_dir, "package-r1.ebuild"
+ )
+
+ # Test function to simulate 'CreateBranch' when successfully created the
+ # branch for the changes to be made to the ebuild files.
+ def SuccessfullyCreateBranchForChanges(_, branch):
+ self.assertEqual(branch, "update-LLVM_NEXT_HASH-a123testhash5")
+
+ # Test function to simulate 'UpdateEbuildLLVMHash' after a successful
+ # update of the ebuild's 'LLVM_NEXT_HASH'.
+ def SuccessfullyUpdatedLLVMHash(ebuild_path, _, git_hash, svn_version):
+ self.assertEqual(
+ ebuild_path, "/some/path/to/chroot/src/path/to/package.ebuild"
+ )
+ self.assertEqual(git_hash, "a123testhash5")
+ self.assertEqual(svn_version, 1000)
+
+ # Test function to simulate 'UprevEbuildSymlink' when successfully
+ # incremented the revision number by 1.
+ def SuccessfullyUprevedEbuildSymlink(symlink_path):
+ self.assertEqual(
+ symlink_path,
+ "/some/path/to/chroot/src/path/to/package-r1.ebuild",
+ )
+
+ # Test function to simulate 'UpdatePackagesPatchMetadataFile()' when the
+ # patch results contain a disabled patch in 'disable_patches' mode.
+ def RetrievedPatchResults(chroot_path, svn_version, packages, mode): + + self.assertEqual(chroot_path, Path("/some/path/to/chroot")) + self.assertEqual(svn_version, 1000) + self.assertListEqual(packages, ["path/to"]) + self.assertEqual(mode, failure_modes.FailureModes.DISABLE_PATCHES) + + patch_metadata_file = "PATCHES.json" + PatchInfo = collections.namedtuple( + "PatchInfo", + [ + "applied_patches", + "failed_patches", + "non_applicable_patches", + "disabled_patches", + "removed_patches", + "modified_metadata", + ], + ) + + package_patch_info = PatchInfo( + applied_patches=["fix_display.patch"], + failed_patches=["fix_stdout.patch"], + non_applicable_patches=[], + disabled_patches=["fix_stdout.patch"], + removed_patches=[], + modified_metadata="/abs/path/to/filesdir/%s" + % patch_metadata_file, + ) + + package_info_dict = {"path/to": package_patch_info._asdict()} + + # Returns a dictionary where the key is the package and the value is a + # dictionary that contains information about the package's patch results + # produced by the patch manager. + return package_info_dict + + # Test function to simulate 'UploadChanges()' when successfully created a + # commit for the changes made to the packages and their patches and + # retrieved the change list of the commit. + def SuccessfullyUploadedChanges(*args): + assert len(args) == 3 + commit_url = "https://some_name/path/to/commit/+/12345" + return git.CommitContents(url=commit_url, cl_number=12345) + + test_package_path_dict = {symlink_path_to_package: abs_path_to_package} + + # Simulate behavior of 'CreatePathDictionaryFromPackages()' when + # successfully created a dictionary where the key is the absolute path to + # the symlink of the package and value is the absolute path to the ebuild of + # the package. + mock_create_path_dict.return_value = test_package_path_dict + + # Use test function to simulate behavior. 
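These fakes rely on a standard `unittest.mock` mechanism: assigning a function to a mock's `side_effect` makes every call to the mock delegate to that function, so the fake can both assert on the arguments it received and supply the return value. A minimal self-contained sketch of the pattern (the names `FakeUploadChanges` and `uploader` are illustrative, not part of this change):

    import unittest
    from unittest import mock


    class SideEffectDemo(unittest.TestCase):
        """Minimal sketch of the side_effect pattern used in these tests."""

        def test_side_effect_delegates_call(self):
            # The fake asserts on its arguments and supplies the return
            # value, mirroring SuccessfullyUploadedChanges() above.
            def FakeUploadChanges(path, branch, messages):
                self.assertEqual(branch, "my-branch")
                return "https://example.com/c/12345"

            uploader = mock.Mock(side_effect=FakeUploadChanges)
            url = uploader("/repo", "my-branch", ["message"])
            self.assertEqual(url, "https://example.com/c/12345")


    if __name__ == "__main__":
        unittest.main()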
+ mock_create_repo.side_effect = SuccessfullyCreateBranchForChanges
+ mock_update_llvm_next.side_effect = SuccessfullyUpdatedLLVMHash
+ mock_uprev_symlink.side_effect = SuccessfullyUprevedEbuildSymlink
+ mock_update_package_metadata_file.side_effect = RetrievedPatchResults
+ mock_upload_changes.side_effect = SuccessfullyUploadedChanges
+ mock_llvm_version.return_value = "1234"
+ mock_mask_contains.return_value = None
+
+ packages_to_update = ["test-packages/package1"]
+ llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
+ git_hash = "a123testhash5"
+ svn_version = 1000
+ chroot_path = Path("/some/path/to/chroot")
+ git_hash_source = "tot"
+ branch = "update-LLVM_NEXT_HASH-a123testhash5"
+ extra_commit_msg = "\ncommit-message-end"
+
+ change_list = update_chromeos_llvm_hash.UpdatePackages(
+ packages=packages_to_update,
+ manifest_packages=[],
+ llvm_variant=llvm_variant,
+ git_hash=git_hash,
+ svn_version=svn_version,
+ chroot_path=chroot_path,
+ mode=failure_modes.FailureModes.DISABLE_PATCHES,
+ git_hash_source=git_hash_source,
+ extra_commit_msg=extra_commit_msg,
+ )
+
+ self.assertEqual(
+ change_list.url, "https://some_name/path/to/commit/+/12345"
+ )
+
+ self.assertEqual(change_list.cl_number, 12345)
+
+ mock_create_path_dict.assert_called_once_with(
+ chroot_path, packages_to_update
+ )
+
+ mock_create_repo.assert_called_once_with(path_to_package_dir, branch)
+
+ mock_update_llvm_next.assert_called_once_with(
+ abs_path_to_package, llvm_variant, git_hash, svn_version
+ )
+
+ mock_uprev_symlink.assert_called_once_with(symlink_path_to_package)
+
+ mock_mask_contains.assert_called_once_with(chroot_path, git_hash)
+
+ expected_commit_messages = [
+ "llvm-next/tot: upgrade to a123testhash5 (r1000)\n",
+ "The following packages have been updated:",
+ "path/to",
+ "\nFor the package path/to:",
+ "The patch metadata file PATCHES.json was modified",
+ "The following patches were disabled:",
+ "fix_stdout.patch",
+ "\ncommit-message-end",
+ ]
+
+ mock_update_package_metadata_file.assert_called_once()
+
+ mock_stage_patch_file.assert_called_once_with(
+ "/abs/path/to/filesdir/PATCHES.json"
+ )
+
+ mock_upload_changes.assert_called_once_with(
+ path_to_package_dir, branch, expected_commit_messages
+ )
+
+ mock_delete_repo.assert_called_once_with(path_to_package_dir, branch)
+
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion")
+ def testEnsurePackageMaskContainsExisting(
+ self, mock_llvm_version, mock_git_add
+ ):
+ chroot_path = "absolute/path/to/chroot"
+ git_hash = "badf00d"
+ mock_llvm_version.return_value = "1234"
+ with mock.patch(
+ "update_chromeos_llvm_hash.open",
+ mock.mock_open(read_data="\n=sys-devel/llvm-1234.0_pre*\n"),
+ create=True,
+ ) as mock_file:
+ update_chromeos_llvm_hash.EnsurePackageMaskContains(
+ chroot_path, git_hash
+ )
+ handle = mock_file()
+ handle.write.assert_not_called()
+ mock_llvm_version.assert_called_once_with(git_hash)
+
+ overlay_dir = (
+ "absolute/path/to/chroot/src/third_party/chromiumos-overlay"
+ )
+ mask_path = overlay_dir + "/profiles/targets/chromeos/package.mask"
+ mock_git_add.assert_called_once_with(
+ ["git", "-C", overlay_dir, "add", mask_path]
+ )
+
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion")
+ def testEnsurePackageMaskContainsNotExisting(
+ self, mock_llvm_version, mock_git_add
+ ):
+ chroot_path = "absolute/path/to/chroot"
+ git_hash = "badf00d"
+ mock_llvm_version.return_value
= "1234" + with mock.patch( + "update_chromeos_llvm_hash.open", + mock.mock_open(read_data="nothing relevant"), + create=True, + ) as mock_file: + update_chromeos_llvm_hash.EnsurePackageMaskContains( + chroot_path, git_hash + ) + handle = mock_file() + handle.write.assert_called_once_with( + "=sys-devel/llvm-1234.0_pre*\n" + ) + mock_llvm_version.assert_called_once_with(git_hash) + + overlay_dir = ( + "absolute/path/to/chroot/src/third_party/chromiumos-overlay" + ) + mask_path = overlay_dir + "/profiles/targets/chromeos/package.mask" + mock_git_add.assert_called_once_with( + ["git", "-C", overlay_dir, "add", mask_path] + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/llvm_tools/update_packages_and_run_tests.py b/llvm_tools/update_packages_and_run_tests.py index 477caa61..5d004546 100755 --- a/llvm_tools/update_packages_and_run_tests.py +++ b/llvm_tools/update_packages_and_run_tests.py @@ -20,464 +20,507 @@ import get_llvm_hash import update_chromeos_llvm_hash -VALID_CQ_TRYBOTS = ['llvm', 'llvm-next', 'llvm-tot'] +VALID_CQ_TRYBOTS = ["llvm", "llvm-next", "llvm-tot"] def GetCommandLineArgs(): - """Parses the command line for the command line arguments. - - Returns: - The log level to use when retrieving the LLVM hash or google3 LLVM version, - the chroot path to use for executing chroot commands, - a list of a package or packages to update their LLVM next hash, - and the LLVM version to use when retrieving the LLVM hash. - """ - - # Default path to the chroot if a path is not specified. - cros_root = os.path.expanduser('~') - cros_root = os.path.join(cros_root, 'chromiumos') - - # Create parser and add optional command-line arguments. - parser = argparse.ArgumentParser( - description='Update an LLVM hash of packages and run tests.') - - # Add argument for other change lists that want to run alongside the tryjob - # which has a change list of updating a package's git hash. - parser.add_argument( - '--extra_change_lists', - type=int, - nargs='+', - default=[], - help='change lists that would like to be run alongside the change list ' - 'of updating the packages') - - # Add argument for a specific chroot path. - parser.add_argument('--chroot_path', - default=cros_root, - help='the path to the chroot (default: %(default)s)') - - # Add argument to choose between llvm and llvm-next. - parser.add_argument( - '--is_llvm_next', - action='store_true', - help='which llvm hash to update. Update LLVM_NEXT_HASH if specified. ' - 'Otherwise, update LLVM_HASH') - - # Add argument for the absolute path to the file that contains information on - # the previous tested svn version. - parser.add_argument( - '--last_tested', - help='the absolute path to the file that contains the last tested ' - 'arguments.') - - # Add argument for the LLVM version to use. - parser.add_argument('--llvm_version', - type=get_llvm_hash.IsSvnOption, - required=True, - help='which git hash of LLVM to find ' - '{google3, ToT, <svn_version>} ' - '(default: finds the git hash of the google3 LLVM ' - 'version)') - - # Add argument to add reviewers for the created CL. - parser.add_argument('--reviewers', - nargs='+', - default=[], - help='The reviewers for the package update changelist') - - # Add argument for whether to display command contents to `stdout`. - parser.add_argument('--verbose', - action='store_true', - help='display contents of a command to the terminal ' - '(default: %(default)s)') - - subparsers = parser.add_subparsers(dest='subparser_name') - subparser_names = [] - # Testing with the tryjobs. 
- tryjob_subparser = subparsers.add_parser('tryjobs') - subparser_names.append('tryjobs') - tryjob_subparser.add_argument('--builders', - required=True, - nargs='+', - default=[], - help='builders to use for the tryjob testing') - - # Add argument for custom options for the tryjob. - tryjob_subparser.add_argument('--options', - required=False, - nargs='+', - default=[], - help='options to use for the tryjob testing') - - # Testing with the recipe builders - recipe_subparser = subparsers.add_parser('recipe') - subparser_names.append('recipe') - recipe_subparser.add_argument('--options', - required=False, - nargs='+', - default=[], - help='options passed to the recipe builders') - - recipe_subparser.add_argument('--builders', - required=True, - nargs='+', - default=[], - help='recipe builders to launch') - - # Testing with CQ. - cq_subparser = subparsers.add_parser('cq') - subparser_names.append('cq') - - # Add argument for specify a cq trybot to test along with other cq builders - # e.g. llvm, llvm-next or llvm-tot - cq_subparser.add_argument( - '--cq_trybot', - choices=VALID_CQ_TRYBOTS, - help='include the trybot to test together with other cq builders ' - 'available: %(choices)s') - - args_output = parser.parse_args() - - if args_output.subparser_name not in subparser_names: - parser.error('one of %s must be specified' % subparser_names) - - return args_output + """Parses the command line for the command line arguments. + + Returns: + The log level to use when retrieving the LLVM hash or google3 LLVM version, + the chroot path to use for executing chroot commands, + a list of a package or packages to update their LLVM next hash, + and the LLVM version to use when retrieving the LLVM hash. + """ + + # Default path to the chroot if a path is not specified. + cros_root = os.path.expanduser("~") + cros_root = os.path.join(cros_root, "chromiumos") + + # Create parser and add optional command-line arguments. + parser = argparse.ArgumentParser( + description="Update an LLVM hash of packages and run tests." + ) + + # Add argument for other change lists that want to run alongside the tryjob + # which has a change list of updating a package's git hash. + parser.add_argument( + "--extra_change_lists", + type=int, + nargs="+", + default=[], + help="change lists that would like to be run alongside the change list " + "of updating the packages", + ) + + # Add argument for a specific chroot path. + parser.add_argument( + "--chroot_path", + default=cros_root, + help="the path to the chroot (default: %(default)s)", + ) + + # Add argument to choose between llvm and llvm-next. + parser.add_argument( + "--is_llvm_next", + action="store_true", + help="which llvm hash to update. Update LLVM_NEXT_HASH if specified. " + "Otherwise, update LLVM_HASH", + ) + + # Add argument for the absolute path to the file that contains information on + # the previous tested svn version. + parser.add_argument( + "--last_tested", + help="the absolute path to the file that contains the last tested " + "arguments.", + ) + + # Add argument for the LLVM version to use. + parser.add_argument( + "--llvm_version", + type=get_llvm_hash.IsSvnOption, + required=True, + help="which git hash of LLVM to find " + "{google3, ToT, <svn_version>} " + "(default: finds the git hash of the google3 LLVM " + "version)", + ) + + # Add argument to add reviewers for the created CL. 
+ parser.add_argument( + "--reviewers", + nargs="+", + default=[], + help="The reviewers for the package update changelist", + ) + + # Add argument for whether to display command contents to `stdout`. + parser.add_argument( + "--verbose", + action="store_true", + help="display contents of a command to the terminal " + "(default: %(default)s)", + ) + + subparsers = parser.add_subparsers(dest="subparser_name") + subparser_names = [] + # Testing with the tryjobs. + tryjob_subparser = subparsers.add_parser("tryjobs") + subparser_names.append("tryjobs") + tryjob_subparser.add_argument( + "--builders", + required=True, + nargs="+", + default=[], + help="builders to use for the tryjob testing", + ) + + # Add argument for custom options for the tryjob. + tryjob_subparser.add_argument( + "--options", + required=False, + nargs="+", + default=[], + help="options to use for the tryjob testing", + ) + + # Testing with the recipe builders + recipe_subparser = subparsers.add_parser("recipe") + subparser_names.append("recipe") + recipe_subparser.add_argument( + "--options", + required=False, + nargs="+", + default=[], + help="options passed to the recipe builders", + ) + + recipe_subparser.add_argument( + "--builders", + required=True, + nargs="+", + default=[], + help="recipe builders to launch", + ) + + # Testing with CQ. + cq_subparser = subparsers.add_parser("cq") + subparser_names.append("cq") + + # Add argument for specify a cq trybot to test along with other cq builders + # e.g. llvm, llvm-next or llvm-tot + cq_subparser.add_argument( + "--cq_trybot", + choices=VALID_CQ_TRYBOTS, + help="include the trybot to test together with other cq builders " + "available: %(choices)s", + ) + + args_output = parser.parse_args() + + if args_output.subparser_name not in subparser_names: + parser.error("one of %s must be specified" % subparser_names) + + return args_output def UnchangedSinceLastRun(last_tested_file, arg_dict): - """Gets the arguments used for last run + """Gets the arguments used for last run - Args: - last_tested_file: The absolute path to the file that contains the - arguments for the last run. - arg_dict: The arguments used for this run. + Args: + last_tested_file: The absolute path to the file that contains the + arguments for the last run. + arg_dict: The arguments used for this run. - Returns: - Return true if the arguments used for last run exist and are the - same as the arguments used for this run. Otherwise return false. - """ + Returns: + Return true if the arguments used for last run exist and are the + same as the arguments used for this run. Otherwise return false. + """ - if not last_tested_file: - return False + if not last_tested_file: + return False - # Get the last tested svn version if the file exists. - last_arg_dict = None - try: - with open(last_tested_file) as f: - last_arg_dict = json.load(f) + # Get the last tested svn version if the file exists. 
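The `--last_tested` guard being reflowed here boils down to a JSON round trip: the run's key arguments are dumped to a file, and the next run is skipped when a freshly built dictionary compares equal to the stored one. A minimal sketch of the same idea, using only the standard library (the file name and contents are illustrative):

    import json
    import tempfile


    def unchanged_since_last_run(last_tested_file, arg_dict):
        # Missing, unreadable, or malformed files all count as "changed",
        # matching the (IOError, ValueError) handling above.
        if not last_tested_file:
            return False
        try:
            with open(last_tested_file) as f:
                return json.load(f) == arg_dict
        except (IOError, ValueError):
            return False


    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump({"svn_version": 100}, f)

    print(unchanged_since_last_run(f.name, {"svn_version": 100}))  # True
    print(unchanged_since_last_run(f.name, {"svn_version": 200}))  # False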
+ last_arg_dict = None + try: + with open(last_tested_file) as f: + last_arg_dict = json.load(f) - except (IOError, ValueError): - return False + except (IOError, ValueError): + return False - return arg_dict == last_arg_dict + return arg_dict == last_arg_dict def AddReviewers(cl, reviewers, chroot_path): - """Add reviewers for the created CL.""" + """Add reviewers for the created CL.""" - gerrit_abs_path = os.path.join(chroot_path, 'chromite/bin/gerrit') - for reviewer in reviewers: - cmd = [gerrit_abs_path, 'reviewers', str(cl), reviewer] + gerrit_abs_path = os.path.join(chroot_path, "chromite/bin/gerrit") + for reviewer in reviewers: + cmd = [gerrit_abs_path, "reviewers", str(cl), reviewer] - subprocess.check_output(cmd) + subprocess.check_output(cmd) def AddLinksToCL(tests, cl, chroot_path): - """Adds the test link(s) to the CL as a comment.""" + """Adds the test link(s) to the CL as a comment.""" - # NOTE: Invoking `cros_sdk` does not make each tryjob link appear on its own - # line, so invoking the `gerrit` command directly instead of using `cros_sdk` - # to do it for us. - # - # FIXME: Need to figure out why `cros_sdk` does not add each tryjob link as a - # newline. - gerrit_abs_path = os.path.join(chroot_path, 'chromite/bin/gerrit') + # NOTE: Invoking `cros_sdk` does not make each tryjob link appear on its own + # line, so invoking the `gerrit` command directly instead of using `cros_sdk` + # to do it for us. + # + # FIXME: Need to figure out why `cros_sdk` does not add each tryjob link as a + # newline. + gerrit_abs_path = os.path.join(chroot_path, "chromite/bin/gerrit") - links = ['Started the following tests:'] - links.extend(test['link'] for test in tests) + links = ["Started the following tests:"] + links.extend(test["link"] for test in tests) - add_message_cmd = [gerrit_abs_path, 'message', str(cl), '\n'.join(links)] + add_message_cmd = [gerrit_abs_path, "message", str(cl), "\n".join(links)] - subprocess.check_output(add_message_cmd) + subprocess.check_output(add_message_cmd) # Testing with tryjobs def GetCurrentTimeInUTC(): - """Returns the current time via `datetime.datetime.utcnow()`.""" - return datetime.datetime.utcnow() + """Returns the current time via `datetime.datetime.utcnow()`.""" + return datetime.datetime.utcnow() def GetTryJobCommand(change_list, extra_change_lists, options, builder): - """Constructs the 'tryjob' command. + """Constructs the 'tryjob' command. - Args: - change_list: The CL obtained from updating the packages. - extra_change_lists: Extra change lists that would like to be run alongside - the change list of updating the packages. - options: Options to be passed into the tryjob command. - builder: The builder to be passed into the tryjob command. + Args: + change_list: The CL obtained from updating the packages. + extra_change_lists: Extra change lists that would like to be run alongside + the change list of updating the packages. + options: Options to be passed into the tryjob command. + builder: The builder to be passed into the tryjob command. - Returns: - The 'tryjob' command with the change list of updating the packages and - any extra information that was passed into the command line. - """ + Returns: + The 'tryjob' command with the change list of updating the packages and + any extra information that was passed into the command line. 
+ """ - tryjob_cmd = ['cros', 'tryjob', '--yes', '--json', '-g', '%d' % change_list] + tryjob_cmd = ["cros", "tryjob", "--yes", "--json", "-g", "%d" % change_list] - if extra_change_lists: - for extra_cl in extra_change_lists: - tryjob_cmd.extend(['-g', '%d' % extra_cl]) + if extra_change_lists: + for extra_cl in extra_change_lists: + tryjob_cmd.extend(["-g", "%d" % extra_cl]) - if options: - tryjob_cmd.extend('--%s' % option for option in options) + if options: + tryjob_cmd.extend("--%s" % option for option in options) - tryjob_cmd.append(builder) + tryjob_cmd.append(builder) - return tryjob_cmd + return tryjob_cmd def RunTryJobs(cl_number, extra_change_lists, options, builders, chroot_path): - """Runs a tryjob/tryjobs. + """Runs a tryjob/tryjobs. - Args: - cl_number: The CL created by updating the packages. - extra_change_lists: Any extra change lists that would run alongside the CL - that was created by updating the packages ('cl_number'). - options: Any options to be passed into the 'tryjob' command. - builders: All the builders to run the 'tryjob' with. - chroot_path: The absolute path to the chroot. + Args: + cl_number: The CL created by updating the packages. + extra_change_lists: Any extra change lists that would run alongside the CL + that was created by updating the packages ('cl_number'). + options: Any options to be passed into the 'tryjob' command. + builders: All the builders to run the 'tryjob' with. + chroot_path: The absolute path to the chroot. - Returns: - A list that contains stdout contents of each tryjob, where stdout is - information (a hashmap) about the tryjob. The hashmap also contains stderr - if there was an error when running a tryjob. + Returns: + A list that contains stdout contents of each tryjob, where stdout is + information (a hashmap) about the tryjob. The hashmap also contains stderr + if there was an error when running a tryjob. - Raises: - ValueError: Failed to submit a tryjob. - """ + Raises: + ValueError: Failed to submit a tryjob. + """ - # Contains the results of each builder. - tests = [] + # Contains the results of each builder. + tests = [] - # Run tryjobs with the change list number obtained from updating the - # packages and append additional changes lists and options obtained from the - # command line. - for builder in builders: - cmd = GetTryJobCommand(cl_number, extra_change_lists, options, builder) + # Run tryjobs with the change list number obtained from updating the + # packages and append additional changes lists and options obtained from the + # command line. 
+ for builder in builders: + cmd = GetTryJobCommand(cl_number, extra_change_lists, options, builder) - out = subprocess.check_output(cmd, cwd=chroot_path, encoding='utf-8') + out = subprocess.check_output(cmd, cwd=chroot_path, encoding="utf-8") - test_output = json.loads(out) + test_output = json.loads(out) - buildbucket_id = int(test_output[0]['id']) + buildbucket_id = int(test_output[0]["id"]) - tests.append({ - 'launch_time': str(GetCurrentTimeInUTC()), - 'link': 'http://ci.chromium.org/b/%s' % buildbucket_id, - 'buildbucket_id': buildbucket_id, - 'extra_cls': extra_change_lists, - 'options': options, - 'builder': [builder] - }) + tests.append( + { + "launch_time": str(GetCurrentTimeInUTC()), + "link": "http://ci.chromium.org/b/%s" % buildbucket_id, + "buildbucket_id": buildbucket_id, + "extra_cls": extra_change_lists, + "options": options, + "builder": [builder], + } + ) - AddLinksToCL(tests, cl_number, chroot_path) + AddLinksToCL(tests, cl_number, chroot_path) - return tests + return tests -def StartRecipeBuilders(cl_number, extra_change_lists, options, builders, - chroot_path): - """Launch recipe builders. +def StartRecipeBuilders( + cl_number, extra_change_lists, options, builders, chroot_path +): + """Launch recipe builders. - Args: - cl_number: The CL created by updating the packages. - extra_change_lists: Any extra change lists that would run alongside the CL - that was created by updating the packages ('cl_number'). - options: Any options to be passed into the 'tryjob' command. - builders: All the builders to run the 'tryjob' with. - chroot_path: The absolute path to the chroot. + Args: + cl_number: The CL created by updating the packages. + extra_change_lists: Any extra change lists that would run alongside the CL + that was created by updating the packages ('cl_number'). + options: Any options to be passed into the 'tryjob' command. + builders: All the builders to run the 'tryjob' with. + chroot_path: The absolute path to the chroot. - Returns: - A list that contains stdout contents of each builder, where stdout is - information (a hashmap) about the tryjob. The hashmap also contains stderr - if there was an error when running a tryjob. + Returns: + A list that contains stdout contents of each builder, where stdout is + information (a hashmap) about the tryjob. The hashmap also contains stderr + if there was an error when running a tryjob. - Raises: - ValueError: Failed to start a builder. - """ + Raises: + ValueError: Failed to start a builder. + """ - # Contains the results of each builder. - tests = [] + # Contains the results of each builder. + tests = [] - # Launch a builders with the change list number obtained from updating the - # packages and append additional changes lists and options obtained from the - # command line. - for builder in builders: - cmd = ['bb', 'add', '-json'] + # Launch a builders with the change list number obtained from updating the + # packages and append additional changes lists and options obtained from the + # command line. 
+ for builder in builders: + cmd = ["bb", "add", "-json"] - if cl_number: - cmd.extend(['-cl', 'crrev.com/c/%d' % cl_number]) + if cl_number: + cmd.extend(["-cl", "crrev.com/c/%d" % cl_number]) - if extra_change_lists: - for cl in extra_change_lists: - cmd.extend(['-cl', 'crrev.com/c/%d' % cl]) + if extra_change_lists: + for cl in extra_change_lists: + cmd.extend(["-cl", "crrev.com/c/%d" % cl]) - if options: - cmd.extend(options) + if options: + cmd.extend(options) - cmd.append(builder) + cmd.append(builder) - out = subprocess.check_output(cmd, cwd=chroot_path, encoding='utf-8') + out = subprocess.check_output(cmd, cwd=chroot_path, encoding="utf-8") - test_output = json.loads(out) + test_output = json.loads(out) - tests.append({ - 'launch_time': test_output['createTime'], - 'link': 'http://ci.chromium.org/b/%s' % test_output['id'], - 'buildbucket_id': test_output['id'], - 'extra_cls': extra_change_lists, - 'options': options, - 'builder': [builder] - }) + tests.append( + { + "launch_time": test_output["createTime"], + "link": "http://ci.chromium.org/b/%s" % test_output["id"], + "buildbucket_id": test_output["id"], + "extra_cls": extra_change_lists, + "options": options, + "builder": [builder], + } + ) - AddLinksToCL(tests, cl_number, chroot_path) + AddLinksToCL(tests, cl_number, chroot_path) - return tests + return tests # Testing with CQ def GetCQDependString(dependent_cls): - """Get CQ dependency string e.g. `Cq-Depend: chromium:MM, chromium:NN`.""" + """Get CQ dependency string e.g. `Cq-Depend: chromium:MM, chromium:NN`.""" - if not dependent_cls: - return None + if not dependent_cls: + return None - # Cq-Depend must start a new paragraph prefixed with "Cq-Depend". - return '\nCq-Depend: ' + ', '.join( - ('chromium:%s' % i) for i in dependent_cls) + # Cq-Depend must start a new paragraph prefixed with "Cq-Depend". + return "\nCq-Depend: " + ", ".join( + ("chromium:%s" % i) for i in dependent_cls + ) def GetCQIncludeTrybotsString(trybot): - """Get Cq-Include-Trybots string, for more llvm testings""" + """Get Cq-Include-Trybots string, for more llvm testings""" - if not trybot: - return None + if not trybot: + return None - if trybot not in VALID_CQ_TRYBOTS: - raise ValueError('%s is not a valid llvm trybot' % trybot) + if trybot not in VALID_CQ_TRYBOTS: + raise ValueError("%s is not a valid llvm trybot" % trybot) - # Cq-Include-Trybots must start a new paragraph prefixed - # with "Cq-Include-Trybots". - return '\nCq-Include-Trybots:chromeos/cq:cq-%s-orchestrator' % trybot + # Cq-Include-Trybots must start a new paragraph prefixed + # with "Cq-Include-Trybots". + return "\nCq-Include-Trybots:chromeos/cq:cq-%s-orchestrator" % trybot def StartCQDryRun(cl, dependent_cls, chroot_path): - """Start CQ dry run for the changelist and dependencies.""" + """Start CQ dry run for the changelist and dependencies.""" - gerrit_abs_path = os.path.join(chroot_path, 'chromite/bin/gerrit') + gerrit_abs_path = os.path.join(chroot_path, "chromite/bin/gerrit") - cl_list = [cl] - cl_list.extend(dependent_cls) + cl_list = [cl] + cl_list.extend(dependent_cls) - for changes in cl_list: - cq_dry_run_cmd = [gerrit_abs_path, 'label-cq', str(changes), '1'] + for changes in cl_list: + cq_dry_run_cmd = [gerrit_abs_path, "label-cq", str(changes), "1"] - subprocess.check_output(cq_dry_run_cmd) + subprocess.check_output(cq_dry_run_cmd) def main(): - """Updates the packages' LLVM hash and run tests. - - Raises: - AssertionError: The script was run inside the chroot. 
- """ - - chroot.VerifyOutsideChroot() - - args_output = GetCommandLineArgs() - - svn_option = args_output.llvm_version - - git_hash, svn_version = get_llvm_hash.GetLLVMHashAndVersionFromSVNOption( - svn_option) - - # There is no need to run tryjobs when all the key parameters remain unchanged - # from last time. - - # If --last_tested is specified, check if the current run has the same - # arguments last time --last_tested is used. - if args_output.last_tested: - chroot_file_paths = chroot.GetChrootEbuildPaths( - args_output.chroot_path, update_chromeos_llvm_hash.DEFAULT_PACKAGES) - arg_dict = { - 'svn_version': svn_version, - 'ebuilds': chroot_file_paths, - 'extra_cls': args_output.extra_change_lists, - } - if args_output.subparser_name in ('tryjobs', 'recipe'): - arg_dict['builders'] = args_output.builders - arg_dict['tryjob_options'] = args_output.options - if UnchangedSinceLastRun(args_output.last_tested, arg_dict): - print('svn version (%d) matches the last tested svn version in %s' % - (svn_version, args_output.last_tested)) - return - - llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current - if args_output.is_llvm_next: - llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next - update_chromeos_llvm_hash.verbose = args_output.verbose - extra_commit_msg = None - if args_output.subparser_name == 'cq': - cq_depend_msg = GetCQDependString(args_output.extra_change_lists) - if cq_depend_msg: - extra_commit_msg = cq_depend_msg - cq_trybot_msg = GetCQIncludeTrybotsString(args_output.cq_trybot) - if cq_trybot_msg: - extra_commit_msg += cq_trybot_msg - - change_list = update_chromeos_llvm_hash.UpdatePackages( - packages=update_chromeos_llvm_hash.DEFAULT_PACKAGES, - manifest_packages=[], - llvm_variant=llvm_variant, - git_hash=git_hash, - svn_version=svn_version, - chroot_path=args_output.chroot_path, - mode=failure_modes.FailureModes.DISABLE_PATCHES, - git_hash_source=svn_option, - extra_commit_msg=extra_commit_msg) - - AddReviewers(change_list.cl_number, args_output.reviewers, - args_output.chroot_path) - - print('Successfully updated packages to %d' % svn_version) - print('Gerrit URL: %s' % change_list.url) - print('Change list number: %d' % change_list.cl_number) - - if args_output.subparser_name == 'tryjobs': - tests = RunTryJobs(change_list.cl_number, args_output.extra_change_lists, - args_output.options, args_output.builders, - args_output.chroot_path) - print('Tests:') - for test in tests: - print(test) - elif args_output.subparser_name == 'recipe': - tests = StartRecipeBuilders(change_list.cl_number, - args_output.extra_change_lists, - args_output.options, args_output.builders, - args_output.chroot_path) - print('Tests:') - for test in tests: - print(test) - - else: - StartCQDryRun(change_list.cl_number, args_output.extra_change_lists, - args_output.chroot_path) - - # If --last_tested is specified, record the arguments used - if args_output.last_tested: - with open(args_output.last_tested, 'w') as f: - json.dump(arg_dict, f, indent=2) - - -if __name__ == '__main__': - main() + """Updates the packages' LLVM hash and run tests. + + Raises: + AssertionError: The script was run inside the chroot. + """ + + chroot.VerifyOutsideChroot() + + args_output = GetCommandLineArgs() + + svn_option = args_output.llvm_version + + git_hash, svn_version = get_llvm_hash.GetLLVMHashAndVersionFromSVNOption( + svn_option + ) + + # There is no need to run tryjobs when all the key parameters remain unchanged + # from last time. 
+ + # If --last_tested is specified, check if the current run has the same + # arguments last time --last_tested is used. + if args_output.last_tested: + chroot_file_paths = chroot.GetChrootEbuildPaths( + args_output.chroot_path, update_chromeos_llvm_hash.DEFAULT_PACKAGES + ) + arg_dict = { + "svn_version": svn_version, + "ebuilds": chroot_file_paths, + "extra_cls": args_output.extra_change_lists, + } + if args_output.subparser_name in ("tryjobs", "recipe"): + arg_dict["builders"] = args_output.builders + arg_dict["tryjob_options"] = args_output.options + if UnchangedSinceLastRun(args_output.last_tested, arg_dict): + print( + "svn version (%d) matches the last tested svn version in %s" + % (svn_version, args_output.last_tested) + ) + return + + llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current + if args_output.is_llvm_next: + llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next + update_chromeos_llvm_hash.verbose = args_output.verbose + extra_commit_msg = None + if args_output.subparser_name == "cq": + cq_depend_msg = GetCQDependString(args_output.extra_change_lists) + if cq_depend_msg: + extra_commit_msg = cq_depend_msg + cq_trybot_msg = GetCQIncludeTrybotsString(args_output.cq_trybot) + if cq_trybot_msg: + extra_commit_msg += cq_trybot_msg + + change_list = update_chromeos_llvm_hash.UpdatePackages( + packages=update_chromeos_llvm_hash.DEFAULT_PACKAGES, + manifest_packages=[], + llvm_variant=llvm_variant, + git_hash=git_hash, + svn_version=svn_version, + chroot_path=args_output.chroot_path, + mode=failure_modes.FailureModes.DISABLE_PATCHES, + git_hash_source=svn_option, + extra_commit_msg=extra_commit_msg, + ) + + AddReviewers( + change_list.cl_number, args_output.reviewers, args_output.chroot_path + ) + + print("Successfully updated packages to %d" % svn_version) + print("Gerrit URL: %s" % change_list.url) + print("Change list number: %d" % change_list.cl_number) + + if args_output.subparser_name == "tryjobs": + tests = RunTryJobs( + change_list.cl_number, + args_output.extra_change_lists, + args_output.options, + args_output.builders, + args_output.chroot_path, + ) + print("Tests:") + for test in tests: + print(test) + elif args_output.subparser_name == "recipe": + tests = StartRecipeBuilders( + change_list.cl_number, + args_output.extra_change_lists, + args_output.options, + args_output.builders, + args_output.chroot_path, + ) + print("Tests:") + for test in tests: + print(test) + + else: + StartCQDryRun( + change_list.cl_number, + args_output.extra_change_lists, + args_output.chroot_path, + ) + + # If --last_tested is specified, record the arguments used + if args_output.last_tested: + with open(args_output.last_tested, "w") as f: + json.dump(arg_dict, f, indent=2) + + +if __name__ == "__main__": + main() diff --git a/llvm_tools/update_packages_and_run_tests_unittest.py b/llvm_tools/update_packages_and_run_tests_unittest.py index a4b4f29c..0b029e04 100755 --- a/llvm_tools/update_packages_and_run_tests_unittest.py +++ b/llvm_tools/update_packages_and_run_tests_unittest.py @@ -23,433 +23,521 @@ import update_packages_and_run_tests # Testing with tryjobs. 
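Before the unittests, it is worth spelling out the two commit-message footers that the `cq` path of main() concatenates above; both helpers are pure string builders, so their output is easy to state (sketch, assuming the module is importable as in the tests):

    import update_packages_and_run_tests as upt

    msg = upt.GetCQDependString([1234, 5678])
    msg += upt.GetCQIncludeTrybotsString("llvm-next")
    assert msg == (
        "\nCq-Depend: chromium:1234, chromium:5678"
        "\nCq-Include-Trybots:chromeos/cq:cq-llvm-next-orchestrator"
    )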
class UpdatePackagesAndRunTryjobsTest(unittest.TestCase): - """Unittests when running tryjobs after updating packages.""" - - def testNoLastTestedFile(self): - self.assertEqual( - update_packages_and_run_tests.UnchangedSinceLastRun(None, {}), False) - - def testEmptyLastTestedFile(self): - with test_helpers.CreateTemporaryFile() as temp_file: - self.assertEqual( - update_packages_and_run_tests.UnchangedSinceLastRun(temp_file, {}), - False) - - def testLastTestedFileDoesNotExist(self): - # Simulate 'open()' on a lasted tested file that does not exist. - mock.mock_open(read_data='') - - self.assertEqual( - update_packages_and_run_tests.UnchangedSinceLastRun( - '/some/file/that/does/not/exist.txt', {}), False) - - def testMatchedLastTestedFile(self): - with test_helpers.CreateTemporaryFile() as last_tested_file: - arg_dict = { - 'svn_version': - 1234, - 'ebuilds': [ - '/path/to/package1-r2.ebuild', - '/path/to/package2/package2-r3.ebuild' - ], - 'builders': [ - 'kevin-llvm-next-toolchain-tryjob', - 'eve-llvm-next-toolchain-tryjob' - ], - 'extra_cls': [10, 1], - 'tryjob_options': ['latest-toolchain', 'hwtest'] - } - - with open(last_tested_file, 'w') as f: - f.write(json.dumps(arg_dict, indent=2)) - - self.assertEqual( - update_packages_and_run_tests.UnchangedSinceLastRun( - last_tested_file, arg_dict), True) - - def testGetTryJobCommandWithNoExtraInformation(self): - change_list = 1234 - - builder = 'nocturne' - - expected_cmd = [ - 'cros', 'tryjob', '--yes', '--json', '-g', - '%d' % change_list, builder - ] - - self.assertEqual( - update_packages_and_run_tests.GetTryJobCommand(change_list, None, None, - builder), expected_cmd) - - def testGetTryJobCommandWithExtraInformation(self): - change_list = 4321 - extra_cls = [1000, 10] - options = ['option1', 'option2'] - builder = 'kevin' - - expected_cmd = [ - 'cros', - 'tryjob', - '--yes', - '--json', - '-g', - '%d' % change_list, - '-g', - '%d' % extra_cls[0], - '-g', - '%d' % extra_cls[1], - '--%s' % options[0], - '--%s' % options[1], - builder, - ] - - self.assertEqual( - update_packages_and_run_tests.GetTryJobCommand(change_list, extra_cls, - options, builder), - expected_cmd) - - @mock.patch.object(update_packages_and_run_tests, - 'GetCurrentTimeInUTC', - return_value='2019-09-09') - @mock.patch.object(update_packages_and_run_tests, 'AddLinksToCL') - @mock.patch.object(subprocess, 'check_output') - def testSuccessfullySubmittedTryJob(self, mock_cmd, mock_add_links_to_cl, - mock_launch_time): - - expected_cmd = [ - 'cros', 'tryjob', '--yes', '--json', '-g', - '%d' % 900, '-g', - '%d' % 1200, '--some_option', 'builder1' - ] - - bb_id = '1234' - url = 'http://ci.chromium.org/b/%s' % bb_id - - mock_cmd.return_value = json.dumps([{'id': bb_id, 'url': url}]) - - chroot_path = '/some/path/to/chroot' - cl = 900 - extra_cls = [1200] - options = ['some_option'] - builders = ['builder1'] - - tests = update_packages_and_run_tests.RunTryJobs(cl, extra_cls, options, - builders, chroot_path) - - expected_tests = [{ - 'launch_time': mock_launch_time.return_value, - 'link': url, - 'buildbucket_id': int(bb_id), - 'extra_cls': extra_cls, - 'options': options, - 'builder': builders - }] - - self.assertEqual(tests, expected_tests) - - mock_cmd.assert_called_once_with(expected_cmd, - cwd=chroot_path, - encoding='utf-8') - - mock_add_links_to_cl.assert_called_once() - - @mock.patch.object(update_packages_and_run_tests, 'AddLinksToCL') - @mock.patch.object(subprocess, 'check_output') - def testSuccessfullySubmittedRecipeBuilders(self, mock_cmd, - 
mock_add_links_to_cl): - - expected_cmd = [ - 'bb', 'add', '-json', '-cl', - 'crrev.com/c/%s' % 900, '-cl', - 'crrev.com/c/%s' % 1200, 'some_option', 'builder1' - ] - - bb_id = '1234' - create_time = '2020-04-18T00:03:53.978767Z' - - mock_cmd.return_value = json.dumps({ - 'id': bb_id, - 'createTime': create_time - }) - - chroot_path = '/some/path/to/chroot' - cl = 900 - extra_cls = [1200] - options = ['some_option'] - builders = ['builder1'] - - tests = update_packages_and_run_tests.StartRecipeBuilders( - cl, extra_cls, options, builders, chroot_path) - - expected_tests = [{ - 'launch_time': create_time, - 'link': 'http://ci.chromium.org/b/%s' % bb_id, - 'buildbucket_id': bb_id, - 'extra_cls': extra_cls, - 'options': options, - 'builder': builders - }] - - self.assertEqual(tests, expected_tests) - - mock_cmd.assert_called_once_with(expected_cmd, - cwd=chroot_path, - encoding='utf-8') - - mock_add_links_to_cl.assert_called_once() - - @mock.patch.object(subprocess, 'check_output', return_value=None) - def testSuccessfullyAddedTestLinkToCL(self, mock_exec_cmd): - chroot_path = '/abs/path/to/chroot' - - test_cl_number = 1000 - - tests = [{'link': 'https://some_tryjob_link.com'}] - - update_packages_and_run_tests.AddLinksToCL(tests, test_cl_number, - chroot_path) - - expected_gerrit_message = [ - '%s/chromite/bin/gerrit' % chroot_path, 'message', - str(test_cl_number), - 'Started the following tests:\n%s' % tests[0]['link'] - ] + """Unittests when running tryjobs after updating packages.""" + + def testNoLastTestedFile(self): + self.assertEqual( + update_packages_and_run_tests.UnchangedSinceLastRun(None, {}), False + ) + + def testEmptyLastTestedFile(self): + with test_helpers.CreateTemporaryFile() as temp_file: + self.assertEqual( + update_packages_and_run_tests.UnchangedSinceLastRun( + temp_file, {} + ), + False, + ) + + def testLastTestedFileDoesNotExist(self): + # Simulate 'open()' on a lasted tested file that does not exist. 
+ mock.mock_open(read_data="") + + self.assertEqual( + update_packages_and_run_tests.UnchangedSinceLastRun( + "/some/file/that/does/not/exist.txt", {} + ), + False, + ) + + def testMatchedLastTestedFile(self): + with test_helpers.CreateTemporaryFile() as last_tested_file: + arg_dict = { + "svn_version": 1234, + "ebuilds": [ + "/path/to/package1-r2.ebuild", + "/path/to/package2/package2-r3.ebuild", + ], + "builders": [ + "kevin-llvm-next-toolchain-tryjob", + "eve-llvm-next-toolchain-tryjob", + ], + "extra_cls": [10, 1], + "tryjob_options": ["latest-toolchain", "hwtest"], + } + + with open(last_tested_file, "w") as f: + f.write(json.dumps(arg_dict, indent=2)) + + self.assertEqual( + update_packages_and_run_tests.UnchangedSinceLastRun( + last_tested_file, arg_dict + ), + True, + ) + + def testGetTryJobCommandWithNoExtraInformation(self): + change_list = 1234 + + builder = "nocturne" + + expected_cmd = [ + "cros", + "tryjob", + "--yes", + "--json", + "-g", + "%d" % change_list, + builder, + ] + + self.assertEqual( + update_packages_and_run_tests.GetTryJobCommand( + change_list, None, None, builder + ), + expected_cmd, + ) + + def testGetTryJobCommandWithExtraInformation(self): + change_list = 4321 + extra_cls = [1000, 10] + options = ["option1", "option2"] + builder = "kevin" + + expected_cmd = [ + "cros", + "tryjob", + "--yes", + "--json", + "-g", + "%d" % change_list, + "-g", + "%d" % extra_cls[0], + "-g", + "%d" % extra_cls[1], + "--%s" % options[0], + "--%s" % options[1], + builder, + ] + + self.assertEqual( + update_packages_and_run_tests.GetTryJobCommand( + change_list, extra_cls, options, builder + ), + expected_cmd, + ) + + @mock.patch.object( + update_packages_and_run_tests, + "GetCurrentTimeInUTC", + return_value="2019-09-09", + ) + @mock.patch.object(update_packages_and_run_tests, "AddLinksToCL") + @mock.patch.object(subprocess, "check_output") + def testSuccessfullySubmittedTryJob( + self, mock_cmd, mock_add_links_to_cl, mock_launch_time + ): + + expected_cmd = [ + "cros", + "tryjob", + "--yes", + "--json", + "-g", + "%d" % 900, + "-g", + "%d" % 1200, + "--some_option", + "builder1", + ] + + bb_id = "1234" + url = "http://ci.chromium.org/b/%s" % bb_id + + mock_cmd.return_value = json.dumps([{"id": bb_id, "url": url}]) + + chroot_path = "/some/path/to/chroot" + cl = 900 + extra_cls = [1200] + options = ["some_option"] + builders = ["builder1"] + + tests = update_packages_and_run_tests.RunTryJobs( + cl, extra_cls, options, builders, chroot_path + ) + + expected_tests = [ + { + "launch_time": mock_launch_time.return_value, + "link": url, + "buildbucket_id": int(bb_id), + "extra_cls": extra_cls, + "options": options, + "builder": builders, + } + ] + + self.assertEqual(tests, expected_tests) + + mock_cmd.assert_called_once_with( + expected_cmd, cwd=chroot_path, encoding="utf-8" + ) + + mock_add_links_to_cl.assert_called_once() + + @mock.patch.object(update_packages_and_run_tests, "AddLinksToCL") + @mock.patch.object(subprocess, "check_output") + def testSuccessfullySubmittedRecipeBuilders( + self, mock_cmd, mock_add_links_to_cl + ): + + expected_cmd = [ + "bb", + "add", + "-json", + "-cl", + "crrev.com/c/%s" % 900, + "-cl", + "crrev.com/c/%s" % 1200, + "some_option", + "builder1", + ] + + bb_id = "1234" + create_time = "2020-04-18T00:03:53.978767Z" + + mock_cmd.return_value = json.dumps( + {"id": bb_id, "createTime": create_time} + ) + + chroot_path = "/some/path/to/chroot" + cl = 900 + extra_cls = [1200] + options = ["some_option"] + builders = ["builder1"] + + tests = 
update_packages_and_run_tests.StartRecipeBuilders( + cl, extra_cls, options, builders, chroot_path + ) + + expected_tests = [ + { + "launch_time": create_time, + "link": "http://ci.chromium.org/b/%s" % bb_id, + "buildbucket_id": bb_id, + "extra_cls": extra_cls, + "options": options, + "builder": builders, + } + ] + + self.assertEqual(tests, expected_tests) + + mock_cmd.assert_called_once_with( + expected_cmd, cwd=chroot_path, encoding="utf-8" + ) + + mock_add_links_to_cl.assert_called_once() + + @mock.patch.object(subprocess, "check_output", return_value=None) + def testSuccessfullyAddedTestLinkToCL(self, mock_exec_cmd): + chroot_path = "/abs/path/to/chroot" + + test_cl_number = 1000 + + tests = [{"link": "https://some_tryjob_link.com"}] + + update_packages_and_run_tests.AddLinksToCL( + tests, test_cl_number, chroot_path + ) + + expected_gerrit_message = [ + "%s/chromite/bin/gerrit" % chroot_path, + "message", + str(test_cl_number), + "Started the following tests:\n%s" % tests[0]["link"], + ] + + mock_exec_cmd.assert_called_once_with(expected_gerrit_message) + + @mock.patch.object(update_packages_and_run_tests, "RunTryJobs") + @mock.patch.object(update_chromeos_llvm_hash, "UpdatePackages") + @mock.patch.object(update_packages_and_run_tests, "GetCommandLineArgs") + @mock.patch.object(get_llvm_hash, "GetLLVMHashAndVersionFromSVNOption") + @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True) + @mock.patch.object(chroot, "GetChrootEbuildPaths") + def testUpdatedLastTestedFileWithNewTestedRevision( + self, + mock_get_chroot_build_paths, + mock_outside_chroot, + mock_get_hash_and_version, + mock_get_commandline_args, + mock_update_packages, + mock_run_tryjobs, + ): + + # Create a temporary file to simulate the last tested file that contains a + # revision. 
+ with test_helpers.CreateTemporaryFile() as last_tested_file: + builders = [ + "kevin-llvm-next-toolchain-tryjob", + "eve-llvm-next-toolchain-tryjob", + ] + extra_cls = [10, 1] + tryjob_options = ["latest-toolchain", "hwtest"] + ebuilds = [ + "/path/to/package1/package1-r2.ebuild", + "/path/to/package2/package2-r3.ebuild", + ] + + arg_dict = { + "svn_version": 100, + "ebuilds": ebuilds, + "builders": builders, + "extra_cls": extra_cls, + "tryjob_options": tryjob_options, + } + # Parepared last tested file + with open(last_tested_file, "w") as f: + json.dump(arg_dict, f, indent=2) + + # Call with a changed LLVM svn version + args_output = test_helpers.ArgsOutputTest() + args_output.is_llvm_next = True + args_output.extra_change_lists = extra_cls + args_output.last_tested = last_tested_file + args_output.reviewers = [] + + args_output.subparser_name = "tryjobs" + args_output.builders = builders + args_output.options = tryjob_options + + mock_get_commandline_args.return_value = args_output + + mock_get_chroot_build_paths.return_value = ebuilds + + mock_get_hash_and_version.return_value = ("a123testhash2", 200) + + mock_update_packages.return_value = git.CommitContents( + url="https://some_cl_url.com", cl_number=12345 + ) + + mock_run_tryjobs.return_value = [ + {"link": "https://some_tryjob_url.com", "buildbucket_id": 1234} + ] + + update_packages_and_run_tests.main() - mock_exec_cmd.assert_called_once_with(expected_gerrit_message) - - @mock.patch.object(update_packages_and_run_tests, 'RunTryJobs') - @mock.patch.object(update_chromeos_llvm_hash, 'UpdatePackages') - @mock.patch.object(update_packages_and_run_tests, 'GetCommandLineArgs') - @mock.patch.object(get_llvm_hash, 'GetLLVMHashAndVersionFromSVNOption') - @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True) - @mock.patch.object(chroot, 'GetChrootEbuildPaths') - def testUpdatedLastTestedFileWithNewTestedRevision( - self, mock_get_chroot_build_paths, mock_outside_chroot, - mock_get_hash_and_version, mock_get_commandline_args, - mock_update_packages, mock_run_tryjobs): - - # Create a temporary file to simulate the last tested file that contains a - # revision. - with test_helpers.CreateTemporaryFile() as last_tested_file: - builders = [ - 'kevin-llvm-next-toolchain-tryjob', 'eve-llvm-next-toolchain-tryjob' - ] - extra_cls = [10, 1] - tryjob_options = ['latest-toolchain', 'hwtest'] - ebuilds = [ - '/path/to/package1/package1-r2.ebuild', - '/path/to/package2/package2-r3.ebuild' - ] + # Verify that the lasted tested file has been updated to the new LLVM + # revision. 
+ with open(last_tested_file) as f: + arg_dict = json.load(f) - arg_dict = { - 'svn_version': 100, - 'ebuilds': ebuilds, - 'builders': builders, - 'extra_cls': extra_cls, - 'tryjob_options': tryjob_options - } - # Parepared last tested file - with open(last_tested_file, 'w') as f: - json.dump(arg_dict, f, indent=2) + self.assertEqual(arg_dict["svn_version"], 200) + + mock_outside_chroot.assert_called_once() - # Call with a changed LLVM svn version - args_output = test_helpers.ArgsOutputTest() - args_output.is_llvm_next = True - args_output.extra_change_lists = extra_cls - args_output.last_tested = last_tested_file - args_output.reviewers = [] - - args_output.subparser_name = 'tryjobs' - args_output.builders = builders - args_output.options = tryjob_options - - mock_get_commandline_args.return_value = args_output - - mock_get_chroot_build_paths.return_value = ebuilds - - mock_get_hash_and_version.return_value = ('a123testhash2', 200) - - mock_update_packages.return_value = git.CommitContents( - url='https://some_cl_url.com', cl_number=12345) - - mock_run_tryjobs.return_value = [{ - 'link': 'https://some_tryjob_url.com', - 'buildbucket_id': 1234 - }] - - update_packages_and_run_tests.main() - - # Verify that the lasted tested file has been updated to the new LLVM - # revision. - with open(last_tested_file) as f: - arg_dict = json.load(f) - - self.assertEqual(arg_dict['svn_version'], 200) - - mock_outside_chroot.assert_called_once() - - mock_get_commandline_args.assert_called_once() - - mock_get_hash_and_version.assert_called_once() - - mock_run_tryjobs.assert_called_once() - - mock_update_packages.assert_called_once() + mock_get_commandline_args.assert_called_once() + + mock_get_hash_and_version.assert_called_once() + + mock_run_tryjobs.assert_called_once() + + mock_update_packages.assert_called_once() class UpdatePackagesAndRunTestCQTest(unittest.TestCase): - """Unittests for CQ dry run after updating packages.""" - - def testGetCQDependString(self): - test_no_changelists = [] - test_single_changelist = [1234] - test_multiple_changelists = [1234, 5678] - - self.assertIsNone( - update_packages_and_run_tests.GetCQDependString(test_no_changelists)) - - self.assertEqual( - update_packages_and_run_tests.GetCQDependString( - test_single_changelist), '\nCq-Depend: chromium:1234') - - self.assertEqual( - update_packages_and_run_tests.GetCQDependString( - test_multiple_changelists), - '\nCq-Depend: chromium:1234, chromium:5678') - - def testGetCQIncludeTrybotsString(self): - test_no_trybot = None - test_valid_trybot = 'llvm-next' - test_invalid_trybot = 'invalid-name' - - self.assertIsNone( - update_packages_and_run_tests.GetCQIncludeTrybotsString( - test_no_trybot)) - - self.assertEqual( - update_packages_and_run_tests.GetCQIncludeTrybotsString( - test_valid_trybot), - '\nCq-Include-Trybots:chromeos/cq:cq-llvm-next-orchestrator') - - with self.assertRaises(ValueError) as context: - update_packages_and_run_tests.GetCQIncludeTrybotsString( - test_invalid_trybot) - - self.assertIn('is not a valid llvm trybot', str(context.exception)) - - @mock.patch.object(subprocess, 'check_output', return_value=None) - def testStartCQDryRunNoDeps(self, mock_exec_cmd): - chroot_path = '/abs/path/to/chroot' - test_cl_number = 1000 - - # test with no deps cls. 
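The CQ dry-run tests around here all assert the same shape of gerrit invocation: one `label-cq` call per CL, for the CL under test and each of its dependencies. Under the same `check_output` mocking these tests use, the expectation can be stated compactly (sketch; the chroot path and CL numbers are illustrative):

    from unittest import mock
    import subprocess

    import update_packages_and_run_tests as upt

    with mock.patch.object(subprocess, "check_output", return_value=None) as cmd:
        upt.StartCQDryRun(1000, [2000], "/abs/path/to/chroot")

    gerrit = "/abs/path/to/chroot/chromite/bin/gerrit"
    assert cmd.call_args_list == [
        mock.call([gerrit, "label-cq", "1000", "1"]),  # the CL under test
        mock.call([gerrit, "label-cq", "2000", "1"]),  # its dependency
    ]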
- extra_cls = [] - update_packages_and_run_tests.StartCQDryRun(test_cl_number, extra_cls, - chroot_path) - - expected_gerrit_message = [ - '%s/chromite/bin/gerrit' % chroot_path, 'label-cq', - str(test_cl_number), '1' - ] - - mock_exec_cmd.assert_called_once_with(expected_gerrit_message) - - # Mock ExecCommandAndCaptureOutput for the gerrit command execution. - @mock.patch.object(subprocess, 'check_output', return_value=None) - # test with a single deps cl. - def testStartCQDryRunSingleDep(self, mock_exec_cmd): - chroot_path = '/abs/path/to/chroot' - test_cl_number = 1000 - - extra_cls = [2000] - update_packages_and_run_tests.StartCQDryRun(test_cl_number, extra_cls, - chroot_path) - - expected_gerrit_cmd_1 = [ - '%s/chromite/bin/gerrit' % chroot_path, 'label-cq', - str(test_cl_number), '1' - ] - expected_gerrit_cmd_2 = [ - '%s/chromite/bin/gerrit' % chroot_path, 'label-cq', - str(2000), '1' - ] - - self.assertEqual(mock_exec_cmd.call_count, 2) - self.assertEqual(mock_exec_cmd.call_args_list[0], - mock.call(expected_gerrit_cmd_1)) - self.assertEqual(mock_exec_cmd.call_args_list[1], - mock.call(expected_gerrit_cmd_2)) - - # Mock ExecCommandAndCaptureOutput for the gerrit command execution. - @mock.patch.object(subprocess, 'check_output', return_value=None) - def testStartCQDryRunMultipleDep(self, mock_exec_cmd): - chroot_path = '/abs/path/to/chroot' - test_cl_number = 1000 - - # test with multiple deps cls. - extra_cls = [3000, 4000] - update_packages_and_run_tests.StartCQDryRun(test_cl_number, extra_cls, - chroot_path) - - expected_gerrit_cmd_1 = [ - '%s/chromite/bin/gerrit' % chroot_path, 'label-cq', - str(test_cl_number), '1' - ] - expected_gerrit_cmd_2 = [ - '%s/chromite/bin/gerrit' % chroot_path, 'label-cq', - str(3000), '1' - ] - expected_gerrit_cmd_3 = [ - '%s/chromite/bin/gerrit' % chroot_path, 'label-cq', - str(4000), '1' - ] - - self.assertEqual(mock_exec_cmd.call_count, 3) - self.assertEqual(mock_exec_cmd.call_args_list[0], - mock.call(expected_gerrit_cmd_1)) - self.assertEqual(mock_exec_cmd.call_args_list[1], - mock.call(expected_gerrit_cmd_2)) - self.assertEqual(mock_exec_cmd.call_args_list[2], - mock.call(expected_gerrit_cmd_3)) - - # Mock ExecCommandAndCaptureOutput for the gerrit command execution. - @mock.patch.object(subprocess, 'check_output', return_value=None) - # test with no reviewers. - def testAddReviewersNone(self, mock_exec_cmd): - chroot_path = '/abs/path/to/chroot' - reviewers = [] - test_cl_number = 1000 - - update_packages_and_run_tests.AddReviewers(test_cl_number, reviewers, - chroot_path) - self.assertTrue(mock_exec_cmd.not_called) - - # Mock ExecCommandAndCaptureOutput for the gerrit command execution. - @mock.patch.object(subprocess, 'check_output', return_value=None) - # test with multiple reviewers. 
- def testAddReviewersMultiple(self, mock_exec_cmd): - chroot_path = '/abs/path/to/chroot' - reviewers = ['none1@chromium.org', 'none2@chromium.org'] - test_cl_number = 1000 - - update_packages_and_run_tests.AddReviewers(test_cl_number, reviewers, - chroot_path) - - expected_gerrit_cmd_1 = [ - '%s/chromite/bin/gerrit' % chroot_path, 'reviewers', - str(test_cl_number), 'none1@chromium.org' - ] - expected_gerrit_cmd_2 = [ - '%s/chromite/bin/gerrit' % chroot_path, 'reviewers', - str(test_cl_number), 'none2@chromium.org' - ] - - self.assertEqual(mock_exec_cmd.call_count, 2) - self.assertEqual(mock_exec_cmd.call_args_list[0], - mock.call(expected_gerrit_cmd_1)) - self.assertEqual(mock_exec_cmd.call_args_list[1], - mock.call(expected_gerrit_cmd_2)) - - -if __name__ == '__main__': - unittest.main() + """Unittests for CQ dry run after updating packages.""" + + def testGetCQDependString(self): + test_no_changelists = [] + test_single_changelist = [1234] + test_multiple_changelists = [1234, 5678] + + self.assertIsNone( + update_packages_and_run_tests.GetCQDependString(test_no_changelists) + ) + + self.assertEqual( + update_packages_and_run_tests.GetCQDependString( + test_single_changelist + ), + "\nCq-Depend: chromium:1234", + ) + + self.assertEqual( + update_packages_and_run_tests.GetCQDependString( + test_multiple_changelists + ), + "\nCq-Depend: chromium:1234, chromium:5678", + ) + + def testGetCQIncludeTrybotsString(self): + test_no_trybot = None + test_valid_trybot = "llvm-next" + test_invalid_trybot = "invalid-name" + + self.assertIsNone( + update_packages_and_run_tests.GetCQIncludeTrybotsString( + test_no_trybot + ) + ) + + self.assertEqual( + update_packages_and_run_tests.GetCQIncludeTrybotsString( + test_valid_trybot + ), + "\nCq-Include-Trybots:chromeos/cq:cq-llvm-next-orchestrator", + ) + + with self.assertRaises(ValueError) as context: + update_packages_and_run_tests.GetCQIncludeTrybotsString( + test_invalid_trybot + ) + + self.assertIn("is not a valid llvm trybot", str(context.exception)) + + @mock.patch.object(subprocess, "check_output", return_value=None) + def testStartCQDryRunNoDeps(self, mock_exec_cmd): + chroot_path = "/abs/path/to/chroot" + test_cl_number = 1000 + + # test with no deps cls. + extra_cls = [] + update_packages_and_run_tests.StartCQDryRun( + test_cl_number, extra_cls, chroot_path + ) + + expected_gerrit_message = [ + "%s/chromite/bin/gerrit" % chroot_path, + "label-cq", + str(test_cl_number), + "1", + ] + + mock_exec_cmd.assert_called_once_with(expected_gerrit_message) + + # Mock ExecCommandAndCaptureOutput for the gerrit command execution. + @mock.patch.object(subprocess, "check_output", return_value=None) + # test with a single deps cl. + def testStartCQDryRunSingleDep(self, mock_exec_cmd): + chroot_path = "/abs/path/to/chroot" + test_cl_number = 1000 + + extra_cls = [2000] + update_packages_and_run_tests.StartCQDryRun( + test_cl_number, extra_cls, chroot_path + ) + + expected_gerrit_cmd_1 = [ + "%s/chromite/bin/gerrit" % chroot_path, + "label-cq", + str(test_cl_number), + "1", + ] + expected_gerrit_cmd_2 = [ + "%s/chromite/bin/gerrit" % chroot_path, + "label-cq", + str(2000), + "1", + ] + + self.assertEqual(mock_exec_cmd.call_count, 2) + self.assertEqual( + mock_exec_cmd.call_args_list[0], mock.call(expected_gerrit_cmd_1) + ) + self.assertEqual( + mock_exec_cmd.call_args_list[1], mock.call(expected_gerrit_cmd_2) + ) + + # Mock ExecCommandAndCaptureOutput for the gerrit command execution. 
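+    # Patching subprocess.check_output keeps these tests hermetic: the
+    # helper would otherwise shell out to the chromite `gerrit` tool.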
+    @mock.patch.object(subprocess, "check_output", return_value=None)
+    def testStartCQDryRunMultipleDep(self, mock_exec_cmd):
+        chroot_path = "/abs/path/to/chroot"
+        test_cl_number = 1000
+
+        # test with multiple deps cls.
+        extra_cls = [3000, 4000]
+        update_packages_and_run_tests.StartCQDryRun(
+            test_cl_number, extra_cls, chroot_path
+        )
+
+        expected_gerrit_cmd_1 = [
+            "%s/chromite/bin/gerrit" % chroot_path,
+            "label-cq",
+            str(test_cl_number),
+            "1",
+        ]
+        expected_gerrit_cmd_2 = [
+            "%s/chromite/bin/gerrit" % chroot_path,
+            "label-cq",
+            str(3000),
+            "1",
+        ]
+        expected_gerrit_cmd_3 = [
+            "%s/chromite/bin/gerrit" % chroot_path,
+            "label-cq",
+            str(4000),
+            "1",
+        ]
+
+        self.assertEqual(mock_exec_cmd.call_count, 3)
+        self.assertEqual(
+            mock_exec_cmd.call_args_list[0], mock.call(expected_gerrit_cmd_1)
+        )
+        self.assertEqual(
+            mock_exec_cmd.call_args_list[1], mock.call(expected_gerrit_cmd_2)
+        )
+        self.assertEqual(
+            mock_exec_cmd.call_args_list[2], mock.call(expected_gerrit_cmd_3)
+        )
+
+    # Mock ExecCommandAndCaptureOutput for the gerrit command execution.
+    @mock.patch.object(subprocess, "check_output", return_value=None)
+    # test with no reviewers.
+    def testAddReviewersNone(self, mock_exec_cmd):
+        chroot_path = "/abs/path/to/chroot"
+        reviewers = []
+        test_cl_number = 1000
+
+        update_packages_and_run_tests.AddReviewers(
+            test_cl_number, reviewers, chroot_path
+        )
+        mock_exec_cmd.assert_not_called()
+
+    # Mock ExecCommandAndCaptureOutput for the gerrit command execution.
+    @mock.patch.object(subprocess, "check_output", return_value=None)
+    # test with multiple reviewers.
+    def testAddReviewersMultiple(self, mock_exec_cmd):
+        chroot_path = "/abs/path/to/chroot"
+        reviewers = ["none1@chromium.org", "none2@chromium.org"]
+        test_cl_number = 1000
+
+        update_packages_and_run_tests.AddReviewers(
+            test_cl_number, reviewers, chroot_path
+        )
+
+        expected_gerrit_cmd_1 = [
+            "%s/chromite/bin/gerrit" % chroot_path,
+            "reviewers",
+            str(test_cl_number),
+            "none1@chromium.org",
+        ]
+        expected_gerrit_cmd_2 = [
+            "%s/chromite/bin/gerrit" % chroot_path,
+            "reviewers",
+            str(test_cl_number),
+            "none2@chromium.org",
+        ]
+
+        self.assertEqual(mock_exec_cmd.call_count, 2)
+        self.assertEqual(
+            mock_exec_cmd.call_args_list[0], mock.call(expected_gerrit_cmd_1)
+        )
+        self.assertEqual(
+            mock_exec_cmd.call_args_list[1], mock.call(expected_gerrit_cmd_2)
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/llvm_tools/update_tryjob_status.py b/llvm_tools/update_tryjob_status.py
index 43901e8e..ea7fe9c0 100755
--- a/llvm_tools/update_tryjob_status.py
+++ b/llvm_tools/update_tryjob_status.py
@@ -20,249 +20,291 @@ from test_helpers import CreateTemporaryJsonFile


 class TryjobStatus(enum.Enum):
-  """Values for the 'status' field of a tryjob."""
+    """Values for the 'status' field of a tryjob."""

-  GOOD = 'good'
-  BAD = 'bad'
-  PENDING = 'pending'
-  SKIP = 'skip'
+    GOOD = "good"
+    BAD = "bad"
+    PENDING = "pending"
+    SKIP = "skip"

-  # Executes the script passed into the command line (this script's exit code
-  # determines the 'status' value of the tryjob).
-  CUSTOM_SCRIPT = 'custom_script'
+    # Executes the script passed into the command line (this script's exit code
+    # determines the 'status' value of the tryjob).
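+    # A hypothetical invocation that exercises this mode could look like:
+    #   update_tryjob_status.py --revision 369411 --set_status custom_script \
+    #     --custom_script /abs/path/to/check.py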
+    CUSTOM_SCRIPT = "custom_script"


 class CustomScriptStatus(enum.Enum):
-  """Exit code values of a custom script."""
+    """Exit code values of a custom script."""

-  # NOTE: Not using 1 for 'bad' because the custom script can raise an
-  # exception which would cause the exit code of the script to be 1, so the
-  # tryjob's 'status' would be updated when there is an exception.
-  #
-  # Exit codes are as follows:
-  # 0: 'good'
-  # 124: 'bad'
-  # 125: 'skip'
-  GOOD = 0
-  BAD = 124
-  SKIP = 125
+    # NOTE: Not using 1 for 'bad' because the custom script can raise an
+    # exception which would cause the exit code of the script to be 1, so the
+    # tryjob's 'status' would be updated when there is an exception.
+    #
+    # Exit codes are as follows:
+    # 0: 'good'
+    # 124: 'bad'
+    # 125: 'skip'
+    GOOD = 0
+    BAD = 124
+    SKIP = 125


 custom_script_exit_value_mapping = {
     CustomScriptStatus.GOOD.value: TryjobStatus.GOOD.value,
     CustomScriptStatus.BAD.value: TryjobStatus.BAD.value,
-    CustomScriptStatus.SKIP.value: TryjobStatus.SKIP.value
+    CustomScriptStatus.SKIP.value: TryjobStatus.SKIP.value,
 }


 def GetCommandLineArgs():
-  """Parses the command line for the command line arguments."""
-
-  # Default absoute path to the chroot if not specified.
-  cros_root = os.path.expanduser('~')
-  cros_root = os.path.join(cros_root, 'chromiumos')
-
-  # Create parser and add optional command-line arguments.
-  parser = argparse.ArgumentParser(
-      description='Updates the status of a tryjob.')
-
-  # Add argument for the JSON file to use for the update of a tryjob.
-  parser.add_argument(
-      '--status_file',
-      required=True,
-      help='The absolute path to the JSON file that contains the tryjobs used '
-      'for bisecting LLVM.')
-
-  # Add argument that sets the 'status' field to that value.
-  parser.add_argument(
-      '--set_status',
-      required=True,
-      choices=[tryjob_status.value for tryjob_status in TryjobStatus],
-      help='Sets the "status" field of the tryjob.')
-
-  # Add argument that determines which revision to search for in the list of
-  # tryjobs.
-  parser.add_argument('--revision',
-                      required=True,
-                      type=int,
-                      help='The revision to set its status.')
-
-  # Add argument for the custom script to execute for the 'custom_script'
-  # option in '--set_status'.
-  parser.add_argument(
-      '--custom_script',
-      help='The absolute path to the custom script to execute (its exit code '
-      'should be %d for "good", %d for "bad", or %d for "skip")' %
-      (CustomScriptStatus.GOOD.value, CustomScriptStatus.BAD.value,
-       CustomScriptStatus.SKIP.value))
-
-  args_output = parser.parse_args()
-
-  if not (os.path.isfile(args_output.status_file
-                         and not args_output.status_file.endswith('.json'))):
-    raise ValueError('File does not exist or does not ending in ".json" '
-                     ': %s' % args_output.status_file)
-
-  if (args_output.set_status == TryjobStatus.CUSTOM_SCRIPT.value
-      and not args_output.custom_script):
-    raise ValueError('Please provide the absolute path to the script to '
-                     'execute.')
-
-  return args_output
+    """Parses the command line for the command line arguments."""
+
+    # Default absolute path to the chroot if not specified.
+    cros_root = os.path.expanduser("~")
+    cros_root = os.path.join(cros_root, "chromiumos")
+
+    # Create parser and add optional command-line arguments.
+    parser = argparse.ArgumentParser(
+        description="Updates the status of a tryjob."
+    )
+
+    # Add argument for the JSON file to use for the update of a tryjob.
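+    # (e.g. a hypothetical --status_file /abs/path/to/llvm_bisect_tryjobs.json)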
+    parser.add_argument(
+        "--status_file",
+        required=True,
+        help="The absolute path to the JSON file that contains the tryjobs used "
+        "for bisecting LLVM.",
+    )
+
+    # Add argument that sets the 'status' field to that value.
+    parser.add_argument(
+        "--set_status",
+        required=True,
+        choices=[tryjob_status.value for tryjob_status in TryjobStatus],
+        help='Sets the "status" field of the tryjob.',
+    )
+
+    # Add argument that determines which revision to search for in the list of
+    # tryjobs.
+    parser.add_argument(
+        "--revision",
+        required=True,
+        type=int,
+        help="The revision whose status to set.",
+    )
+
+    # Add argument for the custom script to execute for the 'custom_script'
+    # option in '--set_status'.
+    parser.add_argument(
+        "--custom_script",
+        help="The absolute path to the custom script to execute (its exit code "
+        'should be %d for "good", %d for "bad", or %d for "skip")'
+        % (
+            CustomScriptStatus.GOOD.value,
+            CustomScriptStatus.BAD.value,
+            CustomScriptStatus.SKIP.value,
+        ),
+    )
+
+    args_output = parser.parse_args()
+
+    if not (
+        os.path.isfile(args_output.status_file)
+        and args_output.status_file.endswith(".json")
+    ):
+        raise ValueError(
+            'File does not exist or does not end in ".json": %s'
+            % args_output.status_file
+        )
+
+    if (
+        args_output.set_status == TryjobStatus.CUSTOM_SCRIPT.value
+        and not args_output.custom_script
+    ):
+        raise ValueError(
+            "Please provide the absolute path to the script to execute."
+        )
+
+    return args_output


 def FindTryjobIndex(revision, tryjobs_list):
-  """Searches the list of tryjob dictionaries to find 'revision'.
+    """Searches the list of tryjob dictionaries to find 'revision'.

-  Uses the key 'rev' for each dictionary and compares the value against
-  'revision.'
+    Uses the key 'rev' for each dictionary and compares the value against
+    'revision.'

-  Args:
-    revision: The revision to search for in the tryjobs.
-    tryjobs_list: A list of tryjob dictionaries of the format:
-    {
-      'rev' : [REVISION],
-      'url' : [URL_OF_CL],
-      'cl' : [CL_NUMBER],
-      'link' : [TRYJOB_LINK],
-      'status' : [TRYJOB_STATUS],
-      'buildbucket_id': [BUILDBUCKET_ID]
-    }
+    Args:
+      revision: The revision to search for in the tryjobs.
+      tryjobs_list: A list of tryjob dictionaries of the format:
+      {
+        'rev' : [REVISION],
+        'url' : [URL_OF_CL],
+        'cl' : [CL_NUMBER],
+        'link' : [TRYJOB_LINK],
+        'status' : [TRYJOB_STATUS],
+        'buildbucket_id': [BUILDBUCKET_ID]
+      }

-  Returns:
-    The index within the list or None to indicate it was not found.
-  """
+    Returns:
+      The index within the list or None to indicate it was not found.
+    """

-  for cur_index, cur_tryjob_dict in enumerate(tryjobs_list):
-    if cur_tryjob_dict['rev'] == revision:
-      return cur_index
+    for cur_index, cur_tryjob_dict in enumerate(tryjobs_list):
+        if cur_tryjob_dict["rev"] == revision:
+            return cur_index

-  return None
+    return None


 def GetCustomScriptResult(custom_script, status_file, tryjob_contents):
-  """Returns the conversion of the exit code of the custom script.
-
-  Args:
-    custom_script: Absolute path to the script to be executed.
-    status_file: Absolute path to the file that contains information about the
-    bisection of LLVM.
-    tryjob_contents: A dictionary of the contents of the tryjob (e.g. 'status',
-    'url', 'link', 'buildbucket_id', etc.).
-
-  Returns:
-    The exit code conversion to either return 'good', 'bad', or 'skip'.
-
-  Raises:
-    ValueError: The custom script failed to provide the correct exit code.
- """ - - # Create a temporary file to write the contents of the tryjob at index - # 'tryjob_index' (the temporary file path will be passed into the custom - # script as a command line argument). - with CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as tryjob_file: - json.dump(tryjob_contents, tryjob_file, indent=4, separators=(',', ': ')) - - exec_script_cmd = [custom_script, temp_json_file] - - # Execute the custom script to get the exit code. - exec_script_cmd_obj = subprocess.Popen(exec_script_cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - _, stderr = exec_script_cmd_obj.communicate() - - # Invalid exit code by the custom script. - if exec_script_cmd_obj.returncode not in custom_script_exit_value_mapping: - # Save the .JSON file to the directory of 'status_file'. - name_of_json_file = os.path.join(os.path.dirname(status_file), - os.path.basename(temp_json_file)) - - os.rename(temp_json_file, name_of_json_file) - - raise ValueError( - 'Custom script %s exit code %d did not match ' - 'any of the expected exit codes: %d for "good", %d ' - 'for "bad", or %d for "skip".\nPlease check %s for information ' - 'about the tryjob: %s' % - (custom_script, exec_script_cmd_obj.returncode, - CustomScriptStatus.GOOD.value, CustomScriptStatus.BAD.value, - CustomScriptStatus.SKIP.value, name_of_json_file, stderr)) - - return custom_script_exit_value_mapping[exec_script_cmd_obj.returncode] + """Returns the conversion of the exit code of the custom script. + + Args: + custom_script: Absolute path to the script to be executed. + status_file: Absolute path to the file that contains information about the + bisection of LLVM. + tryjob_contents: A dictionary of the contents of the tryjob (e.g. 'status', + 'url', 'link', 'buildbucket_id', etc.). + + Returns: + The exit code conversion to either return 'good', 'bad', or 'skip'. + + Raises: + ValueError: The custom script failed to provide the correct exit code. + """ + + # Create a temporary file to write the contents of the tryjob at index + # 'tryjob_index' (the temporary file path will be passed into the custom + # script as a command line argument). + with CreateTemporaryJsonFile() as temp_json_file: + with open(temp_json_file, "w") as tryjob_file: + json.dump( + tryjob_contents, tryjob_file, indent=4, separators=(",", ": ") + ) + + exec_script_cmd = [custom_script, temp_json_file] + + # Execute the custom script to get the exit code. + exec_script_cmd_obj = subprocess.Popen( + exec_script_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + _, stderr = exec_script_cmd_obj.communicate() + + # Invalid exit code by the custom script. + if ( + exec_script_cmd_obj.returncode + not in custom_script_exit_value_mapping + ): + # Save the .JSON file to the directory of 'status_file'. 
+            name_of_json_file = os.path.join(
+                os.path.dirname(status_file), os.path.basename(temp_json_file)
+            )
+
+            os.rename(temp_json_file, name_of_json_file)
+
+            raise ValueError(
+                "Custom script %s exit code %d did not match "
+                'any of the expected exit codes: %d for "good", %d '
+                'for "bad", or %d for "skip".\nPlease check %s for information '
+                "about the tryjob: %s"
+                % (
+                    custom_script,
+                    exec_script_cmd_obj.returncode,
+                    CustomScriptStatus.GOOD.value,
+                    CustomScriptStatus.BAD.value,
+                    CustomScriptStatus.SKIP.value,
+                    name_of_json_file,
+                    stderr,
+                )
+            )
+
+    return custom_script_exit_value_mapping[exec_script_cmd_obj.returncode]


 def UpdateTryjobStatus(revision, set_status, status_file, custom_script):
-  """Updates a tryjob's 'status' field based off of 'set_status'.
-
-  Args:
-    revision: The revision associated with the tryjob.
-    set_status: What to update the 'status' field to.
-      Ex: TryjobStatus.Good, TryjobStatus.BAD, TryjobStatus.PENDING, or
-      TryjobStatus.
-    status_file: The .JSON file that contains the tryjobs.
-    custom_script: The absolute path to a script that will be executed which
-      will determine the 'status' value of the tryjob.
-  """
-
-  # Format of 'bisect_contents':
-  # {
-  #   'start': [START_REVISION_OF_BISECTION]
-  #   'end': [END_REVISION_OF_BISECTION]
-  #   'jobs' : [
-  #       {[TRYJOB_INFORMATION]},
-  #       {[TRYJOB_INFORMATION]},
-  #       ...,
-  #       {[TRYJOB_INFORMATION]}
-  #   ]
-  # }
-  with open(status_file) as tryjobs:
-    bisect_contents = json.load(tryjobs)
-
-  if not bisect_contents['jobs']:
-    sys.exit('No tryjobs in %s' % status_file)
-
-  tryjob_index = FindTryjobIndex(revision, bisect_contents['jobs'])
-
-  # 'FindTryjobIndex()' returns None if the revision was not found.
-  if tryjob_index is None:
-    raise ValueError('Unable to find tryjob for %d in %s' %
-                     (revision, status_file))
-
-  # Set 'status' depending on 'set_status' for the tryjob.
-  if set_status == TryjobStatus.GOOD:
-    bisect_contents['jobs'][tryjob_index]['status'] = TryjobStatus.GOOD.value
-  elif set_status == TryjobStatus.BAD:
-    bisect_contents['jobs'][tryjob_index]['status'] = TryjobStatus.BAD.value
-  elif set_status == TryjobStatus.PENDING:
-    bisect_contents['jobs'][tryjob_index][
-        'status'] = TryjobStatus.PENDING.value
-  elif set_status == TryjobStatus.SKIP:
-    bisect_contents['jobs'][tryjob_index]['status'] = TryjobStatus.SKIP.value
-  elif set_status == TryjobStatus.CUSTOM_SCRIPT:
-    bisect_contents['jobs'][tryjob_index]['status'] = GetCustomScriptResult(
-        custom_script, status_file, bisect_contents['jobs'][tryjob_index])
-  else:
-    raise ValueError('Invalid "set_status" option provided: %s' % set_status)
-
-  with open(status_file, 'w') as update_tryjobs:
-    json.dump(bisect_contents,
-              update_tryjobs,
-              indent=4,
-              separators=(',', ': '))
+    """Updates a tryjob's 'status' field based on 'set_status'.
+
+    Args:
+      revision: The revision associated with the tryjob.
+      set_status: What to update the 'status' field to.
+        Ex: TryjobStatus.GOOD, TryjobStatus.BAD, TryjobStatus.PENDING, or
+        TryjobStatus.
+      status_file: The .JSON file that contains the tryjobs.
+      custom_script: The absolute path to a script that will be executed which
+        will determine the 'status' value of the tryjob.
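+
+    Raises:
+      ValueError: The tryjob for 'revision' could not be found in
+        'status_file', or 'set_status' is not a recognized status.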
+ """ + + # Format of 'bisect_contents': + # { + # 'start': [START_REVISION_OF_BISECTION] + # 'end': [END_REVISION_OF_BISECTION] + # 'jobs' : [ + # {[TRYJOB_INFORMATION]}, + # {[TRYJOB_INFORMATION]}, + # ..., + # {[TRYJOB_INFORMATION]} + # ] + # } + with open(status_file) as tryjobs: + bisect_contents = json.load(tryjobs) + + if not bisect_contents["jobs"]: + sys.exit("No tryjobs in %s" % status_file) + + tryjob_index = FindTryjobIndex(revision, bisect_contents["jobs"]) + + # 'FindTryjobIndex()' returns None if the revision was not found. + if tryjob_index is None: + raise ValueError( + "Unable to find tryjob for %d in %s" % (revision, status_file) + ) + + # Set 'status' depending on 'set_status' for the tryjob. + if set_status == TryjobStatus.GOOD: + bisect_contents["jobs"][tryjob_index][ + "status" + ] = TryjobStatus.GOOD.value + elif set_status == TryjobStatus.BAD: + bisect_contents["jobs"][tryjob_index]["status"] = TryjobStatus.BAD.value + elif set_status == TryjobStatus.PENDING: + bisect_contents["jobs"][tryjob_index][ + "status" + ] = TryjobStatus.PENDING.value + elif set_status == TryjobStatus.SKIP: + bisect_contents["jobs"][tryjob_index][ + "status" + ] = TryjobStatus.SKIP.value + elif set_status == TryjobStatus.CUSTOM_SCRIPT: + bisect_contents["jobs"][tryjob_index]["status"] = GetCustomScriptResult( + custom_script, status_file, bisect_contents["jobs"][tryjob_index] + ) + else: + raise ValueError( + 'Invalid "set_status" option provided: %s' % set_status + ) + + with open(status_file, "w") as update_tryjobs: + json.dump( + bisect_contents, update_tryjobs, indent=4, separators=(",", ": ") + ) def main(): - """Updates the status of a tryjob.""" + """Updates the status of a tryjob.""" - chroot.VerifyOutsideChroot() + chroot.VerifyOutsideChroot() - args_output = GetCommandLineArgs() + args_output = GetCommandLineArgs() - UpdateTryjobStatus(args_output.revision, - TryjobStatus(args_output.set_status), - args_output.status_file, args_output.custom_script) + UpdateTryjobStatus( + args_output.revision, + TryjobStatus(args_output.set_status), + args_output.status_file, + args_output.custom_script, + ) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/llvm_tools/update_tryjob_status_unittest.py b/llvm_tools/update_tryjob_status_unittest.py index 8487e6f6..b6fc59c8 100755 --- a/llvm_tools/update_tryjob_status_unittest.py +++ b/llvm_tools/update_tryjob_status_unittest.py @@ -16,463 +16,522 @@ import unittest.mock as mock from test_helpers import CreateTemporaryJsonFile from test_helpers import WritePrettyJsonFile -from update_tryjob_status import TryjobStatus -from update_tryjob_status import CustomScriptStatus import update_tryjob_status +from update_tryjob_status import CustomScriptStatus +from update_tryjob_status import TryjobStatus class UpdateTryjobStatusTest(unittest.TestCase): - """Unittests for updating a tryjob's 'status'.""" - - def testFoundTryjobIndex(self): - test_tryjobs = [{ - 'rev': 123, - 'url': 'https://some_url_to_CL.com', - 'cl': 'https://some_link_to_tryjob.com', - 'status': 'good', - 'buildbucket_id': 91835 - }, { - 'rev': 1000, - 'url': 'https://some_url_to_CL.com', - 'cl': 'https://some_link_to_tryjob.com', - 'status': 'pending', - 'buildbucket_id': 10931 - }] - - expected_index = 0 - - revision_to_find = 123 - - self.assertEqual( - update_tryjob_status.FindTryjobIndex(revision_to_find, test_tryjobs), - expected_index) - - def testNotFindTryjobIndex(self): - test_tryjobs = [{ - 'rev': 500, - 'url': 'https://some_url_to_CL.com', - 
'cl': 'https://some_link_to_tryjob.com', - 'status': 'bad', - 'buildbucket_id': 390 - }, { - 'rev': 10, - 'url': 'https://some_url_to_CL.com', - 'cl': 'https://some_link_to_tryjob.com', - 'status': 'skip', - 'buildbucket_id': 10 - }] - - revision_to_find = 250 - - self.assertIsNone( - update_tryjob_status.FindTryjobIndex(revision_to_find, test_tryjobs)) - - @mock.patch.object(subprocess, 'Popen') - # Simulate the behavior of `os.rename()` when successfully renamed a file. - @mock.patch.object(os, 'rename', return_value=None) - # Simulate the behavior of `os.path.basename()` when successfully retrieved - # the basename of the temp .JSON file. - @mock.patch.object(os.path, 'basename', return_value='tmpFile.json') - def testInvalidExitCodeByCustomScript(self, mock_basename, mock_rename_file, - mock_exec_custom_script): - - error_message_by_custom_script = 'Failed to parse .JSON file' - - # Simulate the behavior of 'subprocess.Popen()' when executing the custom - # script. - # - # `Popen.communicate()` returns a tuple of `stdout` and `stderr`. - mock_exec_custom_script.return_value.communicate.return_value = ( - None, error_message_by_custom_script) - - # Exit code of 1 is not in the mapping, so an exception will be raised. - custom_script_exit_code = 1 - - mock_exec_custom_script.return_value.returncode = custom_script_exit_code - - tryjob_contents = { - 'status': 'good', - 'rev': 1234, - 'url': 'https://some_url_to_CL.com', - 'link': 'https://some_url_to_tryjob.com' - } - - custom_script_path = '/abs/path/to/script.py' - status_file_path = '/abs/path/to/status_file.json' - - name_json_file = os.path.join(os.path.dirname(status_file_path), - 'tmpFile.json') - - expected_error_message = ( - 'Custom script %s exit code %d did not match ' - 'any of the expected exit codes: %s for "good", ' - '%d for "bad", or %d for "skip".\nPlease check ' - '%s for information about the tryjob: %s' % - (custom_script_path, custom_script_exit_code, - CustomScriptStatus.GOOD.value, CustomScriptStatus.BAD.value, - CustomScriptStatus.SKIP.value, name_json_file, - error_message_by_custom_script)) - - # Verify the exception is raised when the exit code by the custom script - # does not match any of the exit codes in the mapping of - # `custom_script_exit_value_mapping`. - with self.assertRaises(ValueError) as err: - update_tryjob_status.GetCustomScriptResult(custom_script_path, - status_file_path, - tryjob_contents) - - self.assertEqual(str(err.exception), expected_error_message) - - mock_exec_custom_script.assert_called_once() - - mock_rename_file.assert_called_once() - - mock_basename.assert_called_once() - - @mock.patch.object(subprocess, 'Popen') - # Simulate the behavior of `os.rename()` when successfully renamed a file. - @mock.patch.object(os, 'rename', return_value=None) - # Simulate the behavior of `os.path.basename()` when successfully retrieved - # the basename of the temp .JSON file. - @mock.patch.object(os.path, 'basename', return_value='tmpFile.json') - def testValidExitCodeByCustomScript(self, mock_basename, mock_rename_file, - mock_exec_custom_script): - - # Simulate the behavior of 'subprocess.Popen()' when executing the custom - # script. - # - # `Popen.communicate()` returns a tuple of `stdout` and `stderr`. 
- mock_exec_custom_script.return_value.communicate.return_value = (None, - None) - - mock_exec_custom_script.return_value.returncode = ( - CustomScriptStatus.GOOD.value) - - tryjob_contents = { - 'status': 'good', - 'rev': 1234, - 'url': 'https://some_url_to_CL.com', - 'link': 'https://some_url_to_tryjob.com' - } - - custom_script_path = '/abs/path/to/script.py' - status_file_path = '/abs/path/to/status_file.json' - - self.assertEqual( - update_tryjob_status.GetCustomScriptResult(custom_script_path, - status_file_path, - tryjob_contents), - TryjobStatus.GOOD.value) - - mock_exec_custom_script.assert_called_once() - - mock_rename_file.assert_not_called() - - mock_basename.assert_not_called() - - def testNoTryjobsInStatusFileWhenUpdatingTryjobStatus(self): - bisect_test_contents = {'start': 369410, 'end': 369420, 'jobs': []} - - # Create a temporary .JSON file to simulate a .JSON file that has bisection - # contents. - with CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - WritePrettyJsonFile(bisect_test_contents, f) - - revision_to_update = 369412 - - custom_script = None - - # Verify the exception is raised when the `status_file` does not have any - # `jobs` (empty). - with self.assertRaises(SystemExit) as err: - update_tryjob_status.UpdateTryjobStatus(revision_to_update, - TryjobStatus.GOOD, - temp_json_file, custom_script) - - self.assertEqual(str(err.exception), 'No tryjobs in %s' % temp_json_file) - - # Simulate the behavior of `FindTryjobIndex()` when the tryjob does not exist - # in the status file. - @mock.patch.object(update_tryjob_status, - 'FindTryjobIndex', - return_value=None) - def testNotFindTryjobIndexWhenUpdatingTryjobStatus(self, - mock_find_tryjob_index): - - bisect_test_contents = { - 'start': 369410, - 'end': 369420, - 'jobs': [{ - 'rev': 369411, - 'status': 'pending' - }] - } - - # Create a temporary .JSON file to simulate a .JSON file that has bisection - # contents. - with CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - WritePrettyJsonFile(bisect_test_contents, f) - - revision_to_update = 369416 - - custom_script = None - - # Verify the exception is raised when the `status_file` does not have any - # `jobs` (empty). - with self.assertRaises(ValueError) as err: - update_tryjob_status.UpdateTryjobStatus(revision_to_update, - TryjobStatus.SKIP, - temp_json_file, custom_script) - - self.assertEqual( - str(err.exception), 'Unable to find tryjob for %d in %s' % - (revision_to_update, temp_json_file)) - - mock_find_tryjob_index.assert_called_once() - - # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the - # status file. - @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0) - def testSuccessfullyUpdatedTryjobStatusToGood(self, mock_find_tryjob_index): - bisect_test_contents = { - 'start': 369410, - 'end': 369420, - 'jobs': [{ - 'rev': 369411, - 'status': 'pending' - }] - } - - # Create a temporary .JSON file to simulate a .JSON file that has bisection - # contents. - with CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - WritePrettyJsonFile(bisect_test_contents, f) - - revision_to_update = 369411 - - # Index of the tryjob that is going to have its 'status' value updated. - tryjob_index = 0 - - custom_script = None - - update_tryjob_status.UpdateTryjobStatus(revision_to_update, - TryjobStatus.GOOD, - temp_json_file, custom_script) - - # Verify that the tryjob's 'status' has been updated in the status file. 
- with open(temp_json_file) as status_file: - bisect_contents = json.load(status_file) - - self.assertEqual(bisect_contents['jobs'][tryjob_index]['status'], - TryjobStatus.GOOD.value) - - mock_find_tryjob_index.assert_called_once() - - # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the - # status file. - @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0) - def testSuccessfullyUpdatedTryjobStatusToBad(self, mock_find_tryjob_index): - bisect_test_contents = { - 'start': 369410, - 'end': 369420, - 'jobs': [{ - 'rev': 369411, - 'status': 'pending' - }] - } - - # Create a temporary .JSON file to simulate a .JSON file that has bisection - # contents. - with CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - WritePrettyJsonFile(bisect_test_contents, f) - - revision_to_update = 369411 - - # Index of the tryjob that is going to have its 'status' value updated. - tryjob_index = 0 - - custom_script = None - - update_tryjob_status.UpdateTryjobStatus(revision_to_update, - TryjobStatus.BAD, temp_json_file, - custom_script) - - # Verify that the tryjob's 'status' has been updated in the status file. - with open(temp_json_file) as status_file: - bisect_contents = json.load(status_file) - - self.assertEqual(bisect_contents['jobs'][tryjob_index]['status'], - TryjobStatus.BAD.value) - - mock_find_tryjob_index.assert_called_once() - - # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the - # status file. - @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0) - def testSuccessfullyUpdatedTryjobStatusToPending(self, - mock_find_tryjob_index): - bisect_test_contents = { - 'start': 369410, - 'end': 369420, - 'jobs': [{ - 'rev': 369411, - 'status': 'skip' - }] - } - - # Create a temporary .JSON file to simulate a .JSON file that has bisection - # contents. - with CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - WritePrettyJsonFile(bisect_test_contents, f) - - revision_to_update = 369411 - - # Index of the tryjob that is going to have its 'status' value updated. - tryjob_index = 0 - - custom_script = None - - update_tryjob_status.UpdateTryjobStatus( - revision_to_update, update_tryjob_status.TryjobStatus.SKIP, - temp_json_file, custom_script) - - # Verify that the tryjob's 'status' has been updated in the status file. - with open(temp_json_file) as status_file: - bisect_contents = json.load(status_file) - - self.assertEqual(bisect_contents['jobs'][tryjob_index]['status'], - update_tryjob_status.TryjobStatus.SKIP.value) - - mock_find_tryjob_index.assert_called_once() - - # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the - # status file. - @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0) - def testSuccessfullyUpdatedTryjobStatusToSkip(self, mock_find_tryjob_index): - bisect_test_contents = { - 'start': 369410, - 'end': 369420, - 'jobs': [{ - 'rev': 369411, - 'status': 'pending', - }] - } - - # Create a temporary .JSON file to simulate a .JSON file that has bisection - # contents. - with CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - WritePrettyJsonFile(bisect_test_contents, f) - - revision_to_update = 369411 - - # Index of the tryjob that is going to have its 'status' value updated. 
- tryjob_index = 0 - - custom_script = None - - update_tryjob_status.UpdateTryjobStatus( - revision_to_update, update_tryjob_status.TryjobStatus.PENDING, - temp_json_file, custom_script) - - # Verify that the tryjob's 'status' has been updated in the status file. - with open(temp_json_file) as status_file: - bisect_contents = json.load(status_file) - - self.assertEqual(bisect_contents['jobs'][tryjob_index]['status'], - update_tryjob_status.TryjobStatus.PENDING.value) - - mock_find_tryjob_index.assert_called_once() - - @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0) - @mock.patch.object(update_tryjob_status, - 'GetCustomScriptResult', - return_value=TryjobStatus.SKIP.value) - def testUpdatedTryjobStatusToAutoPassedWithCustomScript( - self, mock_get_custom_script_result, mock_find_tryjob_index): - bisect_test_contents = { - 'start': 369410, - 'end': 369420, - 'jobs': [{ - 'rev': 369411, - 'status': 'pending', - 'buildbucket_id': 1200 - }] - } - - # Create a temporary .JSON file to simulate a .JSON file that has bisection - # contents. - with CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - WritePrettyJsonFile(bisect_test_contents, f) - - revision_to_update = 369411 - - # Index of the tryjob that is going to have its 'status' value updated. - tryjob_index = 0 - - custom_script_path = '/abs/path/to/custom_script.py' - - update_tryjob_status.UpdateTryjobStatus( - revision_to_update, update_tryjob_status.TryjobStatus.CUSTOM_SCRIPT, - temp_json_file, custom_script_path) - - # Verify that the tryjob's 'status' has been updated in the status file. - with open(temp_json_file) as status_file: - bisect_contents = json.load(status_file) - - self.assertEqual(bisect_contents['jobs'][tryjob_index]['status'], - update_tryjob_status.TryjobStatus.SKIP.value) - - mock_get_custom_script_result.assert_called_once() - - mock_find_tryjob_index.assert_called_once() - - # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the - # status file. - @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0) - def testSetStatusDoesNotExistWhenUpdatingTryjobStatus( - self, mock_find_tryjob_index): - - bisect_test_contents = { - 'start': 369410, - 'end': 369420, - 'jobs': [{ - 'rev': 369411, - 'status': 'pending', - 'buildbucket_id': 1200 - }] - } - - # Create a temporary .JSON file to simulate a .JSON file that has bisection - # contents. - with CreateTemporaryJsonFile() as temp_json_file: - with open(temp_json_file, 'w') as f: - WritePrettyJsonFile(bisect_test_contents, f) - - revision_to_update = 369411 - - nonexistent_update_status = 'revert_status' - - custom_script = None - - # Verify the exception is raised when the `set_status` command line - # argument does not exist in the mapping. 
- with self.assertRaises(ValueError) as err: - update_tryjob_status.UpdateTryjobStatus(revision_to_update, - nonexistent_update_status, - temp_json_file, custom_script) - - self.assertEqual(str(err.exception), - 'Invalid "set_status" option provided: revert_status') - - mock_find_tryjob_index.assert_called_once() - - -if __name__ == '__main__': - unittest.main() + """Unittests for updating a tryjob's 'status'.""" + + def testFoundTryjobIndex(self): + test_tryjobs = [ + { + "rev": 123, + "url": "https://some_url_to_CL.com", + "cl": "https://some_link_to_tryjob.com", + "status": "good", + "buildbucket_id": 91835, + }, + { + "rev": 1000, + "url": "https://some_url_to_CL.com", + "cl": "https://some_link_to_tryjob.com", + "status": "pending", + "buildbucket_id": 10931, + }, + ] + + expected_index = 0 + + revision_to_find = 123 + + self.assertEqual( + update_tryjob_status.FindTryjobIndex( + revision_to_find, test_tryjobs + ), + expected_index, + ) + + def testNotFindTryjobIndex(self): + test_tryjobs = [ + { + "rev": 500, + "url": "https://some_url_to_CL.com", + "cl": "https://some_link_to_tryjob.com", + "status": "bad", + "buildbucket_id": 390, + }, + { + "rev": 10, + "url": "https://some_url_to_CL.com", + "cl": "https://some_link_to_tryjob.com", + "status": "skip", + "buildbucket_id": 10, + }, + ] + + revision_to_find = 250 + + self.assertIsNone( + update_tryjob_status.FindTryjobIndex(revision_to_find, test_tryjobs) + ) + + @mock.patch.object(subprocess, "Popen") + # Simulate the behavior of `os.rename()` when successfully renamed a file. + @mock.patch.object(os, "rename", return_value=None) + # Simulate the behavior of `os.path.basename()` when successfully retrieved + # the basename of the temp .JSON file. + @mock.patch.object(os.path, "basename", return_value="tmpFile.json") + def testInvalidExitCodeByCustomScript( + self, mock_basename, mock_rename_file, mock_exec_custom_script + ): + + error_message_by_custom_script = "Failed to parse .JSON file" + + # Simulate the behavior of 'subprocess.Popen()' when executing the custom + # script. + # + # `Popen.communicate()` returns a tuple of `stdout` and `stderr`. + mock_exec_custom_script.return_value.communicate.return_value = ( + None, + error_message_by_custom_script, + ) + + # Exit code of 1 is not in the mapping, so an exception will be raised. + custom_script_exit_code = 1 + + mock_exec_custom_script.return_value.returncode = ( + custom_script_exit_code + ) + + tryjob_contents = { + "status": "good", + "rev": 1234, + "url": "https://some_url_to_CL.com", + "link": "https://some_url_to_tryjob.com", + } + + custom_script_path = "/abs/path/to/script.py" + status_file_path = "/abs/path/to/status_file.json" + + name_json_file = os.path.join( + os.path.dirname(status_file_path), "tmpFile.json" + ) + + expected_error_message = ( + "Custom script %s exit code %d did not match " + 'any of the expected exit codes: %s for "good", ' + '%d for "bad", or %d for "skip".\nPlease check ' + "%s for information about the tryjob: %s" + % ( + custom_script_path, + custom_script_exit_code, + CustomScriptStatus.GOOD.value, + CustomScriptStatus.BAD.value, + CustomScriptStatus.SKIP.value, + name_json_file, + error_message_by_custom_script, + ) + ) + + # Verify the exception is raised when the exit code by the custom script + # does not match any of the exit codes in the mapping of + # `custom_script_exit_value_mapping`. 
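+        # The assertion below also compares the raised message verbatim
+        # against `expected_error_message`.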
+        with self.assertRaises(ValueError) as err:
+            update_tryjob_status.GetCustomScriptResult(
+                custom_script_path, status_file_path, tryjob_contents
+            )
+
+        self.assertEqual(str(err.exception), expected_error_message)
+
+        mock_exec_custom_script.assert_called_once()
+
+        mock_rename_file.assert_called_once()
+
+        mock_basename.assert_called_once()
+
+    @mock.patch.object(subprocess, "Popen")
+    # Simulate the behavior of `os.rename()` when successfully renamed a file.
+    @mock.patch.object(os, "rename", return_value=None)
+    # Simulate the behavior of `os.path.basename()` when successfully retrieved
+    # the basename of the temp .JSON file.
+    @mock.patch.object(os.path, "basename", return_value="tmpFile.json")
+    def testValidExitCodeByCustomScript(
+        self, mock_basename, mock_rename_file, mock_exec_custom_script
+    ):
+
+        # Simulate the behavior of 'subprocess.Popen()' when executing the custom
+        # script.
+        #
+        # `Popen.communicate()` returns a tuple of `stdout` and `stderr`.
+        mock_exec_custom_script.return_value.communicate.return_value = (
+            None,
+            None,
+        )
+
+        mock_exec_custom_script.return_value.returncode = (
+            CustomScriptStatus.GOOD.value
+        )
+
+        tryjob_contents = {
+            "status": "good",
+            "rev": 1234,
+            "url": "https://some_url_to_CL.com",
+            "link": "https://some_url_to_tryjob.com",
+        }
+
+        custom_script_path = "/abs/path/to/script.py"
+        status_file_path = "/abs/path/to/status_file.json"
+
+        self.assertEqual(
+            update_tryjob_status.GetCustomScriptResult(
+                custom_script_path, status_file_path, tryjob_contents
+            ),
+            TryjobStatus.GOOD.value,
+        )
+
+        mock_exec_custom_script.assert_called_once()
+
+        mock_rename_file.assert_not_called()
+
+        mock_basename.assert_not_called()
+
+    def testNoTryjobsInStatusFileWhenUpdatingTryjobStatus(self):
+        bisect_test_contents = {"start": 369410, "end": 369420, "jobs": []}
+
+        # Create a temporary .JSON file to simulate a .JSON file that has bisection
+        # contents.
+        with CreateTemporaryJsonFile() as temp_json_file:
+            with open(temp_json_file, "w") as f:
+                WritePrettyJsonFile(bisect_test_contents, f)
+
+            revision_to_update = 369412
+
+            custom_script = None
+
+            # Verify the exception is raised when the `status_file` does not have any
+            # `jobs` (empty).
+            with self.assertRaises(SystemExit) as err:
+                update_tryjob_status.UpdateTryjobStatus(
+                    revision_to_update,
+                    TryjobStatus.GOOD,
+                    temp_json_file,
+                    custom_script,
+                )
+
+            self.assertEqual(
+                str(err.exception), "No tryjobs in %s" % temp_json_file
+            )
+
+    # Simulate the behavior of `FindTryjobIndex()` when the tryjob does not exist
+    # in the status file.
+    @mock.patch.object(
+        update_tryjob_status, "FindTryjobIndex", return_value=None
+    )
+    def testNotFindTryjobIndexWhenUpdatingTryjobStatus(
+        self, mock_find_tryjob_index
+    ):
+
+        bisect_test_contents = {
+            "start": 369410,
+            "end": 369420,
+            "jobs": [{"rev": 369411, "status": "pending"}],
+        }
+
+        # Create a temporary .JSON file to simulate a .JSON file that has bisection
+        # contents.
+        with CreateTemporaryJsonFile() as temp_json_file:
+            with open(temp_json_file, "w") as f:
+                WritePrettyJsonFile(bisect_test_contents, f)
+
+            revision_to_update = 369416
+
+            custom_script = None
+
+            # Verify the exception is raised when the tryjob for the revision
+            # is not found in the status file.
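+            # (FindTryjobIndex is mocked to return None, so any revision
+            # exercises this path.)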
+ with self.assertRaises(ValueError) as err: + update_tryjob_status.UpdateTryjobStatus( + revision_to_update, + TryjobStatus.SKIP, + temp_json_file, + custom_script, + ) + + self.assertEqual( + str(err.exception), + "Unable to find tryjob for %d in %s" + % (revision_to_update, temp_json_file), + ) + + mock_find_tryjob_index.assert_called_once() + + # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the + # status file. + @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0) + def testSuccessfullyUpdatedTryjobStatusToGood(self, mock_find_tryjob_index): + bisect_test_contents = { + "start": 369410, + "end": 369420, + "jobs": [{"rev": 369411, "status": "pending"}], + } + + # Create a temporary .JSON file to simulate a .JSON file that has bisection + # contents. + with CreateTemporaryJsonFile() as temp_json_file: + with open(temp_json_file, "w") as f: + WritePrettyJsonFile(bisect_test_contents, f) + + revision_to_update = 369411 + + # Index of the tryjob that is going to have its 'status' value updated. + tryjob_index = 0 + + custom_script = None + + update_tryjob_status.UpdateTryjobStatus( + revision_to_update, + TryjobStatus.GOOD, + temp_json_file, + custom_script, + ) + + # Verify that the tryjob's 'status' has been updated in the status file. + with open(temp_json_file) as status_file: + bisect_contents = json.load(status_file) + + self.assertEqual( + bisect_contents["jobs"][tryjob_index]["status"], + TryjobStatus.GOOD.value, + ) + + mock_find_tryjob_index.assert_called_once() + + # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the + # status file. + @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0) + def testSuccessfullyUpdatedTryjobStatusToBad(self, mock_find_tryjob_index): + bisect_test_contents = { + "start": 369410, + "end": 369420, + "jobs": [{"rev": 369411, "status": "pending"}], + } + + # Create a temporary .JSON file to simulate a .JSON file that has bisection + # contents. + with CreateTemporaryJsonFile() as temp_json_file: + with open(temp_json_file, "w") as f: + WritePrettyJsonFile(bisect_test_contents, f) + + revision_to_update = 369411 + + # Index of the tryjob that is going to have its 'status' value updated. + tryjob_index = 0 + + custom_script = None + + update_tryjob_status.UpdateTryjobStatus( + revision_to_update, + TryjobStatus.BAD, + temp_json_file, + custom_script, + ) + + # Verify that the tryjob's 'status' has been updated in the status file. + with open(temp_json_file) as status_file: + bisect_contents = json.load(status_file) + + self.assertEqual( + bisect_contents["jobs"][tryjob_index]["status"], + TryjobStatus.BAD.value, + ) + + mock_find_tryjob_index.assert_called_once() + + # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the + # status file. + @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0) + def testSuccessfullyUpdatedTryjobStatusToPending( + self, mock_find_tryjob_index + ): + bisect_test_contents = { + "start": 369410, + "end": 369420, + "jobs": [{"rev": 369411, "status": "skip"}], + } + + # Create a temporary .JSON file to simulate a .JSON file that has bisection + # contents. + with CreateTemporaryJsonFile() as temp_json_file: + with open(temp_json_file, "w") as f: + WritePrettyJsonFile(bisect_test_contents, f) + + revision_to_update = 369411 + + # Index of the tryjob that is going to have its 'status' value updated. 
+ tryjob_index = 0 + + custom_script = None + + update_tryjob_status.UpdateTryjobStatus( + revision_to_update, + update_tryjob_status.TryjobStatus.SKIP, + temp_json_file, + custom_script, + ) + + # Verify that the tryjob's 'status' has been updated in the status file. + with open(temp_json_file) as status_file: + bisect_contents = json.load(status_file) + + self.assertEqual( + bisect_contents["jobs"][tryjob_index]["status"], + update_tryjob_status.TryjobStatus.SKIP.value, + ) + + mock_find_tryjob_index.assert_called_once() + + # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the + # status file. + @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0) + def testSuccessfullyUpdatedTryjobStatusToSkip(self, mock_find_tryjob_index): + bisect_test_contents = { + "start": 369410, + "end": 369420, + "jobs": [ + { + "rev": 369411, + "status": "pending", + } + ], + } + + # Create a temporary .JSON file to simulate a .JSON file that has bisection + # contents. + with CreateTemporaryJsonFile() as temp_json_file: + with open(temp_json_file, "w") as f: + WritePrettyJsonFile(bisect_test_contents, f) + + revision_to_update = 369411 + + # Index of the tryjob that is going to have its 'status' value updated. + tryjob_index = 0 + + custom_script = None + + update_tryjob_status.UpdateTryjobStatus( + revision_to_update, + update_tryjob_status.TryjobStatus.PENDING, + temp_json_file, + custom_script, + ) + + # Verify that the tryjob's 'status' has been updated in the status file. + with open(temp_json_file) as status_file: + bisect_contents = json.load(status_file) + + self.assertEqual( + bisect_contents["jobs"][tryjob_index]["status"], + update_tryjob_status.TryjobStatus.PENDING.value, + ) + + mock_find_tryjob_index.assert_called_once() + + @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0) + @mock.patch.object( + update_tryjob_status, + "GetCustomScriptResult", + return_value=TryjobStatus.SKIP.value, + ) + def testUpdatedTryjobStatusToAutoPassedWithCustomScript( + self, mock_get_custom_script_result, mock_find_tryjob_index + ): + bisect_test_contents = { + "start": 369410, + "end": 369420, + "jobs": [ + {"rev": 369411, "status": "pending", "buildbucket_id": 1200} + ], + } + + # Create a temporary .JSON file to simulate a .JSON file that has bisection + # contents. + with CreateTemporaryJsonFile() as temp_json_file: + with open(temp_json_file, "w") as f: + WritePrettyJsonFile(bisect_test_contents, f) + + revision_to_update = 369411 + + # Index of the tryjob that is going to have its 'status' value updated. + tryjob_index = 0 + + custom_script_path = "/abs/path/to/custom_script.py" + + update_tryjob_status.UpdateTryjobStatus( + revision_to_update, + update_tryjob_status.TryjobStatus.CUSTOM_SCRIPT, + temp_json_file, + custom_script_path, + ) + + # Verify that the tryjob's 'status' has been updated in the status file. + with open(temp_json_file) as status_file: + bisect_contents = json.load(status_file) + + self.assertEqual( + bisect_contents["jobs"][tryjob_index]["status"], + update_tryjob_status.TryjobStatus.SKIP.value, + ) + + mock_get_custom_script_result.assert_called_once() + + mock_find_tryjob_index.assert_called_once() + + # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the + # status file. 
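+    # Returning index 0 here bypasses the search entirely, so only the
+    # 'set_status' validation path is exercised.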
+ @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0) + def testSetStatusDoesNotExistWhenUpdatingTryjobStatus( + self, mock_find_tryjob_index + ): + + bisect_test_contents = { + "start": 369410, + "end": 369420, + "jobs": [ + {"rev": 369411, "status": "pending", "buildbucket_id": 1200} + ], + } + + # Create a temporary .JSON file to simulate a .JSON file that has bisection + # contents. + with CreateTemporaryJsonFile() as temp_json_file: + with open(temp_json_file, "w") as f: + WritePrettyJsonFile(bisect_test_contents, f) + + revision_to_update = 369411 + + nonexistent_update_status = "revert_status" + + custom_script = None + + # Verify the exception is raised when the `set_status` command line + # argument does not exist in the mapping. + with self.assertRaises(ValueError) as err: + update_tryjob_status.UpdateTryjobStatus( + revision_to_update, + nonexistent_update_status, + temp_json_file, + custom_script, + ) + + self.assertEqual( + str(err.exception), + 'Invalid "set_status" option provided: revert_status', + ) + + mock_find_tryjob_index.assert_called_once() + + +if __name__ == "__main__": + unittest.main() diff --git a/llvm_tools/upload_lexan_crashes_to_forcey.py b/llvm_tools/upload_lexan_crashes_to_forcey.py index 050168a5..204061b0 100755 --- a/llvm_tools/upload_lexan_crashes_to_forcey.py +++ b/llvm_tools/upload_lexan_crashes_to_forcey.py @@ -16,141 +16,149 @@ import shutil import subprocess import sys import tempfile -from typing import Generator, List, Iterable +from typing import Generator, Iterable, List -gsurl_base = 'gs://chrome-clang-crash-reports/v1' + +gsurl_base = "gs://chrome-clang-crash-reports/v1" def gsutil_ls(loc: str) -> List[str]: - results = subprocess.run(['gsutil.py', 'ls', loc], - stdout=subprocess.PIPE, - check=True, - encoding='utf-8') - return [l.strip() for l in results.stdout.splitlines()] + results = subprocess.run( + ["gsutil.py", "ls", loc], + stdout=subprocess.PIPE, + check=True, + encoding="utf-8", + ) + return [l.strip() for l in results.stdout.splitlines()] def gsurl_ls_last_numbers(url: str) -> List[int]: - return sorted(int(x.rstrip('/').split('/')[-1]) for x in gsutil_ls(url)) + return sorted(int(x.rstrip("/").split("/")[-1]) for x in gsutil_ls(url)) def get_available_year_numbers() -> List[int]: - return gsurl_ls_last_numbers(gsurl_base) + return gsurl_ls_last_numbers(gsurl_base) def get_available_month_numbers(year: int) -> List[int]: - return gsurl_ls_last_numbers(f'{gsurl_base}/{year}') + return gsurl_ls_last_numbers(f"{gsurl_base}/{year}") def get_available_day_numbers(year: int, month: int) -> List[int]: - return gsurl_ls_last_numbers(f'{gsurl_base}/{year}/{month:02d}') + return gsurl_ls_last_numbers(f"{gsurl_base}/{year}/{month:02d}") def get_available_test_case_urls(year: int, month: int, day: int) -> List[str]: - return gsutil_ls(f'{gsurl_base}/{year}/{month:02d}/{day:02d}') + return gsutil_ls(f"{gsurl_base}/{year}/{month:02d}/{day:02d}") -def test_cases_on_or_after(date: datetime.datetime - ) -> Generator[str, None, None]: - """Yields all test-cases submitted on or after the given date.""" - for year in get_available_year_numbers(): - if year < date.year: - continue +def test_cases_on_or_after( + date: datetime.datetime, +) -> Generator[str, None, None]: + """Yields all test-cases submitted on or after the given date.""" + for year in get_available_year_numbers(): + if year < date.year: + continue - for month in get_available_month_numbers(year): - if year == date.year and month < date.month: - continue + for 
month in get_available_month_numbers(year): + if year == date.year and month < date.month: + continue - for day in get_available_day_numbers(year, month): - when = datetime.date(year, month, day) - if when < date: - continue + for day in get_available_day_numbers(year, month): + when = datetime.date(year, month, day) + if when < date: + continue - yield when, get_available_test_case_urls(year, month, day) + yield when, get_available_test_case_urls(year, month, day) def to_ymd(date: datetime.date) -> str: - return date.strftime('%Y-%m-%d') + return date.strftime("%Y-%m-%d") def from_ymd(date_str: str) -> datetime.date: - return datetime.datetime.strptime(date_str, '%Y-%m-%d').date() - - -def persist_state(seen_urls: Iterable[str], state_file: str, - current_date: datetime.date): - tmp_state_file = state_file + '.tmp' - with open(tmp_state_file, 'w', encoding='utf-8') as f: - json.dump( - { - 'already_seen': sorted(seen_urls), - 'most_recent_date': to_ymd(current_date), - }, - f, - ) - os.rename(tmp_state_file, state_file) + return datetime.datetime.strptime(date_str, "%Y-%m-%d").date() + + +def persist_state( + seen_urls: Iterable[str], state_file: str, current_date: datetime.date +): + tmp_state_file = state_file + ".tmp" + with open(tmp_state_file, "w", encoding="utf-8") as f: + json.dump( + { + "already_seen": sorted(seen_urls), + "most_recent_date": to_ymd(current_date), + }, + f, + ) + os.rename(tmp_state_file, state_file) @contextlib.contextmanager def temp_dir() -> Generator[str, None, None]: - loc = tempfile.mkdtemp('lexan-autosubmit') - try: - yield loc - finally: - shutil.rmtree(loc) + loc = tempfile.mkdtemp("lexan-autosubmit") + try: + yield loc + finally: + shutil.rmtree(loc) def download_and_unpack_test_case(gs_url: str, tempdir: str) -> None: - suffix = os.path.splitext(gs_url)[1] - target_name = 'test_case' + suffix - target = os.path.join(tempdir, target_name) - subprocess.run(['gsutil.py', 'cp', gs_url, target], check=True) - subprocess.run(['tar', 'xaf', target_name], check=True, cwd=tempdir) - os.unlink(target) + suffix = os.path.splitext(gs_url)[1] + target_name = "test_case" + suffix + target = os.path.join(tempdir, target_name) + subprocess.run(["gsutil.py", "cp", gs_url, target], check=True) + subprocess.run(["tar", "xaf", target_name], check=True, cwd=tempdir) + os.unlink(target) def submit_test_case(gs_url: str, cr_tool: str) -> None: - logging.info('Submitting %s', gs_url) - with temp_dir() as tempdir: - download_and_unpack_test_case(gs_url, tempdir) - - # Sometimes (e.g., in - # gs://chrome-clang-crash-reports/v1/2020/03/27/ - # chromium.clang-ToTiOS-12754-GTXToolKit-2bfcde.tgz) - # we'll get `.crash` files. Unclear why, but let's filter them out anyway. - repro_files = [ - os.path.join(tempdir, x) for x in os.listdir(tempdir) - if not x.endswith('.crash') - ] - assert len(repro_files) == 2, repro_files - if repro_files[0].endswith('.sh'): - sh_file, src_file = repro_files - assert not src_file.endswith('.sh'), repro_files - else: - src_file, sh_file = repro_files - assert sh_file.endswith('.sh'), repro_files - - # Peephole: lexan got a crash upload with a way old clang. Ignore it. 
- with open(sh_file, encoding='utf-8') as f: - if 'Crash reproducer for clang version 9.0.0' in f.read(): - logging.warning( - 'Skipping upload for %s; seems to be with an old clang', gs_url) - return - - subprocess.run( - [ - cr_tool, - 'reduce', - '-stream=false', - '-wait=false', - '-note', - gs_url, - '-sh_file', - os.path.join(tempdir, sh_file), - '-src_file', - os.path.join(tempdir, src_file), - ], - check=True, - ) + logging.info("Submitting %s", gs_url) + with temp_dir() as tempdir: + download_and_unpack_test_case(gs_url, tempdir) + + # Sometimes (e.g., in + # gs://chrome-clang-crash-reports/v1/2020/03/27/ + # chromium.clang-ToTiOS-12754-GTXToolKit-2bfcde.tgz) + # we'll get `.crash` files. Unclear why, but let's filter them out anyway. + repro_files = [ + os.path.join(tempdir, x) + for x in os.listdir(tempdir) + if not x.endswith(".crash") + ] + assert len(repro_files) == 2, repro_files + if repro_files[0].endswith(".sh"): + sh_file, src_file = repro_files + assert not src_file.endswith(".sh"), repro_files + else: + src_file, sh_file = repro_files + assert sh_file.endswith(".sh"), repro_files + + # Peephole: lexan got a crash upload with a way old clang. Ignore it. + with open(sh_file, encoding="utf-8") as f: + if "Crash reproducer for clang version 9.0.0" in f.read(): + logging.warning( + "Skipping upload for %s; seems to be with an old clang", + gs_url, + ) + return + + subprocess.run( + [ + cr_tool, + "reduce", + "-stream=false", + "-wait=false", + "-note", + gs_url, + "-sh_file", + os.path.join(tempdir, sh_file), + "-src_file", + os.path.join(tempdir, src_file), + ], + check=True, + ) def submit_new_test_cases( @@ -159,114 +167,119 @@ def submit_new_test_cases( forcey: str, state_file_path: str, ) -> None: - """Submits new test-cases to forcey. - - This will persist state after each test-case is submitted. - - Args: - last_seen_test_cases: test-cases which have been submitted already, and - should be skipped if seen again. - earliest_date_to_check: the earliest date we should consider test-cases - from. - forcey: path to the forcey binary. - state_file_path: path to our state file. - """ - # `all_test_cases_seen` is the union of all test-cases seen on this and prior - # invocations. It guarantees, in all cases we care about, that we won't - # submit the same test-case twice. `test_cases_seen_this_invocation` is - # persisted as "all of the test-cases we've seen on this and prior - # invocations" if we successfully submit _all_ test-cases. - # - # Since you can visualize the test-cases this script considers as a sliding - # window that only moves forward, if we saw a test-case on a prior iteration - # but no longer see it, we'll never see it again (since it fell out of our - # sliding window by being too old). Hence, keeping it around is - # pointless. - # - # We only persist this minimized set of test-cases if _everything_ succeeds, - # since if something fails below, there's a chance that we haven't revisited - # test-cases that we've already seen. 
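
The sliding-window argument in the comment block above can be made concrete with a toy example; the URLs here are invented, and the point is only that persisting the minimized set never allows a resubmission:

    # Run 1 sees window {a, b}; run 2 sees {b, c} because 'a' aged out.
    day1 = {"gs://a", "gs://b"}
    day2 = {"gs://b", "gs://c"}
    persisted = day1                  # minimized state after run 1
    to_submit = day2 - persisted      # only the genuinely new test-case
    assert to_submit == {"gs://c"}    # 'b' is skipped; 'a' never reappears
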
- all_test_cases_seen = set(last_seen_test_cases) - test_cases_seen_this_invocation = [] - most_recent_date = earliest_date_to_check - for date, candidates in test_cases_on_or_after(earliest_date_to_check): - most_recent_date = max(most_recent_date, date) - - for url in candidates: - test_cases_seen_this_invocation.append(url) - if url in all_test_cases_seen: - continue - - all_test_cases_seen.add(url) - submit_test_case(url, forcey) - - # Persisting on each iteration of this loop isn't free, but it's the - # easiest way to not resubmit test-cases, and it's good to keep in mind - # that: - # - the state file will be small (<12KB, since it only keeps a few days - # worth of test-cases after the first run) - # - in addition to this, we're downloading+unzipping+reuploading multiple - # MB of test-case bytes. - # - # So comparatively, the overhead here probably isn't an issue. - persist_state(all_test_cases_seen, state_file_path, most_recent_date) - - persist_state(test_cases_seen_this_invocation, state_file_path, - most_recent_date) + """Submits new test-cases to forcey. + + This will persist state after each test-case is submitted. + + Args: + last_seen_test_cases: test-cases which have been submitted already, and + should be skipped if seen again. + earliest_date_to_check: the earliest date we should consider test-cases + from. + forcey: path to the forcey binary. + state_file_path: path to our state file. + """ + # `all_test_cases_seen` is the union of all test-cases seen on this and prior + # invocations. It guarantees, in all cases we care about, that we won't + # submit the same test-case twice. `test_cases_seen_this_invocation` is + # persisted as "all of the test-cases we've seen on this and prior + # invocations" if we successfully submit _all_ test-cases. + # + # Since you can visualize the test-cases this script considers as a sliding + # window that only moves forward, if we saw a test-case on a prior iteration + # but no longer see it, we'll never see it again (since it fell out of our + # sliding window by being too old). Hence, keeping it around is + # pointless. + # + # We only persist this minimized set of test-cases if _everything_ succeeds, + # since if something fails below, there's a chance that we haven't revisited + # test-cases that we've already seen. + all_test_cases_seen = set(last_seen_test_cases) + test_cases_seen_this_invocation = [] + most_recent_date = earliest_date_to_check + for date, candidates in test_cases_on_or_after(earliest_date_to_check): + most_recent_date = max(most_recent_date, date) + + for url in candidates: + test_cases_seen_this_invocation.append(url) + if url in all_test_cases_seen: + continue + + all_test_cases_seen.add(url) + submit_test_case(url, forcey) + + # Persisting on each iteration of this loop isn't free, but it's the + # easiest way to not resubmit test-cases, and it's good to keep in mind + # that: + # - the state file will be small (<12KB, since it only keeps a few days + # worth of test-cases after the first run) + # - in addition to this, we're downloading+unzipping+reuploading multiple + # MB of test-case bytes. + # + # So comparatively, the overhead here probably isn't an issue. 
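
persist_state (defined earlier in this file) keeps each of those checkpoints atomic by writing a sibling temporary file and renaming it into place. A minimal self-contained sketch of the same pattern:

    import json
    import os

    def atomic_write_json(payload, path: str) -> None:
        tmp = path + ".tmp"
        with open(tmp, "w", encoding="utf-8") as f:
            json.dump(payload, f)
        # rename() is atomic when tmp and path share a filesystem, so a
        # reader sees either the old state or the new one, never a torn file.
        os.rename(tmp, path)
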
+ persist_state( + all_test_cases_seen, state_file_path, most_recent_date + ) + + persist_state( + test_cases_seen_this_invocation, state_file_path, most_recent_date + ) def main(argv: List[str]): - logging.basicConfig( - format='>> %(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: ' - '%(message)s', - level=logging.INFO, - ) - - my_dir = os.path.dirname(os.path.abspath(__file__)) - - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument('--state_file', - default=os.path.join(my_dir, 'lexan-state.json')) - parser.add_argument( - '--last_date', - help='The earliest date that we care about. All test cases from here ' - 'on will be picked up. Format is YYYY-MM-DD.') - parser.add_argument('--4c', - dest='forcey', - required=True, - help='Path to a 4c client binary') - opts = parser.parse_args(argv) - - forcey = opts.forcey - state_file = opts.state_file - last_date_str = opts.last_date - - os.makedirs(os.path.dirname(state_file), 0o755, exist_ok=True) - - if last_date_str is None: - with open(state_file, encoding='utf-8') as f: - data = json.load(f) - most_recent_date = from_ymd(data['most_recent_date']) - submit_new_test_cases( - last_seen_test_cases=data['already_seen'], - # Note that we always subtract one day from this to avoid a race: - # uploads may appear slightly out-of-order (or builders may lag, or - # ...), so the last test-case uploaded for 2020/01/01 might appear - # _after_ the first test-case for 2020/01/02. Assuming that builders - # won't lag behind for over a day, the easiest way to handle this is to - # always check the previous and current days. - earliest_date_to_check=most_recent_date - datetime.timedelta(days=1), - forcey=forcey, - state_file_path=state_file, + logging.basicConfig( + format=">> %(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: " + "%(message)s", + level=logging.INFO, ) - else: - submit_new_test_cases( - last_seen_test_cases=(), - earliest_date_to_check=from_ymd(last_date_str), - forcey=forcey, - state_file_path=state_file, + + my_dir = os.path.dirname(os.path.abspath(__file__)) + + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--state_file", default=os.path.join(my_dir, "lexan-state.json") ) + parser.add_argument( + "--last_date", + help="The earliest date that we care about. All test cases from here " + "on will be picked up. Format is YYYY-MM-DD.", + ) + parser.add_argument( + "--4c", dest="forcey", required=True, help="Path to a 4c client binary" + ) + opts = parser.parse_args(argv) + + forcey = opts.forcey + state_file = opts.state_file + last_date_str = opts.last_date + + os.makedirs(os.path.dirname(state_file), 0o755, exist_ok=True) + + if last_date_str is None: + with open(state_file, encoding="utf-8") as f: + data = json.load(f) + most_recent_date = from_ymd(data["most_recent_date"]) + submit_new_test_cases( + last_seen_test_cases=data["already_seen"], + # Note that we always subtract one day from this to avoid a race: + # uploads may appear slightly out-of-order (or builders may lag, or + # ...), so the last test-case uploaded for 2020/01/01 might appear + # _after_ the first test-case for 2020/01/02. Assuming that builders + # won't lag behind for over a day, the easiest way to handle this is to + # always check the previous and current days. 
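
Resuming from the state file therefore backs the scan window up by one day, per the comment above. A small sketch of that computation, assuming the {"already_seen": [...], "most_recent_date": "YYYY-MM-DD"} schema that persist_state writes:

    import datetime
    import json

    def resume_window_start(state_file: str) -> datetime.date:
        with open(state_file, encoding="utf-8") as f:
            data = json.load(f)
        most_recent = datetime.datetime.strptime(
            data["most_recent_date"], "%Y-%m-%d"
        ).date()
        # Re-scan the last persisted day too; the already_seen set makes
        # revisiting a day's test-cases harmless.
        return most_recent - datetime.timedelta(days=1)
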
+ earliest_date_to_check=most_recent_date + - datetime.timedelta(days=1), + forcey=forcey, + state_file_path=state_file, + ) + else: + submit_new_test_cases( + last_seen_test_cases=(), + earliest_date_to_check=from_ymd(last_date_str), + forcey=forcey, + state_file_path=state_file, + ) -if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/llvm_tools/upload_lexan_crashes_to_forcey_test.py b/llvm_tools/upload_lexan_crashes_to_forcey_test.py index ba6298f4..6c5008d6 100755 --- a/llvm_tools/upload_lexan_crashes_to_forcey_test.py +++ b/llvm_tools/upload_lexan_crashes_to_forcey_test.py @@ -15,130 +15,152 @@ import upload_lexan_crashes_to_forcey class Test(unittest.TestCase): - """Tests for upload_lexan_crashes_to_forcey.""" - - def test_date_parsing_functions(self): - self.assertEqual(datetime.date(2020, 2, 1), - upload_lexan_crashes_to_forcey.from_ymd('2020-02-01')) - - @unittest.mock.patch('upload_lexan_crashes_to_forcey.test_cases_on_or_after', - return_value=( - ( - datetime.date(2020, 1, 1), - ('gs://test-case-1', 'gs://test-case-1.1'), - ), - (datetime.date(2020, 1, 2), ('gs://test-case-2', )), - (datetime.date(2020, 1, 1), ('gs://test-case-3', )), - (datetime.date(2020, 1, 4), ('gs://test-case-4', )), - )) - @unittest.mock.patch('upload_lexan_crashes_to_forcey.submit_test_case') - @unittest.mock.patch('upload_lexan_crashes_to_forcey.persist_state') - def test_new_test_case_submission_functions(self, persist_state_mock, - submit_test_case_mock, - test_cases_on_or_after_mock): - forcey_path = '/path/to/4c' - real_state_file_path = '/path/to/state/file' - earliest_date = datetime.date(2020, 1, 1) - - persist_state_calls = [] - - # Since the set this gets is mutated, we need to copy it somehow. 
- def persist_state_side_effect(test_cases_to_persist, state_file_path, - most_recent_date): - self.assertEqual(state_file_path, real_state_file_path) - persist_state_calls.append( - (sorted(test_cases_to_persist), most_recent_date)) - - persist_state_mock.side_effect = persist_state_side_effect - - upload_lexan_crashes_to_forcey.submit_new_test_cases( - last_seen_test_cases=( - 'gs://test-case-0', - 'gs://test-case-1', + """Tests for upload_lexan_crashes_to_forcey.""" + + def test_date_parsing_functions(self): + self.assertEqual( + datetime.date(2020, 2, 1), + upload_lexan_crashes_to_forcey.from_ymd("2020-02-01"), + ) + + @unittest.mock.patch( + "upload_lexan_crashes_to_forcey.test_cases_on_or_after", + return_value=( + ( + datetime.date(2020, 1, 1), + ("gs://test-case-1", "gs://test-case-1.1"), + ), + (datetime.date(2020, 1, 2), ("gs://test-case-2",)), + (datetime.date(2020, 1, 1), ("gs://test-case-3",)), + (datetime.date(2020, 1, 4), ("gs://test-case-4",)), ), - earliest_date_to_check=earliest_date, - forcey=forcey_path, - state_file_path=real_state_file_path, ) - - test_cases_on_or_after_mock.assert_called_once_with(earliest_date) - self.assertEqual(submit_test_case_mock.call_args_list, [ - unittest.mock.call('gs://test-case-1.1', forcey_path), - unittest.mock.call('gs://test-case-2', forcey_path), - unittest.mock.call('gs://test-case-3', forcey_path), - unittest.mock.call('gs://test-case-4', forcey_path), - ]) - - self.assertEqual(persist_state_calls, [ - ( - ['gs://test-case-0', 'gs://test-case-1', 'gs://test-case-1.1'], - datetime.date(2020, 1, 1), - ), - ( - [ - 'gs://test-case-0', - 'gs://test-case-1', - 'gs://test-case-1.1', - 'gs://test-case-2', - ], - datetime.date(2020, 1, 2), - ), - ( - [ - 'gs://test-case-0', - 'gs://test-case-1', - 'gs://test-case-1.1', - 'gs://test-case-2', - 'gs://test-case-3', - ], - datetime.date(2020, 1, 2), - ), - ( + @unittest.mock.patch("upload_lexan_crashes_to_forcey.submit_test_case") + @unittest.mock.patch("upload_lexan_crashes_to_forcey.persist_state") + def test_new_test_case_submission_functions( + self, + persist_state_mock, + submit_test_case_mock, + test_cases_on_or_after_mock, + ): + forcey_path = "/path/to/4c" + real_state_file_path = "/path/to/state/file" + earliest_date = datetime.date(2020, 1, 1) + + persist_state_calls = [] + + # Since the set this gets is mutated, we need to copy it somehow. 
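
The copy matters because submit_new_test_cases keeps mutating the set it passes in, and mock.call_args_list stores references rather than snapshots, so every recorded call would show the final value. A minimal illustration, independent of this test:

    from unittest import mock

    snapshots = []
    m = mock.Mock(side_effect=lambda s: snapshots.append(sorted(s)))

    live = {"a"}
    m(live)
    live.add("b")
    m(live)

    assert snapshots == [["a"], ["a", "b"]]
    # Without the side_effect snapshot, both recorded calls now compare
    # equal to the mutated set:
    assert m.call_args_list[0] == mock.call({"a", "b"})
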
+ def persist_state_side_effect( + test_cases_to_persist, state_file_path, most_recent_date + ): + self.assertEqual(state_file_path, real_state_file_path) + persist_state_calls.append( + (sorted(test_cases_to_persist), most_recent_date) + ) + + persist_state_mock.side_effect = persist_state_side_effect + + upload_lexan_crashes_to_forcey.submit_new_test_cases( + last_seen_test_cases=( + "gs://test-case-0", + "gs://test-case-1", + ), + earliest_date_to_check=earliest_date, + forcey=forcey_path, + state_file_path=real_state_file_path, + ) + + test_cases_on_or_after_mock.assert_called_once_with(earliest_date) + self.assertEqual( + submit_test_case_mock.call_args_list, [ - 'gs://test-case-0', - 'gs://test-case-1', - 'gs://test-case-1.1', - 'gs://test-case-2', - 'gs://test-case-3', - 'gs://test-case-4', + unittest.mock.call("gs://test-case-1.1", forcey_path), + unittest.mock.call("gs://test-case-2", forcey_path), + unittest.mock.call("gs://test-case-3", forcey_path), + unittest.mock.call("gs://test-case-4", forcey_path), ], - datetime.date(2020, 1, 4), - ), - ( + ) + + self.assertEqual( + persist_state_calls, [ - 'gs://test-case-1', - 'gs://test-case-1.1', - 'gs://test-case-2', - 'gs://test-case-3', - 'gs://test-case-4', + ( + [ + "gs://test-case-0", + "gs://test-case-1", + "gs://test-case-1.1", + ], + datetime.date(2020, 1, 1), + ), + ( + [ + "gs://test-case-0", + "gs://test-case-1", + "gs://test-case-1.1", + "gs://test-case-2", + ], + datetime.date(2020, 1, 2), + ), + ( + [ + "gs://test-case-0", + "gs://test-case-1", + "gs://test-case-1.1", + "gs://test-case-2", + "gs://test-case-3", + ], + datetime.date(2020, 1, 2), + ), + ( + [ + "gs://test-case-0", + "gs://test-case-1", + "gs://test-case-1.1", + "gs://test-case-2", + "gs://test-case-3", + "gs://test-case-4", + ], + datetime.date(2020, 1, 4), + ), + ( + [ + "gs://test-case-1", + "gs://test-case-1.1", + "gs://test-case-2", + "gs://test-case-3", + "gs://test-case-4", + ], + datetime.date(2020, 1, 4), + ), ], - datetime.date(2020, 1, 4), - ), - ]) + ) - @unittest.mock.patch( - 'upload_lexan_crashes_to_forcey.download_and_unpack_test_case') - @unittest.mock.patch('subprocess.run') - def test_test_case_submission_functions(self, subprocess_run_mock, - download_and_unpack_mock): - mock_gs_url = 'gs://foo/bar/baz' + @unittest.mock.patch( + "upload_lexan_crashes_to_forcey.download_and_unpack_test_case" + ) + @unittest.mock.patch("subprocess.run") + def test_test_case_submission_functions( + self, subprocess_run_mock, download_and_unpack_mock + ): + mock_gs_url = "gs://foo/bar/baz" - def side_effect(gs_url: str, tempdir: str) -> None: - self.assertEqual(gs_url, mock_gs_url) + def side_effect(gs_url: str, tempdir: str) -> None: + self.assertEqual(gs_url, mock_gs_url) - with open(os.path.join(tempdir, 'test_case.c'), 'w') as f: - # All we need is an empty file here. - pass + with open(os.path.join(tempdir, "test_case.c"), "w") as f: + # All we need is an empty file here. 
+ pass - with open(os.path.join(tempdir, 'test_case.sh'), 'w', - encoding='utf-8') as f: - f.write('# Crash reproducer for clang version 9.0.0 (...)\n') - f.write('clang something or other\n') + with open( + os.path.join(tempdir, "test_case.sh"), "w", encoding="utf-8" + ) as f: + f.write("# Crash reproducer for clang version 9.0.0 (...)\n") + f.write("clang something or other\n") - download_and_unpack_mock.side_effect = side_effect - upload_lexan_crashes_to_forcey.submit_test_case(mock_gs_url, '4c') - subprocess_run_mock.assert_not_called() + download_and_unpack_mock.side_effect = side_effect + upload_lexan_crashes_to_forcey.submit_test_case(mock_gs_url, "4c") + subprocess_run_mock.assert_not_called() -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/lock_machine.py b/lock_machine.py index b95678e8..030d7d45 100755 --- a/lock_machine.py +++ b/lock_machine.py @@ -22,496 +22,551 @@ import file_lock_machine class LockException(Exception): - """Base class for exceptions in this module.""" + """Base class for exceptions in this module.""" class MachineNotPingable(LockException): - """Raised when machine does not respond to ping.""" + """Raised when machine does not respond to ping.""" class LockingError(LockException): - """Raised when server fails to lock/unlock machine as requested.""" + """Raised when server fails to lock/unlock machine as requested.""" class DontOwnLock(LockException): - """Raised when user attmepts to unlock machine locked by someone else.""" - # This should not be raised if the user specified '--force' + """Raised when user attmepts to unlock machine locked by someone else.""" + # This should not be raised if the user specified '--force' -class MachineType(enum.Enum): - """Enum class to hold machine type.""" - LOCAL = 'local' - CROSFLEET = 'crosfleet' - - -class LockManager(object): - """Class for locking/unlocking machines vie three different modes. - - This class contains methods for checking the locked status of machines, - and for changing the locked status. It handles HW lab machines and local - machines, using appropriate locking mechanisms for each. - """ - - CROSFLEET_PATH = 'crosfleet' - - # TODO(zhizhouy): lease time may needs to be dynamically adjusted. For now we - # set it long enough to cover the period to finish nightly rotation tests. - LEASE_MINS = 1439 - - CROSFLEET_CREDENTIAL = ('/usr/local/google/home/mobiletc-prebuild' - '/sheriff_utils/credentials/skylab' - '/chromeos-swarming-credential.json') - SWARMING = '~/cipd_binaries/swarming' - SUCCESS = 0 - - def __init__(self, - remotes, - force_option, - chromeos_root, - locks_dir='', - log=None): - """Initializes an LockManager object. - - Args: - remotes: A list of machine names or ip addresses to be managed. Names - and ip addresses should be represented as strings. If the list is - empty, the lock manager will get all known machines. - force_option: A Boolean indicating whether or not to force an unlock of - a machine that was locked by someone else. - chromeos_root: The ChromeOS chroot to use for the autotest scripts. - locks_dir: A directory used for file locking local devices. - log: If not None, this is the logger object to be used for writing out - informational output messages. It is expected to be an instance of - Logger class from cros_utils/logger.py. 
- """ - self.chromeos_root = chromeos_root - self.user = getpass.getuser() - self.logger = log or logger.GetLogger() - self.ce = command_executer.GetCommandExecuter(self.logger) - - sys.path.append(chromeos_root) - - self.locks_dir = locks_dir - - self.machines = list(set(remotes)) or [] - self.toolchain_lab_machines = self.GetAllToolchainLabMachines() - - if not self.machines: - self.machines = self.toolchain_lab_machines - self.force = force_option - - self.local_machines = [] - self.crosfleet_machines = [] - - def CheckMachine(self, machine, error_msg): - """Verifies that machine is responding to ping. - - Args: - machine: String containing the name or ip address of machine to check. - error_msg: Message to print if ping fails. - - Raises: - MachineNotPingable: If machine is not responding to 'ping' - """ - if not machines.MachineIsPingable(machine, logging_level='none'): - cros_machine = machine + '.cros' - if not machines.MachineIsPingable(cros_machine, logging_level='none'): - raise MachineNotPingable(error_msg) - - def GetAllToolchainLabMachines(self): - """Gets a list of all the toolchain machines in the ChromeOS HW lab. - - Returns: - A list of names of the toolchain machines in the ChromeOS HW lab. - """ - machines_file = os.path.join(os.path.dirname(__file__), 'crosperf', - 'default_remotes') - machine_list = [] - with open(machines_file, 'r') as input_file: - lines = input_file.readlines() - for line in lines: - _, remotes = line.split(':') - remotes = remotes.strip() - for r in remotes.split(): - machine_list.append(r.strip()) - return machine_list - - def GetMachineType(self, m): - """Get where the machine is located. - - Args: - m: String containing the name or ip address of machine. - - Returns: - Value of the type in MachineType Enum. - """ - if m in self.local_machines: - return MachineType.LOCAL - if m in self.crosfleet_machines: - return MachineType.CROSFLEET - - def PrintStatusHeader(self): - """Prints the status header lines for machines.""" - print('\nMachine (Board)\t\t\t\t\tStatus') - print('---------------\t\t\t\t\t------') - - def PrintStatus(self, m, state, machine_type): - """Prints status for a single machine. - - Args: - m: String containing the name or ip address of machine. - state: A dictionary of the current state of the machine. - machine_type: MachineType to determine where the machine is located. - """ - if state['locked']: - print('%s (%s)\t\t%slocked by %s since %s' % - (m, state['board'], '\t\t' if machine_type == MachineType.LOCAL - else '', state['locked_by'], state['lock_time'])) - else: - print('%s (%s)\t\t%sunlocked' % - (m, state['board'], - '\t\t' if machine_type == MachineType.LOCAL else '')) - - def AddMachineToLocal(self, machine): - """Adds a machine to local machine list. - Args: - machine: The machine to be added. - """ - if machine not in self.local_machines: - self.local_machines.append(machine) - - def AddMachineToCrosfleet(self, machine): - """Adds a machine to crosfleet machine list. +class MachineType(enum.Enum): + """Enum class to hold machine type.""" - Args: - machine: The machine to be added. - """ - if machine not in self.crosfleet_machines: - self.crosfleet_machines.append(machine) + LOCAL = "local" + CROSFLEET = "crosfleet" - def ListMachineStates(self, machine_states): - """Gets and prints the current status for a list of machines. - Prints out the current status for all of the machines in the current - LockManager's list of machines (set when the object is initialized). 
+class LockManager(object): + """Class for locking/unlocking machines vie three different modes. - Args: - machine_states: A dictionary of the current state of every machine in - the current LockManager's list of machines. Normally obtained by - calling LockManager::GetMachineStates. + This class contains methods for checking the locked status of machines, + and for changing the locked status. It handles HW lab machines and local + machines, using appropriate locking mechanisms for each. """ - self.PrintStatusHeader() - for m in machine_states: - machine_type = self.GetMachineType(m) - state = machine_states[m] - self.PrintStatus(m, state, machine_type) - def UpdateLockInCrosfleet(self, should_lock_machine, machine): - """Ask crosfleet to lease/release a machine. + CROSFLEET_PATH = "crosfleet" + + # TODO(zhizhouy): lease time may needs to be dynamically adjusted. For now we + # set it long enough to cover the period to finish nightly rotation tests. + LEASE_MINS = 1439 + + CROSFLEET_CREDENTIAL = ( + "/usr/local/google/home/mobiletc-prebuild" + "/sheriff_utils/credentials/skylab" + "/chromeos-swarming-credential.json" + ) + SWARMING = "~/cipd_binaries/swarming" + SUCCESS = 0 + + def __init__( + self, remotes, force_option, chromeos_root, locks_dir="", log=None + ): + """Initializes an LockManager object. + + Args: + remotes: A list of machine names or ip addresses to be managed. Names + and ip addresses should be represented as strings. If the list is + empty, the lock manager will get all known machines. + force_option: A Boolean indicating whether or not to force an unlock of + a machine that was locked by someone else. + chromeos_root: The ChromeOS chroot to use for the autotest scripts. + locks_dir: A directory used for file locking local devices. + log: If not None, this is the logger object to be used for writing out + informational output messages. It is expected to be an instance of + Logger class from cros_utils/logger.py. + """ + self.chromeos_root = chromeos_root + self.user = getpass.getuser() + self.logger = log or logger.GetLogger() + self.ce = command_executer.GetCommandExecuter(self.logger) + + sys.path.append(chromeos_root) + + self.locks_dir = locks_dir + + self.machines = list(set(remotes)) or [] + self.toolchain_lab_machines = self.GetAllToolchainLabMachines() + + if not self.machines: + self.machines = self.toolchain_lab_machines + self.force = force_option + + self.local_machines = [] + self.crosfleet_machines = [] + + def CheckMachine(self, machine, error_msg): + """Verifies that machine is responding to ping. + + Args: + machine: String containing the name or ip address of machine to check. + error_msg: Message to print if ping fails. + + Raises: + MachineNotPingable: If machine is not responding to 'ping' + """ + if not machines.MachineIsPingable(machine, logging_level="none"): + cros_machine = machine + ".cros" + if not machines.MachineIsPingable( + cros_machine, logging_level="none" + ): + raise MachineNotPingable(error_msg) + + def GetAllToolchainLabMachines(self): + """Gets a list of all the toolchain machines in the ChromeOS HW lab. + + Returns: + A list of names of the toolchain machines in the ChromeOS HW lab. 
+ """ + machines_file = os.path.join( + os.path.dirname(__file__), "crosperf", "default_remotes" + ) + machine_list = [] + with open(machines_file, "r") as input_file: + lines = input_file.readlines() + for line in lines: + _, remotes = line.split(":") + remotes = remotes.strip() + for r in remotes.split(): + machine_list.append(r.strip()) + return machine_list + + def GetMachineType(self, m): + """Get where the machine is located. + + Args: + m: String containing the name or ip address of machine. + + Returns: + Value of the type in MachineType Enum. + """ + if m in self.local_machines: + return MachineType.LOCAL + if m in self.crosfleet_machines: + return MachineType.CROSFLEET + + def PrintStatusHeader(self): + """Prints the status header lines for machines.""" + print("\nMachine (Board)\t\t\t\t\tStatus") + print("---------------\t\t\t\t\t------") + + def PrintStatus(self, m, state, machine_type): + """Prints status for a single machine. + + Args: + m: String containing the name or ip address of machine. + state: A dictionary of the current state of the machine. + machine_type: MachineType to determine where the machine is located. + """ + if state["locked"]: + print( + "%s (%s)\t\t%slocked by %s since %s" + % ( + m, + state["board"], + "\t\t" if machine_type == MachineType.LOCAL else "", + state["locked_by"], + state["lock_time"], + ) + ) + else: + print( + "%s (%s)\t\t%sunlocked" + % ( + m, + state["board"], + "\t\t" if machine_type == MachineType.LOCAL else "", + ) + ) + + def AddMachineToLocal(self, machine): + """Adds a machine to local machine list. + + Args: + machine: The machine to be added. + """ + if machine not in self.local_machines: + self.local_machines.append(machine) + + def AddMachineToCrosfleet(self, machine): + """Adds a machine to crosfleet machine list. + + Args: + machine: The machine to be added. + """ + if machine not in self.crosfleet_machines: + self.crosfleet_machines.append(machine) + + def ListMachineStates(self, machine_states): + """Gets and prints the current status for a list of machines. + + Prints out the current status for all of the machines in the current + LockManager's list of machines (set when the object is initialized). + + Args: + machine_states: A dictionary of the current state of every machine in + the current LockManager's list of machines. Normally obtained by + calling LockManager::GetMachineStates. + """ + self.PrintStatusHeader() + for m in machine_states: + machine_type = self.GetMachineType(m) + state = machine_states[m] + self.PrintStatus(m, state, machine_type) + + def UpdateLockInCrosfleet(self, should_lock_machine, machine): + """Ask crosfleet to lease/release a machine. + + Args: + should_lock_machine: Boolean indicating whether to lock the machine (True) + or unlock the machine (False). + machine: The machine to update. + + Returns: + True if requested action succeeded, else False. + """ + try: + if should_lock_machine: + ret = self.LeaseCrosfleetMachine(machine) + else: + ret = self.ReleaseCrosfleetMachine(machine) + except Exception: + return False + return ret + + def UpdateFileLock(self, should_lock_machine, machine): + """Use file lock for local machines, + + Args: + should_lock_machine: Boolean indicating whether to lock the machine (True) + or unlock the machine (False). + machine: The machine to update. + + Returns: + True if requested action succeeded, else False. 
+ """ + try: + if should_lock_machine: + ret = file_lock_machine.Machine(machine, self.locks_dir).Lock( + True, sys.argv[0] + ) + else: + ret = file_lock_machine.Machine(machine, self.locks_dir).Unlock( + True + ) + except Exception: + return False + return ret + + def UpdateMachines(self, lock_machines): + """Sets the locked state of the machines to the requested value. + + The machines updated are the ones in self.machines (specified when the + class object was intialized). + + Args: + lock_machines: Boolean indicating whether to lock the machines (True) or + unlock the machines (False). + + Returns: + A list of the machines whose state was successfully updated. + """ + updated_machines = [] + action = "Locking" if lock_machines else "Unlocking" + for m in self.machines: + # TODO(zhizhouy): Handling exceptions with more details when locking + # doesn't succeed. + machine_type = self.GetMachineType(m) + if machine_type == MachineType.CROSFLEET: + ret = self.UpdateLockInCrosfleet(lock_machines, m) + elif machine_type == MachineType.LOCAL: + ret = self.UpdateFileLock(lock_machines, m) + + if ret: + self.logger.LogOutput( + "%s %s machine succeeded: %s." + % (action, machine_type.value, m) + ) + updated_machines.append(m) + else: + self.logger.LogOutput( + "%s %s machine failed: %s." + % (action, machine_type.value, m) + ) + + self.machines = updated_machines + return updated_machines + + def _InternalRemoveMachine(self, machine): + """Remove machine from internal list of machines. + + Args: + machine: Name of machine to be removed from internal list. + """ + # Check to see if machine is lab machine and if so, make sure it has + # ".cros" on the end. + cros_machine = machine + if machine.find("rack") > 0 and machine.find("row") > 0: + if machine.find(".cros") == -1: + cros_machine = cros_machine + ".cros" + + self.machines = [ + m for m in self.machines if m not in (cros_machine, machine) + ] + + def CheckMachineLocks(self, machine_states, cmd): + """Check that every machine in requested list is in the proper state. + + If the cmd is 'unlock' verify that every machine is locked by requestor. + If the cmd is 'lock' verify that every machine is currently unlocked. + + Args: + machine_states: A dictionary of the current state of every machine in + the current LockManager's list of machines. Normally obtained by + calling LockManager::GetMachineStates. + cmd: The user-requested action for the machines: 'lock' or 'unlock'. + + Raises: + DontOwnLock: The lock on a requested machine is owned by someone else. + """ + for k, state in machine_states.items(): + if cmd == "unlock": + if not state["locked"]: + self.logger.LogWarning( + "Attempt to unlock already unlocked machine " + "(%s)." % k + ) + self._InternalRemoveMachine(k) + + # TODO(zhizhouy): Crosfleet doesn't support host info such as locked_by. + # Need to update this when crosfleet supports it. + if ( + state["locked"] + and state["locked_by"] + and state["locked_by"] != self.user + ): + raise DontOwnLock( + "Attempt to unlock machine (%s) locked by someone " + "else (%s)." % (k, state["locked_by"]) + ) + elif cmd == "lock": + if state["locked"]: + self.logger.LogWarning( + "Attempt to lock already locked machine (%s)" % k + ) + self._InternalRemoveMachine(k) + + def GetMachineStates(self, cmd=""): + """Gets the current state of all the requested machines. + + Gets the current state of all the requested machines. Stores the data in a + dictionary keyed by machine name. + + Args: + cmd: The command for which we are getting the machine states. 
This is + important because if one of the requested machines is missing we raise + an exception, unless the requested command is 'add'. + + Returns: + A dictionary of machine states for all the machines in the LockManager + object. + """ + machine_list = {} + for m in self.machines: + # For local or crosfleet machines, we simply set {'locked': status} for + # them + # TODO(zhizhouy): This is a quick fix since crosfleet cannot return host + # info as afe does. We need to get more info such as locked_by when + # crosfleet supports that. + values = { + "locked": 0 if cmd == "lock" else 1, + "board": "??", + "locked_by": "", + "lock_time": "", + } + machine_list[m] = values + + self.ListMachineStates(machine_list) + + return machine_list + + def CheckMachineInCrosfleet(self, machine): + """Run command to check if machine is in Crosfleet or not. + + Returns: + True if machine in crosfleet, else False + """ + credential = "" + if os.path.exists(self.CROSFLEET_CREDENTIAL): + credential = "--service-account-json %s" % self.CROSFLEET_CREDENTIAL + server = "--server https://chromeos-swarming.appspot.com" + dimensions = "--dimension dut_name=%s" % machine.rstrip(".cros") + + cmd = f"{self.SWARMING} bots {server} {credential} {dimensions}" + exit_code, stdout, stderr = self.ce.RunCommandWOutput(cmd) + if exit_code: + raise ValueError( + "Querying bots failed (2); stdout: %r; stderr: %r" + % (stdout, stderr) + ) + + # The command will return a json output as stdout. If machine not in + # crosfleet, stdout will look like this: + # { + # "death_timeout": "600", + # "now": "TIMESTAMP" + # } + # Otherwise there will be a tuple starting with 'items', we simply detect + # this keyword for result. + return stdout != "[]" + + def LeaseCrosfleetMachine(self, machine): + """Run command to lease dut from crosfleet. + + Returns: + True if succeeded, False if failed. + """ + credential = "" + if os.path.exists(self.CROSFLEET_CREDENTIAL): + credential = "-service-account-json %s" % self.CROSFLEET_CREDENTIAL + cmd = ("%s dut lease -minutes %s %s %s %s") % ( + self.CROSFLEET_PATH, + self.LEASE_MINS, + credential, + "-host", + machine.rstrip(".cros"), + ) + # Wait 8 minutes for server to start the lease task, if not started, + # we will treat it as unavailable. + check_interval_time = 480 + retval = self.ce.RunCommand(cmd, command_timeout=check_interval_time) + return retval == self.SUCCESS + + def ReleaseCrosfleetMachine(self, machine): + """Run command to release dut from crosfleet. + + Returns: + True if succeeded, False if failed. + """ + credential = "" + if os.path.exists(self.CROSFLEET_CREDENTIAL): + credential = "-service-account-json %s" % self.CROSFLEET_CREDENTIAL + + cmd = ("%s dut abandon %s %s") % ( + self.CROSFLEET_PATH, + credential, + machine.rstrip(".cros"), + ) + retval = self.ce.RunCommand(cmd) + return retval == self.SUCCESS - Args: - should_lock_machine: Boolean indicating whether to lock the machine (True) - or unlock the machine (False). - machine: The machine to update. - Returns: - True if requested action succeeded, else False. - """ - try: - if should_lock_machine: - ret = self.LeaseCrosfleetMachine(machine) - else: - ret = self.ReleaseCrosfleetMachine(machine) - except Exception: - return False - return ret - - def UpdateFileLock(self, should_lock_machine, machine): - """Use file lock for local machines, - - Args: - should_lock_machine: Boolean indicating whether to lock the machine (True) - or unlock the machine (False). - machine: The machine to update. 
- - Returns: - True if requested action succeeded, else False. - """ - try: - if should_lock_machine: - ret = file_lock_machine.Machine(machine, self.locks_dir).Lock( - True, sys.argv[0]) - else: - ret = file_lock_machine.Machine(machine, self.locks_dir).Unlock(True) - except Exception: - return False - return ret - - def UpdateMachines(self, lock_machines): - """Sets the locked state of the machines to the requested value. - - The machines updated are the ones in self.machines (specified when the - class object was intialized). +def Main(argv): + """Parse the options, initialize lock manager and dispatch proper method. Args: - lock_machines: Boolean indicating whether to lock the machines (True) or - unlock the machines (False). + argv: The options with which this script was invoked. Returns: - A list of the machines whose state was successfully updated. + 0 unless an exception is raised. """ - updated_machines = [] - action = 'Locking' if lock_machines else 'Unlocking' - for m in self.machines: - # TODO(zhizhouy): Handling exceptions with more details when locking - # doesn't succeed. - machine_type = self.GetMachineType(m) - if machine_type == MachineType.CROSFLEET: - ret = self.UpdateLockInCrosfleet(lock_machines, m) - elif machine_type == MachineType.LOCAL: - ret = self.UpdateFileLock(lock_machines, m) - - if ret: - self.logger.LogOutput('%s %s machine succeeded: %s.' % - (action, machine_type.value, m)) - updated_machines.append(m) - else: - self.logger.LogOutput('%s %s machine failed: %s.' % - (action, machine_type.value, m)) - - self.machines = updated_machines - return updated_machines - - def _InternalRemoveMachine(self, machine): - """Remove machine from internal list of machines. + parser = argparse.ArgumentParser() + + parser.add_argument( + "--list", + dest="cmd", + action="store_const", + const="status", + help="List current status of all known machines.", + ) + parser.add_argument( + "--lock", + dest="cmd", + action="store_const", + const="lock", + help="Lock given machine(s).", + ) + parser.add_argument( + "--unlock", + dest="cmd", + action="store_const", + const="unlock", + help="Unlock given machine(s).", + ) + parser.add_argument( + "--status", + dest="cmd", + action="store_const", + const="status", + help="List current status of given machine(s).", + ) + parser.add_argument( + "--remote", dest="remote", help="machines on which to operate" + ) + parser.add_argument( + "--chromeos_root", + dest="chromeos_root", + required=True, + help="ChromeOS root to use for autotest scripts.", + ) + parser.add_argument( + "--force", + dest="force", + action="store_true", + default=False, + help="Force lock/unlock of machines, even if not" + " current lock owner.", + ) + + options = parser.parse_args(argv) + + if not options.remote and options.cmd != "status": + parser.error("No machines specified for operation.") + + if not os.path.isdir(options.chromeos_root): + parser.error("Cannot find chromeos_root: %s." % options.chromeos_root) + + if not options.cmd: + parser.error( + "No operation selected (--list, --status, --lock, --unlock," + " --add_machine, --remove_machine)." + ) - Args: - machine: Name of machine to be removed from internal list. - """ - # Check to see if machine is lab machine and if so, make sure it has - # ".cros" on the end. 
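
The removal logic around this comment treats a lab-style name (one containing 'row' and 'rack') as also matching its '.cros'-suffixed alias. A simplified sketch; the hostname in the comment is illustrative only:

    def lab_aliases(machine: str) -> set:
        """Names a toolchain lab machine may appear under (simplified)."""
        names = {machine}
        # Lab hosts look like 'chromeos2-row9-rack1-host2' and may also be
        # tracked with a '.cros' suffix; drop both spellings from the list.
        if "row" in machine and "rack" in machine and ".cros" not in machine:
            names.add(machine + ".cros")
        return names

    # e.g. machines = [m for m in machines if m not in lab_aliases(name)]
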
- cros_machine = machine - if machine.find('rack') > 0 and machine.find('row') > 0: - if machine.find('.cros') == -1: - cros_machine = cros_machine + '.cros' - - self.machines = [ - m for m in self.machines if m not in (cros_machine, machine) - ] - - def CheckMachineLocks(self, machine_states, cmd): - """Check that every machine in requested list is in the proper state. - - If the cmd is 'unlock' verify that every machine is locked by requestor. - If the cmd is 'lock' verify that every machine is currently unlocked. - - Args: - machine_states: A dictionary of the current state of every machine in - the current LockManager's list of machines. Normally obtained by - calling LockManager::GetMachineStates. - cmd: The user-requested action for the machines: 'lock' or 'unlock'. - - Raises: - DontOwnLock: The lock on a requested machine is owned by someone else. - """ - for k, state in machine_states.items(): - if cmd == 'unlock': - if not state['locked']: - self.logger.LogWarning('Attempt to unlock already unlocked machine ' - '(%s).' % k) - self._InternalRemoveMachine(k) - - # TODO(zhizhouy): Crosfleet doesn't support host info such as locked_by. - # Need to update this when crosfleet supports it. - if (state['locked'] and state['locked_by'] - and state['locked_by'] != self.user): - raise DontOwnLock('Attempt to unlock machine (%s) locked by someone ' - 'else (%s).' % (k, state['locked_by'])) - elif cmd == 'lock': - if state['locked']: - self.logger.LogWarning( - 'Attempt to lock already locked machine (%s)' % k) - self._InternalRemoveMachine(k) - - def GetMachineStates(self, cmd=''): - """Gets the current state of all the requested machines. - - Gets the current state of all the requested machines. Stores the data in a - dictionary keyed by machine name. + machine_list = [] + if options.remote: + machine_list = options.remote.split() - Args: - cmd: The command for which we are getting the machine states. This is - important because if one of the requested machines is missing we raise - an exception, unless the requested command is 'add'. + lock_manager = LockManager( + machine_list, options.force, options.chromeos_root + ) - Returns: - A dictionary of machine states for all the machines in the LockManager - object. - """ - machine_list = {} - for m in self.machines: - # For local or crosfleet machines, we simply set {'locked': status} for - # them - # TODO(zhizhouy): This is a quick fix since crosfleet cannot return host - # info as afe does. We need to get more info such as locked_by when - # crosfleet supports that. - values = { - 'locked': 0 if cmd == 'lock' else 1, - 'board': '??', - 'locked_by': '', - 'lock_time': '' - } - machine_list[m] = values - - self.ListMachineStates(machine_list) - - return machine_list - - def CheckMachineInCrosfleet(self, machine): - """Run command to check if machine is in Crosfleet or not. 
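
A caution on the '.cros' trimming used by the crosfleet commands in this file: str.rstrip takes a character set, not a suffix, so machine.rstrip(".cros") keeps stripping trailing '.', 'c', 'r', 'o', and 's' characters and can over-trim hostnames that end in those letters. A suffix-safe sketch:

    def strip_cros_suffix(machine: str) -> str:
        # str.removesuffix does this directly on Python 3.9+; the slice
        # form below works on older interpreters as well.
        if machine.endswith(".cros"):
            return machine[: -len(".cros")]
        return machine

    assert strip_cros_suffix("dut1.cros") == "dut1"
    assert strip_cros_suffix("macros") == "macros"  # rstrip would yield 'ma'
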
+ machine_states = lock_manager.GetMachineStates(cmd=options.cmd) + cmd = options.cmd - Returns: - True if machine in crosfleet, else False - """ - credential = '' - if os.path.exists(self.CROSFLEET_CREDENTIAL): - credential = '--service-account-json %s' % self.CROSFLEET_CREDENTIAL - server = '--server https://chromeos-swarming.appspot.com' - dimensions = '--dimension dut_name=%s' % machine.rstrip('.cros') - - cmd = f'{self.SWARMING} bots {server} {credential} {dimensions}' - exit_code, stdout, stderr = self.ce.RunCommandWOutput(cmd) - if exit_code: - raise ValueError('Querying bots failed (2); stdout: %r; stderr: %r' % - (stdout, stderr)) - - # The command will return a json output as stdout. If machine not in - # crosfleet, stdout will look like this: - # { - # "death_timeout": "600", - # "now": "TIMESTAMP" - # } - # Otherwise there will be a tuple starting with 'items', we simply detect - # this keyword for result. - return stdout != '[]' - - def LeaseCrosfleetMachine(self, machine): - """Run command to lease dut from crosfleet. + if cmd == "status": + lock_manager.ListMachineStates(machine_states) - Returns: - True if succeeded, False if failed. - """ - credential = '' - if os.path.exists(self.CROSFLEET_CREDENTIAL): - credential = '-service-account-json %s' % self.CROSFLEET_CREDENTIAL - cmd = (('%s dut lease -minutes %s %s %s %s') % - (self.CROSFLEET_PATH, self.LEASE_MINS, credential, '-host', - machine.rstrip('.cros'))) - # Wait 8 minutes for server to start the lease task, if not started, - # we will treat it as unavailable. - check_interval_time = 480 - retval = self.ce.RunCommand(cmd, command_timeout=check_interval_time) - return retval == self.SUCCESS - - def ReleaseCrosfleetMachine(self, machine): - """Run command to release dut from crosfleet. + elif cmd == "lock": + if not lock_manager.force: + lock_manager.CheckMachineLocks(machine_states, cmd) + lock_manager.UpdateMachines(True) - Returns: - True if succeeded, False if failed. - """ - credential = '' - if os.path.exists(self.CROSFLEET_CREDENTIAL): - credential = '-service-account-json %s' % self.CROSFLEET_CREDENTIAL + elif cmd == "unlock": + if not lock_manager.force: + lock_manager.CheckMachineLocks(machine_states, cmd) + lock_manager.UpdateMachines(False) - cmd = (('%s dut abandon %s %s') % - (self.CROSFLEET_PATH, credential, machine.rstrip('.cros'))) - retval = self.ce.RunCommand(cmd) - return retval == self.SUCCESS + return 0 -def Main(argv): - """Parse the options, initialize lock manager and dispatch proper method. - - Args: - argv: The options with which this script was invoked. - - Returns: - 0 unless an exception is raised. 
- """ - parser = argparse.ArgumentParser() - - parser.add_argument('--list', - dest='cmd', - action='store_const', - const='status', - help='List current status of all known machines.') - parser.add_argument('--lock', - dest='cmd', - action='store_const', - const='lock', - help='Lock given machine(s).') - parser.add_argument('--unlock', - dest='cmd', - action='store_const', - const='unlock', - help='Unlock given machine(s).') - parser.add_argument('--status', - dest='cmd', - action='store_const', - const='status', - help='List current status of given machine(s).') - parser.add_argument('--remote', - dest='remote', - help='machines on which to operate') - parser.add_argument('--chromeos_root', - dest='chromeos_root', - required=True, - help='ChromeOS root to use for autotest scripts.') - parser.add_argument('--force', - dest='force', - action='store_true', - default=False, - help='Force lock/unlock of machines, even if not' - ' current lock owner.') - - options = parser.parse_args(argv) - - if not options.remote and options.cmd != 'status': - parser.error('No machines specified for operation.') - - if not os.path.isdir(options.chromeos_root): - parser.error('Cannot find chromeos_root: %s.' % options.chromeos_root) - - if not options.cmd: - parser.error('No operation selected (--list, --status, --lock, --unlock,' - ' --add_machine, --remove_machine).') - - machine_list = [] - if options.remote: - machine_list = options.remote.split() - - lock_manager = LockManager(machine_list, options.force, - options.chromeos_root) - - machine_states = lock_manager.GetMachineStates(cmd=options.cmd) - cmd = options.cmd - - if cmd == 'status': - lock_manager.ListMachineStates(machine_states) - - elif cmd == 'lock': - if not lock_manager.force: - lock_manager.CheckMachineLocks(machine_states, cmd) - lock_manager.UpdateMachines(True) - - elif cmd == 'unlock': - if not lock_manager.force: - lock_manager.CheckMachineLocks(machine_states, cmd) - lock_manager.UpdateMachines(False) - - return 0 - - -if __name__ == '__main__': - sys.exit(Main(sys.argv[1:])) +if __name__ == "__main__": + sys.exit(Main(sys.argv[1:])) diff --git a/make_root_writable.py b/make_root_writable.py index 5353a750..500f8fe5 100755 --- a/make_root_writable.py +++ b/make_root_writable.py @@ -12,7 +12,8 @@ This script updates a remote chromebook to make the / directory writable." 
from __future__ import print_function -__author__ = 'cmtice@google.com (Caroline Tice)' + +__author__ = "cmtice@google.com (Caroline Tice)" import argparse import os @@ -25,203 +26,238 @@ from cros_utils import logger from cros_utils import machines from cros_utils import misc -lock_file = '/tmp/image_chromeos_lock/image_chromeos_lock' + +lock_file = "/tmp/image_chromeos_lock/image_chromeos_lock" def Usage(parser, message): - print('ERROR: %s' % message) - parser.print_help() - sys.exit(0) + print("ERROR: %s" % message) + parser.print_help() + sys.exit(0) def RebootChromebook(chromeos_root, remote, cmd_executer): - cmd = 'sudo reboot' - cmd_executer.CrosRunCommand(cmd, chromeos_root=chromeos_root, machine=remote) - time.sleep(10) - success = False - for _ in range(1, 10): - if machines.MachineIsPingable(remote): - success = True - break - time.sleep(1) - return success + cmd = "sudo reboot" + cmd_executer.CrosRunCommand( + cmd, chromeos_root=chromeos_root, machine=remote + ) + time.sleep(10) + success = False + for _ in range(1, 10): + if machines.MachineIsPingable(remote): + success = True + break + time.sleep(1) + return success def ParseOutput(output): - # See comment in FindPartitionNum. - lines = output.split('\n') - num_str = '-1' - for line in lines: - l = line.strip() - words = l.split() - if (len(words) > 2 and words[0] == 'sudo' and - words[1] == '/usr/share/vboot/bin/make_dev_ssd.sh' and - words[-2] == '--partitions'): - num_str = words[-1] - break - num = int(num_str) - - return num + # See comment in FindPartitionNum. + lines = output.split("\n") + num_str = "-1" + for line in lines: + l = line.strip() + words = l.split() + if ( + len(words) > 2 + and words[0] == "sudo" + and words[1] == "/usr/share/vboot/bin/make_dev_ssd.sh" + and words[-2] == "--partitions" + ): + num_str = words[-1] + break + num = int(num_str) + + return num def FindPartitionNum(chromeos_root, remote, logs, cmd_executer): - partition_cmd = ('/usr/share/vboot/bin/make_dev_ssd.sh ' - '--remove_rootfs_verification') - _, output, _ = cmd_executer.CrosRunCommandWOutput( - partition_cmd, - chromeos_root=chromeos_root, - machine=remote, - terminated_timeout=10) - - # The command above, with no --partitions flag, should return output - # in the following form: - - # make_dev_ssd.sh: INFO: checking system firmware... - # - # ERROR: YOU ARE TRYING TO MODIFY THE LIVE SYSTEM IMAGE /dev/mmcblk0. - # - # The system may become unusable after that change, especially when you have - # some auto updates in progress. To make it safer, we suggest you to only - # change the partition you have booted with. To do that, re-execute this - # command as: - # - # sudo /usr/share/vboot/bin/make_dev_ssd.sh --partitions 4 - # - # If you are sure to modify other partition, please invoke the command again - # and explicitly assign only one target partition for each time - # (--partitions N ) - # - # make_dev_ssd.sh: ERROR: IMAGE /dev/mmcblk0 IS NOT MODIFIED. - - # We pass this output to the ParseOutput function where it finds the 'sudo' - # line with the partition number and returns the partition number. 
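
Fed the sample transcript quoted in the comment above, ParseOutput picks the partition number off the suggested 'sudo ... --partitions N' line, and falls back to -1 when no such line exists:

    sample = (
        "sudo /usr/share/vboot/bin/make_dev_ssd.sh --partitions 4\n"
        "make_dev_ssd.sh: ERROR: IMAGE /dev/mmcblk0 IS NOT MODIFIED.\n"
    )
    assert ParseOutput(sample) == 4
    assert ParseOutput("unrelated output") == -1
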
- - num = ParseOutput(output) - - if num == -1: - logs.LogOutput('Failed to find partition number in "%s"' % output) - return num - - -def TryRemoveRootfsFromPartition(chromeos_root, remote, cmd_executer, - partition_num): - partition_cmd = ('/usr/share/vboot/bin/make_dev_ssd.sh ' - '--remove_rootfs_verification --partition %d' % - partition_num) - ret = cmd_executer.CrosRunCommand( - partition_cmd, - chromeos_root=chromeos_root, - machine=remote, - terminated_timeout=10) - return ret + partition_cmd = ( + "/usr/share/vboot/bin/make_dev_ssd.sh " "--remove_rootfs_verification" + ) + _, output, _ = cmd_executer.CrosRunCommandWOutput( + partition_cmd, + chromeos_root=chromeos_root, + machine=remote, + terminated_timeout=10, + ) + + # The command above, with no --partitions flag, should return output + # in the following form: + + # make_dev_ssd.sh: INFO: checking system firmware... + # + # ERROR: YOU ARE TRYING TO MODIFY THE LIVE SYSTEM IMAGE /dev/mmcblk0. + # + # The system may become unusable after that change, especially when you have + # some auto updates in progress. To make it safer, we suggest you to only + # change the partition you have booted with. To do that, re-execute this + # command as: + # + # sudo /usr/share/vboot/bin/make_dev_ssd.sh --partitions 4 + # + # If you are sure to modify other partition, please invoke the command again + # and explicitly assign only one target partition for each time + # (--partitions N ) + # + # make_dev_ssd.sh: ERROR: IMAGE /dev/mmcblk0 IS NOT MODIFIED. + + # We pass this output to the ParseOutput function where it finds the 'sudo' + # line with the partition number and returns the partition number. + + num = ParseOutput(output) + + if num == -1: + logs.LogOutput('Failed to find partition number in "%s"' % output) + return num + + +def TryRemoveRootfsFromPartition( + chromeos_root, remote, cmd_executer, partition_num +): + partition_cmd = ( + "/usr/share/vboot/bin/make_dev_ssd.sh " + "--remove_rootfs_verification --partition %d" % partition_num + ) + ret = cmd_executer.CrosRunCommand( + partition_cmd, + chromeos_root=chromeos_root, + machine=remote, + terminated_timeout=10, + ) + return ret def TryRemountPartitionAsRW(chromeos_root, remote, cmd_executer): - command = 'sudo mount -o remount,rw /' - ret = cmd_executer.CrosRunCommand( - command, - chromeos_root=chromeos_root, - machine=remote, - terminated_timeout=10) - return ret + command = "sudo mount -o remount,rw /" + ret = cmd_executer.CrosRunCommand( + command, + chromeos_root=chromeos_root, + machine=remote, + terminated_timeout=10, + ) + return ret def Main(argv): - parser = argparse.ArgumentParser() - parser.add_argument( - '-c', - '--chromeos_root', - dest='chromeos_root', - help='Target directory for ChromeOS installation.') - parser.add_argument('-r', '--remote', dest='remote', help='Target device.') - parser.add_argument( - '-n', - '--no_lock', - dest='no_lock', - default=False, - action='store_true', - help='Do not attempt to lock remote before imaging. ' - 'This option should only be used in cases where the ' - 'exclusive lock has already been acquired (e.g. 
in ' - 'a script that calls this one).') - - options = parser.parse_args(argv[1:]) - - # Common initializations - log_level = 'average' - cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) - l = logger.GetLogger() - - if options.chromeos_root is None: - Usage(parser, '--chromeos_root must be set') - - if options.remote is None: - Usage(parser, '--remote must be set') - - options.chromeos_root = os.path.expanduser(options.chromeos_root) - - try: - should_unlock = False - if not options.no_lock: - try: - _ = locks.AcquireLock( - list(options.remote.split()), options.chromeos_root) - should_unlock = True - except Exception as e: - raise RuntimeError('Error acquiring machine: %s' % str(e)) - - # Workaround for crosbug.com/35684. - os.chmod(misc.GetChromeOSKeyFile(options.chromeos_root), 0o600) - - if log_level == 'average': - cmd_executer.SetLogLevel('verbose') - - if not machines.MachineIsPingable(options.remote): - raise RuntimeError('Machine %s does not appear to be up.' % - options.remote) - - ret = TryRemountPartitionAsRW(options.chromeos_root, options.remote, - cmd_executer) - - if ret != 0: - l.LogOutput('Initial mount command failed. Looking for root partition' - ' number.') - part_num = FindPartitionNum(options.chromeos_root, options.remote, l, - cmd_executer) - if part_num != -1: - l.LogOutput('Attempting to remove rootfs verification on partition %d' % - part_num) - ret = TryRemoveRootfsFromPartition(options.chromeos_root, - options.remote, cmd_executer, - part_num) - if ret == 0: - l.LogOutput('Succeeded in removing roofs verification from' - ' partition %d. Rebooting...' % part_num) - if not RebootChromebook(options.chromeos_root, options.remote, - cmd_executer): - raise RuntimeError('Chromebook failed to reboot.') - l.LogOutput('Reboot succeeded. Attempting to remount partition.') - ret = TryRemountPartitionAsRW(options.chromeos_root, options.remote, - cmd_executer) - if ret == 0: - l.LogOutput('Re-mounted / as writable.') - else: - l.LogOutput('Re-mount failed. / is not writable.') + parser = argparse.ArgumentParser() + parser.add_argument( + "-c", + "--chromeos_root", + dest="chromeos_root", + help="Target directory for ChromeOS installation.", + ) + parser.add_argument("-r", "--remote", dest="remote", help="Target device.") + parser.add_argument( + "-n", + "--no_lock", + dest="no_lock", + default=False, + action="store_true", + help="Do not attempt to lock remote before imaging. " + "This option should only be used in cases where the " + "exclusive lock has already been acquired (e.g. in " + "a script that calls this one).", + ) + + options = parser.parse_args(argv[1:]) + + # Common initializations + log_level = "average" + cmd_executer = command_executer.GetCommandExecuter(log_level=log_level) + l = logger.GetLogger() + + if options.chromeos_root is None: + Usage(parser, "--chromeos_root must be set") + + if options.remote is None: + Usage(parser, "--remote must be set") + + options.chromeos_root = os.path.expanduser(options.chromeos_root) + + try: + should_unlock = False + if not options.no_lock: + try: + _ = locks.AcquireLock( + list(options.remote.split()), options.chromeos_root + ) + should_unlock = True + except Exception as e: + raise RuntimeError("Error acquiring machine: %s" % str(e)) + + # Workaround for crosbug.com/35684. 
+ os.chmod(misc.GetChromeOSKeyFile(options.chromeos_root), 0o600) + + if log_level == "average": + cmd_executer.SetLogLevel("verbose") + + if not machines.MachineIsPingable(options.remote): + raise RuntimeError( + "Machine %s does not appear to be up." % options.remote + ) + + ret = TryRemountPartitionAsRW( + options.chromeos_root, options.remote, cmd_executer + ) + + if ret != 0: + l.LogOutput( + "Initial mount command failed. Looking for root partition" + " number." + ) + part_num = FindPartitionNum( + options.chromeos_root, options.remote, l, cmd_executer + ) + if part_num != -1: + l.LogOutput( + "Attempting to remove rootfs verification on partition %d" + % part_num + ) + ret = TryRemoveRootfsFromPartition( + options.chromeos_root, + options.remote, + cmd_executer, + part_num, + ) + if ret == 0: + l.LogOutput( + "Succeeded in removing roofs verification from" + " partition %d. Rebooting..." % part_num + ) + if not RebootChromebook( + options.chromeos_root, options.remote, cmd_executer + ): + raise RuntimeError("Chromebook failed to reboot.") + l.LogOutput( + "Reboot succeeded. Attempting to remount partition." + ) + ret = TryRemountPartitionAsRW( + options.chromeos_root, options.remote, cmd_executer + ) + if ret == 0: + l.LogOutput("Re-mounted / as writable.") + else: + l.LogOutput("Re-mount failed. / is not writable.") + else: + l.LogOutput( + "Failed to remove rootfs verification from partition" + " %d." % part_num + ) else: - l.LogOutput('Failed to remove rootfs verification from partition' - ' %d.' % part_num) - else: - l.LogOutput('Re-mounted / as writable.') + l.LogOutput("Re-mounted / as writable.") - l.LogOutput('Exiting.') + l.LogOutput("Exiting.") - finally: - if should_unlock: - locks.ReleaseLock(list(options.remote.split()), options.chromeos_root) + finally: + if should_unlock: + locks.ReleaseLock( + list(options.remote.split()), options.chromeos_root + ) - return ret + return ret -if __name__ == '__main__': - retval = Main(sys.argv) - sys.exit(retval) +if __name__ == "__main__": + retval = Main(sys.argv) + sys.exit(retval) diff --git a/orderfile/post_process_orderfile.py b/orderfile/post_process_orderfile.py index 748e5cf2..5d98756b 100755 --- a/orderfile/post_process_orderfile.py +++ b/orderfile/post_process_orderfile.py @@ -19,7 +19,8 @@ The results of the file is intended to be uploaded and consumed when linking Chrome in ChromeOS. """ -from __future__ import division, print_function +from __future__ import division +from __future__ import print_function import argparse import os @@ -27,65 +28,68 @@ import sys def _parse_nm_output(stream): - for line in (line.rstrip() for line in stream): - if not line: - continue + for line in (line.rstrip() for line in stream): + if not line: + continue - pieces = line.split() - if len(pieces) != 3: - continue + pieces = line.split() + if len(pieces) != 3: + continue - _, ty, symbol = pieces - if ty not in 'tT': - continue + _, ty, symbol = pieces + if ty not in "tT": + continue - # We'll sometimes see synthesized symbols that start with $. There isn't - # much we can do about or with them, regrettably. - if symbol.startswith('$'): - continue + # We'll sometimes see synthesized symbols that start with $. There isn't + # much we can do about or with them, regrettably. 
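+        # (On ARM targets these are usually mapping symbols such as
+        # $a, $d, or $t, which carry no useful ordering information.)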
+ if symbol.startswith("$"): + continue - yield symbol + yield symbol def _remove_duplicates(iterable): - seen = set() - for item in iterable: - if item in seen: - continue - seen.add(item) - yield item + seen = set() + for item in iterable: + if item in seen: + continue + seen.add(item) + yield item def run(c3_ordered_stream, chrome_nm_stream, output_stream): - head_marker = 'chrome_begin_ordered_code' - tail_marker = 'chrome_end_ordered_code' - - c3_ordered_syms = [x.strip() for x in c3_ordered_stream.readlines()] - all_chrome_syms = set(_parse_nm_output(chrome_nm_stream)) - # Sort by name, so it's predictable. Otherwise, these should all land in the - # same hugepage anyway, so order doesn't matter as much. - builtin_syms = sorted(s for s in all_chrome_syms if s.startswith('Builtins_')) - output = _remove_duplicates([head_marker] + c3_ordered_syms + builtin_syms + - [tail_marker]) - output_stream.write('\n'.join(output)) + head_marker = "chrome_begin_ordered_code" + tail_marker = "chrome_end_ordered_code" + + c3_ordered_syms = [x.strip() for x in c3_ordered_stream.readlines()] + all_chrome_syms = set(_parse_nm_output(chrome_nm_stream)) + # Sort by name, so it's predictable. Otherwise, these should all land in the + # same hugepage anyway, so order doesn't matter as much. + builtin_syms = sorted( + s for s in all_chrome_syms if s.startswith("Builtins_") + ) + output = _remove_duplicates( + [head_marker] + c3_ordered_syms + builtin_syms + [tail_marker] + ) + output_stream.write("\n".join(output)) def main(argv): - parser = argparse.ArgumentParser() - parser.add_argument('--chrome_nm', required=True, dest='chrome_nm') - parser.add_argument('--input', required=True, dest='input_file') - parser.add_argument('--output', required=True, dest='output_file') + parser = argparse.ArgumentParser() + parser.add_argument("--chrome_nm", required=True, dest="chrome_nm") + parser.add_argument("--input", required=True, dest="input_file") + parser.add_argument("--output", required=True, dest="output_file") - options = parser.parse_args(argv) + options = parser.parse_args(argv) - if not os.path.exists(options.input_file): - sys.exit("Input orderfile doesn\'t exist.") + if not os.path.exists(options.input_file): + sys.exit("Input orderfile doesn't exist.") - with open(options.input_file) as in_stream, \ - open(options.chrome_nm) as chrome_nm_stream, \ - open(options.output_file, 'w') as out_stream: - run(in_stream, chrome_nm_stream, out_stream) + with open(options.input_file) as in_stream, open( + options.chrome_nm + ) as chrome_nm_stream, open(options.output_file, "w") as out_stream: + run(in_stream, chrome_nm_stream, out_stream) -if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/orderfile/post_process_orderfile_test.py b/orderfile/post_process_orderfile_test.py index 976de7fd..4eb36699 100755 --- a/orderfile/post_process_orderfile_test.py +++ b/orderfile/post_process_orderfile_test.py @@ -6,7 +6,8 @@ """Tests for post_process_orderfile.py.""" -from __future__ import division, print_function +from __future__ import division +from __future__ import print_function import os import shutil @@ -17,78 +18,79 @@ import post_process_orderfile def _write_nm_file(name): - with open(name, 'w') as out: - out.write('000001 s NotAValidSymbol1\n') - out.write('000002 S NotAValidSymbol2\n') - out.write('000010 t FirstValidSymbol\n') - out.write('000012 t \n') - out.write('000020 T Builtins_SecondValidSymbol\n') - out.write('000030 T 
$SymbolToIgnore\n') - out.write('000036 T Builtins_LastValidSymbol\n') + with open(name, "w") as out: + out.write("000001 s NotAValidSymbol1\n") + out.write("000002 S NotAValidSymbol2\n") + out.write("000010 t FirstValidSymbol\n") + out.write("000012 t \n") + out.write("000020 T Builtins_SecondValidSymbol\n") + out.write("000030 T $SymbolToIgnore\n") + out.write("000036 T Builtins_LastValidSymbol\n") def _write_orderfile(name): - with open(name, 'w') as out: - out.write('SymbolOrdered1\n') - out.write('SymbolOrdered2\n') + with open(name, "w") as out: + out.write("SymbolOrdered1\n") + out.write("SymbolOrdered2\n") def _cleanup(files): - for f in files: - shutil.rmtree(f, ignore_errors=True) + for f in files: + shutil.rmtree(f, ignore_errors=True) class Tests(unittest.TestCase): - """All of our tests for post_process_orderfile.""" - - # pylint: disable=protected-access - def test__parse_nm_output(self): - temp_dir = tempfile.mkdtemp() - self.addCleanup(_cleanup, [temp_dir]) - chrome_nm_file = os.path.join(temp_dir, 'chrome_nm.txt') - _write_nm_file(chrome_nm_file) - with open(chrome_nm_file) as f: - results = list(post_process_orderfile._parse_nm_output(f)) - self.assertEqual(len(results), 3) - self.assertIn('FirstValidSymbol', results) - self.assertIn('Builtins_SecondValidSymbol', results) - self.assertIn('Builtins_LastValidSymbol', results) - - def test__remove_duplicates(self): - duplicates = ['marker1', 'marker2', 'marker3', 'marker2', 'marker1'] - results = list(post_process_orderfile._remove_duplicates(duplicates)) - self.assertEqual(results, ['marker1', 'marker2', 'marker3']) - - def test_run(self): - temp_dir = tempfile.mkdtemp() - self.addCleanup(_cleanup, [temp_dir]) - orderfile_input = os.path.join(temp_dir, 'orderfile.in.txt') - orderfile_output = os.path.join(temp_dir, 'orderfile.out.txt') - chrome_nm_file = os.path.join(temp_dir, 'chrome_nm.txt') - _write_nm_file(chrome_nm_file) - _write_orderfile(orderfile_input) - with open(orderfile_input) as in_stream, \ - open(orderfile_output, 'w') as out_stream, \ - open(chrome_nm_file) as chrome_nm_stream: - post_process_orderfile.run(in_stream, chrome_nm_stream, out_stream) - - with open(orderfile_output) as check: - results = [x.strip() for x in check.readlines()] - self.assertEqual( - results, - [ - # Start marker should be put first. - 'chrome_begin_ordered_code', - # Symbols in orderfile come next. - 'SymbolOrdered1', - 'SymbolOrdered2', - # Builtin functions in chrome_nm come next, and sorted. - 'Builtins_LastValidSymbol', - 'Builtins_SecondValidSymbol', - # Last symbol should be end marker. 
- 'chrome_end_ordered_code' - ]) - - -if __name__ == '__main__': - unittest.main() + """All of our tests for post_process_orderfile.""" + + # pylint: disable=protected-access + def test__parse_nm_output(self): + temp_dir = tempfile.mkdtemp() + self.addCleanup(_cleanup, [temp_dir]) + chrome_nm_file = os.path.join(temp_dir, "chrome_nm.txt") + _write_nm_file(chrome_nm_file) + with open(chrome_nm_file) as f: + results = list(post_process_orderfile._parse_nm_output(f)) + self.assertEqual(len(results), 3) + self.assertIn("FirstValidSymbol", results) + self.assertIn("Builtins_SecondValidSymbol", results) + self.assertIn("Builtins_LastValidSymbol", results) + + def test__remove_duplicates(self): + duplicates = ["marker1", "marker2", "marker3", "marker2", "marker1"] + results = list(post_process_orderfile._remove_duplicates(duplicates)) + self.assertEqual(results, ["marker1", "marker2", "marker3"]) + + def test_run(self): + temp_dir = tempfile.mkdtemp() + self.addCleanup(_cleanup, [temp_dir]) + orderfile_input = os.path.join(temp_dir, "orderfile.in.txt") + orderfile_output = os.path.join(temp_dir, "orderfile.out.txt") + chrome_nm_file = os.path.join(temp_dir, "chrome_nm.txt") + _write_nm_file(chrome_nm_file) + _write_orderfile(orderfile_input) + with open(orderfile_input) as in_stream, open( + orderfile_output, "w" + ) as out_stream, open(chrome_nm_file) as chrome_nm_stream: + post_process_orderfile.run(in_stream, chrome_nm_stream, out_stream) + + with open(orderfile_output) as check: + results = [x.strip() for x in check.readlines()] + self.assertEqual( + results, + [ + # Start marker should be put first. + "chrome_begin_ordered_code", + # Symbols in orderfile come next. + "SymbolOrdered1", + "SymbolOrdered2", + # Builtin functions in chrome_nm come next, and sorted. + "Builtins_LastValidSymbol", + "Builtins_SecondValidSymbol", + # Last symbol should be end marker. + "chrome_end_ordered_code", + ], + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/pgo_tools/merge_profdata_and_upload.py b/pgo_tools/merge_profdata_and_upload.py index 851edcc9..768a1a57 100755 --- a/pgo_tools/merge_profdata_and_upload.py +++ b/pgo_tools/merge_profdata_and_upload.py @@ -48,328 +48,374 @@ import subprocess import sys import tempfile -_LLVM_PROFDATA = '/usr/bin/llvm-profdata' -_GS_PREFIX = 'gs://' -_LLVMMetadata = collections.namedtuple('_LLVMMetadata', ['head_sha']) +_LLVM_PROFDATA = "/usr/bin/llvm-profdata" +_GS_PREFIX = "gs://" + +_LLVMMetadata = collections.namedtuple("_LLVMMetadata", ["head_sha"]) def _fetch_gs_artifact(remote_name, local_name): - """Fetch single file from remote gs location to local. + """Fetch single file from remote gs location to local. - Args: - remote_name: full gs location to the file. - local_name: the name of local file to be copied to. - """ - assert remote_name.startswith(_GS_PREFIX) - subprocess.check_call(['gsutil', 'cp', remote_name, local_name]) + Args: + remote_name: full gs location to the file. + local_name: the name of local file to be copied to. + """ + assert remote_name.startswith(_GS_PREFIX) + subprocess.check_call(["gsutil", "cp", remote_name, local_name]) def _get_gs_profdata(remote_profdata, arch): - """Fetch and extract profdata from remote gs location. + """Fetch and extract profdata from remote gs location. - Args: - remote_profdata: remote gs location of the profdata tarball. - arch: directory named with arch to saperate each profdata. + Args: + remote_profdata: remote gs location of the profdata tarball. 
+        arch: directory named with arch to separate each profdata.

-  Returns:
-    Local location of the extracted profdata.
-  """
-  tar = 'llvm_profdata.tar.xz'
-  _fetch_gs_artifact(remote_profdata, tar)
-  extract_cmd = ['tar', '-xvf', tar]

+    Returns:
+        Local location of the extracted profdata.
+    """
+    tar = "llvm_profdata.tar.xz"
+    _fetch_gs_artifact(remote_profdata, tar)
+    extract_cmd = ["tar", "-xvf", tar]

-  profdata_name = subprocess.check_output(extract_cmd).strip()
-  # The output of the `tar` command should only contain one line of the
-  # extracted profdata name.
-  if b'.llvm.profdata' not in profdata_name:
-    raise RuntimeError('No profdata in the tarball: %s' % remote_profdata)
+    profdata_name = subprocess.check_output(extract_cmd).strip()
+    # The output of the `tar` command should only contain one line of the
+    # extracted profdata name.
+    if b".llvm.profdata" not in profdata_name:
+        raise RuntimeError("No profdata in the tarball: %s" % remote_profdata)

-  os.mkdir(arch)
-  profdata_loc = os.path.join(arch, 'llvm.profdata')
-  os.rename(profdata_name, profdata_loc)
-  print('Profdata extracted to: %s' % profdata_loc)
-  return profdata_loc
+    os.mkdir(arch)
+    profdata_loc = os.path.join(arch, "llvm.profdata")
+    os.rename(profdata_name, profdata_loc)
+    print("Profdata extracted to: %s" % profdata_loc)
+    return profdata_loc


 def _get_gs_metadata(remote_metadata):
-  """Fetch metadata from remote gs location and read the LLVM head_sha.
+    """Fetch metadata from remote gs location and read the LLVM head_sha.

-  Args:
-    remote_metadata: remote gs location of the metadata json file.
+    Args:
+        remote_metadata: remote gs location of the metadata json file.

-  Returns:
-    LLVM head_sha metadata
-  """
-  metadata_basename = 'llvm_metadata.json'
-  _fetch_gs_artifact(remote_metadata, metadata_basename)
+    Returns:
+        LLVM head_sha metadata
+    """
+    metadata_basename = "llvm_metadata.json"
+    _fetch_gs_artifact(remote_metadata, metadata_basename)

-  with open(metadata_basename) as f:
-    result = json.load(f)
+    with open(metadata_basename) as f:
+        result = json.load(f)

-  return _LLVMMetadata(head_sha=result['head_sha'])
+    return _LLVMMetadata(head_sha=result["head_sha"])


 def _find_latest_artifacts(gs_url, arch):
-  """Fetch the latest profdata and metadata from a give gs location.
-
-  Args:
-    gs_url: a gs location containing one or more artifacts to fetch.
-    arch: the arch profdata collected from.
-
-  Returns:
-    A tuple of local profdata location and metadata
-  """
-  assert gs_url.startswith(_GS_PREFIX)
-  try:
-    # List all artifacts in the gs location and sort by time.
-    output = subprocess.check_output(['gsutil', 'ls', '-l', gs_url],
-                                     encoding='utf-8').strip().split('\n')
-    lines = sorted(output, key=lambda x: x.split()[1], reverse=True)
-  except subprocess.CalledProcessError:
-    raise RuntimeError('Artifacts not found: %s' % gs_url)
-
-  # Use a loop to go through all artifacts to find the latest profdata.
-  # An example of the output of latest builder bucket:
-  # pylint: disable=line-too-long
-  # 5006528 2020-05-31T10:08:48Z gs://chromeos-toolchain-artifacts/llvm-pgo/arm/llvm-11.0_pre387436_p20200403-r7-a8e5dcb072b1f794883ae8125fb08c06db678d56.llvm.profdata.tar.xz
-  # 56 2020-05-31T10:08:48Z gs://chromeos-toolchain-artifacts/llvm-pgo/arm/llvm-11.0_pre387436_p20200403-r7-a8e5dcb072b1f794883ae8125fb08c06db678d56.llvm_metadata.json
-  # 5005952 2020-05-24T10:53:34Z gs://chromeos-toolchain-artifacts/llvm-pgo/arm/llvm-11.0_pre387436_p20200403-r5-a8e5dcb072b1f794883ae8125fb08c06db678d56.llvm.profdata.tar.xz
-  # 56 2020-05-24T10:53:34Z gs://chromeos-toolchain-artifacts/llvm-pgo/arm/llvm-11.0_pre387436_p20200403-r5-a8e5dcb072b1f794883ae8125fb08c06db678d56.llvm_metadata.json
-  # An example for the lines of buildbucket location:
-  # 5004260 2020-05-29T09:48:04Z gs://chromeos-image-archive/arm-pgo-generate-llvm-next-toolchain/R85-13254.0.0-1-8879010326583123168/llvm-11.0_pre387436_p20200403-r7-a8e5dcb072b1f794883ae8125fb08c06db678d56.llvm.profdata.tar.xz
-  # 56 2020-05-29T09:48:04Z gs://chromeos-image-archive/arm-pgo-generate-llvm-next-toolchain/R85-13254.0.0-1-8879010326583123168/llvm-11.0_pre387436_p20200403-r7-a8e5dcb072b1f794883ae8125fb08c06db678d56.llvm_metadata.json
-  # pylint: enable=line-too-long
-  profdata_url = ''
-  for line in lines:
-    url = line.split()[-1]
-    if '.llvm.profdata.tar.xz' in url:
-      profile_path = _get_gs_profdata(url, arch)
-      profdata_url = url
-      break
-  if not profile_path or not profdata_url:
-    raise RuntimeError('No profdata found from %s' % gs_url)
-
-  metadata_url = profdata_url.replace('.llvm.profdata.tar.xz',
-                                      '.llvm_metadata.json')
-  metadata = _get_gs_metadata(metadata_url)
-  if not metadata:
-    raise RuntimeError('No metadata found from %s' % gs_url)
-  return metadata, profile_path
+    """Fetch the latest profdata and metadata from a given gs location.
+
+    Args:
+        gs_url: a gs location containing one or more artifacts to fetch.
+        arch: the arch profdata collected from.
+
+    Returns:
+        A tuple of the metadata and the local profdata location
+    """
+    assert gs_url.startswith(_GS_PREFIX)
+    try:
+        # List all artifacts in the gs location and sort by time.
+        output = (
+            subprocess.check_output(
+                ["gsutil", "ls", "-l", gs_url], encoding="utf-8"
+            )
+            .strip()
+            .split("\n")
+        )
+        lines = sorted(output, key=lambda x: x.split()[1], reverse=True)
+    except subprocess.CalledProcessError:
+        raise RuntimeError("Artifacts not found: %s" % gs_url)
+
+    # Use a loop to go through all artifacts to find the latest profdata.
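+    # Each `gsutil ls -l` row has the form "<size> <date> <url>"; the
+    # sort above orders rows by the date field, newest first.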
+ # An example of the output of latest builder bucket: + # pylint: disable=line-too-long + # 5006528 2020-05-31T10:08:48Z gs://chromeos-toolchain-artifacts/llvm-pgo/arm/llvm-11.0_pre387436_p20200403-r7-a8e5dcb072b1f794883ae8125fb08c06db678d56.llvm.profdata.tar.xz + # 56 2020-05-31T10:08:48Z gs://chromeos-toolchain-artifacts/llvm-pgo/arm/llvm-11.0_pre387436_p20200403-r7-a8e5dcb072b1f794883ae8125fb08c06db678d56.llvm_metadata.json + # 5005952 2020-05-24T10:53:34Z gs://chromeos-toolchain-artifacts/llvm-pgo/arm/llvm-11.0_pre387436_p20200403-r5-a8e5dcb072b1f794883ae8125fb08c06db678d56.llvm.profdata.tar.xz + # 56 2020-05-24T10:53:34Z gs://chromeos-toolchain-artifacts/llvm-pgo/arm/llvm-11.0_pre387436_p20200403-r5-a8e5dcb072b1f794883ae8125fb08c06db678d56.llvm_metadata.json + # An example for the lines of buildbucket location: + # 5004260 2020-05-29T09:48:04Z gs://chromeos-image-archive/arm-pgo-generate-llvm-next-toolchain/R85-13254.0.0-1-8879010326583123168/llvm-11.0_pre387436_p20200403-r7-a8e5dcb072b1f794883ae8125fb08c06db678d56.llvm.profdata.tar.xz + # 56 2020-05-29T09:48:04Z gs://chromeos-image-archive/arm-pgo-generate-llvm-next-toolchain/R85-13254.0.0-1-8879010326583123168/llvm-11.0_pre387436_p20200403-r7-a8e5dcb072b1f794883ae8125fb08c06db678d56.llvm_metadata.json + # pylint: enable=line-too-long + profdata_url = "" + for line in lines: + url = line.split()[-1] + if ".llvm.profdata.tar.xz" in url: + profile_path = _get_gs_profdata(url, arch) + profdata_url = url + break + if not profile_path or not profdata_url: + raise RuntimeError("No profdata found from %s" % gs_url) + + metadata_url = profdata_url.replace( + ".llvm.profdata.tar.xz", ".llvm_metadata.json" + ) + metadata = _get_gs_metadata(metadata_url) + if not metadata: + raise RuntimeError("No metadata found from %s" % gs_url) + return metadata, profile_path def _fetch_from_latest(arch): - """Fetch artifacts from latest builders. + """Fetch artifacts from latest builders. - Args: - arch: the arch profdata collected from. + Args: + arch: the arch profdata collected from. - Returns: - A tuple of local profdata location and metadata - """ - print('\nFETCHING LATEST PROFDATA ON %s...' % arch.upper()) - remote_latest = ( - '%schromeos-toolchain-artifacts/llvm-pgo/%s' % (_GS_PREFIX, arch)) - return _find_latest_artifacts(remote_latest, arch) + Returns: + A tuple of local profdata location and metadata + """ + print("\nFETCHING LATEST PROFDATA ON %s..." % arch.upper()) + remote_latest = "%schromeos-toolchain-artifacts/llvm-pgo/%s" % ( + _GS_PREFIX, + arch, + ) + return _find_latest_artifacts(remote_latest, arch) def _fetch_from_buildbucket(arch, bb): - """Fetch artifacts from buildbucket task. - - Args: - arch: the arch profdata collected from. - bb: buildbucket id. - - Returns: - A tuple of local profdata location and metadata - """ - print('\nFETCHING BUILDBUCKET PROFDATA ON %s...' % arch.upper()) - remote_arch = ('%schromeos-image-archive/%s-pgo-generate-llvm-next-toolchain' - % (_GS_PREFIX, arch)) - # List all buckets under {arch}-pgo-generate-llvm-next-toolchain and - # grep with buildbucket id. - remote_bb = subprocess.check_output(['gsutil', 'ls', remote_arch], - encoding='utf-8').strip().split('\n') - for line in remote_bb: - if bb in line: - return _find_latest_artifacts(line, arch) - raise RuntimeError('No matched results found in %s with bb: %s' % (arch, bb)) + """Fetch artifacts from buildbucket task. + + Args: + arch: the arch profdata collected from. + bb: buildbucket id. 
+ + Returns: + A tuple of local profdata location and metadata + """ + print("\nFETCHING BUILDBUCKET PROFDATA ON %s..." % arch.upper()) + remote_arch = ( + "%schromeos-image-archive/%s-pgo-generate-llvm-next-toolchain" + % ( + _GS_PREFIX, + arch, + ) + ) + # List all buckets under {arch}-pgo-generate-llvm-next-toolchain and + # grep with buildbucket id. + remote_bb = ( + subprocess.check_output(["gsutil", "ls", remote_arch], encoding="utf-8") + .strip() + .split("\n") + ) + for line in remote_bb: + if bb in line: + return _find_latest_artifacts(line, arch) + raise RuntimeError( + "No matched results found in %s with bb: %s" % (arch, bb) + ) def _merge_profdata(profdata_list, output_name): - """Merge profdata. - - Args: - profdata_list: list of profdata location of each arch. - output_name: name of merged profdata. - """ - merge_cmd = [_LLVM_PROFDATA, 'merge', '-output', output_name] + profdata_list - print('\nMerging PGO profiles.\nCMD: %s' % merge_cmd) - subprocess.check_call(merge_cmd) + """Merge profdata. + + Args: + profdata_list: list of profdata location of each arch. + output_name: name of merged profdata. + """ + merge_cmd = [ + _LLVM_PROFDATA, + "merge", + "-output", + output_name, + ] + profdata_list + print("\nMerging PGO profiles.\nCMD: %s" % merge_cmd) + subprocess.check_call(merge_cmd) def _tar_and_upload_profdata(profdata, name_suffix): - """Create a tarball of merged profdata and upload to certain gs location. - - Args: - profdata: location of merged profdata. - name_suffix: usually the LLVM head_sha. - """ - tarball = 'llvm-profdata-%s.tar.xz' % name_suffix - print('Making profdata tarball: %s' % tarball) - subprocess.check_call( - ['tar', '--sparse', '-I', 'xz', '-cf', tarball, profdata]) - - upload_location = '%schromeos-localmirror/distfiles/%s' % (_GS_PREFIX, - tarball) - - # TODO: it's better to create a subdir: distfiles/llvm_pgo_profile, but - # now llvm could only recognize distfiles. - upload_cmd = [ - 'gsutil', - '-m', - 'cp', - '-n', - '-a', - 'public-read', - tarball, - upload_location, - ] - print('\nUploading tarball to gs.\nCMD: %s\n' % upload_cmd) - - # gsutil prints all status to stderr, oddly enough. - gs_output = subprocess.check_output( - upload_cmd, stderr=subprocess.STDOUT, encoding='utf-8') - - # gsutil exits successfully even if it uploaded nothing. It prints a summary - # of what all it did, though. Successful uploads are just a progress bar, - # unsuccessful ones note that items were skipped. - if 'Skipping existing item' in gs_output: - raise ValueError('Profile upload failed: would overwrite an existing ' - 'profile at %s' % upload_location) + """Create a tarball of merged profdata and upload to certain gs location. + + Args: + profdata: location of merged profdata. + name_suffix: usually the LLVM head_sha. + """ + tarball = "llvm-profdata-%s.tar.xz" % name_suffix + print("Making profdata tarball: %s" % tarball) + subprocess.check_call( + ["tar", "--sparse", "-I", "xz", "-cf", tarball, profdata] + ) + + upload_location = "%schromeos-localmirror/distfiles/%s" % ( + _GS_PREFIX, + tarball, + ) + + # TODO: it's better to create a subdir: distfiles/llvm_pgo_profile, but + # now llvm could only recognize distfiles. + upload_cmd = [ + "gsutil", + "-m", + "cp", + "-n", + "-a", + "public-read", + tarball, + upload_location, + ] + print("\nUploading tarball to gs.\nCMD: %s\n" % upload_cmd) + + # gsutil prints all status to stderr, oddly enough. 
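+    # Fold stderr into the captured stdout so the upload summary can be
+    # inspected below.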
+ gs_output = subprocess.check_output( + upload_cmd, stderr=subprocess.STDOUT, encoding="utf-8" + ) + + # gsutil exits successfully even if it uploaded nothing. It prints a summary + # of what all it did, though. Successful uploads are just a progress bar, + # unsuccessful ones note that items were skipped. + if "Skipping existing item" in gs_output: + raise ValueError( + "Profile upload failed: would overwrite an existing " + "profile at %s" % upload_location + ) def main(): - parser = argparse.ArgumentParser( - description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - '-a', - '--all_latest_profiles', - action='store_true', - help='Merge and upload profiles from the latest builders.') - parser.add_argument( - '-l', - '--latest', - default=[], - action='append', - help='User can specify the profdata from which builder with specific ' - 'architecture to download. By default, we merge profdata from arm, ' - 'arm64, amd64.') - parser.add_argument( - '-b', - '--buildbucket', - default=[], - action='append', - help='Extra pgo-generate-llvm-next-toolchain buildbucket results to be ' - 'used. Format should be: {arch}/{bb_id}.') - parser.add_argument( - '-o', - '--output', - default='llvm.profdata', - help='Where to put merged PGO profile. The default is to not save it ' - 'anywhere.') - parser.add_argument( - '--llvm_hash', - help='The LLVM hash to select for the profiles. Generally autodetected.') - args = parser.parse_args() - - if not args.all_latest_profiles and not (args.latest or args.buildbucket): - parser.error('Please specify whether to use latest profiles or ' - 'profiles from buildbucket') - - if args.all_latest_profiles and (args.latest or args.buildbucket): - parser.error('--all_latest_profiles cannot be specified together ' - 'with --latest or --buildbucket') - - latest = ['arm', 'arm64', 'amd64'] \ - if args.all_latest_profiles else args.latest - - all_arch_list = latest.copy() - arch_bb_list = [] - if args.buildbucket: - for arch_bb in args.buildbucket: - arch, bb = arch_bb.split('/') - arch_bb_list.append((arch, bb)) - all_arch_list.append(arch) - - if len(set(all_arch_list)) != len(all_arch_list): - parser.error('Each arch can be only passed once.') - - if not distutils.spawn.find_executable(_LLVM_PROFDATA): - sys.exit(_LLVM_PROFDATA + ' not found; are you in the chroot?') - - initial_dir = os.getcwd() - temp_dir = tempfile.mkdtemp(prefix='merge_pgo') - success = True - try: - os.chdir(temp_dir) - profdata_list = [] - heads = set() - - def append_artifacts(fetched_tuple): - llvm_metadata, profdata_loc = fetched_tuple - if os.path.getsize(profdata_loc) < 512 * 1024: - raise RuntimeError('The PGO profile in local path %s is suspiciously ' - 'small. Something might have gone ' - 'wrong.' % profdata_loc) - heads.add(llvm_metadata.head_sha) - profdata_list.append(profdata_loc) - - for arch in latest: - append_artifacts(_fetch_from_latest(arch)) - - for arch, bb in arch_bb_list: - append_artifacts(_fetch_from_buildbucket(arch, bb)) - - assert heads, "Didn't fetch anything?" - - def die_with_head_complaint(complaint): - extra = ' (HEADs found: %s)' % sorted(heads) - raise RuntimeError(complaint.rstrip() + extra) - - llvm_hash = args.llvm_hash - if not llvm_hash: - if len(heads) != 1: - die_with_head_complaint( - '%d LLVM HEADs were found, which is more than one. You probably ' - 'want a consistent set of HEADs for a profile. 
If you know you ' - "don't, please specify --llvm_hash, and note that *all* profiles " - 'will be merged into this final profile, regardless of their ' - 'reported HEAD.' % len(heads)) - llvm_hash, = heads - - if llvm_hash not in heads: - assert llvm_hash == args.llvm_hash - die_with_head_complaint( - "HEAD %s wasn't found in any fetched artifacts." % llvm_hash) - - print('\nUsing LLVM hash: %s' % llvm_hash) - - _merge_profdata(profdata_list, args.output) - print('Merged profdata locates at %s' % os.path.abspath(args.output)) - _tar_and_upload_profdata(args.output, name_suffix=llvm_hash) - print('\nMerged profdata uploaded successfully.') - except: - success = False - raise - finally: - os.chdir(initial_dir) - if success: - print('Clearing temp directory.') - shutil.rmtree(temp_dir, ignore_errors=True) - else: - print('Script fails, temp directory is at: %s' % temp_dir) - - -if __name__ == '__main__': - sys.exit(main()) + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "-a", + "--all_latest_profiles", + action="store_true", + help="Merge and upload profiles from the latest builders.", + ) + parser.add_argument( + "-l", + "--latest", + default=[], + action="append", + help="User can specify the profdata from which builder with specific " + "architecture to download. By default, we merge profdata from arm, " + "arm64, amd64.", + ) + parser.add_argument( + "-b", + "--buildbucket", + default=[], + action="append", + help="Extra pgo-generate-llvm-next-toolchain buildbucket results to be " + "used. Format should be: {arch}/{bb_id}.", + ) + parser.add_argument( + "-o", + "--output", + default="llvm.profdata", + help="Where to put merged PGO profile. The default is to not save it " + "anywhere.", + ) + parser.add_argument( + "--llvm_hash", + help="The LLVM hash to select for the profiles. Generally autodetected.", + ) + args = parser.parse_args() + + if not args.all_latest_profiles and not (args.latest or args.buildbucket): + parser.error( + "Please specify whether to use latest profiles or " + "profiles from buildbucket" + ) + + if args.all_latest_profiles and (args.latest or args.buildbucket): + parser.error( + "--all_latest_profiles cannot be specified together " + "with --latest or --buildbucket" + ) + + latest = ( + ["arm", "arm64", "amd64"] if args.all_latest_profiles else args.latest + ) + + all_arch_list = latest.copy() + arch_bb_list = [] + if args.buildbucket: + for arch_bb in args.buildbucket: + arch, bb = arch_bb.split("/") + arch_bb_list.append((arch, bb)) + all_arch_list.append(arch) + + if len(set(all_arch_list)) != len(all_arch_list): + parser.error("Each arch can be only passed once.") + + if not distutils.spawn.find_executable(_LLVM_PROFDATA): + sys.exit(_LLVM_PROFDATA + " not found; are you in the chroot?") + + initial_dir = os.getcwd() + temp_dir = tempfile.mkdtemp(prefix="merge_pgo") + success = True + try: + os.chdir(temp_dir) + profdata_list = [] + heads = set() + + def append_artifacts(fetched_tuple): + llvm_metadata, profdata_loc = fetched_tuple + if os.path.getsize(profdata_loc) < 512 * 1024: + raise RuntimeError( + "The PGO profile in local path %s is suspiciously " + "small. Something might have gone " + "wrong." 
% profdata_loc + ) + heads.add(llvm_metadata.head_sha) + profdata_list.append(profdata_loc) + + for arch in latest: + append_artifacts(_fetch_from_latest(arch)) + + for arch, bb in arch_bb_list: + append_artifacts(_fetch_from_buildbucket(arch, bb)) + + assert heads, "Didn't fetch anything?" + + def die_with_head_complaint(complaint): + extra = " (HEADs found: %s)" % sorted(heads) + raise RuntimeError(complaint.rstrip() + extra) + + llvm_hash = args.llvm_hash + if not llvm_hash: + if len(heads) != 1: + die_with_head_complaint( + "%d LLVM HEADs were found, which is more than one. You probably " + "want a consistent set of HEADs for a profile. If you know you " + "don't, please specify --llvm_hash, and note that *all* profiles " + "will be merged into this final profile, regardless of their " + "reported HEAD." % len(heads) + ) + (llvm_hash,) = heads + + if llvm_hash not in heads: + assert llvm_hash == args.llvm_hash + die_with_head_complaint( + "HEAD %s wasn't found in any fetched artifacts." % llvm_hash + ) + + print("\nUsing LLVM hash: %s" % llvm_hash) + + _merge_profdata(profdata_list, args.output) + print("Merged profdata locates at %s" % os.path.abspath(args.output)) + _tar_and_upload_profdata(args.output, name_suffix=llvm_hash) + print("\nMerged profdata uploaded successfully.") + except: + success = False + raise + finally: + os.chdir(initial_dir) + if success: + print("Clearing temp directory.") + shutil.rmtree(temp_dir, ignore_errors=True) + else: + print("Script fails, temp directory is at: %s" % temp_dir) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/pgo_tools/monitor_pgo_profiles.py b/pgo_tools/monitor_pgo_profiles.py index e56db427..846cdc2b 100755 --- a/pgo_tools/monitor_pgo_profiles.py +++ b/pgo_tools/monitor_pgo_profiles.py @@ -12,105 +12,112 @@ import subprocess import sys from typing import List, NamedTuple, Optional, Tuple -PGO_BUILDBOT_LINK = ('https://ci.chromium.org/p/chromeos/builders/toolchain/' - 'pgo-generate-llvm-next-orchestrator') + +PGO_BUILDBOT_LINK = ( + "https://ci.chromium.org/p/chromeos/builders/toolchain/" + "pgo-generate-llvm-next-orchestrator" +) class ProfdataInfo(NamedTuple): - """Data about an llvm profdata in our gs:// bucket.""" - date: datetime.datetime - location: str + """Data about an llvm profdata in our gs:// bucket.""" + + date: datetime.datetime + location: str def parse_date(date: str) -> datetime.datetime: - time_format = '%Y-%m-%dT%H:%M:%SZ' - if not date.endswith('Z'): - time_format += '%z' - return datetime.datetime.strptime(date, time_format) + time_format = "%Y-%m-%dT%H:%M:%SZ" + if not date.endswith("Z"): + time_format += "%z" + return datetime.datetime.strptime(date, time_format) def fetch_most_recent_profdata(arch: str) -> ProfdataInfo: - result = subprocess.run( - [ - 'gsutil.py', - 'ls', - '-l', - f'gs://chromeos-toolchain-artifacts/llvm-pgo/{arch}/' - '*.profdata.tar.xz', - ], - check=True, - stdout=subprocess.PIPE, - encoding='utf-8', - ) - - # Each line will be a profdata; the last one is a summary, so drop it. - infos = [] - for rec in result.stdout.strip().splitlines()[:-1]: - _size, date, url = rec.strip().split() - infos.append(ProfdataInfo(date=parse_date(date), location=url)) - return max(infos) + result = subprocess.run( + [ + "gsutil.py", + "ls", + "-l", + f"gs://chromeos-toolchain-artifacts/llvm-pgo/{arch}/" + "*.profdata.tar.xz", + ], + check=True, + stdout=subprocess.PIPE, + encoding="utf-8", + ) + + # Each line will be a profdata; the last one is a summary, so drop it. 
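+    # A record looks like "  1234  2020-06-26T05:26:40Z  gs://...", i.e.
+    # (size, date, url); only the date and url are kept.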
+ infos = [] + for rec in result.stdout.strip().splitlines()[:-1]: + _size, date, url = rec.strip().split() + infos.append(ProfdataInfo(date=parse_date(date), location=url)) + return max(infos) def compose_complaint( out_of_date_profiles: List[Tuple[datetime.datetime, ProfdataInfo]] ) -> Optional[str]: - if not out_of_date_profiles: - return None + if not out_of_date_profiles: + return None - if len(out_of_date_profiles) == 1: - body_lines = ['1 profile is out of date:'] - else: - body_lines = [f'{len(out_of_date_profiles)} profiles are out of date:'] + if len(out_of_date_profiles) == 1: + body_lines = ["1 profile is out of date:"] + else: + body_lines = [f"{len(out_of_date_profiles)} profiles are out of date:"] - for arch, profdata_info in out_of_date_profiles: - body_lines.append( - f'- {arch} (most recent profile was from {profdata_info.date} at ' - f'{profdata_info.location!r})') + for arch, profdata_info in out_of_date_profiles: + body_lines.append( + f"- {arch} (most recent profile was from {profdata_info.date} at " + f"{profdata_info.location!r})" + ) - body_lines.append('\n') - body_lines.append( - 'PTAL to see if the llvm-pgo-generate bots are functioning normally. ' - f'Their status can be found at {PGO_BUILDBOT_LINK}.') - return '\n'.join(body_lines) + body_lines.append("\n") + body_lines.append( + "PTAL to see if the llvm-pgo-generate bots are functioning normally. " + f"Their status can be found at {PGO_BUILDBOT_LINK}." + ) + return "\n".join(body_lines) def main() -> None: - logging.basicConfig(level=logging.INFO) - - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - '--max_age_days', - # These builders run ~weekly. If we fail to generate two in a row, - # something's probably wrong. - default=15, - type=int, - help='How old to let profiles get before complaining, in days', - ) - args = parser.parse_args() - - now = datetime.datetime.now() - logging.info('Start time is %r', now) - - max_age = datetime.timedelta(days=args.max_age_days) - out_of_date_profiles = [] - for arch in ('arm', 'arm64', 'amd64'): - logging.info('Fetching most recent profdata for %r', arch) - most_recent = fetch_most_recent_profdata(arch) - logging.info('Most recent profdata for %r is %r', arch, most_recent) - - age = now - most_recent.date - if age >= max_age: - out_of_date_profiles.append((arch, most_recent)) - - complaint = compose_complaint(out_of_date_profiles) - if complaint: - logging.error('%s', complaint) - sys.exit(1) - - logging.info('Nothing seems wrong') - - -if __name__ == '__main__': - sys.exit(main()) + logging.basicConfig(level=logging.INFO) + + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "--max_age_days", + # These builders run ~weekly. If we fail to generate two in a row, + # something's probably wrong. 
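+        # 15 days is roughly two weekly runs, plus a day of slack.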
+ default=15, + type=int, + help="How old to let profiles get before complaining, in days", + ) + args = parser.parse_args() + + now = datetime.datetime.now() + logging.info("Start time is %r", now) + + max_age = datetime.timedelta(days=args.max_age_days) + out_of_date_profiles = [] + for arch in ("arm", "arm64", "amd64"): + logging.info("Fetching most recent profdata for %r", arch) + most_recent = fetch_most_recent_profdata(arch) + logging.info("Most recent profdata for %r is %r", arch, most_recent) + + age = now - most_recent.date + if age >= max_age: + out_of_date_profiles.append((arch, most_recent)) + + complaint = compose_complaint(out_of_date_profiles) + if complaint: + logging.error("%s", complaint) + sys.exit(1) + + logging.info("Nothing seems wrong") + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/pgo_tools/monitor_pgo_profiles_unittest.py b/pgo_tools/monitor_pgo_profiles_unittest.py index dab529b8..d129c6ba 100755 --- a/pgo_tools/monitor_pgo_profiles_unittest.py +++ b/pgo_tools/monitor_pgo_profiles_unittest.py @@ -14,79 +14,91 @@ import monitor_pgo_profiles class Test(unittest.TestCase): - """Tests for monitor_pgo_profiles.""" - def test_compose_complaint_with_zero_out_of_date(self): - self.assertIsNone(monitor_pgo_profiles.compose_complaint([])) + """Tests for monitor_pgo_profiles.""" - def test_compose_complaint_with_one_out_of_date(self): - profdata_info = monitor_pgo_profiles.ProfdataInfo( - date=datetime.datetime(2020, 1, 2, 3, 4, 5), - location='gs://somewhere', - ) - result = monitor_pgo_profiles.compose_complaint([ - ('some_arch', profdata_info), - ]) - self.assertEqual( - result, - '\n'.join(( - '1 profile is out of date:', - f'- some_arch (most recent profile was from {profdata_info.date} ' - f'at {profdata_info.location!r})', - '', - '', - 'PTAL to see if the llvm-pgo-generate bots are functioning ' - 'normally. Their status can be found at ' - f'{monitor_pgo_profiles.PGO_BUILDBOT_LINK}.', - )), - ) + def test_compose_complaint_with_zero_out_of_date(self): + self.assertIsNone(monitor_pgo_profiles.compose_complaint([])) - def test_compose_complaint_with_two_out_of_date(self): - profdata_info_1 = monitor_pgo_profiles.ProfdataInfo( - date=datetime.datetime(2020, 1, 2, 3, 4, 5), - location='gs://somewhere', - ) - profdata_info_2 = monitor_pgo_profiles.ProfdataInfo( - date=datetime.datetime(2020, 3, 2, 1, 4, 5), - location='gs://somewhere-else', - ) - result = monitor_pgo_profiles.compose_complaint([ - ('some_arch', profdata_info_1), - ('some_other_arch', profdata_info_2), - ]) - self.assertEqual( - result, - '\n'.join(( - '2 profiles are out of date:', - f'- some_arch (most recent profile was from {profdata_info_1.date} ' - f'at {profdata_info_1.location!r})', - f'- some_other_arch (most recent profile was from ' - f'{profdata_info_2.date} at {profdata_info_2.location!r})', - '', - '', - 'PTAL to see if the llvm-pgo-generate bots are functioning ' - 'normally. 
Their status can be found at ' - f'{monitor_pgo_profiles.PGO_BUILDBOT_LINK}.', - )), - ) + def test_compose_complaint_with_one_out_of_date(self): + profdata_info = monitor_pgo_profiles.ProfdataInfo( + date=datetime.datetime(2020, 1, 2, 3, 4, 5), + location="gs://somewhere", + ) + result = monitor_pgo_profiles.compose_complaint( + [ + ("some_arch", profdata_info), + ] + ) + self.assertEqual( + result, + "\n".join( + ( + "1 profile is out of date:", + f"- some_arch (most recent profile was from {profdata_info.date} " + f"at {profdata_info.location!r})", + "", + "", + "PTAL to see if the llvm-pgo-generate bots are functioning " + "normally. Their status can be found at " + f"{monitor_pgo_profiles.PGO_BUILDBOT_LINK}.", + ) + ), + ) - @unittest.mock.patch.object(subprocess, 'run') - def test_fetching_profdata_functions(self, subprocess_run_mock): - ls_return_value = unittest.mock.MagicMock() - ls_return_value.stdout = '\n'.join(( - ' 1234 2020-06-26T05:26:40Z gs://bar', - ' 44 2020-06-23T05:26:40Z gs://foo', - ' 1234 2020-06-25T05:26:40Z gs://zzz', - )) - subprocess_run_mock.return_value = ls_return_value + def test_compose_complaint_with_two_out_of_date(self): + profdata_info_1 = monitor_pgo_profiles.ProfdataInfo( + date=datetime.datetime(2020, 1, 2, 3, 4, 5), + location="gs://somewhere", + ) + profdata_info_2 = monitor_pgo_profiles.ProfdataInfo( + date=datetime.datetime(2020, 3, 2, 1, 4, 5), + location="gs://somewhere-else", + ) + result = monitor_pgo_profiles.compose_complaint( + [ + ("some_arch", profdata_info_1), + ("some_other_arch", profdata_info_2), + ] + ) + self.assertEqual( + result, + "\n".join( + ( + "2 profiles are out of date:", + f"- some_arch (most recent profile was from {profdata_info_1.date} " + f"at {profdata_info_1.location!r})", + f"- some_other_arch (most recent profile was from " + f"{profdata_info_2.date} at {profdata_info_2.location!r})", + "", + "", + "PTAL to see if the llvm-pgo-generate bots are functioning " + "normally. 
Their status can be found at " + f"{monitor_pgo_profiles.PGO_BUILDBOT_LINK}.", + ) + ), + ) - most_recent = monitor_pgo_profiles.fetch_most_recent_profdata('arm') - self.assertEqual( - most_recent, - monitor_pgo_profiles.ProfdataInfo( - date=datetime.datetime(2020, 6, 26, 5, 26, 40), - location='gs://bar', - )) + @unittest.mock.patch.object(subprocess, "run") + def test_fetching_profdata_functions(self, subprocess_run_mock): + ls_return_value = unittest.mock.MagicMock() + ls_return_value.stdout = "\n".join( + ( + " 1234 2020-06-26T05:26:40Z gs://bar", + " 44 2020-06-23T05:26:40Z gs://foo", + " 1234 2020-06-25T05:26:40Z gs://zzz", + ) + ) + subprocess_run_mock.return_value = ls_return_value + most_recent = monitor_pgo_profiles.fetch_most_recent_profdata("arm") + self.assertEqual( + most_recent, + monitor_pgo_profiles.ProfdataInfo( + date=datetime.datetime(2020, 6, 26, 5, 26, 40), + location="gs://bar", + ), + ) -if __name__ == '__main__': - unittest.main() + +if __name__ == "__main__": + unittest.main() diff --git a/pgo_tools_rust/pgo_rust.py b/pgo_tools_rust/pgo_rust.py index 5e09c1c0..6aedc9b7 100755 --- a/pgo_tools_rust/pgo_rust.py +++ b/pgo_tools_rust/pgo_rust.py @@ -112,430 +112,515 @@ from typing import Dict, List, Optional TARGET_TRIPLES = [ - 'x86_64-cros-linux-gnu', - 'x86_64-pc-linux-gnu', - 'armv7a-cros-linux-gnueabihf', - 'aarch64-cros-linux-gnu', + "x86_64-cros-linux-gnu", + "x86_64-pc-linux-gnu", + "armv7a-cros-linux-gnueabihf", + "aarch64-cros-linux-gnu", ] -LOCAL_BASE = Path('/tmp/rust-pgo') +LOCAL_BASE = Path("/tmp/rust-pgo") -GS_BASE = PurePosixPath('/chromeos-toolchain-artifacts/rust-pgo') +GS_BASE = PurePosixPath("/chromeos-toolchain-artifacts/rust-pgo") -GS_DISTFILES = PurePosixPath('/chromeos-localmirror/distfiles') +GS_DISTFILES = PurePosixPath("/chromeos-localmirror/distfiles") -CRATE_NAME = 'ripgrep' +CRATE_NAME = "ripgrep" -CRATE_VERSION = '13.0.0' +CRATE_VERSION = "13.0.0" @contextlib.contextmanager def chdir(new_directory: Path): - initial_directory = Path.cwd() - os.chdir(new_directory) - try: - yield - finally: - os.chdir(initial_directory) - - -def run(args: List, - *, - indent: int = 4, - env: Optional[Dict[str, str]] = None, - capture_stdout: bool = False, - message: bool = True) -> Optional[str]: - args = [str(arg) for arg in args] - - if env is None: - new_env = os.environ - else: - new_env = os.environ.copy() - new_env.update(env) - - if message: - if env is None: - logging.info('Running %s', args) - else: - logging.info('Running %s in environment %s', args, env) - - result = subprocess.run(args, - env=new_env, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - encoding='utf-8', - check=False) - - stdout = result.stdout - stderr = result.stderr - if indent != 0: - stdout = re.sub('^', ' ' * indent, stdout, flags=re.MULTILINE) - stderr = re.sub('^', ' ' * indent, stderr, flags=re.MULTILINE) - - if capture_stdout: - ret = result.stdout - else: - logging.info('STDOUT:') - logging.info(stdout) - logging.info('STDERR:') - logging.info(stderr) - ret = None - - result.check_returncode() - - if message: + initial_directory = Path.cwd() + os.chdir(new_directory) + try: + yield + finally: + os.chdir(initial_directory) + + +def run( + args: List, + *, + indent: int = 4, + env: Optional[Dict[str, str]] = None, + capture_stdout: bool = False, + message: bool = True, +) -> Optional[str]: + args = [str(arg) for arg in args] + if env is None: - logging.info('Ran %s\n', args) + new_env = os.environ else: - logging.info('Ran %s in environment %s\n', args, env) - - 
return ret - - -def get_rust_version() -> str: - s = run(['rustc', '--version'], capture_stdout=True) - m = re.search(r'\d+\.\d+\.\d+', s) - assert m is not None, repr(s) - return m.group(0) + new_env = os.environ.copy() + new_env.update(env) + + if message: + if env is None: + logging.info("Running %s", args) + else: + logging.info("Running %s in environment %s", args, env) + + result = subprocess.run( + args, + env=new_env, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + encoding="utf-8", + check=False, + ) + stdout = result.stdout + stderr = result.stderr + if indent != 0: + stdout = re.sub("^", " " * indent, stdout, flags=re.MULTILINE) + stderr = re.sub("^", " " * indent, stderr, flags=re.MULTILINE) -def download_unpack_crate(*, crate_name: str, crate_version: str): - filename_no_extension = f'{crate_name}-{crate_version}' - gs_path = GS_BASE / 'crates' / f'{filename_no_extension}.tar.xz' - local_path = LOCAL_BASE / 'crates' - shutil.rmtree(local_path / f'{crate_name}-{crate_version}', - ignore_errors=True) - with chdir(local_path): - run(['gsutil.py', 'cp', f'gs:/{gs_path}', '.']) - run(['xz', '-d', f'{filename_no_extension}.tar.xz']) - run(['tar', 'xvf', f'{filename_no_extension}.tar']) - - -def build_crate(*, - crate_name: str, - crate_version: str, - target_triple: str, - time_file: Optional[str] = None): - local_path = LOCAL_BASE / 'crates' / f'{crate_name}-{crate_version}' - with chdir(local_path): - Path('.cargo').mkdir(exist_ok=True) - with open('.cargo/config.toml', 'w') as f: - f.write('\n'.join(( - '[source.crates-io]', - 'replace-with = "vendored-sources"', - '', - '[source.vendored-sources]', - 'directory = "vendor"', - '', - f'[target.{target_triple}]', - f'linker = "{target_triple}-clang"', - '', - "[target.'cfg(all())']", - 'rustflags = [', - ' "-Clto=thin",', - ' "-Cembed-bitcode=yes",', - ']', - ))) - - run(['cargo', 'clean']) - - cargo_cmd = ['cargo', 'build', '--release', '--target', target_triple] - - if time_file is None: - run(cargo_cmd) + if capture_stdout: + ret = result.stdout else: - time_cmd = [ - '/usr/bin/time', f'--output={time_file}', - '--format=wall time (s) %e\nuser time (s) %U\nmax RSS %M\n' - ] - run(time_cmd + cargo_cmd) + logging.info("STDOUT:") + logging.info(stdout) + logging.info("STDERR:") + logging.info(stderr) + ret = None + result.check_returncode() -def build_rust(*, - generate_frontend_profile: bool = False, - generate_llvm_profile: bool = False, - use_frontend_profile: bool = False, - use_llvm_profile: bool = False): + if message: + if env is None: + logging.info("Ran %s\n", args) + else: + logging.info("Ran %s in environment %s\n", args, env) - if use_frontend_profile or use_llvm_profile: - assert not generate_frontend_profile and not generate_llvm_profile, ( - "Can't build a compiler to both use profile information and generate it" - ) + return ret - assert not generate_frontend_profile or not generate_llvm_profile, ( - "Can't generate both frontend and LLVM profile information") - use = '-rust_profile_frontend_use -rust_profile_llvm_use ' - if generate_frontend_profile: - use += 'rust_profile_frontend_generate ' - if generate_llvm_profile: - use += 'rust_profile_llvm_generate ' - if use_frontend_profile: - use += 'rust_profile_frontend_use_local ' - if use_llvm_profile: - use += 'rust_profile_llvm_use_local ' +def get_rust_version() -> str: + s = run(["rustc", "--version"], capture_stdout=True) + m = re.search(r"\d+\.\d+\.\d+", s) + assert m is not None, repr(s) + return m.group(0) + - # -E to preserve our USE environment 
variable. - run(['sudo', '-E', 'emerge', 'dev-lang/rust', 'dev-lang/rust-host'], - env={'USE': use}) +def download_unpack_crate(*, crate_name: str, crate_version: str): + filename_no_extension = f"{crate_name}-{crate_version}" + gs_path = GS_BASE / "crates" / f"{filename_no_extension}.tar.xz" + local_path = LOCAL_BASE / "crates" + shutil.rmtree( + local_path / f"{crate_name}-{crate_version}", ignore_errors=True + ) + with chdir(local_path): + run(["gsutil.py", "cp", f"gs:/{gs_path}", "."]) + run(["xz", "-d", f"{filename_no_extension}.tar.xz"]) + run(["tar", "xvf", f"{filename_no_extension}.tar"]) + + +def build_crate( + *, + crate_name: str, + crate_version: str, + target_triple: str, + time_file: Optional[str] = None, +): + local_path = LOCAL_BASE / "crates" / f"{crate_name}-{crate_version}" + with chdir(local_path): + Path(".cargo").mkdir(exist_ok=True) + with open(".cargo/config.toml", "w") as f: + f.write( + "\n".join( + ( + "[source.crates-io]", + 'replace-with = "vendored-sources"', + "", + "[source.vendored-sources]", + 'directory = "vendor"', + "", + f"[target.{target_triple}]", + f'linker = "{target_triple}-clang"', + "", + "[target.'cfg(all())']", + "rustflags = [", + ' "-Clto=thin",', + ' "-Cembed-bitcode=yes",', + "]", + ) + ) + ) + + run(["cargo", "clean"]) + + cargo_cmd = ["cargo", "build", "--release", "--target", target_triple] + + if time_file is None: + run(cargo_cmd) + else: + time_cmd = [ + "/usr/bin/time", + f"--output={time_file}", + "--format=wall time (s) %e\nuser time (s) %U\nmax RSS %M\n", + ] + run(time_cmd + cargo_cmd) + + +def build_rust( + *, + generate_frontend_profile: bool = False, + generate_llvm_profile: bool = False, + use_frontend_profile: bool = False, + use_llvm_profile: bool = False, +): + + if use_frontend_profile or use_llvm_profile: + assert ( + not generate_frontend_profile and not generate_llvm_profile + ), "Can't build a compiler to both use profile information and generate it" + + assert ( + not generate_frontend_profile or not generate_llvm_profile + ), "Can't generate both frontend and LLVM profile information" + + use = "-rust_profile_frontend_use -rust_profile_llvm_use " + if generate_frontend_profile: + use += "rust_profile_frontend_generate " + if generate_llvm_profile: + use += "rust_profile_llvm_generate " + if use_frontend_profile: + use += "rust_profile_frontend_use_local " + if use_llvm_profile: + use += "rust_profile_llvm_use_local " + + # -E to preserve our USE environment variable. + run( + ["sudo", "-E", "emerge", "dev-lang/rust", "dev-lang/rust-host"], + env={"USE": use}, + ) def merge_profdata(llvm_or_frontend, *, source_directory: Path, dest: Path): - assert llvm_or_frontend in ('llvm', 'frontend') - - # The two `llvm-profdata` programs come from different LLVM versions, and may - # support different versions of the profdata format, so make sure to use the - # right one. - llvm_profdata = ('/usr/bin/llvm-profdata' if llvm_or_frontend == 'llvm' else - '/usr/libexec/rust/llvm-profdata') + assert llvm_or_frontend in ("llvm", "frontend") + + # The two `llvm-profdata` programs come from different LLVM versions, and may + # support different versions of the profdata format, so make sure to use the + # right one. 
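+    # (The paths below are the usual in-chroot locations of the two
+    # llvm-profdata binaries.)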
+    llvm_profdata = (
+        "/usr/bin/llvm-profdata"
+        if llvm_or_frontend == "llvm"
+        else "/usr/libexec/rust/llvm-profdata"
+    )

-  dest.parent.mkdir(parents=True, exist_ok=True)
+    dest.parent.mkdir(parents=True, exist_ok=True)

-  files = list(source_directory.glob('*.profraw'))
-  run([llvm_profdata, 'merge', f'--output={dest}'] + files)
+    files = list(source_directory.glob("*.profraw"))
+    run([llvm_profdata, "merge", f"--output={dest}"] + files)


 def do_upload_profdata(*, source: Path, dest: PurePosixPath):
-  new_path = source.parent / source.name / '.xz'
-  run(['xz', '--keep', '--compress', '--force', source])
-  upload_file(source=new_path, dest=dest, public_read=True)
+    # `xz --keep` writes "<name>.xz" next to the input file.
+    new_path = source.parent / (source.name + ".xz")
+    run(["xz", "--keep", "--compress", "--force", source])
+    upload_file(source=new_path, dest=dest, public_read=True)


-def upload_file(*,
-                source: Path,
-                dest: PurePosixPath,
-                public_read: bool = False):
-  if public_read:
-    run(['gsutil.py', 'cp', '-a', 'public-read', source, f'gs:/{dest}'])
-  else:
-    run(['gsutil.py', 'cp', source, f'gs:/{dest}'])
+def upload_file(
+    *, source: Path, dest: PurePosixPath, public_read: bool = False
+):
+    if public_read:
+        run(["gsutil.py", "cp", "-a", "public-read", source, f"gs:/{dest}"])
+    else:
+        run(["gsutil.py", "cp", source, f"gs:/{dest}"])


 def maybe_download_crate(*, crate_name: str, crate_version: str):
-  directory = LOCAL_BASE / 'crates' / f'{crate_name}-{crate_version}'
-  if directory.is_dir():
-    logging.info('Crate already downloaded')
-  else:
-    logging.info('Downloading crate')
-    download_unpack_crate(crate_name=crate_name, crate_version=crate_version)
+    directory = LOCAL_BASE / "crates" / f"{crate_name}-{crate_version}"
+    if directory.is_dir():
+        logging.info("Crate already downloaded")
+    else:
+        logging.info("Downloading crate")
+        download_unpack_crate(
+            crate_name=crate_name, crate_version=crate_version
+        )


 def generate(args):
-  maybe_download_crate(crate_name=args.crate_name,
-                       crate_version=args.crate_version)
-
-  llvm_dir = LOCAL_BASE / 'llvm-profraw'
-  shutil.rmtree(llvm_dir, ignore_errors=True)
-  frontend_dir = LOCAL_BASE / 'frontend-profraw'
-  shutil.rmtree(frontend_dir, ignore_errors=True)
-
-  logging.info('Building Rust instrumented for llvm')
-  build_rust(generate_llvm_profile=True)
-
-  llvm_dir.mkdir(parents=True, exist_ok=True)
-  for triple in TARGET_TRIPLES:
-    logging.info('Building crate with LLVM instrumentation, for triple %s',
-                 triple)
-    build_crate(crate_name=args.crate_name,
-                crate_version=args.crate_version,
-                target_triple=triple)
-
-  logging.info('Merging LLVM profile data')
-  merge_profdata(
-      'llvm',
-      source_directory=LOCAL_BASE / 'llvm-profraw',
-      dest=(LOCAL_BASE / 'profdata' /
-            f'{args.crate_name}-{args.crate_version}' / 'llvm.profdata'))
-
-  logging.info('Building Rust instrumented for frontend')
-  build_rust(generate_frontend_profile=True)
-
-  frontend_dir.mkdir(parents=True, exist_ok=True)
-  for triple in TARGET_TRIPLES:
-    logging.info('Building crate with frontend instrumentation, for triple %s',
-                 triple)
-    build_crate(crate_name=args.crate_name,
-                crate_version=args.crate_version,
-                target_triple=triple)
-
-  logging.info('Merging frontend profile data')
-  merge_profdata(
-      'frontend',
-      source_directory=LOCAL_BASE / 'frontend-profraw',
-      dest=(LOCAL_BASE / 'profdata' /
-            f'{args.crate_name}-{args.crate_version}' / 'frontend.profdata'))
+    maybe_download_crate(
+        crate_name=args.crate_name, crate_version=args.crate_version
+    )
+
+    llvm_dir = LOCAL_BASE / "llvm-profraw"
+    shutil.rmtree(llvm_dir,
ignore_errors=True) + frontend_dir = LOCAL_BASE / "frontend-profraw" + shutil.rmtree(frontend_dir, ignore_errors=True) + + logging.info("Building Rust instrumented for llvm") + build_rust(generate_llvm_profile=True) + + llvm_dir.mkdir(parents=True, exist_ok=True) + for triple in TARGET_TRIPLES: + logging.info( + "Building crate with LLVM instrumentation, for triple %s", triple + ) + build_crate( + crate_name=args.crate_name, + crate_version=args.crate_version, + target_triple=triple, + ) + + logging.info("Merging LLVM profile data") + merge_profdata( + "llvm", + source_directory=LOCAL_BASE / "llvm-profraw", + dest=( + LOCAL_BASE + / "profdata" + / f"{args.crate_name}-{args.crate_version}" + / "llvm.profdata" + ), + ) + + logging.info("Building Rust instrumented for frontend") + build_rust(generate_frontend_profile=True) + + frontend_dir.mkdir(parents=True, exist_ok=True) + for triple in TARGET_TRIPLES: + logging.info( + "Building crate with frontend instrumentation, for triple %s", + triple, + ) + build_crate( + crate_name=args.crate_name, + crate_version=args.crate_version, + target_triple=triple, + ) + + logging.info("Merging frontend profile data") + merge_profdata( + "frontend", + source_directory=LOCAL_BASE / "frontend-profraw", + dest=( + LOCAL_BASE + / "profdata" + / f"{args.crate_name}-{args.crate_version}" + / "frontend.profdata" + ), + ) def benchmark_nopgo(args): - logging.info('Building Rust, no PGO') - build_rust() - - time_directory = LOCAL_BASE / 'benchmarks' / 'nopgo' - logging.info('Benchmarking crate build with no PGO') - time_directory.mkdir(parents=True, exist_ok=True) - for triple in TARGET_TRIPLES: - build_crate( - crate_name=args.bench_crate_name, - crate_version=args.bench_crate_version, - target_triple=triple, - time_file=( - time_directory / - f'{args.bench_crate_name}-{args.bench_crate_version}-{triple}')) - - rust_version = get_rust_version() - dest_directory = GS_BASE / 'benchmarks' / rust_version / 'nopgo' - logging.info('Uploading benchmark data') - for file in time_directory.iterdir(): - upload_file(source=time_directory / file.name, - dest=dest_directory / file.name) + logging.info("Building Rust, no PGO") + build_rust() + + time_directory = LOCAL_BASE / "benchmarks" / "nopgo" + logging.info("Benchmarking crate build with no PGO") + time_directory.mkdir(parents=True, exist_ok=True) + for triple in TARGET_TRIPLES: + build_crate( + crate_name=args.bench_crate_name, + crate_version=args.bench_crate_version, + target_triple=triple, + time_file=( + time_directory + / f"{args.bench_crate_name}-{args.bench_crate_version}-{triple}" + ), + ) + + rust_version = get_rust_version() + dest_directory = GS_BASE / "benchmarks" / rust_version / "nopgo" + logging.info("Uploading benchmark data") + for file in time_directory.iterdir(): + upload_file( + source=time_directory / file.name, dest=dest_directory / file.name + ) def benchmark_pgo(args): - maybe_download_crate(crate_name=args.bench_crate_name, - crate_version=args.bench_crate_version) - - files_dir = Path('/mnt/host/source/src/third_party/chromiumos-overlay', - 'dev-lang/rust/files') - - logging.info('Copying profile data to be used in building Rust') - run([ - 'cp', - (LOCAL_BASE / 'profdata' / f'{args.crate_name}-{args.crate_version}' / - 'llvm.profdata'), files_dir - ]) - run([ - 'cp', - (LOCAL_BASE / 'profdata' / f'{args.crate_name}-{args.crate_version}' / - 'frontend.profdata'), files_dir - ]) - - logging.info('Building Rust with PGO') - build_rust(use_llvm_profile=True, use_frontend_profile=True) - - 
-  time_directory = (LOCAL_BASE / 'benchmarks' /
-                    f'{args.crate_name}-{args.crate_version}')
-  time_directory.mkdir(parents=True, exist_ok=True)
-  logging.info('Benchmarking crate built with PGO')
-  for triple in TARGET_TRIPLES:
-    build_crate(
-        crate_name=args.bench_crate_name,
-        crate_version=args.bench_crate_version,
-        target_triple=triple,
-        time_file=(
-            time_directory /
-            f'{args.bench_crate_name}-{args.bench_crate_version}-{triple}'))
-
-  rust_version = get_rust_version()
-  dest_directory = (GS_BASE / 'benchmarks' / rust_version /
-                    f'{args.crate_name}-{args.crate_version}')
-  logging.info('Uploading benchmark data')
-  for file in time_directory.iterdir():
-    upload_file(source=time_directory / file.name,
-                dest=dest_directory / file.name)
+    maybe_download_crate(
+        crate_name=args.bench_crate_name, crate_version=args.bench_crate_version
+    )
+
+    files_dir = Path(
+        "/mnt/host/source/src/third_party/chromiumos-overlay",
+        "dev-lang/rust/files",
+    )
+
+    logging.info("Copying profile data to be used in building Rust")
+    run(
+        [
+            "cp",
+            (
+                LOCAL_BASE
+                / "profdata"
+                / f"{args.crate_name}-{args.crate_version}"
+                / "llvm.profdata"
+            ),
+            files_dir,
+        ]
+    )
+    run(
+        [
+            "cp",
+            (
+                LOCAL_BASE
+                / "profdata"
+                / f"{args.crate_name}-{args.crate_version}"
+                / "frontend.profdata"
+            ),
+            files_dir,
+        ]
+    )
+
+    logging.info("Building Rust with PGO")
+    build_rust(use_llvm_profile=True, use_frontend_profile=True)
+
+    time_directory = (
+        LOCAL_BASE / "benchmarks" / f"{args.crate_name}-{args.crate_version}"
+    )
+    time_directory.mkdir(parents=True, exist_ok=True)
+    logging.info("Benchmarking crate built with PGO")
+    for triple in TARGET_TRIPLES:
+        build_crate(
+            crate_name=args.bench_crate_name,
+            crate_version=args.bench_crate_version,
+            target_triple=triple,
+            time_file=(
+                time_directory
+                / f"{args.bench_crate_name}-{args.bench_crate_version}-{triple}"
+            ),
+        )
+
+    rust_version = get_rust_version()
+    dest_directory = (
+        GS_BASE
+        / "benchmarks"
+        / rust_version
+        / f"{args.crate_name}-{args.crate_version}"
+    )
+    logging.info("Uploading benchmark data")
+    for file in time_directory.iterdir():
+        upload_file(
+            source=time_directory / file.name, dest=dest_directory / file.name
+        )


 def upload_profdata(args):
-  directory = (LOCAL_BASE / 'profdata' /
-               f'{args.crate_name}-{args.crate_version}')
-  rust_version = get_rust_version()
+    directory = (
+        LOCAL_BASE / "profdata" / f"{args.crate_name}-{args.crate_version}"
+    )
+    rust_version = get_rust_version()

-  logging.info('Uploading LLVM profdata')
-  do_upload_profdata(source=directory / 'llvm.profdata',
-                     dest=(GS_DISTFILES /
-                           f'rust-pgo-{rust_version}-llvm.profdata.xz'))
+    logging.info("Uploading LLVM profdata")
+    do_upload_profdata(
+        source=directory / "llvm.profdata",
+        dest=(GS_DISTFILES / f"rust-pgo-{rust_version}-llvm.profdata.xz"),
+    )

-  logging.info('Uploading frontend profdata')
-  do_upload_profdata(source=directory / 'frontend.profdata',
-                     dest=(GS_DISTFILES /
-                           f'rust-pgo-{rust_version}-frontend.profdata.xz'))
+    logging.info("Uploading frontend profdata")
+    do_upload_profdata(
+        source=directory / "frontend.profdata",
+        dest=(GS_DISTFILES / f"rust-pgo-{rust_version}-frontend.profdata.xz"),
+    )


 def main():
-  logging.basicConfig(stream=sys.stdout,
-                      level=logging.NOTSET,
-                      format='%(message)s')
-
-  parser = argparse.ArgumentParser(
-      prog=sys.argv[0],
-      description=__doc__,
-      formatter_class=argparse.RawDescriptionHelpFormatter)
-  subparsers = parser.add_subparsers(dest='command', help='')
-  subparsers.required = True
-
-  parser_generate =
subparsers.add_parser( - 'generate', - help='Generate LLVM and frontend profdata files by building ' - 'instrumented Rust compilers, and using them to build the ' - 'indicated crate (downloading the crate if necessary).') - parser_generate.set_defaults(func=generate) - parser_generate.add_argument('--crate-name', - default=CRATE_NAME, - help='Name of the crate to build') - parser_generate.add_argument('--crate-version', - default=CRATE_VERSION, - help='Version of the crate to build') - - parser_benchmark_nopgo = subparsers.add_parser( - 'benchmark-nopgo', - help='Build the Rust compiler without PGO, benchmark ' - 'the build of the indicated crate, and upload ' - 'the benchmark data.') - parser_benchmark_nopgo.set_defaults(func=benchmark_nopgo) - parser_benchmark_nopgo.add_argument( - '--bench-crate-name', - default=CRATE_NAME, - help='Name of the crate whose build to benchmark') - parser_benchmark_nopgo.add_argument( - '--bench-crate-version', - default=CRATE_VERSION, - help='Version of the crate whose benchmark to build') - - parser_benchmark_pgo = subparsers.add_parser( - 'benchmark-pgo', - help='Build the Rust compiler using PGO with the indicated ' - 'profdata files, benchmark the build of the indicated crate, ' - 'and upload the benchmark data.') - parser_benchmark_pgo.set_defaults(func=benchmark_pgo) - parser_benchmark_pgo.add_argument( - '--bench-crate-name', - default=CRATE_NAME, - help='Name of the crate whose build to benchmark') - parser_benchmark_pgo.add_argument( - '--bench-crate-version', - default=CRATE_VERSION, - help='Version of the crate whose benchmark to build') - parser_benchmark_pgo.add_argument( - '--crate-name', - default=CRATE_NAME, - help='Name of the crate whose profile to use') - parser_benchmark_pgo.add_argument( - '--crate-version', - default=CRATE_VERSION, - help='Version of the crate whose profile to use') - - parser_upload_profdata = subparsers.add_parser( - 'upload-profdata', help='Upload the profdata files') - parser_upload_profdata.set_defaults(func=upload_profdata) - parser_upload_profdata.add_argument( - '--crate-name', - default=CRATE_NAME, - help='Name of the crate whose profile to use') - parser_upload_profdata.add_argument( - '--crate-version', - default=CRATE_VERSION, - help='Version of the crate whose profile to use') - - args = parser.parse_args() - - (LOCAL_BASE / 'crates').mkdir(parents=True, exist_ok=True) - (LOCAL_BASE / 'llvm-profraw').mkdir(parents=True, exist_ok=True) - (LOCAL_BASE / 'frontend-profraw').mkdir(parents=True, exist_ok=True) - (LOCAL_BASE / 'benchmarks').mkdir(parents=True, exist_ok=True) - - args.func(args) - - return 0 - - -if __name__ == '__main__': - sys.exit(main()) + logging.basicConfig( + stream=sys.stdout, level=logging.NOTSET, format="%(message)s" + ) + + parser = argparse.ArgumentParser( + prog=sys.argv[0], + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + subparsers = parser.add_subparsers(dest="command", help="") + subparsers.required = True + + parser_generate = subparsers.add_parser( + "generate", + help="Generate LLVM and frontend profdata files by building " + "instrumented Rust compilers, and using them to build the " + "indicated crate (downloading the crate if necessary).", + ) + parser_generate.set_defaults(func=generate) + parser_generate.add_argument( + "--crate-name", default=CRATE_NAME, help="Name of the crate to build" + ) + parser_generate.add_argument( + "--crate-version", + default=CRATE_VERSION, + help="Version of the crate to build", + ) + + 
parser_benchmark_nopgo = subparsers.add_parser( + "benchmark-nopgo", + help="Build the Rust compiler without PGO, benchmark " + "the build of the indicated crate, and upload " + "the benchmark data.", + ) + parser_benchmark_nopgo.set_defaults(func=benchmark_nopgo) + parser_benchmark_nopgo.add_argument( + "--bench-crate-name", + default=CRATE_NAME, + help="Name of the crate whose build to benchmark", + ) + parser_benchmark_nopgo.add_argument( + "--bench-crate-version", + default=CRATE_VERSION, + help="Version of the crate whose benchmark to build", + ) + + parser_benchmark_pgo = subparsers.add_parser( + "benchmark-pgo", + help="Build the Rust compiler using PGO with the indicated " + "profdata files, benchmark the build of the indicated crate, " + "and upload the benchmark data.", + ) + parser_benchmark_pgo.set_defaults(func=benchmark_pgo) + parser_benchmark_pgo.add_argument( + "--bench-crate-name", + default=CRATE_NAME, + help="Name of the crate whose build to benchmark", + ) + parser_benchmark_pgo.add_argument( + "--bench-crate-version", + default=CRATE_VERSION, + help="Version of the crate whose benchmark to build", + ) + parser_benchmark_pgo.add_argument( + "--crate-name", + default=CRATE_NAME, + help="Name of the crate whose profile to use", + ) + parser_benchmark_pgo.add_argument( + "--crate-version", + default=CRATE_VERSION, + help="Version of the crate whose profile to use", + ) + + parser_upload_profdata = subparsers.add_parser( + "upload-profdata", help="Upload the profdata files" + ) + parser_upload_profdata.set_defaults(func=upload_profdata) + parser_upload_profdata.add_argument( + "--crate-name", + default=CRATE_NAME, + help="Name of the crate whose profile to use", + ) + parser_upload_profdata.add_argument( + "--crate-version", + default=CRATE_VERSION, + help="Version of the crate whose profile to use", + ) + + args = parser.parse_args() + + (LOCAL_BASE / "crates").mkdir(parents=True, exist_ok=True) + (LOCAL_BASE / "llvm-profraw").mkdir(parents=True, exist_ok=True) + (LOCAL_BASE / "frontend-profraw").mkdir(parents=True, exist_ok=True) + (LOCAL_BASE / "benchmarks").mkdir(parents=True, exist_ok=True) + + args.func(args) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/remote_test.py b/remote_test.py index 197bae68..574d6762 100755 --- a/remote_test.py +++ b/remote_test.py @@ -12,7 +12,8 @@ This script can login to the chromeos machine using the test private key. 
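# Context for the docstring quoted in the hunk header above: ChromeOS test
# images accept root SSH logins authenticated by a well-known shared test key.
# A minimal sketch of such a login, assuming a local copy of the key (the
# path below is illustrative only); the script itself goes through
# command_executer.CrosRunCommand rather than raw ssh:
import subprocess

def run_on_dut(host: str, command: str, key_path: str = "testing_rsa") -> str:
    # Host keys change whenever the device is reimaged, so checking is off.
    return subprocess.check_output(
        ["ssh", "-i", key_path, "-o", "StrictHostKeyChecking=no",
         f"root@{host}", command],
        encoding="utf-8",
    )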
from __future__ import print_function -__author__ = 'asharif@google.com (Ahmad Sharif)' + +__author__ = "asharif@google.com (Ahmad Sharif)" import argparse import os @@ -23,84 +24,93 @@ from cros_utils import misc def Usage(parser, message): - print('ERROR: %s' % message) - parser.print_help() - sys.exit(0) + print("ERROR: %s" % message) + parser.print_help() + sys.exit(0) def Main(argv): - parser = argparse.ArgumentParser() - parser.add_argument( - '-c', - '--chromeos_root', - dest='chromeos_root', - help='ChromeOS root checkout directory') - parser.add_argument( - '-r', '--remote', dest='remote', help='Remote chromeos device.') - options = parser.parse_args(argv) - if options.chromeos_root is None: - Usage(parser, 'chromeos_root must be given') - - if options.remote is None: - Usage(parser, 'remote must be given') - - options.chromeos_root = os.path.expanduser(options.chromeos_root) - - command = 'ls -lt /' - ce = command_executer.GetCommandExecuter() - ce.CrosRunCommand( - command, chromeos_root=options.chromeos_root, machine=options.remote) - - version_dir_path, script_name = misc.GetRoot(sys.argv[0]) - version_dir = misc.GetRoot(version_dir_path)[1] - - # Tests to copy directories and files to the chromeos box. - ce.CopyFiles( - version_dir_path, - '/tmp/' + version_dir, - dest_machine=options.remote, - dest_cros=True, - chromeos_root=options.chromeos_root) - ce.CopyFiles( - version_dir_path, - '/tmp/' + version_dir + '1', - dest_machine=options.remote, - dest_cros=True, - chromeos_root=options.chromeos_root) - ce.CopyFiles( - sys.argv[0], - '/tmp/' + script_name, - recursive=False, - dest_machine=options.remote, - dest_cros=True, - chromeos_root=options.chromeos_root) - ce.CopyFiles( - sys.argv[0], - '/tmp/' + script_name + '1', - recursive=False, - dest_machine=options.remote, - dest_cros=True, - chromeos_root=options.chromeos_root) - - # Test to copy directories and files from the chromeos box. - ce.CopyFiles( - '/tmp/' + script_name, - '/tmp/hello', - recursive=False, - src_machine=options.remote, - src_cros=True, - chromeos_root=options.chromeos_root) - ce.CopyFiles( - '/tmp/' + script_name, - '/tmp/' + script_name, - recursive=False, - src_machine=options.remote, - src_cros=True, - chromeos_root=options.chromeos_root) - board = ce.CrosLearnBoard(options.chromeos_root, options.remote) - print(board) - return 0 - - -if __name__ == '__main__': - Main(sys.argv[1:]) + parser = argparse.ArgumentParser() + parser.add_argument( + "-c", + "--chromeos_root", + dest="chromeos_root", + help="ChromeOS root checkout directory", + ) + parser.add_argument( + "-r", "--remote", dest="remote", help="Remote chromeos device." + ) + options = parser.parse_args(argv) + if options.chromeos_root is None: + Usage(parser, "chromeos_root must be given") + + if options.remote is None: + Usage(parser, "remote must be given") + + options.chromeos_root = os.path.expanduser(options.chromeos_root) + + command = "ls -lt /" + ce = command_executer.GetCommandExecuter() + ce.CrosRunCommand( + command, chromeos_root=options.chromeos_root, machine=options.remote + ) + + version_dir_path, script_name = misc.GetRoot(sys.argv[0]) + version_dir = misc.GetRoot(version_dir_path)[1] + + # Tests to copy directories and files to the chromeos box. 
+ ce.CopyFiles( + version_dir_path, + "/tmp/" + version_dir, + dest_machine=options.remote, + dest_cros=True, + chromeos_root=options.chromeos_root, + ) + ce.CopyFiles( + version_dir_path, + "/tmp/" + version_dir + "1", + dest_machine=options.remote, + dest_cros=True, + chromeos_root=options.chromeos_root, + ) + ce.CopyFiles( + sys.argv[0], + "/tmp/" + script_name, + recursive=False, + dest_machine=options.remote, + dest_cros=True, + chromeos_root=options.chromeos_root, + ) + ce.CopyFiles( + sys.argv[0], + "/tmp/" + script_name + "1", + recursive=False, + dest_machine=options.remote, + dest_cros=True, + chromeos_root=options.chromeos_root, + ) + + # Test to copy directories and files from the chromeos box. + ce.CopyFiles( + "/tmp/" + script_name, + "/tmp/hello", + recursive=False, + src_machine=options.remote, + src_cros=True, + chromeos_root=options.chromeos_root, + ) + ce.CopyFiles( + "/tmp/" + script_name, + "/tmp/" + script_name, + recursive=False, + src_machine=options.remote, + src_cros=True, + chromeos_root=options.chromeos_root, + ) + board = ce.CrosLearnBoard(options.chromeos_root, options.remote) + print(board) + return 0 + + +if __name__ == "__main__": + Main(sys.argv[1:]) diff --git a/rust_tools/rust_uprev.py b/rust_tools/rust_uprev.py index 7e170443..cd35fa0b 100755 --- a/rust_tools/rust_uprev.py +++ b/rust_tools/rust_uprev.py @@ -49,351 +49,380 @@ from llvm_tools import chroot from llvm_tools import git -EQUERY = 'equery' -GSUTIL = 'gsutil.py' -MIRROR_PATH = 'gs://chromeos-localmirror/distfiles' +EQUERY = "equery" +GSUTIL = "gsutil.py" +MIRROR_PATH = "gs://chromeos-localmirror/distfiles" RUST_PATH = Path( - '/mnt/host/source/src/third_party/chromiumos-overlay/dev-lang/rust') + "/mnt/host/source/src/third_party/chromiumos-overlay/dev-lang/rust" +) def get_command_output(command: List[str], *args, **kwargs) -> str: - return subprocess.check_output(command, encoding='utf-8', *args, - **kwargs).strip() + return subprocess.check_output( + command, encoding="utf-8", *args, **kwargs + ).strip() def get_command_output_unchecked(command: List[str], *args, **kwargs) -> str: - return subprocess.run(command, - check=False, - stdout=subprocess.PIPE, - encoding='utf-8', - *args, - **kwargs).stdout.strip() + return subprocess.run( + command, + check=False, + stdout=subprocess.PIPE, + encoding="utf-8", + *args, + **kwargs, + ).stdout.strip() class RustVersion(NamedTuple): - """NamedTuple represents a Rust version""" - major: int - minor: int - patch: int - - def __str__(self): - return f'{self.major}.{self.minor}.{self.patch}' - - @staticmethod - def parse_from_ebuild(ebuild_name: str) -> 'RustVersion': - input_re = re.compile(r'^rust-' - r'(?P<major>\d+)\.' - r'(?P<minor>\d+)\.' - r'(?P<patch>\d+)' - r'(:?-r\d+)?' - r'\.ebuild$') - m = input_re.match(ebuild_name) - assert m, f'failed to parse {ebuild_name!r}' - return RustVersion(int(m.group('major')), int(m.group('minor')), - int(m.group('patch'))) - - @staticmethod - def parse(x: str) -> 'RustVersion': - input_re = re.compile(r'^(?:rust-)?' - r'(?P<major>\d+)\.' - r'(?P<minor>\d+)\.' 
- r'(?P<patch>\d+)' - r'(?:.ebuild)?$') - m = input_re.match(x) - assert m, f'failed to parse {x!r}' - return RustVersion(int(m.group('major')), int(m.group('minor')), - int(m.group('patch'))) + """NamedTuple represents a Rust version""" + + major: int + minor: int + patch: int + + def __str__(self): + return f"{self.major}.{self.minor}.{self.patch}" + + @staticmethod + def parse_from_ebuild(ebuild_name: str) -> "RustVersion": + input_re = re.compile( + r"^rust-" + r"(?P<major>\d+)\." + r"(?P<minor>\d+)\." + r"(?P<patch>\d+)" + r"(:?-r\d+)?" + r"\.ebuild$" + ) + m = input_re.match(ebuild_name) + assert m, f"failed to parse {ebuild_name!r}" + return RustVersion( + int(m.group("major")), int(m.group("minor")), int(m.group("patch")) + ) + + @staticmethod + def parse(x: str) -> "RustVersion": + input_re = re.compile( + r"^(?:rust-)?" + r"(?P<major>\d+)\." + r"(?P<minor>\d+)\." + r"(?P<patch>\d+)" + r"(?:.ebuild)?$" + ) + m = input_re.match(x) + assert m, f"failed to parse {x!r}" + return RustVersion( + int(m.group("major")), int(m.group("minor")), int(m.group("patch")) + ) def compute_rustc_src_name(version: RustVersion) -> str: - return f'rustc-{version}-src.tar.gz' + return f"rustc-{version}-src.tar.gz" def compute_rust_bootstrap_prebuilt_name(version: RustVersion) -> str: - return f'rust-bootstrap-{version}.tbz2' + return f"rust-bootstrap-{version}.tbz2" def find_ebuild_for_package(name: str) -> os.PathLike: - """Returns the path to the ebuild for the named package.""" - return get_command_output([EQUERY, 'w', name]) + """Returns the path to the ebuild for the named package.""" + return get_command_output([EQUERY, "w", name]) -def find_ebuild_path(directory: Path, - name: str, - version: Optional[RustVersion] = None) -> Path: - """Finds an ebuild in a directory. +def find_ebuild_path( + directory: Path, name: str, version: Optional[RustVersion] = None +) -> Path: + """Finds an ebuild in a directory. - Returns the path to the ebuild file. Asserts if there is not - exactly one match. The match is constrained by name and optionally - by version, but can match any patch level. E.g. "rust" version - 1.3.4 can match rust-1.3.4.ebuild but also rust-1.3.4-r6.ebuild. - """ - if version: - pattern = f'{name}-{version}*.ebuild' - else: - pattern = f'{name}-*.ebuild' - matches = list(Path(directory).glob(pattern)) - assert len(matches) == 1, matches - return matches[0] + Returns the path to the ebuild file. Asserts if there is not + exactly one match. The match is constrained by name and optionally + by version, but can match any patch level. E.g. "rust" version + 1.3.4 can match rust-1.3.4.ebuild but also rust-1.3.4-r6.ebuild. 
+ """ + if version: + pattern = f"{name}-{version}*.ebuild" + else: + pattern = f"{name}-*.ebuild" + matches = list(Path(directory).glob(pattern)) + assert len(matches) == 1, matches + return matches[0] def get_rust_bootstrap_version(): - """Get the version of the current rust-bootstrap package.""" - bootstrap_ebuild = find_ebuild_path(rust_bootstrap_path(), 'rust-bootstrap') - m = re.match(r'^rust-bootstrap-(\d+).(\d+).(\d+)', bootstrap_ebuild.name) - assert m, bootstrap_ebuild.name - return RustVersion(int(m.group(1)), int(m.group(2)), int(m.group(3))) + """Get the version of the current rust-bootstrap package.""" + bootstrap_ebuild = find_ebuild_path(rust_bootstrap_path(), "rust-bootstrap") + m = re.match(r"^rust-bootstrap-(\d+).(\d+).(\d+)", bootstrap_ebuild.name) + assert m, bootstrap_ebuild.name + return RustVersion(int(m.group(1)), int(m.group(2)), int(m.group(3))) def parse_commandline_args() -> argparse.Namespace: - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - '--state_file', - required=True, - help='A state file to hold previous completed steps. If the file ' - 'exists, it needs to be used together with --continue or --restart. ' - 'If not exist (do not use --continue in this case), we will create a ' - 'file for you.', - ) - parser.add_argument( - '--restart', - action='store_true', - help='Restart from the first step. Ignore the completed steps in ' - 'the state file', - ) - parser.add_argument( - '--continue', - dest='cont', - action='store_true', - help='Continue the steps from the state file', - ) - - create_parser_template = argparse.ArgumentParser(add_help=False) - create_parser_template.add_argument( - '--template', - type=RustVersion.parse, - default=None, - help='A template to use for creating a Rust uprev from, in the form ' - 'a.b.c The ebuild has to exist in the chroot. If not specified, the ' - 'tool will use the current Rust version in the chroot as template.', - ) - create_parser_template.add_argument( - '--skip_compile', - action='store_true', - help='Skip compiling rust to test the tool. 
Only for testing', - ) - - subparsers = parser.add_subparsers(dest='subparser_name') - subparser_names = [] - subparser_names.append('create') - create_parser = subparsers.add_parser( - 'create', - parents=[create_parser_template], - help='Create changes uprevs Rust to a new version', - ) - create_parser.add_argument( - '--rust_version', - type=RustVersion.parse, - required=True, - help='Rust version to uprev to, in the form a.b.c', - ) - - subparser_names.append('remove') - remove_parser = subparsers.add_parser( - 'remove', - help='Clean up old Rust version from chroot', - ) - remove_parser.add_argument( - '--rust_version', - type=RustVersion.parse, - default=None, - help='Rust version to remove, in the form a.b.c If not ' - 'specified, the tool will remove the oldest version in the chroot', - ) - - subparser_names.append('remove-bootstrap') - remove_bootstrap_parser = subparsers.add_parser( - 'remove-bootstrap', - help='Remove an old rust-bootstrap version', - ) - remove_bootstrap_parser.add_argument( - '--version', - type=RustVersion.parse, - required=True, - help='rust-bootstrap version to remove', - ) - - subparser_names.append('roll') - roll_parser = subparsers.add_parser( - 'roll', - parents=[create_parser_template], - help='A command can create and upload a Rust uprev CL, including ' - 'preparing the repo, creating new Rust uprev, deleting old uprev, ' - 'and upload a CL to crrev.', - ) - roll_parser.add_argument( - '--uprev', - type=RustVersion.parse, - required=True, - help='Rust version to uprev to, in the form a.b.c', - ) - roll_parser.add_argument( - '--remove', - type=RustVersion.parse, - default=None, - help='Rust version to remove, in the form a.b.c If not ' - 'specified, the tool will remove the oldest version in the chroot', - ) - roll_parser.add_argument( - '--skip_cross_compiler', - action='store_true', - help='Skip updating cross-compiler in the chroot', - ) - roll_parser.add_argument( - '--no_upload', - action='store_true', - help='If specified, the tool will not upload the CL for review', - ) - - args = parser.parse_args() - if args.subparser_name not in subparser_names: - parser.error('one of %s must be specified' % subparser_names) - - if args.cont and args.restart: - parser.error('Please select either --continue or --restart') - - if os.path.exists(args.state_file): - if not args.cont and not args.restart: - parser.error('State file exists, so you should either --continue ' - 'or --restart') - if args.cont and not os.path.exists(args.state_file): - parser.error('Indicate --continue but the state file does not exist') - - if args.restart and os.path.exists(args.state_file): - os.remove(args.state_file) - - return args + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "--state_file", + required=True, + help="A state file to hold previous completed steps. If the file " + "exists, it needs to be used together with --continue or --restart. " + "If not exist (do not use --continue in this case), we will create a " + "file for you.", + ) + parser.add_argument( + "--restart", + action="store_true", + help="Restart from the first step. 
Ignore the completed steps in " + "the state file", + ) + parser.add_argument( + "--continue", + dest="cont", + action="store_true", + help="Continue the steps from the state file", + ) + + create_parser_template = argparse.ArgumentParser(add_help=False) + create_parser_template.add_argument( + "--template", + type=RustVersion.parse, + default=None, + help="A template to use for creating a Rust uprev from, in the form " + "a.b.c The ebuild has to exist in the chroot. If not specified, the " + "tool will use the current Rust version in the chroot as template.", + ) + create_parser_template.add_argument( + "--skip_compile", + action="store_true", + help="Skip compiling rust to test the tool. Only for testing", + ) + + subparsers = parser.add_subparsers(dest="subparser_name") + subparser_names = [] + subparser_names.append("create") + create_parser = subparsers.add_parser( + "create", + parents=[create_parser_template], + help="Create changes uprevs Rust to a new version", + ) + create_parser.add_argument( + "--rust_version", + type=RustVersion.parse, + required=True, + help="Rust version to uprev to, in the form a.b.c", + ) + + subparser_names.append("remove") + remove_parser = subparsers.add_parser( + "remove", + help="Clean up old Rust version from chroot", + ) + remove_parser.add_argument( + "--rust_version", + type=RustVersion.parse, + default=None, + help="Rust version to remove, in the form a.b.c If not " + "specified, the tool will remove the oldest version in the chroot", + ) + + subparser_names.append("remove-bootstrap") + remove_bootstrap_parser = subparsers.add_parser( + "remove-bootstrap", + help="Remove an old rust-bootstrap version", + ) + remove_bootstrap_parser.add_argument( + "--version", + type=RustVersion.parse, + required=True, + help="rust-bootstrap version to remove", + ) + + subparser_names.append("roll") + roll_parser = subparsers.add_parser( + "roll", + parents=[create_parser_template], + help="A command can create and upload a Rust uprev CL, including " + "preparing the repo, creating new Rust uprev, deleting old uprev, " + "and upload a CL to crrev.", + ) + roll_parser.add_argument( + "--uprev", + type=RustVersion.parse, + required=True, + help="Rust version to uprev to, in the form a.b.c", + ) + roll_parser.add_argument( + "--remove", + type=RustVersion.parse, + default=None, + help="Rust version to remove, in the form a.b.c If not " + "specified, the tool will remove the oldest version in the chroot", + ) + roll_parser.add_argument( + "--skip_cross_compiler", + action="store_true", + help="Skip updating cross-compiler in the chroot", + ) + roll_parser.add_argument( + "--no_upload", + action="store_true", + help="If specified, the tool will not upload the CL for review", + ) + + args = parser.parse_args() + if args.subparser_name not in subparser_names: + parser.error("one of %s must be specified" % subparser_names) + + if args.cont and args.restart: + parser.error("Please select either --continue or --restart") + + if os.path.exists(args.state_file): + if not args.cont and not args.restart: + parser.error( + "State file exists, so you should either --continue " + "or --restart" + ) + if args.cont and not os.path.exists(args.state_file): + parser.error("Indicate --continue but the state file does not exist") + + if args.restart and os.path.exists(args.state_file): + os.remove(args.state_file) + + return args def prepare_uprev( rust_version: RustVersion, template: Optional[RustVersion] ) -> Optional[Tuple[RustVersion, str, RustVersion]]: - if template is None: - 
ebuild_path = find_ebuild_for_package('rust') - ebuild_name = os.path.basename(ebuild_path) - template_version = RustVersion.parse_from_ebuild(ebuild_name) - else: - ebuild_path = find_ebuild_for_rust_version(template) - template_version = template - - bootstrap_version = get_rust_bootstrap_version() + if template is None: + ebuild_path = find_ebuild_for_package("rust") + ebuild_name = os.path.basename(ebuild_path) + template_version = RustVersion.parse_from_ebuild(ebuild_name) + else: + ebuild_path = find_ebuild_for_rust_version(template) + template_version = template + + bootstrap_version = get_rust_bootstrap_version() + + if rust_version <= template_version: + logging.info( + "Requested version %s is not newer than the template version %s.", + rust_version, + template_version, + ) + return None - if rust_version <= template_version: logging.info( - 'Requested version %s is not newer than the template version %s.', - rust_version, template_version) - return None - - logging.info('Template Rust version is %s (ebuild: %r)', template_version, - ebuild_path) - logging.info('rust-bootstrap version is %s', bootstrap_version) - - return template_version, ebuild_path, bootstrap_version - - -def copy_patches(directory: Path, template_version: RustVersion, - new_version: RustVersion) -> None: - patch_path = directory.joinpath('files') - prefix = '%s-%s-' % (directory.name, template_version) - new_prefix = '%s-%s-' % (directory.name, new_version) - for f in os.listdir(patch_path): - if not f.startswith(prefix): - continue - logging.info('Copy patch %s to new version', f) - new_name = f.replace(str(template_version), str(new_version)) - shutil.copyfile( - os.path.join(patch_path, f), - os.path.join(patch_path, new_name), + "Template Rust version is %s (ebuild: %r)", + template_version, + ebuild_path, + ) + logging.info("rust-bootstrap version is %s", bootstrap_version) + + return template_version, ebuild_path, bootstrap_version + + +def copy_patches( + directory: Path, template_version: RustVersion, new_version: RustVersion +) -> None: + patch_path = directory.joinpath("files") + prefix = "%s-%s-" % (directory.name, template_version) + new_prefix = "%s-%s-" % (directory.name, new_version) + for f in os.listdir(patch_path): + if not f.startswith(prefix): + continue + logging.info("Copy patch %s to new version", f) + new_name = f.replace(str(template_version), str(new_version)) + shutil.copyfile( + os.path.join(patch_path, f), + os.path.join(patch_path, new_name), + ) + + subprocess.check_call( + ["git", "add", f"{new_prefix}*.patch"], cwd=patch_path ) - - subprocess.check_call(['git', 'add', f'{new_prefix}*.patch'], cwd=patch_path) def create_ebuild(template_ebuild: str, new_version: RustVersion) -> str: - shutil.copyfile(template_ebuild, - RUST_PATH.joinpath(f'rust-{new_version}.ebuild')) - subprocess.check_call(['git', 'add', f'rust-{new_version}.ebuild'], - cwd=RUST_PATH) - return os.path.join(RUST_PATH, f'rust-{new_version}.ebuild') + shutil.copyfile( + template_ebuild, RUST_PATH.joinpath(f"rust-{new_version}.ebuild") + ) + subprocess.check_call( + ["git", "add", f"rust-{new_version}.ebuild"], cwd=RUST_PATH + ) + return os.path.join(RUST_PATH, f"rust-{new_version}.ebuild") def update_bootstrap_ebuild(new_bootstrap_version: RustVersion) -> None: - old_ebuild = find_ebuild_path(rust_bootstrap_path(), 'rust-bootstrap') - m = re.match(r'^rust-bootstrap-(\d+).(\d+).(\d+)', old_ebuild.name) - assert m, old_ebuild.name - old_version = RustVersion(m.group(1), m.group(2), m.group(3)) - new_ebuild = 
old_ebuild.parent.joinpath( - f'rust-bootstrap-{new_bootstrap_version}.ebuild') - old_text = old_ebuild.read_text(encoding='utf-8') - new_text, changes = re.subn(r'(RUSTC_RAW_FULL_BOOTSTRAP_SEQUENCE=\([^)]*)', - f'\\1\t{old_version}\n', - old_text, - flags=re.MULTILINE) - assert changes == 1, 'Failed to update RUSTC_RAW_FULL_BOOTSTRAP_SEQUENCE' - new_ebuild.write_text(new_text, encoding='utf-8') - - -def update_ebuild(ebuild_file: str, - new_bootstrap_version: RustVersion) -> None: - contents = open(ebuild_file, encoding='utf-8').read() - contents, subs = re.subn(r'^BOOTSTRAP_VERSION=.*$', - 'BOOTSTRAP_VERSION="%s"' % - (new_bootstrap_version, ), - contents, - flags=re.MULTILINE) - if not subs: - raise RuntimeError('BOOTSTRAP_VERSION not found in rust ebuild') - open(ebuild_file, 'w', encoding='utf-8').write(contents) - logging.info('Rust ebuild file has BOOTSTRAP_VERSION updated to %s', - new_bootstrap_version) - - -def ebuild_actions(package: str, - actions: List[str], - sudo: bool = False) -> None: - ebuild_path_inchroot = find_ebuild_for_package(package) - cmd = ['ebuild', ebuild_path_inchroot] + actions - if sudo: - cmd = ['sudo'] + cmd - subprocess.check_call(cmd) + old_ebuild = find_ebuild_path(rust_bootstrap_path(), "rust-bootstrap") + m = re.match(r"^rust-bootstrap-(\d+).(\d+).(\d+)", old_ebuild.name) + assert m, old_ebuild.name + old_version = RustVersion(m.group(1), m.group(2), m.group(3)) + new_ebuild = old_ebuild.parent.joinpath( + f"rust-bootstrap-{new_bootstrap_version}.ebuild" + ) + old_text = old_ebuild.read_text(encoding="utf-8") + new_text, changes = re.subn( + r"(RUSTC_RAW_FULL_BOOTSTRAP_SEQUENCE=\([^)]*)", + f"\\1\t{old_version}\n", + old_text, + flags=re.MULTILINE, + ) + assert changes == 1, "Failed to update RUSTC_RAW_FULL_BOOTSTRAP_SEQUENCE" + new_ebuild.write_text(new_text, encoding="utf-8") + + +def update_ebuild(ebuild_file: str, new_bootstrap_version: RustVersion) -> None: + contents = open(ebuild_file, encoding="utf-8").read() + contents, subs = re.subn( + r"^BOOTSTRAP_VERSION=.*$", + 'BOOTSTRAP_VERSION="%s"' % (new_bootstrap_version,), + contents, + flags=re.MULTILINE, + ) + if not subs: + raise RuntimeError("BOOTSTRAP_VERSION not found in rust ebuild") + open(ebuild_file, "w", encoding="utf-8").write(contents) + logging.info( + "Rust ebuild file has BOOTSTRAP_VERSION updated to %s", + new_bootstrap_version, + ) + + +def ebuild_actions( + package: str, actions: List[str], sudo: bool = False +) -> None: + ebuild_path_inchroot = find_ebuild_for_package(package) + cmd = ["ebuild", ebuild_path_inchroot] + actions + if sudo: + cmd = ["sudo"] + cmd + subprocess.check_call(cmd) def fetch_distfile_from_mirror(name: str) -> None: - """Gets the named file from the local mirror. - - This ensures that the file exists on the mirror, and - that we can read it. We overwrite any existing distfile - to ensure the checksums that update_manifest() records - match the file as it exists on the mirror. - - This function also attempts to verify the ACL for - the file (which is expected to have READER permission - for allUsers). We can only see the ACL if the user - gsutil runs with is the owner of the file. If not, - we get an access denied error. We also count this - as a success, because it means we were able to fetch - the file even though we don't own it. 
- """ - mirror_file = MIRROR_PATH + '/' + name - local_file = Path(get_distdir(), name) - cmd = [GSUTIL, 'cp', mirror_file, local_file] - logging.info('Running %r', cmd) - rc = subprocess.call(cmd) - if rc != 0: - logging.error( - """Could not fetch %s + """Gets the named file from the local mirror. + + This ensures that the file exists on the mirror, and + that we can read it. We overwrite any existing distfile + to ensure the checksums that update_manifest() records + match the file as it exists on the mirror. + + This function also attempts to verify the ACL for + the file (which is expected to have READER permission + for allUsers). We can only see the ACL if the user + gsutil runs with is the owner of the file. If not, + we get an access denied error. We also count this + as a success, because it means we were able to fetch + the file even though we don't own it. + """ + mirror_file = MIRROR_PATH + "/" + name + local_file = Path(get_distdir(), name) + cmd = [GSUTIL, "cp", mirror_file, local_file] + logging.info("Running %r", cmd) + rc = subprocess.call(cmd) + if rc != 0: + logging.error( + """Could not fetch %s If the file does not yet exist at %s please download the file, verify its integrity @@ -408,349 +437,420 @@ gpg --recv-keys 85AB96E6FA1BE5FE Once you have verify the integrity of the file, upload it to the local mirror using gsutil cp. -""", mirror_file, MIRROR_PATH, name, name) - raise Exception(f'Could not fetch {mirror_file}') - # Check that the ACL allows allUsers READER access. - # If we get an AccessDeniedAcception here, that also - # counts as a success, because we were able to fetch - # the file as a non-owner. - cmd = [GSUTIL, 'acl', 'get', mirror_file] - logging.info('Running %r', cmd) - output = get_command_output_unchecked(cmd, stderr=subprocess.STDOUT) - acl_verified = False - if 'AccessDeniedException:' in output: - acl_verified = True - else: - acl = json.loads(output) - for x in acl: - if x['entity'] == 'allUsers' and x['role'] == 'READER': +""", + mirror_file, + MIRROR_PATH, + name, + name, + ) + raise Exception(f"Could not fetch {mirror_file}") + # Check that the ACL allows allUsers READER access. + # If we get an AccessDeniedAcception here, that also + # counts as a success, because we were able to fetch + # the file as a non-owner. + cmd = [GSUTIL, "acl", "get", mirror_file] + logging.info("Running %r", cmd) + output = get_command_output_unchecked(cmd, stderr=subprocess.STDOUT) + acl_verified = False + if "AccessDeniedException:" in output: acl_verified = True - break - if not acl_verified: - logging.error('Output from acl get:\n%s', output) - raise Exception('Could not verify that allUsers has READER permission') - - -def fetch_bootstrap_distfiles(old_version: RustVersion, - new_version: RustVersion) -> None: - """Fetches rust-bootstrap distfiles from the local mirror - - Fetches the distfiles for a rust-bootstrap ebuild to ensure they - are available on the mirror and the local copies are the same as - the ones on the mirror. 
- """ - fetch_distfile_from_mirror(compute_rust_bootstrap_prebuilt_name(old_version)) - fetch_distfile_from_mirror(compute_rustc_src_name(new_version)) + else: + acl = json.loads(output) + for x in acl: + if x["entity"] == "allUsers" and x["role"] == "READER": + acl_verified = True + break + if not acl_verified: + logging.error("Output from acl get:\n%s", output) + raise Exception("Could not verify that allUsers has READER permission") + + +def fetch_bootstrap_distfiles( + old_version: RustVersion, new_version: RustVersion +) -> None: + """Fetches rust-bootstrap distfiles from the local mirror + + Fetches the distfiles for a rust-bootstrap ebuild to ensure they + are available on the mirror and the local copies are the same as + the ones on the mirror. + """ + fetch_distfile_from_mirror( + compute_rust_bootstrap_prebuilt_name(old_version) + ) + fetch_distfile_from_mirror(compute_rustc_src_name(new_version)) def fetch_rust_distfiles(version: RustVersion) -> None: - """Fetches rust distfiles from the local mirror + """Fetches rust distfiles from the local mirror - Fetches the distfiles for a rust ebuild to ensure they - are available on the mirror and the local copies are - the same as the ones on the mirror. - """ - fetch_distfile_from_mirror(compute_rustc_src_name(version)) + Fetches the distfiles for a rust ebuild to ensure they + are available on the mirror and the local copies are + the same as the ones on the mirror. + """ + fetch_distfile_from_mirror(compute_rustc_src_name(version)) def get_distdir() -> os.PathLike: - """Returns portage's distdir.""" - return get_command_output(['portageq', 'distdir']) + """Returns portage's distdir.""" + return get_command_output(["portageq", "distdir"]) def update_manifest(ebuild_file: os.PathLike) -> None: - """Updates the MANIFEST for the ebuild at the given path.""" - ebuild = Path(ebuild_file) - ebuild_actions(ebuild.parent.name, ['manifest']) + """Updates the MANIFEST for the ebuild at the given path.""" + ebuild = Path(ebuild_file) + ebuild_actions(ebuild.parent.name, ["manifest"]) def update_rust_packages(rust_version: RustVersion, add: bool) -> None: - package_file = RUST_PATH.joinpath( - '../../profiles/targets/chromeos/package.provided') - with open(package_file, encoding='utf-8') as f: - contents = f.read() - if add: - rust_packages_re = re.compile(r'dev-lang/rust-(\d+\.\d+\.\d+)') - rust_packages = rust_packages_re.findall(contents) - # Assume all the rust packages are in alphabetical order, so insert the new - # version to the place after the last rust_packages - new_str = f'dev-lang/rust-{rust_version}' - new_contents = contents.replace(rust_packages[-1], - f'{rust_packages[-1]}\n{new_str}') - logging.info('%s has been inserted into package.provided', new_str) - else: - old_str = f'dev-lang/rust-{rust_version}\n' - assert old_str in contents, f'{old_str!r} not found in package.provided' - new_contents = contents.replace(old_str, '') - logging.info('%s has been removed from package.provided', old_str) - - with open(package_file, 'w', encoding='utf-8') as f: - f.write(new_contents) - - -def update_virtual_rust(template_version: RustVersion, - new_version: RustVersion) -> None: - template_ebuild = find_ebuild_path(RUST_PATH.joinpath('../../virtual/rust'), - 'rust', template_version) - virtual_rust_dir = template_ebuild.parent - new_name = f'rust-{new_version}.ebuild' - new_ebuild = virtual_rust_dir.joinpath(new_name) - shutil.copyfile(template_ebuild, new_ebuild) - subprocess.check_call(['git', 'add', new_name], cwd=virtual_rust_dir) - - 
-def perform_step(state_file: pathlib.Path, - tmp_state_file: pathlib.Path, - completed_steps: Dict[str, Any], - step_name: str, - step_fn: Callable[[], T], - result_from_json: Optional[Callable[[Any], T]] = None, - result_to_json: Optional[Callable[[T], Any]] = None) -> T: - if step_name in completed_steps: - logging.info('Skipping previously completed step %s', step_name) - if result_from_json: - return result_from_json(completed_steps[step_name]) - return completed_steps[step_name] - - logging.info('Running step %s', step_name) - val = step_fn() - logging.info('Step %s complete', step_name) - if result_to_json: - completed_steps[step_name] = result_to_json(val) - else: - completed_steps[step_name] = val - - with tmp_state_file.open('w', encoding='utf-8') as f: - json.dump(completed_steps, f, indent=4) - tmp_state_file.rename(state_file) - return val + package_file = RUST_PATH.joinpath( + "../../profiles/targets/chromeos/package.provided" + ) + with open(package_file, encoding="utf-8") as f: + contents = f.read() + if add: + rust_packages_re = re.compile(r"dev-lang/rust-(\d+\.\d+\.\d+)") + rust_packages = rust_packages_re.findall(contents) + # Assume all the rust packages are in alphabetical order, so insert the new + # version to the place after the last rust_packages + new_str = f"dev-lang/rust-{rust_version}" + new_contents = contents.replace( + rust_packages[-1], f"{rust_packages[-1]}\n{new_str}" + ) + logging.info("%s has been inserted into package.provided", new_str) + else: + old_str = f"dev-lang/rust-{rust_version}\n" + assert old_str in contents, f"{old_str!r} not found in package.provided" + new_contents = contents.replace(old_str, "") + logging.info("%s has been removed from package.provided", old_str) + + with open(package_file, "w", encoding="utf-8") as f: + f.write(new_contents) + + +def update_virtual_rust( + template_version: RustVersion, new_version: RustVersion +) -> None: + template_ebuild = find_ebuild_path( + RUST_PATH.joinpath("../../virtual/rust"), "rust", template_version + ) + virtual_rust_dir = template_ebuild.parent + new_name = f"rust-{new_version}.ebuild" + new_ebuild = virtual_rust_dir.joinpath(new_name) + shutil.copyfile(template_ebuild, new_ebuild) + subprocess.check_call(["git", "add", new_name], cwd=virtual_rust_dir) + + +def perform_step( + state_file: pathlib.Path, + tmp_state_file: pathlib.Path, + completed_steps: Dict[str, Any], + step_name: str, + step_fn: Callable[[], T], + result_from_json: Optional[Callable[[Any], T]] = None, + result_to_json: Optional[Callable[[T], Any]] = None, +) -> T: + if step_name in completed_steps: + logging.info("Skipping previously completed step %s", step_name) + if result_from_json: + return result_from_json(completed_steps[step_name]) + return completed_steps[step_name] + + logging.info("Running step %s", step_name) + val = step_fn() + logging.info("Step %s complete", step_name) + if result_to_json: + completed_steps[step_name] = result_to_json(val) + else: + completed_steps[step_name] = val + + with tmp_state_file.open("w", encoding="utf-8") as f: + json.dump(completed_steps, f, indent=4) + tmp_state_file.rename(state_file) + return val def prepare_uprev_from_json( - obj: Any) -> Optional[Tuple[RustVersion, str, RustVersion]]: - if not obj: - return None - version, ebuild_path, bootstrap_version = obj - return RustVersion(*version), ebuild_path, RustVersion(*bootstrap_version) - - -def create_rust_uprev(rust_version: RustVersion, - maybe_template_version: Optional[RustVersion], - skip_compile: bool, run_step: 
Callable[[], T]) -> None: - template_version, template_ebuild, old_bootstrap_version = run_step( - 'prepare uprev', - lambda: prepare_uprev(rust_version, maybe_template_version), - result_from_json=prepare_uprev_from_json, - ) - if template_ebuild is None: - return - - # The fetch steps will fail (on purpose) if the files they check for - # are not available on the mirror. To make them pass, fetch the - # required files yourself, verify their checksums, then upload them - # to the mirror. - run_step( - 'fetch bootstrap distfiles', lambda: fetch_bootstrap_distfiles( - old_bootstrap_version, template_version)) - run_step('fetch rust distfiles', lambda: fetch_rust_distfiles(rust_version)) - run_step('update bootstrap ebuild', - lambda: update_bootstrap_ebuild(template_version)) - run_step( - 'update bootstrap manifest', lambda: update_manifest(rust_bootstrap_path( - ).joinpath(f'rust-bootstrap-{template_version}.ebuild'))) - run_step('copy patches', - lambda: copy_patches(RUST_PATH, template_version, rust_version)) - ebuild_file = run_step('create ebuild', - lambda: create_ebuild(template_ebuild, rust_version)) - run_step('update ebuild', - lambda: update_ebuild(ebuild_file, template_version)) - run_step('update manifest to add new version', - lambda: update_manifest(Path(ebuild_file))) - if not skip_compile: + obj: Any, +) -> Optional[Tuple[RustVersion, str, RustVersion]]: + if not obj: + return None + version, ebuild_path, bootstrap_version = obj + return RustVersion(*version), ebuild_path, RustVersion(*bootstrap_version) + + +def create_rust_uprev( + rust_version: RustVersion, + maybe_template_version: Optional[RustVersion], + skip_compile: bool, + run_step: Callable[[], T], +) -> None: + template_version, template_ebuild, old_bootstrap_version = run_step( + "prepare uprev", + lambda: prepare_uprev(rust_version, maybe_template_version), + result_from_json=prepare_uprev_from_json, + ) + if template_ebuild is None: + return + + # The fetch steps will fail (on purpose) if the files they check for + # are not available on the mirror. To make them pass, fetch the + # required files yourself, verify their checksums, then upload them + # to the mirror. 
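# A sketch of the manual pre-seeding the comment above asks for; seed_mirror
# is a hypothetical helper, not part of the tool. The constants mirror the
# ones defined near the top of this file, and the expected checksum would
# come from the upstream release announcement:
import hashlib
import os
import subprocess

GSUTIL = "gsutil.py"  # same values as this file's module-level constants
MIRROR_PATH = "gs://chromeos-localmirror/distfiles"

def seed_mirror(local_file: str, expected_sha256: str) -> None:
    with open(local_file, "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    assert digest == expected_sha256, f"checksum mismatch for {local_file}"
    dest = MIRROR_PATH + "/" + os.path.basename(local_file)
    # Upload direction is the reverse of fetch_distfile_from_mirror().
    subprocess.check_call([GSUTIL, "cp", local_file, dest])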
+ run_step( + "fetch bootstrap distfiles", + lambda: fetch_bootstrap_distfiles( + old_bootstrap_version, template_version + ), + ) + run_step("fetch rust distfiles", lambda: fetch_rust_distfiles(rust_version)) + run_step( + "update bootstrap ebuild", + lambda: update_bootstrap_ebuild(template_version), + ) + run_step( + "update bootstrap manifest", + lambda: update_manifest( + rust_bootstrap_path().joinpath( + f"rust-bootstrap-{template_version}.ebuild" + ) + ), + ) + run_step( + "copy patches", + lambda: copy_patches(RUST_PATH, template_version, rust_version), + ) + ebuild_file = run_step( + "create ebuild", lambda: create_ebuild(template_ebuild, rust_version) + ) + run_step( + "update ebuild", lambda: update_ebuild(ebuild_file, template_version) + ) + run_step( + "update manifest to add new version", + lambda: update_manifest(Path(ebuild_file)), + ) + if not skip_compile: + run_step( + "emerge rust", + lambda: subprocess.check_call(["sudo", "emerge", "dev-lang/rust"]), + ) + run_step( + "insert version into rust packages", + lambda: update_rust_packages(rust_version, add=True), + ) run_step( - 'emerge rust', - lambda: subprocess.check_call(['sudo', 'emerge', 'dev-lang/rust'])) - run_step('insert version into rust packages', - lambda: update_rust_packages(rust_version, add=True)) - run_step('upgrade virtual/rust', - lambda: update_virtual_rust(template_version, rust_version)) + "upgrade virtual/rust", + lambda: update_virtual_rust(template_version, rust_version), + ) def find_rust_versions_in_chroot() -> List[Tuple[RustVersion, str]]: - return [(RustVersion.parse_from_ebuild(x), os.path.join(RUST_PATH, x)) - for x in os.listdir(RUST_PATH) if x.endswith('.ebuild')] + return [ + (RustVersion.parse_from_ebuild(x), os.path.join(RUST_PATH, x)) + for x in os.listdir(RUST_PATH) + if x.endswith(".ebuild") + ] def find_oldest_rust_version_in_chroot() -> Tuple[RustVersion, str]: - rust_versions = find_rust_versions_in_chroot() - if len(rust_versions) <= 1: - raise RuntimeError('Expect to find more than one Rust versions') - return min(rust_versions) + rust_versions = find_rust_versions_in_chroot() + if len(rust_versions) <= 1: + raise RuntimeError("Expect to find more than one Rust versions") + return min(rust_versions) def find_ebuild_for_rust_version(version: RustVersion) -> str: - rust_ebuilds = [ - ebuild for x, ebuild in find_rust_versions_in_chroot() if x == version - ] - if not rust_ebuilds: - raise ValueError(f'No Rust ebuilds found matching {version}') - if len(rust_ebuilds) > 1: - raise ValueError(f'Multiple Rust ebuilds found matching {version}: ' - f'{rust_ebuilds}') - return rust_ebuilds[0] + rust_ebuilds = [ + ebuild for x, ebuild in find_rust_versions_in_chroot() if x == version + ] + if not rust_ebuilds: + raise ValueError(f"No Rust ebuilds found matching {version}") + if len(rust_ebuilds) > 1: + raise ValueError( + f"Multiple Rust ebuilds found matching {version}: " + f"{rust_ebuilds}" + ) + return rust_ebuilds[0] def remove_files(filename: str, path: str) -> None: - subprocess.check_call(['git', 'rm', filename], cwd=path) - - -def remove_rust_bootstrap_version(version: RustVersion, - run_step: Callable[[], T]) -> None: - prefix = f'rust-bootstrap-{version}' - run_step('remove old bootstrap ebuild', - lambda: remove_files(f'{prefix}*.ebuild', rust_bootstrap_path())) - ebuild_file = find_ebuild_for_package('rust-bootstrap') - run_step('update bootstrap manifest to delete old version', - lambda: update_manifest(ebuild_file)) - - -def remove_rust_uprev(rust_version: 
Optional[RustVersion], - run_step: Callable[[], T]) -> None: - - def find_desired_rust_version(): - if rust_version: - return rust_version, find_ebuild_for_rust_version(rust_version) - return find_oldest_rust_version_in_chroot() - - def find_desired_rust_version_from_json(obj: Any) -> Tuple[RustVersion, str]: - version, ebuild_path = obj - return RustVersion(*version), ebuild_path - - delete_version, delete_ebuild = run_step( - 'find rust version to delete', - find_desired_rust_version, - result_from_json=find_desired_rust_version_from_json, - ) - run_step( - 'remove patches', - lambda: remove_files(f'files/rust-{delete_version}-*.patch', RUST_PATH)) - run_step('remove ebuild', lambda: remove_files(delete_ebuild, RUST_PATH)) - ebuild_file = find_ebuild_for_package('rust') - run_step('update manifest to delete old version', - lambda: update_manifest(ebuild_file)) - run_step('remove version from rust packages', - lambda: update_rust_packages(delete_version, add=False)) - run_step('remove virtual/rust', lambda: remove_virtual_rust(delete_version)) + subprocess.check_call(["git", "rm", filename], cwd=path) + + +def remove_rust_bootstrap_version( + version: RustVersion, run_step: Callable[[], T] +) -> None: + prefix = f"rust-bootstrap-{version}" + run_step( + "remove old bootstrap ebuild", + lambda: remove_files(f"{prefix}*.ebuild", rust_bootstrap_path()), + ) + ebuild_file = find_ebuild_for_package("rust-bootstrap") + run_step( + "update bootstrap manifest to delete old version", + lambda: update_manifest(ebuild_file), + ) + + +def remove_rust_uprev( + rust_version: Optional[RustVersion], run_step: Callable[[], T] +) -> None: + def find_desired_rust_version(): + if rust_version: + return rust_version, find_ebuild_for_rust_version(rust_version) + return find_oldest_rust_version_in_chroot() + + def find_desired_rust_version_from_json( + obj: Any, + ) -> Tuple[RustVersion, str]: + version, ebuild_path = obj + return RustVersion(*version), ebuild_path + + delete_version, delete_ebuild = run_step( + "find rust version to delete", + find_desired_rust_version, + result_from_json=find_desired_rust_version_from_json, + ) + run_step( + "remove patches", + lambda: remove_files(f"files/rust-{delete_version}-*.patch", RUST_PATH), + ) + run_step("remove ebuild", lambda: remove_files(delete_ebuild, RUST_PATH)) + ebuild_file = find_ebuild_for_package("rust") + run_step( + "update manifest to delete old version", + lambda: update_manifest(ebuild_file), + ) + run_step( + "remove version from rust packages", + lambda: update_rust_packages(delete_version, add=False), + ) + run_step("remove virtual/rust", lambda: remove_virtual_rust(delete_version)) def remove_virtual_rust(delete_version: RustVersion) -> None: - ebuild = find_ebuild_path(RUST_PATH.joinpath('../../virtual/rust'), 'rust', - delete_version) - subprocess.check_call(['git', 'rm', str(ebuild.name)], cwd=ebuild.parent) + ebuild = find_ebuild_path( + RUST_PATH.joinpath("../../virtual/rust"), "rust", delete_version + ) + subprocess.check_call(["git", "rm", str(ebuild.name)], cwd=ebuild.parent) def rust_bootstrap_path() -> Path: - return RUST_PATH.joinpath('../rust-bootstrap') + return RUST_PATH.joinpath("../rust-bootstrap") def create_new_repo(rust_version: RustVersion) -> None: - output = get_command_output(['git', 'status', '--porcelain'], cwd=RUST_PATH) - if output: - raise RuntimeError( - f'{RUST_PATH} has uncommitted changes, please either discard them ' - 'or commit them.') - git.CreateBranch(RUST_PATH, f'rust-to-{rust_version}') + output = 
get_command_output(["git", "status", "--porcelain"], cwd=RUST_PATH)
+    if output:
+        raise RuntimeError(
+            f"{RUST_PATH} has uncommitted changes, please either discard them "
+            "or commit them."
+        )
+    git.CreateBranch(RUST_PATH, f"rust-to-{rust_version}")


 def build_cross_compiler() -> None:
-  # Get target triples in ebuild
-  rust_ebuild = find_ebuild_for_package('rust')
-  with open(rust_ebuild, encoding='utf-8') as f:
-    contents = f.read()
-
-  target_triples_re = re.compile(r'RUSTC_TARGET_TRIPLES=\(([^)]+)\)')
-  m = target_triples_re.search(contents)
-  assert m, 'RUST_TARGET_TRIPLES not found in rust ebuild'
-  target_triples = m.group(1).strip().split('\n')
-
-  compiler_targets_to_install = [
-      target.strip() for target in target_triples if 'cros-' in target
-  ]
-  for target in target_triples:
-    if 'cros-' not in target:
-      continue
-    target = target.strip()
-
-  # We also always need arm-none-eabi, though it's not mentioned in
-  # RUSTC_TARGET_TRIPLES.
-  compiler_targets_to_install.append('arm-none-eabi')
-
-  logging.info('Emerging cross compilers %s', compiler_targets_to_install)
-  subprocess.check_call(
-      ['sudo', 'emerge', '-j', '-G'] +
-      [f'cross-{target}/gcc' for target in compiler_targets_to_install])
+    # Get target triples in ebuild
+    rust_ebuild = find_ebuild_for_package("rust")
+    with open(rust_ebuild, encoding="utf-8") as f:
+        contents = f.read()
+
+    target_triples_re = re.compile(r"RUSTC_TARGET_TRIPLES=\(([^)]+)\)")
+    m = target_triples_re.search(contents)
+    assert m, "RUST_TARGET_TRIPLES not found in rust ebuild"
+    target_triples = m.group(1).strip().split("\n")
+
+    compiler_targets_to_install = [
+        target.strip() for target in target_triples if "cros-" in target
+    ]
+    for target in target_triples:
+        if "cros-" not in target:
+            continue
+        target = target.strip()
+
+    # We also always need arm-none-eabi, though it's not mentioned in
+    # RUSTC_TARGET_TRIPLES.
+    compiler_targets_to_install.append("arm-none-eabi")
+
+    logging.info("Emerging cross compilers %s", compiler_targets_to_install)
+    subprocess.check_call(
+        ["sudo", "emerge", "-j", "-G"]
+        + [f"cross-{target}/gcc" for target in compiler_targets_to_install]
+    )


 def create_new_commit(rust_version: RustVersion) -> None:
-  subprocess.check_call(['git', 'add', '-A'], cwd=RUST_PATH)
-  messages = [
-      f'[DO NOT SUBMIT] dev-lang/rust: upgrade to Rust {rust_version}',
-      '',
-      'This CL is created by rust_uprev tool automatically.'
-      '',
-      'BUG=None',
-      'TEST=Use CQ to test the new Rust version',
-  ]
-  git.UploadChanges(RUST_PATH, f'rust-to-{rust_version}', messages)
+    subprocess.check_call(["git", "add", "-A"], cwd=RUST_PATH)
+    messages = [
+        f"[DO NOT SUBMIT] dev-lang/rust: upgrade to Rust {rust_version}",
+        "",
+        "This CL is created by rust_uprev tool automatically.",
"", + "BUG=None", + "TEST=Use CQ to test the new Rust version", + ] + git.UploadChanges(RUST_PATH, f"rust-to-{rust_version}", messages) def main() -> None: - if not chroot.InChroot(): - raise RuntimeError('This script must be executed inside chroot') - - logging.basicConfig(level=logging.INFO) - - args = parse_commandline_args() - - state_file = pathlib.Path(args.state_file) - tmp_state_file = state_file.with_suffix('.tmp') - - try: - with state_file.open(encoding='utf-8') as f: - completed_steps = json.load(f) - except FileNotFoundError: - completed_steps = {} - - def run_step( - step_name: str, - step_fn: Callable[[], T], - result_from_json: Optional[Callable[[Any], T]] = None, - result_to_json: Optional[Callable[[T], Any]] = None, - ) -> T: - return perform_step(state_file, tmp_state_file, completed_steps, step_name, - step_fn, result_from_json, result_to_json) - - if args.subparser_name == 'create': - create_rust_uprev(args.rust_version, args.template, args.skip_compile, - run_step) - elif args.subparser_name == 'remove': - remove_rust_uprev(args.rust_version, run_step) - elif args.subparser_name == 'remove-bootstrap': - remove_rust_bootstrap_version(args.version, run_step) - else: - # If you have added more subparser_name, please also add the handlers above - assert args.subparser_name == 'roll' - run_step('create new repo', lambda: create_new_repo(args.uprev)) - if not args.skip_cross_compiler: - run_step('build cross compiler', build_cross_compiler) - create_rust_uprev(args.uprev, args.template, args.skip_compile, run_step) - remove_rust_uprev(args.remove, run_step) - bootstrap_version = prepare_uprev_from_json( - completed_steps['prepare uprev'])[2] - remove_rust_bootstrap_version(bootstrap_version, run_step) - if not args.no_upload: - run_step('create rust uprev CL', lambda: create_new_commit(args.uprev)) - - -if __name__ == '__main__': - sys.exit(main()) + if not chroot.InChroot(): + raise RuntimeError("This script must be executed inside chroot") + + logging.basicConfig(level=logging.INFO) + + args = parse_commandline_args() + + state_file = pathlib.Path(args.state_file) + tmp_state_file = state_file.with_suffix(".tmp") + + try: + with state_file.open(encoding="utf-8") as f: + completed_steps = json.load(f) + except FileNotFoundError: + completed_steps = {} + + def run_step( + step_name: str, + step_fn: Callable[[], T], + result_from_json: Optional[Callable[[Any], T]] = None, + result_to_json: Optional[Callable[[T], Any]] = None, + ) -> T: + return perform_step( + state_file, + tmp_state_file, + completed_steps, + step_name, + step_fn, + result_from_json, + result_to_json, + ) + + if args.subparser_name == "create": + create_rust_uprev( + args.rust_version, args.template, args.skip_compile, run_step + ) + elif args.subparser_name == "remove": + remove_rust_uprev(args.rust_version, run_step) + elif args.subparser_name == "remove-bootstrap": + remove_rust_bootstrap_version(args.version, run_step) + else: + # If you have added more subparser_name, please also add the handlers above + assert args.subparser_name == "roll" + run_step("create new repo", lambda: create_new_repo(args.uprev)) + if not args.skip_cross_compiler: + run_step("build cross compiler", build_cross_compiler) + create_rust_uprev( + args.uprev, args.template, args.skip_compile, run_step + ) + remove_rust_uprev(args.remove, run_step) + bootstrap_version = prepare_uprev_from_json( + completed_steps["prepare uprev"] + )[2] + remove_rust_bootstrap_version(bootstrap_version, run_step) + if not args.no_upload: + run_step( 
+ "create rust uprev CL", lambda: create_new_commit(args.uprev) + ) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/rust_tools/rust_uprev_test.py b/rust_tools/rust_uprev_test.py index 2e6e8713..e737d77c 100755 --- a/rust_tools/rust_uprev_test.py +++ b/rust_tools/rust_uprev_test.py @@ -20,256 +20,306 @@ from rust_uprev import RustVersion def _fail_command(cmd, *_args, **_kwargs): - err = subprocess.CalledProcessError(returncode=1, cmd=cmd) - err.stderr = b'mock failure' - raise err + err = subprocess.CalledProcessError(returncode=1, cmd=cmd) + err.stderr = b"mock failure" + raise err class FetchDistfileTest(unittest.TestCase): - """Tests rust_uprev.fetch_distfile_from_mirror()""" - - @mock.patch.object(rust_uprev, 'get_distdir', return_value='/fake/distfiles') - @mock.patch.object(subprocess, 'call', side_effect=_fail_command) - def test_fetch_difstfile_fail(self, *_args) -> None: - with self.assertRaises(subprocess.CalledProcessError): - rust_uprev.fetch_distfile_from_mirror('test_distfile.tar.gz') - - @mock.patch.object(rust_uprev, - 'get_command_output_unchecked', - return_value='AccessDeniedException: Access denied.') - @mock.patch.object(rust_uprev, 'get_distdir', return_value='/fake/distfiles') - @mock.patch.object(subprocess, 'call', return_value=0) - def test_fetch_distfile_acl_access_denied(self, *_args) -> None: - rust_uprev.fetch_distfile_from_mirror('test_distfile.tar.gz') - - @mock.patch.object( - rust_uprev, - 'get_command_output_unchecked', - return_value='[ { "entity": "allUsers", "role": "READER" } ]') - @mock.patch.object(rust_uprev, 'get_distdir', return_value='/fake/distfiles') - @mock.patch.object(subprocess, 'call', return_value=0) - def test_fetch_distfile_acl_ok(self, *_args) -> None: - rust_uprev.fetch_distfile_from_mirror('test_distfile.tar.gz') - - @mock.patch.object( - rust_uprev, - 'get_command_output_unchecked', - return_value='[ { "entity": "___fake@google.com", "role": "OWNER" } ]') - @mock.patch.object(rust_uprev, 'get_distdir', return_value='/fake/distfiles') - @mock.patch.object(subprocess, 'call', return_value=0) - def test_fetch_distfile_acl_wrong(self, *_args) -> None: - with self.assertRaisesRegex(Exception, 'allUsers.*READER'): - with self.assertLogs(level='ERROR') as log: - rust_uprev.fetch_distfile_from_mirror('test_distfile.tar.gz') - self.assertIn( - '[ { "entity": "___fake@google.com", "role": "OWNER" } ]', - '\n'.join(log.output)) + """Tests rust_uprev.fetch_distfile_from_mirror()""" + + @mock.patch.object( + rust_uprev, "get_distdir", return_value="/fake/distfiles" + ) + @mock.patch.object(subprocess, "call", side_effect=_fail_command) + def test_fetch_difstfile_fail(self, *_args) -> None: + with self.assertRaises(subprocess.CalledProcessError): + rust_uprev.fetch_distfile_from_mirror("test_distfile.tar.gz") + + @mock.patch.object( + rust_uprev, + "get_command_output_unchecked", + return_value="AccessDeniedException: Access denied.", + ) + @mock.patch.object( + rust_uprev, "get_distdir", return_value="/fake/distfiles" + ) + @mock.patch.object(subprocess, "call", return_value=0) + def test_fetch_distfile_acl_access_denied(self, *_args) -> None: + rust_uprev.fetch_distfile_from_mirror("test_distfile.tar.gz") + + @mock.patch.object( + rust_uprev, + "get_command_output_unchecked", + return_value='[ { "entity": "allUsers", "role": "READER" } ]', + ) + @mock.patch.object( + rust_uprev, "get_distdir", return_value="/fake/distfiles" + ) + @mock.patch.object(subprocess, "call", return_value=0) + def test_fetch_distfile_acl_ok(self, 
*_args) -> None: + rust_uprev.fetch_distfile_from_mirror("test_distfile.tar.gz") + + @mock.patch.object( + rust_uprev, + "get_command_output_unchecked", + return_value='[ { "entity": "___fake@google.com", "role": "OWNER" } ]', + ) + @mock.patch.object( + rust_uprev, "get_distdir", return_value="/fake/distfiles" + ) + @mock.patch.object(subprocess, "call", return_value=0) + def test_fetch_distfile_acl_wrong(self, *_args) -> None: + with self.assertRaisesRegex(Exception, "allUsers.*READER"): + with self.assertLogs(level="ERROR") as log: + rust_uprev.fetch_distfile_from_mirror("test_distfile.tar.gz") + self.assertIn( + '[ { "entity": "___fake@google.com", "role": "OWNER" } ]', + "\n".join(log.output), + ) class FindEbuildPathTest(unittest.TestCase): - """Tests for rust_uprev.find_ebuild_path()""" - - def test_exact_version(self): - with tempfile.TemporaryDirectory() as tmpdir: - ebuild = Path(tmpdir, 'test-1.3.4.ebuild') - ebuild.touch() - Path(tmpdir, 'test-1.2.3.ebuild').touch() - result = rust_uprev.find_ebuild_path(tmpdir, 'test', - rust_uprev.RustVersion(1, 3, 4)) - self.assertEqual(result, ebuild) - - def test_no_version(self): - with tempfile.TemporaryDirectory() as tmpdir: - ebuild = Path(tmpdir, 'test-1.2.3.ebuild') - ebuild.touch() - result = rust_uprev.find_ebuild_path(tmpdir, 'test') - self.assertEqual(result, ebuild) - - def test_patch_version(self): - with tempfile.TemporaryDirectory() as tmpdir: - ebuild = Path(tmpdir, 'test-1.3.4-r3.ebuild') - ebuild.touch() - Path(tmpdir, 'test-1.2.3.ebuild').touch() - result = rust_uprev.find_ebuild_path(tmpdir, 'test', - rust_uprev.RustVersion(1, 3, 4)) - self.assertEqual(result, ebuild) + """Tests for rust_uprev.find_ebuild_path()""" + + def test_exact_version(self): + with tempfile.TemporaryDirectory() as tmpdir: + ebuild = Path(tmpdir, "test-1.3.4.ebuild") + ebuild.touch() + Path(tmpdir, "test-1.2.3.ebuild").touch() + result = rust_uprev.find_ebuild_path( + tmpdir, "test", rust_uprev.RustVersion(1, 3, 4) + ) + self.assertEqual(result, ebuild) + + def test_no_version(self): + with tempfile.TemporaryDirectory() as tmpdir: + ebuild = Path(tmpdir, "test-1.2.3.ebuild") + ebuild.touch() + result = rust_uprev.find_ebuild_path(tmpdir, "test") + self.assertEqual(result, ebuild) + + def test_patch_version(self): + with tempfile.TemporaryDirectory() as tmpdir: + ebuild = Path(tmpdir, "test-1.3.4-r3.ebuild") + ebuild.touch() + Path(tmpdir, "test-1.2.3.ebuild").touch() + result = rust_uprev.find_ebuild_path( + tmpdir, "test", rust_uprev.RustVersion(1, 3, 4) + ) + self.assertEqual(result, ebuild) class RustVersionTest(unittest.TestCase): - """Tests for RustVersion class""" + """Tests for RustVersion class""" - def test_str(self): - obj = rust_uprev.RustVersion(major=1, minor=2, patch=3) - self.assertEqual(str(obj), '1.2.3') + def test_str(self): + obj = rust_uprev.RustVersion(major=1, minor=2, patch=3) + self.assertEqual(str(obj), "1.2.3") - def test_parse_version_only(self): - expected = rust_uprev.RustVersion(major=1, minor=2, patch=3) - actual = rust_uprev.RustVersion.parse('1.2.3') - self.assertEqual(expected, actual) + def test_parse_version_only(self): + expected = rust_uprev.RustVersion(major=1, minor=2, patch=3) + actual = rust_uprev.RustVersion.parse("1.2.3") + self.assertEqual(expected, actual) - def test_parse_ebuild_name(self): - expected = rust_uprev.RustVersion(major=1, minor=2, patch=3) - actual = rust_uprev.RustVersion.parse_from_ebuild('rust-1.2.3.ebuild') - self.assertEqual(expected, actual) + def test_parse_ebuild_name(self): + 
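+        # Covers the plain "rust-X.Y.Z.ebuild" name here and the
+        # "-rN"-revisioned variant just below.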
expected = rust_uprev.RustVersion(major=1, minor=2, patch=3) + actual = rust_uprev.RustVersion.parse_from_ebuild("rust-1.2.3.ebuild") + self.assertEqual(expected, actual) - actual = rust_uprev.RustVersion.parse_from_ebuild('rust-1.2.3-r1.ebuild') - self.assertEqual(expected, actual) + actual = rust_uprev.RustVersion.parse_from_ebuild( + "rust-1.2.3-r1.ebuild" + ) + self.assertEqual(expected, actual) - def test_parse_fail(self): - with self.assertRaises(AssertionError) as context: - rust_uprev.RustVersion.parse('invalid-rust-1.2.3') - self.assertEqual("failed to parse 'invalid-rust-1.2.3'", - str(context.exception)) + def test_parse_fail(self): + with self.assertRaises(AssertionError) as context: + rust_uprev.RustVersion.parse("invalid-rust-1.2.3") + self.assertEqual( + "failed to parse 'invalid-rust-1.2.3'", str(context.exception) + ) class PrepareUprevTest(unittest.TestCase): - """Tests for prepare_uprev step in rust_uprev""" - - def setUp(self): - self.bootstrap_version = rust_uprev.RustVersion(1, 1, 0) - self.version_old = rust_uprev.RustVersion(1, 2, 3) - self.version_new = rust_uprev.RustVersion(1, 3, 5) - - @mock.patch.object(rust_uprev, - 'find_ebuild_for_rust_version', - return_value='/path/to/ebuild') - @mock.patch.object(rust_uprev, 'find_ebuild_path') - @mock.patch.object(rust_uprev, 'get_command_output') - def test_success_with_template(self, mock_command, mock_find_ebuild, - _ebuild_for_version): - bootstrap_ebuild_path = Path( - '/path/to/rust-bootstrap/', - f'rust-bootstrap-{self.bootstrap_version}.ebuild') - mock_find_ebuild.return_value = bootstrap_ebuild_path - expected = (self.version_old, '/path/to/ebuild', self.bootstrap_version) - actual = rust_uprev.prepare_uprev(rust_version=self.version_new, - template=self.version_old) - self.assertEqual(expected, actual) - mock_command.assert_not_called() - - @mock.patch.object(rust_uprev, - 'find_ebuild_for_rust_version', - return_value='/path/to/ebuild') - @mock.patch.object(rust_uprev, - 'get_rust_bootstrap_version', - return_value=RustVersion(0, 41, 12)) - @mock.patch.object(rust_uprev, 'get_command_output') - def test_return_none_with_template_larger_than_input(self, mock_command, - *_args): - ret = rust_uprev.prepare_uprev(rust_version=self.version_old, - template=self.version_new) - self.assertIsNone(ret) - mock_command.assert_not_called() - - @mock.patch.object(rust_uprev, 'find_ebuild_path') - @mock.patch.object(os.path, 'exists') - @mock.patch.object(rust_uprev, 'get_command_output') - def test_success_without_template(self, mock_command, mock_exists, - mock_find_ebuild): - rust_ebuild_path = f'/path/to/rust/rust-{self.version_old}-r3.ebuild' - mock_command.return_value = rust_ebuild_path - bootstrap_ebuild_path = Path( - '/path/to/rust-bootstrap', - f'rust-bootstrap-{self.bootstrap_version}.ebuild') - mock_find_ebuild.return_value = bootstrap_ebuild_path - expected = (self.version_old, rust_ebuild_path, self.bootstrap_version) - actual = rust_uprev.prepare_uprev(rust_version=self.version_new, - template=None) - self.assertEqual(expected, actual) - mock_command.assert_called_once_with(['equery', 'w', 'rust']) - mock_exists.assert_not_called() - - @mock.patch.object(rust_uprev, - 'get_rust_bootstrap_version', - return_value=RustVersion(0, 41, 12)) - @mock.patch.object(os.path, 'exists') - @mock.patch.object(rust_uprev, 'get_command_output') - def test_return_none_with_ebuild_larger_than_input(self, mock_command, - mock_exists, *_args): - mock_command.return_value = f'/path/to/rust/rust-{self.version_new}.ebuild' - ret = 
rust_uprev.prepare_uprev(rust_version=self.version_old, - template=None) - self.assertIsNone(ret) - mock_exists.assert_not_called() - - def test_prepare_uprev_from_json(self): - ebuild_path = '/path/to/the/ebuild' - json_result = (list(self.version_new), ebuild_path, - list(self.bootstrap_version)) - expected = (self.version_new, ebuild_path, self.bootstrap_version) - actual = rust_uprev.prepare_uprev_from_json(json_result) - self.assertEqual(expected, actual) + """Tests for prepare_uprev step in rust_uprev""" + + def setUp(self): + self.bootstrap_version = rust_uprev.RustVersion(1, 1, 0) + self.version_old = rust_uprev.RustVersion(1, 2, 3) + self.version_new = rust_uprev.RustVersion(1, 3, 5) + + @mock.patch.object( + rust_uprev, + "find_ebuild_for_rust_version", + return_value="/path/to/ebuild", + ) + @mock.patch.object(rust_uprev, "find_ebuild_path") + @mock.patch.object(rust_uprev, "get_command_output") + def test_success_with_template( + self, mock_command, mock_find_ebuild, _ebuild_for_version + ): + bootstrap_ebuild_path = Path( + "/path/to/rust-bootstrap/", + f"rust-bootstrap-{self.bootstrap_version}.ebuild", + ) + mock_find_ebuild.return_value = bootstrap_ebuild_path + expected = (self.version_old, "/path/to/ebuild", self.bootstrap_version) + actual = rust_uprev.prepare_uprev( + rust_version=self.version_new, template=self.version_old + ) + self.assertEqual(expected, actual) + mock_command.assert_not_called() + + @mock.patch.object( + rust_uprev, + "find_ebuild_for_rust_version", + return_value="/path/to/ebuild", + ) + @mock.patch.object( + rust_uprev, + "get_rust_bootstrap_version", + return_value=RustVersion(0, 41, 12), + ) + @mock.patch.object(rust_uprev, "get_command_output") + def test_return_none_with_template_larger_than_input( + self, mock_command, *_args + ): + ret = rust_uprev.prepare_uprev( + rust_version=self.version_old, template=self.version_new + ) + self.assertIsNone(ret) + mock_command.assert_not_called() + + @mock.patch.object(rust_uprev, "find_ebuild_path") + @mock.patch.object(os.path, "exists") + @mock.patch.object(rust_uprev, "get_command_output") + def test_success_without_template( + self, mock_command, mock_exists, mock_find_ebuild + ): + rust_ebuild_path = f"/path/to/rust/rust-{self.version_old}-r3.ebuild" + mock_command.return_value = rust_ebuild_path + bootstrap_ebuild_path = Path( + "/path/to/rust-bootstrap", + f"rust-bootstrap-{self.bootstrap_version}.ebuild", + ) + mock_find_ebuild.return_value = bootstrap_ebuild_path + expected = (self.version_old, rust_ebuild_path, self.bootstrap_version) + actual = rust_uprev.prepare_uprev( + rust_version=self.version_new, template=None + ) + self.assertEqual(expected, actual) + mock_command.assert_called_once_with(["equery", "w", "rust"]) + mock_exists.assert_not_called() + + @mock.patch.object( + rust_uprev, + "get_rust_bootstrap_version", + return_value=RustVersion(0, 41, 12), + ) + @mock.patch.object(os.path, "exists") + @mock.patch.object(rust_uprev, "get_command_output") + def test_return_none_with_ebuild_larger_than_input( + self, mock_command, mock_exists, *_args + ): + mock_command.return_value = ( + f"/path/to/rust/rust-{self.version_new}.ebuild" + ) + ret = rust_uprev.prepare_uprev( + rust_version=self.version_old, template=None + ) + self.assertIsNone(ret) + mock_exists.assert_not_called() + + def test_prepare_uprev_from_json(self): + ebuild_path = "/path/to/the/ebuild" + json_result = ( + list(self.version_new), + ebuild_path, + list(self.bootstrap_version), + ) + expected = (self.version_new, 
ebuild_path, self.bootstrap_version) + actual = rust_uprev.prepare_uprev_from_json(json_result) + self.assertEqual(expected, actual) class UpdateEbuildTest(unittest.TestCase): - """Tests for update_ebuild step in rust_uprev""" - ebuild_file_before = """ + """Tests for update_ebuild step in rust_uprev""" + + ebuild_file_before = """ BOOTSTRAP_VERSION="1.2.0" """ - ebuild_file_after = """ + ebuild_file_after = """ BOOTSTRAP_VERSION="1.3.6" """ - def test_success(self): - mock_open = mock.mock_open(read_data=self.ebuild_file_before) - # ebuild_file and new bootstrap version are deliberately different - ebuild_file = '/path/to/rust/rust-1.3.5.ebuild' - with mock.patch('builtins.open', mock_open): - rust_uprev.update_ebuild(ebuild_file, - rust_uprev.RustVersion.parse('1.3.6')) - mock_open.return_value.__enter__().write.assert_called_once_with( - self.ebuild_file_after) - - def test_fail_when_ebuild_misses_a_variable(self): - mock_open = mock.mock_open(read_data='') - ebuild_file = '/path/to/rust/rust-1.3.5.ebuild' - with mock.patch('builtins.open', mock_open): - with self.assertRaises(RuntimeError) as context: - rust_uprev.update_ebuild(ebuild_file, - rust_uprev.RustVersion.parse('1.2.0')) - self.assertEqual('BOOTSTRAP_VERSION not found in rust ebuild', - str(context.exception)) + def test_success(self): + mock_open = mock.mock_open(read_data=self.ebuild_file_before) + # ebuild_file and new bootstrap version are deliberately different + ebuild_file = "/path/to/rust/rust-1.3.5.ebuild" + with mock.patch("builtins.open", mock_open): + rust_uprev.update_ebuild( + ebuild_file, rust_uprev.RustVersion.parse("1.3.6") + ) + mock_open.return_value.__enter__().write.assert_called_once_with( + self.ebuild_file_after + ) + + def test_fail_when_ebuild_misses_a_variable(self): + mock_open = mock.mock_open(read_data="") + ebuild_file = "/path/to/rust/rust-1.3.5.ebuild" + with mock.patch("builtins.open", mock_open): + with self.assertRaises(RuntimeError) as context: + rust_uprev.update_ebuild( + ebuild_file, rust_uprev.RustVersion.parse("1.2.0") + ) + self.assertEqual( + "BOOTSTRAP_VERSION not found in rust ebuild", str(context.exception) + ) class UpdateManifestTest(unittest.TestCase): - """Tests for update_manifest step in rust_uprev""" + """Tests for update_manifest step in rust_uprev""" - @mock.patch.object(rust_uprev, 'ebuild_actions') - def test_update_manifest(self, mock_run): - ebuild_file = Path('/path/to/rust/rust-1.1.1.ebuild') - rust_uprev.update_manifest(ebuild_file) - mock_run.assert_called_once_with('rust', ['manifest']) + @mock.patch.object(rust_uprev, "ebuild_actions") + def test_update_manifest(self, mock_run): + ebuild_file = Path("/path/to/rust/rust-1.1.1.ebuild") + rust_uprev.update_manifest(ebuild_file) + mock_run.assert_called_once_with("rust", ["manifest"]) class UpdateBootstrapEbuildTest(unittest.TestCase): - """Tests for rust_uprev.update_bootstrap_ebuild()""" - - def test_update_bootstrap_ebuild(self): - # The update should do two things: - # 1. Create a copy of rust-bootstrap's ebuild with the new version number. - # 2. Add the old PV to RUSTC_RAW_FULL_BOOTSTRAP_SEQUENCE. 
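# An illustrative aside, not part of this patch: a minimal sketch of the
# rewrite this test exercises, assuming only the two steps named in the
# comment above. `sketch_update_bootstrap` and its parameters are
# hypothetical names, not the actual rust_uprev implementation.
import re
import shutil
from pathlib import Path


def sketch_update_bootstrap(old_ebuild: Path, new_ebuild: Path, old_pv: str) -> None:
    """Copies the old bootstrap ebuild and splices the old PV into the sequence."""
    shutil.copyfile(old_ebuild, new_ebuild)
    text = new_ebuild.read_text(encoding="utf-8")
    # Splice "\t<old_pv>\n" in just before the ")" that closes the array; a
    # lambda replacement avoids backreference/digit ambiguity in re.sub.
    new_text = re.sub(
        r"(RUSTC_RAW_FULL_BOOTSTRAP_SEQUENCE=\([^)]*)\)",
        lambda m: m.group(1) + f"\t{old_pv}\n)",
        text,
        count=1,
    )
    new_ebuild.write_text(new_text, encoding="utf-8")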
- with tempfile.TemporaryDirectory() as tmpdir_str, \ - mock.patch.object(rust_uprev, 'find_ebuild_path') as mock_find_ebuild: - tmpdir = Path(tmpdir_str) - bootstrapdir = Path.joinpath(tmpdir, 'rust-bootstrap') - bootstrapdir.mkdir() - old_ebuild = bootstrapdir.joinpath('rust-bootstrap-1.45.2.ebuild') - old_ebuild.write_text(encoding='utf-8', - data=""" + """Tests for rust_uprev.update_bootstrap_ebuild()""" + + def test_update_bootstrap_ebuild(self): + # The update should do two things: + # 1. Create a copy of rust-bootstrap's ebuild with the new version number. + # 2. Add the old PV to RUSTC_RAW_FULL_BOOTSTRAP_SEQUENCE. + with tempfile.TemporaryDirectory() as tmpdir_str, mock.patch.object( + rust_uprev, "find_ebuild_path" + ) as mock_find_ebuild: + tmpdir = Path(tmpdir_str) + bootstrapdir = Path.joinpath(tmpdir, "rust-bootstrap") + bootstrapdir.mkdir() + old_ebuild = bootstrapdir.joinpath("rust-bootstrap-1.45.2.ebuild") + old_ebuild.write_text( + encoding="utf-8", + data=""" some text RUSTC_RAW_FULL_BOOTSTRAP_SEQUENCE=( \t1.43.1 \t1.44.1 ) some more text -""") - mock_find_ebuild.return_value = old_ebuild - rust_uprev.update_bootstrap_ebuild(rust_uprev.RustVersion(1, 46, 0)) - new_ebuild = bootstrapdir.joinpath('rust-bootstrap-1.46.0.ebuild') - self.assertTrue(new_ebuild.exists()) - text = new_ebuild.read_text() - self.assertEqual( - text, """ +""", + ) + mock_find_ebuild.return_value = old_ebuild + rust_uprev.update_bootstrap_ebuild(rust_uprev.RustVersion(1, 46, 0)) + new_ebuild = bootstrapdir.joinpath("rust-bootstrap-1.46.0.ebuild") + self.assertTrue(new_ebuild.exists()) + text = new_ebuild.read_text() + self.assertEqual( + text, + """ some text RUSTC_RAW_FULL_BOOTSTRAP_SEQUENCE=( \t1.43.1 @@ -277,186 +327,243 @@ RUSTC_RAW_FULL_BOOTSTRAP_SEQUENCE=( \t1.45.2 ) some more text -""") +""", + ) class UpdateRustPackagesTests(unittest.TestCase): - """Tests for update_rust_packages step.""" - - def setUp(self): - self.old_version = rust_uprev.RustVersion(1, 1, 0) - self.current_version = rust_uprev.RustVersion(1, 2, 3) - self.new_version = rust_uprev.RustVersion(1, 3, 5) - self.ebuild_file = os.path.join(rust_uprev.RUST_PATH, - 'rust-{self.new_version}.ebuild') - - def test_add_new_rust_packages(self): - package_before = (f'dev-lang/rust-{self.old_version}\n' - f'dev-lang/rust-{self.current_version}') - package_after = (f'dev-lang/rust-{self.old_version}\n' - f'dev-lang/rust-{self.current_version}\n' - f'dev-lang/rust-{self.new_version}') - mock_open = mock.mock_open(read_data=package_before) - with mock.patch('builtins.open', mock_open): - rust_uprev.update_rust_packages(self.new_version, add=True) - mock_open.return_value.__enter__().write.assert_called_once_with( - package_after) - - def test_remove_old_rust_packages(self): - package_before = (f'dev-lang/rust-{self.old_version}\n' - f'dev-lang/rust-{self.current_version}\n' - f'dev-lang/rust-{self.new_version}') - package_after = (f'dev-lang/rust-{self.current_version}\n' - f'dev-lang/rust-{self.new_version}') - mock_open = mock.mock_open(read_data=package_before) - with mock.patch('builtins.open', mock_open): - rust_uprev.update_rust_packages(self.old_version, add=False) - mock_open.return_value.__enter__().write.assert_called_once_with( - package_after) + """Tests for update_rust_packages step.""" + + def setUp(self): + self.old_version = rust_uprev.RustVersion(1, 1, 0) + self.current_version = rust_uprev.RustVersion(1, 2, 3) + self.new_version = rust_uprev.RustVersion(1, 3, 5) + self.ebuild_file = os.path.join( + rust_uprev.RUST_PATH, 
"rust-{self.new_version}.ebuild" + ) + + def test_add_new_rust_packages(self): + package_before = ( + f"dev-lang/rust-{self.old_version}\n" + f"dev-lang/rust-{self.current_version}" + ) + package_after = ( + f"dev-lang/rust-{self.old_version}\n" + f"dev-lang/rust-{self.current_version}\n" + f"dev-lang/rust-{self.new_version}" + ) + mock_open = mock.mock_open(read_data=package_before) + with mock.patch("builtins.open", mock_open): + rust_uprev.update_rust_packages(self.new_version, add=True) + mock_open.return_value.__enter__().write.assert_called_once_with( + package_after + ) + + def test_remove_old_rust_packages(self): + package_before = ( + f"dev-lang/rust-{self.old_version}\n" + f"dev-lang/rust-{self.current_version}\n" + f"dev-lang/rust-{self.new_version}" + ) + package_after = ( + f"dev-lang/rust-{self.current_version}\n" + f"dev-lang/rust-{self.new_version}" + ) + mock_open = mock.mock_open(read_data=package_before) + with mock.patch("builtins.open", mock_open): + rust_uprev.update_rust_packages(self.old_version, add=False) + mock_open.return_value.__enter__().write.assert_called_once_with( + package_after + ) class RustUprevOtherStagesTests(unittest.TestCase): - """Tests for other steps in rust_uprev""" - - def setUp(self): - self.old_version = rust_uprev.RustVersion(1, 1, 0) - self.current_version = rust_uprev.RustVersion(1, 2, 3) - self.new_version = rust_uprev.RustVersion(1, 3, 5) - self.ebuild_file = os.path.join(rust_uprev.RUST_PATH, - 'rust-{self.new_version}.ebuild') - - @mock.patch.object(shutil, 'copyfile') - @mock.patch.object(os, 'listdir') - @mock.patch.object(subprocess, 'check_call') - def test_copy_patches(self, mock_call, mock_ls, mock_copy): - mock_ls.return_value = [ - f'rust-{self.old_version}-patch-1.patch', - f'rust-{self.old_version}-patch-2-old.patch', - f'rust-{self.current_version}-patch-1.patch', - f'rust-{self.current_version}-patch-2-new.patch' - ] - rust_uprev.copy_patches(rust_uprev.RUST_PATH, self.current_version, - self.new_version) - mock_copy.assert_has_calls([ - mock.call( - os.path.join(rust_uprev.RUST_PATH, 'files', - f'rust-{self.current_version}-patch-1.patch'), - os.path.join(rust_uprev.RUST_PATH, 'files', - f'rust-{self.new_version}-patch-1.patch'), - ), - mock.call( - os.path.join(rust_uprev.RUST_PATH, 'files', - f'rust-{self.current_version}-patch-2-new.patch'), - os.path.join(rust_uprev.RUST_PATH, 'files', - f'rust-{self.new_version}-patch-2-new.patch')) - ]) - mock_call.assert_called_once_with( - ['git', 'add', f'rust-{self.new_version}-*.patch'], - cwd=rust_uprev.RUST_PATH.joinpath('files')) - - @mock.patch.object(shutil, 'copyfile') - @mock.patch.object(subprocess, 'check_call') - def test_create_ebuild(self, mock_call, mock_copy): - template_ebuild = f'/path/to/rust-{self.current_version}-r2.ebuild' - rust_uprev.create_ebuild(template_ebuild, self.new_version) - mock_copy.assert_called_once_with( - template_ebuild, - rust_uprev.RUST_PATH.joinpath(f'rust-{self.new_version}.ebuild')) - mock_call.assert_called_once_with( - ['git', 'add', f'rust-{self.new_version}.ebuild'], - cwd=rust_uprev.RUST_PATH) - - @mock.patch.object(rust_uprev, 'find_ebuild_for_package') - @mock.patch.object(subprocess, 'check_call') - def test_remove_rust_bootstrap_version(self, mock_call, *_args): - bootstrap_path = os.path.join(rust_uprev.RUST_PATH, '..', 'rust-bootstrap') - rust_uprev.remove_rust_bootstrap_version(self.old_version, lambda *x: ()) - mock_call.has_calls([ - [ - 'git', 'rm', - os.path.join(bootstrap_path, 'files', - 
f'rust-bootstrap-{self.old_version}-*.patch') - ], - [ - 'git', 'rm', - os.path.join(bootstrap_path, - f'rust-bootstrap-{self.old_version}.ebuild') - ], - ]) - - @mock.patch.object(rust_uprev, 'find_ebuild_path') - @mock.patch.object(subprocess, 'check_call') - def test_remove_virtual_rust(self, mock_call, mock_find_ebuild): - ebuild_path = Path( - f'/some/dir/virtual/rust/rust-{self.old_version}.ebuild') - mock_find_ebuild.return_value = Path(ebuild_path) - rust_uprev.remove_virtual_rust(self.old_version) - mock_call.assert_called_once_with( - ['git', 'rm', str(ebuild_path.name)], cwd=ebuild_path.parent) - - @mock.patch.object(rust_uprev, 'find_ebuild_path') - @mock.patch.object(shutil, 'copyfile') - @mock.patch.object(subprocess, 'check_call') - def test_update_virtual_rust(self, mock_call, mock_copy, mock_find_ebuild): - ebuild_path = Path( - f'/some/dir/virtual/rust/rust-{self.current_version}.ebuild') - mock_find_ebuild.return_value = Path(ebuild_path) - rust_uprev.update_virtual_rust(self.current_version, self.new_version) - mock_call.assert_called_once_with( - ['git', 'add', f'rust-{self.new_version}.ebuild'], - cwd=ebuild_path.parent) - mock_copy.assert_called_once_with( - ebuild_path.parent.joinpath(f'rust-{self.current_version}.ebuild'), - ebuild_path.parent.joinpath(f'rust-{self.new_version}.ebuild')) - - @mock.patch.object(os, 'listdir') - def test_find_oldest_rust_version_in_chroot_pass(self, mock_ls): - oldest_version_name = f'rust-{self.old_version}.ebuild' - mock_ls.return_value = [ - oldest_version_name, f'rust-{self.current_version}.ebuild', - f'rust-{self.new_version}.ebuild' - ] - actual = rust_uprev.find_oldest_rust_version_in_chroot() - expected = (self.old_version, - os.path.join(rust_uprev.RUST_PATH, oldest_version_name)) - self.assertEqual(expected, actual) - - @mock.patch.object(os, 'listdir') - def test_find_oldest_rust_version_in_chroot_fail_with_only_one_ebuild( - self, mock_ls): - mock_ls.return_value = [f'rust-{self.new_version}.ebuild'] - with self.assertRaises(RuntimeError) as context: - rust_uprev.find_oldest_rust_version_in_chroot() - self.assertEqual('Expect to find more than one Rust versions', - str(context.exception)) - - @mock.patch.object(rust_uprev, 'get_command_output') - @mock.patch.object(git, 'CreateBranch') - def test_create_new_repo(self, mock_branch, mock_output): - mock_output.return_value = '' - rust_uprev.create_new_repo(self.new_version) - mock_branch.assert_called_once_with(rust_uprev.RUST_PATH, - f'rust-to-{self.new_version}') - - @mock.patch.object(rust_uprev, 'get_command_output') - @mock.patch.object(subprocess, 'check_call') - def test_build_cross_compiler(self, mock_call, mock_output): - mock_output.return_value = f'rust-{self.new_version}.ebuild' - cros_targets = [ - 'x86_64-cros-linux-gnu', - 'armv7a-cros-linux-gnueabihf', - 'aarch64-cros-linux-gnu', - ] - all_triples = ['x86_64-pc-linux-gnu'] + cros_targets - rust_ebuild = 'RUSTC_TARGET_TRIPLES=(' + '\n\t'.join(all_triples) + ')' - mock_open = mock.mock_open(read_data=rust_ebuild) - with mock.patch('builtins.open', mock_open): - rust_uprev.build_cross_compiler() - - mock_call.assert_called_once_with( - ['sudo', 'emerge', '-j', '-G'] + - [f'cross-{x}/gcc' for x in cros_targets + ['arm-none-eabi']]) - - -if __name__ == '__main__': - unittest.main() + """Tests for other steps in rust_uprev""" + + def setUp(self): + self.old_version = rust_uprev.RustVersion(1, 1, 0) + self.current_version = rust_uprev.RustVersion(1, 2, 3) + self.new_version = rust_uprev.RustVersion(1, 3, 5) + 
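+        # Note: the path below is not an f-string, so its braces are
+        # literal; the attribute appears unused by the tests in this class.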
self.ebuild_file = os.path.join( + rust_uprev.RUST_PATH, "rust-{self.new_version}.ebuild" + ) + + @mock.patch.object(shutil, "copyfile") + @mock.patch.object(os, "listdir") + @mock.patch.object(subprocess, "check_call") + def test_copy_patches(self, mock_call, mock_ls, mock_copy): + mock_ls.return_value = [ + f"rust-{self.old_version}-patch-1.patch", + f"rust-{self.old_version}-patch-2-old.patch", + f"rust-{self.current_version}-patch-1.patch", + f"rust-{self.current_version}-patch-2-new.patch", + ] + rust_uprev.copy_patches( + rust_uprev.RUST_PATH, self.current_version, self.new_version + ) + mock_copy.assert_has_calls( + [ + mock.call( + os.path.join( + rust_uprev.RUST_PATH, + "files", + f"rust-{self.current_version}-patch-1.patch", + ), + os.path.join( + rust_uprev.RUST_PATH, + "files", + f"rust-{self.new_version}-patch-1.patch", + ), + ), + mock.call( + os.path.join( + rust_uprev.RUST_PATH, + "files", + f"rust-{self.current_version}-patch-2-new.patch", + ), + os.path.join( + rust_uprev.RUST_PATH, + "files", + f"rust-{self.new_version}-patch-2-new.patch", + ), + ), + ] + ) + mock_call.assert_called_once_with( + ["git", "add", f"rust-{self.new_version}-*.patch"], + cwd=rust_uprev.RUST_PATH.joinpath("files"), + ) + + @mock.patch.object(shutil, "copyfile") + @mock.patch.object(subprocess, "check_call") + def test_create_ebuild(self, mock_call, mock_copy): + template_ebuild = f"/path/to/rust-{self.current_version}-r2.ebuild" + rust_uprev.create_ebuild(template_ebuild, self.new_version) + mock_copy.assert_called_once_with( + template_ebuild, + rust_uprev.RUST_PATH.joinpath(f"rust-{self.new_version}.ebuild"), + ) + mock_call.assert_called_once_with( + ["git", "add", f"rust-{self.new_version}.ebuild"], + cwd=rust_uprev.RUST_PATH, + ) + + @mock.patch.object(rust_uprev, "find_ebuild_for_package") + @mock.patch.object(subprocess, "check_call") + def test_remove_rust_bootstrap_version(self, mock_call, *_args): + bootstrap_path = os.path.join( + rust_uprev.RUST_PATH, "..", "rust-bootstrap" + ) + rust_uprev.remove_rust_bootstrap_version( + self.old_version, lambda *x: () + ) + mock_call.has_calls( + [ + [ + "git", + "rm", + os.path.join( + bootstrap_path, + "files", + f"rust-bootstrap-{self.old_version}-*.patch", + ), + ], + [ + "git", + "rm", + os.path.join( + bootstrap_path, + f"rust-bootstrap-{self.old_version}.ebuild", + ), + ], + ] + ) + + @mock.patch.object(rust_uprev, "find_ebuild_path") + @mock.patch.object(subprocess, "check_call") + def test_remove_virtual_rust(self, mock_call, mock_find_ebuild): + ebuild_path = Path( + f"/some/dir/virtual/rust/rust-{self.old_version}.ebuild" + ) + mock_find_ebuild.return_value = Path(ebuild_path) + rust_uprev.remove_virtual_rust(self.old_version) + mock_call.assert_called_once_with( + ["git", "rm", str(ebuild_path.name)], cwd=ebuild_path.parent + ) + + @mock.patch.object(rust_uprev, "find_ebuild_path") + @mock.patch.object(shutil, "copyfile") + @mock.patch.object(subprocess, "check_call") + def test_update_virtual_rust(self, mock_call, mock_copy, mock_find_ebuild): + ebuild_path = Path( + f"/some/dir/virtual/rust/rust-{self.current_version}.ebuild" + ) + mock_find_ebuild.return_value = Path(ebuild_path) + rust_uprev.update_virtual_rust(self.current_version, self.new_version) + mock_call.assert_called_once_with( + ["git", "add", f"rust-{self.new_version}.ebuild"], + cwd=ebuild_path.parent, + ) + mock_copy.assert_called_once_with( + ebuild_path.parent.joinpath(f"rust-{self.current_version}.ebuild"), + 
ebuild_path.parent.joinpath(f"rust-{self.new_version}.ebuild"), + ) + + @mock.patch.object(os, "listdir") + def test_find_oldest_rust_version_in_chroot_pass(self, mock_ls): + oldest_version_name = f"rust-{self.old_version}.ebuild" + mock_ls.return_value = [ + oldest_version_name, + f"rust-{self.current_version}.ebuild", + f"rust-{self.new_version}.ebuild", + ] + actual = rust_uprev.find_oldest_rust_version_in_chroot() + expected = ( + self.old_version, + os.path.join(rust_uprev.RUST_PATH, oldest_version_name), + ) + self.assertEqual(expected, actual) + + @mock.patch.object(os, "listdir") + def test_find_oldest_rust_version_in_chroot_fail_with_only_one_ebuild( + self, mock_ls + ): + mock_ls.return_value = [f"rust-{self.new_version}.ebuild"] + with self.assertRaises(RuntimeError) as context: + rust_uprev.find_oldest_rust_version_in_chroot() + self.assertEqual( + "Expect to find more than one Rust versions", str(context.exception) + ) + + @mock.patch.object(rust_uprev, "get_command_output") + @mock.patch.object(git, "CreateBranch") + def test_create_new_repo(self, mock_branch, mock_output): + mock_output.return_value = "" + rust_uprev.create_new_repo(self.new_version) + mock_branch.assert_called_once_with( + rust_uprev.RUST_PATH, f"rust-to-{self.new_version}" + ) + + @mock.patch.object(rust_uprev, "get_command_output") + @mock.patch.object(subprocess, "check_call") + def test_build_cross_compiler(self, mock_call, mock_output): + mock_output.return_value = f"rust-{self.new_version}.ebuild" + cros_targets = [ + "x86_64-cros-linux-gnu", + "armv7a-cros-linux-gnueabihf", + "aarch64-cros-linux-gnu", + ] + all_triples = ["x86_64-pc-linux-gnu"] + cros_targets + rust_ebuild = "RUSTC_TARGET_TRIPLES=(" + "\n\t".join(all_triples) + ")" + mock_open = mock.mock_open(read_data=rust_ebuild) + with mock.patch("builtins.open", mock_open): + rust_uprev.build_cross_compiler() + + mock_call.assert_called_once_with( + ["sudo", "emerge", "-j", "-G"] + + [f"cross-{x}/gcc" for x in cros_targets + ["arm-none-eabi"]] + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/rust_tools/rust_watch.py b/rust_tools/rust_watch.py index 59de0ca8..1ab27413 100755 --- a/rust_tools/rust_watch.py +++ b/rust_tools/rust_watch.py @@ -21,362 +21,391 @@ import sys import time from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Tuple -from cros_utils import bugs, email_sender, tiny_render +from cros_utils import bugs +from cros_utils import email_sender +from cros_utils import tiny_render def gentoo_sha_to_link(sha: str) -> str: - """Gets a URL to a webpage that shows the Gentoo commit at `sha`.""" - return f'https://gitweb.gentoo.org/repo/gentoo.git/commit?id={sha}' + """Gets a URL to a webpage that shows the Gentoo commit at `sha`.""" + return f"https://gitweb.gentoo.org/repo/gentoo.git/commit?id={sha}" def send_email(subject: str, body: List[tiny_render.Piece]) -> None: - """Sends an email with the given title and body to... whoever cares.""" - email_sender.EmailSender().SendX20Email( - subject=subject, - identifier='rust-watch', - well_known_recipients=['cros-team'], - text_body=tiny_render.render_text_pieces(body), - html_body=tiny_render.render_html_pieces(body), - ) + """Sends an email with the given title and body to... 
whoever cares.""" + email_sender.EmailSender().SendX20Email( + subject=subject, + identifier="rust-watch", + well_known_recipients=["cros-team"], + text_body=tiny_render.render_text_pieces(body), + html_body=tiny_render.render_html_pieces(body), + ) class RustReleaseVersion(NamedTuple): - """Represents a version of Rust's stable compiler.""" - major: int - minor: int - patch: int + """Represents a version of Rust's stable compiler.""" + + major: int + minor: int + patch: int - @staticmethod - def from_string(version_string: str) -> 'RustReleaseVersion': - m = re.match(r'(\d+)\.(\d+)\.(\d+)', version_string) - if not m: - raise ValueError(f"{version_string!r} isn't a valid version string") - return RustReleaseVersion(*[int(x) for x in m.groups()]) + @staticmethod + def from_string(version_string: str) -> "RustReleaseVersion": + m = re.match(r"(\d+)\.(\d+)\.(\d+)", version_string) + if not m: + raise ValueError(f"{version_string!r} isn't a valid version string") + return RustReleaseVersion(*[int(x) for x in m.groups()]) - def __str__(self) -> str: - return f'{self.major}.{self.minor}.{self.patch}' + def __str__(self) -> str: + return f"{self.major}.{self.minor}.{self.patch}" - def to_json(self) -> str: - return str(self) + def to_json(self) -> str: + return str(self) - @staticmethod - def from_json(s: str) -> 'RustReleaseVersion': - return RustReleaseVersion.from_string(s) + @staticmethod + def from_json(s: str) -> "RustReleaseVersion": + return RustReleaseVersion.from_string(s) class State(NamedTuple): - """State that we keep around from run to run.""" - # The last Rust release tag that we've seen. - last_seen_release: RustReleaseVersion - - # We track Gentoo's upstream Rust ebuild. This is the last SHA we've seen - # that updates it. - last_gentoo_sha: str - - def to_json(self) -> Dict[str, Any]: - return { - 'last_seen_release': self.last_seen_release.to_json(), - 'last_gentoo_sha': self.last_gentoo_sha, - } - - @staticmethod - def from_json(s: Dict[str, Any]) -> 'State': - return State( - last_seen_release=RustReleaseVersion.from_json(s['last_seen_release']), - last_gentoo_sha=s['last_gentoo_sha'], - ) + """State that we keep around from run to run.""" + + # The last Rust release tag that we've seen. + last_seen_release: RustReleaseVersion + + # We track Gentoo's upstream Rust ebuild. This is the last SHA we've seen + # that updates it. + last_gentoo_sha: str + + def to_json(self) -> Dict[str, Any]: + return { + "last_seen_release": self.last_seen_release.to_json(), + "last_gentoo_sha": self.last_gentoo_sha, + } + + @staticmethod + def from_json(s: Dict[str, Any]) -> "State": + return State( + last_seen_release=RustReleaseVersion.from_json( + s["last_seen_release"] + ), + last_gentoo_sha=s["last_gentoo_sha"], + ) def parse_release_tags(lines: Iterable[str]) -> Iterable[RustReleaseVersion]: - """Parses `git ls-remote --tags` output into Rust stable release versions.""" - refs_tags = 'refs/tags/' - for line in lines: - _sha, tag = line.split(None, 1) - tag = tag.strip() - # Each tag has an associated 'refs/tags/name^{}', which is the actual - # object that the tag points to. That's irrelevant to us. - if tag.endswith('^{}'): - continue - - if not tag.startswith(refs_tags): - continue - - short_tag = tag[len(refs_tags):] - # There are a few old versioning schemes. Ignore them. 
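#       (illustrative examples, not in the original: pre-1.0 tags such as
#       "0.9" or "release-0.13.0" match these prefixes and are skipped)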
- if short_tag.startswith('0.') or short_tag.startswith('release-'): - continue - yield RustReleaseVersion.from_string(short_tag) + """Parses `git ls-remote --tags` output into Rust stable release versions.""" + refs_tags = "refs/tags/" + for line in lines: + _sha, tag = line.split(None, 1) + tag = tag.strip() + # Each tag has an associated 'refs/tags/name^{}', which is the actual + # object that the tag points to. That's irrelevant to us. + if tag.endswith("^{}"): + continue + + if not tag.startswith(refs_tags): + continue + + short_tag = tag[len(refs_tags) :] + # There are a few old versioning schemes. Ignore them. + if short_tag.startswith("0.") or short_tag.startswith("release-"): + continue + yield RustReleaseVersion.from_string(short_tag) def fetch_most_recent_release() -> RustReleaseVersion: - """Fetches the most recent stable `rustc` version.""" - result = subprocess.run( - ['git', 'ls-remote', '--tags', 'https://github.com/rust-lang/rust'], - check=True, - stdin=None, - capture_output=True, - encoding='utf-8', - ) - tag_lines = result.stdout.strip().splitlines() - return max(parse_release_tags(tag_lines)) + """Fetches the most recent stable `rustc` version.""" + result = subprocess.run( + ["git", "ls-remote", "--tags", "https://github.com/rust-lang/rust"], + check=True, + stdin=None, + capture_output=True, + encoding="utf-8", + ) + tag_lines = result.stdout.strip().splitlines() + return max(parse_release_tags(tag_lines)) class GitCommit(NamedTuple): - """Represents a single git commit.""" - sha: str - subject: str + """Represents a single git commit.""" + + sha: str + subject: str def update_git_repo(git_dir: pathlib.Path) -> None: - """Updates the repo at `git_dir`, retrying a few times on failure.""" - for i in itertools.count(start=1): - result = subprocess.run( - ['git', 'fetch', 'origin'], + """Updates the repo at `git_dir`, retrying a few times on failure.""" + for i in itertools.count(start=1): + result = subprocess.run( + ["git", "fetch", "origin"], + check=False, + cwd=str(git_dir), + stdin=None, + ) + + if not result.returncode: + break + + if i == 5: + # 5 attempts is too many. Something else may be wrong. + result.check_returncode() + + sleep_time = 60 * i + logging.error( + "Failed updating gentoo's repo; will try again in %ds...", + sleep_time, + ) + time.sleep(sleep_time) + + +def get_new_gentoo_commits( + git_dir: pathlib.Path, most_recent_sha: str +) -> List[GitCommit]: + """Gets commits to dev-lang/rust since `most_recent_sha`. + + Older commits come earlier in the returned list. + """ + commits = subprocess.run( + [ + "git", + "log", + "--format=%H %s", + f"{most_recent_sha}..origin/master", # nocheck + "--", + "dev-lang/rust", + ], + capture_output=True, check=False, cwd=str(git_dir), - stdin=None, + encoding="utf-8", ) - if not result.returncode: - break - - if i == 5: - # 5 attempts is too many. Something else may be wrong. - result.check_returncode() - - sleep_time = 60 * i - logging.error("Failed updating gentoo's repo; will try again in %ds...", - sleep_time) - time.sleep(sleep_time) - - -def get_new_gentoo_commits(git_dir: pathlib.Path, - most_recent_sha: str) -> List[GitCommit]: - """Gets commits to dev-lang/rust since `most_recent_sha`. - - Older commits come earlier in the returned list. 
- """ - commits = subprocess.run( - [ - 'git', - 'log', - '--format=%H %s', - f'{most_recent_sha}..origin/master', # nocheck - '--', - 'dev-lang/rust', - ], - capture_output=True, - check=False, - cwd=str(git_dir), - encoding='utf-8', - ) - - if commits.returncode: - logging.error('Error getting new gentoo commits; stderr:\n%s', - commits.stderr) - commits.check_returncode() - - results = [] - for line in commits.stdout.strip().splitlines(): - sha, subject = line.strip().split(None, 1) - results.append(GitCommit(sha=sha, subject=subject)) - - # `git log` outputs things in newest -> oldest order. - results.reverse() - return results + if commits.returncode: + logging.error( + "Error getting new gentoo commits; stderr:\n%s", commits.stderr + ) + commits.check_returncode() + + results = [] + for line in commits.stdout.strip().splitlines(): + sha, subject = line.strip().split(None, 1) + results.append(GitCommit(sha=sha, subject=subject)) + + # `git log` outputs things in newest -> oldest order. + results.reverse() + return results def setup_gentoo_git_repo(git_dir: pathlib.Path) -> str: - """Sets up a gentoo git repo at the given directory. Returns HEAD.""" - subprocess.run( - [ - 'git', 'clone', 'https://anongit.gentoo.org/git/repo/gentoo.git', - str(git_dir) - ], - stdin=None, - check=True, - ) - - head_rev = subprocess.run( - ['git', 'rev-parse', 'HEAD'], - cwd=str(git_dir), - check=True, - stdin=None, - capture_output=True, - encoding='utf-8', - ) - return head_rev.stdout.strip() + """Sets up a gentoo git repo at the given directory. Returns HEAD.""" + subprocess.run( + [ + "git", + "clone", + "https://anongit.gentoo.org/git/repo/gentoo.git", + str(git_dir), + ], + stdin=None, + check=True, + ) + + head_rev = subprocess.run( + ["git", "rev-parse", "HEAD"], + cwd=str(git_dir), + check=True, + stdin=None, + capture_output=True, + encoding="utf-8", + ) + return head_rev.stdout.strip() def read_state(state_file: pathlib.Path) -> State: - """Reads state from the given file.""" - with state_file.open(encoding='utf-8') as f: - return State.from_json(json.load(f)) + """Reads state from the given file.""" + with state_file.open(encoding="utf-8") as f: + return State.from_json(json.load(f)) def atomically_write_state(state_file: pathlib.Path, state: State) -> None: - """Writes state to the given file.""" - temp_file = pathlib.Path(str(state_file) + '.new') - with temp_file.open('w', encoding='utf-8') as f: - json.dump(state.to_json(), f) - temp_file.rename(state_file) + """Writes state to the given file.""" + temp_file = pathlib.Path(str(state_file) + ".new") + with temp_file.open("w", encoding="utf-8") as f: + json.dump(state.to_json(), f) + temp_file.rename(state_file) def file_bug(title: str, body: str) -> None: - """Files a bug against gbiv@ with the given title/body.""" - bugs.CreateNewBug( - bugs.WellKnownComponents.CrOSToolchainPublic, - title, - body, - # To either take or reassign depending on the rotation. - assignee='gbiv@google.com', - ) + """Files a bug against gbiv@ with the given title/body.""" + bugs.CreateNewBug( + bugs.WellKnownComponents.CrOSToolchainPublic, + title, + body, + # To either take or reassign depending on the rotation. 
+ assignee="gbiv@google.com", + ) def maybe_compose_bug( old_state: State, newest_release: RustReleaseVersion, ) -> Optional[Tuple[str, str]]: - """Creates a bug to file about the new release, if doing is desired.""" - if newest_release == old_state.last_seen_release: - return None - - title = f'[Rust] Update to {newest_release}' - body = ('A new release has been detected; we should probably roll to it. ' - "Please see go/crostc-rust-rotation for who's turn it is.") - return title, body + """Creates a bug to file about the new release, if doing is desired.""" + if newest_release == old_state.last_seen_release: + return None + + title = f"[Rust] Update to {newest_release}" + body = ( + "A new release has been detected; we should probably roll to it. " + "Please see go/crostc-rust-rotation for who's turn it is." + ) + return title, body def maybe_compose_email( - new_gentoo_commits: List[GitCommit] + new_gentoo_commits: List[GitCommit], ) -> Optional[Tuple[str, List[tiny_render.Piece]]]: - """Creates an email given our new state, if doing so is appropriate.""" - if not new_gentoo_commits: - return None - - subject_pieces = [] - body_pieces = [] - - # Separate the sections a bit for prettier output. - if body_pieces: - body_pieces += [tiny_render.line_break, tiny_render.line_break] - - if len(new_gentoo_commits) == 1: - subject_pieces.append('new rust ebuild commit detected') - body_pieces.append('commit:') - else: - subject_pieces.append('new rust ebuild commits detected') - body_pieces.append('commits (newest first):') - - commit_lines = [] - for commit in new_gentoo_commits: - commit_lines.append([ - tiny_render.Link( - gentoo_sha_to_link(commit.sha), - commit.sha[:12], - ), - f': {commit.subject}', - ]) + """Creates an email given our new state, if doing so is appropriate.""" + if not new_gentoo_commits: + return None + + subject_pieces = [] + body_pieces = [] + + # Separate the sections a bit for prettier output. + if body_pieces: + body_pieces += [tiny_render.line_break, tiny_render.line_break] + + if len(new_gentoo_commits) == 1: + subject_pieces.append("new rust ebuild commit detected") + body_pieces.append("commit:") + else: + subject_pieces.append("new rust ebuild commits detected") + body_pieces.append("commits (newest first):") + + commit_lines = [] + for commit in new_gentoo_commits: + commit_lines.append( + [ + tiny_render.Link( + gentoo_sha_to_link(commit.sha), + commit.sha[:12], + ), + f": {commit.subject}", + ] + ) - body_pieces.append(tiny_render.UnorderedList(commit_lines)) + body_pieces.append(tiny_render.UnorderedList(commit_lines)) - subject = '[rust-watch] ' + '; '.join(subject_pieces) - return subject, body_pieces + subject = "[rust-watch] " + "; ".join(subject_pieces) + return subject, body_pieces def main(argv: List[str]) -> None: - logging.basicConfig(level=logging.INFO) - - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('--state_dir', - required=True, - help='Directory to store state in.') - parser.add_argument('--skip_side_effects', - action='store_true', - help="Don't send an email or file a bug.") - parser.add_argument( - '--skip_state_update', - action='store_true', - help="Don't update the state file. 
Doesn't apply to initial setup.") - opts = parser.parse_args(argv) - - state_dir = pathlib.Path(opts.state_dir) - state_file = state_dir / 'state.json' - gentoo_subdir = state_dir / 'upstream-gentoo' - if not state_file.exists(): - logging.info("state_dir isn't fully set up; doing that now.") - - # Could be in a partially set-up state. - if state_dir.exists(): - logging.info('incomplete state_dir detected; removing.') - shutil.rmtree(str(state_dir)) - - state_dir.mkdir(parents=True) + logging.basicConfig(level=logging.INFO) + + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "--state_dir", required=True, help="Directory to store state in." + ) + parser.add_argument( + "--skip_side_effects", + action="store_true", + help="Don't send an email or file a bug.", + ) + parser.add_argument( + "--skip_state_update", + action="store_true", + help="Don't update the state file. Doesn't apply to initial setup.", + ) + opts = parser.parse_args(argv) + + state_dir = pathlib.Path(opts.state_dir) + state_file = state_dir / "state.json" + gentoo_subdir = state_dir / "upstream-gentoo" + if not state_file.exists(): + logging.info("state_dir isn't fully set up; doing that now.") + + # Could be in a partially set-up state. + if state_dir.exists(): + logging.info("incomplete state_dir detected; removing.") + shutil.rmtree(str(state_dir)) + + state_dir.mkdir(parents=True) + most_recent_release = fetch_most_recent_release() + most_recent_gentoo_commit = setup_gentoo_git_repo(gentoo_subdir) + atomically_write_state( + state_file, + State( + last_seen_release=most_recent_release, + last_gentoo_sha=most_recent_gentoo_commit, + ), + ) + # Running through this _should_ be a nop, but do it anyway. Should make any + # bugs more obvious on the first run of the script. + + prior_state = read_state(state_file) + logging.info("Last state was %r", prior_state) + most_recent_release = fetch_most_recent_release() - most_recent_gentoo_commit = setup_gentoo_git_repo(gentoo_subdir) + logging.info("Most recent Rust release is %s", most_recent_release) + + logging.info("Fetching new commits from Gentoo") + update_git_repo(gentoo_subdir) + new_commits = get_new_gentoo_commits( + gentoo_subdir, prior_state.last_gentoo_sha + ) + logging.info("New commits: %r", new_commits) + + maybe_bug = maybe_compose_bug(prior_state, most_recent_release) + maybe_email = maybe_compose_email(new_commits) + + if maybe_bug is None: + logging.info("No bug to file") + else: + title, body = maybe_bug + if opts.skip_side_effects: + logging.info( + "Skipping sending bug with title %r and contents\n%s", + title, + body, + ) + else: + logging.info("Writing new bug") + file_bug(title, body) + + if maybe_email is None: + logging.info("No email to send") + else: + title, body = maybe_email + if opts.skip_side_effects: + logging.info( + "Skipping sending email with title %r and contents\n%s", + title, + tiny_render.render_html_pieces(body), + ) + else: + logging.info("Sending email") + send_email(title, body) + + if opts.skip_state_update: + logging.info("Skipping state update, as requested") + return + + newest_sha = ( + new_commits[-1].sha if new_commits else prior_state.last_gentoo_sha + ) atomically_write_state( state_file, State( last_seen_release=most_recent_release, - last_gentoo_sha=most_recent_gentoo_commit, + last_gentoo_sha=newest_sha, ), ) - # Running through this _should_ be a nop, but do it anyway. 
Should make any - # bugs more obvious on the first run of the script. - - prior_state = read_state(state_file) - logging.info('Last state was %r', prior_state) - - most_recent_release = fetch_most_recent_release() - logging.info('Most recent Rust release is %s', most_recent_release) - - logging.info('Fetching new commits from Gentoo') - update_git_repo(gentoo_subdir) - new_commits = get_new_gentoo_commits(gentoo_subdir, - prior_state.last_gentoo_sha) - logging.info('New commits: %r', new_commits) - - maybe_bug = maybe_compose_bug(prior_state, most_recent_release) - maybe_email = maybe_compose_email(new_commits) - - if maybe_bug is None: - logging.info('No bug to file') - else: - title, body = maybe_bug - if opts.skip_side_effects: - logging.info('Skipping sending bug with title %r and contents\n%s', - title, body) - else: - logging.info('Writing new bug') - file_bug(title, body) - - if maybe_email is None: - logging.info('No email to send') - else: - title, body = maybe_email - if opts.skip_side_effects: - logging.info('Skipping sending email with title %r and contents\n%s', - title, tiny_render.render_html_pieces(body)) - else: - logging.info('Sending email') - send_email(title, body) - - if opts.skip_state_update: - logging.info('Skipping state update, as requested') - return - - newest_sha = (new_commits[-1].sha - if new_commits else prior_state.last_gentoo_sha) - atomically_write_state( - state_file, - State( - last_seen_release=most_recent_release, - last_gentoo_sha=newest_sha, - ), - ) - - -if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) + + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/rust_tools/rust_watch_test.py b/rust_tools/rust_watch_test.py index 3e25a950..dbeb0e80 100755 --- a/rust_tools/rust_watch_test.py +++ b/rust_tools/rust_watch_test.py @@ -14,154 +14,172 @@ import unittest import unittest.mock from cros_utils import tiny_render - import rust_watch class Test(unittest.TestCase): - """Tests.""" - def _silence_logs(self): - """Silences all log output until the end of the current test.""" - def should_log(_record): - return 0 - - logger = logging.root - logger.addFilter(should_log) - self.addCleanup(logger.removeFilter, should_log) - - def test_release_version_parsing(self): - self.assertEqual( - rust_watch.RustReleaseVersion.from_string('1.2.3'), - rust_watch.RustReleaseVersion(1, 2, 3), - ) - - def test_release_version_json_round_trips(self): - ver = rust_watch.RustReleaseVersion(1, 2, 3) - self.assertEqual(rust_watch.RustReleaseVersion.from_json(ver.to_json()), - ver) - - def test_state_json_round_trips(self): - state = rust_watch.State( - last_seen_release=rust_watch.RustReleaseVersion(1, 2, 3), - last_gentoo_sha='abc123', - ) - - self.assertEqual(rust_watch.State.from_json(state.to_json()), state) - - @unittest.mock.patch.object(subprocess, 'run') - @unittest.mock.patch.object(time, 'sleep') - def test_update_git_repo_tries_again_on_failure(self, sleep_mock, run_mock): - self._silence_logs() - - oh_no_error = ValueError('oh no') - - def check_returncode(): - raise oh_no_error - - run_call_count = 0 - - def run_sideeffect(*_args, **_kwargs): - nonlocal run_call_count - run_call_count += 1 - result = unittest.mock.Mock() - result.returncode = 1 - result.check_returncode = check_returncode - return result - - run_mock.side_effect = run_sideeffect - - with self.assertRaises(ValueError) as raised: - rust_watch.update_git_repo(pathlib.Path('/does/not/exist/at/all')) - - self.assertIs(raised.exception, oh_no_error) - 
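[Editor's note: for reference, the retry contract this test pins down is five attempts with linear backoff (sleeps of 60s, 120s, 180s, and 240s between failures), with the final failure propagated. A minimal sketch of that shape, inferred from the assertions here rather than taken from the module's actual update_git_repo implementation:

    import subprocess
    import time

    def run_with_linear_backoff(cmd, tries=5):
        """Run `cmd`, sleeping 60 * attempt seconds between failures."""
        for attempt in range(1, tries + 1):
            result = subprocess.run(cmd, check=False)
            if result.returncode == 0:
                return result
            if attempt == tries:
                result.check_returncode()  # raises on the final failure
            time.sleep(60 * attempt)

Five runs and four sleeps is exactly what the run_call_count and sleep_timings assertions in this test verify.]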
self.assertEqual(run_call_count, 5) - - sleep_timings = [unittest.mock.call(60 * i) for i in range(1, 5)] - self.assertEqual(sleep_mock.mock_calls, sleep_timings) - - @unittest.mock.patch.object(subprocess, 'run') - def test_get_new_gentoo_commits_functions(self, run_mock): - returned = unittest.mock.Mock() - returned.returncode = 0 - returned.stdout = '\n'.join(( - 'abc123 newer commit', - 'abcdef and an older commit', - )) - run_mock.return_value = returned - results = rust_watch.get_new_gentoo_commits( - pathlib.Path('/does/not/exist/at/all'), 'defabc') - self.assertEqual(results, [ - rust_watch.GitCommit('abcdef', 'and an older commit'), - rust_watch.GitCommit('abc123', 'newer commit'), - ]) - - def test_compose_email_on_a_new_gentoo_commit(self): - sha_a = 'a' * 40 - new_commit = rust_watch.maybe_compose_email(new_gentoo_commits=[ - rust_watch.GitCommit( - sha=sha_a, - subject='summary_a', - ), - ], ) - - self.assertEqual(new_commit, - ('[rust-watch] new rust ebuild commit detected', [ - 'commit:', - tiny_render.UnorderedList([ - [ - tiny_render.Link( - rust_watch.gentoo_sha_to_link(sha_a), - sha_a[:12], - ), - ': summary_a', - ], - ]) - ])) - - def test_compose_email_composes_nothing_when_no_new_updates_exist(self): - self.assertIsNone(rust_watch.maybe_compose_email(new_gentoo_commits=())) - - def test_compose_bug_creates_bugs_on_new_versions(self): - title, body = rust_watch.maybe_compose_bug( - old_state=rust_watch.State( - last_seen_release=rust_watch.RustReleaseVersion(1, 0, 0), - last_gentoo_sha='', - ), - newest_release=rust_watch.RustReleaseVersion(1, 0, 1), - ) - self.assertEqual(title, '[Rust] Update to 1.0.1') - self.assertTrue(body.startswith('A new release has been detected;')) - - title, body = rust_watch.maybe_compose_bug( - old_state=rust_watch.State( - last_seen_release=rust_watch.RustReleaseVersion(1, 0, 0), - last_gentoo_sha='', - ), - newest_release=rust_watch.RustReleaseVersion(1, 1, 0), - ) - self.assertEqual(title, '[Rust] Update to 1.1.0') - self.assertTrue(body.startswith('A new release has been detected;')) - - title, body = rust_watch.maybe_compose_bug( - old_state=rust_watch.State( - last_seen_release=rust_watch.RustReleaseVersion(1, 0, 0), - last_gentoo_sha='', - ), - newest_release=rust_watch.RustReleaseVersion(2, 0, 0), - ) - self.assertEqual(title, '[Rust] Update to 2.0.0') - self.assertTrue(body.startswith('A new release has been detected;')) - - def test_compose_bug_does_nothing_when_no_new_updates_exist(self): - self.assertIsNone( - rust_watch.maybe_compose_bug( + """Tests.""" + + def _silence_logs(self): + """Silences all log output until the end of the current test.""" + + def should_log(_record): + return 0 + + logger = logging.root + logger.addFilter(should_log) + self.addCleanup(logger.removeFilter, should_log) + + def test_release_version_parsing(self): + self.assertEqual( + rust_watch.RustReleaseVersion.from_string("1.2.3"), + rust_watch.RustReleaseVersion(1, 2, 3), + ) + + def test_release_version_json_round_trips(self): + ver = rust_watch.RustReleaseVersion(1, 2, 3) + self.assertEqual( + rust_watch.RustReleaseVersion.from_json(ver.to_json()), ver + ) + + def test_state_json_round_trips(self): + state = rust_watch.State( + last_seen_release=rust_watch.RustReleaseVersion(1, 2, 3), + last_gentoo_sha="abc123", + ) + + self.assertEqual(rust_watch.State.from_json(state.to_json()), state) + + @unittest.mock.patch.object(subprocess, "run") + @unittest.mock.patch.object(time, "sleep") + def test_update_git_repo_tries_again_on_failure(self, 
sleep_mock, run_mock): + self._silence_logs() + + oh_no_error = ValueError("oh no") + + def check_returncode(): + raise oh_no_error + + run_call_count = 0 + + def run_sideeffect(*_args, **_kwargs): + nonlocal run_call_count + run_call_count += 1 + result = unittest.mock.Mock() + result.returncode = 1 + result.check_returncode = check_returncode + return result + + run_mock.side_effect = run_sideeffect + + with self.assertRaises(ValueError) as raised: + rust_watch.update_git_repo(pathlib.Path("/does/not/exist/at/all")) + + self.assertIs(raised.exception, oh_no_error) + self.assertEqual(run_call_count, 5) + + sleep_timings = [unittest.mock.call(60 * i) for i in range(1, 5)] + self.assertEqual(sleep_mock.mock_calls, sleep_timings) + + @unittest.mock.patch.object(subprocess, "run") + def test_get_new_gentoo_commits_functions(self, run_mock): + returned = unittest.mock.Mock() + returned.returncode = 0 + returned.stdout = "\n".join( + ( + "abc123 newer commit", + "abcdef and an older commit", + ) + ) + run_mock.return_value = returned + results = rust_watch.get_new_gentoo_commits( + pathlib.Path("/does/not/exist/at/all"), "defabc" + ) + self.assertEqual( + results, + [ + rust_watch.GitCommit("abcdef", "and an older commit"), + rust_watch.GitCommit("abc123", "newer commit"), + ], + ) + + def test_compose_email_on_a_new_gentoo_commit(self): + sha_a = "a" * 40 + new_commit = rust_watch.maybe_compose_email( + new_gentoo_commits=[ + rust_watch.GitCommit( + sha=sha_a, + subject="summary_a", + ), + ], + ) + + self.assertEqual( + new_commit, + ( + "[rust-watch] new rust ebuild commit detected", + [ + "commit:", + tiny_render.UnorderedList( + [ + [ + tiny_render.Link( + rust_watch.gentoo_sha_to_link(sha_a), + sha_a[:12], + ), + ": summary_a", + ], + ] + ), + ], + ), + ) + + def test_compose_email_composes_nothing_when_no_new_updates_exist(self): + self.assertIsNone(rust_watch.maybe_compose_email(new_gentoo_commits=())) + + def test_compose_bug_creates_bugs_on_new_versions(self): + title, body = rust_watch.maybe_compose_bug( old_state=rust_watch.State( last_seen_release=rust_watch.RustReleaseVersion(1, 0, 0), - last_gentoo_sha='', + last_gentoo_sha="", ), - newest_release=rust_watch.RustReleaseVersion(1, 0, 0), - )) + newest_release=rust_watch.RustReleaseVersion(1, 0, 1), + ) + self.assertEqual(title, "[Rust] Update to 1.0.1") + self.assertTrue(body.startswith("A new release has been detected;")) + title, body = rust_watch.maybe_compose_bug( + old_state=rust_watch.State( + last_seen_release=rust_watch.RustReleaseVersion(1, 0, 0), + last_gentoo_sha="", + ), + newest_release=rust_watch.RustReleaseVersion(1, 1, 0), + ) + self.assertEqual(title, "[Rust] Update to 1.1.0") + self.assertTrue(body.startswith("A new release has been detected;")) -if __name__ == '__main__': - unittest.main() + title, body = rust_watch.maybe_compose_bug( + old_state=rust_watch.State( + last_seen_release=rust_watch.RustReleaseVersion(1, 0, 0), + last_gentoo_sha="", + ), + newest_release=rust_watch.RustReleaseVersion(2, 0, 0), + ) + self.assertEqual(title, "[Rust] Update to 2.0.0") + self.assertTrue(body.startswith("A new release has been detected;")) + + def test_compose_bug_does_nothing_when_no_new_updates_exist(self): + self.assertIsNone( + rust_watch.maybe_compose_bug( + old_state=rust_watch.State( + last_seen_release=rust_watch.RustReleaseVersion(1, 0, 0), + last_gentoo_sha="", + ), + newest_release=rust_watch.RustReleaseVersion(1, 0, 0), + ) + ) + + +if __name__ == "__main__": + unittest.main() diff --git 
a/seccomp_tools/mass_seccomp_editor/mass_seccomp_editor.py b/seccomp_tools/mass_seccomp_editor/mass_seccomp_editor.py index 957227b8..ecc6bc83 100755 --- a/seccomp_tools/mass_seccomp_editor/mass_seccomp_editor.py +++ b/seccomp_tools/mass_seccomp_editor/mass_seccomp_editor.py @@ -15,259 +15,272 @@ from typing import Any, Iterable, Optional from dataclasses import dataclass, field # Pre-compiled regexes. -AMD64_RE = re.compile(r'.*(amd|x86_)64.*\.policy') -X86_RE = re.compile(r'.*x86.*\.policy') -AARCH64_RE = re.compile(r'.*a(arch|rm)64.*\.policy') -ARM_RE = re.compile(r'.*arm(v7)?.*\.policy') +AMD64_RE = re.compile(r".*(amd|x86_)64.*\.policy") +X86_RE = re.compile(r".*x86.*\.policy") +AARCH64_RE = re.compile(r".*a(arch|rm)64.*\.policy") +ARM_RE = re.compile(r".*arm(v7)?.*\.policy") @dataclass(frozen=True) class Policies: - """Dataclass to hold lists of policies which match certain types.""" - arm: list[str] = field(default_factory=list) - x86_64: list[str] = field(default_factory=list) - x86: list[str] = field(default_factory=list) - arm64: list[str] = field(default_factory=list) - none: list[str] = field(default_factory=list) + """Dataclass to hold lists of policies which match certain types.""" - def to_dict(self) -> dict[str, list[str]]: - """Convert this class to a dictionary.""" - return {**self.__dict__} + arm: list[str] = field(default_factory=list) + x86_64: list[str] = field(default_factory=list) + x86: list[str] = field(default_factory=list) + arm64: list[str] = field(default_factory=list) + none: list[str] = field(default_factory=list) + def to_dict(self) -> dict[str, list[str]]: + """Convert this class to a dictionary.""" + return {**self.__dict__} -def main(): - """Run the program from cmd line""" - args = parse_args() - if all(x is None for x in [args.all, args.b64, args.b32, args.none]): - print('Require at least one of {--all, --b64, --b32, --none}', - file=sys.stderr) - sys.exit(1) - matches, success = find_potential_policy_files(args.packages) - - separated = Policies() - - for m in matches: - if AMD64_RE.match(m): - separated.x86_64.append(m) - continue - if X86_RE.match(m): - separated.x86.append(m) - continue - if AARCH64_RE.match(m): - separated.arm64.append(m) - continue - if ARM_RE.match(m): - separated.arm.append(m) - continue - separated.none.append(m) - - syscall_lookup_table = _make_syscall_lookup_table(args) - - for (type_, val) in separated.to_dict().items(): - for fp in val: - syscalls = syscall_lookup_table[type_] - missing = check_missing_syscalls(syscalls, fp) - if missing is None: - print(f'E ({type_}) {fp}') - elif len(missing) == 0: - print(f'_ ({type_}) {fp}') - else: - missing_str = ','.join(missing) - print(f'M ({type_}) {fp} :: {missing_str}') - - if not args.edit: - sys.exit(0 if success else 2) - for (type_, val) in separated.to_dict().items(): - for fp in val: - syscalls = syscall_lookup_table[type_] - if args.force: - _confirm_add(fp, syscalls, args.yes) - continue - missing = check_missing_syscalls(syscalls, fp) - if missing is None or len(missing) == 0: - print(f'Already good for {fp} ({type_})') - else: - _confirm_add(fp, missing, args.yes) +def main(): + """Run the program from cmd line""" + args = parse_args() + if all(x is None for x in [args.all, args.b64, args.b32, args.none]): + print( + "Require at least one of {--all, --b64, --b32, --none}", + file=sys.stderr, + ) + sys.exit(1) + matches, success = find_potential_policy_files(args.packages) + + separated = Policies() + + for m in matches: + if AMD64_RE.match(m): + 
separated.x86_64.append(m) + continue + if X86_RE.match(m): + separated.x86.append(m) + continue + if AARCH64_RE.match(m): + separated.arm64.append(m) + continue + if ARM_RE.match(m): + separated.arm.append(m) + continue + separated.none.append(m) + + syscall_lookup_table = _make_syscall_lookup_table(args) + + for (type_, val) in separated.to_dict().items(): + for fp in val: + syscalls = syscall_lookup_table[type_] + missing = check_missing_syscalls(syscalls, fp) + if missing is None: + print(f"E ({type_}) {fp}") + elif len(missing) == 0: + print(f"_ ({type_}) {fp}") + else: + missing_str = ",".join(missing) + print(f"M ({type_}) {fp} :: {missing_str}") + + if not args.edit: + sys.exit(0 if success else 2) + + for (type_, val) in separated.to_dict().items(): + for fp in val: + syscalls = syscall_lookup_table[type_] + if args.force: + _confirm_add(fp, syscalls, args.yes) + continue + missing = check_missing_syscalls(syscalls, fp) + if missing is None or len(missing) == 0: + print(f"Already good for {fp} ({type_})") + else: + _confirm_add(fp, missing, args.yes) - sys.exit(0 if success else 2) + sys.exit(0 if success else 2) def _make_syscall_lookup_table(args: Any) -> dict[str, list[str]]: - """Make lookup table, segmented by all/b32/b64/none policies. - - Args: - args: Direct output from parse_args. - - Returns: - dict of syscalls we want to search for in each policy file, - where the key is the policy file arch, and the value is - a list of syscalls as strings. - """ - syscall_lookup_table = Policies().to_dict() - if args.all: - split_syscalls = [x.strip() for x in args.all.split(',')] - for v in syscall_lookup_table.values(): - v.extend(split_syscalls) - if args.b32: - split_syscalls = [x.strip() for x in args.b32.split(',')] - syscall_lookup_table['x86'].extend(split_syscalls) - syscall_lookup_table['arm'].extend(split_syscalls) - if args.b64: - split_syscalls = [x.strip() for x in args.b64.split(',')] - syscall_lookup_table['x86_64'].extend(split_syscalls) - syscall_lookup_table['arm64'].extend(split_syscalls) - if args.none: - split_syscalls = [x.strip() for x in args.none.split(',')] - syscall_lookup_table['none'].extend(split_syscalls) - return syscall_lookup_table + """Make lookup table, segmented by all/b32/b64/none policies. + + Args: + args: Direct output from parse_args. + + Returns: + dict of syscalls we want to search for in each policy file, + where the key is the policy file arch, and the value is + a list of syscalls as strings. + """ + syscall_lookup_table = Policies().to_dict() + if args.all: + split_syscalls = [x.strip() for x in args.all.split(",")] + for v in syscall_lookup_table.values(): + v.extend(split_syscalls) + if args.b32: + split_syscalls = [x.strip() for x in args.b32.split(",")] + syscall_lookup_table["x86"].extend(split_syscalls) + syscall_lookup_table["arm"].extend(split_syscalls) + if args.b64: + split_syscalls = [x.strip() for x in args.b64.split(",")] + syscall_lookup_table["x86_64"].extend(split_syscalls) + syscall_lookup_table["arm64"].extend(split_syscalls) + if args.none: + split_syscalls = [x.strip() for x in args.none.split(",")] + syscall_lookup_table["none"].extend(split_syscalls) + return syscall_lookup_table def _confirm_add(fp: str, syscalls: Iterable[str], noninteractive=None): - """Interactive confirmation check you wish to add a syscall. - - Args: - fp: filepath of the file to edit. - syscalls: list-like of syscalls to add to append to the files. - noninteractive: Just add the syscalls without asking. 
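[Editor's note: a small usage sketch of _make_syscall_lookup_table above, built on a hand-rolled argparse-style namespace. The module name and syscall values are illustrative; the field names mirror the flags defined in parse_args later in this file:

    import argparse

    import mass_seccomp_editor  # assumes this file is importable

    args = argparse.Namespace(
        all="fstatfs", b32="fstatfs64", b64=None, none=None
    )
    table = mass_seccomp_editor._make_syscall_lookup_table(args)
    print(table["x86"])     # -> ['fstatfs', 'fstatfs64']
    print(table["x86_64"])  # -> ['fstatfs']
    print(table["none"])    # -> ['fstatfs']

--all fans out to every arch bucket, while --b32/--b64 only touch the matching 32-bit or 64-bit buckets.]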
- """ - if noninteractive: - _update_seccomp(fp, list(syscalls)) - return - syscalls_str = ','.join(syscalls) - user_input = input(f'Add {syscalls_str} for {fp}? [y/N]> ') - if user_input.lower().startswith('y'): - _update_seccomp(fp, list(syscalls)) - print('Edited!') - else: - print(f'Skipping {fp}') + """Interactive confirmation check you wish to add a syscall. + + Args: + fp: filepath of the file to edit. + syscalls: list-like of syscalls to add to append to the files. + noninteractive: Just add the syscalls without asking. + """ + if noninteractive: + _update_seccomp(fp, list(syscalls)) + return + syscalls_str = ",".join(syscalls) + user_input = input(f"Add {syscalls_str} for {fp}? [y/N]> ") + if user_input.lower().startswith("y"): + _update_seccomp(fp, list(syscalls)) + print("Edited!") + else: + print(f"Skipping {fp}") def check_missing_syscalls(syscalls: list[str], fp: str) -> Optional[set[str]]: - """Return which specified syscalls are missing in the given file.""" - missing_syscalls = set(syscalls) - with open(fp) as f: - try: - lines = f.readlines() - for syscall in syscalls: - for line in lines: - if re.match(syscall + r':\s*1', line): - missing_syscalls.remove(syscall) - except UnicodeDecodeError: - return None - return missing_syscalls + """Return which specified syscalls are missing in the given file.""" + missing_syscalls = set(syscalls) + with open(fp) as f: + try: + lines = f.readlines() + for syscall in syscalls: + for line in lines: + if re.match(syscall + r":\s*1", line): + missing_syscalls.remove(syscall) + except UnicodeDecodeError: + return None + return missing_syscalls def _update_seccomp(fp: str, missing_syscalls: list[str]): - """Update the seccomp of the file based on the seccomp change type.""" - with open(fp, 'a') as f: - sorted_syscalls = sorted(missing_syscalls) - for to_write in sorted_syscalls: - f.write(to_write + ': 1\n') + """Update the seccomp of the file based on the seccomp change type.""" + with open(fp, "a") as f: + sorted_syscalls = sorted(missing_syscalls) + for to_write in sorted_syscalls: + f.write(to_write + ": 1\n") def _search_cmd(query: str, use_fd=True) -> list[str]: - if use_fd and shutil.which('fdfind') is not None: + if use_fd and shutil.which("fdfind") is not None: + return [ + "fdfind", + "-t", + "f", + "--full-path", + f"^.*{query}.*\\.policy$", + ] return [ - 'fdfind', - '-t', - 'f', - '--full-path', - f'^.*{query}.*\\.policy$', + "find", + ".", + "-regex", + f"^.*{query}.*\\.policy$", + "-type", + "f", ] - return [ - 'find', - '.', - '-regex', - f'^.*{query}.*\\.policy$', - '-type', - 'f', - ] def find_potential_policy_files(packages: list[str]) -> tuple[list[str], bool]: - """Find potentially related policy files to the given packages. - - Returns: - (policy_files, successful): A list of policy file paths, and a boolean - indicating whether all queries were successful in finding at least - one related policy file. - """ - all_queries_succeeded = True - matches = [] - for p in packages: - # It's quite common that hyphens are translated to underscores - # and similarly common that underscores are translated to hyphens. - # We make them agnostic here. 
- hyphen_agnostic = re.sub(r'[-_]', '[-_]', p) - cmd = subprocess.run( - _search_cmd(hyphen_agnostic), - stdout=subprocess.PIPE, - check=True, - ) - new_matches = [a for a in cmd.stdout.decode('utf-8').split('\n') if a] - if not new_matches: - print(f'WARNING: No matches found for {p}', file=sys.stderr) - all_queries_succeeded = False - else: - matches.extend(new_matches) - return matches, all_queries_succeeded + """Find potentially related policy files to the given packages. + + Returns: + (policy_files, successful): A list of policy file paths, and a boolean + indicating whether all queries were successful in finding at least + one related policy file. + """ + all_queries_succeeded = True + matches = [] + for p in packages: + # It's quite common that hyphens are translated to underscores + # and similarly common that underscores are translated to hyphens. + # We make them agnostic here. + hyphen_agnostic = re.sub(r"[-_]", "[-_]", p) + cmd = subprocess.run( + _search_cmd(hyphen_agnostic), + stdout=subprocess.PIPE, + check=True, + ) + new_matches = [a for a in cmd.stdout.decode("utf-8").split("\n") if a] + if not new_matches: + print(f"WARNING: No matches found for {p}", file=sys.stderr) + all_queries_succeeded = False + else: + matches.extend(new_matches) + return matches, all_queries_succeeded def parse_args() -> Any: - """Handle command line arguments.""" - parser = argparse.ArgumentParser( - description='Check for missing syscalls in' - ' seccomp policy files, or make' - ' mass seccomp changes.\n\n' - 'The format of this output follows the template:\n' - ' status (arch) local/policy/filepath :: syscall,syscall,syscall\n' - 'Where the status can be "_" for present, "M" for missing,' - ' or "E" for Error\n\n' - 'Example:\n' - ' mass_seccomp_editor.py --all fstatfs --b32 fstatfs64' - ' modemmanager\n\n' - 'Exit Codes:\n' - " '0' for successfully found specific policy files\n" - " '1' for python-related error.\n" - " '2' for no matched policy files for a given query.", - formatter_class=argparse.RawTextHelpFormatter, - ) - parser.add_argument('packages', nargs='+') - parser.add_argument( - '--all', - type=str, - metavar='syscalls', - help='comma separated syscalls to check in all policy files') - parser.add_argument( - '--b64', - type=str, - metavar='syscalls', - help='Comma separated syscalls to check in 64bit architectures') - parser.add_argument( - '--b32', - type=str, - metavar='syscalls', - help='Comma separated syscalls to check in 32bit architectures') - parser.add_argument( - '--none', - type=str, - metavar='syscalls', - help='Comma separated syscalls to check in unknown architectures') - parser.add_argument('--edit', - action='store_true', - help='Make changes to the listed files,' - ' rather than just printing out what is missing') - parser.add_argument('-y', - '--yes', - action='store_true', - help='Say "Y" to all interactive checks') - parser.add_argument('--force', - action='store_true', - help='Edit all files, regardless of missing status.' 
- ' Does nothing without --edit.') - return parser.parse_args() - - -if __name__ == '__main__': - main() + """Handle command line arguments.""" + parser = argparse.ArgumentParser( + description="Check for missing syscalls in" + " seccomp policy files, or make" + " mass seccomp changes.\n\n" + "The format of this output follows the template:\n" + " status (arch) local/policy/filepath :: syscall,syscall,syscall\n" + 'Where the status can be "_" for present, "M" for missing,' + ' or "E" for Error\n\n' + "Example:\n" + " mass_seccomp_editor.py --all fstatfs --b32 fstatfs64" + " modemmanager\n\n" + "Exit Codes:\n" + " '0' for successfully found specific policy files\n" + " '1' for python-related error.\n" + " '2' for no matched policy files for a given query.", + formatter_class=argparse.RawTextHelpFormatter, + ) + parser.add_argument("packages", nargs="+") + parser.add_argument( + "--all", + type=str, + metavar="syscalls", + help="comma separated syscalls to check in all policy files", + ) + parser.add_argument( + "--b64", + type=str, + metavar="syscalls", + help="Comma separated syscalls to check in 64bit architectures", + ) + parser.add_argument( + "--b32", + type=str, + metavar="syscalls", + help="Comma separated syscalls to check in 32bit architectures", + ) + parser.add_argument( + "--none", + type=str, + metavar="syscalls", + help="Comma separated syscalls to check in unknown architectures", + ) + parser.add_argument( + "--edit", + action="store_true", + help="Make changes to the listed files," + " rather than just printing out what is missing", + ) + parser.add_argument( + "-y", + "--yes", + action="store_true", + help='Say "Y" to all interactive checks', + ) + parser.add_argument( + "--force", + action="store_true", + help="Edit all files, regardless of missing status." + " Does nothing without --edit.", + ) + return parser.parse_args() + + +if __name__ == "__main__": + main() diff --git a/seccomp_tools/mass_seccomp_editor/test_mass_seccomp_editor.py b/seccomp_tools/mass_seccomp_editor/test_mass_seccomp_editor.py index c128ad02..5889dec5 100755 --- a/seccomp_tools/mass_seccomp_editor/test_mass_seccomp_editor.py +++ b/seccomp_tools/mass_seccomp_editor/test_mass_seccomp_editor.py @@ -17,20 +17,22 @@ poll: 1 foobar: 1 """ -TEST_FP = 'foo' +TEST_FP = "foo" class TestMassSeccompEditor(unittest.TestCase): - """Test the mass_seccomp_editor.""" + """Test the mass_seccomp_editor.""" - def test_check_missing_sycalls(self): - """Test we can find missing syscalls.""" - with mock.patch('builtins.open', - mock.mock_open(read_data=BASE_SECCOMP_CONTENTS)): - out = mass_seccomp_editor.check_missing_syscalls( - ['fstat', 'dup', 'fizzbuzz'], TEST_FP) - self.assertEqual(out, set(['dup', 'fizzbuzz'])) + def test_check_missing_sycalls(self): + """Test we can find missing syscalls.""" + with mock.patch( + "builtins.open", mock.mock_open(read_data=BASE_SECCOMP_CONTENTS) + ): + out = mass_seccomp_editor.check_missing_syscalls( + ["fstat", "dup", "fizzbuzz"], TEST_FP + ) + self.assertEqual(out, set(["dup", "fizzbuzz"])) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/tc_enter_chroot.py b/tc_enter_chroot.py index 2f4fc8ed..0dcadb07 100755 --- a/tc_enter_chroot.py +++ b/tc_enter_chroot.py @@ -11,7 +11,8 @@ This script enters the chroot with mounted sources. 
from __future__ import print_function -__author__ = 'asharif@google.com (Ahmad Sharif)' + +__author__ = "asharif@google.com (Ahmad Sharif)" import argparse import getpass @@ -25,285 +26,326 @@ from cros_utils import misc class MountPoint(object): - """Mount point class""" - - def __init__(self, external_dir, mount_dir, owner, options=None): - self.external_dir = os.path.realpath(external_dir) - self.mount_dir = os.path.realpath(mount_dir) - self.owner = owner - self.options = options - - def CreateAndOwnDir(self, dir_name): - retv = 0 - if not os.path.exists(dir_name): - command = 'mkdir -p ' + dir_name - command += ' || sudo mkdir -p ' + dir_name - retv = command_executer.GetCommandExecuter().RunCommand(command) - if retv != 0: - return retv - pw = pwd.getpwnam(self.owner) - if os.stat(dir_name).st_uid != pw.pw_uid: - command = 'sudo chown -f ' + self.owner + ' ' + dir_name - retv = command_executer.GetCommandExecuter().RunCommand(command) - return retv - - def DoMount(self): - ce = command_executer.GetCommandExecuter() - mount_signature = '%s on %s' % (self.external_dir, self.mount_dir) - command = 'mount' - retv, out, _ = ce.RunCommandWOutput(command) - if mount_signature not in out: - retv = self.CreateAndOwnDir(self.mount_dir) - logger.GetLogger().LogFatalIf(retv, 'Cannot create mount_dir!') - retv = self.CreateAndOwnDir(self.external_dir) - logger.GetLogger().LogFatalIf(retv, 'Cannot create external_dir!') - retv = self.MountDir() - logger.GetLogger().LogFatalIf(retv, 'Cannot mount!') - return retv - else: - return 0 - - def UnMount(self): - ce = command_executer.GetCommandExecuter() - return ce.RunCommand('sudo umount %s' % self.mount_dir) - - def MountDir(self): - command = 'sudo mount --bind ' + self.external_dir + ' ' + self.mount_dir - if self.options == 'ro': - command += ' && sudo mount --bind -oremount,ro ' + self.mount_dir - retv = command_executer.GetCommandExecuter().RunCommand(command) - return retv - - def __str__(self): - ret = '' - ret += self.external_dir + '\n' - ret += self.mount_dir + '\n' - if self.owner: - ret += self.owner + '\n' - if self.options: - ret += self.options + '\n' - return ret + """Mount point class""" + + def __init__(self, external_dir, mount_dir, owner, options=None): + self.external_dir = os.path.realpath(external_dir) + self.mount_dir = os.path.realpath(mount_dir) + self.owner = owner + self.options = options + + def CreateAndOwnDir(self, dir_name): + retv = 0 + if not os.path.exists(dir_name): + command = "mkdir -p " + dir_name + command += " || sudo mkdir -p " + dir_name + retv = command_executer.GetCommandExecuter().RunCommand(command) + if retv != 0: + return retv + pw = pwd.getpwnam(self.owner) + if os.stat(dir_name).st_uid != pw.pw_uid: + command = "sudo chown -f " + self.owner + " " + dir_name + retv = command_executer.GetCommandExecuter().RunCommand(command) + return retv + + def DoMount(self): + ce = command_executer.GetCommandExecuter() + mount_signature = "%s on %s" % (self.external_dir, self.mount_dir) + command = "mount" + retv, out, _ = ce.RunCommandWOutput(command) + if mount_signature not in out: + retv = self.CreateAndOwnDir(self.mount_dir) + logger.GetLogger().LogFatalIf(retv, "Cannot create mount_dir!") + retv = self.CreateAndOwnDir(self.external_dir) + logger.GetLogger().LogFatalIf(retv, "Cannot create external_dir!") + retv = self.MountDir() + logger.GetLogger().LogFatalIf(retv, "Cannot mount!") + return retv + else: + return 0 + + def UnMount(self): + ce = command_executer.GetCommandExecuter() + return ce.RunCommand("sudo 
umount %s" % self.mount_dir) + + def MountDir(self): + command = ( + "sudo mount --bind " + self.external_dir + " " + self.mount_dir + ) + if self.options == "ro": + command += " && sudo mount --bind -oremount,ro " + self.mount_dir + retv = command_executer.GetCommandExecuter().RunCommand(command) + return retv + + def __str__(self): + ret = "" + ret += self.external_dir + "\n" + ret += self.mount_dir + "\n" + if self.owner: + ret += self.owner + "\n" + if self.options: + ret += self.options + "\n" + return ret def Main(argv, return_output=False): - """The main function.""" - - parser = argparse.ArgumentParser() - parser.add_argument( - '-c', - '--chromeos_root', - dest='chromeos_root', - default='../..', - help='ChromeOS root checkout directory.') - parser.add_argument( - '-t', - '--toolchain_root', - dest='toolchain_root', - help='Toolchain root directory.') - parser.add_argument( - '-o', '--output', dest='output', help='Toolchain output directory') - parser.add_argument( - '--sudo', - dest='sudo', - action='store_true', - default=False, - help='Run the command with sudo.') - parser.add_argument( - '-r', - '--third_party', - dest='third_party', - help='The third_party directory to mount.') - parser.add_argument( - '-m', - '--other_mounts', - dest='other_mounts', - help='Other mount points in the form: ' - 'dir:mounted_dir:options') - parser.add_argument( - '-s', - '--mount-scripts-only', - dest='mount_scripts_only', - action='store_true', - default=False, - help='Mount only the scripts dir, and not the sources.') - parser.add_argument( - 'passthrough_argv', - nargs='*', - help='Command to be executed inside the chroot.') - - options = parser.parse_args(argv) - - chromeos_root = options.chromeos_root - - chromeos_root = os.path.expanduser(chromeos_root) - if options.toolchain_root: - options.toolchain_root = os.path.expanduser(options.toolchain_root) - - chromeos_root = os.path.abspath(chromeos_root) - - tc_dirs = [] - if options.toolchain_root is None or options.mount_scripts_only: - m = 'toolchain_root not specified. Will not mount toolchain dirs.' 
- logger.GetLogger().LogWarning(m) - else: - tc_dirs = [ - options.toolchain_root + '/google_vendor_src_branch/gcc', - options.toolchain_root + '/google_vendor_src_branch/binutils' - ] - - for tc_dir in tc_dirs: - if not os.path.exists(tc_dir): - logger.GetLogger().LogError('toolchain path ' + tc_dir + - ' does not exist!') - parser.print_help() - sys.exit(1) - - if not os.path.exists(chromeos_root): - logger.GetLogger().LogError('chromeos_root ' + options.chromeos_root + - ' does not exist!') - parser.print_help() - sys.exit(1) - - if not os.path.exists(chromeos_root + '/src/scripts/build_packages'): - logger.GetLogger().LogError(options.chromeos_root + - '/src/scripts/build_packages' - ' not found!') - parser.print_help() - sys.exit(1) - - version_dir = os.path.realpath(os.path.expanduser(os.path.dirname(__file__))) - - mounted_tc_root = '/usr/local/toolchain_root' - full_mounted_tc_root = chromeos_root + '/chroot/' + mounted_tc_root - full_mounted_tc_root = os.path.abspath(full_mounted_tc_root) - - mount_points = [] - for tc_dir in tc_dirs: - last_dir = misc.GetRoot(tc_dir)[1] - mount_point = MountPoint(tc_dir, full_mounted_tc_root + '/' + last_dir, - getpass.getuser(), 'ro') + """The main function.""" + + parser = argparse.ArgumentParser() + parser.add_argument( + "-c", + "--chromeos_root", + dest="chromeos_root", + default="../..", + help="ChromeOS root checkout directory.", + ) + parser.add_argument( + "-t", + "--toolchain_root", + dest="toolchain_root", + help="Toolchain root directory.", + ) + parser.add_argument( + "-o", "--output", dest="output", help="Toolchain output directory" + ) + parser.add_argument( + "--sudo", + dest="sudo", + action="store_true", + default=False, + help="Run the command with sudo.", + ) + parser.add_argument( + "-r", + "--third_party", + dest="third_party", + help="The third_party directory to mount.", + ) + parser.add_argument( + "-m", + "--other_mounts", + dest="other_mounts", + help="Other mount points in the form: " "dir:mounted_dir:options", + ) + parser.add_argument( + "-s", + "--mount-scripts-only", + dest="mount_scripts_only", + action="store_true", + default=False, + help="Mount only the scripts dir, and not the sources.", + ) + parser.add_argument( + "passthrough_argv", + nargs="*", + help="Command to be executed inside the chroot.", + ) + + options = parser.parse_args(argv) + + chromeos_root = options.chromeos_root + + chromeos_root = os.path.expanduser(chromeos_root) + if options.toolchain_root: + options.toolchain_root = os.path.expanduser(options.toolchain_root) + + chromeos_root = os.path.abspath(chromeos_root) + + tc_dirs = [] + if options.toolchain_root is None or options.mount_scripts_only: + m = "toolchain_root not specified. Will not mount toolchain dirs." + logger.GetLogger().LogWarning(m) + else: + tc_dirs = [ + options.toolchain_root + "/google_vendor_src_branch/gcc", + options.toolchain_root + "/google_vendor_src_branch/binutils", + ] + + for tc_dir in tc_dirs: + if not os.path.exists(tc_dir): + logger.GetLogger().LogError( + "toolchain path " + tc_dir + " does not exist!" + ) + parser.print_help() + sys.exit(1) + + if not os.path.exists(chromeos_root): + logger.GetLogger().LogError( + "chromeos_root " + options.chromeos_root + " does not exist!" + ) + parser.print_help() + sys.exit(1) + + if not os.path.exists(chromeos_root + "/src/scripts/build_packages"): + logger.GetLogger().LogError( + options.chromeos_root + "/src/scripts/build_packages" " not found!" 
+ ) + parser.print_help() + sys.exit(1) + + version_dir = os.path.realpath( + os.path.expanduser(os.path.dirname(__file__)) + ) + + mounted_tc_root = "/usr/local/toolchain_root" + full_mounted_tc_root = chromeos_root + "/chroot/" + mounted_tc_root + full_mounted_tc_root = os.path.abspath(full_mounted_tc_root) + + mount_points = [] + for tc_dir in tc_dirs: + last_dir = misc.GetRoot(tc_dir)[1] + mount_point = MountPoint( + tc_dir, + full_mounted_tc_root + "/" + last_dir, + getpass.getuser(), + "ro", + ) + mount_points.append(mount_point) + + # Add the third_party mount point if it exists + if options.third_party: + third_party_dir = options.third_party + logger.GetLogger().LogFatalIf( + not os.path.isdir(third_party_dir), + "--third_party option is not a valid dir.", + ) + else: + third_party_dir = os.path.abspath( + "%s/../../../third_party" % os.path.dirname(__file__) + ) + + if os.path.isdir(third_party_dir): + mount_point = MountPoint( + third_party_dir, + ( + "%s/%s" + % (full_mounted_tc_root, os.path.basename(third_party_dir)) + ), + getpass.getuser(), + ) + mount_points.append(mount_point) + + output = options.output + if output is None and options.toolchain_root: + # Mount the output directory at /usr/local/toolchain_root/output + output = options.toolchain_root + "/output" + + if output: + mount_points.append( + MountPoint( + output, full_mounted_tc_root + "/output", getpass.getuser() + ) + ) + + # Mount the other mount points + mount_points += CreateMountPointsFromString( + options.other_mounts, chromeos_root + "/chroot/" + ) + + last_dir = misc.GetRoot(version_dir)[1] + + # Mount the version dir (v14) at /usr/local/toolchain_root/v14 + mount_point = MountPoint( + version_dir, full_mounted_tc_root + "/" + last_dir, getpass.getuser() + ) mount_points.append(mount_point) - # Add the third_party mount point if it exists - if options.third_party: - third_party_dir = options.third_party - logger.GetLogger().LogFatalIf(not os.path.isdir(third_party_dir), - '--third_party option is not a valid dir.') - else: - third_party_dir = os.path.abspath( - '%s/../../../third_party' % os.path.dirname(__file__)) + for mount_point in mount_points: + retv = mount_point.DoMount() + if retv != 0: + return retv - if os.path.isdir(third_party_dir): - mount_point = MountPoint( - third_party_dir, - ('%s/%s' % (full_mounted_tc_root, os.path.basename(third_party_dir))), - getpass.getuser()) - mount_points.append(mount_point) + # Finally, create the symlink to build-gcc. + command = "sudo chown " + getpass.getuser() + " " + full_mounted_tc_root + retv = command_executer.GetCommandExecuter().RunCommand(command) - output = options.output - if output is None and options.toolchain_root: - # Mount the output directory at /usr/local/toolchain_root/output - output = options.toolchain_root + '/output' - - if output: - mount_points.append( - MountPoint(output, full_mounted_tc_root + '/output', getpass.getuser())) - - # Mount the other mount points - mount_points += CreateMountPointsFromString(options.other_mounts, - chromeos_root + '/chroot/') - - last_dir = misc.GetRoot(version_dir)[1] - - # Mount the version dir (v14) at /usr/local/toolchain_root/v14 - mount_point = MountPoint(version_dir, full_mounted_tc_root + '/' + last_dir, - getpass.getuser()) - mount_points.append(mount_point) - - for mount_point in mount_points: - retv = mount_point.DoMount() - if retv != 0: - return retv - - # Finally, create the symlink to build-gcc. 
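[Editor's note: as a usage sketch for the MountPoint class defined earlier in this file, a single entry in the mount loop above behaves roughly like the following. Paths are hypothetical; the class shells out to `sudo mount --bind`, with an extra `-oremount,ro` pass when "ro" is requested:

    import getpass

    from tc_enter_chroot import MountPoint  # assumes toolchain-utils on sys.path

    mp = MountPoint(
        "/work/toolchain/gcc",  # hypothetical external directory
        "/work/chromeos/chroot/usr/local/toolchain_root/gcc",  # hypothetical
        getpass.getuser(),
        "ro",  # bind-mount, then remount read-only
    )
    if mp.DoMount() == 0:
        # ... work against the mounted tree ...
        mp.UnMount()
]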
- command = 'sudo chown ' + getpass.getuser() + ' ' + full_mounted_tc_root - retv = command_executer.GetCommandExecuter().RunCommand(command) - - try: - CreateSymlink(last_dir + '/build-gcc', full_mounted_tc_root + '/build-gcc') - CreateSymlink(last_dir + '/build-binutils', - full_mounted_tc_root + '/build-binutils') - except Exception as e: - logger.GetLogger().LogError(str(e)) - - # Now call cros_sdk --enter with the rest of the arguments. - command = 'cd %s/src/scripts && cros_sdk --enter' % chromeos_root - - if len(options.passthrough_argv) > 1: - inner_command = ' '.join(options.passthrough_argv[1:]) - inner_command = inner_command.strip() - if inner_command.startswith('-- '): - inner_command = inner_command[3:] - command_file = 'tc_enter_chroot.cmd' - command_file_path = chromeos_root + '/src/scripts/' + command_file - retv = command_executer.GetCommandExecuter().RunCommand('sudo rm -f ' + - command_file_path) - if retv != 0: - return retv - with open(command_file_path, 'w', encoding='utf-8') as f: - f.write(inner_command) - logger.GetLogger().LogCmd(inner_command) - retv = command_executer.GetCommandExecuter().RunCommand('chmod +x ' + - command_file_path) - if retv != 0: - return retv - - if options.sudo: - command += ' sudo ./' + command_file + try: + CreateSymlink( + last_dir + "/build-gcc", full_mounted_tc_root + "/build-gcc" + ) + CreateSymlink( + last_dir + "/build-binutils", + full_mounted_tc_root + "/build-binutils", + ) + except Exception as e: + logger.GetLogger().LogError(str(e)) + + # Now call cros_sdk --enter with the rest of the arguments. + command = "cd %s/src/scripts && cros_sdk --enter" % chromeos_root + + if len(options.passthrough_argv) > 1: + inner_command = " ".join(options.passthrough_argv[1:]) + inner_command = inner_command.strip() + if inner_command.startswith("-- "): + inner_command = inner_command[3:] + command_file = "tc_enter_chroot.cmd" + command_file_path = chromeos_root + "/src/scripts/" + command_file + retv = command_executer.GetCommandExecuter().RunCommand( + "sudo rm -f " + command_file_path + ) + if retv != 0: + return retv + with open(command_file_path, "w", encoding="utf-8") as f: + f.write(inner_command) + logger.GetLogger().LogCmd(inner_command) + retv = command_executer.GetCommandExecuter().RunCommand( + "chmod +x " + command_file_path + ) + if retv != 0: + return retv + + if options.sudo: + command += " sudo ./" + command_file + else: + command += " ./" + command_file + retv = command_executer.GetCommandExecuter().RunCommandGeneric( + command, return_output + ) + return retv else: - command += ' ./' + command_file - retv = command_executer.GetCommandExecuter().RunCommandGeneric( - command, return_output) - return retv - else: - os.chdir('%s/src/scripts' % chromeos_root) - ce = command_executer.GetCommandExecuter() - _, out, _ = ce.RunCommandWOutput('which cros_sdk') - cros_sdk_binary = out.split()[0] - return os.execv(cros_sdk_binary, ['', '--enter']) + os.chdir("%s/src/scripts" % chromeos_root) + ce = command_executer.GetCommandExecuter() + _, out, _ = ce.RunCommandWOutput("which cros_sdk") + cros_sdk_binary = out.split()[0] + return os.execv(cros_sdk_binary, ["", "--enter"]) def CreateMountPointsFromString(mount_strings, chroot_dir): - # String has options in the form dir:mount:options - mount_points = [] - if not mount_strings: + # String has options in the form dir:mount:options + mount_points = [] + if not mount_strings: + return mount_points + mount_list = mount_strings.split() + for mount_string in mount_list: + mount_values = 
mount_string.split(":") + external_dir = mount_values[0] + mount_dir = mount_values[1] + if len(mount_values) > 2: + options = mount_values[2] + else: + options = None + mount_point = MountPoint( + external_dir, + chroot_dir + "/" + mount_dir, + getpass.getuser(), + options, + ) + mount_points.append(mount_point) return mount_points - mount_list = mount_strings.split() - for mount_string in mount_list: - mount_values = mount_string.split(':') - external_dir = mount_values[0] - mount_dir = mount_values[1] - if len(mount_values) > 2: - options = mount_values[2] - else: - options = None - mount_point = MountPoint(external_dir, chroot_dir + '/' + mount_dir, - getpass.getuser(), options) - mount_points.append(mount_point) - return mount_points def CreateSymlink(target, link_name): - logger.GetLogger().LogFatalIf( - target.startswith('/'), "Can't create symlink to absolute path!") - real_from_file = misc.GetRoot(link_name)[0] + '/' + target - if os.path.realpath(real_from_file) != os.path.realpath(link_name): - if os.path.exists(link_name): - command = 'rm -rf ' + link_name - command_executer.GetCommandExecuter().RunCommand(command) - os.symlink(target, link_name) - - -if __name__ == '__main__': - retval = Main(sys.argv) - sys.exit(retval) + logger.GetLogger().LogFatalIf( + target.startswith("/"), "Can't create symlink to absolute path!" + ) + real_from_file = misc.GetRoot(link_name)[0] + "/" + target + if os.path.realpath(real_from_file) != os.path.realpath(link_name): + if os.path.exists(link_name): + command = "rm -rf " + link_name + command_executer.GetCommandExecuter().RunCommand(command) + os.symlink(target, link_name) + + +if __name__ == "__main__": + retval = Main(sys.argv) + sys.exit(retval) diff --git a/toolchain_utils_githooks/check-presubmit.py b/toolchain_utils_githooks/check-presubmit.py index e99e70ae..f9da974b 100755 --- a/toolchain_utils_githooks/check-presubmit.py +++ b/toolchain_utils_githooks/check-presubmit.py @@ -23,55 +23,55 @@ import typing as t from pathlib import Path -def run_command_unchecked(command: t.List[str], - cwd: str, - env: t.Dict[str, str] = None) -> t.Tuple[int, str]: - """Runs a command in the given dir, returning its exit code and stdio.""" - p = subprocess.Popen( - command, - cwd=cwd, - stdin=subprocess.DEVNULL, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - env=env, - ) - - stdout, _ = p.communicate() - exit_code = p.wait() - return exit_code, stdout.decode('utf-8', 'replace') +def run_command_unchecked( + command: t.List[str], cwd: str, env: t.Dict[str, str] = None +) -> t.Tuple[int, str]: + """Runs a command in the given dir, returning its exit code and stdio.""" + p = subprocess.Popen( + command, + cwd=cwd, + stdin=subprocess.DEVNULL, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + env=env, + ) + + stdout, _ = p.communicate() + exit_code = p.wait() + return exit_code, stdout.decode("utf-8", "replace") def has_executable_on_path(exe: str) -> bool: - """Returns whether we have `exe` somewhere on our $PATH""" - return shutil.which(exe) is not None + """Returns whether we have `exe` somewhere on our $PATH""" + return shutil.which(exe) is not None def escape_command(command: t.Iterable[str]) -> str: - """Returns a human-readable and copy-pastable shell command. + """Returns a human-readable and copy-pastable shell command. - Only intended for use in output to users. shell=True is strongly discouraged. - """ - return ' '.join(shlex.quote(x) for x in command) + Only intended for use in output to users. shell=True is strongly discouraged. 
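[Editor's note: what escape_command produces, concretely: shlex.quote leaves shell-safe words untouched and single-quotes anything containing spaces or metacharacters, so the logged command can be pasted straight back into a shell. A quick self-contained illustration:

    import shlex

    command = ["grep", "-r", "hello world", "some dir/"]
    print(" ".join(shlex.quote(x) for x in command))
    # -> grep -r 'hello world' 'some dir/'
]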
+ """ + return " ".join(shlex.quote(x) for x in command) def remove_deleted_files(files: t.Iterable[str]) -> t.List[str]: - return [f for f in files if os.path.exists(f)] + return [f for f in files if os.path.exists(f)] def is_file_executable(file_path: str) -> bool: - return os.access(file_path, os.X_OK) + return os.access(file_path, os.X_OK) # As noted in our docs, some of our Python code depends on modules that sit in # toolchain-utils/. Add that to PYTHONPATH to ensure that things like `cros # lint` are kept happy. def env_with_pythonpath(toolchain_utils_root: str) -> t.Dict[str, str]: - env = dict(os.environ) - if 'PYTHONPATH' in env: - env['PYTHONPATH'] += ':' + toolchain_utils_root - else: - env['PYTHONPATH'] = toolchain_utils_root - return env + env = dict(os.environ) + if "PYTHONPATH" in env: + env["PYTHONPATH"] += ":" + toolchain_utils_root + else: + env["PYTHONPATH"] = toolchain_utils_root + return env # Each checker represents an independent check that's done on our sources. @@ -86,649 +86,703 @@ def env_with_pythonpath(toolchain_utils_root: str) -> t.Dict[str, str]: # in the pool. In order words, blocking on results from the provided # threadpool is OK. CheckResult = t.NamedTuple( - 'CheckResult', + "CheckResult", ( - ('ok', bool), - ('output', str), - ('autofix_commands', t.List[t.List[str]]), + ("ok", bool), + ("output", str), + ("autofix_commands", t.List[t.List[str]]), ), ) def get_check_result_or_catch( - task: multiprocessing.pool.ApplyResult) -> CheckResult: - """Returns the result of task(); if that raises, returns a CheckResult. - - The task is expected to return a CheckResult on get(). - """ - try: - return task.get() - except Exception: - return CheckResult( - ok=False, - output='Check exited with an unexpected exception:\n%s' % - traceback.format_exc(), - autofix_commands=[], + task: multiprocessing.pool.ApplyResult, +) -> CheckResult: + """Returns the result of task(); if that raises, returns a CheckResult. + + The task is expected to return a CheckResult on get(). + """ + try: + return task.get() + except Exception: + return CheckResult( + ok=False, + output="Check exited with an unexpected exception:\n%s" + % traceback.format_exc(), + autofix_commands=[], + ) + + +def check_isort( + toolchain_utils_root: str, python_files: t.Iterable[str] +) -> CheckResult: + """Subchecker of check_py_format. Checks python file formats with isort""" + chromite = Path("/mnt/host/source/chromite") + isort = chromite / "scripts" / "isort" + config_file = chromite / ".isort.cfg" + + if not (isort.exists() and config_file.exists()): + return CheckResult( + ok=True, + output="isort not found; skipping", + autofix_commands=[], + ) + + config_file_flag = f"--settings-file={config_file}" + command = [isort, "-c", config_file_flag] + python_files + exit_code, stdout_and_stderr = run_command_unchecked( + command, cwd=toolchain_utils_root ) - -def check_isort(toolchain_utils_root: str, - python_files: t.Iterable[str]) -> CheckResult: - """Subchecker of check_py_format. Checks python file formats with isort""" - chromite = Path('/mnt/host/source/chromite') - isort = chromite / 'scripts' / 'isort' - config_file = chromite / '.isort.cfg' - - if not (isort.exists() and config_file.exists()): - return CheckResult( - ok=True, - output='isort not found; skipping', - autofix_commands=[], + # isort fails when files have broken formatting. 
+ if not exit_code: + return CheckResult( + ok=True, + output="", + autofix_commands=[], + ) + + bad_files = [] + bad_file_re = re.compile( + r"^ERROR: (.*) Imports are incorrectly sorted and/or formatted\.$" ) - - config_file_flag = f'--settings-file={config_file}' - command = [isort, '-c', config_file_flag] + python_files - exit_code, stdout_and_stderr = run_command_unchecked( - command, cwd=toolchain_utils_root) - - # isort fails when files have broken formatting. - if not exit_code: - return CheckResult( - ok=True, - output='', - autofix_commands=[], - ) - - bad_files = [] - bad_file_re = re.compile( - r'^ERROR: (.*) Imports are incorrectly sorted and/or formatted\.$') - for line in stdout_and_stderr.splitlines(): - m = bad_file_re.match(line) - if m: - file_name, = m.groups() - bad_files.append(file_name.strip()) - - if not bad_files: + for line in stdout_and_stderr.splitlines(): + m = bad_file_re.match(line) + if m: + (file_name,) = m.groups() + bad_files.append(file_name.strip()) + + if not bad_files: + return CheckResult( + ok=False, + output="`%s` failed; stdout/stderr:\n%s" + % (escape_command(command), stdout_and_stderr), + autofix_commands=[], + ) + + autofix = [str(isort), config_file_flag] + bad_files return CheckResult( ok=False, - output='`%s` failed; stdout/stderr:\n%s' % - (escape_command(command), stdout_and_stderr), - autofix_commands=[], + output="The following file(s) have formatting errors: %s" % bad_files, + autofix_commands=[autofix], ) - autofix = [str(isort), config_file_flag] + bad_files - return CheckResult( - ok=False, - output='The following file(s) have formatting errors: %s' % bad_files, - autofix_commands=[autofix], - ) - - -def check_black(toolchain_utils_root: str, black: Path, - python_files: t.Iterable[str]) -> CheckResult: - """Subchecker of check_py_format. Checks python file formats with black""" - # Folks have been bitten by accidentally using multiple formatter versions in - # the past. This is an issue, since newer versions of black may format things - # differently. Make the version obvious. - command = [black, '--version'] - exit_code, stdout_and_stderr = run_command_unchecked( - command, cwd=toolchain_utils_root) - if exit_code: - return CheckResult( - ok=False, - output=f'Failed getting black version; stdstreams: {stdout_and_stderr}', - autofix_commands=[], - ) - black_version = stdout_and_stderr.strip() - command = [black, '--line-length=80', '--check'] + python_files - exit_code, stdout_and_stderr = run_command_unchecked( - command, cwd=toolchain_utils_root) - # black fails when files are poorly formatted. - if exit_code == 0: - return CheckResult( - ok=True, - output=f'Using {black_version!r}, no issues were found.', - autofix_commands=[], +def check_black( + toolchain_utils_root: str, black: Path, python_files: t.Iterable[str] +) -> CheckResult: + """Subchecker of check_py_format. Checks python file formats with black""" + # Folks have been bitten by accidentally using multiple formatter versions in + # the past. This is an issue, since newer versions of black may format things + # differently. Make the version obvious. + command = [black, "--version"] + exit_code, stdout_and_stderr = run_command_unchecked( + command, cwd=toolchain_utils_root ) - - # Output format looks something like: - # f'{complaints}\nOh no!{emojis}\n{summary}' - # Whittle it down to complaints. 
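[Editor's note: a sketch of the parse below run against made-up `black --check` output; the exact wording varies between black versions, which is why the code keeps the version string in its messages:

    fake_output = (
        "would reformat /repo/foo.py\n"
        "would reformat /repo/bar.py\n"
        "\nOh no! \N{COLLISION SYMBOL}\n"
        "2 files would be reformatted, 10 files would be left unchanged.\n"
    )
    complaints = fake_output.split("\nOh no!", 1)
    assert len(complaints) == 2
    prefix = "would reformat "
    bad_files = [
        line.strip()[len(prefix):]
        for line in complaints[0].strip().splitlines()
        if line.strip().startswith(prefix)
    ]
    print(bad_files)  # -> ['/repo/foo.py', '/repo/bar.py']
]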
- complaints = stdout_and_stderr.split('\nOh no!', 1) - if len(complaints) != 2: - return CheckResult( - ok=False, - output=f'Unparseable `black` output:\n{stdout_and_stderr}', - autofix_commands=[], + if exit_code: + return CheckResult( + ok=False, + output=f"Failed getting black version; stdstreams: {stdout_and_stderr}", + autofix_commands=[], + ) + + black_version = stdout_and_stderr.strip() + command = [black, "--line-length=80", "--check"] + python_files + exit_code, stdout_and_stderr = run_command_unchecked( + command, cwd=toolchain_utils_root ) - - bad_files = [] - errors = [] - refmt_prefix = 'would reformat ' - for line in complaints[0].strip().splitlines(): - line = line.strip() - if line.startswith('error:'): - errors.append(line) - continue - - if not line.startswith(refmt_prefix): - return CheckResult( - ok=False, - output=f'Unparseable `black` output:\n{stdout_and_stderr}', - autofix_commands=[], - ) - - bad_files.append(line[len(refmt_prefix):].strip()) - - # If black had internal errors that it could handle, print them out and exit - # without an autofix. - if errors: - err_str = "\n".join(errors) + # black fails when files are poorly formatted. + if exit_code == 0: + return CheckResult( + ok=True, + output=f"Using {black_version!r}, no issues were found.", + autofix_commands=[], + ) + + # Output format looks something like: + # f'{complaints}\nOh no!{emojis}\n{summary}' + # Whittle it down to complaints. + complaints = stdout_and_stderr.split("\nOh no!", 1) + if len(complaints) != 2: + return CheckResult( + ok=False, + output=f"Unparseable `black` output:\n{stdout_and_stderr}", + autofix_commands=[], + ) + + bad_files = [] + errors = [] + refmt_prefix = "would reformat " + for line in complaints[0].strip().splitlines(): + line = line.strip() + if line.startswith("error:"): + errors.append(line) + continue + + if not line.startswith(refmt_prefix): + return CheckResult( + ok=False, + output=f"Unparseable `black` output:\n{stdout_and_stderr}", + autofix_commands=[], + ) + + bad_files.append(line[len(refmt_prefix) :].strip()) + + # If black had internal errors that it could handle, print them out and exit + # without an autofix. + if errors: + err_str = "\n".join(errors) + return CheckResult( + ok=False, + output=f"Using {black_version!r} had the following errors:\n{err_str}", + autofix_commands=[], + ) + + autofix = [black] + bad_files return CheckResult( ok=False, - output=f'Using {black_version!r} had the following errors:\n{err_str}', - autofix_commands=[], + output=f"Using {black_version!r}, these file(s) have formatting errors: " + f"{bad_files}", + autofix_commands=[autofix], ) - autofix = [black] + bad_files - return CheckResult( - ok=False, - output=f'Using {black_version!r}, these file(s) have formatting errors: ' - f'{bad_files}', - autofix_commands=[autofix], - ) - def check_python_file_headers(python_files: t.Iterable[str]) -> CheckResult: - """Subchecker of check_py_format. Checks python #!s""" - add_hashbang = [] - remove_hashbang = [] - - for python_file in python_files: - needs_hashbang = is_file_executable(python_file) - with open(python_file, encoding='utf-8') as f: - has_hashbang = f.read(2) == '#!' 
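[Editor's note: the rule checked by check_python_file_headers, stated directly: a Python file should start with "#!" exactly when it is executable. A standalone sketch of the mismatch test; the autofixes that follow use `sed -i '1i#!/usr/bin/env python3'` to insert a first line and `sed -i 1d` to delete one:

    import os

    def hashbang_mismatch(path: str) -> bool:
        # Executable files need a #! line; non-executable ones must not have one.
        needs_hashbang = os.access(path, os.X_OK)
        with open(path, encoding="utf-8") as f:
            has_hashbang = f.read(2) == "#!"
        return needs_hashbang != has_hashbang
]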
- if needs_hashbang == has_hashbang: - continue - - if needs_hashbang: - add_hashbang.append(python_file) - else: - remove_hashbang.append(python_file) - - autofix = [] - output = [] - if add_hashbang: - output.append('The following files have no #!, but need one: %s' % - add_hashbang) - autofix.append(['sed', '-i', '1i#!/usr/bin/env python3'] + add_hashbang) - - if remove_hashbang: - output.append("The following files have a #!, but shouldn't: %s" % - remove_hashbang) - autofix.append(['sed', '-i', '1d'] + remove_hashbang) - - if not output: - return CheckResult( - ok=True, - output='', - autofix_commands=[], - ) - return CheckResult( - ok=False, - output='\n'.join(output), - autofix_commands=autofix, - ) - - -def check_py_format(toolchain_utils_root: str, - thread_pool: multiprocessing.pool.ThreadPool, - files: t.Iterable[str]) -> t.List[CheckResult]: - """Runs yapf on files to check for style bugs. Also checks for #!s.""" - black = 'black' - if not has_executable_on_path(black): + """Subchecker of check_py_format. Checks python #!s""" + add_hashbang = [] + remove_hashbang = [] + + for python_file in python_files: + needs_hashbang = is_file_executable(python_file) + with open(python_file, encoding="utf-8") as f: + has_hashbang = f.read(2) == "#!" + if needs_hashbang == has_hashbang: + continue + + if needs_hashbang: + add_hashbang.append(python_file) + else: + remove_hashbang.append(python_file) + + autofix = [] + output = [] + if add_hashbang: + output.append( + "The following files have no #!, but need one: %s" % add_hashbang + ) + autofix.append(["sed", "-i", "1i#!/usr/bin/env python3"] + add_hashbang) + + if remove_hashbang: + output.append( + "The following files have a #!, but shouldn't: %s" % remove_hashbang + ) + autofix.append(["sed", "-i", "1d"] + remove_hashbang) + + if not output: + return CheckResult( + ok=True, + output="", + autofix_commands=[], + ) return CheckResult( ok=False, - output="black isn't available on your $PATH. Please either " - 'enter a chroot, or place depot_tools on your $PATH.', - autofix_commands=[], + output="\n".join(output), + autofix_commands=autofix, ) - python_files = [f for f in remove_deleted_files(files) if f.endswith('.py')] - if not python_files: - return CheckResult( - ok=True, - output='no python files to check', - autofix_commands=[], - ) - tasks = [ - ('check_black', - thread_pool.apply_async(check_black, - (toolchain_utils_root, black, python_files))), - ('check_isort', - thread_pool.apply_async(check_isort, - (toolchain_utils_root, python_files))), - ('check_file_headers', - thread_pool.apply_async(check_python_file_headers, (python_files, ))), - ] - return [(name, get_check_result_or_catch(task)) for name, task in tasks] +def check_py_format( + toolchain_utils_root: str, + thread_pool: multiprocessing.pool.ThreadPool, + files: t.Iterable[str], +) -> t.List[CheckResult]: + """Runs yapf on files to check for style bugs. Also checks for #!s.""" + black = "black" + if not has_executable_on_path(black): + return CheckResult( + ok=False, + output="black isn't available on your $PATH. 
Please either " + "enter a chroot, or place depot_tools on your $PATH.", + autofix_commands=[], + ) + + python_files = [f for f in remove_deleted_files(files) if f.endswith(".py")] + if not python_files: + return CheckResult( + ok=True, + output="no python files to check", + autofix_commands=[], + ) + + tasks = [ + ( + "check_black", + thread_pool.apply_async( + check_black, (toolchain_utils_root, black, python_files) + ), + ), + ( + "check_isort", + thread_pool.apply_async( + check_isort, (toolchain_utils_root, python_files) + ), + ), + ( + "check_file_headers", + thread_pool.apply_async(check_python_file_headers, (python_files,)), + ), + ] + return [(name, get_check_result_or_catch(task)) for name, task in tasks] def find_chromeos_root_directory() -> t.Optional[str]: - return os.getenv('CHROMEOS_ROOT_DIRECTORY') + return os.getenv("CHROMEOS_ROOT_DIRECTORY") def check_cros_lint( - toolchain_utils_root: str, thread_pool: multiprocessing.pool.ThreadPool, - files: t.Iterable[str]) -> t.Union[t.List[CheckResult], CheckResult]: - """Runs `cros lint`""" - - fixed_env = env_with_pythonpath(toolchain_utils_root) - - # We have to support users who don't have a chroot. So we either run `cros - # lint` (if it's been made available to us), or we try a mix of - # pylint+golint. - def try_run_cros_lint(cros_binary: str) -> t.Optional[CheckResult]: - exit_code, output = run_command_unchecked([cros_binary, 'lint', '--'] + - files, - toolchain_utils_root, - env=fixed_env) - - # This is returned specifically if cros couldn't find the ChromeOS tree - # root. - if exit_code == 127: - return None - - return CheckResult( - ok=exit_code == 0, - output=output, - autofix_commands=[], - ) - - cros_lint = try_run_cros_lint('cros') - if cros_lint is not None: - return cros_lint - - cros_root = find_chromeos_root_directory() - if cros_root: - cros_lint = try_run_cros_lint(os.path.join(cros_root, 'chromite/bin/cros')) + toolchain_utils_root: str, + thread_pool: multiprocessing.pool.ThreadPool, + files: t.Iterable[str], +) -> t.Union[t.List[CheckResult], CheckResult]: + """Runs `cros lint`""" + + fixed_env = env_with_pythonpath(toolchain_utils_root) + + # We have to support users who don't have a chroot. So we either run `cros + # lint` (if it's been made available to us), or we try a mix of + # pylint+golint. + def try_run_cros_lint(cros_binary: str) -> t.Optional[CheckResult]: + exit_code, output = run_command_unchecked( + [cros_binary, "lint", "--"] + files, + toolchain_utils_root, + env=fixed_env, + ) + + # This is returned specifically if cros couldn't find the ChromeOS tree + # root. 
+ if exit_code == 127: + return None + + return CheckResult( + ok=exit_code == 0, + output=output, + autofix_commands=[], + ) + + cros_lint = try_run_cros_lint("cros") if cros_lint is not None: - return cros_lint - - tasks = [] - - def check_result_from_command(command: t.List[str]) -> CheckResult: - exit_code, output = run_command_unchecked(command, - toolchain_utils_root, - env=fixed_env) - return CheckResult( - ok=exit_code == 0, - output=output, - autofix_commands=[], + return cros_lint + + cros_root = find_chromeos_root_directory() + if cros_root: + cros_lint = try_run_cros_lint( + os.path.join(cros_root, "chromite/bin/cros") + ) + if cros_lint is not None: + return cros_lint + + tasks = [] + + def check_result_from_command(command: t.List[str]) -> CheckResult: + exit_code, output = run_command_unchecked( + command, toolchain_utils_root, env=fixed_env + ) + return CheckResult( + ok=exit_code == 0, + output=output, + autofix_commands=[], + ) + + python_files = [f for f in remove_deleted_files(files) if f.endswith(".py")] + if python_files: + + def run_pylint() -> CheckResult: + # pylint is required. Fail hard if it DNE. + return check_result_from_command(["pylint"] + python_files) + + tasks.append(("pylint", thread_pool.apply_async(run_pylint))) + + go_files = [f for f in remove_deleted_files(files) if f.endswith(".go")] + if go_files: + + def run_golint() -> CheckResult: + if has_executable_on_path("golint"): + return check_result_from_command( + ["golint", "-set_exit_status"] + go_files + ) + + complaint = "\n".join( + ( + "WARNING: go linting disabled. golint is not on your $PATH.", + "Please either enter a chroot, or install go locally. Continuing.", + ) + ) + return CheckResult( + ok=True, + output=complaint, + autofix_commands=[], + ) + + tasks.append(("golint", thread_pool.apply_async(run_golint))) + + complaint = "\n".join( + ( + "WARNING: No ChromeOS checkout detected, and no viable CrOS tree", + "found; falling back to linting only python and go. If you have a", + "ChromeOS checkout, please either develop from inside of the source", + "tree, or set $CHROMEOS_ROOT_DIRECTORY to the root of it.", + ) ) - python_files = [f for f in remove_deleted_files(files) if f.endswith('.py')] - if python_files: - - def run_pylint() -> CheckResult: - # pylint is required. Fail hard if it DNE. - return check_result_from_command(['pylint'] + python_files) - - tasks.append(('pylint', thread_pool.apply_async(run_pylint))) - - go_files = [f for f in remove_deleted_files(files) if f.endswith('.go')] - if go_files: - - def run_golint() -> CheckResult: - if has_executable_on_path('golint'): - return check_result_from_command(['golint', '-set_exit_status'] + - go_files) - - complaint = '\n'.join(( - 'WARNING: go linting disabled. golint is not on your $PATH.', - 'Please either enter a chroot, or install go locally. Continuing.', - )) - return CheckResult( - ok=True, - output=complaint, - autofix_commands=[], - ) - - tasks.append(('golint', thread_pool.apply_async(run_golint))) - - complaint = '\n'.join(( - 'WARNING: No ChromeOS checkout detected, and no viable CrOS tree', - 'found; falling back to linting only python and go. 
If you have a', - 'ChromeOS checkout, please either develop from inside of the source', - 'tree, or set $CHROMEOS_ROOT_DIRECTORY to the root of it.', - )) - - results = [(name, get_check_result_or_catch(task)) for name, task in tasks] - if not results: - return CheckResult( - ok=True, - output=complaint, - autofix_commands=[], - ) + results = [(name, get_check_result_or_catch(task)) for name, task in tasks] + if not results: + return CheckResult( + ok=True, + output=complaint, + autofix_commands=[], + ) - # We need to complain _somewhere_. - name, angry_result = results[0] - angry_complaint = (complaint + '\n\n' + angry_result.output).strip() - results[0] = (name, angry_result._replace(output=angry_complaint)) - return results + # We need to complain _somewhere_. + name, angry_result = results[0] + angry_complaint = (complaint + "\n\n" + angry_result.output).strip() + results[0] = (name, angry_result._replace(output=angry_complaint)) + return results def check_go_format(toolchain_utils_root, _thread_pool, files): - """Runs gofmt on files to check for style bugs.""" - gofmt = 'gofmt' - if not has_executable_on_path(gofmt): - return CheckResult( - ok=False, - output="gofmt isn't available on your $PATH. Please either " - 'enter a chroot, or place your go bin/ directory on your $PATH.', - autofix_commands=[], - ) + """Runs gofmt on files to check for style bugs.""" + gofmt = "gofmt" + if not has_executable_on_path(gofmt): + return CheckResult( + ok=False, + output="gofmt isn't available on your $PATH. Please either " + "enter a chroot, or place your go bin/ directory on your $PATH.", + autofix_commands=[], + ) + + go_files = [f for f in remove_deleted_files(files) if f.endswith(".go")] + if not go_files: + return CheckResult( + ok=True, + output="no go files to check", + autofix_commands=[], + ) + + command = [gofmt, "-l"] + go_files + exit_code, output = run_command_unchecked(command, cwd=toolchain_utils_root) - go_files = [f for f in remove_deleted_files(files) if f.endswith('.go')] - if not go_files: + if exit_code: + return CheckResult( + ok=False, + output="%s failed; stdout/stderr:\n%s" + % (escape_command(command), output), + autofix_commands=[], + ) + + output = output.strip() + if not output: + return CheckResult( + ok=True, + output="", + autofix_commands=[], + ) + + broken_files = [x.strip() for x in output.splitlines()] + autofix = [gofmt, "-w"] + broken_files return CheckResult( - ok=True, - output='no go files to check', - autofix_commands=[], + ok=False, + output="The following Go files have incorrect " + "formatting: %s" % broken_files, + autofix_commands=[autofix], ) - command = [gofmt, '-l'] + go_files - exit_code, output = run_command_unchecked(command, cwd=toolchain_utils_root) - if exit_code: - return CheckResult( - ok=False, - output='%s failed; stdout/stderr:\n%s' % - (escape_command(command), output), - autofix_commands=[], +def check_tests( + toolchain_utils_root: str, + _thread_pool: multiprocessing.pool.ThreadPool, + files: t.List[str], +) -> CheckResult: + """Runs tests.""" + exit_code, stdout_and_stderr = run_command_unchecked( + [os.path.join(toolchain_utils_root, "run_tests_for.py"), "--"] + files, + toolchain_utils_root, ) - - output = output.strip() - if not output: return CheckResult( - ok=True, - output='', + ok=exit_code == 0, + output=stdout_and_stderr, autofix_commands=[], ) - broken_files = [x.strip() for x in output.splitlines()] - autofix = [gofmt, '-w'] + broken_files - return CheckResult( - ok=False, - output='The following Go files have incorrect ' 
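
`gofmt -l` prints one path per badly formatted file and nothing at all when everything is clean, which is why check_go_format above can turn its output directly into a `gofmt -w` autofix. A self-contained sketch with illustrative paths:

    # Sketch of turning `gofmt -l` output into a `gofmt -w` autofix command,
    # mirroring check_go_format above. The paths below are illustrative.
    output = "pkg/a.go\npkg/b.go\n"  # what `gofmt -l` might print
    broken_files = [x.strip() for x in output.strip().splitlines()]
    autofix = ["gofmt", "-w"] + broken_files
    assert autofix == ["gofmt", "-w", "pkg/a.go", "pkg/b.go"]
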
- 'formatting: %s' % broken_files, - autofix_commands=[autofix], - ) - - -def check_tests(toolchain_utils_root: str, - _thread_pool: multiprocessing.pool.ThreadPool, - files: t.List[str]) -> CheckResult: - """Runs tests.""" - exit_code, stdout_and_stderr = run_command_unchecked( - [os.path.join(toolchain_utils_root, 'run_tests_for.py'), '--'] + files, - toolchain_utils_root) - return CheckResult( - ok=exit_code == 0, - output=stdout_and_stderr, - autofix_commands=[], - ) - def detect_toolchain_utils_root() -> str: - return os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + return os.path.dirname(os.path.dirname(os.path.abspath(__file__))) def process_check_result( - check_name: str, check_results: t.Union[t.List[CheckResult], CheckResult], - start_time: datetime.datetime) -> t.Tuple[bool, t.List[t.List[str]]]: - """Prints human-readable output for the given check_results.""" - indent = ' ' - - def indent_block(text: str) -> str: - return indent + text.replace('\n', '\n' + indent) - - if isinstance(check_results, CheckResult): - ok, output, autofix_commands = check_results - if not ok and autofix_commands: - recommendation = ('Recommended command(s) to fix this: %s' % - [escape_command(x) for x in autofix_commands]) - if output: - output += '\n' + recommendation - else: - output = recommendation - else: - output_pieces = [] - autofix_commands = [] - for subname, (ok, output, autofix) in check_results: - status = 'succeeded' if ok else 'failed' - message = ['*** %s.%s %s' % (check_name, subname, status)] - if output: - message.append(indent_block(output)) - if not ok and autofix: - message.append( - indent_block('Recommended command(s) to fix this: %s' % - [escape_command(x) for x in autofix])) - - output_pieces.append('\n'.join(message)) - autofix_commands += autofix - - ok = all(x.ok for _, x in check_results) - output = '\n\n'.join(output_pieces) - - time_taken = datetime.datetime.now() - start_time - if ok: - print('*** %s succeeded after %s' % (check_name, time_taken)) - else: - print('*** %s failed after %s' % (check_name, time_taken)) - - if output: - print(indent_block(output)) - - print() - return ok, autofix_commands - - -def try_autofix(all_autofix_commands: t.List[t.List[str]], - toolchain_utils_root: str) -> None: - """Tries to run all given autofix commands, if appropriate.""" - if not all_autofix_commands: - return - - exit_code, output = run_command_unchecked(['git', 'status', '--porcelain'], - cwd=toolchain_utils_root) - if exit_code != 0: - print("Autofix aborted: couldn't get toolchain-utils git status.") - return - - if output.strip(): - # A clean repo makes checking/undoing autofix commands trivial. A dirty - # one... less so. 
:) - print('Git repo seems dirty; skipping autofix.') - return - - anything_succeeded = False - for command in all_autofix_commands: - exit_code, output = run_command_unchecked(command, - cwd=toolchain_utils_root) - - if exit_code: - print('*** Autofix command `%s` exited with code %d; stdout/stderr:' % - (escape_command(command), exit_code)) - print(output) + check_name: str, + check_results: t.Union[t.List[CheckResult], CheckResult], + start_time: datetime.datetime, +) -> t.Tuple[bool, t.List[t.List[str]]]: + """Prints human-readable output for the given check_results.""" + indent = " " + + def indent_block(text: str) -> str: + return indent + text.replace("\n", "\n" + indent) + + if isinstance(check_results, CheckResult): + ok, output, autofix_commands = check_results + if not ok and autofix_commands: + recommendation = "Recommended command(s) to fix this: %s" % [ + escape_command(x) for x in autofix_commands + ] + if output: + output += "\n" + recommendation + else: + output = recommendation else: - print('*** Autofix `%s` succeeded' % escape_command(command)) - anything_succeeded = True - - if anything_succeeded: - print('NOTE: Autofixes have been applied. Please check your tree, since ' - 'some lints may now be fixed') - - -def find_repo_root(base_dir: str) -> t.Optional[str]: - current = base_dir - while current != '/': - if os.path.isdir(os.path.join(current, '.repo')): - return current - current = os.path.dirname(current) - return None + output_pieces = [] + autofix_commands = [] + for subname, (ok, output, autofix) in check_results: + status = "succeeded" if ok else "failed" + message = ["*** %s.%s %s" % (check_name, subname, status)] + if output: + message.append(indent_block(output)) + if not ok and autofix: + message.append( + indent_block( + "Recommended command(s) to fix this: %s" + % [escape_command(x) for x in autofix] + ) + ) + + output_pieces.append("\n".join(message)) + autofix_commands += autofix + + ok = all(x.ok for _, x in check_results) + output = "\n\n".join(output_pieces) + + time_taken = datetime.datetime.now() - start_time + if ok: + print("*** %s succeeded after %s" % (check_name, time_taken)) + else: + print("*** %s failed after %s" % (check_name, time_taken)) + if output: + print(indent_block(output)) -def is_in_chroot() -> bool: - return os.path.exists('/etc/cros_chroot_version') + print() + return ok, autofix_commands -def maybe_reexec_inside_chroot(autofix: bool, files: t.List[str]) -> None: - if is_in_chroot(): - return - - enter_chroot = True - chdir_to = None - toolchain_utils = detect_toolchain_utils_root() - if find_repo_root(toolchain_utils) is None: - chromeos_root_dir = find_chromeos_root_directory() - if chromeos_root_dir is None: - print('Standalone toolchain-utils checkout detected; cannot enter ' - 'chroot.') - enter_chroot = False - else: - chdir_to = chromeos_root_dir +def try_autofix( + all_autofix_commands: t.List[t.List[str]], toolchain_utils_root: str +) -> None: + """Tries to run all given autofix commands, if appropriate.""" + if not all_autofix_commands: + return - if not has_executable_on_path('cros_sdk'): - print('No `cros_sdk` detected on $PATH; cannot enter chroot.') - enter_chroot = False + exit_code, output = run_command_unchecked( + ["git", "status", "--porcelain"], cwd=toolchain_utils_root + ) + if exit_code != 0: + print("Autofix aborted: couldn't get toolchain-utils git status.") + return + + if output.strip(): + # A clean repo makes checking/undoing autofix commands trivial. A dirty + # one... less so. 
:) + print("Git repo seems dirty; skipping autofix.") + return + + anything_succeeded = False + for command in all_autofix_commands: + exit_code, output = run_command_unchecked( + command, cwd=toolchain_utils_root + ) + + if exit_code: + print( + "*** Autofix command `%s` exited with code %d; stdout/stderr:" + % (escape_command(command), exit_code) + ) + print(output) + else: + print("*** Autofix `%s` succeeded" % escape_command(command)) + anything_succeeded = True + + if anything_succeeded: + print( + "NOTE: Autofixes have been applied. Please check your tree, since " + "some lints may now be fixed" + ) - if not enter_chroot: - print('Giving up on entering the chroot; be warned that some presubmits ' - 'may be broken.') - return - # We'll be changing ${PWD}, so make everything relative to toolchain-utils, - # which resides at a well-known place inside of the chroot. - chroot_toolchain_utils = '/mnt/host/source/src/third_party/toolchain-utils' +def find_repo_root(base_dir: str) -> t.Optional[str]: + current = base_dir + while current != "/": + if os.path.isdir(os.path.join(current, ".repo")): + return current + current = os.path.dirname(current) + return None - def rebase_path(path: str) -> str: - return os.path.join(chroot_toolchain_utils, - os.path.relpath(path, toolchain_utils)) - args = [ - 'cros_sdk', - '--enter', - '--', - rebase_path(__file__), - ] +def is_in_chroot() -> bool: + return os.path.exists("/etc/cros_chroot_version") - if not autofix: - args.append('--no_autofix') - args.extend(rebase_path(x) for x in files) - if chdir_to is None: - print('Attempting to enter the chroot...') - else: - print(f'Attempting to enter the chroot for tree at {chdir_to}...') - os.chdir(chdir_to) - os.execvp(args[0], args) +def maybe_reexec_inside_chroot(autofix: bool, files: t.List[str]) -> None: + if is_in_chroot(): + return + + enter_chroot = True + chdir_to = None + toolchain_utils = detect_toolchain_utils_root() + if find_repo_root(toolchain_utils) is None: + chromeos_root_dir = find_chromeos_root_directory() + if chromeos_root_dir is None: + print( + "Standalone toolchain-utils checkout detected; cannot enter " + "chroot." + ) + enter_chroot = False + else: + chdir_to = chromeos_root_dir + + if not has_executable_on_path("cros_sdk"): + print("No `cros_sdk` detected on $PATH; cannot enter chroot.") + enter_chroot = False + + if not enter_chroot: + print( + "Giving up on entering the chroot; be warned that some presubmits " + "may be broken." + ) + return + + # We'll be changing ${PWD}, so make everything relative to toolchain-utils, + # which resides at a well-known place inside of the chroot. 
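
try_autofix above only applies fixes to a pristine checkout, using `git status --porcelain` (machine-readable, one line per dirty path, empty when clean) as its guard. A minimal sketch of that guard:

    # Minimal sketch of the clean-tree guard try_autofix uses above.
    import subprocess

    def is_git_tree_clean(repo_root: str) -> bool:
        """True iff `git status --porcelain` reports nothing to show."""
        result = subprocess.run(
            ["git", "status", "--porcelain"],
            cwd=repo_root,
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            check=False,
        )
        # Treat any git failure (e.g. not a repo) as "not safely clean".
        return result.returncode == 0 and not result.stdout.strip()
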
+ chroot_toolchain_utils = "/mnt/host/source/src/third_party/toolchain-utils" + + def rebase_path(path: str) -> str: + return os.path.join( + chroot_toolchain_utils, os.path.relpath(path, toolchain_utils) + ) + + args = [ + "cros_sdk", + "--enter", + "--", + rebase_path(__file__), + ] + + if not autofix: + args.append("--no_autofix") + args.extend(rebase_path(x) for x in files) + + if chdir_to is None: + print("Attempting to enter the chroot...") + else: + print(f"Attempting to enter the chroot for tree at {chdir_to}...") + os.chdir(chdir_to) + os.execvp(args[0], args) def ensure_pip_deps_installed() -> None: - if not has_executable_on_path('pip'): - print('Autoinstalling `pip`...') - subprocess.check_call(['sudo', 'emerge', 'dev-python/pip']) - - for package in ('scipy', 'yapf'): - exit_code = subprocess.call( - ['python3', '-c', f'import {package}'], - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - ) - if exit_code != 0: - print(f'Autoinstalling `{package}`...') - subprocess.check_call(['pip', 'install', '--user', package]) + if not has_executable_on_path("pip"): + print("Autoinstalling `pip`...") + subprocess.check_call(["sudo", "emerge", "dev-python/pip"]) + + for package in ("scipy", "yapf"): + exit_code = subprocess.call( + ["python3", "-c", f"import {package}"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + if exit_code != 0: + print(f"Autoinstalling `{package}`...") + subprocess.check_call(["pip", "install", "--user", package]) def main(argv: t.List[str]) -> int: - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument('--no_autofix', - dest='autofix', - action='store_false', - help="Don't run any autofix commands.") - parser.add_argument( - '--no_enter_chroot', - dest='enter_chroot', - action='store_false', - help="Prevent auto-entering the chroot if we're not already in it.") - parser.add_argument('files', nargs='*') - opts = parser.parse_args(argv) - - files = opts.files - if not files: + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--no_autofix", + dest="autofix", + action="store_false", + help="Don't run any autofix commands.", + ) + parser.add_argument( + "--no_enter_chroot", + dest="enter_chroot", + action="store_false", + help="Prevent auto-entering the chroot if we're not already in it.", + ) + parser.add_argument("files", nargs="*") + opts = parser.parse_args(argv) + + files = opts.files + if not files: + return 0 + + if opts.enter_chroot: + maybe_reexec_inside_chroot(opts.autofix, opts.files) + + # If you ask for --no_enter_chroot, you're on your own for installing these + # things. + if is_in_chroot(): + ensure_pip_deps_installed() + + files = [os.path.abspath(f) for f in files] + + # Note that we extract .__name__s from these, so please name them in a + # user-friendly way. + checks = [ + check_cros_lint, + check_py_format, + check_go_format, + check_tests, + ] + + toolchain_utils_root = detect_toolchain_utils_root() + + # NOTE: As mentioned above, checks can block on threads they spawn in this + # pool, so we need at least len(checks)+1 threads to avoid deadlock. Use *2 + # so all checks can make progress at a decent rate. + num_threads = max(multiprocessing.cpu_count(), len(checks) * 2) + start_time = datetime.datetime.now() + + # For our single print statement... 
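
rebase_path above re-roots a host-side path onto the chroot's fixed toolchain-utils mount point via os.path.relpath. For example, with a hypothetical host checkout path:

    # Sketch of the path re-rooting done by rebase_path above.
    import os

    CHROOT_TOOLCHAIN_UTILS = "/mnt/host/source/src/third_party/toolchain-utils"

    def rebase_path(path: str, host_toolchain_utils: str) -> str:
        # Re-root `path` from the host checkout onto the chroot mount point.
        return os.path.join(
            CHROOT_TOOLCHAIN_UTILS, os.path.relpath(path, host_toolchain_utils)
        )

    host_root = "/home/me/chromiumos/src/third_party/toolchain-utils"  # hypothetical
    assert rebase_path(host_root + "/llvm_tools/git.py", host_root) == (
        "/mnt/host/source/src/third_party/toolchain-utils/llvm_tools/git.py"
    )
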
+ spawn_print_lock = threading.RLock() + + def run_check(check_fn): + name = check_fn.__name__ + with spawn_print_lock: + print("*** Spawning %s" % name) + return name, check_fn(toolchain_utils_root, pool, files) + + # ThreadPool is a ContextManager in py3. + # pylint: disable=not-context-manager + with multiprocessing.pool.ThreadPool(num_threads) as pool: + all_checks_ok = True + all_autofix_commands = [] + for check_name, result in pool.imap_unordered(run_check, checks): + ok, autofix_commands = process_check_result( + check_name, result, start_time + ) + all_checks_ok = ok and all_checks_ok + all_autofix_commands += autofix_commands + + # Run these after everything settles, so: + # - we don't collide with checkers that are running concurrently + # - we clearly print out everything that went wrong ahead of time, in case + # any of these fail + if opts.autofix: + try_autofix(all_autofix_commands, toolchain_utils_root) + + if not all_checks_ok: + return 1 return 0 - if opts.enter_chroot: - maybe_reexec_inside_chroot(opts.autofix, opts.files) - - # If you ask for --no_enter_chroot, you're on your own for installing these - # things. - if is_in_chroot(): - ensure_pip_deps_installed() - - files = [os.path.abspath(f) for f in files] - - # Note that we extract .__name__s from these, so please name them in a - # user-friendly way. - checks = [ - check_cros_lint, - check_py_format, - check_go_format, - check_tests, - ] - - toolchain_utils_root = detect_toolchain_utils_root() - - # NOTE: As mentioned above, checks can block on threads they spawn in this - # pool, so we need at least len(checks)+1 threads to avoid deadlock. Use *2 - # so all checks can make progress at a decent rate. - num_threads = max(multiprocessing.cpu_count(), len(checks) * 2) - start_time = datetime.datetime.now() - - # For our single print statement... - spawn_print_lock = threading.RLock() - - def run_check(check_fn): - name = check_fn.__name__ - with spawn_print_lock: - print('*** Spawning %s' % name) - return name, check_fn(toolchain_utils_root, pool, files) - - # ThreadPool is a ContextManager in py3. - # pylint: disable=not-context-manager - with multiprocessing.pool.ThreadPool(num_threads) as pool: - all_checks_ok = True - all_autofix_commands = [] - for check_name, result in pool.imap_unordered(run_check, checks): - ok, autofix_commands = process_check_result(check_name, result, - start_time) - all_checks_ok = ok and all_checks_ok - all_autofix_commands += autofix_commands - - # Run these after everything settles, so: - # - we don't collide with checkers that are running concurrently - # - we clearly print out everything that went wrong ahead of time, in case - # any of these fail - if opts.autofix: - try_autofix(all_autofix_commands, toolchain_utils_root) - - if not all_checks_ok: - return 1 - return 0 - - -if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/update_telemetry_defaults.py b/update_telemetry_defaults.py index e9eb0427..88420b64 100755 --- a/update_telemetry_defaults.py +++ b/update_telemetry_defaults.py @@ -13,110 +13,115 @@ benchmarks. 
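
main() above sizes its ThreadPool as max(cpu_count, 2 * len(checks)) because each check can block on subtasks submitted to the same pool; with only len(checks) workers, the outer checks could occupy every thread and deadlock waiting on their own children. A toy demonstration of the sizing rule:

    # Toy demonstration of the pool-sizing rule used by main() above:
    # checks submit subtasks to the same pool, so the pool must be large
    # enough that the outer checks cannot starve their own children.
    import multiprocessing
    import multiprocessing.pool

    def pool_size(num_checks: int) -> int:
        # At least num_checks + 1 avoids deadlock; *2 keeps things moving.
        return max(multiprocessing.cpu_count(), num_checks * 2)

    def one_check(pool: multiprocessing.pool.ThreadPool) -> int:
        subtask = pool.apply_async(lambda: 1)  # child task on the same pool
        return subtask.get()  # blocks this worker until the child runs

    with multiprocessing.pool.ThreadPool(pool_size(4)) as pool:
        checks = [pool.apply_async(one_check, (pool,)) for _ in range(4)]
        assert [c.get() for c in checks] == [1, 1, 1, 1]
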
from __future__ import print_function -__author__ = 'cmtice@google.com (Caroline Tice)' +__author__ = "cmtice@google.com (Caroline Tice)" + +import json import os import sys -import json from cros_utils import misc + Defaults = {} class TelemetryDefaults(object): - """Class for handling telemetry default return result fields.""" - - DEFAULTS_FILE_NAME = 'crosperf/default-telemetry-results.json' - - def __init__(self): - # Get the Crosperf directory; that is where the defaults - # file should be. - dirname, __ = misc.GetRoot(__file__) - fullname = os.path.join(dirname, self.DEFAULTS_FILE_NAME) - self._filename = fullname - self._defaults = {} - - def ReadDefaultsFile(self): - if os.path.exists(self._filename): - with open(self._filename, 'r', encoding='utf-8') as fp: - self._defaults = json.load(fp) - - def WriteDefaultsFile(self): - with open(self._filename, 'w', encoding='utf-8') as fp: - json.dump(self._defaults, fp, indent=2) - - def ListCurrentDefaults(self, benchmark='all'): - # Show user current defaults. By default, show all. The user - # can specify the name of a particular benchmark to see only that - # benchmark's default values. - if len(self._defaults) == 0: - print('The benchmark default results are currently empty.') - if benchmark == 'all': - for b in self._defaults.keys(): - results = self._defaults[b] - out_str = b + ' : ' - for r in results: - out_str += r + ' ' - print(out_str) - elif benchmark in self._defaults: - results = self._defaults[benchmark] - out_str = benchmark + ' : ' - for r in results: - out_str += r + ' ' - print(out_str) - else: - print("Error: Unrecognized benchmark '%s'" % benchmark) - - def AddDefault(self, benchmark, result): - if benchmark in self._defaults: - resultList = self._defaults[benchmark] - else: - resultList = [] - resultList.append(result) - self._defaults[benchmark] = resultList - print("Updated results set for '%s': " % benchmark) - print('%s : %s' % (benchmark, repr(self._defaults[benchmark]))) - - def RemoveDefault(self, benchmark, result): - if benchmark in self._defaults: - resultList = self._defaults[benchmark] - if result in resultList: - resultList.remove(result) + """Class for handling telemetry default return result fields.""" + + DEFAULTS_FILE_NAME = "crosperf/default-telemetry-results.json" + + def __init__(self): + # Get the Crosperf directory; that is where the defaults + # file should be. + dirname, __ = misc.GetRoot(__file__) + fullname = os.path.join(dirname, self.DEFAULTS_FILE_NAME) + self._filename = fullname + self._defaults = {} + + def ReadDefaultsFile(self): + if os.path.exists(self._filename): + with open(self._filename, "r", encoding="utf-8") as fp: + self._defaults = json.load(fp) + + def WriteDefaultsFile(self): + with open(self._filename, "w", encoding="utf-8") as fp: + json.dump(self._defaults, fp, indent=2) + + def ListCurrentDefaults(self, benchmark="all"): + # Show user current defaults. By default, show all. The user + # can specify the name of a particular benchmark to see only that + # benchmark's default values. 
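
The defaults file that ReadDefaultsFile and WriteDefaultsFile round-trip above is plain JSON mapping each benchmark name to a list of default result fields. A minimal sketch of the same round-trip against a temp file, with illustrative contents:

    # Sketch of the JSON round-trip behind Read/WriteDefaultsFile above.
    # Benchmark names and result fields here are illustrative only.
    import json
    import tempfile

    defaults = {"octane": ["Total__Score"], "speedometer": ["Total__Total"]}
    with tempfile.NamedTemporaryFile("w+", suffix=".json") as fp:
        json.dump(defaults, fp, indent=2)
        fp.flush()
        fp.seek(0)
        assert json.load(fp) == defaults
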
+ if len(self._defaults) == 0: + print("The benchmark default results are currently empty.") + if benchmark == "all": + for b in self._defaults.keys(): + results = self._defaults[b] + out_str = b + " : " + for r in results: + out_str += r + " " + print(out_str) + elif benchmark in self._defaults: + results = self._defaults[benchmark] + out_str = benchmark + " : " + for r in results: + out_str += r + " " + print(out_str) + else: + print("Error: Unrecognized benchmark '%s'" % benchmark) + + def AddDefault(self, benchmark, result): + if benchmark in self._defaults: + resultList = self._defaults[benchmark] + else: + resultList = [] + resultList.append(result) + self._defaults[benchmark] = resultList print("Updated results set for '%s': " % benchmark) - print('%s : %s' % (benchmark, repr(self._defaults[benchmark]))) - else: + print("%s : %s" % (benchmark, repr(self._defaults[benchmark]))) + + def RemoveDefault(self, benchmark, result): + if benchmark in self._defaults: + resultList = self._defaults[benchmark] + if result in resultList: + resultList.remove(result) + print("Updated results set for '%s': " % benchmark) + print("%s : %s" % (benchmark, repr(self._defaults[benchmark]))) + else: + print( + "'%s' is not in '%s's default results list." + % (result, benchmark) + ) + else: + print("Cannot find benchmark named '%s'" % benchmark) + + def GetDefault(self): + return self._defaults + + def RemoveBenchmark(self, benchmark): + if benchmark in self._defaults: + del self._defaults[benchmark] + print("Deleted benchmark '%s' from list of benchmarks." % benchmark) + else: + print("Cannot find benchmark named '%s'" % benchmark) + + def RenameBenchmark(self, old_name, new_name): + if old_name in self._defaults: + resultsList = self._defaults[old_name] + del self._defaults[old_name] + self._defaults[new_name] = resultsList + print("Renamed '%s' to '%s'." % (old_name, new_name)) + else: + print("Cannot find benchmark named '%s'" % old_name) + + def UsageError(self, user_input): + # Print error message, then show options + print("Error:Invalid user input: '%s'" % user_input) + self.ShowOptions() + + def ShowOptions(self): print( - "'%s' is not in '%s's default results list." % (result, benchmark)) - else: - print("Cannot find benchmark named '%s'" % benchmark) - - def GetDefault(self): - return self._defaults - - def RemoveBenchmark(self, benchmark): - if benchmark in self._defaults: - del self._defaults[benchmark] - print("Deleted benchmark '%s' from list of benchmarks." % benchmark) - else: - print("Cannot find benchmark named '%s'" % benchmark) - - def RenameBenchmark(self, old_name, new_name): - if old_name in self._defaults: - resultsList = self._defaults[old_name] - del self._defaults[old_name] - self._defaults[new_name] = resultsList - print("Renamed '%s' to '%s'." % (old_name, new_name)) - else: - print("Cannot find benchmark named '%s'" % old_name) - - def UsageError(self, user_input): - # Print error message, then show options - print("Error:Invalid user input: '%s'" % user_input) - self.ShowOptions() - - def ShowOptions(self): - print(""" + """ Below are the valid user options and their arguments, and an explanation of what each option does. You may either print out the full name of the option, or you may use the first letter of the option. Case (upper or @@ -136,69 +141,70 @@ lower) does not matter, for the command (case of the result name DOES matter): (Q)uit - Exit this program, saving changes. (T)erminate - Exit this program; abandon changes. 
-""") - - def GetUserInput(self): - # Prompt user - print('Enter option> ') - # Process user input - inp = sys.stdin.readline() - inp = inp[:-1] - # inp = inp.lower() - words = inp.split(' ') - option = words[0] - option = option.lower() - if option in ('h', 'help'): - self.ShowOptions() - elif option in ('l', 'list'): - if len(words) == 1: - self.ListCurrentDefaults() - else: - self.ListCurrentDefaults(benchmark=words[1]) - elif option in ('a', 'add'): - if len(words) < 3: - self.UsageError(inp) - else: - benchmark = words[1] - resultList = words[2:] - for r in resultList: - self.AddDefault(benchmark, r) - elif option in ('d', 'delete'): - if len(words) != 3: - self.UsageError(inp) - else: - benchmark = words[1] - result = words[2] - self.RemoveDefault(benchmark, result) - elif option in ('r', 'remove'): - if len(words) != 2: - self.UsageError(inp) - else: - benchmark = words[1] - self.RemoveBenchmark(benchmark) - elif option in ('m', 'move'): - if len(words) != 3: - self.UsageError(inp) - else: - old_name = words[1] - new_name = words[2] - self.RenameBenchmark(old_name, new_name) - elif option in ('q', 'quit'): - self.WriteDefaultsFile() - - return option in ('q', 'quit', 't', 'terminate') +""" + ) + + def GetUserInput(self): + # Prompt user + print("Enter option> ") + # Process user input + inp = sys.stdin.readline() + inp = inp[:-1] + # inp = inp.lower() + words = inp.split(" ") + option = words[0] + option = option.lower() + if option in ("h", "help"): + self.ShowOptions() + elif option in ("l", "list"): + if len(words) == 1: + self.ListCurrentDefaults() + else: + self.ListCurrentDefaults(benchmark=words[1]) + elif option in ("a", "add"): + if len(words) < 3: + self.UsageError(inp) + else: + benchmark = words[1] + resultList = words[2:] + for r in resultList: + self.AddDefault(benchmark, r) + elif option in ("d", "delete"): + if len(words) != 3: + self.UsageError(inp) + else: + benchmark = words[1] + result = words[2] + self.RemoveDefault(benchmark, result) + elif option in ("r", "remove"): + if len(words) != 2: + self.UsageError(inp) + else: + benchmark = words[1] + self.RemoveBenchmark(benchmark) + elif option in ("m", "move"): + if len(words) != 3: + self.UsageError(inp) + else: + old_name = words[1] + new_name = words[2] + self.RenameBenchmark(old_name, new_name) + elif option in ("q", "quit"): + self.WriteDefaultsFile() + + return option in ("q", "quit", "t", "terminate") def Main(): - defaults = TelemetryDefaults() - defaults.ReadDefaultsFile() - defaults.ShowOptions() - done = defaults.GetUserInput() - while not done: + defaults = TelemetryDefaults() + defaults.ReadDefaultsFile() + defaults.ShowOptions() done = defaults.GetUserInput() - return 0 + while not done: + done = defaults.GetUserInput() + return 0 -if __name__ == '__main__': - retval = Main() - sys.exit(retval) +if __name__ == "__main__": + retval = Main() + sys.exit(retval) -- cgit v1.2.3 From 84556973468631cce626a9cebb7b23439afa763e Mon Sep 17 00:00:00 2001 From: George Burgess IV <gbiv@google.com> Date: Tue, 6 Sep 2022 12:14:42 -0700 Subject: remove old .style.yapf file Since we're swapping to using `black`, this old YAPF file is no longer needed (and instructs `yapf` to actively do the wrong thing with indentation/etc) BUG=b:244644217 TEST=None Change-Id: I1bbf5be37c313b7fbbd841132570b3e5dd197ed5 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3877338 Reviewed-by: Ryan Beltran <ryanbeltran@chromium.org> Commit-Queue: George Burgess <gbiv@chromium.org> Tested-by: 
George Burgess <gbiv@chromium.org> Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com> --- .style.yapf | 6 ------ 1 file changed, 6 deletions(-) delete mode 100644 .style.yapf diff --git a/.style.yapf b/.style.yapf deleted file mode 100644 index 0baa978f..00000000 --- a/.style.yapf +++ /dev/null @@ -1,6 +0,0 @@ -[style] -based_on_style = pep8 -split_before_bitwise_operator = false -blank_line_before_module_docstring = true -blank_lines_between_top_level_imports_and_variables = 2 -indent_width = 2 -- cgit v1.2.3 From c0041a9550814e402f661a560855ff99863cffb2 Mon Sep 17 00:00:00 2001 From: George Burgess IV <gbiv@google.com> Date: Tue, 6 Sep 2022 12:12:02 -0700 Subject: remove `from __future__ import ...` directives These are only useful when we're running code in a Python 2.7 interpreter. Since we no longer support python2, drop these. BUG=b:244644217 TEST=run_tests_for.py shows no new failures Change-Id: Ief9a12b87a560ab38ca71668636874bcb434a0b3 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3877339 Reviewed-by: Ryan Beltran <ryanbeltran@chromium.org> Commit-Queue: George Burgess <gbiv@chromium.org> Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Tested-by: George Burgess <gbiv@chromium.org> --- afdo_redaction/redact_profile.py | 2 -- afdo_redaction/redact_profile_test.py | 2 -- afdo_redaction/remove_cold_functions.py | 2 -- afdo_redaction/remove_cold_functions_test.py | 1 - afdo_redaction/remove_indirect_calls.py | 2 -- afdo_redaction/remove_indirect_calls_test.py | 1 - afdo_tools/bisection/afdo_prof_analysis.py | 1 - afdo_tools/bisection/afdo_prof_analysis_e2e_test.py | 1 - afdo_tools/bisection/afdo_prof_analysis_test.py | 1 - afdo_tools/generate_afdo_from_tryjob.py | 1 - afdo_tools/run_afdo_tryjob.py | 1 - binary_search_tool/binary_search_perforce.py | 2 -- binary_search_tool/binary_search_state.py | 2 -- binary_search_tool/bisect_driver.py | 1 - binary_search_tool/common.py | 1 - binary_search_tool/compiler_wrapper.py | 1 - binary_search_tool/cros_pkg/create_cleanup_script.py | 1 - binary_search_tool/run_bisect.py | 1 - binary_search_tool/run_bisect_tests.py | 1 - binary_search_tool/sysroot_wrapper/testing_test.py | 1 - binary_search_tool/test/binary_search_tool_test.py | 2 -- binary_search_tool/test/cmd_script.py | 1 - binary_search_tool/test/cmd_script_no_support.py | 1 - binary_search_tool/test/gen_init_list.py | 1 - binary_search_tool/test/gen_obj.py | 1 - binary_search_tool/test/generate_cmd.py | 1 - binary_search_tool/test/is_good.py | 1 - binary_search_tool/test/is_good_noinc_prune.py | 1 - binary_search_tool/test/switch_tmp.py | 1 - binary_search_tool/test/switch_to_bad.py | 1 - binary_search_tool/test/switch_to_bad_noinc_prune.py | 1 - binary_search_tool/test/switch_to_bad_set_file.py | 1 - binary_search_tool/test/switch_to_good.py | 1 - binary_search_tool/test/switch_to_good_noinc_prune.py | 1 - binary_search_tool/test/switch_to_good_set_file.py | 1 - binary_search_tool/test/test_setup.py | 1 - binary_search_tool/test/test_setup_bad.py | 1 - build_chromeos.py | 2 -- build_tc.py | 2 -- buildbot_test_llvm.py | 1 - buildbot_test_toolchains.py | 1 - chromiumos_image_diff.py | 2 -- compiler_wrapper/build.py | 1 - compiler_wrapper/bundle.py | 1 - cros_utils/buildbot_utils.py | 2 -- cros_utils/buildbot_utils_unittest.py | 1 - cros_utils/command_executer.py | 1 - cros_utils/command_executer_timeout_test.py | 2 -- cros_utils/command_executer_unittest.py | 1 - cros_utils/device_setup_utils.py | 3 --- 
cros_utils/device_setup_utils_unittest.py | 1 - cros_utils/email_sender.py | 1 - cros_utils/email_sender_unittest.py | 1 - cros_utils/file_utils.py | 1 - cros_utils/locks.py | 1 - cros_utils/logger.py | 1 - cros_utils/machines.py | 1 - cros_utils/misc.py | 3 --- cros_utils/misc_test.py | 2 -- cros_utils/no_pseudo_terminal_test.py | 1 - cros_utils/perf_diff.py | 2 -- cros_utils/tabulator.py | 2 -- cros_utils/tabulator_test.py | 1 - cros_utils/timeline.py | 2 -- cros_utils/timeline_test.py | 2 -- cros_utils/tiny_render.py | 1 - cros_utils/tiny_render_test.py | 1 - crosperf/benchmark.py | 2 -- crosperf/benchmark_run.py | 1 - crosperf/benchmark_run_unittest.py | 1 - crosperf/benchmark_unittest.py | 1 - crosperf/compare_machines.py | 1 - crosperf/config_unittest.py | 1 - crosperf/crosperf.py | 1 - crosperf/crosperf_unittest.py | 2 -- crosperf/download_images.py | 1 - crosperf/download_images_buildid_test.py | 1 - crosperf/download_images_unittest.py | 1 - crosperf/experiment.py | 1 - crosperf/experiment_factory.py | 1 - crosperf/experiment_factory_unittest.py | 1 - crosperf/experiment_file.py | 1 - crosperf/experiment_file_unittest.py | 1 - crosperf/experiment_runner.py | 1 - crosperf/experiment_runner_unittest.py | 1 - crosperf/experiment_status.py | 2 -- crosperf/flag_test_unittest.py | 1 - crosperf/generate_report.py | 2 -- crosperf/generate_report_unittest.py | 2 -- crosperf/help.py | 1 - crosperf/image_checksummer.py | 1 - crosperf/label.py | 1 - crosperf/machine_image_manager.py | 1 - crosperf/machine_image_manager_unittest.py | 1 - crosperf/machine_manager.py | 2 -- crosperf/machine_manager_unittest.py | 1 - crosperf/mock_instance.py | 1 - crosperf/results_cache.py | 2 -- crosperf/results_cache_unittest.py | 1 - crosperf/results_organizer.py | 1 - crosperf/results_organizer_unittest.py | 1 - crosperf/results_report.py | 1 - crosperf/results_report_templates.py | 1 - crosperf/results_report_unittest.py | 2 -- crosperf/schedv2.py | 2 -- crosperf/schedv2_unittest.py | 1 - crosperf/settings.py | 1 - crosperf/settings_factory.py | 1 - crosperf/settings_factory_unittest.py | 1 - crosperf/settings_unittest.py | 1 - crosperf/suite_runner.py | 2 -- crosperf/suite_runner_unittest.py | 1 - crosperf/translate_xbuddy.py | 1 - cwp/cr-os/fetch_gn_descs.py | 1 - cwp/cr-os/fetch_gn_descs_test.py | 1 - debug_info_test/allowlist.py | 1 - debug_info_test/check_cus.py | 1 - debug_info_test/check_exist.py | 1 - debug_info_test/check_icf.py | 1 - debug_info_test/check_ngcc.py | 1 - debug_info_test/debug_info_test.py | 1 - file_lock_machine.py | 3 --- file_lock_machine_test.py | 2 -- go/chromeos/setup_chromeos_testing.py | 1 - heatmaps/heat_map.py | 1 - heatmaps/heat_map_test.py | 1 - heatmaps/heatmap_generator.py | 2 -- heatmaps/heatmap_generator_test.py | 2 -- image_chromeos.py | 2 -- llvm_extra/create_ebuild_file.py | 1 - llvm_tools/auto_llvm_bisection.py | 1 - llvm_tools/auto_llvm_bisection_unittest.py | 1 - llvm_tools/chroot.py | 1 - llvm_tools/chroot_unittest.py | 1 - llvm_tools/copy_helpers_to_chromiumos_overlay.py | 1 - llvm_tools/custom_script_example.py | 1 - llvm_tools/failure_modes.py | 1 - llvm_tools/get_llvm_hash.py | 1 - llvm_tools/get_llvm_hash_unittest.py | 1 - llvm_tools/git.py | 1 - llvm_tools/git_llvm_rev.py | 1 - llvm_tools/git_unittest.py | 1 - llvm_tools/llvm_bisection.py | 1 - llvm_tools/llvm_bisection_unittest.py | 1 - llvm_tools/llvm_project.py | 1 - llvm_tools/modify_a_tryjob.py | 1 - llvm_tools/modify_a_tryjob_unittest.py | 1 - llvm_tools/nightly_revert_checker.py | 1 - 
llvm_tools/nightly_revert_checker_test.py | 1 - llvm_tools/subprocess_helpers.py | 1 - llvm_tools/test_helpers.py | 1 - llvm_tools/update_chromeos_llvm_hash_unittest.py | 1 - llvm_tools/update_packages_and_run_tests.py | 1 - llvm_tools/update_packages_and_run_tests_unittest.py | 1 - llvm_tools/update_tryjob_status.py | 1 - llvm_tools/update_tryjob_status_unittest.py | 1 - lock_machine.py | 1 - make_root_writable.py | 2 -- orderfile/post_process_orderfile.py | 2 -- orderfile/post_process_orderfile_test.py | 2 -- pgo_tools/merge_profdata_and_upload.py | 1 - remote_test.py | 2 -- run_tests_for.py | 1 - tc_enter_chroot.py | 2 -- update_telemetry_defaults.py | 2 -- 165 files changed, 208 deletions(-) diff --git a/afdo_redaction/redact_profile.py b/afdo_redaction/redact_profile.py index f37199e3..d9f3d6ab 100755 --- a/afdo_redaction/redact_profile.py +++ b/afdo_redaction/redact_profile.py @@ -24,8 +24,6 @@ It reads a textual AFDO profile from stdin, and prints a 'fixed' version of it to stdout. A summary of what the script actually did is printed to stderr. """ -from __future__ import division -from __future__ import print_function import collections import re diff --git a/afdo_redaction/redact_profile_test.py b/afdo_redaction/redact_profile_test.py index 154f8f7e..487c5091 100755 --- a/afdo_redaction/redact_profile_test.py +++ b/afdo_redaction/redact_profile_test.py @@ -6,8 +6,6 @@ """Tests for redact_profile.py.""" -from __future__ import division -from __future__ import print_function import io import unittest diff --git a/afdo_redaction/remove_cold_functions.py b/afdo_redaction/remove_cold_functions.py index 4b4eaec6..6501cec0 100755 --- a/afdo_redaction/remove_cold_functions.py +++ b/afdo_redaction/remove_cold_functions.py @@ -24,8 +24,6 @@ This is part of the effort to stablize the impact of AFDO profile on Chrome binary size. See crbug.com/1062014 for more context. """ -from __future__ import division -from __future__ import print_function import argparse import collections diff --git a/afdo_redaction/remove_cold_functions_test.py b/afdo_redaction/remove_cold_functions_test.py index a203ab15..ed8b1972 100755 --- a/afdo_redaction/remove_cold_functions_test.py +++ b/afdo_redaction/remove_cold_functions_test.py @@ -6,7 +6,6 @@ """Tests for remove_cold_functions.""" -from __future__ import print_function import io import unittest diff --git a/afdo_redaction/remove_indirect_calls.py b/afdo_redaction/remove_indirect_calls.py index 6d77ba7a..769bd0ff 100755 --- a/afdo_redaction/remove_indirect_calls.py +++ b/afdo_redaction/remove_indirect_calls.py @@ -17,8 +17,6 @@ objects as Chrome, this can become problematic, and lead to NaCl doubling in size (or worse). See crbug.com/1005023 and crbug.com/916130. """ -from __future__ import division -from __future__ import print_function import argparse import re diff --git a/afdo_redaction/remove_indirect_calls_test.py b/afdo_redaction/remove_indirect_calls_test.py index f3b4c5cc..b24de45a 100755 --- a/afdo_redaction/remove_indirect_calls_test.py +++ b/afdo_redaction/remove_indirect_calls_test.py @@ -6,7 +6,6 @@ """Tests for remove_indirect_calls""" -from __future__ import print_function import io import unittest diff --git a/afdo_tools/bisection/afdo_prof_analysis.py b/afdo_tools/bisection/afdo_prof_analysis.py index a7bb4a4c..ee9b0c92 100755 --- a/afdo_tools/bisection/afdo_prof_analysis.py +++ b/afdo_tools/bisection/afdo_prof_analysis.py @@ -20,7 +20,6 @@ exit code. 
The codes known to this script are: - >127: quit immediately """ -from __future__ import division, print_function import argparse import json diff --git a/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py b/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py index 4fe265c9..e9a72b13 100755 --- a/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py +++ b/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py @@ -6,7 +6,6 @@ """End-to-end test for afdo_prof_analysis.""" -from __future__ import absolute_import, division, print_function import json import os diff --git a/afdo_tools/bisection/afdo_prof_analysis_test.py b/afdo_tools/bisection/afdo_prof_analysis_test.py index 6d4b17d3..d21a3596 100755 --- a/afdo_tools/bisection/afdo_prof_analysis_test.py +++ b/afdo_tools/bisection/afdo_prof_analysis_test.py @@ -6,7 +6,6 @@ """Tests for afdo_prof_analysis.""" -from __future__ import print_function import random import io diff --git a/afdo_tools/generate_afdo_from_tryjob.py b/afdo_tools/generate_afdo_from_tryjob.py index 3c5c7f64..30f5b570 100755 --- a/afdo_tools/generate_afdo_from_tryjob.py +++ b/afdo_tools/generate_afdo_from_tryjob.py @@ -6,7 +6,6 @@ """Given a tryjob and perf profile, generates an AFDO profile.""" -from __future__ import print_function import argparse import distutils.spawn diff --git a/afdo_tools/run_afdo_tryjob.py b/afdo_tools/run_afdo_tryjob.py index 5112723e..1e832c4e 100755 --- a/afdo_tools/run_afdo_tryjob.py +++ b/afdo_tools/run_afdo_tryjob.py @@ -49,7 +49,6 @@ If you provide neither --use_afdo_generation_stage nor since it's safer. """ -from __future__ import print_function import argparse import collections diff --git a/binary_search_tool/binary_search_perforce.py b/binary_search_tool/binary_search_perforce.py index b4332ab6..d8e7f77b 100755 --- a/binary_search_tool/binary_search_perforce.py +++ b/binary_search_tool/binary_search_perforce.py @@ -5,8 +5,6 @@ # found in the LICENSE file. """Module of binary serch for perforce.""" -from __future__ import division -from __future__ import print_function import argparse import math diff --git a/binary_search_tool/binary_search_state.py b/binary_search_tool/binary_search_state.py index 1ede37f8..bd04d1b1 100755 --- a/binary_search_tool/binary_search_state.py +++ b/binary_search_tool/binary_search_state.py @@ -6,8 +6,6 @@ """The binary search wrapper.""" -from __future__ import division -from __future__ import print_function import argparse import contextlib diff --git a/binary_search_tool/bisect_driver.py b/binary_search_tool/bisect_driver.py index 2f6cd85b..ddab1682 100644 --- a/binary_search_tool/bisect_driver.py +++ b/binary_search_tool/bisect_driver.py @@ -19,7 +19,6 @@ Design doc: https://docs.google.com/document/d/1yDgaUIa2O5w6dc3sSTe1ry-1ehKajTGJGQCbyn0fcEM """ -from __future__ import print_function import contextlib import fcntl diff --git a/binary_search_tool/common.py b/binary_search_tool/common.py index b8a7a1d2..1f7886f4 100644 --- a/binary_search_tool/common.py +++ b/binary_search_tool/common.py @@ -21,7 +21,6 @@ ArgumentDict inherits OrderedDict in order to preserve the order the args are created so the help text is made properly. 
""" -from __future__ import print_function import collections import os diff --git a/binary_search_tool/compiler_wrapper.py b/binary_search_tool/compiler_wrapper.py index acb7f9eb..02dd332e 100755 --- a/binary_search_tool/compiler_wrapper.py +++ b/binary_search_tool/compiler_wrapper.py @@ -20,7 +20,6 @@ Design doc: https://docs.google.com/document/d/1yDgaUIa2O5w6dc3sSTe1ry-1ehKajTGJGQCbyn0fcEM """ -from __future__ import print_function import os import shlex diff --git a/binary_search_tool/cros_pkg/create_cleanup_script.py b/binary_search_tool/cros_pkg/create_cleanup_script.py index aebf523a..5c46d3ef 100755 --- a/binary_search_tool/cros_pkg/create_cleanup_script.py +++ b/binary_search_tool/cros_pkg/create_cleanup_script.py @@ -13,7 +13,6 @@ undo the changes made by setup.sh, returning everything to its original state. """ -from __future__ import print_function import argparse import sys diff --git a/binary_search_tool/run_bisect.py b/binary_search_tool/run_bisect.py index eeda98cc..480408d7 100755 --- a/binary_search_tool/run_bisect.py +++ b/binary_search_tool/run_bisect.py @@ -6,7 +6,6 @@ """The unified package/object bisecting tool.""" -from __future__ import print_function import abc import argparse diff --git a/binary_search_tool/run_bisect_tests.py b/binary_search_tool/run_bisect_tests.py index 22092ff9..ae230131 100755 --- a/binary_search_tool/run_bisect_tests.py +++ b/binary_search_tool/run_bisect_tests.py @@ -6,7 +6,6 @@ """Run full bisection test.""" -from __future__ import print_function import argparse import os diff --git a/binary_search_tool/sysroot_wrapper/testing_test.py b/binary_search_tool/sysroot_wrapper/testing_test.py index 2523c0be..20bc7f75 100755 --- a/binary_search_tool/sysroot_wrapper/testing_test.py +++ b/binary_search_tool/sysroot_wrapper/testing_test.py @@ -11,7 +11,6 @@ chromeos-chrome built for a daisy board, if you are using another package you will need to change the base_path accordingly. """ -from __future__ import print_function import subprocess import sys diff --git a/binary_search_tool/test/binary_search_tool_test.py b/binary_search_tool/test/binary_search_tool_test.py index f9070989..b2bbec01 100755 --- a/binary_search_tool/test/binary_search_tool_test.py +++ b/binary_search_tool/test/binary_search_tool_test.py @@ -6,8 +6,6 @@ """Tests for bisecting tool.""" -from __future__ import division -from __future__ import print_function __author__ = "shenhan@google.com (Han Shen)" diff --git a/binary_search_tool/test/cmd_script.py b/binary_search_tool/test/cmd_script.py index b668280e..73852bc5 100755 --- a/binary_search_tool/test/cmd_script.py +++ b/binary_search_tool/test/cmd_script.py @@ -11,7 +11,6 @@ It assumes that -opt-bisect-limit and -print-debug-counter are supported by the compiler. """ -from __future__ import print_function import os import sys diff --git a/binary_search_tool/test/cmd_script_no_support.py b/binary_search_tool/test/cmd_script_no_support.py index d2c8c39b..7c5297d2 100644 --- a/binary_search_tool/test/cmd_script_no_support.py +++ b/binary_search_tool/test/cmd_script_no_support.py @@ -9,7 +9,6 @@ This script generates a pseudo log when certain bisecting flags are not supported by compiler. 
""" -from __future__ import print_function import os import sys diff --git a/binary_search_tool/test/gen_init_list.py b/binary_search_tool/test/gen_init_list.py index 1fe1b43e..927ad22c 100755 --- a/binary_search_tool/test/gen_init_list.py +++ b/binary_search_tool/test/gen_init_list.py @@ -6,7 +6,6 @@ """Prints out index for every object file, starting from 0.""" -from __future__ import print_function import sys diff --git a/binary_search_tool/test/gen_obj.py b/binary_search_tool/test/gen_obj.py index aa9a9344..e1eb4913 100755 --- a/binary_search_tool/test/gen_obj.py +++ b/binary_search_tool/test/gen_obj.py @@ -10,7 +10,6 @@ 1 represents a bad object file. """ -from __future__ import print_function import argparse import os diff --git a/binary_search_tool/test/generate_cmd.py b/binary_search_tool/test/generate_cmd.py index bcfe176d..b02c2ad3 100755 --- a/binary_search_tool/test/generate_cmd.py +++ b/binary_search_tool/test/generate_cmd.py @@ -10,7 +10,6 @@ This is a required argument for pass level bisecting. For unit test, we use this script to verify if cmd_script.sh is generated correctly. """ -from __future__ import print_function import os import sys diff --git a/binary_search_tool/test/is_good.py b/binary_search_tool/test/is_good.py index 3be7248f..51ee742c 100755 --- a/binary_search_tool/test/is_good.py +++ b/binary_search_tool/test/is_good.py @@ -6,7 +6,6 @@ """Check to see if the working set produces a good executable.""" -from __future__ import print_function import os import sys diff --git a/binary_search_tool/test/is_good_noinc_prune.py b/binary_search_tool/test/is_good_noinc_prune.py index 4e520162..5fe4ed72 100755 --- a/binary_search_tool/test/is_good_noinc_prune.py +++ b/binary_search_tool/test/is_good_noinc_prune.py @@ -12,7 +12,6 @@ to the switch scripts is equals to the actual number of items (i.e. checking that noincremental always holds). """ -from __future__ import print_function import os import sys diff --git a/binary_search_tool/test/switch_tmp.py b/binary_search_tool/test/switch_tmp.py index 2ff35427..674433eb 100755 --- a/binary_search_tool/test/switch_tmp.py +++ b/binary_search_tool/test/switch_tmp.py @@ -12,7 +12,6 @@ this script) content. Every line in the file is an object index, which will be set to good (mark as 42). """ -from __future__ import print_function import sys diff --git a/binary_search_tool/test/switch_to_bad.py b/binary_search_tool/test/switch_to_bad.py index 17061dd3..c711d19f 100755 --- a/binary_search_tool/test/switch_to_bad.py +++ b/binary_search_tool/test/switch_to_bad.py @@ -6,7 +6,6 @@ """Switch part of the objects file in working set to (possible) bad ones.""" -from __future__ import print_function import sys diff --git a/binary_search_tool/test/switch_to_bad_noinc_prune.py b/binary_search_tool/test/switch_to_bad_noinc_prune.py index dd57324f..473ade81 100755 --- a/binary_search_tool/test/switch_to_bad_noinc_prune.py +++ b/binary_search_tool/test/switch_to_bad_noinc_prune.py @@ -18,7 +18,6 @@ that noincremental always holds). Warning: This switch script assumes the --file_args option """ -from __future__ import print_function import shutil import sys diff --git a/binary_search_tool/test/switch_to_bad_set_file.py b/binary_search_tool/test/switch_to_bad_set_file.py index 6a4f9131..002622de 100755 --- a/binary_search_tool/test/switch_to_bad_set_file.py +++ b/binary_search_tool/test/switch_to_bad_set_file.py @@ -10,7 +10,6 @@ This script is meant to be specifically used with the set_file test. 
This uses the set files generated by binary_search_state to do the switching. """ -from __future__ import print_function import os import sys diff --git a/binary_search_tool/test/switch_to_good.py b/binary_search_tool/test/switch_to_good.py index bcbe5c28..a4d173d2 100755 --- a/binary_search_tool/test/switch_to_good.py +++ b/binary_search_tool/test/switch_to_good.py @@ -11,7 +11,6 @@ this script) content. Every line in the file is an object index, which will be set to good (mark as 0). """ -from __future__ import print_function import sys diff --git a/binary_search_tool/test/switch_to_good_noinc_prune.py b/binary_search_tool/test/switch_to_good_noinc_prune.py index 37976668..8ed8d0ca 100755 --- a/binary_search_tool/test/switch_to_good_noinc_prune.py +++ b/binary_search_tool/test/switch_to_good_noinc_prune.py @@ -18,7 +18,6 @@ that noincremental always holds). Warning: This switch script assumes the --file_args option """ -from __future__ import print_function import shutil import sys diff --git a/binary_search_tool/test/switch_to_good_set_file.py b/binary_search_tool/test/switch_to_good_set_file.py index 89b8bf17..a1feaddf 100755 --- a/binary_search_tool/test/switch_to_good_set_file.py +++ b/binary_search_tool/test/switch_to_good_set_file.py @@ -14,7 +14,6 @@ This script is meant to be specifically used with the set_file test. This uses the set files generated by binary_search_state to do the switching. """ -from __future__ import print_function import os import sys diff --git a/binary_search_tool/test/test_setup.py b/binary_search_tool/test/test_setup.py index 4fe8c661..5ee35c55 100755 --- a/binary_search_tool/test/test_setup.py +++ b/binary_search_tool/test/test_setup.py @@ -6,7 +6,6 @@ """Emulate running of test setup script, is_good.py should fail without this.""" -from __future__ import print_function import sys diff --git a/binary_search_tool/test/test_setup_bad.py b/binary_search_tool/test/test_setup_bad.py index f34753bf..4879f455 100755 --- a/binary_search_tool/test/test_setup_bad.py +++ b/binary_search_tool/test/test_setup_bad.py @@ -6,7 +6,6 @@ """Emulate test setup that fails (i.e. failed flash to device)""" -from __future__ import print_function import sys diff --git a/build_chromeos.py b/build_chromeos.py index 3a6a17e1..b5bc48fa 100755 --- a/build_chromeos.py +++ b/build_chromeos.py @@ -11,8 +11,6 @@ This script sets up the ChromeOS source in the given directory, matching a particular release of ChromeOS. """ -from __future__ import print_function - __author__ = ( "asharif@google.com (Ahmad Sharif) " diff --git a/build_tc.py b/build_tc.py index 8eed86ee..f1d025a4 100755 --- a/build_tc.py +++ b/build_tc.py @@ -9,8 +9,6 @@ This script sets up the toolchain if you give it the gcctools directory. """ -from __future__ import print_function - __author__ = "asharif@google.com (Ahmad Sharif)" diff --git a/buildbot_test_llvm.py b/buildbot_test_llvm.py index 25d269f5..38d3ff49 100755 --- a/buildbot_test_llvm.py +++ b/buildbot_test_llvm.py @@ -16,7 +16,6 @@ well as copying the result into a directory. # Script to test different toolchains against ChromeOS benchmarks. -from __future__ import print_function import argparse import datetime diff --git a/buildbot_test_toolchains.py b/buildbot_test_toolchains.py index 9f82fe8f..639b817e 100755 --- a/buildbot_test_toolchains.py +++ b/buildbot_test_toolchains.py @@ -16,7 +16,6 @@ well as copying the images into the seven-day reports directory. # Script to test different toolchains against ChromeOS benchmarks. 
-from __future__ import print_function import argparse import datetime diff --git a/chromiumos_image_diff.py b/chromiumos_image_diff.py index 5943f3aa..3b3ae912 100755 --- a/chromiumos_image_diff.py +++ b/chromiumos_image_diff.py @@ -20,8 +20,6 @@ And this script should be executed outside chroot. """ -from __future__ import print_function - __author__ = "shenhan@google.com (Han Shen)" diff --git a/compiler_wrapper/build.py b/compiler_wrapper/build.py index e7486c54..3b4b0571 100755 --- a/compiler_wrapper/build.py +++ b/compiler_wrapper/build.py @@ -6,7 +6,6 @@ """Build script that builds a binary from a bundle.""" -from __future__ import print_function import argparse import os.path diff --git a/compiler_wrapper/bundle.py b/compiler_wrapper/bundle.py index 2624376b..ddfa4e9c 100755 --- a/compiler_wrapper/bundle.py +++ b/compiler_wrapper/bundle.py @@ -6,7 +6,6 @@ """Build script that copies the go sources to a build destination.""" -from __future__ import print_function import argparse import os.path diff --git a/cros_utils/buildbot_utils.py b/cros_utils/buildbot_utils.py index 6cf159d8..eb1d255c 100644 --- a/cros_utils/buildbot_utils.py +++ b/cros_utils/buildbot_utils.py @@ -5,8 +5,6 @@ """Utilities for launching and accessing ChromeOS buildbots.""" -from __future__ import division -from __future__ import print_function import ast import json diff --git a/cros_utils/buildbot_utils_unittest.py b/cros_utils/buildbot_utils_unittest.py index e12ea19e..31d3e947 100755 --- a/cros_utils/buildbot_utils_unittest.py +++ b/cros_utils/buildbot_utils_unittest.py @@ -7,7 +7,6 @@ """Unittest for buildbot_utils.py.""" -from __future__ import print_function import time import unittest diff --git a/cros_utils/command_executer.py b/cros_utils/command_executer.py index a30ba752..aa6de4c0 100755 --- a/cros_utils/command_executer.py +++ b/cros_utils/command_executer.py @@ -6,7 +6,6 @@ """Utilities to run commands in outside/inside chroot and on the board.""" -from __future__ import print_function import getpass import os diff --git a/cros_utils/command_executer_timeout_test.py b/cros_utils/command_executer_timeout_test.py index 2798d227..92f2e9ce 100755 --- a/cros_utils/command_executer_timeout_test.py +++ b/cros_utils/command_executer_timeout_test.py @@ -7,8 +7,6 @@ """Timeout test for command_executer.""" -from __future__ import print_function - __author__ = "asharif@google.com (Ahmad Sharif)" diff --git a/cros_utils/command_executer_unittest.py b/cros_utils/command_executer_unittest.py index aade4eb0..ecb85f93 100755 --- a/cros_utils/command_executer_unittest.py +++ b/cros_utils/command_executer_unittest.py @@ -6,7 +6,6 @@ """Unittest for command_executer.py.""" -from __future__ import print_function import time import unittest diff --git a/cros_utils/device_setup_utils.py b/cros_utils/device_setup_utils.py index 33f934c9..29284a5c 100644 --- a/cros_utils/device_setup_utils.py +++ b/cros_utils/device_setup_utils.py @@ -9,9 +9,6 @@ This script provides utils to set device specs. 
""" -from __future__ import division -from __future__ import print_function - __author__ = "zhizhouy@google.com (Zhizhou Yang)" diff --git a/cros_utils/device_setup_utils_unittest.py b/cros_utils/device_setup_utils_unittest.py index 76775c4d..8fe99bcb 100755 --- a/cros_utils/device_setup_utils_unittest.py +++ b/cros_utils/device_setup_utils_unittest.py @@ -7,7 +7,6 @@ """Unittest for device_setup_utils.""" -from __future__ import print_function import time import unittest diff --git a/cros_utils/email_sender.py b/cros_utils/email_sender.py index 0572d2e1..259078b1 100755 --- a/cros_utils/email_sender.py +++ b/cros_utils/email_sender.py @@ -7,7 +7,6 @@ """Utilities to send email either through SMTP or SendGMR.""" -from __future__ import print_function import base64 import contextlib diff --git a/cros_utils/email_sender_unittest.py b/cros_utils/email_sender_unittest.py index 26b5b9a0..38711061 100755 --- a/cros_utils/email_sender_unittest.py +++ b/cros_utils/email_sender_unittest.py @@ -7,7 +7,6 @@ """Tests for email_sender.""" -from __future__ import print_function import contextlib import io diff --git a/cros_utils/file_utils.py b/cros_utils/file_utils.py index 23b3969b..0c5213b5 100644 --- a/cros_utils/file_utils.py +++ b/cros_utils/file_utils.py @@ -5,7 +5,6 @@ """Utilities for operations on files.""" -from __future__ import print_function import errno import os diff --git a/cros_utils/locks.py b/cros_utils/locks.py index b7eacd39..bd2242cf 100644 --- a/cros_utils/locks.py +++ b/cros_utils/locks.py @@ -6,7 +6,6 @@ """Utilities for locking machines.""" -from __future__ import print_function import time diff --git a/cros_utils/logger.py b/cros_utils/logger.py index bf50e01e..8ad9df58 100644 --- a/cros_utils/logger.py +++ b/cros_utils/logger.py @@ -5,7 +5,6 @@ """Logging helper module.""" -from __future__ import print_function # System modules import os.path diff --git a/cros_utils/machines.py b/cros_utils/machines.py index bdd1f322..b0a7134c 100644 --- a/cros_utils/machines.py +++ b/cros_utils/machines.py @@ -5,7 +5,6 @@ """Utilities relating to machine-specific functions.""" -from __future__ import print_function from cros_utils import command_executer diff --git a/cros_utils/misc.py b/cros_utils/misc.py index 0b44b994..02c8e051 100644 --- a/cros_utils/misc.py +++ b/cros_utils/misc.py @@ -5,9 +5,6 @@ """Utilities for toolchain build.""" -from __future__ import division -from __future__ import print_function - __author__ = "asharif@google.com (Ahmad Sharif)" diff --git a/cros_utils/misc_test.py b/cros_utils/misc_test.py index 9713ae10..c3c96996 100755 --- a/cros_utils/misc_test.py +++ b/cros_utils/misc_test.py @@ -6,8 +6,6 @@ """Tests for misc.""" -from __future__ import print_function - __author__ = "asharif@google.com (Ahmad Sharif)" diff --git a/cros_utils/no_pseudo_terminal_test.py b/cros_utils/no_pseudo_terminal_test.py index 1980d401..3cb35fac 100755 --- a/cros_utils/no_pseudo_terminal_test.py +++ b/cros_utils/no_pseudo_terminal_test.py @@ -7,7 +7,6 @@ """Test to ensure we're not touching /dev/ptmx when running commands.""" -from __future__ import print_function import os import subprocess diff --git a/cros_utils/perf_diff.py b/cros_utils/perf_diff.py index 191a6ee0..97e6e19d 100755 --- a/cros_utils/perf_diff.py +++ b/cros_utils/perf_diff.py @@ -9,8 +9,6 @@ A detailed description of perf_diff. 
""" -from __future__ import print_function - __author__ = "asharif@google.com (Ahmad Sharif)" diff --git a/cros_utils/tabulator.py b/cros_utils/tabulator.py index 7b51bf86..65d0cd42 100644 --- a/cros_utils/tabulator.py +++ b/cros_utils/tabulator.py @@ -61,8 +61,6 @@ table: print tp.Print() """ -from __future__ import division -from __future__ import print_function import collections import getpass diff --git a/cros_utils/tabulator_test.py b/cros_utils/tabulator_test.py index 5a5d909e..96d163e5 100755 --- a/cros_utils/tabulator_test.py +++ b/cros_utils/tabulator_test.py @@ -6,7 +6,6 @@ """Tests for the tabulator module.""" -from __future__ import print_function __author__ = 'asharif@google.com (Ahmad Sharif)' diff --git a/cros_utils/timeline.py b/cros_utils/timeline.py index d6d4cc0b..be8f9a06 100644 --- a/cros_utils/timeline.py +++ b/cros_utils/timeline.py @@ -5,8 +5,6 @@ """Tools for recording and reporting timeline of benchmark_run.""" -from __future__ import print_function - __author__ = "yunlian@google.com (Yunlian Jiang)" diff --git a/cros_utils/timeline_test.py b/cros_utils/timeline_test.py index 6743b986..9dc73d91 100755 --- a/cros_utils/timeline_test.py +++ b/cros_utils/timeline_test.py @@ -6,8 +6,6 @@ """Tests for time_line.py.""" -from __future__ import print_function - __author__ = "yunlian@google.com (Yunlian Jiang)" diff --git a/cros_utils/tiny_render.py b/cros_utils/tiny_render.py index 978bf05c..27891c20 100644 --- a/cros_utils/tiny_render.py +++ b/cros_utils/tiny_render.py @@ -51,7 +51,6 @@ Turns into The rendering functions should never mutate your input. """ -from __future__ import print_function import collections import html diff --git a/cros_utils/tiny_render_test.py b/cros_utils/tiny_render_test.py index 6534a9d7..8299093b 100755 --- a/cros_utils/tiny_render_test.py +++ b/cros_utils/tiny_render_test.py @@ -6,7 +6,6 @@ """Tests for tiny_render.""" -from __future__ import print_function import unittest diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py index 473ab547..d7c62c54 100644 --- a/crosperf/benchmark.py +++ b/crosperf/benchmark.py @@ -5,8 +5,6 @@ """Define a type that wraps a Benchmark instance.""" -from __future__ import division -from __future__ import print_function import math diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py index 79cfdd1c..a661d6a9 100644 --- a/crosperf/benchmark_run.py +++ b/crosperf/benchmark_run.py @@ -4,7 +4,6 @@ # found in the LICENSE file. 
"""Module of benchmark runs.""" -from __future__ import print_function import datetime import threading diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py index e59b275c..7113826a 100755 --- a/crosperf/benchmark_run_unittest.py +++ b/crosperf/benchmark_run_unittest.py @@ -7,7 +7,6 @@ """Testing of benchmark_run.""" -from __future__ import print_function import inspect import unittest diff --git a/crosperf/benchmark_unittest.py b/crosperf/benchmark_unittest.py index 31a95f25..bfa16841 100755 --- a/crosperf/benchmark_unittest.py +++ b/crosperf/benchmark_unittest.py @@ -7,7 +7,6 @@ """Unit tests for the Crosperf Benchmark class.""" -from __future__ import print_function import inspect import unittest diff --git a/crosperf/compare_machines.py b/crosperf/compare_machines.py index 338c039b..003a38ba 100644 --- a/crosperf/compare_machines.py +++ b/crosperf/compare_machines.py @@ -5,7 +5,6 @@ """Module to compare two machines.""" -from __future__ import print_function import argparse import os.path diff --git a/crosperf/config_unittest.py b/crosperf/config_unittest.py index 47387c71..df02786e 100755 --- a/crosperf/config_unittest.py +++ b/crosperf/config_unittest.py @@ -6,7 +6,6 @@ """Unit tests for config.py""" -from __future__ import print_function import unittest diff --git a/crosperf/crosperf.py b/crosperf/crosperf.py index 7ce78afd..813da415 100755 --- a/crosperf/crosperf.py +++ b/crosperf/crosperf.py @@ -6,7 +6,6 @@ """The driver script for running performance benchmarks on ChromeOS.""" -from __future__ import print_function import argparse import atexit diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py index 4cbf71d8..88172ec6 100755 --- a/crosperf/crosperf_unittest.py +++ b/crosperf/crosperf_unittest.py @@ -7,8 +7,6 @@ """Unittest for crosperf.""" -from __future__ import division -from __future__ import print_function import argparse import io diff --git a/crosperf/download_images.py b/crosperf/download_images.py index da73d941..7dc52495 100644 --- a/crosperf/download_images.py +++ b/crosperf/download_images.py @@ -5,7 +5,6 @@ """Download images from Cloud Storage.""" -from __future__ import print_function import ast import os diff --git a/crosperf/download_images_buildid_test.py b/crosperf/download_images_buildid_test.py index b5063ed9..37868e0a 100755 --- a/crosperf/download_images_buildid_test.py +++ b/crosperf/download_images_buildid_test.py @@ -6,7 +6,6 @@ """Test translation of xbuddy names.""" -from __future__ import print_function import argparse import sys diff --git a/crosperf/download_images_unittest.py b/crosperf/download_images_unittest.py index 5206bd3d..8e8c6524 100755 --- a/crosperf/download_images_unittest.py +++ b/crosperf/download_images_unittest.py @@ -6,7 +6,6 @@ """Download image unittest.""" -from __future__ import print_function import os import unittest diff --git a/crosperf/experiment.py b/crosperf/experiment.py index cfd56b8f..28ab616b 100644 --- a/crosperf/experiment.py +++ b/crosperf/experiment.py @@ -5,7 +5,6 @@ """The experiment setting module.""" -from __future__ import print_function import os from threading import Lock diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py index 9a89cb9c..a4265d41 100644 --- a/crosperf/experiment_factory.py +++ b/crosperf/experiment_factory.py @@ -5,7 +5,6 @@ """A module to generate experiments.""" -from __future__ import print_function import os import re diff --git a/crosperf/experiment_factory_unittest.py 
b/crosperf/experiment_factory_unittest.py index 115061e6..ffb8e579 100755 --- a/crosperf/experiment_factory_unittest.py +++ b/crosperf/experiment_factory_unittest.py @@ -7,7 +7,6 @@ """Unit test for experiment_factory.py""" -from __future__ import print_function import io import os diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py index fbf16fe9..783a3224 100644 --- a/crosperf/experiment_file.py +++ b/crosperf/experiment_file.py @@ -5,7 +5,6 @@ """The experiment file module. It manages the input file of crosperf.""" -from __future__ import print_function import os.path import re diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py index 90c70fb3..90ea87a6 100755 --- a/crosperf/experiment_file_unittest.py +++ b/crosperf/experiment_file_unittest.py @@ -6,7 +6,6 @@ # found in the LICENSE file. """The unittest of experiment_file.""" -from __future__ import print_function import io import unittest diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py index c65917c3..1f1a90b2 100644 --- a/crosperf/experiment_runner.py +++ b/crosperf/experiment_runner.py @@ -4,7 +4,6 @@ # found in the LICENSE file. """The experiment runner module.""" -from __future__ import print_function import getpass import os diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py index 241e1343..50ef1797 100755 --- a/crosperf/experiment_runner_unittest.py +++ b/crosperf/experiment_runner_unittest.py @@ -7,7 +7,6 @@ """Tests for the experiment runner module.""" -from __future__ import print_function import getpass import io diff --git a/crosperf/experiment_status.py b/crosperf/experiment_status.py index 4bd3995e..c76dfa24 100644 --- a/crosperf/experiment_status.py +++ b/crosperf/experiment_status.py @@ -5,8 +5,6 @@ """The class to show the banner.""" -from __future__ import division -from __future__ import print_function import collections import datetime diff --git a/crosperf/flag_test_unittest.py b/crosperf/flag_test_unittest.py index 1efc9167..7bb59515 100755 --- a/crosperf/flag_test_unittest.py +++ b/crosperf/flag_test_unittest.py @@ -6,7 +6,6 @@ """The unittest of flags.""" -from __future__ import print_function import unittest diff --git a/crosperf/generate_report.py b/crosperf/generate_report.py index 54cf4d91..186aba29 100755 --- a/crosperf/generate_report.py +++ b/crosperf/generate_report.py @@ -44,8 +44,6 @@ Peppy's runs took 1.321ms and 1.920ms, while peppy-new-crosstool's took 1.221ms and 1.423ms. None of the runs failed to complete. 
""" -from __future__ import division -from __future__ import print_function import argparse import functools diff --git a/crosperf/generate_report_unittest.py b/crosperf/generate_report_unittest.py index dbbd08f4..0d4ccf4f 100755 --- a/crosperf/generate_report_unittest.py +++ b/crosperf/generate_report_unittest.py @@ -6,8 +6,6 @@ """Test for generate_report.py.""" -from __future__ import division -from __future__ import print_function import copy import json diff --git a/crosperf/help.py b/crosperf/help.py index 660e2a4b..1f3c4f36 100644 --- a/crosperf/help.py +++ b/crosperf/help.py @@ -5,7 +5,6 @@ """Module to print help message.""" -from __future__ import print_function import sys import textwrap diff --git a/crosperf/image_checksummer.py b/crosperf/image_checksummer.py index 1fa25cfa..133a0576 100644 --- a/crosperf/image_checksummer.py +++ b/crosperf/image_checksummer.py @@ -5,7 +5,6 @@ """Compute image checksum.""" -from __future__ import print_function import os import threading diff --git a/crosperf/label.py b/crosperf/label.py index 0ce3957b..8785c037 100644 --- a/crosperf/label.py +++ b/crosperf/label.py @@ -5,7 +5,6 @@ """The label of benchamrks.""" -from __future__ import print_function import hashlib import os diff --git a/crosperf/machine_image_manager.py b/crosperf/machine_image_manager.py index 753ce0fe..c61d624e 100644 --- a/crosperf/machine_image_manager.py +++ b/crosperf/machine_image_manager.py @@ -5,7 +5,6 @@ """MachineImageManager allocates images to duts.""" -from __future__ import print_function import functools diff --git a/crosperf/machine_image_manager_unittest.py b/crosperf/machine_image_manager_unittest.py index e93a5646..dd557cdc 100755 --- a/crosperf/machine_image_manager_unittest.py +++ b/crosperf/machine_image_manager_unittest.py @@ -6,7 +6,6 @@ """Unit tests for the MachineImageManager class.""" -from __future__ import print_function import random import unittest diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py index 5c8af75a..c780094b 100644 --- a/crosperf/machine_manager.py +++ b/crosperf/machine_manager.py @@ -5,8 +5,6 @@ """Machine Manager module.""" -from __future__ import division -from __future__ import print_function import collections import hashlib diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py index 80b3dd11..5eed5e1f 100755 --- a/crosperf/machine_manager_unittest.py +++ b/crosperf/machine_manager_unittest.py @@ -7,7 +7,6 @@ """Unittest for machine_manager.""" -from __future__ import print_function import hashlib import os.path diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py index a596662e..a0d581cd 100644 --- a/crosperf/mock_instance.py +++ b/crosperf/mock_instance.py @@ -5,7 +5,6 @@ """This contains some mock instances for testing.""" -from __future__ import print_function from benchmark import Benchmark from label import MockLabel diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py index 3dd6839a..ca5966c0 100644 --- a/crosperf/results_cache.py +++ b/crosperf/results_cache.py @@ -5,8 +5,6 @@ """Module to deal with result cache.""" -from __future__ import division -from __future__ import print_function import collections import glob diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py index dbf5d672..8029161a 100755 --- a/crosperf/results_cache_unittest.py +++ b/crosperf/results_cache_unittest.py @@ -7,7 +7,6 @@ """Module of result cache unittest.""" -from __future__ import print_function import io import os diff 
--git a/crosperf/results_organizer.py b/crosperf/results_organizer.py index 59ac685b..a3701ab7 100644 --- a/crosperf/results_organizer.py +++ b/crosperf/results_organizer.py @@ -5,7 +5,6 @@ """Parse data from benchmark_runs for tabulator.""" -from __future__ import print_function import errno import json diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py index 90a95a73..707f89f7 100755 --- a/crosperf/results_organizer_unittest.py +++ b/crosperf/results_organizer_unittest.py @@ -11,7 +11,6 @@ after that, we compare the result of ResultOrganizer. """ -from __future__ import print_function import unittest diff --git a/crosperf/results_report.py b/crosperf/results_report.py index 50412086..735e1a34 100644 --- a/crosperf/results_report.py +++ b/crosperf/results_report.py @@ -4,7 +4,6 @@ # found in the LICENSE file. """A module to handle the report format.""" -from __future__ import print_function import datetime import functools diff --git a/crosperf/results_report_templates.py b/crosperf/results_report_templates.py index ec87ac41..e88fd9c7 100644 --- a/crosperf/results_report_templates.py +++ b/crosperf/results_report_templates.py @@ -4,7 +4,6 @@ # found in the LICENSE file. """Text templates used by various parts of results_report.""" -from __future__ import print_function import html from string import Template diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py index ef073a71..3c1d6663 100755 --- a/crosperf/results_report_unittest.py +++ b/crosperf/results_report_unittest.py @@ -7,8 +7,6 @@ """Unittest for the results reporter.""" -from __future__ import division -from __future__ import print_function import collections import io diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py index 692f3420..c611cbcf 100644 --- a/crosperf/schedv2.py +++ b/crosperf/schedv2.py @@ -5,8 +5,6 @@ """Module to optimize the scheduling of benchmark_run tasks.""" -from __future__ import division -from __future__ import print_function from collections import defaultdict import sys diff --git a/crosperf/schedv2_unittest.py b/crosperf/schedv2_unittest.py index e939bc5b..c79c6ecd 100755 --- a/crosperf/schedv2_unittest.py +++ b/crosperf/schedv2_unittest.py @@ -7,7 +7,6 @@ """This contains the unit tests for the new Crosperf task scheduler.""" -from __future__ import print_function import functools import io diff --git a/crosperf/settings.py b/crosperf/settings.py index 5ea25927..5488a5b0 100644 --- a/crosperf/settings.py +++ b/crosperf/settings.py @@ -5,7 +5,6 @@ """Module to get the settings from experiment file.""" -from __future__ import print_function from cros_utils import logger from cros_utils import misc diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py index b91156dc..469d2260 100644 --- a/crosperf/settings_factory.py +++ b/crosperf/settings_factory.py @@ -5,7 +5,6 @@ """Setting files for global, benchmark and labels.""" -from __future__ import print_function from field import BooleanField from field import EnumField diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py index 031a0e65..b8487b96 100755 --- a/crosperf/settings_factory_unittest.py +++ b/crosperf/settings_factory_unittest.py @@ -7,7 +7,6 @@ """Unittest for crosperf.""" -from __future__ import print_function import unittest diff --git a/crosperf/settings_unittest.py b/crosperf/settings_unittest.py index 0128c33e..75913a09 100755 --- a/crosperf/settings_unittest.py +++ b/crosperf/settings_unittest.py @@ -6,7 
+6,6 @@ """unittest for settings.""" -from __future__ import print_function import unittest import unittest.mock as mock diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py index b3c5879d..f5c4d2c7 100644 --- a/crosperf/suite_runner.py +++ b/crosperf/suite_runner.py @@ -5,8 +5,6 @@ """SuiteRunner defines the interface from crosperf to test script.""" -from __future__ import division -from __future__ import print_function import json import os diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py index a97e1638..a1ab660f 100755 --- a/crosperf/suite_runner_unittest.py +++ b/crosperf/suite_runner_unittest.py @@ -7,7 +7,6 @@ """Unittest for suite_runner.""" -from __future__ import print_function import json import unittest diff --git a/crosperf/translate_xbuddy.py b/crosperf/translate_xbuddy.py index eb28ecae..2ae60af4 100755 --- a/crosperf/translate_xbuddy.py +++ b/crosperf/translate_xbuddy.py @@ -6,7 +6,6 @@ """Module to translate the xbuddy config.""" -from __future__ import print_function import os import sys diff --git a/cwp/cr-os/fetch_gn_descs.py b/cwp/cr-os/fetch_gn_descs.py index 220511bf..a9cbdb55 100755 --- a/cwp/cr-os/fetch_gn_descs.py +++ b/cwp/cr-os/fetch_gn_descs.py @@ -19,7 +19,6 @@ The result is of the form: } """ -from __future__ import print_function import argparse import json diff --git a/cwp/cr-os/fetch_gn_descs_test.py b/cwp/cr-os/fetch_gn_descs_test.py index f9a9cf93..a42cb2e9 100755 --- a/cwp/cr-os/fetch_gn_descs_test.py +++ b/cwp/cr-os/fetch_gn_descs_test.py @@ -6,7 +6,6 @@ """Tests for fetch_gn_descs.py.""" -from __future__ import print_function import io import unittest diff --git a/debug_info_test/allowlist.py b/debug_info_test/allowlist.py index 945c0440..3cccf293 100644 --- a/debug_info_test/allowlist.py +++ b/debug_info_test/allowlist.py @@ -5,7 +5,6 @@ """Allowlist functions.""" -from __future__ import print_function import glob import os diff --git a/debug_info_test/check_cus.py b/debug_info_test/check_cus.py index df2c4275..b2458e94 100644 --- a/debug_info_test/check_cus.py +++ b/debug_info_test/check_cus.py @@ -5,7 +5,6 @@ """check compile units.""" -from __future__ import print_function import os import subprocess diff --git a/debug_info_test/check_exist.py b/debug_info_test/check_exist.py index 795cb470..d51abe42 100644 --- a/debug_info_test/check_exist.py +++ b/debug_info_test/check_exist.py @@ -5,7 +5,6 @@ """check whether intended components exists in the given dso.""" -from __future__ import print_function import os import subprocess diff --git a/debug_info_test/check_icf.py b/debug_info_test/check_icf.py index 5e92ec13..8eca39a2 100644 --- a/debug_info_test/check_icf.py +++ b/debug_info_test/check_icf.py @@ -5,7 +5,6 @@ """check whether chrome was built with identical code folding.""" -from __future__ import print_function import os import re diff --git a/debug_info_test/check_ngcc.py b/debug_info_test/check_ngcc.py index 60508691..348fcc8a 100644 --- a/debug_info_test/check_ngcc.py +++ b/debug_info_test/check_ngcc.py @@ -5,7 +5,6 @@ """Check whether the compile unit is not built by gcc.""" -from __future__ import print_function from allowlist import is_allowlisted diff --git a/debug_info_test/debug_info_test.py b/debug_info_test/debug_info_test.py index ab123e0b..057921aa 100755 --- a/debug_info_test/debug_info_test.py +++ b/debug_info_test/debug_info_test.py @@ -6,7 +6,6 @@ """Test for debug info.""" -from __future__ import print_function import os import subprocess diff --git a/file_lock_machine.py 
b/file_lock_machine.py index 72d6233a..2b281ff7 100755 --- a/file_lock_machine.py +++ b/file_lock_machine.py @@ -6,9 +6,6 @@ """Script to lock/unlock machines.""" -from __future__ import division -from __future__ import print_function - __author__ = "asharif@google.com (Ahmad Sharif)" diff --git a/file_lock_machine_test.py b/file_lock_machine_test.py index d1189512..d59f7e28 100755 --- a/file_lock_machine_test.py +++ b/file_lock_machine_test.py @@ -10,8 +10,6 @@ MachineManagerTest tests MachineManager. """ -from __future__ import print_function - __author__ = "asharif@google.com (Ahmad Sharif)" diff --git a/go/chromeos/setup_chromeos_testing.py b/go/chromeos/setup_chromeos_testing.py index 53254a99..863cf8c8 100755 --- a/go/chromeos/setup_chromeos_testing.py +++ b/go/chromeos/setup_chromeos_testing.py @@ -6,7 +6,6 @@ """Generate board-specific scripts for Go compiler testing.""" -from __future__ import print_function import argparse import getpass diff --git a/heatmaps/heat_map.py b/heatmaps/heat_map.py index c4e43fdc..78ee8cba 100755 --- a/heatmaps/heat_map.py +++ b/heatmaps/heat_map.py @@ -6,7 +6,6 @@ """Wrapper to generate heat maps for chrome.""" -from __future__ import print_function import argparse import os diff --git a/heatmaps/heat_map_test.py b/heatmaps/heat_map_test.py index 0d3ca4e2..2b86363b 100755 --- a/heatmaps/heat_map_test.py +++ b/heatmaps/heat_map_test.py @@ -7,7 +7,6 @@ """Tests for heat_map.py.""" -from __future__ import print_function import os import unittest diff --git a/heatmaps/heatmap_generator.py b/heatmaps/heatmap_generator.py index c139c364..768a4e40 100644 --- a/heatmaps/heatmap_generator.py +++ b/heatmaps/heatmap_generator.py @@ -13,8 +13,6 @@ performed by another script perf-to-inst-page.sh). It can also analyze the symbol names in hot pages. """ -from __future__ import division -from __future__ import print_function import bisect import collections diff --git a/heatmaps/heatmap_generator_test.py b/heatmaps/heatmap_generator_test.py index 4afc9351..fbace10a 100755 --- a/heatmaps/heatmap_generator_test.py +++ b/heatmaps/heatmap_generator_test.py @@ -6,8 +6,6 @@ """Tests for heatmap_generator.py.""" -from __future__ import division -from __future__ import print_function import os import unittest diff --git a/image_chromeos.py b/image_chromeos.py index 150c7de0..5922a241 100755 --- a/image_chromeos.py +++ b/image_chromeos.py @@ -10,8 +10,6 @@ This script images a remote ChromeOS device with a specific image." 
""" -from __future__ import print_function - __author__ = "asharif@google.com (Ahmad Sharif)" diff --git a/llvm_extra/create_ebuild_file.py b/llvm_extra/create_ebuild_file.py index d974d50c..4abd466a 100755 --- a/llvm_extra/create_ebuild_file.py +++ b/llvm_extra/create_ebuild_file.py @@ -90,7 +90,6 @@ diff -Nuar llvm-pre7.0_pre335547_p20180529.ebuild newly-created-file.ebuild # some users may find it useful """ -from __future__ import print_function import os import sys diff --git a/llvm_tools/auto_llvm_bisection.py b/llvm_tools/auto_llvm_bisection.py index b9d04d1d..0dcd6bad 100755 --- a/llvm_tools/auto_llvm_bisection.py +++ b/llvm_tools/auto_llvm_bisection.py @@ -6,7 +6,6 @@ """Performs bisection on LLVM based off a .JSON file.""" -from __future__ import print_function import enum import json diff --git a/llvm_tools/auto_llvm_bisection_unittest.py b/llvm_tools/auto_llvm_bisection_unittest.py index 9d0654cf..ef82453b 100755 --- a/llvm_tools/auto_llvm_bisection_unittest.py +++ b/llvm_tools/auto_llvm_bisection_unittest.py @@ -6,7 +6,6 @@ """Tests for auto bisection of LLVM.""" -from __future__ import print_function import json import os diff --git a/llvm_tools/chroot.py b/llvm_tools/chroot.py index 3a3bdde4..73ab9203 100755 --- a/llvm_tools/chroot.py +++ b/llvm_tools/chroot.py @@ -6,7 +6,6 @@ """Chroot helper functions.""" -from __future__ import print_function import collections import os diff --git a/llvm_tools/chroot_unittest.py b/llvm_tools/chroot_unittest.py index 0e7d133c..39877aa5 100755 --- a/llvm_tools/chroot_unittest.py +++ b/llvm_tools/chroot_unittest.py @@ -6,7 +6,6 @@ """Unit tests for chroot helper functions.""" -from __future__ import print_function import subprocess import unittest diff --git a/llvm_tools/copy_helpers_to_chromiumos_overlay.py b/llvm_tools/copy_helpers_to_chromiumos_overlay.py index 042b19fa..758c7533 100755 --- a/llvm_tools/copy_helpers_to_chromiumos_overlay.py +++ b/llvm_tools/copy_helpers_to_chromiumos_overlay.py @@ -11,7 +11,6 @@ patch_manager ones). This script simplifies the copying of those around. 
""" # Necessary until crbug.com/1006448 is fixed -from __future__ import print_function import argparse import os diff --git a/llvm_tools/custom_script_example.py b/llvm_tools/custom_script_example.py index 4b90e88b..ebfde1f1 100755 --- a/llvm_tools/custom_script_example.py +++ b/llvm_tools/custom_script_example.py @@ -6,7 +6,6 @@ """A custom script example that utilizes the .JSON contents of the tryjob.""" -from __future__ import print_function import json import sys diff --git a/llvm_tools/failure_modes.py b/llvm_tools/failure_modes.py index f043b1ec..098a0270 100644 --- a/llvm_tools/failure_modes.py +++ b/llvm_tools/failure_modes.py @@ -5,7 +5,6 @@ """Failure mode constants avaiable to the patch manager.""" -from __future__ import print_function import enum diff --git a/llvm_tools/get_llvm_hash.py b/llvm_tools/get_llvm_hash.py index 9c0a5020..77efccbe 100755 --- a/llvm_tools/get_llvm_hash.py +++ b/llvm_tools/get_llvm_hash.py @@ -6,7 +6,6 @@ """Returns the latest LLVM version's hash.""" -from __future__ import print_function import argparse import contextlib diff --git a/llvm_tools/get_llvm_hash_unittest.py b/llvm_tools/get_llvm_hash_unittest.py index 32fb5b53..fc13bc17 100755 --- a/llvm_tools/get_llvm_hash_unittest.py +++ b/llvm_tools/get_llvm_hash_unittest.py @@ -6,7 +6,6 @@ """Unit tests for retrieving the LLVM hash.""" -from __future__ import print_function import subprocess import unittest diff --git a/llvm_tools/git.py b/llvm_tools/git.py index 0fe4cb63..2bb43c29 100755 --- a/llvm_tools/git.py +++ b/llvm_tools/git.py @@ -6,7 +6,6 @@ """Git helper functions.""" -from __future__ import print_function import collections import os diff --git a/llvm_tools/git_llvm_rev.py b/llvm_tools/git_llvm_rev.py index 283a3920..44a6426e 100755 --- a/llvm_tools/git_llvm_rev.py +++ b/llvm_tools/git_llvm_rev.py @@ -10,7 +10,6 @@ Revision numbers are all of the form '(branch_name, r1234)'. As a shorthand, r1234 is parsed as '(main, 1234)'. """ -from __future__ import print_function import argparse import re diff --git a/llvm_tools/git_unittest.py b/llvm_tools/git_unittest.py index 8e75100f..cf4035ff 100755 --- a/llvm_tools/git_unittest.py +++ b/llvm_tools/git_unittest.py @@ -6,7 +6,6 @@ """Unit tests for git helper functions.""" -from __future__ import print_function import os import subprocess diff --git a/llvm_tools/llvm_bisection.py b/llvm_tools/llvm_bisection.py index f268bfb1..e6f0e2c9 100755 --- a/llvm_tools/llvm_bisection.py +++ b/llvm_tools/llvm_bisection.py @@ -6,7 +6,6 @@ """Performs bisection on LLVM based off a .JSON file.""" -from __future__ import print_function import argparse import enum diff --git a/llvm_tools/llvm_bisection_unittest.py b/llvm_tools/llvm_bisection_unittest.py index 0dfdef54..e047be41 100755 --- a/llvm_tools/llvm_bisection_unittest.py +++ b/llvm_tools/llvm_bisection_unittest.py @@ -8,7 +8,6 @@ """Tests for LLVM bisection.""" -from __future__ import print_function import json import os diff --git a/llvm_tools/llvm_project.py b/llvm_tools/llvm_project.py index 85b4a0c2..45539596 100644 --- a/llvm_tools/llvm_project.py +++ b/llvm_tools/llvm_project.py @@ -5,7 +5,6 @@ """Module for manipulating llvm-project-copy. 
Generally intended for tests.""" -from __future__ import print_function import datetime import os diff --git a/llvm_tools/modify_a_tryjob.py b/llvm_tools/modify_a_tryjob.py index 6ef12008..79f999b1 100755 --- a/llvm_tools/modify_a_tryjob.py +++ b/llvm_tools/modify_a_tryjob.py @@ -6,7 +6,6 @@ """Modifies a tryjob based off of arguments.""" -from __future__ import print_function import argparse import enum diff --git a/llvm_tools/modify_a_tryjob_unittest.py b/llvm_tools/modify_a_tryjob_unittest.py index 38ebccad..6ae29c5f 100755 --- a/llvm_tools/modify_a_tryjob_unittest.py +++ b/llvm_tools/modify_a_tryjob_unittest.py @@ -6,7 +6,6 @@ """Tests for modifying a tryjob.""" -from __future__ import print_function import json import unittest diff --git a/llvm_tools/nightly_revert_checker.py b/llvm_tools/nightly_revert_checker.py index 17b1c40f..78a85f90 100755 --- a/llvm_tools/nightly_revert_checker.py +++ b/llvm_tools/nightly_revert_checker.py @@ -10,7 +10,6 @@ If any reverts are found that were previously unknown, this cherry-picks them or fires off an email. All LLVM SHAs to monitor are autodetected. """ -from __future__ import print_function import argparse import io diff --git a/llvm_tools/nightly_revert_checker_test.py b/llvm_tools/nightly_revert_checker_test.py index 2064cf96..694d3d80 100755 --- a/llvm_tools/nightly_revert_checker_test.py +++ b/llvm_tools/nightly_revert_checker_test.py @@ -6,7 +6,6 @@ """Tests for nightly_revert_checker.""" -from __future__ import print_function import io import unittest diff --git a/llvm_tools/subprocess_helpers.py b/llvm_tools/subprocess_helpers.py index d4f545d2..c295cbb8 100644 --- a/llvm_tools/subprocess_helpers.py +++ b/llvm_tools/subprocess_helpers.py @@ -5,7 +5,6 @@ """Helpers/wrappers for the subprocess module for migration to python3.""" -from __future__ import print_function import subprocess diff --git a/llvm_tools/test_helpers.py b/llvm_tools/test_helpers.py index 2391a48c..48a8e8c1 100644 --- a/llvm_tools/test_helpers.py +++ b/llvm_tools/test_helpers.py @@ -5,7 +5,6 @@ """Helper functions for unit testing.""" -from __future__ import print_function from contextlib import contextmanager import json diff --git a/llvm_tools/update_chromeos_llvm_hash_unittest.py b/llvm_tools/update_chromeos_llvm_hash_unittest.py index c361334a..591890e6 100755 --- a/llvm_tools/update_chromeos_llvm_hash_unittest.py +++ b/llvm_tools/update_chromeos_llvm_hash_unittest.py @@ -6,7 +6,6 @@ """Unit tests for updating LLVM hashes.""" -from __future__ import print_function import collections import datetime diff --git a/llvm_tools/update_packages_and_run_tests.py b/llvm_tools/update_packages_and_run_tests.py index 5d004546..cc96ec53 100755 --- a/llvm_tools/update_packages_and_run_tests.py +++ b/llvm_tools/update_packages_and_run_tests.py @@ -6,7 +6,6 @@ """Runs a tryjob/tryjobs after updating the packages.""" -from __future__ import print_function import argparse import datetime diff --git a/llvm_tools/update_packages_and_run_tests_unittest.py b/llvm_tools/update_packages_and_run_tests_unittest.py index 0b029e04..cff28672 100755 --- a/llvm_tools/update_packages_and_run_tests_unittest.py +++ b/llvm_tools/update_packages_and_run_tests_unittest.py @@ -6,7 +6,6 @@ """Unittests for running tests after updating packages.""" -from __future__ import print_function import json import subprocess diff --git a/llvm_tools/update_tryjob_status.py b/llvm_tools/update_tryjob_status.py index ea7fe9c0..4c72403d 100755 --- a/llvm_tools/update_tryjob_status.py +++ 
b/llvm_tools/update_tryjob_status.py @@ -6,7 +6,6 @@ """Updates the status of a tryjob.""" -from __future__ import print_function import argparse import enum diff --git a/llvm_tools/update_tryjob_status_unittest.py b/llvm_tools/update_tryjob_status_unittest.py index b6fc59c8..e73cb281 100755 --- a/llvm_tools/update_tryjob_status_unittest.py +++ b/llvm_tools/update_tryjob_status_unittest.py @@ -6,7 +6,6 @@ """Tests when updating a tryjob's status.""" -from __future__ import print_function import json import os diff --git a/lock_machine.py b/lock_machine.py index 030d7d45..e7befdb2 100755 --- a/lock_machine.py +++ b/lock_machine.py @@ -7,7 +7,6 @@ """This module controls locking and unlocking of test machines.""" -from __future__ import print_function import argparse import enum diff --git a/make_root_writable.py b/make_root_writable.py index 500f8fe5..297a9150 100755 --- a/make_root_writable.py +++ b/make_root_writable.py @@ -10,8 +10,6 @@ This script updates a remote chromebook to make the / directory writable." """ -from __future__ import print_function - __author__ = "cmtice@google.com (Caroline Tice)" diff --git a/orderfile/post_process_orderfile.py b/orderfile/post_process_orderfile.py index 5d98756b..4f61c19f 100755 --- a/orderfile/post_process_orderfile.py +++ b/orderfile/post_process_orderfile.py @@ -19,8 +19,6 @@ The results of the file is intended to be uploaded and consumed when linking Chrome in ChromeOS. """ -from __future__ import division -from __future__ import print_function import argparse import os diff --git a/orderfile/post_process_orderfile_test.py b/orderfile/post_process_orderfile_test.py index 4eb36699..d7ee3f8b 100755 --- a/orderfile/post_process_orderfile_test.py +++ b/orderfile/post_process_orderfile_test.py @@ -6,8 +6,6 @@ """Tests for post_process_orderfile.py.""" -from __future__ import division -from __future__ import print_function import os import shutil diff --git a/pgo_tools/merge_profdata_and_upload.py b/pgo_tools/merge_profdata_and_upload.py index 768a1a57..efc986f4 100755 --- a/pgo_tools/merge_profdata_and_upload.py +++ b/pgo_tools/merge_profdata_and_upload.py @@ -35,7 +35,6 @@ In this example, the script will merge profdata from arm and amd64 builder, and profdata from an arm64 buildbucket task. """ -from __future__ import print_function import argparse import collections diff --git a/remote_test.py b/remote_test.py index 574d6762..c3c88b5c 100755 --- a/remote_test.py +++ b/remote_test.py @@ -10,8 +10,6 @@ This script can login to the chromeos machine using the test private key. """ -from __future__ import print_function - __author__ = "asharif@google.com (Ahmad Sharif)" diff --git a/run_tests_for.py b/run_tests_for.py index 93d48984..a8ad1674 100755 --- a/run_tests_for.py +++ b/run_tests_for.py @@ -24,7 +24,6 @@ All tests are run in parallel. # of the chroot a bit more obnoxious?), but might be worth exploring if this # starts to grow quite complex on its own. -from __future__ import print_function import argparse import collections diff --git a/tc_enter_chroot.py b/tc_enter_chroot.py index 0dcadb07..c746a0a2 100755 --- a/tc_enter_chroot.py +++ b/tc_enter_chroot.py @@ -9,8 +9,6 @@ This script enters the chroot with mounted sources. 
""" -from __future__ import print_function - __author__ = "asharif@google.com (Ahmad Sharif)" diff --git a/update_telemetry_defaults.py b/update_telemetry_defaults.py index 88420b64..a3feb6a0 100755 --- a/update_telemetry_defaults.py +++ b/update_telemetry_defaults.py @@ -11,8 +11,6 @@ results to be used in generating reports from running the Telemetry benchmarks. """ -from __future__ import print_function - __author__ = "cmtice@google.com (Caroline Tice)" -- cgit v1.2.3 From 6cb38a6c19295fd4db8db03046cb819eba217dae Mon Sep 17 00:00:00 2001 From: George Burgess IV <gbiv@google.com> Date: Tue, 6 Sep 2022 15:19:39 -0700 Subject: check-presubmit: use subprocess.run Fixing feedback from crrev.com/c/3877334, which isn't strictly related to that change, but is still useful to clean up BUG=None TEST=`repo upload` Change-Id: I1e183e174cc99dc9e309cc53fbde9f545eb8536f Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3877525 Tested-by: George Burgess <gbiv@chromium.org> Commit-Queue: George Burgess <gbiv@chromium.org> Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com> --- toolchain_utils_githooks/check-presubmit.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/toolchain_utils_githooks/check-presubmit.py b/toolchain_utils_githooks/check-presubmit.py index f9da974b..691e2510 100755 --- a/toolchain_utils_githooks/check-presubmit.py +++ b/toolchain_utils_githooks/check-presubmit.py @@ -12,6 +12,7 @@ import datetime import multiprocessing import multiprocessing.pool import os +from pathlib import Path import re import shlex import shutil @@ -20,25 +21,24 @@ import sys import threading import traceback import typing as t -from pathlib import Path def run_command_unchecked( command: t.List[str], cwd: str, env: t.Dict[str, str] = None ) -> t.Tuple[int, str]: """Runs a command in the given dir, returning its exit code and stdio.""" - p = subprocess.Popen( + p = subprocess.run( command, + check=False, cwd=cwd, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env, + encoding="utf-8", + errors="replace", ) - - stdout, _ = p.communicate() - exit_code = p.wait() - return exit_code, stdout.decode("utf-8", "replace") + return p.returncode, p.stdout def has_executable_on_path(exe: str) -> bool: -- cgit v1.2.3 From e2565a6bec088a8995403777a9ee58fd90375318 Mon Sep 17 00:00:00 2001 From: Denis Nikitin <denik@google.com> Date: Fri, 9 Sep 2022 14:18:43 -0700 Subject: afdo: Add kernel metadata for arm BUG=b:244337204 TEST=cq Change-Id: I3a0cac0145f7bdc4b63a791eca2f1ebbb413e3a2 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3887561 Tested-by: Denis Nikitin <denik@chromium.org> Commit-Queue: Manoj Gupta <manojgupta@chromium.org> Auto-Submit: Denis Nikitin <denik@chromium.org> Reviewed-by: Manoj Gupta <manojgupta@chromium.org> --- afdo_metadata/kernel_arm_afdo.json | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 afdo_metadata/kernel_arm_afdo.json diff --git a/afdo_metadata/kernel_arm_afdo.json b/afdo_metadata/kernel_arm_afdo.json new file mode 100644 index 00000000..2ff1c15d --- /dev/null +++ b/afdo_metadata/kernel_arm_afdo.json @@ -0,0 +1,5 @@ +{ + "chromeos-kernel-5_15": { + "name": "R106-14541.0-1662074754" + } +} -- cgit v1.2.3 From fdcd39d5de4bd61cee94cf1e26416838d23092b8 Mon Sep 17 00:00:00 2001 From: Mike Frysinger <vapier@chromium.org> Date: Tue, 13 Sep 2022 14:19:58 -0400 Subject: Update license boilerplate text in source code files 
Normally we don't do this, but enough changes have accumulated that we're doing a tree-wide one-off update of the name & style. BUG=chromium:1098010 TEST=`repo upload` works Change-Id: Icb42e5012a87920c2cd13b666fb3e55e7e4fb3b8 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3891080 Auto-Submit: Mike Frysinger <vapier@chromium.org> Tested-by: Mike Frysinger <vapier@chromium.org> Commit-Queue: George Burgess <gbiv@chromium.org> Reviewed-by: George Burgess <gbiv@chromium.org> --- LICENSE | 4 ++-- afdo_redaction/redact_profile.py | 2 +- afdo_redaction/redact_profile_test.py | 2 +- afdo_redaction/remove_cold_functions.py | 2 +- afdo_redaction/remove_cold_functions_test.py | 2 +- afdo_redaction/remove_indirect_calls.py | 2 +- afdo_redaction/remove_indirect_calls_test.py | 2 +- afdo_tools/bisection/afdo_prof_analysis.py | 2 +- afdo_tools/bisection/afdo_prof_analysis_e2e_test.py | 2 +- afdo_tools/bisection/afdo_prof_analysis_test.py | 2 +- afdo_tools/bisection/state_assumption_external.sh | 2 +- afdo_tools/bisection/state_assumption_interrupt.sh | 2 +- afdo_tools/generate_afdo_from_tryjob.py | 2 +- afdo_tools/run_afdo_tryjob.py | 2 +- afdo_tools/update_kernel_afdo | 2 +- android_merge_from_upstream.sh | 2 +- auto_delete_nightly_test_data.py | 2 +- bestflags/example_algorithms.py | 2 +- bestflags/flags.py | 2 +- bestflags/flags_test.py | 2 +- bestflags/flags_util.py | 2 +- bestflags/generation.py | 2 +- bestflags/generation_test.py | 2 +- bestflags/genetic_algorithm.py | 2 +- bestflags/hill_climb_best_neighbor.py | 2 +- bestflags/iterative_elimination.py | 2 +- bestflags/mock_task.py | 2 +- bestflags/pipeline_process.py | 2 +- bestflags/pipeline_process_test.py | 2 +- bestflags/pipeline_worker.py | 2 +- bestflags/pipeline_worker_test.py | 2 +- bestflags/steering.py | 2 +- bestflags/steering_test.py | 2 +- bestflags/task.py | 2 +- bestflags/task_test.py | 2 +- bestflags/testing_batch.py | 2 +- binary_search_tool/MAINTENANCE | 2 +- binary_search_tool/__init__.py | 2 +- binary_search_tool/android/boot_test.sh | 2 +- binary_search_tool/android/cleanup.sh | 2 +- binary_search_tool/android/generate_cmd.sh | 2 +- binary_search_tool/android/get_initial_items.sh | 3 +-- binary_search_tool/android/interactive_test.sh | 2 +- binary_search_tool/android/setup.sh | 2 +- binary_search_tool/android/switch_to_bad.sh | 2 +- binary_search_tool/android/switch_to_good.sh | 2 +- binary_search_tool/android/test_setup.sh | 2 +- binary_search_tool/binary_search_perforce.py | 2 +- binary_search_tool/binary_search_state.py | 2 +- binary_search_tool/bisect_driver.py | 2 +- binary_search_tool/common.py | 2 +- binary_search_tool/common/boot_test.sh | 2 +- binary_search_tool/common/hash_test.sh | 2 +- binary_search_tool/common/interactive_test.sh | 2 +- binary_search_tool/common/interactive_test_noping.sh | 2 +- binary_search_tool/common/test_setup.sh | 2 +- binary_search_tool/compiler_wrapper.py | 2 +- binary_search_tool/cros_pkg/create_cleanup_script.py | 2 +- binary_search_tool/cros_pkg/get_initial_items.sh | 3 +-- binary_search_tool/cros_pkg/setup.sh | 2 +- binary_search_tool/cros_pkg/switch_to_bad.sh | 2 +- binary_search_tool/cros_pkg/switch_to_good.sh | 2 +- binary_search_tool/cros_pkg/test_setup_usb.sh | 2 +- binary_search_tool/ndk/DO_BISECTION.sh | 2 +- binary_search_tool/ndk/boot_test.sh | 2 +- binary_search_tool/ndk/get_initial_items.sh | 3 +-- binary_search_tool/ndk/switch_to_good.sh | 3 +-- binary_search_tool/ndk/test_setup.sh | 2 +- binary_search_tool/pass_mapping.py 
| 2 +- binary_search_tool/run_bisect.py | 2 +- binary_search_tool/run_bisect_tests.py | 2 +- binary_search_tool/sysroot_wrapper/cleanup.sh | 2 +- binary_search_tool/sysroot_wrapper/interactive_test_host.sh | 2 +- binary_search_tool/sysroot_wrapper/setup.sh | 2 +- binary_search_tool/sysroot_wrapper/test_setup_host.sh | 2 +- binary_search_tool/sysroot_wrapper/testing_test.py | 2 +- binary_search_tool/test/__init__.py | 2 +- binary_search_tool/test/binary_search_tool_test.py | 2 +- binary_search_tool/test/cmd_script.py | 2 +- binary_search_tool/test/cmd_script_no_support.py | 2 +- binary_search_tool/test/common.py | 2 +- binary_search_tool/test/gen_init_list.py | 2 +- binary_search_tool/test/gen_obj.py | 2 +- binary_search_tool/test/generate_cmd.py | 2 +- binary_search_tool/test/is_good.py | 2 +- binary_search_tool/test/is_good_noinc_prune.py | 2 +- binary_search_tool/test/switch_tmp.py | 2 +- binary_search_tool/test/switch_to_bad.py | 2 +- binary_search_tool/test/switch_to_bad_noinc_prune.py | 2 +- binary_search_tool/test/switch_to_bad_set_file.py | 2 +- binary_search_tool/test/switch_to_good.py | 2 +- binary_search_tool/test/switch_to_good_noinc_prune.py | 2 +- binary_search_tool/test/switch_to_good_set_file.py | 2 +- binary_search_tool/test/test_setup.py | 2 +- binary_search_tool/test/test_setup_bad.py | 2 +- build_chromeos.py | 2 +- build_tc.py | 2 +- buildbot_test_llvm.py | 2 +- buildbot_test_toolchains.py | 2 +- chromiumos_image_diff.py | 2 +- compiler_wrapper/android_config_test.go | 2 +- compiler_wrapper/bisect_flag.go | 2 +- compiler_wrapper/bisect_flag_test.go | 2 +- compiler_wrapper/build.py | 2 +- compiler_wrapper/bundle.README | 2 +- compiler_wrapper/bundle.py | 2 +- compiler_wrapper/ccache_flag.go | 2 +- compiler_wrapper/ccache_flag_test.go | 2 +- compiler_wrapper/clang_flags.go | 2 +- compiler_wrapper/clang_flags_test.go | 2 +- compiler_wrapper/clang_syntax_flag.go | 2 +- compiler_wrapper/clang_syntax_flag_test.go | 2 +- compiler_wrapper/clang_tidy_flag.go | 2 +- compiler_wrapper/clang_tidy_flag_test.go | 2 +- compiler_wrapper/command.go | 2 +- compiler_wrapper/command_test.go | 2 +- compiler_wrapper/compile_with_fallback.go | 2 +- compiler_wrapper/compile_with_fallback_test.go | 2 +- compiler_wrapper/compiler_wrapper.go | 2 +- compiler_wrapper/compiler_wrapper_test.go | 2 +- compiler_wrapper/config.go | 2 +- compiler_wrapper/config_test.go | 2 +- compiler_wrapper/crash_builds.go | 2 +- compiler_wrapper/crash_builds_test.go | 2 +- compiler_wrapper/cros_hardened_config_test.go | 2 +- compiler_wrapper/cros_host_config_test.go | 2 +- compiler_wrapper/cros_llvm_next_flags.go | 2 +- compiler_wrapper/cros_nonhardened_config_test.go | 2 +- compiler_wrapper/disable_werror_flag.go | 2 +- compiler_wrapper/disable_werror_flag_test.go | 2 +- compiler_wrapper/env.go | 2 +- compiler_wrapper/env_test.go | 2 +- compiler_wrapper/errors.go | 2 +- compiler_wrapper/errors_test.go | 2 +- compiler_wrapper/gcc_flags.go | 2 +- compiler_wrapper/gcc_flags_test.go | 2 +- compiler_wrapper/go_exec.go | 2 +- compiler_wrapper/goldenutil_test.go | 2 +- compiler_wrapper/install_compiler_wrapper.sh | 2 +- compiler_wrapper/kernel_bug.go | 2 +- compiler_wrapper/kernel_bug_test.go | 2 +- compiler_wrapper/libc_exec.go | 2 +- compiler_wrapper/libgcc_flags.go | 2 +- compiler_wrapper/libgcc_flags_test.go | 2 +- compiler_wrapper/main.go | 2 +- compiler_wrapper/print_cmdline_flag.go | 2 +- compiler_wrapper/print_cmdline_flag_test.go | 2 +- compiler_wrapper/print_config_flag.go | 2 +- 
compiler_wrapper/print_config_flag_test.go | 2 +- compiler_wrapper/remote_build_flag_test.go | 2 +- compiler_wrapper/remote_build_flags.go | 2 +- compiler_wrapper/reset_compiler_wrapper.sh | 2 +- compiler_wrapper/rusage_flag.go | 2 +- compiler_wrapper/rusage_flag_test.go | 2 +- compiler_wrapper/sanitizer_flags.go | 2 +- compiler_wrapper/sanitizer_flags_test.go | 2 +- compiler_wrapper/stackprotector_flags.go | 2 +- compiler_wrapper/stackprotector_flags_test.go | 2 +- compiler_wrapper/sysroot_flag.go | 2 +- compiler_wrapper/sysroot_flag_test.go | 2 +- compiler_wrapper/testutil_test.go | 2 +- compiler_wrapper/thumb_flags.go | 2 +- compiler_wrapper/thumb_flags_test.go | 2 +- compiler_wrapper/unsupported_flags.go | 2 +- compiler_wrapper/unsupported_flags_test.go | 2 +- compiler_wrapper/x64_flags.go | 2 +- compiler_wrapper/x64_flags_test.go | 2 +- crate_ebuild_help.py | 2 +- cros_utils/__init__.py | 2 +- cros_utils/bugs.py | 2 +- cros_utils/bugs_test.py | 2 +- cros_utils/buildbot_utils.py | 2 +- cros_utils/buildbot_utils_unittest.py | 2 +- cros_utils/command_executer.py | 2 +- cros_utils/command_executer_timeout_test.py | 2 +- cros_utils/command_executer_unittest.py | 2 +- cros_utils/constants.py | 2 +- cros_utils/device_setup_utils.py | 2 +- cros_utils/device_setup_utils_unittest.py | 2 +- cros_utils/email_sender.py | 2 +- cros_utils/email_sender_unittest.py | 2 +- cros_utils/file_utils.py | 2 +- cros_utils/html_tools.py | 2 +- cros_utils/locks.py | 2 +- cros_utils/logger.py | 2 +- cros_utils/machines.py | 2 +- cros_utils/misc.py | 2 +- cros_utils/misc_test.py | 2 +- cros_utils/no_pseudo_terminal_test.py | 2 +- cros_utils/perf_diff.py | 2 +- cros_utils/tabulator.py | 2 +- cros_utils/tabulator_test.py | 2 +- cros_utils/timeline.py | 2 +- cros_utils/timeline_test.py | 2 +- cros_utils/tiny_render.py | 2 +- cros_utils/tiny_render_test.py | 2 +- cros_utils/toolchain_utils.sh | 2 +- crosperf/benchmark.py | 2 +- crosperf/benchmark_run.py | 2 +- crosperf/benchmark_run_unittest.py | 2 +- crosperf/benchmark_unittest.py | 2 +- crosperf/column_chart.py | 2 +- crosperf/compare_machines.py | 2 +- crosperf/config.py | 2 +- crosperf/config_unittest.py | 2 +- crosperf/crosperf | 2 +- crosperf/crosperf.py | 2 +- crosperf/crosperf_autolock.py | 2 +- crosperf/crosperf_unittest.py | 2 +- crosperf/download_images.py | 2 +- crosperf/download_images_buildid_test.py | 2 +- crosperf/download_images_unittest.py | 2 +- crosperf/experiment.py | 2 +- crosperf/experiment_factory.py | 2 +- crosperf/experiment_factory_unittest.py | 2 +- crosperf/experiment_file.py | 2 +- crosperf/experiment_file_unittest.py | 2 +- crosperf/experiment_files/telemetry_perf_perf | 3 +-- crosperf/experiment_runner.py | 2 +- crosperf/experiment_runner_unittest.py | 2 +- crosperf/experiment_status.py | 2 +- crosperf/field.py | 2 +- crosperf/flag_test_unittest.py | 2 +- crosperf/generate_report.py | 2 +- crosperf/generate_report_unittest.py | 2 +- crosperf/help.py | 2 +- crosperf/image_checksummer.py | 2 +- crosperf/label.py | 2 +- crosperf/machine_image_manager.py | 2 +- crosperf/machine_image_manager_unittest.py | 2 +- crosperf/machine_manager.py | 2 +- crosperf/machine_manager_unittest.py | 2 +- crosperf/mock_instance.py | 2 +- crosperf/results_cache.py | 2 +- crosperf/results_cache_unittest.py | 2 +- crosperf/results_organizer.py | 2 +- crosperf/results_organizer_unittest.py | 2 +- crosperf/results_report.py | 2 +- crosperf/results_report_templates.py | 2 +- crosperf/results_report_unittest.py | 2 +- crosperf/run_tests.sh | 2 +- 
crosperf/schedv2.py | 2 +- crosperf/schedv2_unittest.py | 2 +- crosperf/settings.py | 2 +- crosperf/settings_factory.py | 2 +- crosperf/settings_factory_unittest.py | 2 +- crosperf/settings_unittest.py | 2 +- crosperf/suite_runner.py | 2 +- crosperf/suite_runner_unittest.py | 2 +- crosperf/test_flag.py | 2 +- crosperf/translate_xbuddy.py | 2 +- cwp/cr-os/fetch_gn_descs.py | 2 +- cwp/cr-os/fetch_gn_descs_test.py | 2 +- debug_info_test/allowlist.py | 2 +- debug_info_test/check_cus.py | 2 +- debug_info_test/check_exist.py | 2 +- debug_info_test/check_icf.py | 2 +- debug_info_test/check_ngcc.py | 2 +- debug_info_test/debug_info_test.py | 2 +- file_lock_machine.py | 2 +- file_lock_machine_test.py | 2 +- go/chromeos/setup_chromeos_testing.py | 2 +- heatmaps/heat_map.py | 2 +- heatmaps/heat_map_test.py | 2 +- heatmaps/heatmap_generator.py | 2 +- heatmaps/heatmap_generator_test.py | 2 +- heatmaps/perf-to-inst-page.sh | 2 +- image_chromeos.py | 2 +- llvm_extra/create_ebuild_file.py | 2 +- llvm_extra/create_llvm_extra.sh | 2 +- llvm_tools/auto_llvm_bisection.py | 2 +- llvm_tools/auto_llvm_bisection_unittest.py | 2 +- llvm_tools/bisect_clang_crashes.py | 2 +- llvm_tools/bisect_clang_crashes_unittest.py | 2 +- llvm_tools/check_clang_diags.py | 2 +- llvm_tools/check_clang_diags_test.py | 2 +- llvm_tools/chroot.py | 2 +- llvm_tools/chroot_unittest.py | 2 +- llvm_tools/copy_helpers_to_chromiumos_overlay.py | 2 +- llvm_tools/custom_script_example.py | 2 +- llvm_tools/failure_modes.py | 2 +- llvm_tools/fetch_cros_sdk_rolls.py | 2 +- llvm_tools/get_llvm_hash.py | 2 +- llvm_tools/get_llvm_hash_unittest.py | 2 +- llvm_tools/get_upstream_patch.py | 2 +- llvm_tools/git.py | 2 +- llvm_tools/git_llvm_rev.py | 2 +- llvm_tools/git_llvm_rev_test.py | 2 +- llvm_tools/git_unittest.py | 2 +- llvm_tools/llvm_bisection.py | 2 +- llvm_tools/llvm_bisection_unittest.py | 2 +- llvm_tools/llvm_local_bisection.sh | 2 +- llvm_tools/llvm_project.py | 2 +- llvm_tools/modify_a_tryjob.py | 2 +- llvm_tools/modify_a_tryjob_unittest.py | 2 +- llvm_tools/nightly_revert_checker.py | 2 +- llvm_tools/nightly_revert_checker_test.py | 2 +- llvm_tools/patch_manager.py | 2 +- llvm_tools/patch_manager_unittest.py | 2 +- llvm_tools/patch_sync/src/android_utils.rs | 2 +- llvm_tools/patch_sync/src/main.rs | 2 +- llvm_tools/patch_sync/src/patch_parsing.rs | 2 +- llvm_tools/patch_sync/src/version_control.rs | 2 +- llvm_tools/patch_utils.py | 2 +- llvm_tools/patch_utils_unittest.py | 2 +- llvm_tools/subprocess_helpers.py | 2 +- llvm_tools/test_helpers.py | 2 +- llvm_tools/update_chromeos_llvm_hash.py | 2 +- llvm_tools/update_chromeos_llvm_hash_unittest.py | 2 +- llvm_tools/update_packages_and_run_tests.py | 2 +- llvm_tools/update_packages_and_run_tests_unittest.py | 2 +- llvm_tools/update_tryjob_status.py | 2 +- llvm_tools/update_tryjob_status_unittest.py | 2 +- llvm_tools/upload_lexan_crashes_to_forcey.py | 2 +- llvm_tools/upload_lexan_crashes_to_forcey_test.py | 2 +- lock_machine.py | 2 +- make_root_writable.py | 2 +- orderfile/post_process_orderfile.py | 2 +- orderfile/post_process_orderfile_test.py | 2 +- pgo_tools/merge_profdata_and_upload.py | 2 +- pgo_tools/monitor_pgo_profiles.py | 2 +- pgo_tools/monitor_pgo_profiles_unittest.py | 2 +- pgo_tools_rust/pgo_rust.py | 2 +- remote_test.py | 2 +- run_tests_for.py | 2 +- rust-analyzer-chromiumos-wrapper/src/main.rs | 2 +- rust_tools/rust_uprev.py | 2 +- rust_tools/rust_uprev_test.py | 2 +- rust_tools/rust_watch.py | 2 +- rust_tools/rust_watch_test.py | 2 +- 
seccomp_tools/mass_seccomp_editor/mass_seccomp_editor.py | 2 +-
seccomp_tools/mass_seccomp_editor/test_mass_seccomp_editor.py | 2 +-
tc_enter_chroot.py | 2 +-
toolchain_utils_githooks/check-presubmit.py | 2 +-
toolchain_utils_githooks/pre-push | 2 +-
toolchain_utils_githooks/pre-push.real | 2 +-
update_telemetry_defaults.py | 2 +-
upstream_workon/upstream_workon.bash | 2 +-
338 files changed, 339 insertions(+), 344 deletions(-)

diff --git a/LICENSE b/LICENSE
index 2defaff9..73b03ad0 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2016 The ChromiumOS Authors. All rights reserved.
+// Copyright 2011-2016 The ChromiumOS Authors
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@
 // copyright notice, this list of conditions and the following disclaimer
 // in the documentation and/or other materials provided with the
 // distribution.
-// * Neither the name of Google Inc. nor the names of its
+// * Neither the name of Google LLC nor the names of its
 // contributors may be used to endorse or promote products derived from
 // this software without specific prior written permission.
 //

diff --git a/afdo_redaction/redact_profile.py b/afdo_redaction/redact_profile.py
index d9f3d6ab..0779d2ac 100755
--- a/afdo_redaction/redact_profile.py
+++ b/afdo_redaction/redact_profile.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2018 The ChromiumOS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/afdo_redaction/redact_profile_test.py b/afdo_redaction/redact_profile_test.py
index 487c5091..93c65510 100755
--- a/afdo_redaction/redact_profile_test.py
+++ b/afdo_redaction/redact_profile_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2018 The ChromiumOS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/afdo_redaction/remove_cold_functions.py b/afdo_redaction/remove_cold_functions.py
index 6501cec0..c6043bc0 100755
--- a/afdo_redaction/remove_cold_functions.py
+++ b/afdo_redaction/remove_cold_functions.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/afdo_redaction/remove_cold_functions_test.py b/afdo_redaction/remove_cold_functions_test.py
index ed8b1972..89a87f82 100755
--- a/afdo_redaction/remove_cold_functions_test.py
+++ b/afdo_redaction/remove_cold_functions_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/afdo_redaction/remove_indirect_calls.py b/afdo_redaction/remove_indirect_calls.py
index 769bd0ff..32dab3f4 100755
--- a/afdo_redaction/remove_indirect_calls.py
+++ b/afdo_redaction/remove_indirect_calls.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/afdo_redaction/remove_indirect_calls_test.py b/afdo_redaction/remove_indirect_calls_test.py
index b24de45a..640b747f 100755
--- a/afdo_redaction/remove_indirect_calls_test.py
+++ b/afdo_redaction/remove_indirect_calls_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/afdo_tools/bisection/afdo_prof_analysis.py b/afdo_tools/bisection/afdo_prof_analysis.py
index ee9b0c92..c9ca9214 100755
--- a/afdo_tools/bisection/afdo_prof_analysis.py
+++ b/afdo_tools/bisection/afdo_prof_analysis.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py b/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py
index e9a72b13..8a0dae38 100755
--- a/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py
+++ b/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/afdo_tools/bisection/afdo_prof_analysis_test.py b/afdo_tools/bisection/afdo_prof_analysis_test.py
index d21a3596..babfc021 100755
--- a/afdo_tools/bisection/afdo_prof_analysis_test.py
+++ b/afdo_tools/bisection/afdo_prof_analysis_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/afdo_tools/bisection/state_assumption_external.sh b/afdo_tools/bisection/state_assumption_external.sh
index 153aefa3..a2076b0d 100755
--- a/afdo_tools/bisection/state_assumption_external.sh
+++ b/afdo_tools/bisection/state_assumption_external.sh
@@ -1,5 +1,5 @@
 #!/bin/bash -eu
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/afdo_tools/bisection/state_assumption_interrupt.sh b/afdo_tools/bisection/state_assumption_interrupt.sh
index 7486137a..d1599d0b 100755
--- a/afdo_tools/bisection/state_assumption_interrupt.sh
+++ b/afdo_tools/bisection/state_assumption_interrupt.sh
@@ -1,5 +1,5 @@
 #!/bin/bash -eu
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/afdo_tools/generate_afdo_from_tryjob.py b/afdo_tools/generate_afdo_from_tryjob.py
index 30f5b570..e398f8a1 100755
--- a/afdo_tools/generate_afdo_from_tryjob.py
+++ b/afdo_tools/generate_afdo_from_tryjob.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/afdo_tools/run_afdo_tryjob.py b/afdo_tools/run_afdo_tryjob.py
index 1e832c4e..013e10c6 100755
--- a/afdo_tools/run_afdo_tryjob.py
+++ b/afdo_tools/run_afdo_tryjob.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/afdo_tools/update_kernel_afdo b/afdo_tools/update_kernel_afdo
index 60a15ea5..701a4307 100755
--- a/afdo_tools/update_kernel_afdo
+++ b/afdo_tools/update_kernel_afdo
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/android_merge_from_upstream.sh b/android_merge_from_upstream.sh
index 9a8c7dce..301ea632 100755
--- a/android_merge_from_upstream.sh
+++ b/android_merge_from_upstream.sh
@@ -1,5 +1,5 @@
 #!/bin/bash -eu
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #

diff --git a/auto_delete_nightly_test_data.py b/auto_delete_nightly_test_data.py
index 8023ce33..0dd2dba8 100755
--- a/auto_delete_nightly_test_data.py
+++ b/auto_delete_nightly_test_data.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/bestflags/example_algorithms.py b/bestflags/example_algorithms.py
index 10136aca..c39b2943 100644
--- a/bestflags/example_algorithms.py
+++ b/bestflags/example_algorithms.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """An example main file running the algorithms.

diff --git a/bestflags/flags.py b/bestflags/flags.py
index 9ae360af..b1b79999 100644
--- a/bestflags/flags.py
+++ b/bestflags/flags.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """Manage bundles of flags used for the optimizing of ChromeOS.

diff --git a/bestflags/flags_test.py b/bestflags/flags_test.py
index cbb59287..231e569f 100644
--- a/bestflags/flags_test.py
+++ b/bestflags/flags_test.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """Unit tests for the classes in module 'flags'.

diff --git a/bestflags/flags_util.py b/bestflags/flags_util.py
index 088319c5..c4a490e2 100644
--- a/bestflags/flags_util.py
+++ b/bestflags/flags_util.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """Utility functions to explore the neighbor flags.

diff --git a/bestflags/generation.py b/bestflags/generation.py
index 9ab8edbf..69622de5 100644
--- a/bestflags/generation.py
+++ b/bestflags/generation.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """A generation of a set of tasks.

diff --git a/bestflags/generation_test.py b/bestflags/generation_test.py
index 2d9d4680..0928edcc 100644
--- a/bestflags/generation_test.py
+++ b/bestflags/generation_test.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """Generation unittest.

diff --git a/bestflags/genetic_algorithm.py b/bestflags/genetic_algorithm.py
index 0d947067..c2bd5574 100644
--- a/bestflags/genetic_algorithm.py
+++ b/bestflags/genetic_algorithm.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """The hill genetic algorithm.

diff --git a/bestflags/hill_climb_best_neighbor.py b/bestflags/hill_climb_best_neighbor.py
index 51e30369..2455dd94 100644
--- a/bestflags/hill_climb_best_neighbor.py
+++ b/bestflags/hill_climb_best_neighbor.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """A variation of the hill climbing algorithm.

diff --git a/bestflags/iterative_elimination.py b/bestflags/iterative_elimination.py
index 7ba19633..8d548606 100644
--- a/bestflags/iterative_elimination.py
+++ b/bestflags/iterative_elimination.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """Iterative flags elimination.

diff --git a/bestflags/mock_task.py b/bestflags/mock_task.py
index 3d6a4acf..e25daeba 100644
--- a/bestflags/mock_task.py
+++ b/bestflags/mock_task.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """This module defines the common mock tasks used by various unit tests.

diff --git a/bestflags/pipeline_process.py b/bestflags/pipeline_process.py
index 6b061a10..3aab96fe 100644
--- a/bestflags/pipeline_process.py
+++ b/bestflags/pipeline_process.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """Pipeline process that encapsulates the actual content.
diff --git a/bestflags/pipeline_process_test.py b/bestflags/pipeline_process_test.py
index ec810d13..04e641ec 100644
--- a/bestflags/pipeline_process_test.py
+++ b/bestflags/pipeline_process_test.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """Pipeline Process unittest.

diff --git a/bestflags/pipeline_worker.py b/bestflags/pipeline_worker.py
index d045dc26..f18be66b 100644
--- a/bestflags/pipeline_worker.py
+++ b/bestflags/pipeline_worker.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """The pipeline_worker functions of the build and test stage of the framework.

diff --git a/bestflags/pipeline_worker_test.py b/bestflags/pipeline_worker_test.py
index 3fca0294..15c51ec1 100644
--- a/bestflags/pipeline_worker_test.py
+++ b/bestflags/pipeline_worker_test.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """Unittest for the pipeline_worker functions in the build/test stage.

diff --git a/bestflags/steering.py b/bestflags/steering.py
index a640507d..ead2516b 100644
--- a/bestflags/steering.py
+++ b/bestflags/steering.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """The framework stage that produces the next generation of tasks to run.

diff --git a/bestflags/steering_test.py b/bestflags/steering_test.py
index ac91e925..28a2f108 100644
--- a/bestflags/steering_test.py
+++ b/bestflags/steering_test.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """Steering stage unittest.

diff --git a/bestflags/task.py b/bestflags/task.py
index cd2f0146..a7822061 100644
--- a/bestflags/task.py
+++ b/bestflags/task.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """A reproducing entity.

diff --git a/bestflags/task_test.py b/bestflags/task_test.py
index 1b559bbb..f151bc78 100644
--- a/bestflags/task_test.py
+++ b/bestflags/task_test.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """Task unittest.

diff --git a/bestflags/testing_batch.py b/bestflags/testing_batch.py
index d8a7932f..783d95bf 100644
--- a/bestflags/testing_batch.py
+++ b/bestflags/testing_batch.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """Hill climbing unitest.
diff --git a/binary_search_tool/MAINTENANCE b/binary_search_tool/MAINTENANCE
index cc86b760..90ac582d 100644
--- a/binary_search_tool/MAINTENANCE
+++ b/binary_search_tool/MAINTENANCE
@@ -1,4 +1,4 @@
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/__init__.py b/binary_search_tool/__init__.py
index af3cc751..6e3ade4a 100644
--- a/binary_search_tool/__init__.py
+++ b/binary_search_tool/__init__.py
@@ -1,4 +1,4 @@
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/android/boot_test.sh b/binary_search_tool/android/boot_test.sh
index dc871601..4c0c77e2 100755
--- a/binary_search_tool/android/boot_test.sh
+++ b/binary_search_tool/android/boot_test.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This script pings the android device to determine if it successfully booted.
 #

diff --git a/binary_search_tool/android/cleanup.sh b/binary_search_tool/android/cleanup.sh
index 759b3ed4..480b830b 100755
--- a/binary_search_tool/android/cleanup.sh
+++ b/binary_search_tool/android/cleanup.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This script is part of the Android binary search triage process.
 # It should be the last script called by the user, after the user has

diff --git a/binary_search_tool/android/generate_cmd.sh b/binary_search_tool/android/generate_cmd.sh
index 52c26b3c..6d0e5692 100755
--- a/binary_search_tool/android/generate_cmd.sh
+++ b/binary_search_tool/android/generate_cmd.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -eu
-# Copyright 2018 The ChromiumOS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/android/get_initial_items.sh b/binary_search_tool/android/get_initial_items.sh
index 2a1eda3a..1ed30425 100755
--- a/binary_search_tool/android/get_initial_items.sh
+++ b/binary_search_tool/android/get_initial_items.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This script is intended to be used by binary_search_state.py, as
 # part of the binary search triage on the Android source tree. This script
@@ -11,4 +11,3 @@
 #
 source android/common.sh
 cat ${BISECT_GOOD_BUILD}/_LIST
-

diff --git a/binary_search_tool/android/interactive_test.sh b/binary_search_tool/android/interactive_test.sh
index e506b236..0a8a4b8c 100755
--- a/binary_search_tool/android/interactive_test.sh
+++ b/binary_search_tool/android/interactive_test.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This script pings the android device to determine if it successfully booted.
 # It then asks the user if the image is good or not, allowing the user to

diff --git a/binary_search_tool/android/setup.sh b/binary_search_tool/android/setup.sh
index 7f8ba0e9..06918226 100755
--- a/binary_search_tool/android/setup.sh
+++ b/binary_search_tool/android/setup.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This script is part of the Android binary search triage process.
 # It should be the first script called by the user, after the user has set up

diff --git a/binary_search_tool/android/switch_to_bad.sh b/binary_search_tool/android/switch_to_bad.sh
index d44f9f13..2100ed43 100755
--- a/binary_search_tool/android/switch_to_bad.sh
+++ b/binary_search_tool/android/switch_to_bad.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This script is intended to be used by binary_search_state.py, as
 # part of the binary search triage on the Android source tree. This script

diff --git a/binary_search_tool/android/switch_to_good.sh b/binary_search_tool/android/switch_to_good.sh
index 557553c3..a5be3c3e 100755
--- a/binary_search_tool/android/switch_to_good.sh
+++ b/binary_search_tool/android/switch_to_good.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This script is intended to be used by binary_search_state.py, as
 # part of the binary search triage on the Android source tree. This script

diff --git a/binary_search_tool/android/test_setup.sh b/binary_search_tool/android/test_setup.sh
index 26f8ec22..be4a0b76 100755
--- a/binary_search_tool/android/test_setup.sh
+++ b/binary_search_tool/android/test_setup.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This is the test setup script for generating an Android image based off the
 # current working build tree. make is called to relink the object files and

diff --git a/binary_search_tool/binary_search_perforce.py b/binary_search_tool/binary_search_perforce.py
index d8e7f77b..01756b8e 100755
--- a/binary_search_tool/binary_search_perforce.py
+++ b/binary_search_tool/binary_search_perforce.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/binary_search_state.py b/binary_search_tool/binary_search_state.py
index bd04d1b1..1b423b5c 100755
--- a/binary_search_tool/binary_search_state.py
+++ b/binary_search_tool/binary_search_state.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/bisect_driver.py b/binary_search_tool/bisect_driver.py
index ddab1682..8feb1a37 100644
--- a/binary_search_tool/bisect_driver.py
+++ b/binary_search_tool/bisect_driver.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #

diff --git a/binary_search_tool/common.py b/binary_search_tool/common.py
index 1f7886f4..f6165847 100644
--- a/binary_search_tool/common.py
+++ b/binary_search_tool/common.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/binary_search_tool/common/boot_test.sh b/binary_search_tool/common/boot_test.sh
index 8f6d9a7d..384712b7 100755
--- a/binary_search_tool/common/boot_test.sh
+++ b/binary_search_tool/common/boot_test.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This script pings the chromebook to determine if it has successfully booted.
 #

diff --git a/binary_search_tool/common/hash_test.sh b/binary_search_tool/common/hash_test.sh
index 5450988e..338ee026 100755
--- a/binary_search_tool/common/hash_test.sh
+++ b/binary_search_tool/common/hash_test.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This script is intended to be used by binary_search_state.py. It is to
 # be used for testing/development of the binary search triage tool

diff --git a/binary_search_tool/common/interactive_test.sh b/binary_search_tool/common/interactive_test.sh
index 8773dd12..05d47b7f 100755
--- a/binary_search_tool/common/interactive_test.sh
+++ b/binary_search_tool/common/interactive_test.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This script pings the chromebook to determine if it successfully booted.
 # It then asks the user if the image is good or not, allowing the user to

diff --git a/binary_search_tool/common/interactive_test_noping.sh b/binary_search_tool/common/interactive_test_noping.sh
index bb01b950..d4e77d7c 100755
--- a/binary_search_tool/common/interactive_test_noping.sh
+++ b/binary_search_tool/common/interactive_test_noping.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This script asks the user if the image is good or not, allowing the user to
 # conduct whatever tests the user wishes, and waiting for a response.

diff --git a/binary_search_tool/common/test_setup.sh b/binary_search_tool/common/test_setup.sh
index 0abc64de..06452346 100755
--- a/binary_search_tool/common/test_setup.sh
+++ b/binary_search_tool/common/test_setup.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright 2021 The ChromiumOS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #

diff --git a/binary_search_tool/compiler_wrapper.py b/binary_search_tool/compiler_wrapper.py
index 02dd332e..c32826b0 100755
--- a/binary_search_tool/compiler_wrapper.py
+++ b/binary_search_tool/compiler_wrapper.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/cros_pkg/create_cleanup_script.py b/binary_search_tool/cros_pkg/create_cleanup_script.py
index 5c46d3ef..abfea5eb 100755
--- a/binary_search_tool/cros_pkg/create_cleanup_script.py
+++ b/binary_search_tool/cros_pkg/create_cleanup_script.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/binary_search_tool/cros_pkg/get_initial_items.sh b/binary_search_tool/cros_pkg/get_initial_items.sh
index 49ca3d18..bc0fd2e6 100755
--- a/binary_search_tool/cros_pkg/get_initial_items.sh
+++ b/binary_search_tool/cros_pkg/get_initial_items.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2015 Google Inc. All Rights Reserved.
+# Copyright 2015 Google LLC
 #
 # This script is intended to be used by binary_search_state.py, as
 # part of the binary search triage on ChromeOS packages. This script
@@ -13,4 +13,3 @@
 source common/common.sh
 cd ${GOOD_BUILD}/packages
 find . -name "*.tbz2"
-

diff --git a/binary_search_tool/cros_pkg/setup.sh b/binary_search_tool/cros_pkg/setup.sh
index ae31fa82..30a3a423 100755
--- a/binary_search_tool/cros_pkg/setup.sh
+++ b/binary_search_tool/cros_pkg/setup.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2015 Google Inc. All Rights Reserved.
+# Copyright 2015 Google LLC
 #
 # This script is part of the ChromeOS package binary search triage process.
 # It should be the first script called by the user, after the user has set up

diff --git a/binary_search_tool/cros_pkg/switch_to_bad.sh b/binary_search_tool/cros_pkg/switch_to_bad.sh
index 126425f4..b4156a0e 100755
--- a/binary_search_tool/cros_pkg/switch_to_bad.sh
+++ b/binary_search_tool/cros_pkg/switch_to_bad.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2015 Google Inc. All Rights Reserved.
+# Copyright 2015 Google LLC
 #
 # This script is intended to be used by binary_search_state.py, as
 # part of the binary search triage on ChromeOS packages. This script

diff --git a/binary_search_tool/cros_pkg/switch_to_good.sh b/binary_search_tool/cros_pkg/switch_to_good.sh
index a9095e99..5f7c2d77 100755
--- a/binary_search_tool/cros_pkg/switch_to_good.sh
+++ b/binary_search_tool/cros_pkg/switch_to_good.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2015 Google Inc. All Rights Reserved.
+# Copyright 2015 Google LLC
 #
 # This script is intended to be used by binary_search_state.py, as
 # part of the binary search triage on ChromeOS packages. This script

diff --git a/binary_search_tool/cros_pkg/test_setup_usb.sh b/binary_search_tool/cros_pkg/test_setup_usb.sh
index fec66f8e..54d0baa1 100755
--- a/binary_search_tool/cros_pkg/test_setup_usb.sh
+++ b/binary_search_tool/cros_pkg/test_setup_usb.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This is a generic ChromeOS package/image test setup script. It is meant to
 # be used for the package bisection tool, in particular when there is a booting

diff --git a/binary_search_tool/ndk/DO_BISECTION.sh b/binary_search_tool/ndk/DO_BISECTION.sh
index 298d5747..e6eed765 100755
--- a/binary_search_tool/ndk/DO_BISECTION.sh
+++ b/binary_search_tool/ndk/DO_BISECTION.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This is an example script to show users the steps for bisecting an NDK
 # application for Android. Our example is the Teapot app that comes bundled with

diff --git a/binary_search_tool/ndk/boot_test.sh b/binary_search_tool/ndk/boot_test.sh
index b8c34aa5..0b66ddfa 100755
--- a/binary_search_tool/ndk/boot_test.sh
+++ b/binary_search_tool/ndk/boot_test.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This script checks the android device to determine if the app is currently
 # running. For our specific test case we will be checking if the Teapot app

diff --git a/binary_search_tool/ndk/get_initial_items.sh b/binary_search_tool/ndk/get_initial_items.sh
index bc2d05cd..5dd3396d 100755
--- a/binary_search_tool/ndk/get_initial_items.sh
+++ b/binary_search_tool/ndk/get_initial_items.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This script is intended to be used by binary_search_state.py, as
 # part of the binary search triage on the Android NDK apps. This script
@@ -9,4 +9,3 @@
 #
 cat ${BISECT_DIR}/good/_LIST
-

diff --git a/binary_search_tool/ndk/switch_to_good.sh b/binary_search_tool/ndk/switch_to_good.sh
index cb8d5fd9..c98de67c 100755
--- a/binary_search_tool/ndk/switch_to_good.sh
+++ b/binary_search_tool/ndk/switch_to_good.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This script is intended to be used by binary_search_state.py, as
 # part of the binary search triage on Android NDK apps. This script simply
@@ -43,4 +43,3 @@
 fi
 cat $OBJ_LIST_FILE | xargs rm
 exit 0
-

diff --git a/binary_search_tool/ndk/test_setup.sh b/binary_search_tool/ndk/test_setup.sh
index 477bcb21..8f3ce04e 100755
--- a/binary_search_tool/ndk/test_setup.sh
+++ b/binary_search_tool/ndk/test_setup.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This is the setup script for generating and installing the ndk app.
 #

diff --git a/binary_search_tool/pass_mapping.py b/binary_search_tool/pass_mapping.py
index c8a616b5..33c023a9 100644
--- a/binary_search_tool/pass_mapping.py
+++ b/binary_search_tool/pass_mapping.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/run_bisect.py b/binary_search_tool/run_bisect.py
index 480408d7..f54e00e1 100755
--- a/binary_search_tool/run_bisect.py
+++ b/binary_search_tool/run_bisect.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/run_bisect_tests.py b/binary_search_tool/run_bisect_tests.py
index ae230131..ca7077d3 100755
--- a/binary_search_tool/run_bisect_tests.py
+++ b/binary_search_tool/run_bisect_tests.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/sysroot_wrapper/cleanup.sh b/binary_search_tool/sysroot_wrapper/cleanup.sh
index 5066d638..b3ae2dd9 100755
--- a/binary_search_tool/sysroot_wrapper/cleanup.sh
+++ b/binary_search_tool/sysroot_wrapper/cleanup.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
 #
 # This script is part of the ChromeOS object binary search triage process.
 # It should be the last script called by the user, after the user has

diff --git a/binary_search_tool/sysroot_wrapper/interactive_test_host.sh b/binary_search_tool/sysroot_wrapper/interactive_test_host.sh
index 58adffc0..bd84936c 100755
--- a/binary_search_tool/sysroot_wrapper/interactive_test_host.sh
+++ b/binary_search_tool/sysroot_wrapper/interactive_test_host.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2017 Google Inc. All Rights Reserved.
+# Copyright 2017 Google LLC
 #
 # This script is intended to be used by binary_search_state.py, as
 # part of the binary search triage on ChromeOS package and object files for a

diff --git a/binary_search_tool/sysroot_wrapper/setup.sh b/binary_search_tool/sysroot_wrapper/setup.sh
index 31cdf113..f9ecb0ea 100755
--- a/binary_search_tool/sysroot_wrapper/setup.sh
+++ b/binary_search_tool/sysroot_wrapper/setup.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -u
 #
-# Copyright 2021 The ChromiumOS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #

diff --git a/binary_search_tool/sysroot_wrapper/test_setup_host.sh b/binary_search_tool/sysroot_wrapper/test_setup_host.sh
index b5169eee..e61bc367 100755
--- a/binary_search_tool/sysroot_wrapper/test_setup_host.sh
+++ b/binary_search_tool/sysroot_wrapper/test_setup_host.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright 2017 Google Inc. All Rights Reserved.
+# Copyright 2017 Google LLC
 #
 # This is a generic ChromeOS package/image test setup script. It is meant to
 # be used for either the object file or package bisection tools. This script

diff --git a/binary_search_tool/sysroot_wrapper/testing_test.py b/binary_search_tool/sysroot_wrapper/testing_test.py
index 20bc7f75..af884be9 100755
--- a/binary_search_tool/sysroot_wrapper/testing_test.py
+++ b/binary_search_tool/sysroot_wrapper/testing_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/__init__.py b/binary_search_tool/test/__init__.py
index af3cc751..6e3ade4a 100644
--- a/binary_search_tool/test/__init__.py
+++ b/binary_search_tool/test/__init__.py
@@ -1,4 +1,4 @@
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/binary_search_tool_test.py b/binary_search_tool/test/binary_search_tool_test.py
index b2bbec01..a79c9a1d 100755
--- a/binary_search_tool/test/binary_search_tool_test.py
+++ b/binary_search_tool/test/binary_search_tool_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/cmd_script.py b/binary_search_tool/test/cmd_script.py
index 73852bc5..b0475c70 100755
--- a/binary_search_tool/test/cmd_script.py
+++ b/binary_search_tool/test/cmd_script.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/cmd_script_no_support.py b/binary_search_tool/test/cmd_script_no_support.py
index 7c5297d2..f1c2bcbe 100644
--- a/binary_search_tool/test/cmd_script_no_support.py
+++ b/binary_search_tool/test/cmd_script_no_support.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/common.py b/binary_search_tool/test/common.py
index fa33f20c..6632a4c7 100755
--- a/binary_search_tool/test/common.py
+++ b/binary_search_tool/test/common.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/gen_init_list.py b/binary_search_tool/test/gen_init_list.py
index 927ad22c..138e949c 100755
--- a/binary_search_tool/test/gen_init_list.py
+++ b/binary_search_tool/test/gen_init_list.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/gen_obj.py b/binary_search_tool/test/gen_obj.py
index e1eb4913..394445f0 100755
--- a/binary_search_tool/test/gen_obj.py
+++ b/binary_search_tool/test/gen_obj.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/generate_cmd.py b/binary_search_tool/test/generate_cmd.py
index b02c2ad3..96fa720c 100755
--- a/binary_search_tool/test/generate_cmd.py
+++ b/binary_search_tool/test/generate_cmd.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/is_good.py b/binary_search_tool/test/is_good.py
index 51ee742c..fd3f908f 100755
--- a/binary_search_tool/test/is_good.py
+++ b/binary_search_tool/test/is_good.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/is_good_noinc_prune.py b/binary_search_tool/test/is_good_noinc_prune.py
index 5fe4ed72..654fcd25 100755
--- a/binary_search_tool/test/is_good_noinc_prune.py
+++ b/binary_search_tool/test/is_good_noinc_prune.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/binary_search_tool/test/switch_tmp.py b/binary_search_tool/test/switch_tmp.py
index 674433eb..acc0393d 100755
--- a/binary_search_tool/test/switch_tmp.py
+++ b/binary_search_tool/test/switch_tmp.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/switch_to_bad.py b/binary_search_tool/test/switch_to_bad.py
index c711d19f..bc32f3cc 100755
--- a/binary_search_tool/test/switch_to_bad.py
+++ b/binary_search_tool/test/switch_to_bad.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/switch_to_bad_noinc_prune.py b/binary_search_tool/test/switch_to_bad_noinc_prune.py
index 473ade81..e5574f95 100755
--- a/binary_search_tool/test/switch_to_bad_noinc_prune.py
+++ b/binary_search_tool/test/switch_to_bad_noinc_prune.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/switch_to_bad_set_file.py b/binary_search_tool/test/switch_to_bad_set_file.py
index 002622de..9d4bee6f 100755
--- a/binary_search_tool/test/switch_to_bad_set_file.py
+++ b/binary_search_tool/test/switch_to_bad_set_file.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/switch_to_good.py b/binary_search_tool/test/switch_to_good.py
index a4d173d2..61a59a2a 100755
--- a/binary_search_tool/test/switch_to_good.py
+++ b/binary_search_tool/test/switch_to_good.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/switch_to_good_noinc_prune.py b/binary_search_tool/test/switch_to_good_noinc_prune.py
index 8ed8d0ca..3bda1d78 100755
--- a/binary_search_tool/test/switch_to_good_noinc_prune.py
+++ b/binary_search_tool/test/switch_to_good_noinc_prune.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/switch_to_good_set_file.py b/binary_search_tool/test/switch_to_good_set_file.py
index a1feaddf..b83cbe3f 100755
--- a/binary_search_tool/test/switch_to_good_set_file.py
+++ b/binary_search_tool/test/switch_to_good_set_file.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/test_setup.py b/binary_search_tool/test/test_setup.py
index 5ee35c55..52486a28 100755
--- a/binary_search_tool/test/test_setup.py
+++ b/binary_search_tool/test/test_setup.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/binary_search_tool/test/test_setup_bad.py b/binary_search_tool/test/test_setup_bad.py
index 4879f455..518a69fd 100755
--- a/binary_search_tool/test/test_setup_bad.py
+++ b/binary_search_tool/test/test_setup_bad.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/build_chromeos.py b/build_chromeos.py
index b5bc48fa..84ee0b84 100755
--- a/build_chromeos.py
+++ b/build_chromeos.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/build_tc.py b/build_tc.py
index f1d025a4..08f80e69 100755
--- a/build_tc.py
+++ b/build_tc.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2010 The ChromiumOS Authors. All rights reserved.
+# Copyright 2010 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/buildbot_test_llvm.py b/buildbot_test_llvm.py
index 38d3ff49..57f029c7 100755
--- a/buildbot_test_llvm.py
+++ b/buildbot_test_llvm.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2017 The ChromiumOS Authors. All rights reserved.
+# Copyright 2017 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/buildbot_test_toolchains.py b/buildbot_test_toolchains.py
index 639b817e..19c31b54 100755
--- a/buildbot_test_toolchains.py
+++ b/buildbot_test_toolchains.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2016 The ChromiumOS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/chromiumos_image_diff.py b/chromiumos_image_diff.py
index 3b3ae912..ed840cb0 100755
--- a/chromiumos_image_diff.py
+++ b/chromiumos_image_diff.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/compiler_wrapper/android_config_test.go b/compiler_wrapper/android_config_test.go
index 6e341dcc..6c62c35a 100644
--- a/compiler_wrapper/android_config_test.go
+++ b/compiler_wrapper/android_config_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/bisect_flag.go b/compiler_wrapper/bisect_flag.go
index 139862e3..2dc8daf2 100644
--- a/compiler_wrapper/bisect_flag.go
+++ b/compiler_wrapper/bisect_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/bisect_flag_test.go b/compiler_wrapper/bisect_flag_test.go
index b7c2dc7e..2071a5b2 100644
--- a/compiler_wrapper/bisect_flag_test.go
+++ b/compiler_wrapper/bisect_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/build.py b/compiler_wrapper/build.py
index 3b4b0571..930c2cfb 100755
--- a/compiler_wrapper/build.py
+++ b/compiler_wrapper/build.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/compiler_wrapper/bundle.README b/compiler_wrapper/bundle.README
index d526c149..1ffaedd7 100644
--- a/compiler_wrapper/bundle.README
+++ b/compiler_wrapper/bundle.README
@@ -1,4 +1,4 @@
-Copyright 2019 The ChromiumOS Authors. All rights reserved.
+Copyright 2019 The ChromiumOS Authors
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.

diff --git a/compiler_wrapper/bundle.py b/compiler_wrapper/bundle.py
index ddfa4e9c..90386c8f 100755
--- a/compiler_wrapper/bundle.py
+++ b/compiler_wrapper/bundle.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

diff --git a/compiler_wrapper/ccache_flag.go b/compiler_wrapper/ccache_flag.go
index 2c966fd6..0371f10a 100644
--- a/compiler_wrapper/ccache_flag.go
+++ b/compiler_wrapper/ccache_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/ccache_flag_test.go b/compiler_wrapper/ccache_flag_test.go
index 0d634b61..330d1a1c 100644
--- a/compiler_wrapper/ccache_flag_test.go
+++ b/compiler_wrapper/ccache_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/clang_flags.go b/compiler_wrapper/clang_flags.go
index fea14249..1c45935e 100644
--- a/compiler_wrapper/clang_flags.go
+++ b/compiler_wrapper/clang_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/clang_flags_test.go b/compiler_wrapper/clang_flags_test.go
index 1a9db073..08e8a8dc 100644
--- a/compiler_wrapper/clang_flags_test.go
+++ b/compiler_wrapper/clang_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/clang_syntax_flag.go b/compiler_wrapper/clang_syntax_flag.go
index 6874bd1f..4d5bd4d0 100644
--- a/compiler_wrapper/clang_syntax_flag.go
+++ b/compiler_wrapper/clang_syntax_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/clang_syntax_flag_test.go b/compiler_wrapper/clang_syntax_flag_test.go
index d4077098..728168c5 100644
--- a/compiler_wrapper/clang_syntax_flag_test.go
+++ b/compiler_wrapper/clang_syntax_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/clang_tidy_flag.go b/compiler_wrapper/clang_tidy_flag.go
index bd1f9846..b19976d2 100644
--- a/compiler_wrapper/clang_tidy_flag.go
+++ b/compiler_wrapper/clang_tidy_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/clang_tidy_flag_test.go b/compiler_wrapper/clang_tidy_flag_test.go
index 2639fe96..73dec25f 100644
--- a/compiler_wrapper/clang_tidy_flag_test.go
+++ b/compiler_wrapper/clang_tidy_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/command.go b/compiler_wrapper/command.go
index e186623a..20e11bbb 100644
--- a/compiler_wrapper/command.go
+++ b/compiler_wrapper/command.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/command_test.go b/compiler_wrapper/command_test.go
index 622d070f..031872c8 100644
--- a/compiler_wrapper/command_test.go
+++ b/compiler_wrapper/command_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/compile_with_fallback.go b/compiler_wrapper/compile_with_fallback.go
index 1e5a95a4..d0b6a163 100644
--- a/compiler_wrapper/compile_with_fallback.go
+++ b/compiler_wrapper/compile_with_fallback.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/compile_with_fallback_test.go b/compiler_wrapper/compile_with_fallback_test.go
index 54c0c498..67530a24 100644
--- a/compiler_wrapper/compile_with_fallback_test.go
+++ b/compiler_wrapper/compile_with_fallback_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/compiler_wrapper.go b/compiler_wrapper/compiler_wrapper.go
index 28d22471..1386374e 100644
--- a/compiler_wrapper/compiler_wrapper.go
+++ b/compiler_wrapper/compiler_wrapper.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/compiler_wrapper_test.go b/compiler_wrapper/compiler_wrapper_test.go
index b5a85c89..a560c9ca 100644
--- a/compiler_wrapper/compiler_wrapper_test.go
+++ b/compiler_wrapper/compiler_wrapper_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/config.go b/compiler_wrapper/config.go
index 6717d503..64d77ae7 100644
--- a/compiler_wrapper/config.go
+++ b/compiler_wrapper/config.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/config_test.go b/compiler_wrapper/config_test.go
index 207c0312..0e6b44c5 100644
--- a/compiler_wrapper/config_test.go
+++ b/compiler_wrapper/config_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/crash_builds.go b/compiler_wrapper/crash_builds.go
index 147fb369..76a5412a 100644
--- a/compiler_wrapper/crash_builds.go
+++ b/compiler_wrapper/crash_builds.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The ChromiumOS Authors.
+// Copyright 2022 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/crash_builds_test.go b/compiler_wrapper/crash_builds_test.go
index a4b2b99b..3d33d933 100644
--- a/compiler_wrapper/crash_builds_test.go
+++ b/compiler_wrapper/crash_builds_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The ChromiumOS Authors.
+// Copyright 2022 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

diff --git a/compiler_wrapper/cros_hardened_config_test.go b/compiler_wrapper/cros_hardened_config_test.go
index c619e712..80a261c8 100644
--- a/compiler_wrapper/cros_hardened_config_test.go
+++ b/compiler_wrapper/cros_hardened_config_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/cros_host_config_test.go b/compiler_wrapper/cros_host_config_test.go
index 0fd479ea..4eb9027c 100644
--- a/compiler_wrapper/cros_host_config_test.go
+++ b/compiler_wrapper/cros_host_config_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/cros_llvm_next_flags.go b/compiler_wrapper/cros_llvm_next_flags.go
index e0b3179f..4b21ad3d 100644
--- a/compiler_wrapper/cros_llvm_next_flags.go
+++ b/compiler_wrapper/cros_llvm_next_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The ChromiumOS Authors. All rights reserved.
+// Copyright 2020 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/cros_nonhardened_config_test.go b/compiler_wrapper/cros_nonhardened_config_test.go
index df14e81b..9428254e 100644
--- a/compiler_wrapper/cros_nonhardened_config_test.go
+++ b/compiler_wrapper/cros_nonhardened_config_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/disable_werror_flag.go b/compiler_wrapper/disable_werror_flag.go
index 26248f4d..17077671 100644
--- a/compiler_wrapper/disable_werror_flag.go
+++ b/compiler_wrapper/disable_werror_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/disable_werror_flag_test.go b/compiler_wrapper/disable_werror_flag_test.go
index b96c0d53..d3be921d 100644
--- a/compiler_wrapper/disable_werror_flag_test.go
+++ b/compiler_wrapper/disable_werror_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/env.go b/compiler_wrapper/env.go
index 1b4d9b31..6b25d960 100644
--- a/compiler_wrapper/env.go
+++ b/compiler_wrapper/env.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/env_test.go b/compiler_wrapper/env_test.go
index 4864f4db..6b00a8b5 100644
--- a/compiler_wrapper/env_test.go
+++ b/compiler_wrapper/env_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/errors.go b/compiler_wrapper/errors.go
index 468fb6a9..30a9ffda 100644
--- a/compiler_wrapper/errors.go
+++ b/compiler_wrapper/errors.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/errors_test.go b/compiler_wrapper/errors_test.go
index 71e35cdb..096ae373 100644
--- a/compiler_wrapper/errors_test.go
+++ b/compiler_wrapper/errors_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/gcc_flags.go b/compiler_wrapper/gcc_flags.go
index 9adc9c0d..01f7a3c2 100644
--- a/compiler_wrapper/gcc_flags.go
+++ b/compiler_wrapper/gcc_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/gcc_flags_test.go b/compiler_wrapper/gcc_flags_test.go
index 2dc8b306..45ad8668 100644
--- a/compiler_wrapper/gcc_flags_test.go
+++ b/compiler_wrapper/gcc_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/go_exec.go b/compiler_wrapper/go_exec.go
index 8c2b8ce7..88335052 100644
--- a/compiler_wrapper/go_exec.go
+++ b/compiler_wrapper/go_exec.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The ChromiumOS Authors. All rights reserved.
+// Copyright 2020 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/goldenutil_test.go b/compiler_wrapper/goldenutil_test.go
index 3a6cc7cb..16e2b7e7 100644
--- a/compiler_wrapper/goldenutil_test.go
+++ b/compiler_wrapper/goldenutil_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/install_compiler_wrapper.sh b/compiler_wrapper/install_compiler_wrapper.sh
index a503895f..81459082 100755
--- a/compiler_wrapper/install_compiler_wrapper.sh
+++ b/compiler_wrapper/install_compiler_wrapper.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/compiler_wrapper/kernel_bug.go b/compiler_wrapper/kernel_bug.go
index a1c85a71..857dae01 100644
--- a/compiler_wrapper/kernel_bug.go
+++ b/compiler_wrapper/kernel_bug.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The ChromiumOS Authors. All rights reserved.
+// Copyright 2021 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 package main
diff --git a/compiler_wrapper/kernel_bug_test.go b/compiler_wrapper/kernel_bug_test.go
index 39f2dbf4..3b363847 100644
--- a/compiler_wrapper/kernel_bug_test.go
+++ b/compiler_wrapper/kernel_bug_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The ChromiumOS Authors. All rights reserved.
+// Copyright 2021 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 package main
diff --git a/compiler_wrapper/libc_exec.go b/compiler_wrapper/libc_exec.go
index 44326d61..e4bcad8e 100644
--- a/compiler_wrapper/libc_exec.go
+++ b/compiler_wrapper/libc_exec.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/libgcc_flags.go b/compiler_wrapper/libgcc_flags.go
index 5e599504..a87223e0 100644
--- a/compiler_wrapper/libgcc_flags.go
+++ b/compiler_wrapper/libgcc_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The ChromiumOS Authors. All rights reserved.
+// Copyright 2021 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/libgcc_flags_test.go b/compiler_wrapper/libgcc_flags_test.go
index ce6456fa..d30bf737 100644
--- a/compiler_wrapper/libgcc_flags_test.go
+++ b/compiler_wrapper/libgcc_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The ChromiumOS Authors. All rights reserved.
+// Copyright 2021 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/main.go b/compiler_wrapper/main.go
index a0981dbe..8cfa4364 100644
--- a/compiler_wrapper/main.go
+++ b/compiler_wrapper/main.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/print_cmdline_flag.go b/compiler_wrapper/print_cmdline_flag.go
index c1375358..9c744c31 100644
--- a/compiler_wrapper/print_cmdline_flag.go
+++ b/compiler_wrapper/print_cmdline_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/print_cmdline_flag_test.go b/compiler_wrapper/print_cmdline_flag_test.go
index 8e79435d..54bd70c7 100644
--- a/compiler_wrapper/print_cmdline_flag_test.go
+++ b/compiler_wrapper/print_cmdline_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/print_config_flag.go b/compiler_wrapper/print_config_flag.go
index bcb23943..37e54077 100644
--- a/compiler_wrapper/print_config_flag.go
+++ b/compiler_wrapper/print_config_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/print_config_flag_test.go b/compiler_wrapper/print_config_flag_test.go
index d6868235..1984723a 100644
--- a/compiler_wrapper/print_config_flag_test.go
+++ b/compiler_wrapper/print_config_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/remote_build_flag_test.go b/compiler_wrapper/remote_build_flag_test.go
index eecfb400..23a22e1a 100644
--- a/compiler_wrapper/remote_build_flag_test.go
+++ b/compiler_wrapper/remote_build_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/remote_build_flags.go b/compiler_wrapper/remote_build_flags.go
index 7cbddfdc..7a5a7651 100644
--- a/compiler_wrapper/remote_build_flags.go
+++ b/compiler_wrapper/remote_build_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/reset_compiler_wrapper.sh b/compiler_wrapper/reset_compiler_wrapper.sh
index cc1ec7db..3206199f 100755
--- a/compiler_wrapper/reset_compiler_wrapper.sh
+++ b/compiler_wrapper/reset_compiler_wrapper.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -eux
 #
-# Copyright 2021 The ChromiumOS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/compiler_wrapper/rusage_flag.go b/compiler_wrapper/rusage_flag.go
index b2c2a4aa..ed59b11e 100644
--- a/compiler_wrapper/rusage_flag.go
+++ b/compiler_wrapper/rusage_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/rusage_flag_test.go b/compiler_wrapper/rusage_flag_test.go
index 6c264fd7..67021662 100644
--- a/compiler_wrapper/rusage_flag_test.go
+++ b/compiler_wrapper/rusage_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/sanitizer_flags.go b/compiler_wrapper/sanitizer_flags.go
index 163b0f4f..58312cc4 100644
--- a/compiler_wrapper/sanitizer_flags.go
+++ b/compiler_wrapper/sanitizer_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/sanitizer_flags_test.go b/compiler_wrapper/sanitizer_flags_test.go
index 796961eb..b4b1fd83 100644
--- a/compiler_wrapper/sanitizer_flags_test.go
+++ b/compiler_wrapper/sanitizer_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/stackprotector_flags.go b/compiler_wrapper/stackprotector_flags.go
index a41bdaf5..0e620b57 100644
--- a/compiler_wrapper/stackprotector_flags.go
+++ b/compiler_wrapper/stackprotector_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/stackprotector_flags_test.go b/compiler_wrapper/stackprotector_flags_test.go
index 00d511c7..c13862a7 100644
--- a/compiler_wrapper/stackprotector_flags_test.go
+++ b/compiler_wrapper/stackprotector_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/sysroot_flag.go b/compiler_wrapper/sysroot_flag.go
index 58d010da..597153a9 100644
--- a/compiler_wrapper/sysroot_flag.go
+++ b/compiler_wrapper/sysroot_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/sysroot_flag_test.go b/compiler_wrapper/sysroot_flag_test.go
index 579d76db..9fea6848 100644
--- a/compiler_wrapper/sysroot_flag_test.go
+++ b/compiler_wrapper/sysroot_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/testutil_test.go b/compiler_wrapper/testutil_test.go
index ff8c3d56..463e34ac 100644
--- a/compiler_wrapper/testutil_test.go
+++ b/compiler_wrapper/testutil_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/thumb_flags.go b/compiler_wrapper/thumb_flags.go
index 2afd15a1..3e641d3b 100644
--- a/compiler_wrapper/thumb_flags.go
+++ b/compiler_wrapper/thumb_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/thumb_flags_test.go b/compiler_wrapper/thumb_flags_test.go
index 23487642..24985bcf 100644
--- a/compiler_wrapper/thumb_flags_test.go
+++ b/compiler_wrapper/thumb_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/unsupported_flags.go b/compiler_wrapper/unsupported_flags.go
index 364ee07c..5a0dceeb 100644
--- a/compiler_wrapper/unsupported_flags.go
+++ b/compiler_wrapper/unsupported_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/unsupported_flags_test.go b/compiler_wrapper/unsupported_flags_test.go
index e2441141..17ff13af 100644
--- a/compiler_wrapper/unsupported_flags_test.go
+++ b/compiler_wrapper/unsupported_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/x64_flags.go b/compiler_wrapper/x64_flags.go
index 3d0db701..2e4a0af1 100644
--- a/compiler_wrapper/x64_flags.go
+++ b/compiler_wrapper/x64_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/compiler_wrapper/x64_flags_test.go b/compiler_wrapper/x64_flags_test.go
index 5261ee83..ce5caebd 100644
--- a/compiler_wrapper/x64_flags_test.go
+++ b/compiler_wrapper/x64_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The ChromiumOS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
diff --git a/crate_ebuild_help.py b/crate_ebuild_help.py
index 0a0b6dcb..c66b9897 100755
--- a/crate_ebuild_help.py
+++ b/crate_ebuild_help.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2022 The ChromiumOS Authors.
+# Copyright 2022 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/__init__.py b/cros_utils/__init__.py
index dc696529..d365cb0c 100644
--- a/cros_utils/__init__.py
+++ b/cros_utils/__init__.py
@@ -1,4 +1,4 @@
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/bugs.py b/cros_utils/bugs.py
index 5c620739..43e0e553 100755
--- a/cros_utils/bugs.py
+++ b/cros_utils/bugs.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2021 The ChromiumOS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/bugs_test.py b/cros_utils/bugs_test.py
index b7a2d09a..5a07dbd8 100755
--- a/cros_utils/bugs_test.py
+++ b/cros_utils/bugs_test.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2021 The ChromiumOS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/buildbot_utils.py b/cros_utils/buildbot_utils.py
index eb1d255c..8f0ce5e0 100644
--- a/cros_utils/buildbot_utils.py
+++ b/cros_utils/buildbot_utils.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2017 The ChromiumOS Authors. All rights reserved.
+# Copyright 2017 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/buildbot_utils_unittest.py b/cros_utils/buildbot_utils_unittest.py
index 31d3e947..2c9585b5 100755
--- a/cros_utils/buildbot_utils_unittest.py
+++ b/cros_utils/buildbot_utils_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2018 The ChromiumOS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/command_executer.py b/cros_utils/command_executer.py
index aa6de4c0..573bb2d6 100755
--- a/cros_utils/command_executer.py
+++ b/cros_utils/command_executer.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2011 The ChromiumOS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/command_executer_timeout_test.py b/cros_utils/command_executer_timeout_test.py
index 92f2e9ce..3af9bd3e 100755
--- a/cros_utils/command_executer_timeout_test.py
+++ b/cros_utils/command_executer_timeout_test.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/command_executer_unittest.py b/cros_utils/command_executer_unittest.py
index ecb85f93..7cd46a71 100755
--- a/cros_utils/command_executer_unittest.py
+++ b/cros_utils/command_executer_unittest.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/constants.py b/cros_utils/constants.py
index caee1947..47c16686 100644
--- a/cros_utils/constants.py
+++ b/cros_utils/constants.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/device_setup_utils.py b/cros_utils/device_setup_utils.py
index 29284a5c..443c6474 100644
--- a/cros_utils/device_setup_utils.py
+++ b/cros_utils/device_setup_utils.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/device_setup_utils_unittest.py b/cros_utils/device_setup_utils_unittest.py
index 8fe99bcb..d7339e25 100755
--- a/cros_utils/device_setup_utils_unittest.py
+++ b/cros_utils/device_setup_utils_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/email_sender.py b/cros_utils/email_sender.py
index 259078b1..ccf4c1b4 100755
--- a/cros_utils/email_sender.py
+++ b/cros_utils/email_sender.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/email_sender_unittest.py b/cros_utils/email_sender_unittest.py
index 38711061..66ec6a2d 100755
--- a/cros_utils/email_sender_unittest.py
+++ b/cros_utils/email_sender_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/file_utils.py b/cros_utils/file_utils.py
index 0c5213b5..743edefa 100644
--- a/cros_utils/file_utils.py
+++ b/cros_utils/file_utils.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/html_tools.py b/cros_utils/html_tools.py
index 04ea93a1..202bef05 100644
--- a/cros_utils/html_tools.py
+++ b/cros_utils/html_tools.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/locks.py b/cros_utils/locks.py
index bd2242cf..db6f4343 100644
--- a/cros_utils/locks.py
+++ b/cros_utils/locks.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/logger.py b/cros_utils/logger.py
index 8ad9df58..e9b9d1ba 100644
--- a/cros_utils/logger.py
+++ b/cros_utils/logger.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/machines.py b/cros_utils/machines.py
index b0a7134c..a5385731 100644
--- a/cros_utils/machines.py
+++ b/cros_utils/machines.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2015 The ChromiumOS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/misc.py b/cros_utils/misc.py
index 02c8e051..aabb5ad7 100644
--- a/cros_utils/misc.py
+++ b/cros_utils/misc.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/misc_test.py b/cros_utils/misc_test.py
index c3c96996..9e2d1107 100755
--- a/cros_utils/misc_test.py
+++ b/cros_utils/misc_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/no_pseudo_terminal_test.py b/cros_utils/no_pseudo_terminal_test.py
index 3cb35fac..acc90af4 100755
--- a/cros_utils/no_pseudo_terminal_test.py
+++ b/cros_utils/no_pseudo_terminal_test.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/perf_diff.py b/cros_utils/perf_diff.py
index 97e6e19d..6647b76a 100755
--- a/cros_utils/perf_diff.py
+++ b/cros_utils/perf_diff.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/tabulator.py b/cros_utils/tabulator.py
index 65d0cd42..d079ea22 100644
--- a/cros_utils/tabulator.py
+++ b/cros_utils/tabulator.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/tabulator_test.py b/cros_utils/tabulator_test.py
index 96d163e5..91ce8fd5 100755
--- a/cros_utils/tabulator_test.py
+++ b/cros_utils/tabulator_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright (c) 2012 The ChromiumOS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/timeline.py b/cros_utils/timeline.py
index be8f9a06..f18a39bb 100644
--- a/cros_utils/timeline.py
+++ b/cros_utils/timeline.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/timeline_test.py b/cros_utils/timeline_test.py
index 9dc73d91..aceab2df 100755
--- a/cros_utils/timeline_test.py
+++ b/cros_utils/timeline_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/tiny_render.py b/cros_utils/tiny_render.py
index 27891c20..6168a247 100644
--- a/cros_utils/tiny_render.py
+++ b/cros_utils/tiny_render.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/tiny_render_test.py b/cros_utils/tiny_render_test.py
index 8299093b..9c4d750b 100755
--- a/cros_utils/tiny_render_test.py
+++ b/cros_utils/tiny_render_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cros_utils/toolchain_utils.sh b/cros_utils/toolchain_utils.sh
index 78b6dd58..b5403bbe 100644
--- a/cros_utils/toolchain_utils.sh
+++ b/cros_utils/toolchain_utils.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright (c) 2012 The ChromiumOS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index d7c62c54..f9de0cf3 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index a661d6a9..84797d1c 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 7113826a..0013e19b 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/benchmark_unittest.py b/crosperf/benchmark_unittest.py
index bfa16841..bb23bdbb 100755
--- a/crosperf/benchmark_unittest.py
+++ b/crosperf/benchmark_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2014 The ChromiumOS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/column_chart.py b/crosperf/column_chart.py
index 7d6bd0dd..6ed99bf0 100644
--- a/crosperf/column_chart.py
+++ b/crosperf/column_chart.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2011 The ChromiumOS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/compare_machines.py b/crosperf/compare_machines.py
index 003a38ba..756753a2 100644
--- a/crosperf/compare_machines.py
+++ b/crosperf/compare_machines.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014 The ChromiumOS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/config.py b/crosperf/config.py
index 82a5ee38..c2a7fe5d 100644
--- a/crosperf/config.py
+++ b/crosperf/config.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2011 The ChromiumOS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/config_unittest.py b/crosperf/config_unittest.py
index df02786e..fdff7ea6 100755
--- a/crosperf/config_unittest.py
+++ b/crosperf/config_unittest.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2014 The ChromiumOS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/crosperf b/crosperf/crosperf
index 313a65ff..9a7bde0a 100755
--- a/crosperf/crosperf
+++ b/crosperf/crosperf
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/crosperf.py b/crosperf/crosperf.py
index 813da415..aace2c80 100755
--- a/crosperf/crosperf.py
+++ b/crosperf/crosperf.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2011 The ChromiumOS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/crosperf_autolock.py b/crosperf/crosperf_autolock.py
index 9a44936f..011f01e3 100755
--- a/crosperf/crosperf_autolock.py
+++ b/crosperf/crosperf_autolock.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
-# Copyright 2021 The ChromiumOS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
index 88172ec6..7b52f2e0 100755
--- a/crosperf/crosperf_unittest.py
+++ b/crosperf/crosperf_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright (c) 2014 The ChromiumOS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/download_images.py b/crosperf/download_images.py
index 7dc52495..9a46280d 100644
--- a/crosperf/download_images.py
+++ b/crosperf/download_images.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2014-2015 The ChromiumOS Authors. All rights reserved.
+# Copyright 2014-2015 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/download_images_buildid_test.py b/crosperf/download_images_buildid_test.py
index 37868e0a..20dd13c5 100755
--- a/crosperf/download_images_buildid_test.py
+++ b/crosperf/download_images_buildid_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2014 The ChromiumOS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/download_images_unittest.py b/crosperf/download_images_unittest.py
index 8e8c6524..6a640f80 100755
--- a/crosperf/download_images_unittest.py
+++ b/crosperf/download_images_unittest.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index 28ab616b..9973f7e9 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index a4265d41..c71981ab 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index ffb8e579..be84a003 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py
index 783a3224..70852a22 100644
--- a/crosperf/experiment_file.py
+++ b/crosperf/experiment_file.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2011 The ChromiumOS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py
index 90ea87a6..5c09ee06 100755
--- a/crosperf/experiment_file_unittest.py
+++ b/crosperf/experiment_file_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright (c) 2011 The ChromiumOS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment_files/telemetry_perf_perf b/crosperf/experiment_files/telemetry_perf_perf
index dc062710..e46fdc2a 100755
--- a/crosperf/experiment_files/telemetry_perf_perf
+++ b/crosperf/experiment_files/telemetry_perf_perf
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2016 The ChromiumOS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
@@ -74,4 +74,3 @@ RunExperiment 'cycles.callgraph' \
 # overhead.
 RunExperiment 'memory.bandwidth' \
 'stat -e cycles -e instructions -e uncore_imc/data_reads/ -e uncore_imc/data_writes/ -e cpu/event=0xD0,umask=0x11,name=MEM_UOPS_RETIRED-STLB_MISS_LOADS/ -e cpu/event=0xD0,umask=0x12,name=MEM_UOPS_RETIRED-STLB_MISS_STORES/'
-
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index 1f1a90b2..1f78dcc0 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2011 The ChromiumOS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py
index 50ef1797..a9a12630 100755
--- a/crosperf/experiment_runner_unittest.py
+++ b/crosperf/experiment_runner_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright (c) 2014 The ChromiumOS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/experiment_status.py b/crosperf/experiment_status.py
index c76dfa24..fa6b1eec 100644
--- a/crosperf/experiment_status.py
+++ b/crosperf/experiment_status.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2011 The ChromiumOS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/field.py b/crosperf/field.py
index 11eb3778..6b5ea110 100644
--- a/crosperf/field.py
+++ b/crosperf/field.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2011 The ChromiumOS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/flag_test_unittest.py b/crosperf/flag_test_unittest.py
index 7bb59515..024849cb 100755
--- a/crosperf/flag_test_unittest.py
+++ b/crosperf/flag_test_unittest.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2014 The ChromiumOS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/generate_report.py b/crosperf/generate_report.py
index 186aba29..55c13212 100755
--- a/crosperf/generate_report.py
+++ b/crosperf/generate_report.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2016 The ChromiumOS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/generate_report_unittest.py b/crosperf/generate_report_unittest.py
index 0d4ccf4f..86bbc164 100755
--- a/crosperf/generate_report_unittest.py
+++ b/crosperf/generate_report_unittest.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2016 The ChromiumOS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/help.py b/crosperf/help.py
index 1f3c4f36..db95fc6c 100644
--- a/crosperf/help.py
+++ b/crosperf/help.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2011 The ChromiumOS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/image_checksummer.py b/crosperf/image_checksummer.py
index 133a0576..87664e9d 100644
--- a/crosperf/image_checksummer.py
+++ b/crosperf/image_checksummer.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2011 The ChromiumOS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/label.py b/crosperf/label.py
index 8785c037..9aeff562 100644
--- a/crosperf/label.py
+++ b/crosperf/label.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/machine_image_manager.py b/crosperf/machine_image_manager.py
index c61d624e..74379bff 100644
--- a/crosperf/machine_image_manager.py
+++ b/crosperf/machine_image_manager.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2015 The ChromiumOS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/machine_image_manager_unittest.py b/crosperf/machine_image_manager_unittest.py
index dd557cdc..1ea63b1c 100755
--- a/crosperf/machine_image_manager_unittest.py
+++ b/crosperf/machine_image_manager_unittest.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2015 The ChromiumOS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index c780094b..ffb0b5e6 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
index 5eed5e1f..6324a227 100755
--- a/crosperf/machine_manager_unittest.py
+++ b/crosperf/machine_manager_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright (c) 2012 The ChromiumOS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py
index a0d581cd..4a3f9a72 100644
--- a/crosperf/mock_instance.py
+++ b/crosperf/mock_instance.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index ca5966c0..043da990 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index 8029161a..cad149e0 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright (c) 2011 The ChromiumOS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index a3701ab7..354e002d 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py
index 707f89f7..f3db266d 100755
--- a/crosperf/results_organizer_unittest.py
+++ b/crosperf/results_organizer_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index 735e1a34..045e623b 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/results_report_templates.py b/crosperf/results_report_templates.py
index e88fd9c7..3ef9e74a 100644
--- a/crosperf/results_report_templates.py
+++ b/crosperf/results_report_templates.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2016 The ChromiumOS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py
index 3c1d6663..4ce654d0 100755
--- a/crosperf/results_report_unittest.py
+++ b/crosperf/results_report_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2016 The ChromiumOS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/run_tests.sh b/crosperf/run_tests.sh
index d70fc99d..b3d4d1e2 100755
--- a/crosperf/run_tests.sh
+++ b/crosperf/run_tests.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright 2011 Google Inc. All Rights Reserved.
+# Copyright 2011 Google LLC
 # Author: raymes@google.com (Raymes Khoury)
 ../run_tests_for.py .
diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py
index c611cbcf..828b8b81 100644
--- a/crosperf/schedv2.py
+++ b/crosperf/schedv2.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2015 The ChromiumOS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/schedv2_unittest.py b/crosperf/schedv2_unittest.py
index c79c6ecd..db5f5feb 100755
--- a/crosperf/schedv2_unittest.py
+++ b/crosperf/schedv2_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2015 The ChromiumOS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/settings.py b/crosperf/settings.py
index 5488a5b0..5a983b32 100644
--- a/crosperf/settings.py
+++ b/crosperf/settings.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 469d2260..6382bba7 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py
index b8487b96..93d3bd6d 100755
--- a/crosperf/settings_factory_unittest.py
+++ b/crosperf/settings_factory_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright 2017 The ChromiumOS Authors. All rights reserved.
+# Copyright 2017 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/settings_unittest.py b/crosperf/settings_unittest.py
index 75913a09..ab31e18f 100755
--- a/crosperf/settings_unittest.py
+++ b/crosperf/settings_unittest.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index f5c4d2c7..9fc99138 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2013 The ChromiumOS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index a1ab660f..69476f37 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
-# Copyright (c) 2014 The ChromiumOS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/test_flag.py b/crosperf/test_flag.py
index 62679197..17c17a3d 100644
--- a/crosperf/test_flag.py
+++ b/crosperf/test_flag.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2011 The ChromiumOS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/crosperf/translate_xbuddy.py b/crosperf/translate_xbuddy.py
index 2ae60af4..e6a53a94 100755
--- a/crosperf/translate_xbuddy.py
+++ b/crosperf/translate_xbuddy.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cwp/cr-os/fetch_gn_descs.py b/cwp/cr-os/fetch_gn_descs.py
index a9cbdb55..50b555ad 100755
--- a/cwp/cr-os/fetch_gn_descs.py
+++ b/cwp/cr-os/fetch_gn_descs.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/cwp/cr-os/fetch_gn_descs_test.py b/cwp/cr-os/fetch_gn_descs_test.py
index a42cb2e9..8a88fe3e 100755
--- a/cwp/cr-os/fetch_gn_descs_test.py
+++ b/cwp/cr-os/fetch_gn_descs_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2020 The ChromiumOS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/debug_info_test/allowlist.py b/debug_info_test/allowlist.py
index 3cccf293..70bb776a 100644
--- a/debug_info_test/allowlist.py
+++ b/debug_info_test/allowlist.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2018 The ChromiumOS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/debug_info_test/check_cus.py b/debug_info_test/check_cus.py
index b2458e94..dbf22d08 100644
--- a/debug_info_test/check_cus.py
+++ b/debug_info_test/check_cus.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2018 The ChromiumOS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/debug_info_test/check_exist.py b/debug_info_test/check_exist.py
index d51abe42..863c591f 100644
--- a/debug_info_test/check_exist.py
+++ b/debug_info_test/check_exist.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2018 The ChromiumOS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/debug_info_test/check_icf.py b/debug_info_test/check_icf.py
index 8eca39a2..a717d81e 100644
--- a/debug_info_test/check_icf.py
+++ b/debug_info_test/check_icf.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2018 The ChromiumOS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/debug_info_test/check_ngcc.py b/debug_info_test/check_ngcc.py
index 348fcc8a..bbb58741 100644
--- a/debug_info_test/check_ngcc.py
+++ b/debug_info_test/check_ngcc.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2018 The ChromiumOS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/debug_info_test/debug_info_test.py b/debug_info_test/debug_info_test.py
index 057921aa..c324bf4c 100755
--- a/debug_info_test/debug_info_test.py
+++ b/debug_info_test/debug_info_test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2018 The ChromiumOS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/file_lock_machine.py b/file_lock_machine.py
index 2b281ff7..5268398c 100755
--- a/file_lock_machine.py
+++ b/file_lock_machine.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# Copyright 2019 The ChromiumOS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/file_lock_machine_test.py b/file_lock_machine_test.py index d59f7e28..467c183d 100755 --- a/file_lock_machine_test.py +++ b/file_lock_machine_test.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/go/chromeos/setup_chromeos_testing.py b/go/chromeos/setup_chromeos_testing.py index 863cf8c8..39530bde 100755 --- a/go/chromeos/setup_chromeos_testing.py +++ b/go/chromeos/setup_chromeos_testing.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/heatmaps/heat_map.py b/heatmaps/heat_map.py index 78ee8cba..a3c52369 100755 --- a/heatmaps/heat_map.py +++ b/heatmaps/heat_map.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2015 The ChromiumOS Authors. All rights reserved. +# Copyright 2015 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/heatmaps/heat_map_test.py b/heatmaps/heat_map_test.py index 2b86363b..96300bb4 100755 --- a/heatmaps/heat_map_test.py +++ b/heatmaps/heat_map_test.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/heatmaps/heatmap_generator.py b/heatmaps/heatmap_generator.py index 768a4e40..703c37d4 100644 --- a/heatmaps/heatmap_generator.py +++ b/heatmaps/heatmap_generator.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2018 The ChromiumOS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/heatmaps/heatmap_generator_test.py b/heatmaps/heatmap_generator_test.py index fbace10a..898c7370 100755 --- a/heatmaps/heatmap_generator_test.py +++ b/heatmaps/heatmap_generator_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2018 The ChromiumOS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/heatmaps/perf-to-inst-page.sh b/heatmaps/perf-to-inst-page.sh index 6aa03eaf..5be1a2b9 100755 --- a/heatmaps/perf-to-inst-page.sh +++ b/heatmaps/perf-to-inst-page.sh @@ -1,5 +1,5 @@ #! /bin/bash -u -# Copyright 2015 The ChromiumOS Authors. All rights reserved. +# Copyright 2015 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/image_chromeos.py b/image_chromeos.py index 5922a241..36965d05 100755 --- a/image_chromeos.py +++ b/image_chromeos.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/llvm_extra/create_ebuild_file.py b/llvm_extra/create_ebuild_file.py index 4abd466a..2a1a69a3 100755 --- a/llvm_extra/create_ebuild_file.py +++ b/llvm_extra/create_ebuild_file.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2018 The ChromiumOS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_extra/create_llvm_extra.sh b/llvm_extra/create_llvm_extra.sh index cd138ccf..6a10a877 100755 --- a/llvm_extra/create_llvm_extra.sh +++ b/llvm_extra/create_llvm_extra.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 The ChromiumOS Authors. All rights reserved. +# Copyright 2018 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/auto_llvm_bisection.py b/llvm_tools/auto_llvm_bisection.py index 0dcd6bad..3640abae 100755 --- a/llvm_tools/auto_llvm_bisection.py +++ b/llvm_tools/auto_llvm_bisection.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/auto_llvm_bisection_unittest.py b/llvm_tools/auto_llvm_bisection_unittest.py index ef82453b..c70ddee5 100755 --- a/llvm_tools/auto_llvm_bisection_unittest.py +++ b/llvm_tools/auto_llvm_bisection_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/bisect_clang_crashes.py b/llvm_tools/bisect_clang_crashes.py index 65aadabd..b2759051 100755 --- a/llvm_tools/bisect_clang_crashes.py +++ b/llvm_tools/bisect_clang_crashes.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/bisect_clang_crashes_unittest.py b/llvm_tools/bisect_clang_crashes_unittest.py index 96a375a0..22c9be19 100755 --- a/llvm_tools/bisect_clang_crashes_unittest.py +++ b/llvm_tools/bisect_clang_crashes_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/check_clang_diags.py b/llvm_tools/check_clang_diags.py index 2509dc3c..7beb958f 100755 --- a/llvm_tools/check_clang_diags.py +++ b/llvm_tools/check_clang_diags.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2022 The Chromium OS Authors. All rights reserved. +# Copyright 2022 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/check_clang_diags_test.py b/llvm_tools/check_clang_diags_test.py index c15716f0..a7889038 100755 --- a/llvm_tools/check_clang_diags_test.py +++ b/llvm_tools/check_clang_diags_test.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2022 The Chromium OS Authors. 
All rights reserved. +# Copyright 2022 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/chroot.py b/llvm_tools/chroot.py index 73ab9203..46464feb 100755 --- a/llvm_tools/chroot.py +++ b/llvm_tools/chroot.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/chroot_unittest.py b/llvm_tools/chroot_unittest.py index 39877aa5..f1a6a626 100755 --- a/llvm_tools/chroot_unittest.py +++ b/llvm_tools/chroot_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/copy_helpers_to_chromiumos_overlay.py b/llvm_tools/copy_helpers_to_chromiumos_overlay.py index 758c7533..84716aad 100755 --- a/llvm_tools/copy_helpers_to_chromiumos_overlay.py +++ b/llvm_tools/copy_helpers_to_chromiumos_overlay.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/custom_script_example.py b/llvm_tools/custom_script_example.py index ebfde1f1..5a320b41 100755 --- a/llvm_tools/custom_script_example.py +++ b/llvm_tools/custom_script_example.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/failure_modes.py b/llvm_tools/failure_modes.py index 098a0270..fc4e1fc2 100644 --- a/llvm_tools/failure_modes.py +++ b/llvm_tools/failure_modes.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/fetch_cros_sdk_rolls.py b/llvm_tools/fetch_cros_sdk_rolls.py index 72692b3d..dc678e10 100755 --- a/llvm_tools/fetch_cros_sdk_rolls.py +++ b/llvm_tools/fetch_cros_sdk_rolls.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/get_llvm_hash.py b/llvm_tools/get_llvm_hash.py index 77efccbe..fee8e4f6 100755 --- a/llvm_tools/get_llvm_hash.py +++ b/llvm_tools/get_llvm_hash.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/llvm_tools/get_llvm_hash_unittest.py b/llvm_tools/get_llvm_hash_unittest.py index fc13bc17..17a094b4 100755 --- a/llvm_tools/get_llvm_hash_unittest.py +++ b/llvm_tools/get_llvm_hash_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/get_upstream_patch.py b/llvm_tools/get_upstream_patch.py index d882fdc7..72aa16b6 100755 --- a/llvm_tools/get_upstream_patch.py +++ b/llvm_tools/get_upstream_patch.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/git.py b/llvm_tools/git.py index 2bb43c29..0f56aa0d 100755 --- a/llvm_tools/git.py +++ b/llvm_tools/git.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/git_llvm_rev.py b/llvm_tools/git_llvm_rev.py index 44a6426e..3dc34fce 100755 --- a/llvm_tools/git_llvm_rev.py +++ b/llvm_tools/git_llvm_rev.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/git_llvm_rev_test.py b/llvm_tools/git_llvm_rev_test.py index e47a2ee6..86a4b202 100755 --- a/llvm_tools/git_llvm_rev_test.py +++ b/llvm_tools/git_llvm_rev_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/git_unittest.py b/llvm_tools/git_unittest.py index cf4035ff..ce21e6c9 100755 --- a/llvm_tools/git_unittest.py +++ b/llvm_tools/git_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/llvm_bisection.py b/llvm_tools/llvm_bisection.py index e6f0e2c9..0b851ebe 100755 --- a/llvm_tools/llvm_bisection.py +++ b/llvm_tools/llvm_bisection.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/llvm_bisection_unittest.py b/llvm_tools/llvm_bisection_unittest.py index e047be41..1e86a678 100755 --- a/llvm_tools/llvm_bisection_unittest.py +++ b/llvm_tools/llvm_bisection_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. 
+# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/llvm_local_bisection.sh b/llvm_tools/llvm_local_bisection.sh index 26d277d5..e319080c 100755 --- a/llvm_tools/llvm_local_bisection.sh +++ b/llvm_tools/llvm_local_bisection.sh @@ -1,6 +1,6 @@ #!/bin/bash -u # -*- coding: utf-8 -*- -# Copyright 2022 The ChromiumOS Authors. All rights reserved. +# Copyright 2022 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/llvm_project.py b/llvm_tools/llvm_project.py index 45539596..79a6cd2e 100644 --- a/llvm_tools/llvm_project.py +++ b/llvm_tools/llvm_project.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/modify_a_tryjob.py b/llvm_tools/modify_a_tryjob.py index 79f999b1..03de606d 100755 --- a/llvm_tools/modify_a_tryjob.py +++ b/llvm_tools/modify_a_tryjob.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/modify_a_tryjob_unittest.py b/llvm_tools/modify_a_tryjob_unittest.py index 6ae29c5f..712e2614 100755 --- a/llvm_tools/modify_a_tryjob_unittest.py +++ b/llvm_tools/modify_a_tryjob_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/nightly_revert_checker.py b/llvm_tools/nightly_revert_checker.py index 78a85f90..d12464a6 100755 --- a/llvm_tools/nightly_revert_checker.py +++ b/llvm_tools/nightly_revert_checker.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/nightly_revert_checker_test.py b/llvm_tools/nightly_revert_checker_test.py index 694d3d80..86b7898a 100755 --- a/llvm_tools/nightly_revert_checker_test.py +++ b/llvm_tools/nightly_revert_checker_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/patch_manager.py b/llvm_tools/patch_manager.py index d71c3888..11e82227 100755 --- a/llvm_tools/patch_manager.py +++ b/llvm_tools/patch_manager.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/llvm_tools/patch_manager_unittest.py b/llvm_tools/patch_manager_unittest.py index 444156a5..19c2d8af 100755 --- a/llvm_tools/patch_manager_unittest.py +++ b/llvm_tools/patch_manager_unittest.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/patch_sync/src/android_utils.rs b/llvm_tools/patch_sync/src/android_utils.rs index 7b0e5849..70bca189 100644 --- a/llvm_tools/patch_sync/src/android_utils.rs +++ b/llvm_tools/patch_sync/src/android_utils.rs @@ -1,4 +1,4 @@ -// Copyright 2022 The ChromiumOS Authors. +// Copyright 2022 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. diff --git a/llvm_tools/patch_sync/src/main.rs b/llvm_tools/patch_sync/src/main.rs index c3c9a61b..a6c340be 100644 --- a/llvm_tools/patch_sync/src/main.rs +++ b/llvm_tools/patch_sync/src/main.rs @@ -1,4 +1,4 @@ -// Copyright 2022 The ChromiumOS Authors. +// Copyright 2022 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. diff --git a/llvm_tools/patch_sync/src/patch_parsing.rs b/llvm_tools/patch_sync/src/patch_parsing.rs index 0f4cb741..00153834 100644 --- a/llvm_tools/patch_sync/src/patch_parsing.rs +++ b/llvm_tools/patch_sync/src/patch_parsing.rs @@ -1,4 +1,4 @@ -// Copyright 2022 The ChromiumOS Authors. +// Copyright 2022 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. diff --git a/llvm_tools/patch_sync/src/version_control.rs b/llvm_tools/patch_sync/src/version_control.rs index 125d5cbc..fc6211ae 100644 --- a/llvm_tools/patch_sync/src/version_control.rs +++ b/llvm_tools/patch_sync/src/version_control.rs @@ -1,4 +1,4 @@ -// Copyright 2022 The ChromiumOS Authors. +// Copyright 2022 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. diff --git a/llvm_tools/patch_utils.py b/llvm_tools/patch_utils.py index 846b379a..ca912f2b 100644 --- a/llvm_tools/patch_utils.py +++ b/llvm_tools/patch_utils.py @@ -1,4 +1,4 @@ -# Copyright 2022 The ChromiumOS Authors. +# Copyright 2022 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/patch_utils_unittest.py b/llvm_tools/patch_utils_unittest.py index 54c38763..8fe45c2c 100755 --- a/llvm_tools/patch_utils_unittest.py +++ b/llvm_tools/patch_utils_unittest.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2022 The ChromiumOS Authors. +# Copyright 2022 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/subprocess_helpers.py b/llvm_tools/subprocess_helpers.py index c295cbb8..bc87db85 100644 --- a/llvm_tools/subprocess_helpers.py +++ b/llvm_tools/subprocess_helpers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/llvm_tools/test_helpers.py b/llvm_tools/test_helpers.py index 48a8e8c1..67d88d9f 100644 --- a/llvm_tools/test_helpers.py +++ b/llvm_tools/test_helpers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/update_chromeos_llvm_hash.py b/llvm_tools/update_chromeos_llvm_hash.py index 366e233f..31a10867 100755 --- a/llvm_tools/update_chromeos_llvm_hash.py +++ b/llvm_tools/update_chromeos_llvm_hash.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/update_chromeos_llvm_hash_unittest.py b/llvm_tools/update_chromeos_llvm_hash_unittest.py index 591890e6..9bed2712 100755 --- a/llvm_tools/update_chromeos_llvm_hash_unittest.py +++ b/llvm_tools/update_chromeos_llvm_hash_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/update_packages_and_run_tests.py b/llvm_tools/update_packages_and_run_tests.py index cc96ec53..dc14b6de 100755 --- a/llvm_tools/update_packages_and_run_tests.py +++ b/llvm_tools/update_packages_and_run_tests.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/update_packages_and_run_tests_unittest.py b/llvm_tools/update_packages_and_run_tests_unittest.py index cff28672..fc65749f 100755 --- a/llvm_tools/update_packages_and_run_tests_unittest.py +++ b/llvm_tools/update_packages_and_run_tests_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/update_tryjob_status.py b/llvm_tools/update_tryjob_status.py index 4c72403d..49c48658 100755 --- a/llvm_tools/update_tryjob_status.py +++ b/llvm_tools/update_tryjob_status.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/update_tryjob_status_unittest.py b/llvm_tools/update_tryjob_status_unittest.py index e73cb281..fd9250a3 100755 --- a/llvm_tools/update_tryjob_status_unittest.py +++ b/llvm_tools/update_tryjob_status_unittest.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/llvm_tools/upload_lexan_crashes_to_forcey.py b/llvm_tools/upload_lexan_crashes_to_forcey.py index 204061b0..885a88f6 100755 --- a/llvm_tools/upload_lexan_crashes_to_forcey.py +++ b/llvm_tools/upload_lexan_crashes_to_forcey.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/llvm_tools/upload_lexan_crashes_to_forcey_test.py b/llvm_tools/upload_lexan_crashes_to_forcey_test.py index 6c5008d6..7238281a 100755 --- a/llvm_tools/upload_lexan_crashes_to_forcey_test.py +++ b/llvm_tools/upload_lexan_crashes_to_forcey_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/lock_machine.py b/lock_machine.py index e7befdb2..5c2bedb3 100755 --- a/lock_machine.py +++ b/lock_machine.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/make_root_writable.py b/make_root_writable.py index 297a9150..6a15defc 100755 --- a/make_root_writable.py +++ b/make_root_writable.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2021 The ChromiumOS Authors. All rights reserved. +# Copyright 2021 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/orderfile/post_process_orderfile.py b/orderfile/post_process_orderfile.py index 4f61c19f..5f52aa6f 100755 --- a/orderfile/post_process_orderfile.py +++ b/orderfile/post_process_orderfile.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/orderfile/post_process_orderfile_test.py b/orderfile/post_process_orderfile_test.py index d7ee3f8b..60716d87 100755 --- a/orderfile/post_process_orderfile_test.py +++ b/orderfile/post_process_orderfile_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/pgo_tools/merge_profdata_and_upload.py b/pgo_tools/merge_profdata_and_upload.py index efc986f4..bb53ed6c 100755 --- a/pgo_tools/merge_profdata_and_upload.py +++ b/pgo_tools/merge_profdata_and_upload.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/pgo_tools/monitor_pgo_profiles.py b/pgo_tools/monitor_pgo_profiles.py index 846cdc2b..2c54ee80 100755 --- a/pgo_tools/monitor_pgo_profiles.py +++ b/pgo_tools/monitor_pgo_profiles.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/pgo_tools/monitor_pgo_profiles_unittest.py b/pgo_tools/monitor_pgo_profiles_unittest.py index d129c6ba..e6fc0649 100755 --- a/pgo_tools/monitor_pgo_profiles_unittest.py +++ b/pgo_tools/monitor_pgo_profiles_unittest.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/pgo_tools_rust/pgo_rust.py b/pgo_tools_rust/pgo_rust.py index 6aedc9b7..c145bc8b 100755 --- a/pgo_tools_rust/pgo_rust.py +++ b/pgo_tools_rust/pgo_rust.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright 2022 The ChromiumOS Authors. +# Copyright 2022 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/remote_test.py b/remote_test.py index c3c88b5c..01f3fe89 100755 --- a/remote_test.py +++ b/remote_test.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/run_tests_for.py b/run_tests_for.py index a8ad1674..46747c43 100755 --- a/run_tests_for.py +++ b/run_tests_for.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/rust-analyzer-chromiumos-wrapper/src/main.rs b/rust-analyzer-chromiumos-wrapper/src/main.rs index 626108ad..f59af454 100644 --- a/rust-analyzer-chromiumos-wrapper/src/main.rs +++ b/rust-analyzer-chromiumos-wrapper/src/main.rs @@ -1,4 +1,4 @@ -// Copyright 2022 The ChromiumOS Authors. +// Copyright 2022 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. diff --git a/rust_tools/rust_uprev.py b/rust_tools/rust_uprev.py index cd35fa0b..37a8506e 100755 --- a/rust_tools/rust_uprev.py +++ b/rust_tools/rust_uprev.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/rust_tools/rust_uprev_test.py b/rust_tools/rust_uprev_test.py index e737d77c..f82ef485 100755 --- a/rust_tools/rust_uprev_test.py +++ b/rust_tools/rust_uprev_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/rust_tools/rust_watch.py b/rust_tools/rust_watch.py index 1ab27413..dff239f3 100755 --- a/rust_tools/rust_watch.py +++ b/rust_tools/rust_watch.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/rust_tools/rust_watch_test.py b/rust_tools/rust_watch_test.py index dbeb0e80..1e6aec51 100755 --- a/rust_tools/rust_watch_test.py +++ b/rust_tools/rust_watch_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/seccomp_tools/mass_seccomp_editor/mass_seccomp_editor.py b/seccomp_tools/mass_seccomp_editor/mass_seccomp_editor.py index ecc6bc83..8b283d4d 100755 --- a/seccomp_tools/mass_seccomp_editor/mass_seccomp_editor.py +++ b/seccomp_tools/mass_seccomp_editor/mass_seccomp_editor.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2021 The ChromiumOS Authors. All rights reserved. +# Copyright 2021 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/seccomp_tools/mass_seccomp_editor/test_mass_seccomp_editor.py b/seccomp_tools/mass_seccomp_editor/test_mass_seccomp_editor.py index 5889dec5..c1693da5 100755 --- a/seccomp_tools/mass_seccomp_editor/test_mass_seccomp_editor.py +++ b/seccomp_tools/mass_seccomp_editor/test_mass_seccomp_editor.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2021 The ChromiumOS Authors. All rights reserved. +# Copyright 2021 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/tc_enter_chroot.py b/tc_enter_chroot.py index c746a0a2..93b3be2d 100755 --- a/tc_enter_chroot.py +++ b/tc_enter_chroot.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2010 The ChromiumOS Authors. All rights reserved. +# Copyright 2010 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/toolchain_utils_githooks/check-presubmit.py b/toolchain_utils_githooks/check-presubmit.py index 691e2510..29744aaf 100755 --- a/toolchain_utils_githooks/check-presubmit.py +++ b/toolchain_utils_githooks/check-presubmit.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # -# Copyright 2019 The ChromiumOS Authors. All rights reserved. +# Copyright 2019 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/toolchain_utils_githooks/pre-push b/toolchain_utils_githooks/pre-push index eef8a09a..49548a17 100755 --- a/toolchain_utils_githooks/pre-push +++ b/toolchain_utils_githooks/pre-push @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright (c) 2016 Google Inc. +# Copyright 2016 Google LLC # # Just execute our custom pre-push script. diff --git a/toolchain_utils_githooks/pre-push.real b/toolchain_utils_githooks/pre-push.real index 06aa6213..f913e802 100755 --- a/toolchain_utils_githooks/pre-push.real +++ b/toolchain_utils_githooks/pre-push.real @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright (c) 2015 Google Inc. 
+# Copyright 2015 Google LLC # # This is a pre-push hook that does the following before uploading a # CL for review: diff --git a/update_telemetry_defaults.py b/update_telemetry_defaults.py index a3feb6a0..929ff07e 100755 --- a/update_telemetry_defaults.py +++ b/update_telemetry_defaults.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 The ChromiumOS Authors. All rights reserved. +# Copyright 2020 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/upstream_workon/upstream_workon.bash b/upstream_workon/upstream_workon.bash index 98dced05..03b56742 100755 --- a/upstream_workon/upstream_workon.bash +++ b/upstream_workon/upstream_workon.bash @@ -1,6 +1,6 @@ #!/bin/bash -eu # -# Copyright 2021 The ChromiumOS Authors. All rights reserved. +# Copyright 2021 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. -- cgit v1.2.3 From 26193fe97a5a4911cccf394e6061de5233225d12 Mon Sep 17 00:00:00 2001 From: Bob Haarman <inglorion@chromium.org> Date: Thu, 14 Apr 2022 16:39:07 -0700 Subject: rust_uprev: support new rust-host package dev-lang/rust is being forked into a new dev-lang/rust-host package. As a result, Rust uprevs now need to update that package, too. This adds the necessary support for that to rust_uprev.py. BUG=b:227370760 TEST=Used the script to create a rust-1.61.0 to 1.62.1 uprev Change-Id: I99733db7a799f1c234b628b035557ac429d9e470 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3590914 Commit-Queue: Bob Haarman <inglorion@chromium.org> Reviewed-by: George Burgess <gbiv@chromium.org> Tested-by: Bob Haarman <inglorion@chromium.org> --- rust_tools/rust_uprev.py | 182 ++++++++++++++++++++++++++---------------- rust_tools/rust_uprev_test.py | 47 ++++++++--- 2 files changed, 148 insertions(+), 81 deletions(-) diff --git a/rust_tools/rust_uprev.py b/rust_tools/rust_uprev.py index 37a8506e..61364843 100755 --- a/rust_tools/rust_uprev.py +++ b/rust_tools/rust_uprev.py @@ -6,29 +6,22 @@ """Tool to automatically generate a new Rust uprev CL. -This tool is intended to automatically generate a CL to uprev Rust to a -newer version in ChromeOS, including creating a new Rust version or -removing an old version. It's based on -src/third_party/chromiumos-overlay/dev-lang/rust/UPGRADE.md. When using -the tool, the progress can be saved to a JSON file, so the user can resume -the process after a failing step is fixed. Example usage to create a new -version: - -1. (inside chroot) $ ./rust_tools/rust_uprev.py - --state_file /tmp/state-file.json - create --rust_version 1.45.0 -2. Step "compile rust" failed due to the patches can't apply to new version -3. Manually fix the patches -4. Execute the command in step 1 again. +This tool is intended to automatically generate a CL to uprev Rust to +a newer version in Chrome OS, including creating a new Rust version or +removing an old version. When using the tool, the progress can be +saved to a JSON file, so the user can resume the process after a +failing step is fixed. Example usage to create a new version: + +1. (inside chroot) $ ./rust_tools/rust_uprev.py \\ + --state_file /tmp/rust-to-1.60.0.json \\ + roll --uprev 1.60.0 +2. Step "compile rust" failed because the patches can't apply to the new version. +3. Manually fix the patches. +4. Execute the command in step 1 again, but add "--continue" before "roll". 5. 
Iterate 1-4 for each failed step until the tool passes. -Replace `create --rust_version 1.45.0` with `remove --rust_version 1.43.0` -if you want to remove all 1.43.0 related stuff in the same CL. Remember to -use a different state file if you choose to run different subcommands. - -If you want a hammer that can do everything for you, use the subcommand -`roll`. It can create a Rust uprev CL with `create` and `remove` and upload -the CL to chromium code review. +Besides "roll", the tool also supports subcommands that perform +various parts of an uprev. See `--help` for all available options. """ @@ -52,9 +45,8 @@ from llvm_tools import git EQUERY = "equery" GSUTIL = "gsutil.py" MIRROR_PATH = "gs://chromeos-localmirror/distfiles" -RUST_PATH = Path( - "/mnt/host/source/src/third_party/chromiumos-overlay/dev-lang/rust" -) +EBUILD_PREFIX = Path("/mnt/host/source/src/third_party/chromiumos-overlay") +RUST_PATH = Path(EBUILD_PREFIX, "dev-lang", "rust") def get_command_output(command: List[str], *args, **kwargs) -> str: @@ -343,14 +335,14 @@ def copy_patches( ) -def create_ebuild(template_ebuild: str, new_version: RustVersion) -> str: - shutil.copyfile( - template_ebuild, RUST_PATH.joinpath(f"rust-{new_version}.ebuild") - ) - subprocess.check_call( - ["git", "add", f"rust-{new_version}.ebuild"], cwd=RUST_PATH - ) - return os.path.join(RUST_PATH, f"rust-{new_version}.ebuild") +def create_ebuild( + template_ebuild: str, pkgatom: str, new_version: RustVersion +) -> str: + filename = f"{Path(pkgatom).name}-{new_version}.ebuild" + ebuild = EBUILD_PREFIX.joinpath(f"{pkgatom}/{filename}") + shutil.copyfile(template_ebuild, ebuild) + subprocess.check_call(["git", "add", filename], cwd=ebuild.parent) + return str(ebuild) def update_bootstrap_ebuild(new_bootstrap_version: RustVersion) -> None: @@ -372,8 +364,10 @@ def update_bootstrap_ebuild(new_bootstrap_version: RustVersion) -> None: new_ebuild.write_text(new_text, encoding="utf-8") -def update_ebuild(ebuild_file: str, new_bootstrap_version: RustVersion) -> None: - contents = open(ebuild_file, encoding="utf-8").read() +def update_bootstrap_version( + path: str, new_bootstrap_version: RustVersion +) -> None: + contents = open(path, encoding="utf-8").read() contents, subs = re.subn( r"^BOOTSTRAP_VERSION=.*$", 'BOOTSTRAP_VERSION="%s"' % (new_bootstrap_version,), @@ -381,12 +375,9 @@ def update_ebuild(ebuild_file: str, new_bootstrap_version: RustVersion) -> None: flags=re.MULTILINE, ) if not subs: - raise RuntimeError("BOOTSTRAP_VERSION not found in rust ebuild") - open(ebuild_file, "w", encoding="utf-8").write(contents) - logging.info( - "Rust ebuild file has BOOTSTRAP_VERSION updated to %s", - new_bootstrap_version, - ) + raise RuntimeError(f"BOOTSTRAP_VERSION not found in {path}") + open(path, "w", encoding="utf-8").write(contents) + logging.info("Rust BOOTSTRAP_VERSION updated to %s", new_bootstrap_version) def ebuild_actions( @@ -501,24 +492,28 @@ def update_manifest(ebuild_file: os.PathLike) -> None: ebuild_actions(ebuild.parent.name, ["manifest"]) -def update_rust_packages(rust_version: RustVersion, add: bool) -> None: - package_file = RUST_PATH.joinpath( - "../../profiles/targets/chromeos/package.provided" +def update_rust_packages( + pkgatom: str, rust_version: RustVersion, add: bool +) -> None: + package_file = EBUILD_PREFIX.joinpath( + "profiles/targets/chromeos/package.provided" ) with open(package_file, encoding="utf-8") as f: contents = f.read() if add: - rust_packages_re = re.compile(r"dev-lang/rust-(\d+\.\d+\.\d+)") + rust_packages_re = 
re.compile( + "^" + re.escape(pkgatom) + r"-\d+\.\d+\.\d+$", re.MULTILINE + ) rust_packages = rust_packages_re.findall(contents) - # Assume all the rust packages are in alphabetical order, so insert the new - # version to the place after the last rust_packages - new_str = f"dev-lang/rust-{rust_version}" + # Assume all the rust packages are in alphabetical order, so insert + # the new version to the place after the last rust_packages + new_str = f"{pkgatom}-{rust_version}" new_contents = contents.replace( rust_packages[-1], f"{rust_packages[-1]}\n{new_str}" ) logging.info("%s has been inserted into package.provided", new_str) else: - old_str = f"dev-lang/rust-{rust_version}\n" + old_str = f"{pkgatom}-{rust_version}\n" assert old_str in contents, f"{old_str!r} not found in package.provided" new_contents = contents.replace(old_str, "") logging.info("%s has been removed from package.provided", old_str) @@ -531,7 +526,7 @@ def update_virtual_rust( template_version: RustVersion, new_version: RustVersion ) -> None: template_ebuild = find_ebuild_path( - RUST_PATH.joinpath("../../virtual/rust"), "rust", template_version + EBUILD_PREFIX.joinpath("virtual/rust"), "rust", template_version ) virtual_rust_dir = template_ebuild.parent new_name = f"rust-{new_version}.ebuild" @@ -615,19 +610,36 @@ def create_rust_uprev( ) ), ) + run_step( + "update bootstrap version", + lambda: update_bootstrap_version( + EBUILD_PREFIX.joinpath("eclass/cros-rustc.eclass"), template_version + ), + ) run_step( "copy patches", lambda: copy_patches(RUST_PATH, template_version, rust_version), ) - ebuild_file = run_step( - "create ebuild", lambda: create_ebuild(template_ebuild, rust_version) + template_host_ebuild = EBUILD_PREFIX.joinpath( + f"dev-lang/rust-host/rust-host-{template_version}.ebuild" + ) + host_file = run_step( + "create host ebuild", + lambda: create_ebuild( + template_host_ebuild, "dev-lang/rust-host", rust_version + ), ) run_step( - "update ebuild", lambda: update_ebuild(ebuild_file, template_version) + "update host manifest to add new version", + lambda: update_manifest(Path(host_file)), + ) + target_file = run_step( + "create target ebuild", + lambda: create_ebuild(template_ebuild, "dev-lang/rust", rust_version), ) run_step( - "update manifest to add new version", - lambda: update_manifest(Path(ebuild_file)), + "update target manifest to add new version", + lambda: update_manifest(Path(target_file)), ) if not skip_compile: run_step( @@ -635,8 +647,14 @@ def create_rust_uprev( lambda: subprocess.check_call(["sudo", "emerge", "dev-lang/rust"]), ) run_step( - "insert version into rust packages", - lambda: update_rust_packages(rust_version, add=True), + "insert host version into rust packages", + lambda: update_rust_packages( + "dev-lang/rust-host", rust_version, add=True + ), + ) + run_step( + "insert target version into rust packages", + lambda: update_rust_packages("dev-lang/rust", rust_version, add=True), ) run_step( "upgrade virtual/rust", @@ -715,38 +733,62 @@ def remove_rust_uprev( "remove patches", lambda: remove_files(f"files/rust-{delete_version}-*.patch", RUST_PATH), ) - run_step("remove ebuild", lambda: remove_files(delete_ebuild, RUST_PATH)) - ebuild_file = find_ebuild_for_package("rust") run_step( - "update manifest to delete old version", - lambda: update_manifest(ebuild_file), + "remove target ebuild", lambda: remove_files(delete_ebuild, RUST_PATH) + ) + run_step( + "remove host ebuild", + lambda: remove_files( + f"rust-host-{delete_version}.ebuild", + EBUILD_PREFIX.joinpath("dev-lang/rust-host"), 
+ ), ) + target_file = find_ebuild_for_package("rust") run_step( - "remove version from rust packages", - lambda: update_rust_packages(delete_version, add=False), + "update target manifest to delete old version", + lambda: update_manifest(target_file), + ) + run_step( + "remove target version from rust packages", + lambda: update_rust_packages( + "dev-lang/rust", delete_version, add=False + ), + ) + host_file = find_ebuild_for_package("rust-host") + run_step( + "update host manifest to delete old version", + lambda: update_manifest(host_file), + ) + run_step( + "remove host version from rust packages", + lambda: update_rust_packages( + "dev-lang/rust-host", delete_version, add=False + ), ) run_step("remove virtual/rust", lambda: remove_virtual_rust(delete_version)) def remove_virtual_rust(delete_version: RustVersion) -> None: ebuild = find_ebuild_path( - RUST_PATH.joinpath("../../virtual/rust"), "rust", delete_version + EBUILD_PREFIX.joinpath("virtual/rust"), "rust", delete_version ) subprocess.check_call(["git", "rm", str(ebuild.name)], cwd=ebuild.parent) def rust_bootstrap_path() -> Path: - return RUST_PATH.joinpath("../rust-bootstrap") + return EBUILD_PREFIX.joinpath("dev-lang/rust-bootstrap") def create_new_repo(rust_version: RustVersion) -> None: - output = get_command_output(["git", "status", "--porcelain"], cwd=RUST_PATH) + output = get_command_output( + ["git", "status", "--porcelain"], cwd=EBUILD_PREFIX + ) if output: raise RuntimeError( - f"{RUST_PATH} has uncommitted changes, please either discard them " - "or commit them." + f"{EBUILD_PREFIX} has uncommitted changes, please either discard " + "them or commit them." ) - git.CreateBranch(RUST_PATH, f"rust-to-{rust_version}") + git.CreateBranch(EBUILD_PREFIX, f"rust-to-{rust_version}") def build_cross_compiler() -> None: @@ -780,7 +822,7 @@ def build_cross_compiler() -> None: def create_new_commit(rust_version: RustVersion) -> None: - subprocess.check_call(["git", "add", "-A"], cwd=RUST_PATH) + subprocess.check_call(["git", "add", "-A"], cwd=EBUILD_PREFIX) messages = [ f"[DO NOT SUBMIT] dev-lang/rust: upgrade to Rust {rust_version}", "", @@ -788,7 +830,7 @@ def create_new_commit(rust_version: RustVersion) -> None: "BUG=None", "TEST=Use CQ to test the new Rust version", ] - git.UploadChanges(RUST_PATH, f"rust-to-{rust_version}", messages) + git.UploadChanges(EBUILD_PREFIX, f"rust-to-{rust_version}", messages) def main() -> None: diff --git a/rust_tools/rust_uprev_test.py b/rust_tools/rust_uprev_test.py index f82ef485..42fde036 100755 --- a/rust_tools/rust_uprev_test.py +++ b/rust_tools/rust_uprev_test.py @@ -242,8 +242,8 @@ class PrepareUprevTest(unittest.TestCase): self.assertEqual(expected, actual) -class UpdateEbuildTest(unittest.TestCase): - """Tests for update_ebuild step in rust_uprev""" +class UpdateBootstrapVersionTest(unittest.TestCase): + """Tests for update_bootstrap_version step in rust_uprev""" ebuild_file_before = """ BOOTSTRAP_VERSION="1.2.0" @@ -255,9 +255,9 @@ BOOTSTRAP_VERSION="1.3.6" def test_success(self): mock_open = mock.mock_open(read_data=self.ebuild_file_before) # ebuild_file and new bootstrap version are deliberately different - ebuild_file = "/path/to/rust/rust-1.3.5.ebuild" + ebuild_file = "/path/to/rust/cros-rustc.eclass" with mock.patch("builtins.open", mock_open): - rust_uprev.update_ebuild( + rust_uprev.update_bootstrap_version( ebuild_file, rust_uprev.RustVersion.parse("1.3.6") ) mock_open.return_value.__enter__().write.assert_called_once_with( @@ -269,11 +269,12 @@ BOOTSTRAP_VERSION="1.3.6" 
ebuild_file = "/path/to/rust/rust-1.3.5.ebuild" with mock.patch("builtins.open", mock_open): with self.assertRaises(RuntimeError) as context: - rust_uprev.update_ebuild( + rust_uprev.update_bootstrap_version( ebuild_file, rust_uprev.RustVersion.parse("1.2.0") ) self.assertEqual( - "BOOTSTRAP_VERSION not found in rust ebuild", str(context.exception) + "BOOTSTRAP_VERSION not found in /path/to/rust/rust-1.3.5.ebuild", + str(context.exception), ) @@ -354,7 +355,9 @@ class UpdateRustPackagesTests(unittest.TestCase): ) mock_open = mock.mock_open(read_data=package_before) with mock.patch("builtins.open", mock_open): - rust_uprev.update_rust_packages(self.new_version, add=True) + rust_uprev.update_rust_packages( + "dev-lang/rust", self.new_version, add=True + ) mock_open.return_value.__enter__().write.assert_called_once_with( package_after ) @@ -371,7 +374,9 @@ class UpdateRustPackagesTests(unittest.TestCase): ) mock_open = mock.mock_open(read_data=package_before) with mock.patch("builtins.open", mock_open): - rust_uprev.update_rust_packages(self.old_version, add=False) + rust_uprev.update_rust_packages( + "dev-lang/rust", self.old_version, add=False + ) mock_open.return_value.__enter__().write.assert_called_once_with( package_after ) @@ -436,9 +441,11 @@ class RustUprevOtherStagesTests(unittest.TestCase): @mock.patch.object(shutil, "copyfile") @mock.patch.object(subprocess, "check_call") - def test_create_ebuild(self, mock_call, mock_copy): + def test_create_rust_ebuild(self, mock_call, mock_copy): template_ebuild = f"/path/to/rust-{self.current_version}-r2.ebuild" - rust_uprev.create_ebuild(template_ebuild, self.new_version) + rust_uprev.create_ebuild( + template_ebuild, "dev-lang/rust", self.new_version + ) mock_copy.assert_called_once_with( template_ebuild, rust_uprev.RUST_PATH.joinpath(f"rust-{self.new_version}.ebuild"), @@ -448,6 +455,24 @@ class RustUprevOtherStagesTests(unittest.TestCase): cwd=rust_uprev.RUST_PATH, ) + @mock.patch.object(shutil, "copyfile") + @mock.patch.object(subprocess, "check_call") + def test_create_rust_host_ebuild(self, mock_call, mock_copy): + template_ebuild = f"/path/to/rust-host-{self.current_version}-r2.ebuild" + rust_uprev.create_ebuild( + template_ebuild, "dev-lang/rust-host", self.new_version + ) + mock_copy.assert_called_once_with( + template_ebuild, + rust_uprev.EBUILD_PREFIX.joinpath( + f"dev-lang/rust-host/rust-host-{self.new_version}.ebuild" + ), + ) + mock_call.assert_called_once_with( + ["git", "add", f"rust-host-{self.new_version}.ebuild"], + cwd=rust_uprev.EBUILD_PREFIX.joinpath("dev-lang/rust-host"), + ) + @mock.patch.object(rust_uprev, "find_ebuild_for_package") @mock.patch.object(subprocess, "check_call") def test_remove_rust_bootstrap_version(self, mock_call, *_args): @@ -541,7 +566,7 @@ class RustUprevOtherStagesTests(unittest.TestCase): mock_output.return_value = "" rust_uprev.create_new_repo(self.new_version) mock_branch.assert_called_once_with( - rust_uprev.RUST_PATH, f"rust-to-{self.new_version}" + rust_uprev.EBUILD_PREFIX, f"rust-to-{self.new_version}" ) @mock.patch.object(rust_uprev, "get_command_output") -- cgit v1.2.3 From a92f49cb65b31da40958279c80f49a7a46e89fe2 Mon Sep 17 00:00:00 2001 From: Bob Haarman <inglorion@chromium.org> Date: Mon, 16 May 2022 14:24:10 -0700 Subject: rust_uprev: Support links to base ebuild files Previously, rust_uprev required that there be exactly one ebuild file for a given rust{,-bootstrap,-uprev} version. 
This breaks when using the common idiom of having a rust-1.2.3.ebuild and a symlink to it with an -r version. This change adds support for that by excluding such symlinks from the count, and removing both the base files and the symlinks when removing ebuild versions. BUG=b:227370760 TEST=Example uprev created by this script: crrev.com/c/3919575 Change-Id: I901ac8208c6bc48670860644ae91ee408e5f155d Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3651375 Commit-Queue: Bob Haarman <inglorion@chromium.org> Tested-by: Bob Haarman <inglorion@chromium.org> Reviewed-by: George Burgess <gbiv@chromium.org> --- rust_tools/rust_uprev.py | 91 +++++++++++++++++++------- rust_tools/rust_uprev_test.py | 148 +++++++++++++++++++++++++++++++++++------- 2 files changed, 192 insertions(+), 47 deletions(-) diff --git a/rust_tools/rust_uprev.py b/rust_tools/rust_uprev.py index 61364843..3c87a134 100755 --- a/rust_tools/rust_uprev.py +++ b/rust_tools/rust_uprev.py @@ -126,18 +126,35 @@ def find_ebuild_path( ) -> Path: """Finds an ebuild in a directory. - Returns the path to the ebuild file. Asserts if there is not - exactly one match. The match is constrained by name and optionally - by version, but can match any patch level. E.g. "rust" version - 1.3.4 can match rust-1.3.4.ebuild but also rust-1.3.4-r6.ebuild. + Returns the path to the ebuild file. The match is constrained by + name and optionally by version, but can match any patch level. + E.g. "rust" version 1.3.4 can match rust-1.3.4.ebuild but also + rust-1.3.4-r6.ebuild. + + The expectation is that there is only one matching ebuild, and + an assert is raised if this is not the case. However, symlinks to + ebuilds in the same directory are ignored, so having a + rust-x.y.z-rn.ebuild symlink to rust-x.y.z.ebuild is allowed. """ if version: pattern = f"{name}-{version}*.ebuild" else: pattern = f"{name}-*.ebuild" - matches = list(Path(directory).glob(pattern)) - assert len(matches) == 1, matches - return matches[0] + matches = set(directory.glob(pattern)) + result = [] + # Only count matches that are not links to other matches. + for m in matches: + try: + target = os.readlink(directory / m) + except OSError: + # Getting here means the match is not a symlink to one of + # the matching ebuilds, so add it to the result list. 
+ result.append(m) + continue + if directory / target not in matches: + result.append(m) + assert len(result) == 1, result + return result[0] def get_rust_bootstrap_version(): @@ -317,7 +334,7 @@ def prepare_uprev( def copy_patches( directory: Path, template_version: RustVersion, new_version: RustVersion ) -> None: - patch_path = directory.joinpath("files") + patch_path = directory / "files" prefix = "%s-%s-" % (directory.name, template_version) new_prefix = "%s-%s-" % (directory.name, new_version) for f in os.listdir(patch_path): @@ -670,11 +687,11 @@ def find_rust_versions_in_chroot() -> List[Tuple[RustVersion, str]]: ] -def find_oldest_rust_version_in_chroot() -> Tuple[RustVersion, str]: +def find_oldest_rust_version_in_chroot() -> RustVersion: rust_versions = find_rust_versions_in_chroot() if len(rust_versions) <= 1: raise RuntimeError("Expect to find more than one Rust versions") - return min(rust_versions) + return min(rust_versions)[0] def find_ebuild_for_rust_version(version: RustVersion) -> str: @@ -691,6 +708,31 @@ def find_ebuild_for_rust_version(version: RustVersion) -> str: return rust_ebuilds[0] +def remove_ebuild_version(path: os.PathLike, name: str, version: RustVersion): + """Remove the specified version of an ebuild. + + Removes {path}/{name}-{version}.ebuild and {path}/{name}-{version}-*.ebuild + using git rm. + + Args: + path: The directory in which the ebuild files are. + name: The name of the package (e.g. 'rust'). + version: The version of the ebuild to remove. + """ + path = Path(path) + pattern = f"{name}-{version}-*.ebuild" + matches = list(path.glob(pattern)) + ebuild = path / f"{name}-{version}.ebuild" + if ebuild.exists(): + matches.append(ebuild) + if not matches: + logging.warning( + "No ebuilds matching %s version %s in %r", name, version, str(path) + ) + for m in matches: + remove_files(m.name, path) + + def remove_files(filename: str, path: str) -> None: subprocess.check_call(["git", "rm", filename], cwd=path) @@ -698,10 +740,11 @@ def remove_files(filename: str, path: str) -> None: def remove_rust_bootstrap_version( version: RustVersion, run_step: Callable[[], T] ) -> None: - prefix = f"rust-bootstrap-{version}" run_step( "remove old bootstrap ebuild", - lambda: remove_files(f"{prefix}*.ebuild", rust_bootstrap_path()), + lambda: remove_ebuild_version( + rust_bootstrap_path(), "rust-bootstrap", version + ), ) ebuild_file = find_ebuild_for_package("rust-bootstrap") run_step( @@ -713,18 +756,15 @@ def remove_rust_bootstrap_version( def remove_rust_uprev( rust_version: Optional[RustVersion], run_step: Callable[[], T] ) -> None: - def find_desired_rust_version(): + def find_desired_rust_version() -> RustVersion: if rust_version: - return rust_version, find_ebuild_for_rust_version(rust_version) + return rust_version return find_oldest_rust_version_in_chroot() - def find_desired_rust_version_from_json( - obj: Any, - ) -> Tuple[RustVersion, str]: - version, ebuild_path = obj - return RustVersion(*version), ebuild_path + def find_desired_rust_version_from_json(obj: Any) -> RustVersion: + return RustVersion(*obj) - delete_version, delete_ebuild = run_step( + delete_version = run_step( "find rust version to delete", find_desired_rust_version, result_from_json=find_desired_rust_version_from_json, @@ -734,13 +774,15 @@ def remove_rust_uprev( lambda: remove_files(f"files/rust-{delete_version}-*.patch", RUST_PATH), ) run_step( - "remove target ebuild", lambda: remove_files(delete_ebuild, RUST_PATH) + "remove target ebuild", + lambda: remove_ebuild_version(RUST_PATH, 
"rust", delete_version), ) run_step( "remove host ebuild", - lambda: remove_files( - f"rust-host-{delete_version}.ebuild", + lambda: remove_ebuild_version( EBUILD_PREFIX.joinpath("dev-lang/rust-host"), + "rust-host", + delete_version, ), ) target_file = find_ebuild_for_package("rust") @@ -769,10 +811,9 @@ def remove_rust_uprev( def remove_virtual_rust(delete_version: RustVersion) -> None: - ebuild = find_ebuild_path( + remove_ebuild_version( EBUILD_PREFIX.joinpath("virtual/rust"), "rust", delete_version ) - subprocess.check_call(["git", "rm", str(ebuild.name)], cwd=ebuild.parent) def rust_bootstrap_path() -> Path: diff --git a/rust_tools/rust_uprev_test.py b/rust_tools/rust_uprev_test.py index 42fde036..0c4c91ed 100755 --- a/rust_tools/rust_uprev_test.py +++ b/rust_tools/rust_uprev_test.py @@ -83,32 +83,109 @@ class FindEbuildPathTest(unittest.TestCase): """Tests for rust_uprev.find_ebuild_path()""" def test_exact_version(self): - with tempfile.TemporaryDirectory() as tmpdir: - ebuild = Path(tmpdir, "test-1.3.4.ebuild") + with tempfile.TemporaryDirectory() as t: + tmpdir = Path(t) + ebuild = tmpdir / "test-1.3.4.ebuild" ebuild.touch() - Path(tmpdir, "test-1.2.3.ebuild").touch() + (tmpdir / "test-1.2.3.ebuild").touch() result = rust_uprev.find_ebuild_path( tmpdir, "test", rust_uprev.RustVersion(1, 3, 4) ) self.assertEqual(result, ebuild) def test_no_version(self): - with tempfile.TemporaryDirectory() as tmpdir: - ebuild = Path(tmpdir, "test-1.2.3.ebuild") + with tempfile.TemporaryDirectory() as t: + tmpdir = Path(t) + ebuild = tmpdir / "test-1.2.3.ebuild" ebuild.touch() result = rust_uprev.find_ebuild_path(tmpdir, "test") self.assertEqual(result, ebuild) def test_patch_version(self): - with tempfile.TemporaryDirectory() as tmpdir: - ebuild = Path(tmpdir, "test-1.3.4-r3.ebuild") + with tempfile.TemporaryDirectory() as t: + tmpdir = Path(t) + ebuild = tmpdir / "test-1.3.4-r3.ebuild" + ebuild.touch() + (tmpdir / "test-1.2.3.ebuild").touch() + result = rust_uprev.find_ebuild_path( + tmpdir, "test", rust_uprev.RustVersion(1, 3, 4) + ) + self.assertEqual(result, ebuild) + + def test_multiple_versions(self): + with tempfile.TemporaryDirectory() as t: + tmpdir = Path(t) + (tmpdir / "test-1.3.4-r3.ebuild").touch() + (tmpdir / "test-1.3.5.ebuild").touch() + with self.assertRaises(AssertionError): + rust_uprev.find_ebuild_path(tmpdir, "test") + + def test_selected_version(self): + with tempfile.TemporaryDirectory() as t: + tmpdir = Path(t) + ebuild = tmpdir / "test-1.3.4-r3.ebuild" ebuild.touch() - Path(tmpdir, "test-1.2.3.ebuild").touch() + (tmpdir / "test-1.3.5.ebuild").touch() result = rust_uprev.find_ebuild_path( tmpdir, "test", rust_uprev.RustVersion(1, 3, 4) ) self.assertEqual(result, ebuild) + def test_symlink(self): + # Symlinks to ebuilds in the same directory are allowed, and the return + # value is the regular file. 
+ with tempfile.TemporaryDirectory() as t: + tmpdir = Path(t) + ebuild = tmpdir / "test-1.3.4.ebuild" + ebuild.touch() + (tmpdir / "test-1.3.4-r1.ebuild").symlink_to("test-1.3.4.ebuild") + result = rust_uprev.find_ebuild_path(tmpdir, "test") + self.assertEqual(result, ebuild) + + +class RemoveEbuildVersionTest(unittest.TestCase): + """Tests for rust_uprev.remove_ebuild_version()""" + + @mock.patch.object(subprocess, "check_call") + def test_single(self, check_call): + with tempfile.TemporaryDirectory() as tmpdir: + ebuild_dir = Path(tmpdir, "test-ebuilds") + ebuild_dir.mkdir() + ebuild = Path(ebuild_dir, "test-3.1.4.ebuild") + ebuild.touch() + Path(ebuild_dir, "unrelated-1.0.0.ebuild").touch() + rust_uprev.remove_ebuild_version( + ebuild_dir, "test", rust_uprev.RustVersion(3, 1, 4) + ) + check_call.assert_called_once_with( + ["git", "rm", "test-3.1.4.ebuild"], cwd=ebuild_dir + ) + + @mock.patch.object(subprocess, "check_call") + def test_symlink(self, check_call): + with tempfile.TemporaryDirectory() as tmpdir: + ebuild_dir = Path(tmpdir, "test-ebuilds") + ebuild_dir.mkdir() + ebuild = Path(ebuild_dir, "test-3.1.4.ebuild") + ebuild.touch() + symlink = Path(ebuild_dir, "test-3.1.4-r5.ebuild") + symlink.symlink_to(ebuild.name) + Path(ebuild_dir, "unrelated-1.0.0.ebuild").touch() + rust_uprev.remove_ebuild_version( + ebuild_dir, "test", rust_uprev.RustVersion(3, 1, 4) + ) + check_call.assert_has_calls( + [ + mock.call( + ["git", "rm", "test-3.1.4.ebuild"], cwd=ebuild_dir + ), + mock.call( + ["git", "rm", "test-3.1.4-r5.ebuild"], cwd=ebuild_dir + ), + ], + any_order=True, + ) + class RustVersionTest(unittest.TestCase): """Tests for RustVersion class""" @@ -504,17 +581,47 @@ class RustUprevOtherStagesTests(unittest.TestCase): ] ) - @mock.patch.object(rust_uprev, "find_ebuild_path") @mock.patch.object(subprocess, "check_call") - def test_remove_virtual_rust(self, mock_call, mock_find_ebuild): - ebuild_path = Path( - f"/some/dir/virtual/rust/rust-{self.old_version}.ebuild" - ) - mock_find_ebuild.return_value = Path(ebuild_path) - rust_uprev.remove_virtual_rust(self.old_version) - mock_call.assert_called_once_with( - ["git", "rm", str(ebuild_path.name)], cwd=ebuild_path.parent - ) + def test_remove_virtual_rust(self, mock_call): + with tempfile.TemporaryDirectory() as tmpdir: + ebuild_path = Path( + tmpdir, f"virtual/rust/rust-{self.old_version}.ebuild" + ) + os.makedirs(ebuild_path.parent) + ebuild_path.touch() + with mock.patch("rust_uprev.EBUILD_PREFIX", Path(tmpdir)): + rust_uprev.remove_virtual_rust(self.old_version) + mock_call.assert_called_once_with( + ["git", "rm", str(ebuild_path.name)], cwd=ebuild_path.parent + ) + + @mock.patch.object(subprocess, "check_call") + def test_remove_virtual_rust_with_symlink(self, mock_call): + with tempfile.TemporaryDirectory() as tmpdir: + ebuild_path = Path( + tmpdir, f"virtual/rust/rust-{self.old_version}.ebuild" + ) + symlink_path = Path( + tmpdir, f"virtual/rust/rust-{self.old_version}-r14.ebuild" + ) + os.makedirs(ebuild_path.parent) + ebuild_path.touch() + symlink_path.symlink_to(ebuild_path.name) + with mock.patch("rust_uprev.EBUILD_PREFIX", Path(tmpdir)): + rust_uprev.remove_virtual_rust(self.old_version) + mock_call.assert_has_calls( + [ + mock.call( + ["git", "rm", ebuild_path.name], + cwd=ebuild_path.parent, + ), + mock.call( + ["git", "rm", symlink_path.name], + cwd=ebuild_path.parent, + ), + ], + any_order=True, + ) @mock.patch.object(rust_uprev, "find_ebuild_path") @mock.patch.object(shutil, "copyfile") @@ -543,10 +650,7 @@ class 
RustUprevOtherStagesTests(unittest.TestCase): f"rust-{self.new_version}.ebuild", ] actual = rust_uprev.find_oldest_rust_version_in_chroot() - expected = ( - self.old_version, - os.path.join(rust_uprev.RUST_PATH, oldest_version_name), - ) + expected = self.old_version self.assertEqual(expected, actual) @mock.patch.object(os, "listdir") -- cgit v1.2.3 From 17d9a5c3229a2033a834eaf6284694683bb400a0 Mon Sep 17 00:00:00 2001 From: Bob Haarman <inglorion@chromium.org> Date: Mon, 16 May 2022 14:37:41 -0700 Subject: rust_uprev: Ensure that packages can be built Although rust_uprev emerges rust, this does not ensure that all affected packages (rust, rust-bootstrap, rust-host) can be built from the new sources. For example, it is possible to build and install rust-host, then make changes to it, and re-running rust_uprev will then not rebuild it, because it is already installed at the requested version. This CL changes rust_uprev so that it first removes rust, rust-host, and rust-bootstrap if they are installed, so that unbuildable packages will be detected by rust_uprev. BUG=b:232833366 TEST=Run with installed rust-host package that is unbuildable, see error Change-Id: I7aeb7d1ac01e737e322bf005a31f1d678e52e042 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3653197 Tested-by: Bob Haarman <inglorion@chromium.org> Reviewed-by: George Burgess <gbiv@chromium.org> Commit-Queue: Bob Haarman <inglorion@chromium.org> --- rust_tools/rust_uprev.py | 55 ++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 51 insertions(+), 4 deletions(-) diff --git a/rust_tools/rust_uprev.py b/rust_tools/rust_uprev.py index 3c87a134..382d991a 100755 --- a/rust_tools/rust_uprev.py +++ b/rust_tools/rust_uprev.py @@ -33,6 +33,7 @@ import os import pathlib from pathlib import Path import re +import shlex import shutil import subprocess import sys @@ -552,6 +553,20 @@ def update_virtual_rust( subprocess.check_call(["git", "add", new_name], cwd=virtual_rust_dir) +def unmerge_package_if_installed(pkgatom: str) -> None: + """Unmerges a package if it is installed.""" + shpkg = shlex.quote(pkgatom) + subprocess.check_call( + [ + "sudo", + "bash", + "-c", + f"! emerge --pretend --quiet --unmerge {shpkg}" + f" || emerge --rage-clean {shpkg}", + ] + ) + + def perform_step( state_file: pathlib.Path, tmp_state_file: pathlib.Path, @@ -659,10 +674,7 @@ def create_rust_uprev( lambda: update_manifest(Path(target_file)), ) if not skip_compile: - run_step( - "emerge rust", - lambda: subprocess.check_call(["sudo", "emerge", "dev-lang/rust"]), - ) + run_step("build packages", lambda: rebuild_packages(rust_version)) run_step( "insert host version into rust packages", lambda: update_rust_packages( @@ -708,6 +720,41 @@ def find_ebuild_for_rust_version(version: RustVersion) -> str: return rust_ebuilds[0] +def rebuild_packages(version: RustVersion): + """Rebuild packages modified by this script.""" + # Remove all packages we modify to avoid depending on preinstalled + # versions. This ensures that the packages can really be built. + packages = [ + "dev-lang/rust", + "dev-lang/rust-host", + "dev-lang/rust-bootstrap", + ] + for pkg in packages: + unmerge_package_if_installed(pkg) + # Mention only dev-lang/rust explicitly, so that others are pulled + # in as dependencies (letting us detect dependency errors). + # Packages we modify are listed in --usepkg-exclude to ensure they + # are built from source. 
+    try:
+        subprocess.check_call(
+            [
+                "sudo",
+                "emerge",
+                "--quiet-build",
+                "--usepkg-exclude",
+                " ".join(packages),
+                f"=dev-lang/rust-{version}",
+            ]
+        )
+    except:
+        logging.warning(
+            "Failed to build dev-lang/rust or one of its dependencies."
+            " If necessary, you can restore rust and rust-host from"
+            " binary packages:\n sudo emerge --getbinpkgonly dev-lang/rust"
+        )
+        raise
+
+
 def remove_ebuild_version(path: os.PathLike, name: str, version: RustVersion):
     """Remove the specified version of an ebuild.
-- 
cgit v1.2.3


From 3d3b092864634dd7e7d342231cdc8e23c829888d Mon Sep 17 00:00:00 2001
From: Bob Haarman <inglorion@chromium.org>
Date: Thu, 29 Sep 2022 17:23:51 -0700
Subject: crosperf: fix pylint errors in suite_runner.py

crosperf/suite_runner.py uses .format(), which we no longer want. This
converts the code to use f-strings instead.

BUG=None
TEST=See that pylint no longer complains about .format()

Change-Id: I7085f2da5659843d2501eb2f19137b64dff7b106
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3928353
Tested-by: Bob Haarman <inglorion@chromium.org>
Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com>
Reviewed-by: Denis Nikitin <denik@chromium.org>
Commit-Queue: Bob Haarman <inglorion@chromium.org>
---
 crosperf/suite_runner.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index 9fc99138..dfd14b21 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -50,7 +50,7 @@ def GetProfilerArgs(profiler_args):
 
 
 def GetDutConfigArgs(dut_config):
-    return "dut_config={}".format(pipes.quote(json.dumps(dut_config)))
+    return f"dut_config={pipes.quote(json.dumps(dut_config))}"
 
 
 class SuiteRunner(object):
@@ -222,10 +222,10 @@ class SuiteRunner(object):
         # process namespace and we can kill process created easily by their
         # process group.
         chrome_root_options = (
-            "--no-ns-pid "
-            "--chrome_root={0} --chrome_root_mount={1} "
-            'FEATURES="-usersandbox" '
-            "CHROME_ROOT={1}".format(label.chrome_src, CHROME_MOUNT_DIR)
+            f"--no-ns-pid "
+            f"--chrome_root={label.chrome_src} --chrome_root_mount={CHROME_MOUNT_DIR} "
+            f'FEATURES="-usersandbox" '
+            f"CHROME_ROOT={CHROME_MOUNT_DIR}"
        )
 
         if self.log_level != "verbose":
-- 
cgit v1.2.3


From 8f7a10dd9cfe029803059de4043cf9ceb30c83ff Mon Sep 17 00:00:00 2001
From: Denis Nikitin <denik@chromium.org>
Date: Fri, 30 Sep 2022 17:35:16 -0700
Subject: crosperf: Remove kefka

Kefka reached EOL and was already removed from the toolchain pool.
BUG=b:232256537
TEST=CQ

Change-Id: Ic12b03177422c10319aa489b7f3baf177269d9ac
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3930952
Tested-by: Denis Nikitin <denik@chromium.org>
Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com>
Auto-Submit: Denis Nikitin <denik@chromium.org>
Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com>
---
 crosperf/default_remotes                | 1 -
 crosperf/experiment_factory_unittest.py | 1 -
 2 files changed, 2 deletions(-)

diff --git a/crosperf/default_remotes b/crosperf/default_remotes
index 31e99bbf..714385e7 100644
--- a/crosperf/default_remotes
+++ b/crosperf/default_remotes
@@ -2,6 +2,5 @@ bob : chromeos8-row12-rack16-host2
 chell : chromeos2-row1-rack10-host2 chromeos2-row1-rack10-host4
 coral : chromeos6-row5-rack6-host1 chromeos6-row5-rack6-host3 chromeos6-row5-rack6-host5
 elm : chromeos6-row14-rack15-host21
-kefka : chromeos6-row6-rack22-host2 chromeos6-row6-rack22-host3 chromeos6-row11-rack22-host7
 nautilus : chromeos6-row5-rack10-host1 chromeos6-row5-rack10-host3
 snappy : chromeos8-row12-rack17-host1 chromeos8-row12-rack17-host2
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index be84a003..0541bb9b 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -482,7 +482,6 @@ class ExperimentFactoryTest(unittest.TestCase):
             "chell",
             "coral",
             "elm",
-            "kefka",
             "nautilus",
             "snappy",
         ]
-- 
cgit v1.2.3


From 29c8cac9d0a133b96d213e28bb277f539582627b Mon Sep 17 00:00:00 2001
From: George Burgess IV <gbiv@google.com>
Date: Sat, 1 Oct 2022 12:08:17 -0700
Subject: command: fix flaky test

If `exec` times out, we need to wrap the error it died with, rather
than embedding it in another error. This allows
`TestRunWithTimeoutReturnsErrorOnTimeout`'s `errors.Is` to correctly
detect that `err` is a `context.DeadlineExceeded`.

BUG=None
TEST=ran this test 1000x

Change-Id: I69e0dcea25dfd0cb01ac5ec0a2e8ce2f116deff7
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3932869
Auto-Submit: George Burgess <gbiv@chromium.org>
Reviewed-by: Manoj Gupta <manojgupta@chromium.org>
Commit-Queue: George Burgess <gbiv@chromium.org>
Tested-by: George Burgess <gbiv@chromium.org>
Commit-Queue: Manoj Gupta <manojgupta@chromium.org>
---
 compiler_wrapper/command.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/compiler_wrapper/command.go b/compiler_wrapper/command.go
index 20e11bbb..e2a5176d 100644
--- a/compiler_wrapper/command.go
+++ b/compiler_wrapper/command.go
@@ -76,7 +76,7 @@ func runCmdWithTimeout(env env, cmd *command, t time.Duration) error {
 	cmdCtx.Stderr = env.stderr()
 
 	if err := cmdCtx.Start(); err != nil {
-		return newErrorwithSourceLocf("exec error: %v", err)
+		return fmt.Errorf("exec error: %w", err)
 	}
 	err := cmdCtx.Wait()
 	if ctx.Err() == nil {
-- 
cgit v1.2.3


From b41250738164926d6bf32af054637c5aa9bb88e7 Mon Sep 17 00:00:00 2001
From: Bob Haarman <inglorion@chromium.org>
Date: Thu, 29 Sep 2022 17:08:35 -0700
Subject: crosperf: use sshwatcher to set up port forwarding to DUTs

Some DUTs (notably those on gTransit) require port forwarding to be
accessible from inside the chroot. This CL makes crosperf's
SuiteRunner use sshwatcher to set up the required forwarding.
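For illustration, the forwarding pattern used here is a generator-based
context manager that owns the tunnel process for the lifetime of the
`with` body. A minimal sketch of the idea, using plain ssh instead of
sshwatcher (hypothetical helper, not the code in this CL; the real
implementation is in the diff below):

    import contextlib
    import random
    import subprocess

    @contextlib.contextmanager
    def forwarded_port(host: str):
        # Pick a local port that is likely to be free, then keep the
        # forwarding process alive while the caller uses the tunnel.
        port = random.randrange(4096, 32768)
        proc = subprocess.Popen(
            ["ssh", "-N", "-L", f"{port}:localhost:22", host]
        )
        try:
            yield f"localhost:{port}"
        finally:
            proc.terminate()
            proc.wait(timeout=5)

The yielded host:port string is what the suite runner hands to the tast
invocation in place of the raw machine name.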
BUG=b:235119397 TEST=suite_runner_unittest, then try it out for real Change-Id: Idad2abf40713e2d0aa3b81fdb4762a22b638400e Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3928354 Reviewed-by: Denis Nikitin <denik@chromium.org> Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Commit-Queue: Bob Haarman <inglorion@chromium.org> Tested-by: Bob Haarman <inglorion@chromium.org> --- crosperf/suite_runner.py | 43 ++++++++++++++++++++++++++++++++++++++- crosperf/suite_runner_unittest.py | 13 ++++++++++-- 2 files changed, 53 insertions(+), 3 deletions(-) diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py index dfd14b21..0e4ba045 100644 --- a/crosperf/suite_runner.py +++ b/crosperf/suite_runner.py @@ -6,15 +6,29 @@ """SuiteRunner defines the interface from crosperf to test script.""" +import contextlib import json import os +from pathlib import Path import pipes +import random import shlex +import subprocess import time from cros_utils import command_executer +SSHWATCHER = [ + "go", + "run", + str( + Path( + __file__, + "../../../../platform/dev/contrib/sshwatcher/sshwatcher.go", + ).resolve() + ), +] TEST_THAT_PATH = "/usr/bin/test_that" TAST_PATH = "/usr/bin/tast" CROSFLEET_PATH = "crosfleet" @@ -53,6 +67,32 @@ def GetDutConfigArgs(dut_config): return f"dut_config={pipes.quote(json.dumps(dut_config))}" +@contextlib.contextmanager +def ssh_tunnel(machinename): + """Context manager that forwards a TCP port over SSH while active. + + This class is used to set up port forwarding before entering the + chroot, so that the forwarded port can be used from inside + the chroot. + + The value yielded by ssh_tunnel is a host:port string. + """ + # We have to tell sshwatcher which port we want to use. + # We pick a port that is likely to be available. + port = random.randrange(4096, 32768) + cmd = SSHWATCHER + [machinename, str(port)] + # Pylint wants us to use subprocess.Popen as a context manager, + # but we don't, so that we can ask sshwatcher to terminate and + # limit the time we wait for it to do so. 
+ # pylint: disable=consider-using-with + proc = subprocess.Popen(cmd) + try: + yield f"localhost:{port}" + finally: + proc.terminate() + proc.wait(timeout=5) + + class SuiteRunner(object): """This defines the interface from crosperf to test script.""" @@ -83,7 +123,8 @@ class SuiteRunner(object): ) else: if benchmark.suite == "tast": - ret_tup = self.Tast_Run(machine_name, label, benchmark) + with ssh_tunnel(machine_name) as hostport: + ret_tup = self.Tast_Run(hostport, label, benchmark) else: ret_tup = self.Test_That_Run( machine_name, label, benchmark, test_args, profiler_args diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py index 69476f37..cc96ee4a 100755 --- a/crosperf/suite_runner_unittest.py +++ b/crosperf/suite_runner_unittest.py @@ -8,6 +8,7 @@ """Unittest for suite_runner.""" +import contextlib import json import unittest import unittest.mock as mock @@ -118,7 +119,14 @@ class SuiteRunnerTest(unittest.TestCase): res = suite_runner.GetDutConfigArgs(dut_config) self.assertEqual(res, output_str) - def test_run(self): + @mock.patch("suite_runner.ssh_tunnel") + def test_run(self, ssh_tunnel): + @contextlib.contextmanager + def mock_ssh_tunnel(_host): + yield "fakelocalhost:1234" + + ssh_tunnel.side_effect = mock_ssh_tunnel + def reset(): self.test_that_args = [] self.crosfleet_run_args = [] @@ -254,7 +262,8 @@ class SuiteRunnerTest(unittest.TestCase): self.assertFalse(self.call_test_that_run) self.assertFalse(self.call_crosfleet_run) self.assertEqual( - self.tast_args, ["fake_machine", self.mock_label, self.tast_bench] + self.tast_args, + ["fakelocalhost:1234", self.mock_label, self.tast_bench], ) def test_gen_test_args(self): -- cgit v1.2.3 From 69ee77f678ba58ee69d0dc6f82183171aeaee796 Mon Sep 17 00:00:00 2001 From: Denis Nikitin <denik@chromium.org> Date: Sun, 2 Oct 2022 23:11:14 -0700 Subject: llvm_tools: Fix manifest update Usually we don't need the manifest update with llvm-next because pgo is not default there. Remove default manifest packages from llvm-next. Add unit test for main. BUG=None TEST=unit test Change-Id: I1ef78be8184985e047db8ae68eda2f01c989a7a5 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3932864 Tested-by: Denis Nikitin <denik@chromium.org> Commit-Queue: Denis Nikitin <denik@chromium.org> Reviewed-by: Manoj Gupta <manojgupta@chromium.org> --- llvm_tools/update_chromeos_llvm_hash.py | 22 ++-- llvm_tools/update_chromeos_llvm_hash_unittest.py | 126 ++++++++++++++++++++++- 2 files changed, 139 insertions(+), 9 deletions(-) diff --git a/llvm_tools/update_chromeos_llvm_hash.py b/llvm_tools/update_chromeos_llvm_hash.py index 31a10867..c52c7328 100755 --- a/llvm_tools/update_chromeos_llvm_hash.py +++ b/llvm_tools/update_chromeos_llvm_hash.py @@ -17,7 +17,7 @@ import os from pathlib import Path import re import subprocess -from typing import Dict, List +from typing import Dict, Iterable import chroot import failure_modes @@ -99,7 +99,7 @@ def GetCommandLineArgs(): parser.add_argument( "--manifest_packages", - default=",".join(DEFAULT_MANIFEST_PACKAGES), + default="", help="Comma-separated ebuilds to update manifests for " "(default: %(default)s)", ) @@ -506,7 +506,7 @@ def StagePackagesPatchResultsForCommit(package_info_dict, commit_messages): return commit_messages -def UpdateManifests(packages: List[str], chroot_path: Path): +def UpdateManifests(packages: Iterable[str], chroot_path: Path): """Updates manifest files for packages. 
     Args:
@@ -524,8 +524,8 @@ def UpdatePackages(
-    packages,
-    manifest_packages: List[str],
+    packages: Iterable[str],
+    manifest_packages: Iterable[str],
     llvm_variant,
     git_hash,
     svn_version,
@@ -672,7 +672,7 @@ def EnsurePackageMaskContains(chroot_path, git_hash):
 def UpdatePackagesPatchMetadataFile(
     chroot_path: Path,
     svn_version: int,
-    packages: List[str],
+    packages: Iterable[str],
     mode: failure_modes.FailureModes,
 ) -> Dict[str, patch_utils.PatchInfo]:
     """Updates the packages metadata file.
@@ -761,8 +761,14 @@ def main():
         git_hash_source
     )
 
-    packages = args_output.update_packages.split(",")
-    manifest_packages = args_output.manifest_packages.split(",")
+    # Filter out empty strings. For example "".split(",") returns [""].
+    packages = set(p for p in args_output.update_packages.split(",") if p)
+    manifest_packages = set(
+        p for p in args_output.manifest_packages.split(",") if p
+    )
+    if not manifest_packages and not args_output.is_llvm_next:
+        # Set default manifest packages only for the current llvm.
+        manifest_packages = set(DEFAULT_MANIFEST_PACKAGES)
     change_list = UpdatePackages(
         packages=packages,
         manifest_packages=manifest_packages,
diff --git a/llvm_tools/update_chromeos_llvm_hash_unittest.py b/llvm_tools/update_chromeos_llvm_hash_unittest.py
index 9bed2712..b758538c 100755
--- a/llvm_tools/update_chromeos_llvm_hash_unittest.py
+++ b/llvm_tools/update_chromeos_llvm_hash_unittest.py
@@ -12,6 +12,7 @@ import datetime
 import os
 from pathlib import Path
 import subprocess
+import sys
 import unittest
 import unittest.mock as mock
 
@@ -19,7 +20,6 @@ import chroot
 import failure_modes
 import get_llvm_hash
 import git
-import subprocess_helpers
 import test_helpers
 
 import update_chromeos_llvm_hash
@@ -1017,6 +1017,130 @@ class UpdateLLVMHashTest(unittest.TestCase):
 
         mock_delete_repo.assert_called_once_with(path_to_package_dir, branch)
 
+    @mock.patch.object(chroot, "VerifyOutsideChroot")
+    @mock.patch.object(get_llvm_hash, "GetLLVMHashAndVersionFromSVNOption")
+    @mock.patch.object(update_chromeos_llvm_hash, "UpdatePackages")
+    def testMainDefaults(
+        self, mock_update_packages, mock_gethash, mock_outside_chroot
+    ):
+        git_hash = "1234abcd"
+        svn_version = 5678
+        mock_gethash.return_value = (git_hash, svn_version)
+        argv = [
+            "./update_chromeos_llvm_hash_unittest.py",
+            "--llvm_version",
+            "google3",
+        ]
+
+        with mock.patch.object(sys, "argv", argv) as mock.argv:
+            update_chromeos_llvm_hash.main()
+
+        expected_packages = set(update_chromeos_llvm_hash.DEFAULT_PACKAGES)
+        expected_manifest_packages = set(
+            update_chromeos_llvm_hash.DEFAULT_MANIFEST_PACKAGES,
+        )
+        expected_llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current
+        expected_chroot = update_chromeos_llvm_hash.defaultCrosRoot()
+        mock_update_packages.assert_called_once_with(
+            packages=expected_packages,
+            manifest_packages=expected_manifest_packages,
+            llvm_variant=expected_llvm_variant,
+            git_hash=git_hash,
+            svn_version=svn_version,
+            chroot_path=expected_chroot,
+            mode=failure_modes.FailureModes.FAIL,
+            git_hash_source="google3",
+            extra_commit_msg=None,
+        )
+        mock_outside_chroot.assert_called()
+
+    @mock.patch.object(chroot, "VerifyOutsideChroot")
+    @mock.patch.object(get_llvm_hash, "GetLLVMHashAndVersionFromSVNOption")
+    @mock.patch.object(update_chromeos_llvm_hash, "UpdatePackages")
+    def testMainLlvmNext(
+        self, mock_update_packages, mock_gethash, mock_outside_chroot
+    ):
+        git_hash = "1234abcd"
+        svn_version = 5678
+        mock_gethash.return_value = (git_hash,
svn_version) + argv = [ + "./update_chromeos_llvm_hash_unittest.py", + "--llvm_version", + "google3", + "--is_llvm_next", + ] + + with mock.patch.object(sys, "argv", argv) as mock.argv: + update_chromeos_llvm_hash.main() + + expected_packages = set(update_chromeos_llvm_hash.DEFAULT_PACKAGES) + expected_llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next + expected_chroot = update_chromeos_llvm_hash.defaultCrosRoot() + # llvm-next upgrade does not update manifest by default. + mock_update_packages.assert_called_once_with( + packages=expected_packages, + manifest_packages=set(), + llvm_variant=expected_llvm_variant, + git_hash=git_hash, + svn_version=svn_version, + chroot_path=expected_chroot, + mode=failure_modes.FailureModes.FAIL, + git_hash_source="google3", + extra_commit_msg=None, + ) + mock_outside_chroot.assert_called() + + @mock.patch.object(chroot, "VerifyOutsideChroot") + @mock.patch.object(get_llvm_hash, "GetLLVMHashAndVersionFromSVNOption") + @mock.patch.object(update_chromeos_llvm_hash, "UpdatePackages") + def testMainAllArgs( + self, mock_update_packages, mock_gethash, mock_outside_chroot + ): + packages_to_update = "test-packages/package1,test-libs/lib1" + manifest_packages = "test-libs/lib1,test-libs/lib2" + failure_mode = failure_modes.FailureModes.REMOVE_PATCHES + chroot_path = Path("/some/path/to/chroot") + llvm_ver = 435698 + git_hash = "1234abcd" + svn_version = 5678 + mock_gethash.return_value = (git_hash, svn_version) + + argv = [ + "./update_chromeos_llvm_hash_unittest.py", + "--llvm_version", + str(llvm_ver), + "--is_llvm_next", + "--chroot_path", + str(chroot_path), + "--update_packages", + packages_to_update, + "--manifest_packages", + manifest_packages, + "--failure_mode", + failure_mode.value, + "--patch_metadata_file", + "META.json", + ] + + with mock.patch.object(sys, "argv", argv) as mock.argv: + update_chromeos_llvm_hash.main() + + expected_packages = {"test-packages/package1", "test-libs/lib1"} + expected_manifest_packages = {"test-libs/lib1", "test-libs/lib2"} + expected_llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next + mock_update_packages.assert_called_once_with( + packages=expected_packages, + manifest_packages=expected_manifest_packages, + llvm_variant=expected_llvm_variant, + git_hash=git_hash, + svn_version=svn_version, + chroot_path=chroot_path, + mode=failure_mode, + git_hash_source=llvm_ver, + extra_commit_msg=None, + ) + mock_outside_chroot.assert_called() + @mock.patch.object(subprocess, "check_output", return_value=None) @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion") def testEnsurePackageMaskContainsExisting( -- cgit v1.2.3 From e945e30f306a0dab080a394c82bff49525abb3ef Mon Sep 17 00:00:00 2001 From: Bob Haarman <inglorion@chromium.org> Date: Wed, 5 Oct 2022 15:32:40 -0700 Subject: crosperf: find sshwatcher relative to chromeos_root The root of the ChromiumOS source tree is not always in the same location relative to where the crosperf scripts are. This makes the logic for finding sshwatcher.go not work everywhere. To fix this, this change uses the label.chromeos_root path that is present in crosperf runs to find sshwatcher.go. BUG=b:235119397 TEST=run crosperf in standalone toolchain-utils as well as inside ChromiumOS tree to see that sshwatcher.go is found in both cases. 
Change-Id: I981a55d3cfd2638da0bf320b86ebc5601101ce3f
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3935652
Tested-by: Bob Haarman <inglorion@chromium.org>
Reviewed-by: Denis Nikitin <denik@chromium.org>
Commit-Queue: Bob Haarman <inglorion@chromium.org>
---
 crosperf/suite_runner.py          | 33 ++++++++++++++++++---------------
 crosperf/suite_runner_unittest.py |  2 +-
 2 files changed, 19 insertions(+), 16 deletions(-)

diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index 0e4ba045..e777a57f 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -19,16 +19,8 @@ import time
 
 from cros_utils import command_executer
 
-SSHWATCHER = [
-    "go",
-    "run",
-    str(
-        Path(
-            __file__,
-            "../../../../platform/dev/contrib/sshwatcher/sshwatcher.go",
-        ).resolve()
-    ),
-]
+# sshwatcher path, relative to ChromiumOS source root.
+SSHWATCHER = "src/platform/dev/contrib/sshwatcher/sshwatcher.go"
 TEST_THAT_PATH = "/usr/bin/test_that"
 TAST_PATH = "/usr/bin/tast"
 CROSFLEET_PATH = "crosfleet"
@@ -68,25 +60,34 @@ def GetDutConfigArgs(dut_config):
 
 
 @contextlib.contextmanager
-def ssh_tunnel(machinename):
+def ssh_tunnel(sshwatcher: "os.PathLike", machinename: str) -> str:
     """Context manager that forwards a TCP port over SSH while active.
 
     This class is used to set up port forwarding before entering the
     chroot, so that the forwarded port can be used from inside
     the chroot.
 
-    The value yielded by ssh_tunnel is a host:port string.
+    Args:
+        sshwatcher: Path to sshwatcher.go
+        machinename: Hostname of the machine to connect to.
+
+    Returns:
+        host:port string that can be passed to tast
     """
     # We have to tell sshwatcher which port we want to use.
     # We pick a port that is likely to be available.
     port = random.randrange(4096, 32768)
-    cmd = SSHWATCHER + [machinename, str(port)]
+    cmd = ["go", "run", str(sshwatcher), machinename, str(port)]
     # Pylint wants us to use subprocess.Popen as a context manager,
     # but we don't, so that we can ask sshwatcher to terminate and
     # limit the time we wait for it to do so.
     # pylint: disable=consider-using-with
-    proc = subprocess.Popen(cmd)
+    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
     try:
+        # sshwatcher takes a few seconds before it binds to the port,
+        # presumably due to SSH handshaking taking a while.
+        # Give it 12 seconds before we ask the client to connect.
+        time.sleep(12)
         yield f"localhost:{port}"
     finally:
         proc.terminate()
@@ -123,7 +124,9 @@ class SuiteRunner(object):
             )
         else:
             if benchmark.suite == "tast":
-                with ssh_tunnel(machine_name) as hostport:
+                with ssh_tunnel(
+                    Path(label.chromeos_root, SSHWATCHER), machine_name
+                ) as hostport:
                     ret_tup = self.Tast_Run(hostport, label, benchmark)
             else:
                 ret_tup = self.Test_That_Run(
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index cc96ee4a..c936a074 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -122,7 +122,7 @@ class SuiteRunnerTest(unittest.TestCase):
     @mock.patch("suite_runner.ssh_tunnel")
     def test_run(self, ssh_tunnel):
         @contextlib.contextmanager
-        def mock_ssh_tunnel(_host):
+        def mock_ssh_tunnel(_watcher, _host):
             yield "fakelocalhost:1234"
 
         ssh_tunnel.side_effect = mock_ssh_tunnel
-- 
cgit v1.2.3


From d00092acdf86d8aa1b2a24cdb42a45098cda8d39 Mon Sep 17 00:00:00 2001
From: Denis Nikitin <denik@chromium.org>
Date: Tue, 11 Oct 2022 15:20:58 -0700
Subject: afdo_metadata: Update profile version on arm

The arm profile is currently not used in the 5.15 kernel so it's a no-op.
Update the version to keep it up-to-date and test the PUpr pipeline.

BUG=b:244337204
TEST=emerge-trogdor chromeos-kernel-5_15 with the new version

Change-Id: I9d0dca41758bc04f54d77a71c30bab8cbb42cb98
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3945805
Reviewed-by: Manoj Gupta <manojgupta@chromium.org>
Auto-Submit: Denis Nikitin <denik@chromium.org>
Tested-by: Denis Nikitin <denik@chromium.org>
Commit-Queue: Manoj Gupta <manojgupta@chromium.org>
---
 afdo_metadata/kernel_arm_afdo.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/afdo_metadata/kernel_arm_afdo.json b/afdo_metadata/kernel_arm_afdo.json
index 2ff1c15d..fa5feb94 100644
--- a/afdo_metadata/kernel_arm_afdo.json
+++ b/afdo_metadata/kernel_arm_afdo.json
@@ -1,5 +1,5 @@
 {
     "chromeos-kernel-5_15": {
-        "name": "R106-14541.0-1662074754"
+        "name": "R108-15148.0-1665394468"
     }
 }
-- 
cgit v1.2.3


From 5971b3261cd08aaa8821f2b57a6e1ad2c0ac511e Mon Sep 17 00:00:00 2001
From: Denis Nikitin <denik@chromium.org>
Date: Tue, 11 Oct 2022 15:33:49 -0700
Subject: afdo_metadata: Publish the new kernel profiles

Update chromeos-kernel-4.4
Update chromeos-kernel-4.14
Update chromeos-kernel-4.19
Update chromeos-kernel-5.4
Update chromeos-kernel-5.10

BUG=None
TEST=Verified in kernel-release-afdo-verify-orchestrator

Change-Id: Ic97b86f9bd0f40ac76d954a0effb733359555e2c
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3946883
Tested-by: Denis Nikitin <denik@chromium.org>
Reviewed-by: Manoj Gupta <manojgupta@chromium.org>
Auto-Submit: Denis Nikitin <denik@chromium.org>
Commit-Queue: Manoj Gupta <manojgupta@chromium.org>
---
 afdo_metadata/kernel_afdo.json | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json
index fb13f97a..4284a737 100644
--- a/afdo_metadata/kernel_afdo.json
+++ b/afdo_metadata/kernel_afdo.json
@@ -1,17 +1,17 @@
 {
     "chromeos-kernel-4_4": {
-        "name": "R107-15081.0-1662370474"
+        "name": "R108-15117.10-1664184941"
     },
     "chromeos-kernel-4_14": {
-        "name": "R107-15081.0-1662370440"
+        "name": "R108-15148.0-1664789707"
     },
     "chromeos-kernel-4_19": {
-        "name": "R107-15054.18-1662370521"
+        "name": "R108-15148.0-1664789742"
     },
     "chromeos-kernel-5_4": {
-        "name": "R107-15054.18-1662370576"
+        "name": "R108-15117.17-1664789530"
     },
     "chromeos-kernel-5_10": {
-        "name": "R107-15080.0-1662370502"
+        "name": "R108-15148.0-1664789616"
     }
 }
-- 
cgit v1.2.3


From b41250738164926d6bf32af054637c5aa9bb88e7 Mon Sep 17 00:00:00 2001
From: Adrian Dole <adriandole@google.com>
Date: Wed, 5 Oct 2022 22:11:44 +0000
Subject: llvm_tools: update_chromeos_llvm_hash failure modes

Support 'disable_patches' and 'remove_patches' failure mode options.

BUG=b:250648178
TEST=./patch_utils_unittest.py
./update_chromeos_llvm_hash_unittest.py
./patch_manager_unittest.py
./update_chromeos_llvm_hash [...] --failure_mode remove_patches
./update_chromeos_llvm_hash [...]
--failure_mode disable_patches Change-Id: I6269b2220cf05413c7776087030297773ab9a154 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3935651 Reviewed-by: Adrian Dole <adriandole@google.com> Auto-Submit: Adrian Dole <adriandole@google.com> Tested-by: Adrian Dole <adriandole@google.com> Reviewed-by: Denis Nikitin <denik@chromium.org> Reviewed-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Commit-Queue: Adrian Dole <adriandole@google.com> --- llvm_tools/patch_manager.py | 117 ++------------------------- llvm_tools/patch_manager_unittest.py | 91 --------------------- llvm_tools/patch_utils.py | 136 +++++++++++++++++++++++++++++++- llvm_tools/patch_utils_unittest.py | 97 ++++++++++++++++++++++- llvm_tools/update_chromeos_llvm_hash.py | 29 +++++-- 5 files changed, 258 insertions(+), 212 deletions(-) diff --git a/llvm_tools/patch_manager.py b/llvm_tools/patch_manager.py index 11e82227..4d4e8385 100755 --- a/llvm_tools/patch_manager.py +++ b/llvm_tools/patch_manager.py @@ -7,11 +7,10 @@ import argparse import enum -import json import os from pathlib import Path import sys -from typing import Any, Dict, IO, Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Tuple from failure_modes import FailureModes import get_llvm_hash @@ -97,13 +96,6 @@ def GetHEADSVNVersion(src_path): return version -def _WriteJsonChanges(patches: List[Dict[str, Any]], file_io: IO[str]): - """Write JSON changes to file, does not acquire new file lock.""" - json.dump(patches, file_io, indent=4, separators=(",", ": ")) - # Need to add a newline as json.dump omits it. - file_io.write("\n") - - def GetCommitHashesForBisection(src_path, good_svn_version, bad_svn_version): """Gets the good and bad commit hashes required by `git bisect start`.""" @@ -114,105 +106,6 @@ def GetCommitHashesForBisection(src_path, good_svn_version, bad_svn_version): return good_commit_hash, bad_commit_hash -def RemoveOldPatches( - svn_version: int, llvm_src_dir: Path, patches_json_fp: Path -): - """Remove patches that don't and will never apply for the future. - - Patches are determined to be "old" via the "is_old" method for - each patch entry. - - Args: - svn_version: LLVM SVN version. - llvm_src_dir: LLVM source directory. - patches_json_fp: Location to edit patches on. - """ - with patches_json_fp.open(encoding="utf-8") as f: - patches_list = json.load(f) - patch_entries = ( - patch_utils.PatchEntry.from_dict(llvm_src_dir, elem) - for elem in patches_list - ) - oldness = [(entry, entry.is_old(svn_version)) for entry in patch_entries] - filtered_entries = [entry.to_dict() for entry, old in oldness if not old] - with patch_utils.atomic_write(patches_json_fp, encoding="utf-8") as f: - _WriteJsonChanges(filtered_entries, f) - removed_entries = [entry for entry, old in oldness if old] - plural_patches = "patch" if len(removed_entries) == 1 else "patches" - print(f"Removed {len(removed_entries)} old {plural_patches}:") - for r in removed_entries: - print(f"- {r.rel_patch_path}: {r.title()}") - - -def UpdateVersionRanges( - svn_version: int, llvm_src_dir: Path, patches_json_fp: Path -): - """Reduce the version ranges of failing patches. - - Patches which fail to apply will have their 'version_range.until' - field reduced to the passed in svn_version. - - Modifies the contents of patches_json_fp. - - Ars: - svn_version: LLVM revision number. - llvm_src_dir: llvm-project directory path. - patches_json_fp: Filepath to the PATCHES.json file. 
- """ - with patches_json_fp.open(encoding="utf-8") as f: - patch_entries = patch_utils.json_to_patch_entries( - patches_json_fp.parent, - f, - ) - modified_entries = UpdateVersionRangesWithEntries( - svn_version, llvm_src_dir, patch_entries - ) - with patch_utils.atomic_write(patches_json_fp, encoding="utf-8") as f: - _WriteJsonChanges([p.to_dict() for p in patch_entries], f) - for entry in modified_entries: - print( - f"Stopped applying {entry.rel_patch_path} ({entry.title()}) " - f"for r{svn_version}" - ) - - -def UpdateVersionRangesWithEntries( - svn_version: int, - llvm_src_dir: Path, - patch_entries: Iterable[patch_utils.PatchEntry], -) -> List[patch_utils.PatchEntry]: - """Test-able helper for UpdateVersionRanges. - - Args: - svn_version: LLVM revision number. - llvm_src_dir: llvm-project directory path. - patch_entries: PatchEntry objects to modify. - - Returns: - A list of PatchEntry objects which were modified. - - Post: - Modifies patch_entries in place. - """ - modified_entries: List[patch_utils.PatchEntry] = [] - with patch_utils.git_clean_context(llvm_src_dir): - for pe in patch_entries: - test_result = pe.test_apply(llvm_src_dir) - if not test_result: - if pe.version_range is None: - pe.version_range = {} - pe.version_range["until"] = svn_version - modified_entries.append(pe) - else: - # We have to actually apply the patch so that future patches - # will stack properly. - if not pe.apply(llvm_src_dir).succeeded: - raise RuntimeError( - "Could not apply patch that dry ran successfully" - ) - return modified_entries - - def CheckPatchApplies( svn_version: int, llvm_src_dir: Path, @@ -374,10 +267,14 @@ def main(sys_argv: List[str]): PrintPatchResults(result) def _remove(args): - RemoveOldPatches(args.svn_version, llvm_src_dir, patches_json_fp) + patch_utils.remove_old_patches( + args.svn_version, llvm_src_dir, patches_json_fp + ) def _disable(args): - UpdateVersionRanges(args.svn_version, llvm_src_dir, patches_json_fp) + patch_utils.update_version_ranges( + args.svn_version, llvm_src_dir, patches_json_fp + ) def _test_single(args): if not args.test_patch: diff --git a/llvm_tools/patch_manager_unittest.py b/llvm_tools/patch_manager_unittest.py index 19c2d8af..42697d91 100755 --- a/llvm_tools/patch_manager_unittest.py +++ b/llvm_tools/patch_manager_unittest.py @@ -61,50 +61,6 @@ class PatchManagerTest(unittest.TestCase): ) mock_isfile.assert_called_once() - @mock.patch("builtins.print") - def testRemoveOldPatches(self, _): - """Can remove old patches from PATCHES.json.""" - one_patch_dict = { - "metadata": { - "title": "[some label] hello world", - }, - "platforms": [ - "chromiumos", - ], - "rel_patch_path": "x/y/z", - "version_range": { - "from": 4, - "until": 5, - }, - } - patches = [ - one_patch_dict, - {**one_patch_dict, "version_range": {"until": None}}, - {**one_patch_dict, "version_range": {"from": 100}}, - {**one_patch_dict, "version_range": {"until": 8}}, - ] - cases = [ - (0, lambda x: self.assertEqual(len(x), 4)), - (6, lambda x: self.assertEqual(len(x), 3)), - (8, lambda x: self.assertEqual(len(x), 2)), - (1000, lambda x: self.assertEqual(len(x), 2)), - ] - - def _t(dirname: str, svn_version: int, assertion_f: Callable): - json_filepath = Path(dirname) / "PATCHES.json" - with json_filepath.open("w", encoding="utf-8") as f: - json.dump(patches, f) - patch_manager.RemoveOldPatches(svn_version, Path(), json_filepath) - with json_filepath.open("r", encoding="utf-8") as f: - result = json.load(f) - assertion_f(result) - - with tempfile.TemporaryDirectory( - 
prefix="patch_manager_unittest" - ) as dirname: - for r, a in cases: - _t(dirname, r, a) - @mock.patch("builtins.print") @mock.patch.object(patch_utils, "git_clean_context") def testCheckPatchApplies(self, _, mock_git_clean_context): @@ -253,53 +209,6 @@ class PatchManagerTest(unittest.TestCase): patch_manager.GitBisectionCode.SKIP, ) - @mock.patch("patch_utils.git_clean_context", mock.MagicMock) - def testUpdateVersionRanges(self): - """Test the UpdateVersionRanges function.""" - with tempfile.TemporaryDirectory( - prefix="patch_manager_unittest" - ) as dirname: - dirpath = Path(dirname) - patches = [ - patch_utils.PatchEntry( - workdir=dirpath, - rel_patch_path="x.patch", - metadata=None, - platforms=None, - version_range={ - "from": 0, - "until": 2, - }, - ), - patch_utils.PatchEntry( - workdir=dirpath, - rel_patch_path="y.patch", - metadata=None, - platforms=None, - version_range={ - "from": 0, - "until": 2, - }, - ), - ] - patches[0].apply = mock.MagicMock( - return_value=patch_utils.PatchResult( - succeeded=False, failed_hunks={"a/b/c": []} - ) - ) - patches[1].apply = mock.MagicMock( - return_value=patch_utils.PatchResult(succeeded=True) - ) - results = patch_manager.UpdateVersionRangesWithEntries( - 1, dirpath, patches - ) - # We should only have updated the version_range of the first patch, - # as that one failed to apply. - self.assertEqual(len(results), 1) - self.assertEqual(results[0].version_range, {"from": 0, "until": 1}) - self.assertEqual(patches[0].version_range, {"from": 0, "until": 1}) - self.assertEqual(patches[1].version_range, {"from": 0, "until": 2}) - if __name__ == "__main__": unittest.main() diff --git a/llvm_tools/patch_utils.py b/llvm_tools/patch_utils.py index ca912f2b..b86e1925 100644 --- a/llvm_tools/patch_utils.py +++ b/llvm_tools/patch_utils.py @@ -12,7 +12,7 @@ from pathlib import Path import re import subprocess import sys -from typing import Any, Dict, IO, List, Optional, Tuple, Union +from typing import Any, Dict, IO, Iterable, List, Optional, Tuple, Union CHECKED_FILE_RE = re.compile(r"^checking file\s+(.*)$") @@ -456,3 +456,137 @@ def git_clean_context(git_root_dir: Path): yield finally: clean_src_tree(git_root_dir) + + +def _write_json_changes(patches: List[Dict[str, Any]], file_io: IO[str]): + """Write JSON changes to file, does not acquire new file lock.""" + json.dump(patches, file_io, indent=4, separators=(",", ": ")) + # Need to add a newline as json.dump omits it. + file_io.write("\n") + + +def update_version_ranges( + svn_version: int, llvm_src_dir: Path, patches_json_fp: Path +) -> PatchInfo: + """Reduce the version ranges of failing patches. + + Patches which fail to apply will have their 'version_range.until' + field reduced to the passed in svn_version. + + Modifies the contents of patches_json_fp. + + Args: + svn_version: LLVM revision number. + llvm_src_dir: llvm-project directory path. + patches_json_fp: Filepath to the PATCHES.json file. + + Returns: + PatchInfo for applied and disabled patches. 
+ """ + with patches_json_fp.open(encoding="utf-8") as f: + patch_entries = json_to_patch_entries( + patches_json_fp.parent, + f, + ) + modified_entries, applied_patches = update_version_ranges_with_entries( + svn_version, llvm_src_dir, patch_entries + ) + with atomic_write(patches_json_fp, encoding="utf-8") as f: + _write_json_changes([p.to_dict() for p in patch_entries], f) + for entry in modified_entries: + print( + f"Stopped applying {entry.rel_patch_path} ({entry.title()}) " + f"for r{svn_version}" + ) + return PatchInfo( + non_applicable_patches=[], + applied_patches=[p.rel_patch_path for p in applied_patches], + failed_patches=[], + disabled_patches=[p.rel_patch_path for p in modified_entries], + removed_patches=[], + modified_metadata=patches_json_fp if modified_entries else None, + ) + + +def update_version_ranges_with_entries( + svn_version: int, + llvm_src_dir: Path, + patch_entries: Iterable[PatchEntry], +) -> Tuple[List[PatchEntry], List[PatchEntry]]: + """Test-able helper for UpdateVersionRanges. + + Args: + svn_version: LLVM revision number. + llvm_src_dir: llvm-project directory path. + patch_entries: PatchEntry objects to modify. + + Returns: + Tuple of (modified entries, applied patches) + + Post: + Modifies patch_entries in place. + """ + modified_entries: List[PatchEntry] = [] + applied_patches: List[PatchEntry] = [] + with git_clean_context(llvm_src_dir): + for pe in patch_entries: + test_result = pe.test_apply(llvm_src_dir) + if not test_result: + if pe.version_range is None: + pe.version_range = {} + pe.version_range["until"] = svn_version + modified_entries.append(pe) + else: + # We have to actually apply the patch so that future patches + # will stack properly. + if not pe.apply(llvm_src_dir).succeeded: + raise RuntimeError( + "Could not apply patch that dry ran successfully" + ) + applied_patches.append(pe) + + return modified_entries, applied_patches + + +def remove_old_patches( + svn_version: int, llvm_src_dir: Path, patches_json_fp: Path +) -> PatchInfo: + """Remove patches that don't and will never apply for the future. + + Patches are determined to be "old" via the "is_old" method for + each patch entry. + + Args: + svn_version: LLVM SVN version. + llvm_src_dir: LLVM source directory. + patches_json_fp: Location to edit patches on. + + Returns: + PatchInfo for modified patches. 
+ """ + with patches_json_fp.open(encoding="utf-8") as f: + patches_list = json.load(f) + patch_entries = ( + PatchEntry.from_dict(llvm_src_dir, elem) for elem in patches_list + ) + oldness = [(entry, entry.is_old(svn_version)) for entry in patch_entries] + filtered_entries = [entry.to_dict() for entry, old in oldness if not old] + with atomic_write(patches_json_fp, encoding="utf-8") as f: + _write_json_changes(filtered_entries, f) + removed_entries = [entry for entry, old in oldness if old] + plural_patches = "patch" if len(removed_entries) == 1 else "patches" + print(f"Removed {len(removed_entries)} old {plural_patches}:") + for r in removed_entries: + print(f"- {r.rel_patch_path}: {r.title()}") + + patches_dir_path = llvm_src_dir / patches_json_fp.parent + return PatchInfo( + non_applicable_patches=[], + applied_patches=[], + failed_patches=[], + disabled_patches=[], + removed_patches=[ + patches_dir_path / p.rel_patch_path for p in removed_entries + ], + modified_metadata=patches_json_fp if removed_entries else None, + ) diff --git a/llvm_tools/patch_utils_unittest.py b/llvm_tools/patch_utils_unittest.py index 8fe45c2c..b8c21390 100755 --- a/llvm_tools/patch_utils_unittest.py +++ b/llvm_tools/patch_utils_unittest.py @@ -6,9 +6,11 @@ """Unit tests for the patch_utils.py file.""" import io +import json from pathlib import Path import subprocess import tempfile +from typing import Callable import unittest from unittest import mock @@ -99,7 +101,7 @@ class TestPatchUtils(unittest.TestCase): def test_can_parse_from_json(self): """Test that patches be loaded from json.""" - json = """ + patches_json = """ [ { "metadata": {}, @@ -121,7 +123,7 @@ class TestPatchUtils(unittest.TestCase): } ] """ - result = pu.json_to_patch_entries(Path(), io.StringIO(json)) + result = pu.json_to_patch_entries(Path(), io.StringIO(patches_json)) self.assertEqual(len(result), 4) def test_parsed_hunks(self): @@ -215,6 +217,97 @@ Hunk #1 SUCCEEDED at 96 with fuzz 1. test_file.write_text("abc") self.assertTrue(pu.is_git_dirty(dirpath)) + @mock.patch("patch_utils.git_clean_context", mock.MagicMock) + def test_update_version_ranges(self): + """Test the UpdateVersionRanges function.""" + with tempfile.TemporaryDirectory( + prefix="patch_manager_unittest" + ) as dirname: + dirpath = Path(dirname) + patches = [ + pu.PatchEntry( + workdir=dirpath, + rel_patch_path="x.patch", + metadata=None, + platforms=None, + version_range={ + "from": 0, + "until": 2, + }, + ), + pu.PatchEntry( + workdir=dirpath, + rel_patch_path="y.patch", + metadata=None, + platforms=None, + version_range={ + "from": 0, + "until": 2, + }, + ), + ] + patches[0].apply = mock.MagicMock( + return_value=pu.PatchResult( + succeeded=False, failed_hunks={"a/b/c": []} + ) + ) + patches[1].apply = mock.MagicMock( + return_value=pu.PatchResult(succeeded=True) + ) + results, _ = pu.update_version_ranges_with_entries( + 1, dirpath, patches + ) + # We should only have updated the version_range of the first patch, + # as that one failed to apply. 
+ self.assertEqual(len(results), 1) + self.assertEqual(results[0].version_range, {"from": 0, "until": 1}) + self.assertEqual(patches[0].version_range, {"from": 0, "until": 1}) + self.assertEqual(patches[1].version_range, {"from": 0, "until": 2}) + + @mock.patch("builtins.print") + def test_remove_old_patches(self, _): + """Can remove old patches from PATCHES.json.""" + one_patch_dict = { + "metadata": { + "title": "[some label] hello world", + }, + "platforms": [ + "chromiumos", + ], + "rel_patch_path": "x/y/z", + "version_range": { + "from": 4, + "until": 5, + }, + } + patches = [ + one_patch_dict, + {**one_patch_dict, "version_range": {"until": None}}, + {**one_patch_dict, "version_range": {"from": 100}}, + {**one_patch_dict, "version_range": {"until": 8}}, + ] + cases = [ + (0, lambda x: self.assertEqual(len(x), 4)), + (6, lambda x: self.assertEqual(len(x), 3)), + (8, lambda x: self.assertEqual(len(x), 2)), + (1000, lambda x: self.assertEqual(len(x), 2)), + ] + + def _t(dirname: str, svn_version: int, assertion_f: Callable): + json_filepath = Path(dirname) / "PATCHES.json" + with json_filepath.open("w", encoding="utf-8") as f: + json.dump(patches, f) + pu.remove_old_patches(svn_version, Path(), json_filepath) + with json_filepath.open("r", encoding="utf-8") as f: + result = json.load(f) + assertion_f(result) + + with tempfile.TemporaryDirectory( + prefix="patch_utils_unittest" + ) as dirname: + for r, a in cases: + _t(dirname, r, a) + @staticmethod def _default_json_dict(): return { diff --git a/llvm_tools/update_chromeos_llvm_hash.py b/llvm_tools/update_chromeos_llvm_hash.py index c52c7328..75c6ce6c 100755 --- a/llvm_tools/update_chromeos_llvm_hash.py +++ b/llvm_tools/update_chromeos_llvm_hash.py @@ -411,7 +411,7 @@ def RemovePatchesFromFilesDir(patches): """Removes the patches from $FILESDIR of a package. Args: - patches: A list of absolute pathes of patches to remove + patches: A list of absolute paths of patches to remove Raises: ValueError: Failed to remove a patch in $FILESDIR. @@ -728,13 +728,26 @@ def UpdatePackagesPatchMetadataFile( src_path = Path(dirname) with patch_utils.git_clean_context(src_path): - patches_info = patch_utils.apply_all_from_json( - svn_version=svn_version, - llvm_src_dir=src_path, - patches_json_fp=patches_json_fp, - continue_on_failure=mode - == failure_modes.FailureModes.CONTINUE, - ) + if ( + mode == failure_modes.FailureModes.FAIL + or mode == failure_modes.FailureModes.CONTINUE + ): + patches_info = patch_utils.apply_all_from_json( + svn_version=svn_version, + llvm_src_dir=src_path, + patches_json_fp=patches_json_fp, + continue_on_failure=mode + == failure_modes.FailureModes.CONTINUE, + ) + elif mode == failure_modes.FailureModes.REMOVE_PATCHES: + patches_info = patch_utils.remove_old_patches( + svn_version, src_path, patches_json_fp + ) + elif mode == failure_modes.FailureModes.DISABLE_PATCHES: + patches_info = patch_utils.update_version_ranges( + svn_version, src_path, patches_json_fp + ) + package_info[cur_package] = patches_info._asdict() return package_info -- cgit v1.2.3 From be528176c1b17e5e296922eefd80c34506d3312c Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead <ajordanr@google.com> Date: Fri, 14 Oct 2022 23:10:09 +0000 Subject: llvm_tools: Fix patch_utils patch writing There were two bugs with patch_utils here: 1. When deciding to modify patches, it didn't ignore old patches. 2. When writing out changes, it wrote the dicts out in the wrong order. Both of these issues have been resolved. 
BUG=b:253660089 TEST=./patch_utils_unittest.py TEST=./patch_manager_unittest.py TEST=./update_chromeos_llvm_hash_unittest.py TEST=./update_chromeos_llvm_hash.py TEST=./update_chromeos_llvm_hash.py \ --is_llvm_next \ --llvm_version tot \ --failure_mode disable_patches Change-Id: If25c30ffa2bea55aeafb8d03aa19c5fc07fb4b7b Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3956311 Reviewed-by: Manoj Gupta <manojgupta@chromium.org> Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com> --- llvm_tools/patch_utils.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/llvm_tools/patch_utils.py b/llvm_tools/patch_utils.py index b86e1925..affb3d0d 100644 --- a/llvm_tools/patch_utils.py +++ b/llvm_tools/patch_utils.py @@ -197,15 +197,19 @@ class PatchEntry: ) def to_dict(self) -> Dict[str, Any]: - out = { + out: Dict[str, Any] = { "metadata": self.metadata, - "rel_patch_path": self.rel_patch_path, - "version_range": self.version_range, } if self.platforms: # To match patch_sync, only serialized when # non-empty and non-null. out["platforms"] = sorted(self.platforms) + out.update( + { + "rel_patch_path": self.rel_patch_path, + "version_range": self.version_range, + } + ) return out def parsed_hunks(self) -> Dict[str, List[Hunk]]: @@ -500,11 +504,11 @@ def update_version_ranges( ) return PatchInfo( non_applicable_patches=[], - applied_patches=[p.rel_patch_path for p in applied_patches], + applied_patches=applied_patches, failed_patches=[], disabled_patches=[p.rel_patch_path for p in modified_entries], removed_patches=[], - modified_metadata=patches_json_fp if modified_entries else None, + modified_metadata=str(patches_json_fp) if modified_entries else None, ) @@ -528,8 +532,9 @@ def update_version_ranges_with_entries( """ modified_entries: List[PatchEntry] = [] applied_patches: List[PatchEntry] = [] + active_patches = (pe for pe in patch_entries if not pe.is_old(svn_version)) with git_clean_context(llvm_src_dir): - for pe in patch_entries: + for pe in active_patches: test_result = pe.test_apply(llvm_src_dir) if not test_result: if pe.version_range is None: @@ -579,14 +584,11 @@ def remove_old_patches( for r in removed_entries: print(f"- {r.rel_patch_path}: {r.title()}") - patches_dir_path = llvm_src_dir / patches_json_fp.parent return PatchInfo( non_applicable_patches=[], applied_patches=[], failed_patches=[], disabled_patches=[], - removed_patches=[ - patches_dir_path / p.rel_patch_path for p in removed_entries - ], - modified_metadata=patches_json_fp if removed_entries else None, + removed_patches=[p.rel_patch_path for p in removed_entries], + modified_metadata=str(patches_json_fp) if removed_entries else None, ) -- cgit v1.2.3 From e1d62e60dec52b016e5ad9d543d9de2eae0158f6 Mon Sep 17 00:00:00 2001 From: Jordan R Abrahams-Whitehead <ajordanr@google.com> Date: Sat, 15 Oct 2022 00:08:34 +0000 Subject: toolchain_utils_githooks: Add line-length to black This fixes the autofix command so that we enforce a line length of 80 chars. 
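To see why the write order matters, note that Python dicts and
json.dump preserve insertion order, so to_dict() controls the key
order of the serialized PATCHES.json entries. A minimal sketch of the
intended ordering, with hypothetical values (the real logic is in the
diff below):

    import json

    out = {"metadata": {"title": "example"}}
    platforms = ["chromiumos"]
    if platforms:
        # To match patch_sync, only serialized when non-empty.
        out["platforms"] = sorted(platforms)
    out.update({"rel_patch_path": "p.patch", "version_range": {"from": 1}})
    # Emits the keys as metadata, platforms, rel_patch_path,
    # version_range -- in exactly that order.
    print(json.dumps(out, indent=4, separators=(",", ": ")))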
BUG=None TEST=Checked presubmit on violating file Change-Id: If8817cdf9ab70d5d9e5db10c0818207f976f519c Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3958135 Reviewed-by: George Burgess <gbiv@chromium.org> Auto-Submit: Jordan Abrahams-Whitehead <ajordanr@google.com> Tested-by: Jordan Abrahams-Whitehead <ajordanr@google.com> Commit-Queue: Jordan Abrahams-Whitehead <ajordanr@google.com> --- toolchain_utils_githooks/check-presubmit.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/toolchain_utils_githooks/check-presubmit.py b/toolchain_utils_githooks/check-presubmit.py index 29744aaf..485737d5 100755 --- a/toolchain_utils_githooks/check-presubmit.py +++ b/toolchain_utils_githooks/check-presubmit.py @@ -187,7 +187,8 @@ def check_black( ) black_version = stdout_and_stderr.strip() - command = [black, "--line-length=80", "--check"] + python_files + black_invocation: t.List[str] = [str(black), "--line-length=80"] + command = black_invocation + ["--check"] + list(python_files) exit_code, stdout_and_stderr = run_command_unchecked( command, cwd=toolchain_utils_root ) @@ -238,7 +239,7 @@ def check_black( autofix_commands=[], ) - autofix = [black] + bad_files + autofix = black_invocation + bad_files return CheckResult( ok=False, output=f"Using {black_version!r}, these file(s) have formatting errors: " -- cgit v1.2.3 From 5bf1676c8283ceb9f1dc4f4a77fc9709f3e909cb Mon Sep 17 00:00:00 2001 From: Denis Nikitin <denik@google.com> Date: Tue, 18 Oct 2022 14:36:36 -0700 Subject: afdo_tools: Remove 4.4 from kernel afdo update BUG=b:217578492 TEST=./update_kernel_afdo Change-Id: I67e35c0bd28936e968b533280d3e6d0d3149e412 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3965288 Reviewed-by: Manoj Gupta <manojgupta@chromium.org> Commit-Queue: Manoj Gupta <manojgupta@chromium.org> Auto-Submit: Denis Nikitin <denik@chromium.org> Tested-by: Denis Nikitin <denik@chromium.org> --- afdo_tools/update_kernel_afdo | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/afdo_tools/update_kernel_afdo b/afdo_tools/update_kernel_afdo index 701a4307..e8e583e7 100755 --- a/afdo_tools/update_kernel_afdo +++ b/afdo_tools/update_kernel_afdo @@ -25,7 +25,7 @@ set -eu set -o pipefail GS_BASE=gs://chromeos-prebuilt/afdo-job/vetted/kernel -KVERS="4.4 4.14 4.19 5.4 5.10" +KVERS="4.14 4.19 5.4 5.10" failed_channels="" # Add skipped chrome branches in ascending order here. 
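A condensed sketch of the resulting logic, with simplified names (the
actual change is in the diff below): the check and the suggested
autofix now share one base invocation, so the fix black applies
matches what the check enforced.

    from typing import List, Tuple

    def black_commands(
        black: str, files: List[str]
    ) -> Tuple[List[str], List[str]]:
        # One shared base invocation carries the line-length flag.
        base = [black, "--line-length=80"]
        return base + ["--check"] + files, base + files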
SKIPPED_BRANCHES="95" -- cgit v1.2.3 From c139c08d9da731827eb31774a1f1f0dd5137a9a8 Mon Sep 17 00:00:00 2001 From: Bob Haarman <inglorion@chromium.org> Date: Fri, 21 Oct 2022 17:11:50 -0700 Subject: afdo_metadata: Publish the new kernel profiles Update chromeos-kernel-4.14 Update chromeos-kernel-4.19 Update chromeos-kernel-5.4 Update chromeos-kernel-5.10 BUG=None TEST=Verified in kernel-release-afdo-verify-orchestrator Change-Id: Ia9af7b4fb69d6a04be6bc2d08cb6ded27c2f4391 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3972099 Tested-by: Bob Haarman <inglorion@chromium.org> Commit-Queue: Bob Haarman <inglorion@chromium.org> Reviewed-by: Denis Nikitin <denik@chromium.org> --- afdo_metadata/kernel_afdo.json | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json index 4284a737..851b2768 100644 --- a/afdo_metadata/kernel_afdo.json +++ b/afdo_metadata/kernel_afdo.json @@ -1,17 +1,14 @@ { - "chromeos-kernel-4_4": { - "name": "R108-15117.10-1664184941" - }, "chromeos-kernel-4_14": { - "name": "R108-15148.0-1664789707" + "name": "R109-15156.0-1665999473" }, "chromeos-kernel-4_19": { - "name": "R108-15148.0-1664789742" + "name": "R109-15178.0-1665999117" }, "chromeos-kernel-5_4": { - "name": "R108-15117.17-1664789530" + "name": "R109-15178.0-1665999319" }, "chromeos-kernel-5_10": { - "name": "R108-15148.0-1664789616" + "name": "R109-15178.0-1665999174" } } -- cgit v1.2.3 From 4c55d03d75490e1b890accb2675a1edffab632e8 Mon Sep 17 00:00:00 2001 From: Michael Benfield <mbenfield@google.com> Date: Thu, 27 Oct 2022 17:21:46 +0000 Subject: pgo_rust.py: --suffix command line option. This allows multiple benchmarks and profdata to be uploaded for the same Rust version, distinguished by a filename suffix. BUG=None TEST=CQ Change-Id: I226d26a8094e4b1e6a119ee8b6b002717d89026e Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3988763 Auto-Submit: Michael Benfield <mbenfield@google.com> Reviewed-by: George Burgess <gbiv@chromium.org> Commit-Queue: Michael Benfield <mbenfield@google.com> Tested-by: Michael Benfield <mbenfield@google.com> Commit-Queue: George Burgess <gbiv@chromium.org> --- pgo_tools_rust/pgo_rust.py | 42 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 34 insertions(+), 8 deletions(-) diff --git a/pgo_tools_rust/pgo_rust.py b/pgo_tools_rust/pgo_rust.py index c145bc8b..298c343f 100755 --- a/pgo_tools_rust/pgo_rust.py +++ b/pgo_tools_rust/pgo_rust.py @@ -9,9 +9,12 @@ This is meant to be done at Rust uprev time. Ultimately profdata files need to be placed at -gs://chromeos-localmirror/distfiles/rust-pgo-{rust_version}-frontend.profdata.tz +gs://chromeos-localmirror/distfiles/rust-pgo-{rust_version}-frontend.profdata{s}.tz and -gs://chromeos-localmirror/distfiles/rust-pgo-{rust_version}-llvm.profdata.tz +gs://chromeos-localmirror/distfiles/rust-pgo-{rust_version}-llvm.profdata{s}.tz + +Here {s} is an optional suffix to distinguish between profdata files on the same +Rust version. The intended flow is that you first get the new Rust version in a shape so that it builds, for instance modifying or adding patches as necessary. 
Note that if @@ -316,7 +319,7 @@ def merge_profdata(llvm_or_frontend, *, source_directory: Path, dest: Path): def do_upload_profdata(*, source: Path, dest: PurePosixPath): - new_path = source.parent / source.name / ".xz" + new_path = source.parent / (source.name + ".xz") run(["xz", "--keep", "--compress", "--force", source]) upload_file(source=new_path, dest=dest, public_read=True) @@ -424,7 +427,9 @@ def benchmark_nopgo(args): ) rust_version = get_rust_version() - dest_directory = GS_BASE / "benchmarks" / rust_version / "nopgo" + dest_directory = ( + GS_BASE / "benchmarks" / rust_version / f"nopgo{args.suffix}" + ) logging.info("Uploading benchmark data") for file in time_directory.iterdir(): upload_file( @@ -492,7 +497,7 @@ def benchmark_pgo(args): GS_BASE / "benchmarks" / rust_version - / f"{args.crate_name}-{args.crate_version}" + / f"{args.crate_name}-{args.crate_version}{args.suffix}" ) logging.info("Uploading benchmark data") for file in time_directory.iterdir(): @@ -503,20 +508,26 @@ def benchmark_pgo(args): def upload_profdata(args): directory = ( - LOCAL_BASE / "profdata /" f"{args.crate_name}-{args.crate_version}" + LOCAL_BASE / "profdata" / f"{args.crate_name}-{args.crate_version}" ) rust_version = get_rust_version() logging.info("Uploading LLVM profdata") do_upload_profdata( source=directory / "llvm.profdata", - dest=(GS_DISTFILES / f"rust-pgo-{rust_version}-llvm.profdata.xz"), + dest=( + GS_DISTFILES + / f"rust-pgo-{rust_version}-llvm{args.suffix}.profdata.xz" + ), ) logging.info("Uploading frontend profdata") do_upload_profdata( source=directory / "frontend.profdata", - dest=(GS_DISTFILES / f"rust-pgo-{rust_version}-frontend.profdata.xz"), + dest=( + GS_DISTFILES + / f"rust-pgo-{rust_version}-frontend{args.suffix}.profdata.xz" + ), ) @@ -566,6 +577,11 @@ def main(): default=CRATE_VERSION, help="Version of the crate whose benchmark to build", ) + parser_benchmark_nopgo.add_argument( + "--suffix", + default="", + help="Suffix to distinguish benchmarks and profdata with identical rustc versions", + ) parser_benchmark_pgo = subparsers.add_parser( "benchmark-pgo", @@ -594,6 +610,11 @@ def main(): default=CRATE_VERSION, help="Version of the crate whose profile to use", ) + parser_benchmark_pgo.add_argument( + "--suffix", + default="", + help="Suffix to distinguish benchmarks and profdata with identical rustc versions", + ) parser_upload_profdata = subparsers.add_parser( "upload-profdata", help="Upload the profdata files" @@ -609,6 +630,11 @@ def main(): default=CRATE_VERSION, help="Version of the crate whose profile to use", ) + parser_upload_profdata.add_argument( + "--suffix", + default="", + help="Suffix to distinguish benchmarks and profdata with identical rustc versions", + ) args = parser.parse_args() -- cgit v1.2.3 From b46ae181d61d13aa2db3d20c99fb11a7a7c559ed Mon Sep 17 00:00:00 2001 From: Denis Nikitin <denik@google.com> Date: Mon, 31 Oct 2022 07:49:05 -0700 Subject: afdo_metadata: Publish the new kernel profiles Update amd profile on chromeos-kernel-4.4 Update amd profile on chromeos-kernel-4.14 Update amd profile on chromeos-kernel-4.19 Update amd profile on chromeos-kernel-5.4 Update amd profile on chromeos-kernel-5.10 Update arm profile on chromeos-kernel-5.15 BUG=None TEST=Verified in kernel-release-afdo-verify-orchestrator Change-Id: Ia47d80599bb635323851da768db4595d55dfceaf Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3994105 Tested-by: Denis Nikitin <denik@chromium.org> Auto-Submit: Denis Nikitin 
<denik@chromium.org> Reviewed-by: Manoj Gupta <manojgupta@chromium.org> Commit-Queue: Manoj Gupta <manojgupta@chromium.org> --- afdo_metadata/kernel_afdo.json | 11 +++++++---- afdo_metadata/kernel_arm_afdo.json | 2 +- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json index 851b2768..98dab12e 100644 --- a/afdo_metadata/kernel_afdo.json +++ b/afdo_metadata/kernel_afdo.json @@ -1,14 +1,17 @@ { + "chromeos-kernel-4_4": { + "name": "R108-15117.10-1664184941" + }, "chromeos-kernel-4_14": { - "name": "R109-15156.0-1665999473" + "name": "R109-15183.8-1666603998" }, "chromeos-kernel-4_19": { - "name": "R109-15178.0-1665999117" + "name": "R109-15183.8-1666604011" }, "chromeos-kernel-5_4": { - "name": "R109-15178.0-1665999319" + "name": "R109-15183.8-1666603918" }, "chromeos-kernel-5_10": { - "name": "R109-15178.0-1665999174" + "name": "R109-15183.8-1666604219" } } diff --git a/afdo_metadata/kernel_arm_afdo.json b/afdo_metadata/kernel_arm_afdo.json index fa5feb94..e73d2eb8 100644 --- a/afdo_metadata/kernel_arm_afdo.json +++ b/afdo_metadata/kernel_arm_afdo.json @@ -1,5 +1,5 @@ { "chromeos-kernel-5_15": { - "name": "R108-15148.0-1665394468" + "name": "R109-15183.8-1666604194" } } -- cgit v1.2.3 From b3de4ad2a0e02255f06fd605ad4f493edffc1b1f Mon Sep 17 00:00:00 2001 From: Denis Nikitin <denik@google.com> Date: Sat, 29 Oct 2022 13:59:39 -0700 Subject: update_kernel_afdo: Update arm afdo metadata BUG=b:244337204 TEST=./update_kernel_afdo Change-Id: I47722a6c3e4446bff816baed7711279dd923a9c3 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3994209 Reviewed-by: Manoj Gupta <manojgupta@chromium.org> Commit-Queue: Denis Nikitin <denik@chromium.org> Tested-by: Denis Nikitin <denik@chromium.org> --- afdo_tools/update_kernel_afdo | 201 +++++++++++++++++++++++------------------- 1 file changed, 112 insertions(+), 89 deletions(-) diff --git a/afdo_tools/update_kernel_afdo b/afdo_tools/update_kernel_afdo index e8e583e7..be0fa8d0 100755 --- a/afdo_tools/update_kernel_afdo +++ b/afdo_tools/update_kernel_afdo @@ -24,8 +24,10 @@ the current branch unchanged. set -eu set -o pipefail -GS_BASE=gs://chromeos-prebuilt/afdo-job/vetted/kernel -KVERS="4.14 4.19 5.4 5.10" +AMD_GS_BASE=gs://chromeos-prebuilt/afdo-job/vetted/kernel +ARM_GS_BASE=gs://chromeos-prebuilt/afdo-job/vetted/kernel/arm +AMD_KVERS="4.14 4.19 5.4 5.10" +ARM_KVERS="5.15" failed_channels="" # Add skipped chrome branches in ascending order here. SKIPPED_BRANCHES="95" @@ -49,14 +51,25 @@ SKIPPED_KVERS_IN_BRANCHES["100"]="5.10" script_dir=$(dirname "$0") tc_utils_dir="${script_dir}/.." metadata_dir="${tc_utils_dir}/afdo_metadata" -outfile="$(realpath --relative-to="${tc_utils_dir}" \ +amd_outfile="$(realpath --relative-to="${tc_utils_dir}" \ "${metadata_dir}"/kernel_afdo.json)" +arm_outfile="$(realpath --relative-to="${tc_utils_dir}" \ + "${metadata_dir}"/kernel_arm_afdo.json)" # Convert toolchain_utils into the absolute path. abs_tc_utils_dir="$(realpath "${tc_utils_dir}")" # Check profiles uploaded within the last week. 
expected_time=$(date +%s -d "week ago") +ARCHS="amd arm" +declare -A arch_gsbase arch_kvers arch_outfile +arch_gsbase["amd"]="${AMD_GS_BASE}" +arch_gsbase["arm"]="${ARM_GS_BASE}" +arch_kvers["amd"]="${AMD_KVERS}" +arch_kvers["arm"]="${ARM_KVERS}" +arch_outfile["amd"]="${amd_outfile}" +arch_outfile["arm"]="${arm_outfile}" + declare -A branch branch_number commit remote_repo=$(git -C "${tc_utils_dir}" remote) canary_ref="refs/heads/main" @@ -126,112 +139,122 @@ do echo echo "Checking \"${channel}\" channel..." echo "branch_number=${curr_branch_number} branch=${curr_branch}" - json="{" - sep="" - for kver in ${KVERS} + + git reset --hard HEAD + git checkout "${remote_repo}/${curr_branch}" + + for arch in ${ARCHS} do - # Skip kernels disabled in this branch. - skipped=false - for skipped_branch in "${!SKIPPED_KVERS_IN_BRANCHES[@]}" + json="{" + sep="" + for kver in ${arch_kvers[${arch}]} do - if [[ ${curr_branch_number} == "${skipped_branch}" ]] + # Skip kernels disabled in this branch. + skipped=false + for skipped_branch in "${!SKIPPED_KVERS_IN_BRANCHES[@]}" + do + if [[ ${curr_branch_number} == "${skipped_branch}" ]] + then + # Current branch is in the keys of SKIPPED_KVERS_IN_BRANCHES. + # Now lets check if $kver is in the list. + for skipped_kver in ${SKIPPED_KVERS_IN_BRANCHES[${skipped_branch}]} + do + if [[ ${kver} == "${skipped_kver}" ]] + then + skipped=true + break + fi + done + fi + done + if ${skipped} then - # Current branch is in the keys of SKIPPED_KVERS_IN_BRANCHES. - # Now lets check if $kver is in the list. - for skipped_kver in ${SKIPPED_KVERS_IN_BRANCHES[${skipped_branch}]} - do - if [[ ${kver} == "${skipped_kver}" ]] - then - skipped=true - break - fi - done + echo "${kver} is skipped in branch ${curr_branch_number}. Skip it." + continue + fi + # Sort the gs output by timestamp, default ordering is by name. So + # R86-13310.3-1594633089.gcov.xz goes after + # R86-13310.18-1595237847.gcov.xz. + latest=$(gsutil.py ls -l "${arch_gsbase[${arch}]}/${kver}/" | sort -k2 | \ + grep "R${curr_branch_number}" | tail -1 || true) + if [[ -z "${latest}" && "${channel}" != "stable" ]] + then + # if no profiles exist for the current branch, try the previous branch + latest=$(gsutil.py ls -l "${arch_gsbase[${arch}]}/${kver}/" | \ + sort -k2 | grep "R$((curr_branch_number - 1))" | tail -1) fi - done - if ${skipped} - then - echo "${kver} is skipped in branch ${curr_branch_number}. Skip it." - continue - fi - # Sort the gs output by timestamp (default ordering is by name, so - # R86-13310.3-1594633089.gcov.xz goes after R86-13310.18-1595237847.gcov.xz) - latest=$(gsutil.py ls -l "${GS_BASE}/${kver}/" | sort -k2 | \ - grep "R${curr_branch_number}" | tail -1 || true) - if [[ -z "${latest}" && "${channel}" != "stable" ]] - then - # if no profiles exist for the current branch, try the previous branch - latest=$(gsutil.py ls -l "${GS_BASE}/${kver}/" | sort -k2 | \ - grep "R$((curr_branch_number - 1))" | tail -1) - fi - # Verify that the file has the expected date. - file_time=$(echo "${latest}" | awk '{print $2}') - file_time_unix=$(date +%s -d "${file_time}") - if [ "${file_time_unix}" -lt "${expected_time}" ] - then - expected=$(env TZ=UTC date +%Y-%m-%dT%H:%M:%SZ -d @"${expected_time}") - echo "Wrong date for ${kver}: ${file_time} is before ${expected}" >&2 - errs="${errs} ${kver}" - continue - fi + # Verify that the file has the expected date. 
+ file_time=$(echo "${latest}" | awk '{print $2}') + file_time_unix=$(date +%s -d "${file_time}") + if [ "${file_time_unix}" -lt "${expected_time}" ] + then + expected=$(env TZ=UTC date +%Y-%m-%dT%H:%M:%SZ -d @"${expected_time}") + echo "Wrong date for ${kver}: ${file_time} is before ${expected}" >&2 + errs="${errs} ${kver}" + continue + fi - # Generate JSON. - json_kver=$(echo "${kver}" | tr . _) - # b/147370213 (migrating profiles from gcov format) may result in the - # pattern below no longer doing the right thing. - name="$(basename "${latest%.gcov.*}")" - # Skip kernels with no AFDO support in the current channel. - if [[ "${name}" == "" ]] - then - continue - fi - json=$(cat <<EOT + # Generate JSON. + json_kver=$(echo "${kver}" | tr . _) + # b/147370213 (migrating profiles from gcov format) may result in the + # pattern below no longer doing the right thing. + name="$(basename "${latest%.gcov.*}")" + # Skip kernels with no AFDO support in the current channel. + if [[ "${name}" == "" ]] + then + continue + fi + json=$(cat <<EOT ${json}${sep} "chromeos-kernel-${json_kver}": { "name": "${name}" } EOT - ) - sep="," - successes=$((successes + 1)) - done + ) + sep="," + successes=$((successes + 1)) + done # kvers loop - # If we did not succeed for any kvers, exit now. - if [[ ${successes} -eq 0 ]] - then - echo "error: AFDO profiles out of date for all kernel versions" >&2 - failed_channels="${failed_channels} ${channel}" - continue - fi + # If we did not succeed for any kvers, exit now. + if [[ ${successes} -eq 0 ]] + then + echo "error: AFDO profiles out of date for all kernel versions" >&2 + failed_channels="${failed_channels} ${channel}" + continue + fi - git reset --hard HEAD - echo git checkout "${remote_repo}/${curr_branch}" - git checkout "${remote_repo}/${curr_branch}" + # Write new JSON file. + # Don't use `echo` since `json` might have esc characters in it. + printf "%s\n}\n" "${json}" > "${arch_outfile[${arch}]}" - # Write new JSON file. - # Don't use `echo` since `json` might have esc characters in it. - printf "%s\n}\n" "${json}" > "${outfile}" + # If no changes were made, say so. + outdir=$(dirname "${arch_outfile[${arch}]}") + shortstat=$(cd "${outdir}" &&\ + git status --short "$(basename "${arch_outfile[${arch}]}")") + [ -z "${shortstat}" ] &&\ + echo "$(basename "${arch_outfile[${arch}]}") is up to date." \ + && continue - # If no changes were made, say so. - outdir=$(dirname "${outfile}") - shortstat=$(cd "${outdir}" && git status --short "$(basename "${outfile}")") - [ -z "${shortstat}" ] && echo "$(basename "${outfile}") is up to date." \ - && continue + # If we had any errors, warn about them. + if [[ -n "${errs}" ]] + then + echo "warning: failed to update ${errs} in ${channel}" >&2 + failed_channels="${failed_channels} ${channel}" + continue + fi - # If we had any errors, warn about them. 
- if [[ -n "${errs}" ]] - then - echo "warning: failed to update ${errs} in ${channel}" >&2 - failed_channels="${failed_channels} ${channel}" - continue - fi + git add "${arch_outfile[${arch}]}" + done # ARCHS loop - git add afdo_metadata/kernel_afdo.json case "${channel}" in canary ) commit_contents=$'afdo_metadata: Publish the new kernel profiles\n\n' - for kver in ${KVERS} ; do - commit_contents="${commit_contents}Update chromeos-kernel-${kver}"$'\n' + for arch in ${ARCHS} ; do + for kver in ${arch_kvers[${arch}]} ; do + commit_contents="${commit_contents}Update ${arch} profile on\ + chromeos-kernel-${kver}"$'\n' + done done commit_contents="${commit_contents} -- cgit v1.2.3 From 29f888ab82314300bae873b564181f001d9f86a1 Mon Sep 17 00:00:00 2001 From: Denis Nikitin <denik@google.com> Date: Sat, 29 Oct 2022 14:29:03 -0700 Subject: update_kernel_afdo: Upload CLs automatically Added options --upload (default) and --noupload. BUG=None TEST=./update_kernel_afdo Change-Id: Ib720646a7fbec5b41205beecd84ffbeb31227a15 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3994210 Reviewed-by: Manoj Gupta <manojgupta@chromium.org> Commit-Queue: Denis Nikitin <denik@chromium.org> Tested-by: Denis Nikitin <denik@chromium.org> --- afdo_tools/update_kernel_afdo | 77 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 62 insertions(+), 15 deletions(-) diff --git a/afdo_tools/update_kernel_afdo b/afdo_tools/update_kernel_afdo index be0fa8d0..9e4d645d 100755 --- a/afdo_tools/update_kernel_afdo +++ b/afdo_tools/update_kernel_afdo @@ -9,7 +9,7 @@ # USAGE=" -Usage: $(basename "$0") [main|beta|stable|all] [--help] +Usage: $(basename "$0") [--noupload|-upload] [main|beta|stable|all] [--help] Description: The script takes one optional argument which is the channel where we want @@ -19,6 +19,12 @@ channels) in the corresponding branch. Follow the prompt to upload the changes. NO CLEAN-UP NEEDED. The script ignores any local changes and keeps the current branch unchanged. + + Args: + --help Show this help. + --upload Upload CLs when the update succeeded (default). + --noupload Do not upload CLs. Instead, print the upload commands. + main|beta|stable Update metadata only on the specified channel. " set -eu @@ -60,6 +66,8 @@ abs_tc_utils_dir="$(realpath "${tc_utils_dir}")" # Check profiles uploaded within the last week. expected_time=$(date +%s -d "week ago") +# Upload CLs on success. +upload_cl=true ARCHS="amd arm" declare -A arch_gsbase arch_kvers arch_outfile @@ -97,26 +105,46 @@ for skipped_branch in ${SKIPPED_BRANCHES} ; do done # Without arguments the script updates all branches. -channels=${1:-"all"} -case "${channels}" in +channels="" +for arg in "$@" +do + case "${arg}" in stable | canary | beta ) + channels="${channels} ${arg}" ;; main ) - channels="canary" + channels="${channels} canary" ;; all ) channels="canary beta stable" ;; + --noupload | --no-upload) + upload_cl=false + ;; + --upload) + upload_cl=true + ;; --help | help | -h ) echo "${USAGE}" exit 0 ;; - * ) - echo "Channel \"${channels}\" is not supported. + -*) + echo "Option \"${arg}\" is not supported." >&2 + echo "${USAGE}" + exit 1 + ;; + *) + echo "Channel \"${arg}\" is not supported. Must be main (or canary), beta, stable or all." >&2 echo "${USAGE}" exit 1 -esac + esac +done + +if [[ -z "${channels}" ]] +then + channels="canary beta stable" +fi # Fetch latest branches. git -C "${tc_utils_dir}" fetch "${remote_repo}" @@ -128,10 +156,19 @@ echo "-> Working in ${worktree_dir}" # change. 
Neither we should care about clean-up after the submit. git -C "${tc_utils_dir}" worktree add --detach "${worktree_dir}" trap 'git -C "${abs_tc_utils_dir}" worktree remove -f "${worktree_dir}"' EXIT -cd "${worktree_dir}" +pushd "${worktree_dir}" for channel in ${channels} do + set +u + if [[ -n "${commit[${channel}]}" ]] + then + echo "Skipping channel ${channel} which already has commit\ + ${commit[${channel}]}." + continue + fi + set -u + errs="" successes=0 curr_branch_number=${branch_number[${channel}]} @@ -280,6 +317,7 @@ TEST=Verified in kernel-release-afdo-verify-orchestrator" commit[${channel}]=$(git -C "${worktree_dir}" rev-parse HEAD) done +popd echo # Array size check doesn't play well with the unbound variable option. set +u @@ -287,13 +325,22 @@ if [[ ${#commit[@]} -gt 0 ]] then set -u echo "The change is applied in ${!commit[*]}." - echo "Run these commands to upload the change:" - echo - for channel in "${!commit[@]}" - do - echo -e "\tgit -C ${tc_utils_dir} push ${remote_repo} \ -${commit[${channel}]}:refs/for/${branch[${channel}]}" - done + if ${upload_cl} + then + for channel in "${!commit[@]}" + do + git -C "${tc_utils_dir}" push "${remote_repo}" \ + "${commit[${channel}]}:refs/for/${branch[${channel}]}" + done + else + echo "Run these commands to upload the change:" + echo + for channel in "${!commit[@]}" + do + echo -e "\tgit -C ${tc_utils_dir} push ${remote_repo} \ + ${commit[${channel}]}:refs/for/${branch[${channel}]}" + done + fi # Report failed channels. if [[ -n "${failed_channels}" ]] -- cgit v1.2.3 From 6b285afc5e9caadeb6741efe872981ffdbb3bfe1 Mon Sep 17 00:00:00 2001 From: Christopher Di Bella <cjdb@google.com> Date: Tue, 9 Aug 2022 23:46:12 +0000 Subject: compiler-wrapper: adds an IWYU component This first patch makes it possible to run IWYU as a part of the build process. It's not currently possible for us to make changes to packages: this functionality will appear in a later CL. 
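For orientation, here is a minimal Python sketch of the invocation the new Go code below (calcIWYUInvocation) constructs; the function and parameter names are illustrative, not part of the wrapper. The wrapper takes the prepared clang command, strips any source-file arguments, prepends the IWYU flags and the clang resource directory, and re-appends the single source file so include-what-you-use sees exactly one input:

    import os
    from typing import List

    # Mirrors srcFileSuffixes in the Go code below.
    SRC_SUFFIXES = (".c", ".cc", ".cpp", ".C", ".cxx", ".c++")

    def iwyu_argv(clang_path: str, clang_args: List[str], resource_dir: str,
                  src_file: str, iwyu_flags: List[str]) -> List[str]:
        """Builds an include-what-you-use argv from a prepared clang command."""
        # include-what-you-use is expected to live next to the clang binary.
        iwyu = os.path.join(os.path.dirname(clang_path), "include-what-you-use")
        args = list(iwyu_flags) + ["-resource-dir=" + resource_dir]
        # Drop source files from the clang args, then append the one source
        # file at the end so IWYU sees exactly one input.
        args += [a for a in clang_args if not a.endswith(SRC_SUFFIXES)]
        return [iwyu, *args, src_file]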
BUG=b:237320348 TEST=Tested locally Change-Id: I00610284143cf478b242b2c0ca1c05e2c8d43de4 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3820351 Reviewed-by: Ryan Beltran <ryanbeltran@chromium.org> Auto-Submit: Christopher Di Bella <cjdb@google.com> Commit-Queue: Ryan Beltran <ryanbeltran@chromium.org> Tested-by: Christopher Di Bella <cjdb@google.com> --- compiler_wrapper/compiler_wrapper.go | 15 ++ compiler_wrapper/iwyu_flag.go | 155 +++++++++++++++++++++ .../testdata/cros_clang_host_golden/clangtidy.json | 8 +- .../testdata/cros_hardened_golden/clangtidy.json | 8 +- .../cros_hardened_llvmnext_golden/clangtidy.json | 8 +- .../cros_hardened_noccache_golden/clangtidy.json | 8 +- .../cros_nonhardened_golden/clangtidy.json | 8 +- 7 files changed, 190 insertions(+), 20 deletions(-) create mode 100644 compiler_wrapper/iwyu_flag.go diff --git a/compiler_wrapper/compiler_wrapper.go b/compiler_wrapper/compiler_wrapper.go index 1386374e..dcaada99 100644 --- a/compiler_wrapper/compiler_wrapper.go +++ b/compiler_wrapper/compiler_wrapper.go @@ -151,6 +151,7 @@ func callCompilerInternal(env env, cfg *config, inputCmd *command) (exitCode int } } else { cSrcFile, tidyFlags, tidyMode := processClangTidyFlags(mainBuilder) + cSrcFile, iwyuFlags, iwyuMode := processIWYUFlags(mainBuilder) if mainBuilder.target.compilerType == clangType { err := prepareClangCommand(mainBuilder) if err != nil { @@ -176,6 +177,20 @@ func callCompilerInternal(env env, cfg *config, inputCmd *command) (exitCode int return 0, err } } + + if iwyuMode != iwyuModeNone { + if iwyuMode == iwyuModeError { + panic(fmt.Sprintf("Unknown IWYU mode")) + } + + allowCCache = false + clangCmdWithoutRemoteBuildAndCCache := mainBuilder.build() + err := runIWYU(env, clangCmdWithoutRemoteBuildAndCCache, cSrcFile, iwyuFlags) + if err != nil { + return 0, err + } + } + if remoteBuildUsed, err = processRemoteBuildAndCCacheFlags(allowCCache, mainBuilder); err != nil { return 0, err } diff --git a/compiler_wrapper/iwyu_flag.go b/compiler_wrapper/iwyu_flag.go new file mode 100644 index 00000000..c1e6af65 --- /dev/null +++ b/compiler_wrapper/iwyu_flag.go @@ -0,0 +1,155 @@ +// Copyright 2022 The ChromiumOS Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package main + +import ( + "bufio" + "bytes" + "fmt" + "path/filepath" + "strings" +) + +type useIWYUMode int + +const iwyuCrashSubstring = "PLEASE submit a bug report" + +const ( + iwyuModeNone useIWYUMode = iota + iwyuModeAll + iwyuModeError +) + +var srcFileSuffixes = []string{ + ".c", + ".cc", + ".cpp", + ".C", + ".cxx", + ".c++", +} + +func findWithIWYUFlag(args []builderArg) (string, []builderArg) { + for i := range args { + if args[i].value == "--with-iwyu" { + args = append(args[:i], args[i+1:]...) 
+ return "1", args + } + } + return "", args +} + +func processIWYUFlags(builder *commandBuilder) (cSrcFile string, iwyuFlags []string, mode useIWYUMode) { + builder.transformArgs(func(arg builderArg) string { + const prefix = "-iwyu-flag=" + if !strings.HasPrefix(arg.value, prefix) { + return arg.value + } + + iwyuFlags = append(iwyuFlags, arg.value[len(prefix):]) + return "" + }) + + withIWYU, _ := builder.env.getenv("WITH_IWYU") + if withIWYU == "" { + withIWYU, builder.args = findWithIWYUFlag(builder.args) + if withIWYU == "" { + return "", iwyuFlags, iwyuModeNone + } + } + + cSrcFile = "" + lastArg := "" + for _, arg := range builder.args { + if lastArg != "-o" { + for _, suffix := range srcFileSuffixes { + if strings.HasSuffix(arg.value, suffix) { + cSrcFile = arg.value + break + } + } + } + lastArg = arg.value + } + + if cSrcFile == "" { + return "", iwyuFlags, iwyuModeNone + } + + if withIWYU != "1" { + return "", iwyuFlags, iwyuModeError + } + + return cSrcFile, iwyuFlags, iwyuModeAll +} + +func calcIWYUInvocation(env env, clangCmd *command, cSrcFile string, iwyuFlags ...string) (*command, error) { + resourceDir, err := getClangResourceDir(env, clangCmd.Path) + if err != nil { + return nil, err + } + + iwyuPath := filepath.Join(filepath.Dir(clangCmd.Path), "include-what-you-use") + args := append([]string{}, iwyuFlags...) + args = append(args, "-resource-dir="+resourceDir) + args = append(args, clangCmd.Args...) + + for i := 0; i < len(args); i++ { + for j := 0; j < len(srcFileSuffixes); j++ { + if strings.HasSuffix(args[i], srcFileSuffixes[j]) { + args = append(args[:i], args[i+1:]...) + break + } + } + } + args = append(args, cSrcFile) + + return &command{ + Path: iwyuPath, + Args: args, + EnvUpdates: clangCmd.EnvUpdates, + }, nil +} + +func runIWYU(env env, clangCmd *command, cSrcFile string, extraIWYUFlags []string) error { + extraIWYUFlags = append(extraIWYUFlags, "-Xiwyu", "--mapping_file=/usr/share/include-what-you-use/libcxx.imp", "-Xiwyu", "--no_fwd_decls") + iwyuCmd, err := calcIWYUInvocation(env, clangCmd, cSrcFile, extraIWYUFlags...) + if err != nil { + return fmt.Errorf("calculating include-what-you-use invocation: %v", err) + } + + // Note: We pass nil as stdin as we checked before that the compiler + // was invoked with a source file argument. + var stderr bytes.Buffer + stderr_writer := bufio.NewWriter(&stderr) + exitCode, err := wrapSubprocessErrorWithSourceLoc(iwyuCmd, + env.run(iwyuCmd, nil, nil, stderr_writer)) + stderr_ := stderr.String() + fmt.Fprintln(env.stderr(), stderr_) + + if err == nil && exitCode != 0 { + // Note: We continue on purpose when include-what-you-use fails + // to maintain compatibility with the previous wrapper. + fmt.Fprintln(env.stderr(), "include-what-you-use failed") + } + + var path strings.Builder + path.WriteString(strings.TrimSuffix(iwyuCmd.Path, "include-what-you-use")) + path.WriteString("fix_includes.py") + fixIncludesCmd := &command{ + Path: path.String(), + Args: []string{"--nocomment"}, + EnvUpdates: clangCmd.EnvUpdates, + } + + exitCode, err = wrapSubprocessErrorWithSourceLoc(fixIncludesCmd, + env.run(fixIncludesCmd, strings.NewReader(stderr_), env.stdout(), env.stderr())) + if err == nil && exitCode != 0 { + // Note: We continue on purpose when include-what-you-use fails + // to maintain compatibility with the previous wrapper. 
+ fmt.Fprint(env.stderr(), "include-what-you-use failed") + } + return nil +} diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json index c1cf0507..d0a604ba 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json @@ -27,7 +27,7 @@ "path": "/tmp/stable/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "-Qunused-arguments", @@ -117,7 +117,7 @@ "path": "/tmp/stable/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "-Qunused-arguments", @@ -209,7 +209,7 @@ "path": "/tmp/stable/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "-Qunused-arguments", @@ -305,7 +305,7 @@ "path": "/tmp/stable/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "-Qunused-arguments", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json index f7438940..bfef2799 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json @@ -27,7 +27,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -141,7 +141,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -258,7 +258,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -379,7 +379,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ 
"-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json index f7438940..bfef2799 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json @@ -27,7 +27,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -141,7 +141,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -258,7 +258,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -379,7 +379,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json index f7438940..bfef2799 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json @@ -27,7 +27,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -141,7 +141,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -258,7 +258,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ 
"-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -379,7 +379,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json index 830abee6..3d5078df 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json @@ -27,7 +27,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -127,7 +127,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -230,7 +230,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -337,7 +337,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "main.cc", + "", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", -- cgit v1.2.3 From 537f2ecfe7bc9eb996cd4abedae26bf699ab54dd Mon Sep 17 00:00:00 2001 From: Manoj Gupta <manojgupta@chromium.org> Date: Tue, 1 Nov 2022 17:48:50 +0000 Subject: Revert "compiler-wrapper: adds an IWYU component" This reverts commit 6b285afc5e9caadeb6741efe872981ffdbb3bfe1. Reason for revert: Likely erroneous change. Original change's description: > compiler-wrapper: adds an IWYU component > > This first patch makes it possible to run IWYU as a part of the build > process. It's not currently possible for us to make changes to packages: > this functionality will appear in a later CL. 
> > BUG=b:237320348 > TEST=Tested locally > > Change-Id: I00610284143cf478b242b2c0ca1c05e2c8d43de4 > Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3820351 > Reviewed-by: Ryan Beltran <ryanbeltran@chromium.org> > Auto-Submit: Christopher Di Bella <cjdb@google.com> > Commit-Queue: Ryan Beltran <ryanbeltran@chromium.org> > Tested-by: Christopher Di Bella <cjdb@google.com> Bug: b:237320348 Change-Id: Ia563382c3302b2a4c69942db95d315088763f97c Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3995401 Auto-Submit: Manoj Gupta <manojgupta@chromium.org> Commit-Queue: Ryan Beltran <ryanbeltran@chromium.org> Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com> Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com> Reviewed-by: Ryan Beltran <ryanbeltran@chromium.org> --- compiler_wrapper/compiler_wrapper.go | 15 -- compiler_wrapper/iwyu_flag.go | 155 --------------------- .../testdata/cros_clang_host_golden/clangtidy.json | 8 +- .../testdata/cros_hardened_golden/clangtidy.json | 8 +- .../cros_hardened_llvmnext_golden/clangtidy.json | 8 +- .../cros_hardened_noccache_golden/clangtidy.json | 8 +- .../cros_nonhardened_golden/clangtidy.json | 8 +- 7 files changed, 20 insertions(+), 190 deletions(-) delete mode 100644 compiler_wrapper/iwyu_flag.go diff --git a/compiler_wrapper/compiler_wrapper.go b/compiler_wrapper/compiler_wrapper.go index dcaada99..1386374e 100644 --- a/compiler_wrapper/compiler_wrapper.go +++ b/compiler_wrapper/compiler_wrapper.go @@ -151,7 +151,6 @@ func callCompilerInternal(env env, cfg *config, inputCmd *command) (exitCode int } } else { cSrcFile, tidyFlags, tidyMode := processClangTidyFlags(mainBuilder) - cSrcFile, iwyuFlags, iwyuMode := processIWYUFlags(mainBuilder) if mainBuilder.target.compilerType == clangType { err := prepareClangCommand(mainBuilder) if err != nil { @@ -177,20 +176,6 @@ func callCompilerInternal(env env, cfg *config, inputCmd *command) (exitCode int return 0, err } } - - if iwyuMode != iwyuModeNone { - if iwyuMode == iwyuModeError { - panic(fmt.Sprintf("Unknown IWYU mode")) - } - - allowCCache = false - clangCmdWithoutRemoteBuildAndCCache := mainBuilder.build() - err := runIWYU(env, clangCmdWithoutRemoteBuildAndCCache, cSrcFile, iwyuFlags) - if err != nil { - return 0, err - } - } - if remoteBuildUsed, err = processRemoteBuildAndCCacheFlags(allowCCache, mainBuilder); err != nil { return 0, err } diff --git a/compiler_wrapper/iwyu_flag.go b/compiler_wrapper/iwyu_flag.go deleted file mode 100644 index c1e6af65..00000000 --- a/compiler_wrapper/iwyu_flag.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2022 The ChromiumOS Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package main - -import ( - "bufio" - "bytes" - "fmt" - "path/filepath" - "strings" -) - -type useIWYUMode int - -const iwyuCrashSubstring = "PLEASE submit a bug report" - -const ( - iwyuModeNone useIWYUMode = iota - iwyuModeAll - iwyuModeError -) - -var srcFileSuffixes = []string{ - ".c", - ".cc", - ".cpp", - ".C", - ".cxx", - ".c++", -} - -func findWithIWYUFlag(args []builderArg) (string, []builderArg) { - for i := range args { - if args[i].value == "--with-iwyu" { - args = append(args[:i], args[i+1:]...) 
- return "1", args - } - } - return "", args -} - -func processIWYUFlags(builder *commandBuilder) (cSrcFile string, iwyuFlags []string, mode useIWYUMode) { - builder.transformArgs(func(arg builderArg) string { - const prefix = "-iwyu-flag=" - if !strings.HasPrefix(arg.value, prefix) { - return arg.value - } - - iwyuFlags = append(iwyuFlags, arg.value[len(prefix):]) - return "" - }) - - withIWYU, _ := builder.env.getenv("WITH_IWYU") - if withIWYU == "" { - withIWYU, builder.args = findWithIWYUFlag(builder.args) - if withIWYU == "" { - return "", iwyuFlags, iwyuModeNone - } - } - - cSrcFile = "" - lastArg := "" - for _, arg := range builder.args { - if lastArg != "-o" { - for _, suffix := range srcFileSuffixes { - if strings.HasSuffix(arg.value, suffix) { - cSrcFile = arg.value - break - } - } - } - lastArg = arg.value - } - - if cSrcFile == "" { - return "", iwyuFlags, iwyuModeNone - } - - if withIWYU != "1" { - return "", iwyuFlags, iwyuModeError - } - - return cSrcFile, iwyuFlags, iwyuModeAll -} - -func calcIWYUInvocation(env env, clangCmd *command, cSrcFile string, iwyuFlags ...string) (*command, error) { - resourceDir, err := getClangResourceDir(env, clangCmd.Path) - if err != nil { - return nil, err - } - - iwyuPath := filepath.Join(filepath.Dir(clangCmd.Path), "include-what-you-use") - args := append([]string{}, iwyuFlags...) - args = append(args, "-resource-dir="+resourceDir) - args = append(args, clangCmd.Args...) - - for i := 0; i < len(args); i++ { - for j := 0; j < len(srcFileSuffixes); j++ { - if strings.HasSuffix(args[i], srcFileSuffixes[j]) { - args = append(args[:i], args[i+1:]...) - break - } - } - } - args = append(args, cSrcFile) - - return &command{ - Path: iwyuPath, - Args: args, - EnvUpdates: clangCmd.EnvUpdates, - }, nil -} - -func runIWYU(env env, clangCmd *command, cSrcFile string, extraIWYUFlags []string) error { - extraIWYUFlags = append(extraIWYUFlags, "-Xiwyu", "--mapping_file=/usr/share/include-what-you-use/libcxx.imp", "-Xiwyu", "--no_fwd_decls") - iwyuCmd, err := calcIWYUInvocation(env, clangCmd, cSrcFile, extraIWYUFlags...) - if err != nil { - return fmt.Errorf("calculating include-what-you-use invocation: %v", err) - } - - // Note: We pass nil as stdin as we checked before that the compiler - // was invoked with a source file argument. - var stderr bytes.Buffer - stderr_writer := bufio.NewWriter(&stderr) - exitCode, err := wrapSubprocessErrorWithSourceLoc(iwyuCmd, - env.run(iwyuCmd, nil, nil, stderr_writer)) - stderr_ := stderr.String() - fmt.Fprintln(env.stderr(), stderr_) - - if err == nil && exitCode != 0 { - // Note: We continue on purpose when include-what-you-use fails - // to maintain compatibility with the previous wrapper. - fmt.Fprintln(env.stderr(), "include-what-you-use failed") - } - - var path strings.Builder - path.WriteString(strings.TrimSuffix(iwyuCmd.Path, "include-what-you-use")) - path.WriteString("fix_includes.py") - fixIncludesCmd := &command{ - Path: path.String(), - Args: []string{"--nocomment"}, - EnvUpdates: clangCmd.EnvUpdates, - } - - exitCode, err = wrapSubprocessErrorWithSourceLoc(fixIncludesCmd, - env.run(fixIncludesCmd, strings.NewReader(stderr_), env.stdout(), env.stderr())) - if err == nil && exitCode != 0 { - // Note: We continue on purpose when include-what-you-use fails - // to maintain compatibility with the previous wrapper. 
- fmt.Fprint(env.stderr(), "include-what-you-use failed") - } - return nil -} diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json index d0a604ba..c1cf0507 100644 --- a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json @@ -27,7 +27,7 @@ "path": "/tmp/stable/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "-Qunused-arguments", @@ -117,7 +117,7 @@ "path": "/tmp/stable/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "-Qunused-arguments", @@ -209,7 +209,7 @@ "path": "/tmp/stable/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "-Qunused-arguments", @@ -305,7 +305,7 @@ "path": "/tmp/stable/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "-Qunused-arguments", diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json index bfef2799..f7438940 100644 --- a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json @@ -27,7 +27,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -141,7 +141,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -258,7 +258,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -379,7 +379,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ 
"-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json index bfef2799..f7438940 100644 --- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json @@ -27,7 +27,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -141,7 +141,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -258,7 +258,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -379,7 +379,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json index bfef2799..f7438940 100644 --- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json @@ -27,7 +27,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -141,7 +141,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -258,7 +258,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ 
"-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -379,7 +379,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json index 3d5078df..830abee6 100644 --- a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json +++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json @@ -27,7 +27,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -127,7 +127,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -230,7 +230,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", @@ -337,7 +337,7 @@ "path": "../../usr/bin/clang-tidy", "args": [ "-checks=*,-bugprone-narrowing-conversions,-cppcoreguidelines-*,-fuchsia-*,-google-readability*,-google-runtime-references,-hicpp-*,-llvm-*,-misc-non-private-member-variables-in-classes,-misc-unused-parameters,-modernize-*,-readability-*", - "", + "main.cc", "--", "-resource-dir=someResourceDir", "--sysroot=/usr/x86_64-cros-linux-gnu", -- cgit v1.2.3 From e2cce3561628cfafd411417d372cef0719f5166d Mon Sep 17 00:00:00 2001 From: Adrian Dole <adriandole@google.com> Date: Wed, 2 Nov 2022 21:32:50 +0000 Subject: compiler_wrapper: build with zero initialization See go/chromeos-zero-init BUG=b:234642720 TEST=CQ Change-Id: Icbcb9e9b3928b46875c78c99727b1ec9839f27c3 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3999814 Reviewed-by: Adrian Dole <adriandole@google.com> Reviewed-by: Manoj Gupta <manojgupta@chromium.org> Tested-by: Adrian Dole <adriandole@google.com> Auto-Submit: Adrian Dole <adriandole@google.com> Commit-Queue: Manoj Gupta <manojgupta@chromium.org> --- compiler_wrapper/config.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/compiler_wrapper/config.go b/compiler_wrapper/config.go index 64d77ae7..6c3fcf52 100644 --- 
a/compiler_wrapper/config.go +++ b/compiler_wrapper/config.go @@ -164,6 +164,8 @@ var crosHardenedConfig = config{ "-Wno-section", "-fno-addrsig", "-fuse-ld=lld", + "-ftrivial-auto-var-init=zero", + "-enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang", ), clangPostFlags: crosCommonClangPostFlags(), newWarningsDir: "/tmp/fatal_clang_warnings", -- cgit v1.2.3 From da86b3e9aec025cfb008d0e39361d1b54278b928 Mon Sep 17 00:00:00 2001 From: Ryan Beltran <ryanbeltran@chromium.org> Date: Tue, 1 Nov 2022 18:41:38 +0000 Subject: Revert "Revert "compiler-wrapper: adds an IWYU component"" This reverts commit 537f2ecfe7bc9eb996cd4abedae26bf699ab54dd. It also fixes the logic bug which caused the revert in the first place. IWYU flag processing was removing the file names if IWYU was not needed, which meant clang-tidy calls were not getting the file names added to the back of the command. It also updates the header to the most recent style guidance and updates some golang variable names to make sure we pass go lint. BUG=b:237320348 TEST=go test Change-Id: I14885fb90f97d2fb483ae1a01e14c44050852dc3 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3997486 Commit-Queue: Ryan Beltran <ryanbeltran@chromium.org> Reviewed-by: Ryan Beltran <ryanbeltran@chromium.org> Reviewed-by: Manoj Gupta <manojgupta@chromium.org> Tested-by: Ryan Beltran <ryanbeltran@chromium.org> --- compiler_wrapper/compiler_wrapper.go | 15 ++++ compiler_wrapper/iwyu_flag.go | 156 +++++++++++++++++++++++++++++++++++ compiler_wrapper/iwyu_flag_test.go | 135 ++++++++++++++++++++++++++++++ 3 files changed, 306 insertions(+) create mode 100644 compiler_wrapper/iwyu_flag.go create mode 100644 compiler_wrapper/iwyu_flag_test.go diff --git a/compiler_wrapper/compiler_wrapper.go b/compiler_wrapper/compiler_wrapper.go index 1386374e..dcaada99 100644 --- a/compiler_wrapper/compiler_wrapper.go +++ b/compiler_wrapper/compiler_wrapper.go @@ -151,6 +151,7 @@ func callCompilerInternal(env env, cfg *config, inputCmd *command) (exitCode int } } else { cSrcFile, tidyFlags, tidyMode := processClangTidyFlags(mainBuilder) + cSrcFile, iwyuFlags, iwyuMode := processIWYUFlags(mainBuilder) if mainBuilder.target.compilerType == clangType { err := prepareClangCommand(mainBuilder) if err != nil { @@ -176,6 +177,20 @@ func callCompilerInternal(env env, cfg *config, inputCmd *command) (exitCode int return 0, err } } + + if iwyuMode != iwyuModeNone { + if iwyuMode == iwyuModeError { + panic(fmt.Sprintf("Unknown IWYU mode")) + } + + allowCCache = false + clangCmdWithoutRemoteBuildAndCCache := mainBuilder.build() + err := runIWYU(env, clangCmdWithoutRemoteBuildAndCCache, cSrcFile, iwyuFlags) + if err != nil { + return 0, err + } + } + if remoteBuildUsed, err = processRemoteBuildAndCCacheFlags(allowCCache, mainBuilder); err != nil { return 0, err } diff --git a/compiler_wrapper/iwyu_flag.go b/compiler_wrapper/iwyu_flag.go new file mode 100644 index 00000000..d13d114d --- /dev/null +++ b/compiler_wrapper/iwyu_flag.go @@ -0,0 +1,156 @@ +// Copyright 2022 The ChromiumOS Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file.
+ +package main + +import ( + "bufio" + "bytes" + "fmt" + "path/filepath" + "strings" +) + +type useIWYUMode int + +const iwyuCrashSubstring = "PLEASE submit a bug report" + +const ( + iwyuModeNone useIWYUMode = iota + iwyuModeAll + iwyuModeError +) + +var srcFileSuffixes = []string{ + ".c", + ".cc", + ".cpp", + ".C", + ".cxx", + ".c++", +} + +func findWithIWYUFlag(args []builderArg) (string, []builderArg) { + for i := range args { + if args[i].value == "--with-iwyu" { + args = append(args[:i], args[i+1:]...) + return "1", args + } + } + return "", args +} + +func processIWYUFlags(builder *commandBuilder) (cSrcFile string, iwyuFlags []string, mode useIWYUMode) { + builder.transformArgs(func(arg builderArg) string { + const prefix = "-iwyu-flag=" + if !strings.HasPrefix(arg.value, prefix) { + return arg.value + } + + iwyuFlags = append(iwyuFlags, arg.value[len(prefix):]) + return "" + }) + + cSrcFile = "" + lastArg := "" + for _, arg := range builder.args { + if lastArg != "-o" { + for _, suffix := range srcFileSuffixes { + if strings.HasSuffix(arg.value, suffix) { + cSrcFile = arg.value + break + } + } + } + lastArg = arg.value + } + + if cSrcFile == "" { + return "", iwyuFlags, iwyuModeNone + } + + withIWYU, _ := builder.env.getenv("WITH_IWYU") + if withIWYU == "" { + withIWYU, builder.args = findWithIWYUFlag(builder.args) + if withIWYU == "" { + return cSrcFile, iwyuFlags, iwyuModeNone + } + } + + if withIWYU != "1" { + return cSrcFile, iwyuFlags, iwyuModeError + } + + return cSrcFile, iwyuFlags, iwyuModeAll +} + +func calcIWYUInvocation(env env, clangCmd *command, cSrcFile string, iwyuFlags ...string) (*command, error) { + resourceDir, err := getClangResourceDir(env, clangCmd.Path) + if err != nil { + return nil, err + } + + iwyuPath := filepath.Join(filepath.Dir(clangCmd.Path), "include-what-you-use") + args := append([]string{}, iwyuFlags...) + args = append(args, "-resource-dir="+resourceDir) + args = append(args, clangCmd.Args...) + + for i := 0; i < len(args); i++ { + for j := 0; j < len(srcFileSuffixes); j++ { + if strings.HasSuffix(args[i], srcFileSuffixes[j]) { + args = append(args[:i], args[i+1:]...) + break + } + } + } + args = append(args, cSrcFile) + + return &command{ + Path: iwyuPath, + Args: args, + EnvUpdates: clangCmd.EnvUpdates, + }, nil +} + +func runIWYU(env env, clangCmd *command, cSrcFile string, extraIWYUFlags []string) error { + extraIWYUFlags = append(extraIWYUFlags, "-Xiwyu", "--mapping_file=/usr/share/include-what-you-use/libcxx.imp", "-Xiwyu", "--no_fwd_decls") + iwyuCmd, err := calcIWYUInvocation(env, clangCmd, cSrcFile, extraIWYUFlags...) + if err != nil { + return fmt.Errorf("calculating include-what-you-use invocation: %v", err) + } + + // Note: We pass nil as stdin as we checked before that the compiler + // was invoked with a source file argument. + var stderr bytes.Buffer + stderrWriter := bufio.NewWriter(&stderr) + exitCode, err := wrapSubprocessErrorWithSourceLoc(iwyuCmd, + env.run(iwyuCmd, nil, nil, stderrWriter)) + stderrMessage := stderr.String() + fmt.Fprintln(env.stderr(), stderrMessage) + + if err == nil && exitCode != 0 { + // Note: We continue on purpose when include-what-you-use fails + // to maintain compatibility with the previous wrapper. 
+ fmt.Fprintln(env.stderr(), "include-what-you-use failed") + } + + var path strings.Builder + path.WriteString(strings.TrimSuffix(iwyuCmd.Path, "include-what-you-use")) + path.WriteString("fix_includes.py") + fixIncludesCmd := &command{ + Path: path.String(), + Args: []string{"--nocomment"}, + EnvUpdates: clangCmd.EnvUpdates, + } + + exitCode, err = wrapSubprocessErrorWithSourceLoc(fixIncludesCmd, + env.run(fixIncludesCmd, strings.NewReader(stderrMessage), env.stdout(), env.stderr())) + if err == nil && exitCode != 0 { + // Note: We continue on purpose when fix_includes.py fails + // to maintain compatibility with the previous wrapper. + fmt.Fprintln(env.stderr(), "fix_includes.py failed") + } + + return err +} diff --git a/compiler_wrapper/iwyu_flag_test.go b/compiler_wrapper/iwyu_flag_test.go new file mode 100644 index 00000000..76135944 --- /dev/null +++ b/compiler_wrapper/iwyu_flag_test.go @@ -0,0 +1,135 @@ +// Copyright 2022 The ChromiumOS Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package main + +import ( + "errors" + "io" + "strings" + "testing" +) + +func TestIWYUArgOrder(t *testing.T) { + withIWYUTestContext(t, func(ctx *testContext) { + ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error { + if ctx.cmdCount == 2 { + if err := verifyArgOrder(cmd, "-checks=.*", mainCc, "--", "-resource-dir=.*", mainCc, "--some_arg"); err != nil { + return err + } + } + return nil + } + ctx.must(callCompiler(ctx, ctx.cfg, + ctx.newCommand(clangX86_64, mainCc, "--some_arg"))) + if ctx.cmdCount < 2 { + t.Error("expected multiple calls.") + } + }) +} + +func TestIgnoreNonZeroExitCodeFromIWYU(t *testing.T) { + withIWYUTestContext(t, func(ctx *testContext) { + ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error { + if ctx.cmdCount == 2 { + return newExitCodeError(23) + } + return nil + } + ctx.must(callCompiler(ctx, ctx.cfg, + ctx.newCommand(clangX86_64, mainCc))) + stderr := ctx.stderrString() + if err := verifyNonInternalError(stderr, "include-what-you-use failed"); err != nil { + t.Error(err) + } + }) + } + +func TestReportGeneralErrorsFromIWYU(t *testing.T) { + withIWYUTestContext(t, func(ctx *testContext) { + ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error { + if ctx.cmdCount > 1 { + return errors.New("someerror") + } + return nil + } + stderr := ctx.mustFail(callCompiler(ctx, ctx.cfg, + ctx.newCommand(clangX86_64, mainCc))) + if err := verifyInternalError(stderr); err != nil { + t.Fatal(err) + } + if !strings.Contains(stderr, "someerror") { + t.Errorf("unexpected error.
Got: %s", stderr) + } + }) +} + +func TestUseIWYUBasedOnFileExtension(t *testing.T) { + withIWYUTestContext(t, func(ctx *testContext) { + testData := []struct { + args []string + iwyu bool + }{ + {[]string{"main.cc"}, true}, + {[]string{"main.cc"}, true}, + {[]string{"main.C"}, true}, + {[]string{"main.cxx"}, true}, + {[]string{"main.c++"}, true}, + {[]string{"main.xy"}, false}, + {[]string{"-o", "main.cc"}, false}, + {[]string{}, false}, + } + for _, tt := range testData { + ctx.cmdCount = 0 + ctx.must(callCompiler(ctx, ctx.cfg, + ctx.newCommand(clangX86_64, tt.args...))) + if ctx.cmdCount > 1 && !tt.iwyu { + t.Errorf("expected a call to iwyu but got none for args %s", tt.args) + } + if ctx.cmdCount == 1 && tt.iwyu { + t.Errorf("expected no call to iwyu but got one for args %s", tt.args) + } + } + }) +} + +func TestIWYUFiltersIWYUFlags(t *testing.T) { + withIWYUTestContext(t, func(ctx *testContext) { + addedFlag := "--some_iwyu_flag=flag" + ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error { + switch ctx.cmdCount { + case 1: + if err := verifyPath(cmd, "usr/bin/clang"); err != nil { + t.Error(err) + } else if err := verifyArgCount(cmd, 0, addedFlag); err != nil { + t.Error(err) + } + return nil + case 2: + if err := verifyPath(cmd, "usr/bin/include-what-you-use"); err != nil { + t.Error(err) + } else if verifyArgCount(cmd, 1, addedFlag); err != nil { + t.Error(err) + } + return nil + default: + return nil + } + } + cmd := ctx.must(callCompiler(ctx, ctx.cfg, ctx.newCommand(clangX86_64, mainCc, "-iwyu-flag="+addedFlag))) + if ctx.cmdCount < 2 { + t.Errorf("expected multiple calls.") + } + if err := verifyPath(cmd, "usr/bin/clang"); err != nil { + t.Error(err) + } + }) +} + +func withIWYUTestContext(t *testing.T, work func(ctx *testContext)) { + withTestContext(t, func(ctx *testContext) { + ctx.env = []string{"WITH_IWYU=1"} + work(ctx) + }) +} -- cgit v1.2.3 From 2c474af4f370b143032144aff1ff1985f789e20f Mon Sep 17 00:00:00 2001 From: George Burgess IV <gbiv@google.com> Date: Thu, 14 Jul 2022 14:22:51 -0700 Subject: rust_tools: add copy_rust_bootstrap.py SDK builder artifacts have changed, so copying them directly to localmirror doesn't work. Add a script that gives us a similar level of convenience. Due to the simplicity of this script, no tests are provided. BUG=b:237786394 TEST=Ran it to upload rust-bootstrap-1.59.0 artifacts Change-Id: I64ff8f6e9043487b3dd5abf04e5f7cca36852f0e Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3764008 Commit-Queue: George Burgess <gbiv@chromium.org> Tested-by: George Burgess <gbiv@chromium.org> Reviewed-by: Michael Benfield <mbenfield@google.com> --- rust_tools/copy_rust_bootstrap.py | 192 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 192 insertions(+) create mode 100755 rust_tools/copy_rust_bootstrap.py diff --git a/rust_tools/copy_rust_bootstrap.py b/rust_tools/copy_rust_bootstrap.py new file mode 100755 index 00000000..5da8007f --- /dev/null +++ b/rust_tools/copy_rust_bootstrap.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python3 +# Copyright 2022 The ChromiumOS Authors. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Copies rust-bootstrap artifacts from an SDK build to localmirror. + +We use localmirror to host these artifacts, but they've changed a bit over +time, so simply `gsutil.py cp $FROM $TO` doesn't work. This script allows the +convenience of the old `cp` command. 
+""" + +import argparse +import logging +import os +from pathlib import Path +import shutil +import subprocess +import sys +import tempfile +from typing import List + + +_LOCALMIRROR_ROOT = 'gs://chromeos-localmirror/distfiles/' + + +def _is_in_chroot() -> bool: + return Path('/etc/cros_chroot_version').exists() + + +def _ensure_pbzip2_is_installed(): + if shutil.which('pbzip2'): + return + + logging.info('Auto-installing pbzip2...') + subprocess.run(['sudo', 'emerge', '-G', 'pbzip2'], check=True) + + +def _determine_target_path(sdk_path: str) -> str: + """Determine where `sdk_path` should sit in localmirror.""" + gs_prefix = 'gs://' + if not sdk_path.startswith(gs_prefix): + raise ValueError(f'Invalid GS path: {sdk_path!r}') + + file_name = Path(sdk_path[len(gs_prefix):]).name + return _LOCALMIRROR_ROOT + file_name + + +def _download(remote_path: str, local_file: Path): + """Downloads the given gs:// path to the given local file.""" + logging.info('Downloading %s -> %s', remote_path, local_file) + subprocess.run( + ['gsutil.py', 'cp', remote_path, + str(local_file)], + check=True, + ) + + +def _debinpkgify(binpkg_file: Path) -> Path: + """Converts a binpkg into the files it installs. + + Note that this function makes temporary files in the same directory as + `binpkg_file`. It makes no attempt to clean them up. + """ + logging.info('Converting %s from a binpkg...', binpkg_file) + + # The SDK builder produces binary packages: + # https://wiki.gentoo.org/wiki/Binary_package_guide + # + # Which means that `binpkg_file` is in the XPAK format. We want to split + # that out, and recompress it from zstd (which is the compression format + # that CrOS uses) to bzip2 (which is what we've historically used, and + # which is what our ebuild expects). + tmpdir = binpkg_file.parent + + def _mkstemp(suffix=None) -> str: + fd, file_path = tempfile.mkstemp(dir=tmpdir, suffix=suffix) + os.close(fd) + return Path(file_path) + + # First, split the actual artifacts that land in the chroot out to + # `temp_file`. + artifacts_file = _mkstemp() + logging.info('Extracting artifacts from %s into %s...', binpkg_file, + artifacts_file) + with artifacts_file.open('wb') as f: + subprocess.run( + [ + 'qtbz2', + '-s', + '-t', + '-O', + str(binpkg_file), + ], + check=True, + stdout=f, + ) + + decompressed_artifacts_file = _mkstemp() + decompressed_artifacts_file.unlink() + logging.info('Decompressing artifacts from %s to %s...', artifacts_file, + decompressed_artifacts_file) + subprocess.run( + [ + 'zstd', + '-d', + str(artifacts_file), + '-o', + str(decompressed_artifacts_file), + ], + check=True, + ) + + # Finally, recompress it as a tbz2. 
+ tbz2_file = _mkstemp('.tbz2') + logging.info( + 'Recompressing artifacts from %s to %s (this may take a while)...', + decompressed_artifacts_file, tbz2_file) + with tbz2_file.open('wb') as f: + subprocess.run( + [ + 'pbzip2', + '-9', + '-c', + str(decompressed_artifacts_file), + ], + check=True, + stdout=f, + ) + return tbz2_file + + +def _upload(local_file: Path, remote_path: str, force: bool): + """Uploads the local file to the given gs:// path.""" + logging.info('Uploading %s -> %s', local_file, remote_path) + cmd_base = ['gsutil.py', 'cp', '-a', 'public-read'] + if not force: + cmd_base.append('-n') + subprocess.run( + cmd_base + [str(local_file), remote_path], + check=True, + stdin=subprocess.DEVNULL, + ) + + +def main(argv: List[str]): + logging.basicConfig( + format='>> %(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: ' + '%(message)s', + level=logging.INFO, + ) + + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + + parser.add_argument( + 'sdk_artifact', + help='Path to the SDK rust-bootstrap artifact to copy. e.g., ' + 'gs://chromeos-prebuilt/host/amd64/amd64-host/' + 'chroot-2022.07.12.134334/packages/dev-lang/' + 'rust-bootstrap-1.59.0.tbz2.') + parser.add_argument( + '-n', + '--dry-run', + action='store_true', + help='Do everything except actually uploading the artifact.') + parser.add_argument( + '--force', + action='store_true', + help='Upload the artifact even if one exists in localmirror already.') + opts = parser.parse_args(argv) + + if not _is_in_chroot(): + parser.error('Run me from within the chroot.') + _ensure_pbzip2_is_installed() + + target_path = _determine_target_path(opts.sdk_artifact) + with tempfile.TemporaryDirectory() as tempdir: + download_path = Path(tempdir) / 'sdk_artifact' + _download(opts.sdk_artifact, download_path) + file_to_upload = _debinpkgify(download_path) + if opts.dry_run: + logging.info('--dry-run specified; skipping upload of %s to %s', + file_to_upload, target_path) + else: + _upload(file_to_upload, target_path, opts.force) + + +if __name__ == '__main__': + main(sys.argv[1:]) -- cgit v1.2.3
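For reference, a minimal sketch of the gs:// path mapping that copy_rust_bootstrap.py performs (mirroring its _determine_target_path helper): the script keeps only the artifact's file name and grafts it onto the localmirror distfiles root. The input path below is an example value taken from the script's own --help text, not a live artifact.

    from pathlib import Path

    _LOCALMIRROR_ROOT = 'gs://chromeos-localmirror/distfiles/'

    def determine_target_path(sdk_path: str) -> str:
        # Strip the gs:// scheme, keep only the artifact's file name, and
        # graft it onto the localmirror distfiles root.
        gs_prefix = 'gs://'
        if not sdk_path.startswith(gs_prefix):
            raise ValueError(f'Invalid GS path: {sdk_path!r}')
        return _LOCALMIRROR_ROOT + Path(sdk_path[len(gs_prefix):]).name

    print(determine_target_path(
        'gs://chromeos-prebuilt/host/amd64/amd64-host/'
        'chroot-2022.07.12.134334/packages/dev-lang/rust-bootstrap-1.59.0.tbz2'))
    # -> gs://chromeos-localmirror/distfiles/rust-bootstrap-1.59.0.tbz2

The sketch needs nothing beyond the Python standard library; the real script additionally shells out to gsutil.py, qtbz2, zstd, and pbzip2 as shown above.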