Diffstat (limited to 'llvm_tools/patch_sync/src')
-rw-r--r--  llvm_tools/patch_sync/src/android_utils.rs    62
-rw-r--r--  llvm_tools/patch_sync/src/main.rs             332
-rw-r--r--  llvm_tools/patch_sync/src/patch_parsing.rs    462
-rw-r--r--  llvm_tools/patch_sync/src/version_control.rs  400
4 files changed, 1256 insertions, 0 deletions
diff --git a/llvm_tools/patch_sync/src/android_utils.rs b/llvm_tools/patch_sync/src/android_utils.rs
new file mode 100644
index 00000000..77cb4b8a
--- /dev/null
+++ b/llvm_tools/patch_sync/src/android_utils.rs
@@ -0,0 +1,62 @@
+use std::path::Path;
+use std::process::Command;
+
+use anyhow::{bail, ensure, Result};
+
+const LLVM_ANDROID_REL_PATH: &str = "toolchain/llvm_android";
+
+/// Return the Android checkout's current LLVM version.
+///
+/// This uses android_version.get_svn_revision_number, a Python function
+/// that can't be called from Rust directly. We spawn a python3 process
+/// to run it and capture its output.
+pub fn get_android_llvm_version(android_checkout: &Path) -> Result<String> {
+ let mut command = new_android_cmd(android_checkout, "python3")?;
+ command.args([
+ "-c",
+ "import android_version; print(android_version.get_svn_revision_number(), end='')",
+ ]);
+ let output = command.output()?;
+ if !output.status.success() {
+ bail!(
+ "could not get android llvm version: {}",
+ String::from_utf8_lossy(&output.stderr)
+ );
+ }
+ let out_string = String::from_utf8(output.stdout)?.trim().to_string();
+ Ok(out_string)
+}
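+
+// For reference, the command constructed above is roughly equivalent to
+// running, from inside toolchain/llvm_android:
+//
+//     python3 -c "import android_version; \
+//                 print(android_version.get_svn_revision_number(), end='')"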
+
+/// Sort the Android patches using the cherrypick_cl.py Android utility.
+///
+/// This assumes that:
+/// 1. There exists a python script called cherrypick_cl.py.
+/// 2. Calling it with the given arguments sorts the PATCHES.json file.
+/// 3. Calling it does nothing besides sorting the PATCHES.json file.
+///
+/// We aren't doing our own sorting because we shouldn't have to update
+/// patch_sync along with cherrypick_cl.py every time they change their
+/// __lt__ implementation.
+pub fn sort_android_patches(android_checkout: &Path) -> Result<()> {
+ let mut command = new_android_cmd(android_checkout, "python3")?;
+ command.args(["cherrypick_cl.py", "--reason", "patch_sync sorting"]);
+ let output = command.output()?;
+ if !output.status.success() {
+ bail!(
+ "could not sort: {}",
+ String::from_utf8_lossy(&output.stderr)
+ );
+ }
+ Ok(())
+}
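+
+// For reference, the command constructed above is roughly equivalent to
+// running, from inside toolchain/llvm_android:
+//
+//     python3 cherrypick_cl.py --reason "patch_sync sorting"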
+
+fn new_android_cmd(android_checkout: &Path, cmd: &str) -> Result<Command> {
+ let mut command = Command::new(cmd);
+ let llvm_android_dir = android_checkout.join(LLVM_ANDROID_REL_PATH);
+ ensure!(
+ llvm_android_dir.is_dir(),
+ "can't make android command; {} is not a directory",
+ llvm_android_dir.display()
+ );
+ command.current_dir(llvm_android_dir);
+ Ok(command)
+}
diff --git a/llvm_tools/patch_sync/src/main.rs b/llvm_tools/patch_sync/src/main.rs
new file mode 100644
index 00000000..c244f1c0
--- /dev/null
+++ b/llvm_tools/patch_sync/src/main.rs
@@ -0,0 +1,332 @@
+mod android_utils;
+mod patch_parsing;
+mod version_control;
+
+use std::borrow::ToOwned;
+use std::collections::BTreeSet;
+use std::path::{Path, PathBuf};
+
+use anyhow::{Context, Result};
+use structopt::StructOpt;
+
+use patch_parsing::{filter_patches_by_platform, PatchCollection, PatchDictSchema};
+use version_control::RepoSetupContext;
+
+fn main() -> Result<()> {
+ match Opt::from_args() {
+ Opt::Show {
+ cros_checkout_path,
+ android_checkout_path,
+ sync,
+ keep_unmerged,
+ } => show_subcmd(ShowOpt {
+ cros_checkout_path,
+ android_checkout_path,
+ sync,
+ keep_unmerged,
+ }),
+ Opt::Transpose {
+ cros_checkout_path,
+ cros_reviewers,
+ old_cros_ref,
+ android_checkout_path,
+ android_reviewers,
+ old_android_ref,
+ sync,
+ verbose,
+ dry_run,
+ no_commit,
+ wip,
+ disable_cq,
+ } => transpose_subcmd(TransposeOpt {
+ cros_checkout_path,
+ cros_reviewers: cros_reviewers
+ .map(|r| r.split(',').map(ToOwned::to_owned).collect())
+ .unwrap_or_default(),
+ old_cros_ref,
+ android_checkout_path,
+ android_reviewers: android_reviewers
+ .map(|r| r.split(',').map(ToOwned::to_owned).collect())
+ .unwrap_or_default(),
+ old_android_ref,
+ sync,
+ verbose,
+ dry_run,
+ no_commit,
+ wip,
+ disable_cq,
+ }),
+ }
+}
+
+struct ShowOpt {
+ cros_checkout_path: PathBuf,
+ android_checkout_path: PathBuf,
+ keep_unmerged: bool,
+ sync: bool,
+}
+
+fn show_subcmd(args: ShowOpt) -> Result<()> {
+ let ShowOpt {
+ cros_checkout_path,
+ android_checkout_path,
+ keep_unmerged,
+ sync,
+ } = args;
+ let ctx = RepoSetupContext {
+ cros_checkout: cros_checkout_path,
+ android_checkout: android_checkout_path,
+ sync_before: sync,
+ wip_mode: true, // Has no effect, as we're not making changes
+ enable_cq: false, // Has no effect, as we're not uploading anything
+ };
+ ctx.setup()?;
+ let make_collection = |platform: &str, patches_fp: &Path| -> Result<PatchCollection> {
+ let parsed_collection = PatchCollection::parse_from_file(patches_fp)
+ .with_context(|| format!("could not parse {} PATCHES.json", platform))?;
+ Ok(if keep_unmerged {
+ parsed_collection
+ } else {
+ filter_patches_by_platform(&parsed_collection, platform).map_patches(|p| {
+                // Construct the platforms set by hand; Rust 1.55 cannot use BTreeSet::from.
+ let mut platforms = BTreeSet::new();
+ platforms.insert(platform.to_string());
+ PatchDictSchema {
+ platforms,
+ ..p.clone()
+ }
+ })
+ })
+ };
+ let cur_cros_collection = make_collection("chromiumos", &ctx.cros_patches_path())?;
+ let cur_android_collection = make_collection("android", &ctx.android_patches_path())?;
+ let merged = cur_cros_collection.union(&cur_android_collection)?;
+ println!("{}", merged.serialize_patches()?);
+ Ok(())
+}
+
+struct TransposeOpt {
+ cros_checkout_path: PathBuf,
+ old_cros_ref: String,
+ android_checkout_path: PathBuf,
+ old_android_ref: String,
+ sync: bool,
+ verbose: bool,
+ dry_run: bool,
+ no_commit: bool,
+ cros_reviewers: Vec<String>,
+ android_reviewers: Vec<String>,
+ wip: bool,
+ disable_cq: bool,
+}
+
+fn transpose_subcmd(args: TransposeOpt) -> Result<()> {
+ let ctx = RepoSetupContext {
+ cros_checkout: args.cros_checkout_path,
+ android_checkout: args.android_checkout_path,
+ sync_before: args.sync,
+ wip_mode: args.wip,
+ enable_cq: !args.disable_cq,
+ };
+ ctx.setup()?;
+ let cros_patches_path = ctx.cros_patches_path();
+ let android_patches_path = ctx.android_patches_path();
+
+ // Get new Patches -------------------------------------------------------
+ let (cur_cros_collection, new_cros_patches) = patch_parsing::new_patches(
+ &cros_patches_path,
+ &ctx.old_cros_patch_contents(&args.old_cros_ref)?,
+ "chromiumos",
+ )
+ .context("finding new patches for chromiumos")?;
+ let (cur_android_collection, new_android_patches) = patch_parsing::new_patches(
+ &android_patches_path,
+ &ctx.old_android_patch_contents(&args.old_android_ref)?,
+ "android",
+ )
+ .context("finding new patches for android")?;
+
+ // Have to ignore patches that are already at the destination, even if
+ // the patches are new.
+ let new_cros_patches = new_cros_patches.subtract(&cur_android_collection)?;
+ let new_android_patches = new_android_patches.subtract(&cur_cros_collection)?;
+
+ // Need to do an extra filtering step for Android, as AOSP doesn't
+ // want patches outside of the start/end bounds.
+ let android_llvm_version: u64 = {
+ let android_llvm_version_str =
+ android_utils::get_android_llvm_version(&ctx.android_checkout)?;
+ android_llvm_version_str.parse::<u64>().with_context(|| {
+ format!(
+ "converting llvm version to u64: '{}'",
+ android_llvm_version_str
+ )
+ })?
+ };
+ let new_android_patches = new_android_patches.filter_patches(|p| {
+ match (p.get_start_version(), p.get_end_version()) {
+ (Some(start), Some(end)) => start <= android_llvm_version && android_llvm_version < end,
+ (Some(start), None) => start <= android_llvm_version,
+ (None, Some(end)) => android_llvm_version < end,
+ (None, None) => true,
+ }
+ });
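+    // Worked example (illustrative values): with android_llvm_version =
+    // 433403, a patch with (start, end) = (Some(433000), Some(433500)) is
+    // kept, while one with (Some(433500), None) is dropped; the bounds form
+    // a half-open range, [start, end).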
+
+ if args.verbose {
+ display_patches("New patches from Chromium OS", &new_cros_patches);
+ display_patches("New patches from Android", &new_android_patches);
+ }
+
+ if args.dry_run {
+ println!("--dry-run specified; skipping modifications");
+ return Ok(());
+ }
+
+ modify_repos(
+ &ctx,
+ args.no_commit,
+ ModifyOpt {
+ new_cros_patches,
+ cur_cros_collection,
+ cros_reviewers: args.cros_reviewers,
+ new_android_patches,
+ cur_android_collection,
+ android_reviewers: args.android_reviewers,
+ },
+ )
+}
+
+struct ModifyOpt {
+ new_cros_patches: PatchCollection,
+ cur_cros_collection: PatchCollection,
+ cros_reviewers: Vec<String>,
+ new_android_patches: PatchCollection,
+ cur_android_collection: PatchCollection,
+ android_reviewers: Vec<String>,
+}
+
+fn modify_repos(ctx: &RepoSetupContext, no_commit: bool, opt: ModifyOpt) -> Result<()> {
+ // Cleanup on scope exit.
+ scopeguard::defer! {
+ ctx.cleanup();
+ }
+ // Transpose Patches -----------------------------------------------------
+ let mut cur_android_collection = opt.cur_android_collection;
+ let mut cur_cros_collection = opt.cur_cros_collection;
+ if !opt.new_cros_patches.is_empty() {
+ opt.new_cros_patches
+ .transpose_write(&mut cur_android_collection)?;
+ }
+ if !opt.new_android_patches.is_empty() {
+ opt.new_android_patches
+ .transpose_write(&mut cur_cros_collection)?;
+ }
+
+ if no_commit {
+ println!("--no-commit specified; not committing or uploading");
+ return Ok(());
+ }
+ // Commit and upload for review ------------------------------------------
+    // Note that we check whether the *android* patches are empty to decide
+    // whether to upload for CrOS, and vice versa. This is a little
+    // counterintuitive, but it's the new Android patches that were just
+    // written into the CrOS collection (and vice versa), so they are what
+    // changed each repo.
+ if !opt.new_android_patches.is_empty() {
+ ctx.cros_repo_upload(&opt.cros_reviewers)
+ .context("uploading chromiumos changes")?;
+ }
+ if !opt.new_cros_patches.is_empty() {
+ if let Err(e) = android_utils::sort_android_patches(&ctx.android_checkout) {
+ eprintln!(
+ "Couldn't sort Android patches; continuing. Caused by: {}",
+ e
+ );
+ }
+ ctx.android_repo_upload(&opt.android_reviewers)
+ .context("uploading android changes")?;
+ }
+ Ok(())
+}
+
+fn display_patches(prelude: &str, collection: &PatchCollection) {
+ println!("{}", prelude);
+ if collection.patches.is_empty() {
+ println!(" [No Patches]");
+ return;
+ }
+ println!("{}", collection);
+}
+
+#[derive(Debug, structopt::StructOpt)]
+#[structopt(name = "patch_sync", about = "A pipeline for syncing LLVM patches between ChromiumOS and Android")]
+enum Opt {
+    /// Show a combined view of the two PATCHES.json files, without making any changes.
+ #[allow(dead_code)]
+ Show {
+ #[structopt(parse(from_os_str))]
+ cros_checkout_path: PathBuf,
+ #[structopt(parse(from_os_str))]
+ android_checkout_path: PathBuf,
+
+ /// Keep a patch's platform field even if it's not merged at that platform.
+ #[structopt(long)]
+ keep_unmerged: bool,
+
+ /// Run repo sync before transposing.
+ #[structopt(short, long)]
+ sync: bool,
+ },
+    /// Transpose new patches between the ChromiumOS and Android
+    /// PATCHES.json files.
+ Transpose {
+ /// Path to the ChromiumOS source repo checkout.
+ #[structopt(long = "cros-checkout", parse(from_os_str))]
+ cros_checkout_path: PathBuf,
+
+ /// Emails to send review requests to during Chromium OS upload.
+ /// Comma separated.
+ #[structopt(long = "cros-rev")]
+ cros_reviewers: Option<String>,
+
+ /// Git ref (e.g. hash) for the ChromiumOS overlay to use as the base.
+ #[structopt(long = "overlay-base-ref")]
+ old_cros_ref: String,
+
+ /// Path to the Android Open Source Project source repo checkout.
+ #[structopt(long = "aosp-checkout", parse(from_os_str))]
+ android_checkout_path: PathBuf,
+
+ /// Emails to send review requests to during Android upload.
+ /// Comma separated.
+ #[structopt(long = "aosp-rev")]
+ android_reviewers: Option<String>,
+
+ /// Git ref (e.g. hash) for the llvm_android repo to use as the base.
+ #[structopt(long = "aosp-base-ref")]
+ old_android_ref: String,
+
+ /// Run repo sync before transposing.
+ #[structopt(short, long)]
+ sync: bool,
+
+        /// Print information about the new patches to stdout.
+ #[structopt(short, long)]
+ verbose: bool,
+
+        /// Do not change any files. Useful in combination with `--verbose`.
+        /// Implies `--no-commit`.
+ #[structopt(long)]
+ dry_run: bool,
+
+ /// Do not commit or upload any changes made.
+ #[structopt(long)]
+ no_commit: bool,
+
+ /// Upload and send things for review, but mark as WIP and send no
+ /// emails.
+ #[structopt(long)]
+ wip: bool,
+
+ /// Don't run CQ if set. Only has an effect if uploading.
+ #[structopt(long)]
+ disable_cq: bool,
+ },
+}
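+
+// Example invocation of the `transpose` subcommand (the paths, refs, and
+// email below are hypothetical):
+//
+//     patch_sync transpose \
+//         --cros-checkout ~/chromiumos --cros-rev someone@google.com \
+//         --overlay-base-ref cros/main \
+//         --aosp-checkout ~/android --aosp-rev someone@google.com \
+//         --aosp-base-ref aosp/master \
+//         --verbose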
diff --git a/llvm_tools/patch_sync/src/patch_parsing.rs b/llvm_tools/patch_sync/src/patch_parsing.rs
new file mode 100644
index 00000000..124f0d6f
--- /dev/null
+++ b/llvm_tools/patch_sync/src/patch_parsing.rs
@@ -0,0 +1,462 @@
+use std::collections::{BTreeMap, BTreeSet};
+use std::fs::{copy, File};
+use std::io::{BufRead, BufReader, Read, Write};
+use std::path::{Path, PathBuf};
+
+use anyhow::{anyhow, Context, Result};
+use serde::{Deserialize, Serialize};
+use sha2::{Digest, Sha256};
+
+/// JSON serde struct.
+// FIXME(b/221489531): Remove when we clear out start_version and
+// end_version.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct PatchDictSchema {
+ /// [deprecated(since = "1.1", note = "Use version_range")]
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub end_version: Option<u64>,
+ pub metadata: Option<BTreeMap<String, serde_json::Value>>,
+ #[serde(default, skip_serializing_if = "BTreeSet::is_empty")]
+ pub platforms: BTreeSet<String>,
+ pub rel_patch_path: String,
+ /// [deprecated(since = "1.1", note = "Use version_range")]
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub start_version: Option<u64>,
+ pub version_range: Option<VersionRange>,
+}
+
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
+pub struct VersionRange {
+ pub from: Option<u64>,
+ pub until: Option<u64>,
+}
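+
+// A plausible PATCHES.json entry matching the schema above (the values are
+// illustrative, not taken from a real PATCHES.json):
+//
+//     {
+//         "metadata": { "title": "An example patch title" },
+//         "platforms": ["chromiumos"],
+//         "rel_patch_path": "patches/example.patch",
+//         "version_range": { "from": 433403, "until": 433500 }
+//     }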
+
+// FIXME(b/221489531): Remove when we clear out start_version and
+// end_version.
+impl PatchDictSchema {
+ pub fn get_start_version(&self) -> Option<u64> {
+ self.version_range
+ .map(|x| x.from)
+ .unwrap_or(self.start_version)
+ }
+
+ pub fn get_end_version(&self) -> Option<u64> {
+ self.version_range
+ .map(|x| x.until)
+ .unwrap_or(self.end_version)
+ }
+}
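+
+// Note the fallback semantics above (illustrative): with version_range =
+// Some(VersionRange { from: None, until: Some(5) }) and start_version =
+// Some(1), get_start_version() returns None rather than Some(1). A present
+// version_range wins outright, since `map` yields Some(None) and
+// `unwrap_or` never fires.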
+
+/// Struct to keep track of patches and their relative paths.
+#[derive(Debug, Clone)]
+pub struct PatchCollection {
+ pub patches: Vec<PatchDictSchema>,
+ pub workdir: PathBuf,
+}
+
+impl PatchCollection {
+    /// Create a `PatchCollection` from a PATCHES.json file path.
+ pub fn parse_from_file(json_file: &Path) -> Result<Self> {
+ Ok(Self {
+ patches: serde_json::from_reader(File::open(json_file)?)?,
+ workdir: json_file
+ .parent()
+ .ok_or_else(|| anyhow!("failed to get json_file parent"))?
+ .to_path_buf(),
+ })
+ }
+
+    /// Create a `PatchCollection` from a JSON string and a workdir.
+ pub fn parse_from_str(workdir: PathBuf, contents: &str) -> Result<Self> {
+ Ok(Self {
+ patches: serde_json::from_str(contents).context("parsing from str")?,
+ workdir,
+ })
+ }
+
+    /// Copy this collection, keeping only the patches that match the given criterion.
+ pub fn filter_patches(&self, f: impl FnMut(&PatchDictSchema) -> bool) -> Self {
+ Self {
+ patches: self.patches.iter().cloned().filter(f).collect(),
+ workdir: self.workdir.clone(),
+ }
+ }
+
+ /// Map over the patches.
+ pub fn map_patches(&self, f: impl FnMut(&PatchDictSchema) -> PatchDictSchema) -> Self {
+ Self {
+ patches: self.patches.iter().map(f).collect(),
+ workdir: self.workdir.clone(),
+ }
+ }
+
+ /// Return true if the collection is tracking any patches.
+ pub fn is_empty(&self) -> bool {
+ self.patches.is_empty()
+ }
+
+    /// Compute the set subtraction (self minus subtrahend), returning a new
+    /// `PatchCollection` that keeps the minuend's workdir.
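+    ///
+    /// Illustrative: if `self` tracks patches {a, b, c} (compared by content
+    /// hash) and `subtrahend` tracks {b}, the result tracks {a, c}.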
+ pub fn subtract(&self, subtrahend: &Self) -> Result<Self> {
+ let mut new_patches = Vec::new();
+ // This is O(n^2) when it could be much faster, but n is always going to be less
+ // than 1k and speed is not important here.
+ for our_patch in &self.patches {
+ let found_in_sub = subtrahend.patches.iter().any(|sub_patch| {
+ let hash1 = subtrahend
+ .hash_from_rel_patch(sub_patch)
+ .expect("getting hash from subtrahend patch");
+ let hash2 = self
+ .hash_from_rel_patch(our_patch)
+ .expect("getting hash from our patch");
+ hash1 == hash2
+ });
+ if !found_in_sub {
+ new_patches.push(our_patch.clone());
+ }
+ }
+ Ok(Self {
+ patches: new_patches,
+ workdir: self.workdir.clone(),
+ })
+ }
+
+ pub fn union(&self, other: &Self) -> Result<Self> {
+ self.union_helper(
+ other,
+ |p| self.hash_from_rel_patch(p),
+ |p| other.hash_from_rel_patch(p),
+ )
+ }
+
+ fn union_helper(
+ &self,
+ other: &Self,
+ our_hash_f: impl Fn(&PatchDictSchema) -> Result<String>,
+ their_hash_f: impl Fn(&PatchDictSchema) -> Result<String>,
+ ) -> Result<Self> {
+ // 1. For all our patches:
+ // a. If there exists a matching patch hash from `other`:
+ // i. Create a new patch with merged platform info,
+ // ii. add the new patch to our new collection.
+ // iii. Mark the other patch as "merged"
+ // b. Otherwise, copy our patch to the new collection
+ // 2. For all unmerged patches from the `other`
+ // a. Copy their patch into the new collection
+ let mut combined_patches = Vec::new();
+ let mut other_merged = vec![false; other.patches.len()];
+
+ // 1.
+ for p in &self.patches {
+ let our_hash = our_hash_f(p)?;
+ let mut found = false;
+ // a.
+ for (idx, merged) in other_merged.iter_mut().enumerate() {
+ if !*merged {
+ let other_p = &other.patches[idx];
+ let their_hash = their_hash_f(other_p)?;
+ if our_hash == their_hash {
+ // i.
+ let new_platforms =
+ p.platforms.union(&other_p.platforms).cloned().collect();
+ // ii.
+ combined_patches.push(PatchDictSchema {
+ rel_patch_path: p.rel_patch_path.clone(),
+ start_version: p.start_version,
+ end_version: p.end_version,
+ platforms: new_platforms,
+ metadata: p.metadata.clone(),
+ version_range: p.version_range,
+ });
+ // iii.
+ *merged = true;
+ found = true;
+ break;
+ }
+ }
+ }
+ // b.
+ if !found {
+ combined_patches.push(p.clone());
+ }
+ }
+ // 2.
+ // Add any remaining, other-only patches.
+ for (idx, merged) in other_merged.iter().enumerate() {
+ if !*merged {
+ combined_patches.push(other.patches[idx].clone());
+ }
+ }
+
+ Ok(Self {
+ workdir: self.workdir.clone(),
+ patches: combined_patches,
+ })
+ }
+
+ /// Copy all patches from this collection into another existing collection, and write that
+ /// to the existing collection's file.
+ pub fn transpose_write(&self, existing_collection: &mut Self) -> Result<()> {
+ for p in &self.patches {
+ let original_file_path = self.workdir.join(&p.rel_patch_path);
+ let copy_file_path = existing_collection.workdir.join(&p.rel_patch_path);
+ copy_create_parents(&original_file_path, &copy_file_path)?;
+ existing_collection.patches.push(p.clone());
+ }
+ existing_collection.write_patches_json("PATCHES.json")
+ }
+
+ /// Write out the patch collection contents to a PATCHES.json file.
+ fn write_patches_json(&self, filename: &str) -> Result<()> {
+ let write_path = self.workdir.join(filename);
+ let mut new_patches_file = File::create(&write_path)
+ .with_context(|| format!("writing to {}", write_path.display()))?;
+ new_patches_file.write_all(self.serialize_patches()?.as_bytes())?;
+ Ok(())
+ }
+
+ pub fn serialize_patches(&self) -> Result<String> {
+ let mut serialization_buffer = Vec::<u8>::new();
+ // Four spaces to indent json serialization.
+ let mut serializer = serde_json::Serializer::with_formatter(
+ &mut serialization_buffer,
+ serde_json::ser::PrettyFormatter::with_indent(b" "),
+ );
+ self.patches
+ .serialize(&mut serializer)
+ .context("serializing patches to JSON")?;
+ // Append a newline at the end if not present. This is necessary to get
+ // past some pre-upload hooks.
+ if serialization_buffer.last() != Some(&b'\n') {
+ serialization_buffer.push(b'\n');
+ }
+ Ok(std::str::from_utf8(&serialization_buffer)?.to_string())
+ }
+
+ /// Return whether a given patch actually exists on the file system.
+ pub fn patch_exists(&self, patch: &PatchDictSchema) -> bool {
+ self.workdir.join(&patch.rel_patch_path).exists()
+ }
+
+ fn hash_from_rel_patch(&self, patch: &PatchDictSchema) -> Result<String> {
+ hash_from_patch_path(&self.workdir.join(&patch.rel_patch_path))
+ }
+}
+
+impl std::fmt::Display for PatchCollection {
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+ for (i, p) in self.patches.iter().enumerate() {
+ let title = p
+ .metadata
+ .as_ref()
+ .and_then(|x| x.get("title"))
+ .and_then(serde_json::Value::as_str)
+ .unwrap_or("[No Title]");
+ let path = self.workdir.join(&p.rel_patch_path);
+ writeln!(f, "* {}", title)?;
+ if i == self.patches.len() - 1 {
+ write!(f, " {}", path.display())?;
+ } else {
+ writeln!(f, " {}", path.display())?;
+ }
+ }
+ Ok(())
+ }
+}
+
+/// Return the current (platform-filtered) PatchCollection along with a
+/// PatchCollection holding only the patches added since `old_patch_contents`.
+pub fn new_patches(
+ patches_path: &Path,
+ old_patch_contents: &str,
+ platform: &str,
+) -> Result<(PatchCollection, PatchCollection)> {
+ let cur_collection = PatchCollection::parse_from_file(patches_path)
+ .with_context(|| format!("parsing {} PATCHES.json", platform))?;
+ let cur_collection = filter_patches_by_platform(&cur_collection, platform);
+ let cur_collection = cur_collection.filter_patches(|p| cur_collection.patch_exists(p));
+ let new_patches: PatchCollection = {
+ let old_collection = PatchCollection::parse_from_str(
+ patches_path.parent().unwrap().to_path_buf(),
+ old_patch_contents,
+ )?;
+ let old_collection = old_collection.filter_patches(|p| old_collection.patch_exists(p));
+ cur_collection.subtract(&old_collection)?
+ };
+ let new_patches = new_patches.map_patches(|p| {
+ let mut platforms = BTreeSet::new();
+ platforms.extend(["android".to_string(), "chromiumos".to_string()]);
+ PatchDictSchema {
+ platforms: platforms.union(&p.platforms).cloned().collect(),
+ ..p.to_owned()
+ }
+ });
+ Ok((cur_collection, new_patches))
+}
+
+/// Create a new collection with only the patches that apply to the
+/// given platform.
+///
+/// If there's no platform listed, the patch should still apply if the patch file exists.
+pub fn filter_patches_by_platform(collection: &PatchCollection, platform: &str) -> PatchCollection {
+ collection.filter_patches(|p| {
+ p.platforms.contains(platform) || (p.platforms.is_empty() && collection.patch_exists(p))
+ })
+}
+
+/// Get the hash from the patch file contents.
+///
+/// Not every patch file actually contains its own hash, so we compute
+/// the hash ourselves when one isn't found.
+fn hash_from_patch(patch_contents: impl Read) -> Result<String> {
+ let mut reader = BufReader::new(patch_contents);
+ let mut buf = String::new();
+ reader.read_line(&mut buf)?;
+ let mut first_line_iter = buf.trim().split(' ').fuse();
+ let (fst_word, snd_word) = (first_line_iter.next(), first_line_iter.next());
+ if let (Some("commit" | "From"), Some(hash_str)) = (fst_word, snd_word) {
+ // If the first line starts with either "commit" or "From", the following
+ // text is almost certainly a commit hash.
+ Ok(hash_str.to_string())
+ } else {
+ // This is an annoying case where the patch isn't actually a commit.
+ // So we'll hash the entire file, and hope that's sufficient.
+ let mut hasher = Sha256::new();
+ hasher.update(&buf); // Have to hash the first line.
+        buf.clear(); // read_to_string appends; don't hash the first line twice.
+        reader.read_to_string(&mut buf)?;
+        hasher.update(buf); // Hash the rest of the file.
+ let sha = hasher.finalize();
+ Ok(format!("{:x}", &sha))
+ }
+}
+
+fn hash_from_patch_path(patch: &Path) -> Result<String> {
+ let f = File::open(patch).with_context(|| format!("opening patch file {}", patch.display()))?;
+ hash_from_patch(f)
+}
+
+/// Copy a file from one path to another, and create any parent
+/// directories along the way.
+fn copy_create_parents(from: &Path, to: &Path) -> Result<()> {
+ let to_parent = to
+ .parent()
+ .with_context(|| format!("getting parent of {}", to.display()))?;
+ if !to_parent.exists() {
+ std::fs::create_dir_all(to_parent)?;
+ }
+
+ copy(&from, &to)
+ .with_context(|| format!("copying file from {} to {}", &from.display(), &to.display()))?;
+ Ok(())
+}
+
+#[cfg(test)]
+mod test {
+
+ use super::*;
+
+ /// Test we can extract the hash from patch files.
+ #[test]
+ fn test_hash_from_patch() {
+ // Example git patch from Gerrit
+ let desired_hash = "004be4037e1e9c6092323c5c9268acb3ecf9176c";
+ let test_file_contents = "commit 004be4037e1e9c6092323c5c9268acb3ecf9176c\n\
+ Author: An Author <some_email>\n\
+ Date: Thu Aug 6 12:34:16 2020 -0700";
+ assert_eq!(
+ &hash_from_patch(test_file_contents.as_bytes()).unwrap(),
+ desired_hash
+ );
+
+ // Example git patch from upstream
+ let desired_hash = "6f85225ef3791357f9b1aa097b575b0a2b0dff48";
+ let test_file_contents = "From 6f85225ef3791357f9b1aa097b575b0a2b0dff48\n\
+ Mon Sep 17 00:00:00 2001\n\
+ From: Another Author <another_email>\n\
+ Date: Wed, 18 Aug 2021 15:03:03 -0700";
+ assert_eq!(
+ &hash_from_patch(test_file_contents.as_bytes()).unwrap(),
+ desired_hash
+ );
+ }
+
+ #[test]
+ fn test_union() {
+ let patch1 = PatchDictSchema {
+ start_version: Some(0),
+ end_version: Some(1),
+ rel_patch_path: "a".into(),
+ metadata: None,
+ platforms: BTreeSet::from(["x".into()]),
+ version_range: Some(VersionRange {
+ from: Some(0),
+ until: Some(1),
+ }),
+ };
+ let patch2 = PatchDictSchema {
+ rel_patch_path: "b".into(),
+ platforms: BTreeSet::from(["x".into(), "y".into()]),
+ ..patch1.clone()
+ };
+ let patch3 = PatchDictSchema {
+ platforms: BTreeSet::from(["z".into(), "x".into()]),
+ ..patch1.clone()
+ };
+ let collection1 = PatchCollection {
+ workdir: PathBuf::new(),
+ patches: vec![patch1, patch2],
+ };
+ let collection2 = PatchCollection {
+ workdir: PathBuf::new(),
+ patches: vec![patch3],
+ };
+ let union = collection1
+ .union_helper(
+ &collection2,
+ |p| Ok(p.rel_patch_path.to_string()),
+ |p| Ok(p.rel_patch_path.to_string()),
+ )
+ .expect("could not create union");
+ assert_eq!(union.patches.len(), 2);
+ assert_eq!(
+ union.patches[0].platforms.iter().collect::<Vec<&String>>(),
+ vec!["x", "z"]
+ );
+ assert_eq!(
+ union.patches[1].platforms.iter().collect::<Vec<&String>>(),
+ vec!["x", "y"]
+ );
+ }
+
+ #[test]
+ fn test_union_empties() {
+ let patch1 = PatchDictSchema {
+ start_version: Some(0),
+ end_version: Some(1),
+ rel_patch_path: "a".into(),
+ metadata: None,
+ platforms: Default::default(),
+ version_range: Some(VersionRange {
+ from: Some(0),
+ until: Some(1),
+ }),
+ };
+ let collection1 = PatchCollection {
+ workdir: PathBuf::new(),
+ patches: vec![patch1.clone()],
+ };
+ let collection2 = PatchCollection {
+ workdir: PathBuf::new(),
+ patches: vec![patch1],
+ };
+ let union = collection1
+ .union_helper(
+ &collection2,
+ |p| Ok(p.rel_patch_path.to_string()),
+ |p| Ok(p.rel_patch_path.to_string()),
+ )
+ .expect("could not create union");
+ assert_eq!(union.patches.len(), 1);
+ assert_eq!(union.patches[0].platforms.len(), 0);
+ }
+}
diff --git a/llvm_tools/patch_sync/src/version_control.rs b/llvm_tools/patch_sync/src/version_control.rs
new file mode 100644
index 00000000..e07d39d6
--- /dev/null
+++ b/llvm_tools/patch_sync/src/version_control.rs
@@ -0,0 +1,400 @@
+use anyhow::{anyhow, bail, ensure, Context, Result};
+use regex::Regex;
+use std::ffi::OsStr;
+use std::fs;
+use std::path::{Path, PathBuf};
+use std::process::{Command, Output};
+
+const CHROMIUMOS_OVERLAY_REL_PATH: &str = "src/third_party/chromiumos-overlay";
+const ANDROID_LLVM_REL_PATH: &str = "toolchain/llvm_android";
+
+const CROS_MAIN_BRANCH: &str = "main";
+const ANDROID_MAIN_BRANCH: &str = "master"; // nocheck
+const WORK_BRANCH_NAME: &str = "__patch_sync_tmp";
+
+/// Context struct to keep track of both Chromium OS and Android checkouts.
+#[derive(Debug)]
+pub struct RepoSetupContext {
+ pub cros_checkout: PathBuf,
+ pub android_checkout: PathBuf,
+ /// Run `repo sync` before doing any comparisons.
+ pub sync_before: bool,
+ pub wip_mode: bool,
+ pub enable_cq: bool,
+}
+
+impl RepoSetupContext {
+ pub fn setup(&self) -> Result<()> {
+ if self.sync_before {
+ {
+ let crpp = self.cros_patches_path();
+ let cros_git = crpp.parent().unwrap();
+ git_cd_cmd(cros_git, ["checkout", CROS_MAIN_BRANCH])?;
+ }
+ {
+ let anpp = self.android_patches_path();
+ let android_git = anpp.parent().unwrap();
+ git_cd_cmd(android_git, ["checkout", ANDROID_MAIN_BRANCH])?;
+ }
+ repo_cd_cmd(&self.cros_checkout, &["sync", CHROMIUMOS_OVERLAY_REL_PATH])?;
+ repo_cd_cmd(&self.android_checkout, &["sync", ANDROID_LLVM_REL_PATH])?;
+ }
+ Ok(())
+ }
+
+ pub fn cros_repo_upload<S: AsRef<str>>(&self, reviewers: &[S]) -> Result<()> {
+ let llvm_dir = self
+ .cros_checkout
+ .join(&CHROMIUMOS_OVERLAY_REL_PATH)
+ .join("sys-devel/llvm");
+ ensure!(
+ llvm_dir.is_dir(),
+ "CrOS LLVM dir {} is not a directory",
+ llvm_dir.display()
+ );
+ Self::rev_bump_llvm(&llvm_dir)?;
+ let mut extra_args = Vec::new();
+ for reviewer in reviewers {
+ extra_args.push("--re");
+ extra_args.push(reviewer.as_ref());
+ }
+ if self.wip_mode {
+ extra_args.push("--wip");
+ extra_args.push("--no-emails");
+ }
+ if self.enable_cq {
+ extra_args.push("--label=Commit-Queue+1");
+ }
+ Self::repo_upload(
+ &self.cros_checkout,
+ CHROMIUMOS_OVERLAY_REL_PATH,
+ &Self::build_commit_msg(
+ "llvm: Synchronize patches from android",
+ "android",
+ "chromiumos",
+ "BUG=None\nTEST=CQ",
+ ),
+ extra_args,
+ )
+ }
+
+ pub fn android_repo_upload<S: AsRef<str>>(&self, reviewers: &[S]) -> Result<()> {
+ let mut extra_args = Vec::new();
+ for reviewer in reviewers {
+ extra_args.push("--re");
+ extra_args.push(reviewer.as_ref());
+ }
+ if self.wip_mode {
+ extra_args.push("--wip");
+ extra_args.push("--no-emails");
+ }
+ if self.enable_cq {
+ extra_args.push("--label=Presubmit-Ready+1");
+ }
+ Self::repo_upload(
+ &self.android_checkout,
+ ANDROID_LLVM_REL_PATH,
+ &Self::build_commit_msg(
+ "Synchronize patches from chromiumos",
+ "chromiumos",
+ "android",
+ "Test: N/A",
+ ),
+ extra_args,
+ )
+ }
+
+ fn cros_cleanup(&self) -> Result<()> {
+ let git_path = self.cros_checkout.join(CHROMIUMOS_OVERLAY_REL_PATH);
+ Self::cleanup_branch(&git_path, CROS_MAIN_BRANCH, WORK_BRANCH_NAME)
+ .with_context(|| format!("cleaning up branch {}", WORK_BRANCH_NAME))?;
+ Ok(())
+ }
+
+ fn android_cleanup(&self) -> Result<()> {
+ let git_path = self.android_checkout.join(ANDROID_LLVM_REL_PATH);
+ Self::cleanup_branch(&git_path, ANDROID_MAIN_BRANCH, WORK_BRANCH_NAME)
+ .with_context(|| format!("cleaning up branch {}", WORK_BRANCH_NAME))?;
+ Ok(())
+ }
+
+ /// Wrapper around cleanups to ensure both get run, even if errors appear.
+ pub fn cleanup(&self) {
+ if let Err(e) = self.cros_cleanup() {
+ eprintln!("Failed to clean up chromiumos, continuing: {}", e);
+ }
+ if let Err(e) = self.android_cleanup() {
+ eprintln!("Failed to clean up android, continuing: {}", e);
+ }
+ }
+
+ /// Get the Android path to the PATCHES.json file
+ pub fn android_patches_path(&self) -> PathBuf {
+ self.android_checkout
+ .join(&ANDROID_LLVM_REL_PATH)
+ .join("patches/PATCHES.json")
+ }
+
+ /// Get the Chromium OS path to the PATCHES.json file
+ pub fn cros_patches_path(&self) -> PathBuf {
+ self.cros_checkout
+ .join(&CHROMIUMOS_OVERLAY_REL_PATH)
+ .join("sys-devel/llvm/files/PATCHES.json")
+ }
+
+ /// Return the contents of the old PATCHES.json from Chromium OS
+ pub fn old_cros_patch_contents(&self, hash: &str) -> Result<String> {
+ Self::old_file_contents(
+ hash,
+ &self.cros_checkout.join(CHROMIUMOS_OVERLAY_REL_PATH),
+ Path::new("sys-devel/llvm/files/PATCHES.json"),
+ )
+ }
+
+ /// Return the contents of the old PATCHES.json from android
+ pub fn old_android_patch_contents(&self, hash: &str) -> Result<String> {
+ Self::old_file_contents(
+ hash,
+ &self.android_checkout.join(ANDROID_LLVM_REL_PATH),
+ Path::new("patches/PATCHES.json"),
+ )
+ }
+
+ fn repo_upload<'a, I: IntoIterator<Item = &'a str>>(
+ checkout_path: &Path,
+ subproject_git_wd: &'a str,
+ commit_msg: &str,
+ extra_flags: I,
+ ) -> Result<()> {
+ let git_path = &checkout_path.join(&subproject_git_wd);
+ ensure!(
+ git_path.is_dir(),
+ "git_path {} is not a directory",
+ git_path.display()
+ );
+ repo_cd_cmd(
+ checkout_path,
+ &["start", WORK_BRANCH_NAME, subproject_git_wd],
+ )?;
+ let base_args = ["upload", "--br", WORK_BRANCH_NAME, "-y", "--verify"];
+ let new_args = base_args
+ .iter()
+ .copied()
+ .chain(extra_flags)
+ .chain(["--", subproject_git_wd]);
+ git_cd_cmd(git_path, &["add", "."])
+ .and_then(|_| git_cd_cmd(git_path, &["commit", "-m", commit_msg]))
+ .and_then(|_| repo_cd_cmd(checkout_path, new_args))?;
+ Ok(())
+ }
+
+ /// Clean up the git repo after we're done with it.
+ fn cleanup_branch(git_path: &Path, base_branch: &str, rm_branch: &str) -> Result<()> {
+ git_cd_cmd(git_path, ["restore", "."])?;
+ git_cd_cmd(git_path, ["clean", "-fd"])?;
+ git_cd_cmd(git_path, ["checkout", base_branch])?;
+        // It's acceptable for the branch deletion to fail, most commonly
+        // because the branch does not exist. Since that case is expected
+        // and frequent, we don't report this command's failures; doing so
+        // would only pollute the stderr logs.
+ let _ = git_cd_cmd(git_path, ["branch", "-D", rm_branch]);
+ Ok(())
+ }
+
+ /// Increment LLVM's revision number
+ fn rev_bump_llvm(llvm_dir: &Path) -> Result<PathBuf> {
+ let ebuild = find_ebuild(llvm_dir)
+ .with_context(|| format!("finding ebuild in {} to rev bump", llvm_dir.display()))?;
+ let ebuild_dir = ebuild.parent().unwrap();
+ let suffix_matcher = Regex::new(r"-r([0-9]+)\.ebuild").unwrap();
+ let ebuild_name = ebuild
+ .file_name()
+ .unwrap()
+ .to_str()
+ .ok_or_else(|| anyhow!("converting ebuild filename to utf-8"))?;
+ let new_path = if let Some(captures) = suffix_matcher.captures(ebuild_name) {
+ let full_suffix = captures.get(0).unwrap().as_str();
+ let cur_version = captures.get(1).unwrap().as_str().parse::<u32>().unwrap();
+ let new_filename =
+ ebuild_name.replace(full_suffix, &format!("-r{}.ebuild", cur_version + 1_u32));
+ let new_path = ebuild_dir.join(new_filename);
+ fs::rename(&ebuild, &new_path)?;
+ new_path
+ } else {
+ // File did not end in a revision. We should append -r1 to the end.
+ let new_filename = ebuild.file_stem().unwrap().to_string_lossy() + "-r1.ebuild";
+ let new_path = ebuild_dir.join(new_filename.as_ref());
+ fs::rename(&ebuild, &new_path)?;
+ new_path
+ };
+ Ok(new_path)
+ }
+
+ /// Return the contents of an old file in git
+ fn old_file_contents(hash: &str, pwd: &Path, file: &Path) -> Result<String> {
+ let git_ref = format!(
+ "{}:{}",
+ hash,
+ file.to_str()
+ .ok_or_else(|| anyhow!("failed to convert filepath to str"))?
+ );
+ let output = git_cd_cmd(pwd, &["show", &git_ref])?;
+ if !output.status.success() {
+ bail!("could not get old file contents for {}", &git_ref)
+ }
+ String::from_utf8(output.stdout)
+ .with_context(|| format!("converting {} file contents to UTF-8", &git_ref))
+ }
+
+ /// Create the commit message
+ fn build_commit_msg(subj: &str, from: &str, to: &str, footer: &str) -> String {
+ format!(
+ "[patch_sync] {}\n\n\
+Copies new PATCHES.json changes from {} to {}.\n
+For questions about this job, contact chromeos-toolchain@google.com\n\n
+{}",
+ subj, from, to, footer
+ )
+ }
+}
+
+/// Return the path of an ebuild located within the given directory.
+fn find_ebuild(dir: &Path) -> Result<PathBuf> {
+    // Gather all the ebuild file paths whose names contain "_pre", sort them
+    // by revision number, and return the highest-revisioned one.
+
+ let ebuild_rev_matcher = Regex::new(r"-r([0-9]+)\.ebuild").unwrap();
+ // For LLVM ebuilds, we only want to check for ebuilds that have this in their file name.
+    let pre_heuristic = "_pre";
+    // Get an iterator over all ebuilds with a "_pre" in the file name.
+ let ebuild_candidates = fs::read_dir(dir)?.filter_map(|entry| {
+ let entry = entry.ok()?;
+ let path = entry.path();
+ if path.extension()? != "ebuild" {
+ // Not an ebuild, ignore.
+ return None;
+ }
+ let stem = path.file_stem()?.to_str()?;
+        if stem.contains(pre_heuristic) {
+ return Some(path);
+ }
+ None
+ });
+ let try_parse_ebuild_rev = |path: PathBuf| -> Option<(u64, PathBuf)> {
+ let name = path.file_name()?;
+ if let Some(rev_match) = ebuild_rev_matcher.captures(name.to_str()?) {
+ let rev_str = rev_match.get(1)?;
+ let rev_num = rev_str.as_str().parse::<u64>().ok()?;
+ return Some((rev_num, path));
+ }
+ // If it doesn't have a revision, then it's revision 0.
+ Some((0, path))
+ };
+ let mut sorted_candidates: Vec<_> =
+ ebuild_candidates.filter_map(try_parse_ebuild_rev).collect();
+ sorted_candidates.sort_unstable_by_key(|x| x.0);
+ let highest_rev_ebuild = sorted_candidates
+ .pop()
+ .ok_or_else(|| anyhow!("could not find ebuild"))?;
+ Ok(highest_rev_ebuild.1)
+}
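+
+// Illustrative: given llvm-13.0_pre433403.ebuild and
+// llvm-13.0_pre433403-r2.ebuild in `dir`, find_ebuild returns the path of
+// the -r2 file (an explicit revision 2 beats the implicit revision 0).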
+
+/// Run a given git command from inside a specified git dir.
+pub fn git_cd_cmd<I, S>(pwd: &Path, args: I) -> Result<Output>
+where
+ I: IntoIterator<Item = S>,
+ S: AsRef<OsStr>,
+{
+ let mut command = Command::new("git");
+ command.current_dir(&pwd).args(args);
+ let output = command.output()?;
+ if !output.status.success() {
+ bail!(
+ "git command failed:\n {:?}\nstdout --\n{}\nstderr --\n{}",
+ command,
+ String::from_utf8_lossy(&output.stdout),
+ String::from_utf8_lossy(&output.stderr),
+ );
+ }
+ Ok(output)
+}
+
+pub fn repo_cd_cmd<I, S>(pwd: &Path, args: I) -> Result<()>
+where
+ I: IntoIterator<Item = S>,
+ S: AsRef<OsStr>,
+{
+ let mut command = Command::new("repo");
+ command.current_dir(&pwd).args(args);
+ let status = command.status()?;
+ if !status.success() {
+ bail!("repo command failed:\n {:?} \n", command)
+ }
+ Ok(())
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use rand::prelude::Rng;
+ use std::env;
+ use std::fs::File;
+
+ #[test]
+ fn test_revbump_ebuild() {
+ // Random number to append at the end of the test folder to prevent conflicts.
+ let rng: u32 = rand::thread_rng().gen();
+ let llvm_dir = env::temp_dir().join(format!("patch_sync_test_{}", rng));
+ fs::create_dir(&llvm_dir).expect("creating llvm dir in temp directory");
+
+ {
+ // With revision
+ let ebuild_name = "llvm-13.0_pre433403_p20211019-r10.ebuild";
+ let ebuild_path = llvm_dir.join(ebuild_name);
+ File::create(&ebuild_path).expect("creating test ebuild file");
+ let new_ebuild_path =
+ RepoSetupContext::rev_bump_llvm(&llvm_dir).expect("rev bumping the ebuild");
+ assert!(
+ new_ebuild_path.ends_with("llvm-13.0_pre433403_p20211019-r11.ebuild"),
+ "{}",
+ new_ebuild_path.display()
+ );
+ fs::remove_file(new_ebuild_path).expect("removing renamed ebuild file");
+ }
+ {
+ // Without revision
+ let ebuild_name = "llvm-13.0_pre433403_p20211019.ebuild";
+ let ebuild_path = llvm_dir.join(ebuild_name);
+ File::create(&ebuild_path).expect("creating test ebuild file");
+ let new_ebuild_path =
+ RepoSetupContext::rev_bump_llvm(&llvm_dir).expect("rev bumping the ebuild");
+ assert!(
+ new_ebuild_path.ends_with("llvm-13.0_pre433403_p20211019-r1.ebuild"),
+ "{}",
+ new_ebuild_path.display()
+ );
+ fs::remove_file(new_ebuild_path).expect("removing renamed ebuild file");
+ }
+ {
+ // With both
+ let ebuild_name = "llvm-13.0_pre433403_p20211019.ebuild";
+ let ebuild_path = llvm_dir.join(ebuild_name);
+ File::create(&ebuild_path).expect("creating test ebuild file");
+ let ebuild_link_name = "llvm-13.0_pre433403_p20211019-r2.ebuild";
+ let ebuild_link_path = llvm_dir.join(ebuild_link_name);
+ File::create(&ebuild_link_path).expect("creating test ebuild link file");
+ let new_ebuild_path =
+ RepoSetupContext::rev_bump_llvm(&llvm_dir).expect("rev bumping the ebuild");
+ assert!(
+ new_ebuild_path.ends_with("llvm-13.0_pre433403_p20211019-r3.ebuild"),
+ "{}",
+ new_ebuild_path.display()
+ );
+ fs::remove_file(new_ebuild_path).expect("removing renamed ebuild link file");
+ fs::remove_file(ebuild_path).expect("removing renamed ebuild file");
+ }
+
+ fs::remove_dir(&llvm_dir).expect("removing temp test dir");
+ }
+}