Diffstat (limited to 'afdo_tools')
-rwxr-xr-x  afdo_tools/update_kernel_afdo          424
-rw-r--r--  afdo_tools/update_kernel_afdo.cfg        2
-rwxr-xr-x  afdo_tools/update_kernel_afdo.py       803
-rwxr-xr-x  afdo_tools/update_kernel_afdo_test.py  304
4 files changed, 1108 insertions, 425 deletions
diff --git a/afdo_tools/update_kernel_afdo b/afdo_tools/update_kernel_afdo
deleted file mode 100755
index 6bfa53fa..00000000
--- a/afdo_tools/update_kernel_afdo
+++ /dev/null
@@ -1,424 +0,0 @@
-#!/bin/bash
-# Copyright 2020 The ChromiumOS Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Due to crbug.com/1081332, we need to update AFDO metadata
-# manually. This script performs a few checks and generates a
-# new kernel_afdo.json file, which can then be submitted.
-#
-
-USAGE="
-Usage: $(basename "$0") [--help] [--(no)upload] [--nointeractive]
- [main|beta|stable|all]
-
-Description:
-  The script takes one optional argument, the channel in which to update
-the kernel AFDO, and creates a commit (or commits, with \"all\" channels)
-in the corresponding branch.
-  No arguments defaults to \"all\".
-  With --noupload the script prints the commands to upload the changes.
-  Otherwise it automatically creates CLs and sends them to the detective
-  for review.
- NO CLEAN-UP NEEDED. The script ignores any local changes and keeps
-the current branch unchanged.
-
- Args:
- --help Show this help.
- --upload Upload CLs when the update succeeded (default).
- --noupload Do not upload CLs. Instead, print the upload commands.
- --nointeractive Runs the script without user interaction.
- main|beta|stable Update metadata only on the specified channel.
-"
-
-set -eu
-set -o pipefail
-
-# Branch independent constants.
-# Changes here will affect kernel afdo update in cros branches.
-# -------------------
-ARCHS="amd arm"
-AMD_GS_BASE=gs://chromeos-prebuilt/afdo-job/vetted/kernel/amd64
-ARM_GS_BASE=gs://chromeos-prebuilt/afdo-job/vetted/kernel/arm
-UPDATE_CONFIG_FILE="afdo_tools/update_kernel_afdo.cfg"
-# CL reviewers and cc.
-REVIEWERS="c-compiler-chrome@google.com"
-CC="denik@google.com,gbiv@google.com"
-# Add skipped chrome branches in ascending order here.
-SKIPPED_BRANCHES="95"
-# NOTE: We enable/disable kernel AFDO starting from a particular branch.
-# For example if we want to enable kernel AFDO in 5.15, first, we do it
-# in main. In this case we want to disable it in beta and stable branches.
-# The second scenario is when we want to disable kernel AFDO (when all devices
-# move to kernelnext and there are no new profiles from the field). In this
-# case we disable AFDO in main but still keep it live in beta and stable.
-declare -A SKIPPED_ARCHKVERS_IN_BRANCHES
-# In SKIPPED_ARCHKVERS_IN_BRANCHES
-# - key is a branch number string;
-# - value is the list of arch/kver separated by space.
-# Example: SKIPPED_ARCHKVERS_IN_BRANCHES["105"]="amd/4.4 arm/5.15"
-# -------------------
-# Kernel tracing was disabled on arm in 114, b/275560674.
-SKIPPED_ARCHKVERS_IN_BRANCHES["114"]="arm/5.15"
-SKIPPED_ARCHKVERS_IN_BRANCHES["115"]="arm/5.15"
-
-script_dir=$(dirname "$0")
-tc_utils_dir="${script_dir}/.."
-# Convert toolchain_utils into an absolute path.
-abs_tc_utils_dir="$(realpath "${tc_utils_dir}")"
-
-# Check profiles uploaded within the last week.
-expected_time=$(date +%s -d "week ago")
-# Upload CLs on success.
-upload_cl=true
-# Interactive mode.
-interactive=true
-# Without arguments the script updates all branches.
-channels=""
-failed_channels=""
-
-declare -A arch_gsbase arch_kvers arch_outfile
-declare -A branch branch_number commit
-remote_repo=$(git -C "${tc_utils_dir}" remote)
-canary_ref="refs/heads/main"
-# Read the last two release-Rxx from remote branches
-# and assign them to stable_ref and beta_ref.
-# sort -V is the version sort which puts R100 after R99.
-# We need `echo` to convert newlines into spaces for read.
-read -r stable_ref beta_ref <<< "$(git -C "${tc_utils_dir}" ls-remote -h \
- "${remote_repo}" release-R\* | cut -f2 | sort -V | tail -n 2 | paste -s)"
-# Branch names which start with release-R.
-branch["beta"]=${beta_ref##*/}
-branch["stable"]=${stable_ref##*/}
-branch["canary"]=${canary_ref##*/}
-
-# Get current branch numbers (number which goes after R).
-branch_number["stable"]=$(echo "${branch["stable"]}" | \
- sed -n -e "s/^release-R\([0-9][0-9]*\).*$/\1/p")
-branch_number["beta"]=$(echo "${branch["beta"]}" | \
- sed -n -e "s/^release-R\([0-9][0-9]*\).*$/\1/p")
-branch_number["canary"]="$((branch_number[beta] + 1))"
-for skipped_branch in ${SKIPPED_BRANCHES} ; do
- if [[ ${branch_number["canary"]} == "${skipped_branch}" ]] ; then
- ((branch_number[canary]++))
- fi
-done
-config_file="$(realpath --relative-to="${tc_utils_dir}" \
- "${tc_utils_dir}/${UPDATE_CONFIG_FILE}")"
-
-for arg in "$@"
-do
- case "${arg}" in
- stable | canary | beta )
- channels="${channels} ${arg}"
- ;;
- main )
- channels="${channels} canary"
- ;;
- all )
- channels="canary beta stable"
- ;;
- --noupload | --no-upload)
- upload_cl=false
- ;;
- --upload)
- upload_cl=true
- ;;
- --nointeractive)
- interactive=false
- ;;
- --help | help | -h )
- echo "${USAGE}"
- exit 0
- ;;
- -*)
- echo "ERROR: Option \"${arg}\" is not supported." >&2
- echo "${USAGE}"
- exit 1
- ;;
- *)
- echo "Channel \"${arg}\" is not supported.
-Must be main (or canary), beta, stable or all." >&2
- echo "${USAGE}"
- exit 1
- esac
-done
-
-if [[ -z "${channels}" ]]
-then
- channels="canary beta stable"
-fi
-
-# Fetch latest branches.
-git -C "${tc_utils_dir}" fetch "${remote_repo}"
-
-worktree_dir=$(mktemp -d)
-echo "-> Working in ${worktree_dir}"
-# Create a worktree and make changes there.
-# This way we don't need to clean up and sync toolchain_utils before the
-# change, nor worry about clean-up after the submit.
-git -C "${tc_utils_dir}" worktree add --detach "${worktree_dir}"
-trap 'git -C "${abs_tc_utils_dir}" worktree remove -f "${worktree_dir}" \
- && git -C "${abs_tc_utils_dir}" branch -D ${channels}' EXIT
-pushd "${worktree_dir}"
-
-for channel in ${channels}
-do
- set +u
- if [[ -n "${commit[${channel}]}" ]]
- then
- echo "Skipping channel ${channel} which already has commit\
- ${commit[${channel}]}."
- continue
- fi
- set -u
-
- errs=""
- successes=0
- curr_branch_number=${branch_number[${channel}]}
- curr_branch=${branch[${channel}]}
- echo
- echo "Checking \"${channel}\" channel..."
- echo "branch_number=${curr_branch_number} branch=${curr_branch}"
-
- git reset --hard HEAD
- git checkout -b "${channel}" "${remote_repo}/${curr_branch}"
-
- # Read branch-dependent constants from $remote_repo.
- # shellcheck source=afdo_tools/update_kernel_afdo.cfg
- if [[ -e "${config_file}" ]]
- then
- # Branch dependent constants were moved to config_file.
-    # IMPORTANT: Starting from M-113, update_kernel_afdo reads
-    # branch-dependent constants from config_file in the remote refs.
- source "${config_file}"
- else
- # DON'T UPDATE THESE CONSTANTS HERE!
- # Update ${config_file} instead.
- AMD_KVERS="4.14 4.19 5.4 5.10"
- ARM_KVERS="5.15"
- AMD_METADATA_FILE="afdo_metadata/kernel_afdo.json"
- ARM_METADATA_FILE="afdo_metadata/kernel_arm_afdo.json"
- fi
-
- amd_outfile="$(realpath --relative-to="${tc_utils_dir}" \
- "${tc_utils_dir}/${AMD_METADATA_FILE}")"
- arm_outfile="$(realpath --relative-to="${tc_utils_dir}" \
- "${tc_utils_dir}/${ARM_METADATA_FILE}")"
- arch_gsbase["amd"]="${AMD_GS_BASE}"
- arch_gsbase["arm"]="${ARM_GS_BASE}"
- arch_kvers["amd"]="${AMD_KVERS}"
- arch_kvers["arm"]="${ARM_KVERS}"
- arch_outfile["amd"]="${amd_outfile}"
- arch_outfile["arm"]="${arm_outfile}"
-
- new_changes=false
- for arch in ${ARCHS}
- do
- json="{"
- sep=""
- for kver in ${arch_kvers[${arch}]}
- do
- # Skip kernels disabled in this branch.
- skipped=false
- for skipped_branch in "${!SKIPPED_ARCHKVERS_IN_BRANCHES[@]}"
- do
- if [[ ${curr_branch_number} == "${skipped_branch}" ]]
- then
- # Current branch is in the keys of SKIPPED_ARCHKVERS_IN_BRANCHES.
-          # Now let's check if $arch/$kver is in the list.
- for skipped_archkver in \
- ${SKIPPED_ARCHKVERS_IN_BRANCHES[${skipped_branch}]}
- do
- if [[ "${arch}/${kver}" == "${skipped_archkver}" ]]
- then
- skipped=true
- break
- fi
- done
- fi
- done
- if ${skipped}
- then
- echo "${arch}/${kver} is skipped in branch ${curr_branch_number}."
- continue
- fi
-      # Sort the gs output by timestamp; the default ordering is by name,
-      # so R86-13310.3-1594633089.gcov.xz would go after
-      # R86-13310.18-1595237847.gcov.xz.
- latest=$(gsutil.py ls -l "${arch_gsbase[${arch}]}/${kver}/" | sort -k2 | \
- grep "R${curr_branch_number}" | tail -1 || true)
- prev_branch=$((curr_branch_number - 1))
- if [[ -z "${latest}" && "${channel}" != "stable" ]]
- then
-        # If no profiles exist for the current branch, try the previous branch.
- latest=$(gsutil.py ls -l "${arch_gsbase[${arch}]}/${kver}/" | \
- sort -k2 | grep "R${prev_branch}" | tail -1 || true)
- fi
- if [[ -z "${latest}" ]]
- then
- echo "ERROR: No M${curr_branch_number}, M${prev_branch} profiles in\
- ${arch_gsbase[${arch}]}/${kver}/" >&2
- echo "Skipping ${arch}/${kver}" >&2
- errs="${errs} ${kver}"
- continue
- fi
-
- # Verify that the file has the expected date.
- file_time=$(echo "${latest}" | awk '{print $2}')
- file_time_unix=$(date +%s -d "${file_time}")
- if [ "${file_time_unix}" -lt "${expected_time}" ]
- then
- expected=$(env TZ=UTC date +%Y-%m-%dT%H:%M:%SZ -d @"${expected_time}")
- echo "ERROR: Wrong date for ${kver}: ${file_time} is before\
- ${expected}" >&2
- errs="${errs} ${kver}"
- continue
- fi
-
- # Generate JSON.
- json_kver=$(echo "${kver}" | tr . _)
- # b/147370213 (migrating profiles from gcov format) may result in the
- # pattern below no longer doing the right thing.
- name="$(basename "${latest%.gcov.*}")"
- # Skip kernels with no AFDO support in the current channel.
- if [[ "${name}" == "" ]]
- then
- continue
- fi
- json=$(cat <<EOT
-${json}${sep}
- "chromeos-kernel-${json_kver}": {
- "name": "${name}"
- }
-EOT
- )
- sep=","
- successes=$((successes + 1))
- done # kvers loop
-
-  # If we did not succeed for any kvers, record the failure and move on.
- if [[ ${successes} -eq 0 ]]
- then
- echo "ERROR: AFDO profiles out of date for all kernel versions" >&2
- failed_channels="${failed_channels} ${channel}"
- continue
- fi
-
- # Write new JSON file.
-  # Don't use `echo` since `json` might have escape characters in it.
- printf "%s\n}\n" "${json}" > "${arch_outfile[${arch}]}"
-
- # If no changes were made, say so.
- outdir=$(dirname "${arch_outfile[${arch}]}")
- shortstat=$(cd "${outdir}" &&\
- git status --short "$(basename "${arch_outfile[${arch}]}")")
- [ -z "${shortstat}" ] &&\
- echo "$(basename "${arch_outfile[${arch}]}") is up to date." \
- && continue
-
- # If we had any errors, warn about them.
- if [[ -n "${errs}" ]]
- then
- echo "WARNING: failed to update ${errs} in ${channel}" >&2
- failed_channels="${failed_channels} ${channel}"
- continue
- fi
-
- git add "${arch_outfile[${arch}]}"
- new_changes=true
- done # ARCHS loop
-
- if ! ${new_changes}
- then
- echo "Skipping \"${channel}\" - all profiles are up to date"
- continue
- fi
-
- case "${channel}" in
- canary )
- commit_contents=$'afdo_metadata: Publish the new kernel profiles\n\n'
- for arch in ${ARCHS} ; do
- for kver in ${arch_kvers[${arch}]} ; do
- commit_contents="${commit_contents}Update ${arch} profile on\
- chromeos-kernel-${kver}"$'\n'
- done
- done
- commit_contents="${commit_contents}
-
-BUG=None
-TEST=Verified in kernel-release-afdo-verify-orchestrator"
- ;;
- beta | stable )
- commit_contents="afdo_metadata: Publish the new kernel profiles\
- in ${curr_branch}
-
-Have PM pre-approval because this shouldn't break the release branch.
-
-BUG=None
-TEST=Verified in kernel-release-afdo-verify-orchestrator"
- ;;
- * )
- echo "Internal error: unhandled channel \"${channel}\"" >&2
- exit 2
- esac
-
- if ${interactive}
- then
- git commit -v -e -m "${commit_contents}"
- else
- git commit -m "${commit_contents}"
- fi
-
- commit[${channel}]=$(git -C "${worktree_dir}" rev-parse HEAD)
-done
-
-popd
-echo
-# Array size check doesn't play well with the unbound variable option.
-set +u
-if [[ ${#commit[@]} -gt 0 ]]
-then
- set -u
- echo "The change is applied in ${!commit[*]}."
- if ${upload_cl}
- then
- for channel in "${!commit[@]}"
- do
- if ${interactive}
- then
- (cd "${tc_utils_dir}" && \
- repo upload --br="${channel}" --re="${REVIEWERS}" --cc="${CC}" .)
- else
- (cd "${tc_utils_dir}" && \
- repo upload --br="${channel}" --no-verify -y --re="${REVIEWERS}" \
- --cc="${CC}" .)
- fi
- done
- else
- echo "Run these commands to upload the change:"
- echo
- for channel in "${!commit[@]}"
- do
- echo -e "\tgit -C ${tc_utils_dir} push ${remote_repo} \
- ${commit[${channel}]}:refs/for/${branch[${channel}]}"
- done
- fi
-
- # Report failed channels.
- if [[ -n "${failed_channels}" ]]
- then
- echo
- echo "ERROR: failed to update kernel afdo in ${failed_channels}" >&2
- exit 3
- fi
-else
- # No commits. Check if it is due to failures.
- if [[ -z "${failed_channels}" ]]
- then
- echo "No changes are applied. It looks like AFDO versions are up to date."
- else
- echo "ERROR: update in ${failed_channels} failed" >&2
- exit 3
- fi
-fi
diff --git a/afdo_tools/update_kernel_afdo.cfg b/afdo_tools/update_kernel_afdo.cfg
index 821c9c1f..a69d6f84 100644
--- a/afdo_tools/update_kernel_afdo.cfg
+++ b/afdo_tools/update_kernel_afdo.cfg
@@ -2,7 +2,7 @@
# All changes here won't affect kernel afdo update in branches.
# WARNING: Changes must be submitted to have effect.
-AMD_KVERS="5.4 5.10 5.15"
+AMD_KVERS="5.4 5.10 5.15 6.1"
ARM_KVERS="5.15"
AMD_METADATA_FILE="afdo_metadata/kernel_afdo.json"
ARM_METADATA_FILE="afdo_metadata/kernel_arm_afdo.json"
diff --git a/afdo_tools/update_kernel_afdo.py b/afdo_tools/update_kernel_afdo.py
new file mode 100755
index 00000000..0a299bd2
--- /dev/null
+++ b/afdo_tools/update_kernel_afdo.py
@@ -0,0 +1,803 @@
+#!/usr/bin/env python3
+# Copyright 2024 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This script updates kernel profiles based on what's available in gs://.
+
+It supports updating on canary, stable, and beta branches.
+"""
+
+import argparse
+import dataclasses
+import datetime
+import enum
+import json
+import logging
+import os
+from pathlib import Path
+import re
+import shlex
+import subprocess
+import sys
+from typing import Dict, Iterable, List, Optional, Tuple
+
+from cros_utils import git_utils
+
+
+# Folks who should be on the R-line of any CLs that get uploaded.
+CL_REVIEWERS = (git_utils.REVIEWER_DETECTIVE,)
+
+# Folks who should be on the CC-line of any CLs that get uploaded.
+CL_CC = (
+ "denik@google.com",
+ "gbiv@google.com",
+)
+
+# Determine which gsutil to use.
+# 'gsutil.py' is provided by depot_tools, whereas 'gsutil'
+# is provided by either https://cloud.google.com/sdk/docs/install, or
+# the 'google-cloud-cli' package. Since we need depot_tools to even
+# use 'repo', 'gsutil.py' is guaranteed to exist.
+GSUTIL = "gsutil.py"
+
+
+class Arch(enum.Enum):
+ """An enum for CPU architectures."""
+
+ AMD64 = "amd64"
+ ARM = "arm"
+
+ @property
+ def cwp_gs_location(self) -> str:
+ """Returns the location in gs:// where these profiles live."""
+ if self == self.AMD64:
+ return "gs://chromeos-prebuilt/afdo-job/vetted/kernel/amd64"
+ if self == self.ARM:
+ return "gs://chromeos-prebuilt/afdo-job/vetted/kernel/arm"
+ assert False, f"Uncovered arch -> gs:// mapping for {self}"
+
+
+@dataclasses.dataclass(frozen=True, eq=True, order=True)
+class KernelVersion:
+ """A class representing a version of the kernel."""
+
+ major: int
+ minor: int
+
+ def __str__(self):
+ return f"{self.major}.{self.minor}"
+
+ @classmethod
+ def parse(cls, val: str) -> "KernelVersion":
+        m = re.fullmatch(r"(\d+)\.(\d+)", val)
+ if not m:
+ raise ValueError(f"{val!r} is an invalid kernel version")
+ return cls(major=int(m.group(1)), minor=int(m.group(2)))
+
+
+# Versions that rolling should be skipped on, for one reason or another.
+SKIPPED_VERSIONS: Dict[int, Iterable[Tuple[Arch, KernelVersion]]] = {
+ # Kernel tracing was disabled on ARM in 114, b/275560674
+ 114: ((Arch.ARM, KernelVersion(5, 15)),),
+ 115: ((Arch.ARM, KernelVersion(5, 15)),),
+}
+
+
+class Channel(enum.Enum):
+ """An enum that discusses channels."""
+
+ # Ordered from closest-to-ToT to farthest-from-ToT
+ CANARY = "canary"
+ BETA = "beta"
+ STABLE = "stable"
+
+ @classmethod
+ def parse(cls, val: str) -> "Channel":
+ for x in cls:
+ if val == x.value:
+ return x
+ raise ValueError(
+ f"No such channel: {val!r}; try one of {[x.value for x in cls]}"
+ )
+
+
+@dataclasses.dataclass(frozen=True)
+class ProfileSelectionInfo:
+ """Preferences about profiles to select."""
+
+ # A consistent timestamp for the program to run with.
+ now: datetime.datetime
+
+ # Maximum age of a profile that can be selected.
+ max_profile_age: datetime.timedelta
+
+
+def get_parser():
+ """Returns an argument parser for this script."""
+ parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ parser.add_argument(
+ "--debug",
+ action="store_true",
+ help="Enable debug logging.",
+ )
+ parser.add_argument(
+ "--upload",
+ action="store_true",
+ help="Automatically upload all changes that were made.",
+ )
+ parser.add_argument(
+ "--fetch",
+ action="store_true",
+ help="Run `git fetch` in toolchain-utils prior to running.",
+ )
+ parser.add_argument(
+ "--max-age-days",
+ type=int,
+ default=10,
+ help="""
+ The maximum number of days old a kernel profile can be before
+ it's ignored by this script. Default: %(default)s
+ """,
+ )
+ parser.add_argument(
+ "--chromeos-tree",
+ type=Path,
+ help="""
+ Root of a ChromeOS tree. This is optional to pass in, but doing so
+ unlocks extra convenience features on `--upload`. This script will try
+ to autodetect a tree if this isn't specified.
+ """,
+ )
+ parser.add_argument(
+ "channel",
+ nargs="*",
+ type=Channel.parse,
+ default=list(Channel),
+ help=f"""
+ Channel(s) to update. If none are passed, this will update all
+ channels. Choose from {[x.value for x in Channel]}.
+ """,
+ )
+ return parser
+
+
+@dataclasses.dataclass(frozen=True, eq=True, order=True)
+class GitBranch:
+ """Represents a ChromeOS branch."""
+
+ remote: str
+ release_number: int
+ branch_name: str
+
+
+def git_checkout(git_dir: Path, branch: GitBranch) -> None:
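+    """Checks out branch.remote/branch.branch_name in git_dir."""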
+ subprocess.run(
+ [
+ "git",
+ "checkout",
+ "--quiet",
+ f"{branch.remote}/{branch.branch_name}",
+ ],
+ check=True,
+ cwd=git_dir,
+ stdin=subprocess.DEVNULL,
+ )
+
+
+def git_fetch(git_dir: Path) -> None:
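+    """Runs `git fetch` in git_dir."""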
+ subprocess.run(
+ ["git", "fetch"],
+ check=True,
+ cwd=git_dir,
+ stdin=subprocess.DEVNULL,
+ )
+
+
+def git_rev_parse(git_dir: Path, ref_or_sha: str) -> str:
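+    """Resolves ref_or_sha to a full SHA in git_dir."""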
+ return subprocess.run(
+ ["git", "rev-parse", ref_or_sha],
+ check=True,
+ cwd=git_dir,
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.PIPE,
+ encoding="utf-8",
+ ).stdout.strip()
+
+
+def autodetect_branches(toolchain_utils: Path) -> Dict[Channel, GitBranch]:
+ """Returns GitBranches for each branch type in toolchain_utils."""
+ stdout = subprocess.run(
+ [
+ "git",
+ "branch",
+ "-r",
+ ],
+ cwd=toolchain_utils,
+ check=True,
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.PIPE,
+ encoding="utf-8",
+ ).stdout
+
+ # Match "${remote}/release-R${branch_number}-${build}.B"
+ branch_re = re.compile(r"([^/]+)/(release-R(\d+)-\d+\.B)")
+ branches = []
+ for line in stdout.splitlines():
+ line = line.strip()
+ if m := branch_re.fullmatch(line):
+ remote, branch_name, branch_number = m.groups()
+ branches.append(GitBranch(remote, int(branch_number), branch_name))
+
+ branches.sort(key=lambda x: x.release_number)
+ if len(branches) < 2:
+ raise ValueError(
+ f"Expected at least two branches, but only found {len(branches)}"
+ )
+
+ stable = branches[-2]
+ beta = branches[-1]
+ canary = GitBranch(
+ remote=beta.remote,
+ release_number=beta.release_number + 1,
+ branch_name="main",
+ )
+ return {
+ Channel.CANARY: canary,
+ Channel.BETA: beta,
+ Channel.STABLE: stable,
+ }
+
+
+@dataclasses.dataclass(frozen=True, eq=True, order=True)
+class ArchUpdateConfig:
+ """The AFDO update config for one architecture."""
+
+ versions_to_track: List[KernelVersion]
+ metadata_file: Path
+
+
+def read_update_cfg_file(
+ toolchain_utils: Path, file_path: Path
+) -> Dict[Arch, ArchUpdateConfig]:
+ """Reads `update_kernel_afdo.cfg`."""
+ # These files were originally meant to be `source`d in bash, and are very
+ # simple. These are read from branches, so we'd need cherry-picks to go
+ # back and replace them with a singular format. Could be nice to move to
+ # JSON or something.
+
+ # Parse assignments that look like `FOO="bar"`. No escaping or variable
+ # expansion is supported.
+ kv_re = re.compile(r'^([a-zA-Z_0-9]+)="([^"]*)"(?:\s*#.*)?', re.MULTILINE)
+ kvs = kv_re.findall(file_path.read_text(encoding="utf-8"))
+ # Subtle: the regex above makes it so `kv_re.findall` returns a series of
+ # (variable_name, variable_value).
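+    # For example, a cfg line `AMD_KVERS="5.4 5.10"` yields the pair
+    # ("AMD_KVERS", "5.4 5.10").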
+ settings = dict(kvs)
+
+ logging.debug("Parsing cfg file gave back settings: %s", settings)
+ archs = (
+ (Arch.AMD64, "AMD"),
+ (Arch.ARM, "ARM"),
+ )
+
+ results = {}
+ for arch, arch_var_name in archs:
+ # This is a space-separated list of kernel versions.
+ kernel_versions = settings[f"{arch_var_name}_KVERS"]
+ parsed_versions = [
+ KernelVersion.parse(x) for x in kernel_versions.split()
+ ]
+
+ metadata_file = settings[f"{arch_var_name}_METADATA_FILE"]
+ results[arch] = ArchUpdateConfig(
+ versions_to_track=parsed_versions,
+ metadata_file=toolchain_utils / metadata_file,
+ )
+ return results
+
+
+@dataclasses.dataclass(frozen=True, eq=True)
+class KernelGsProfile:
+ """Represents a kernel profile in gs://."""
+
+ release_number: int
+ chrome_build: str
+ cwp_timestamp: int
+ suffix: str
+ gs_timestamp: datetime.datetime
+
+ _FILE_NAME_PARSE_RE = re.compile(r"R(\d+)-(\d+\.\d+)-(\d+)(\..+\..+)")
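+    # For example, "R124-15808.0-1710149961.gcov.xz" parses into
+    # release_number=124, chrome_build="15808.0",
+    # cwp_timestamp=1710149961, and suffix=".gcov.xz".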
+
+ @property
+ def file_name_no_suffix(self):
+ return (
+ f"R{self.release_number}-{self.chrome_build}-{self.cwp_timestamp}"
+ )
+
+ @property
+ def file_name(self):
+ return f"{self.file_name_no_suffix}{self.suffix}"
+
+ @classmethod
+ def from_file_name(
+ cls, timestamp: datetime.datetime, file_name: str
+ ) -> "KernelGsProfile":
+ m = cls._FILE_NAME_PARSE_RE.fullmatch(file_name)
+ if not m:
+ raise ValueError(f"{file_name!r} doesn't parse as a profile name")
+ release_number, chrome_build, cwp_timestamp, suffix = m.groups()
+ return cls(
+ release_number=int(release_number),
+ chrome_build=chrome_build,
+ cwp_timestamp=int(cwp_timestamp),
+ suffix=suffix,
+ gs_timestamp=timestamp,
+ )
+
+
+def datetime_from_gs_time(timestamp_str: str) -> datetime.datetime:
+ """Parses a datetime from gs."""
+ return datetime.datetime.strptime(
+ timestamp_str, "%Y-%m-%dT%H:%M:%SZ"
+ ).replace(tzinfo=datetime.timezone.utc)
+
+
+class KernelProfileFetcher:
+ """Fetches kernel profiles from gs://. Caches results."""
+
+ def __init__(self):
+ self._cached_results: Dict[str, List[KernelGsProfile]] = {}
+
+ @staticmethod
+ def _parse_gs_stdout(stdout: str) -> List[KernelGsProfile]:
+ line_re = re.compile(r"\s*\d+\s+(\S+T\S+)\s+(gs://.+)")
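+        # Matches `gsutil ls -l` lines of the form
+        #   <size>  <timestamp>  <gs://url>
+        # e.g. "753112  2024-03-04T10:38:50Z  gs://here/5.4/R124-....gcov.xz".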
+ results = []
+ # Ignore the last line, since that's "TOTAL:"
+ for line in stdout.splitlines()[:-1]:
+ line = line.strip()
+ if not line:
+ continue
+ m = line_re.fullmatch(line)
+ if m is None:
+ raise ValueError(f"Unexpected line from gs: {line!r}")
+ timestamp_str, gs_url = m.groups()
+ timestamp = datetime_from_gs_time(timestamp_str)
+ file_name = os.path.basename(gs_url)
+ results.append(KernelGsProfile.from_file_name(timestamp, file_name))
+ return results
+
+ @classmethod
+ def _fetch_impl(cls, gs_url: str) -> List[KernelGsProfile]:
+ cmd = [
+ GSUTIL,
+ "ls",
+ "-l",
+ gs_url,
+ ]
+ result = subprocess.run(
+ cmd,
+ check=False,
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ encoding="utf-8",
+ )
+
+ if result.returncode:
+ # If nothing could be found, gsutil will exit after printing this.
+ if "One or more URLs matched no objects." in result.stderr:
+ return []
+ logging.error(
+ "%s failed; stderr:\n%s", shlex.join(cmd), result.stderr
+ )
+ result.check_returncode()
+ assert False, "unreachable"
+
+ return cls._parse_gs_stdout(result.stdout)
+
+ def fetch(self, gs_url: str) -> List[KernelGsProfile]:
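+        """Returns the profiles at gs_url, fetching them on first use."""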
+ cached = self._cached_results.get(gs_url)
+ if cached is None:
+ logging.info("Fetching profiles from %s...", gs_url)
+ fetched = self._fetch_impl(gs_url)
+ logging.info("Found %d profiles in %s", len(fetched), gs_url)
+ self._cached_results[gs_url] = fetched
+ cached = fetched
+
+ # Create a copy to keep mutations from causing problems.
+ # KernelGsProfiles are frozen, at least.
+ return cached[:]
+
+
+def find_newest_afdo_artifact(
+ fetcher: KernelProfileFetcher,
+ arch: Arch,
+ kernel_version: KernelVersion,
+ release_number: int,
+) -> Optional[KernelGsProfile]:
+ """Returns info about the latest AFDO artifact for the given parameters."""
+ gs_base_location = arch.cwp_gs_location
+ kernel_profile_dir = os.path.join(gs_base_location, str(kernel_version))
+ kernel_profiles = fetcher.fetch(kernel_profile_dir)
+ if not kernel_profiles:
+ logging.error(
+ "Failed to find any kernel profiles in %s", kernel_profile_dir
+ )
+ return None
+
+ valid_profiles = [
+ x for x in kernel_profiles if x.release_number == release_number
+ ]
+ if not valid_profiles:
+ logging.warning(
+ "Failed to find any M%d kernel profiles in %s",
+ release_number,
+ kernel_profile_dir,
+ )
+ return None
+
+ # We want the most recently uploaded profile, since that should correspond
+ # with the newest profile. If there're multiple profiles for some reason,
+ # choose what _should_ be a consistent tie-breaker.
+ return max(
+ valid_profiles,
+ key=lambda x: (x.gs_timestamp, x.cwp_timestamp, x.chrome_build),
+ )
+
+
+def read_afdo_descriptor_file(path: Path) -> Dict[KernelVersion, str]:
+ """Reads the AFDO descriptor file.
+
+ "AFDO descriptor file" is jargon to refer to the actual JSON file that PUpr
+ monitors.
+ """
+ try:
+ with path.open(encoding="utf-8") as f:
+ raw_contents = json.load(f)
+ except FileNotFoundError:
+ return {}
+
+ # The format of this is:
+ # {
+ # "chromeos-kernel-${major}_${minor}": {
+ # "name": "${profile_gs_name}",
+ # }
+ # }
+    key_re = re.compile(r"^chromeos-kernel-(\d+)_(\d+)$")
+ result = {}
+ for kernel_key, val in raw_contents.items():
+ m = key_re.fullmatch(kernel_key)
+ if not m:
+ raise ValueError(f"Invalid key in JSON: {kernel_key}")
+ major, minor = m.groups()
+ version = KernelVersion(major=int(major), minor=int(minor))
+ result[version] = val["name"]
+ return result
+
+
+def write_afdo_descriptor_file(
+ path: Path, contents: Dict[KernelVersion, str]
+) -> bool:
+ """Writes the file at path with the given contents.
+
+ Returns:
+ True if the file was written due to changes, False otherwise.
+ """
+ contents_dict = {
+ f"chromeos-kernel-{k.major}_{k.minor}": {"name": gs_name}
+ for k, gs_name in contents.items()
+ }
+
+ contents_json = json.dumps(contents_dict, indent=4, sort_keys=True)
+ try:
+ existing_contents = path.read_text(encoding="utf-8")
+ except FileNotFoundError:
+ existing_contents = ""
+
+ # Compare the _textual representation_ of each of these, since things like
+ # formatting changes should be propagated eagerly.
+ if contents_json == existing_contents:
+ return False
+
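+    # Write a sibling temp file and rename it over the destination, so
+    # readers never observe a partially-written descriptor.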
+ tmp_path = path.with_suffix(".json.tmp")
+ tmp_path.write_text(contents_json, encoding="utf-8")
+ tmp_path.rename(path)
+ return True
+
+
+@dataclasses.dataclass
+class UpdateResult:
+ """Result of `update_afdo_for_channel`."""
+
+ # True if changes were made to the AFDO files that map kernel versions to
+ # AFDO profiles.
+ made_changes: bool
+
+ # Whether issues were had updating one or more profiles. If this is True,
+ # you may expect that there will be logs about the issues already.
+ had_failures: bool
+
+
+def fetch_and_validate_newest_afdo_artifact(
+ fetcher: KernelProfileFetcher,
+ selection_info: ProfileSelectionInfo,
+ arch: Arch,
+ kernel_version: KernelVersion,
+ branch: GitBranch,
+ channel: Channel,
+) -> Optional[Tuple[str, bool]]:
+ """Tries to update one AFDO profile on a branch.
+
+ Returns:
+ None if something failed, and the update couldn't be completed.
+ Otherwise, this returns a tuple of (profile_name, is_old). If `is_old`
+ is True, this function logs an error.
+ """
+ newest_artifact = find_newest_afdo_artifact(
+ fetcher, arch, kernel_version, branch.release_number
+ )
+ # Try an older branch if we're not on stable. We should fail harder if we
+ # only have old profiles on stable, though.
+ if newest_artifact is None and channel != Channel.STABLE:
+ newest_artifact = find_newest_afdo_artifact(
+ fetcher, arch, kernel_version, branch.release_number - 1
+ )
+
+ if newest_artifact is None:
+ logging.error(
+ "No new profile found for %s/%s on M%d; not updating entry",
+ arch,
+ kernel_version,
+ branch.release_number,
+ )
+ return None
+
+ logging.info(
+ "Newest profile is %s for %s/%s on M%d",
+ newest_artifact.file_name,
+ arch,
+ kernel_version,
+ branch.release_number,
+ )
+ age = selection_info.now - newest_artifact.gs_timestamp
+ is_old = False
+ if age > selection_info.max_profile_age:
+ is_old = True
+ logging.error(
+ "Profile %s is %s old. The configured limit is %s.",
+ newest_artifact.file_name,
+ age,
+ selection_info.max_profile_age,
+ )
+ return newest_artifact.file_name_no_suffix, is_old
+
+
+def update_afdo_for_channel(
+ fetcher: KernelProfileFetcher,
+ toolchain_utils: Path,
+ selection_info: ProfileSelectionInfo,
+ channel: Channel,
+ branch: GitBranch,
+ skipped_versions: Dict[int, Iterable[Tuple[Arch, KernelVersion]]],
+) -> UpdateResult:
+ """Updates AFDO on the given channel."""
+ git_checkout(toolchain_utils, branch)
+ update_cfgs = read_update_cfg_file(
+ toolchain_utils,
+ toolchain_utils / "afdo_tools" / "update_kernel_afdo.cfg",
+ )
+
+ to_skip = skipped_versions.get(branch.release_number)
+ made_changes = False
+ had_failures = False
+ for arch, cfg in update_cfgs.items():
+ afdo_mappings = read_afdo_descriptor_file(cfg.metadata_file)
+ for kernel_version in cfg.versions_to_track:
+ if to_skip and (arch, kernel_version) in to_skip:
+ logging.info(
+ "%s/%s on M%d is in the skip list; ignoring it.",
+ arch,
+ kernel_version,
+ branch.release_number,
+ )
+ continue
+
+ artifact_info = fetch_and_validate_newest_afdo_artifact(
+ fetcher,
+ selection_info,
+ arch,
+ kernel_version,
+ branch,
+ channel,
+ )
+ if artifact_info is None:
+ # Assume that the problem was already logged.
+ had_failures = True
+ continue
+
+ newest_name, is_old = artifact_info
+ if is_old:
+ # Assume that the problem was already logged, but continue to
+ # land this in case it makes a difference.
+ had_failures = True
+
+ afdo_mappings[kernel_version] = newest_name
+
+ if write_afdo_descriptor_file(cfg.metadata_file, afdo_mappings):
+ made_changes = True
+ logging.info(
+ "Wrote new AFDO mappings for arch %s on M%d",
+ arch,
+ branch.release_number,
+ )
+ else:
+ logging.info(
+ "No changes to write for arch %s on M%d",
+ arch,
+ branch.release_number,
+ )
+ return UpdateResult(
+ made_changes=made_changes,
+ had_failures=had_failures,
+ )
+
+
+def commit_new_profiles(
+ toolchain_utils: Path, channel: Channel, had_failures: bool
+):
+ """Runs `git commit -a` with an appropriate message."""
+ commit_message_lines = [
+ "afdo_metadata: Publish the new kernel profiles",
+ "",
+ ]
+
+ if had_failures:
+ commit_message_lines += (
+ "This brings some profiles to their newest versions. The CrOS",
+ "toolchain detective has been notified about the failures that",
+ "occurred in this update.",
+ )
+ else:
+ commit_message_lines.append(
+ "This brings all profiles to their newest versions."
+ )
+
+ if channel != Channel.CANARY:
+ commit_message_lines += (
+ "",
+ "Have PM pre-approval because this shouldn't break the release",
+ "branch.",
+ )
+
+ commit_message_lines += (
+ "",
+ "BUG=None",
+ "TEST=Verified in kernel-release-afdo-verify-orchestrator",
+ )
+
+ commit_msg = "\n".join(commit_message_lines)
+ subprocess.run(
+ [
+ "git",
+ "commit",
+ "--quiet",
+ "-a",
+ "-m",
+ commit_msg,
+ ],
+ cwd=toolchain_utils,
+ check=True,
+ stdin=subprocess.DEVNULL,
+ )
+
+
+def upload_head_to_gerrit(
+ toolchain_utils: Path,
+ chromeos_tree: Optional[Path],
+ branch: GitBranch,
+):
+ """Uploads HEAD to gerrit as a CL, and sets reviewers/CCs."""
+ cl_ids = git_utils.upload_to_gerrit(
+ toolchain_utils,
+ branch.remote,
+ branch.branch_name,
+ CL_REVIEWERS,
+ CL_CC,
+ )
+
+ if len(cl_ids) > 1:
+ raise ValueError(f"Unexpected: wanted just one CL upload; got {cl_ids}")
+
+ cl_id = cl_ids[0]
+ logging.info("Uploaded CL http://crrev.com/c/%s successfully.", cl_id)
+
+ if chromeos_tree is None:
+ logging.info(
+ "Skipping gerrit convenience commands, since no CrOS tree was "
+ "specified."
+ )
+ return
+
+ git_utils.try_set_autosubmit_labels(chromeos_tree, cl_id)
+
+
+def find_chromeos_tree_root(a_dir: Path) -> Optional[Path]:
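+    """Returns the ancestor of a_dir that contains a .repo dir, if any."""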
+ for parent in a_dir.parents:
+ if (parent / ".repo").is_dir():
+ return parent
+ return None
+
+
+def main(argv: List[str]) -> None:
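+    """Updates kernel AFDO metadata for the channels given in argv."""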
+ my_dir = Path(__file__).resolve().parent
+ toolchain_utils = my_dir.parent
+
+ opts = get_parser().parse_args(argv)
+ logging.basicConfig(
+ format=">> %(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: "
+ "%(message)s",
+ level=logging.DEBUG if opts.debug else logging.INFO,
+ )
+
+ chromeos_tree = opts.chromeos_tree
+ if not chromeos_tree:
+ chromeos_tree = find_chromeos_tree_root(my_dir)
+ if chromeos_tree:
+ logging.info("Autodetected ChromeOS tree root at %s", chromeos_tree)
+
+ if opts.fetch:
+ logging.info("Fetching in %s...", toolchain_utils)
+ git_fetch(toolchain_utils)
+
+ selection_info = ProfileSelectionInfo(
+ now=datetime.datetime.now(datetime.timezone.utc),
+ max_profile_age=datetime.timedelta(days=opts.max_age_days),
+ )
+
+ branches = autodetect_branches(toolchain_utils)
+ logging.debug("Current branches: %s", branches)
+
+ assert all(x in branches for x in Channel), "branches are missing channels?"
+
+ fetcher = KernelProfileFetcher()
+ had_failures = False
+ with git_utils.create_worktree(toolchain_utils) as worktree:
+ for channel in opts.channel:
+ branch = branches[channel]
+ result = update_afdo_for_channel(
+ fetcher,
+ worktree,
+ selection_info,
+ channel,
+ branch,
+ SKIPPED_VERSIONS,
+ )
+ had_failures = had_failures or result.had_failures
+ if not result.made_changes:
+ logging.info("No new updates to post on %s", channel)
+ continue
+
+ commit_new_profiles(worktree, channel, result.had_failures)
+ if opts.upload:
+ logging.info("New profiles were committed. Uploading...")
+ upload_head_to_gerrit(worktree, chromeos_tree, branch)
+ else:
+ logging.info(
+ "--upload not specified. Leaving commit for %s at %s",
+ channel,
+ git_rev_parse(worktree, "HEAD"),
+ )
+
+ if had_failures:
+ sys.exit(
+ "At least one failure was encountered running this script; see "
+ "above logs. Most likely the things you're looking for are logged "
+ "at the ERROR level."
+ )
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/afdo_tools/update_kernel_afdo_test.py b/afdo_tools/update_kernel_afdo_test.py
new file mode 100755
index 00000000..1f365959
--- /dev/null
+++ b/afdo_tools/update_kernel_afdo_test.py
@@ -0,0 +1,304 @@
+#!/usr/bin/env python3
+# Copyright 2024 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for update_kernel_afdo."""
+
+import datetime
+from pathlib import Path
+import shutil
+import subprocess
+import tempfile
+import textwrap
+import unittest
+from unittest import mock
+
+import update_kernel_afdo
+
+
+class Test(unittest.TestCase):
+ """Tests for update_kernel_afdo."""
+
+ def make_tempdir(self) -> Path:
+ x = Path(tempfile.mkdtemp(prefix="update_kernel_afdo_test_"))
+ self.addCleanup(shutil.rmtree, x)
+ return x
+
+ def test_kernel_version_parsing(self):
+ self.assertEqual(
+ update_kernel_afdo.KernelVersion.parse("5.10"),
+ update_kernel_afdo.KernelVersion(major=5, minor=10),
+ )
+
+ with self.assertRaisesRegex(ValueError, ".*invalid kernel version.*"):
+ update_kernel_afdo.KernelVersion.parse("5")
+
+ def test_kernel_version_formatting(self):
+ self.assertEqual(
+ str(update_kernel_afdo.KernelVersion(major=5, minor=10)), "5.10"
+ )
+
+ def test_channel_parsing(self):
+ with self.assertRaisesRegex(ValueError, "No such channel.*"):
+ update_kernel_afdo.Channel.parse("not a channel")
+
+ # Ensure these round-trip.
+ for channel in update_kernel_afdo.Channel:
+ self.assertEqual(
+ channel, update_kernel_afdo.Channel.parse(channel.value)
+ )
+
+ @mock.patch.object(subprocess, "run")
+ def test_branch_autodetection(self, subprocess_run):
+ subprocess_run.return_value = subprocess.CompletedProcess(
+ args=[],
+ returncode=0,
+ stdout=textwrap.dedent(
+ """
+ cros/not-a-release-branch
+ cros/release-R121-15699.B
+ cros/release-R122-15753.B
+ cros/release-R123-15786.B
+ cros/also-not-a-release-branch
+ m/main
+ """
+ ),
+ )
+
+ branch_dict = update_kernel_afdo.autodetect_branches(
+ toolchain_utils=self.make_tempdir()
+ )
+
+ self.assertEqual(
+ branch_dict,
+ {
+ update_kernel_afdo.Channel.CANARY: update_kernel_afdo.GitBranch(
+ remote="cros",
+ release_number=124,
+ branch_name="main",
+ ),
+ update_kernel_afdo.Channel.BETA: update_kernel_afdo.GitBranch(
+ remote="cros",
+ release_number=123,
+ branch_name="release-R123-15786.B",
+ ),
+ update_kernel_afdo.Channel.STABLE: update_kernel_afdo.GitBranch(
+ remote="cros",
+ release_number=122,
+ branch_name="release-R122-15753.B",
+ ),
+ },
+ )
+
+ def test_read_update_cfg_file(self):
+ valid_contents = textwrap.dedent(
+ """
+ # some comment
+ # wow
+ AMD_KVERS="1.0 1.1"
+ ARM_KVERS="1.2"
+ AMD_METADATA_FILE="amd/file/path.json" # comment
+ ARM_METADATA_FILE="arm/file/path.json"
+ """
+ )
+ tmpdir = self.make_tempdir()
+ cfg_path = tmpdir / "test.cfg"
+ cfg_path.write_text(valid_contents, encoding="utf-8")
+ cfg = update_kernel_afdo.read_update_cfg_file(tmpdir, cfg_path)
+ expected_amd64 = update_kernel_afdo.ArchUpdateConfig(
+ versions_to_track=[
+ update_kernel_afdo.KernelVersion(1, 0),
+ update_kernel_afdo.KernelVersion(1, 1),
+ ],
+ metadata_file=tmpdir / "amd/file/path.json",
+ )
+ expected_arm = update_kernel_afdo.ArchUpdateConfig(
+ versions_to_track=[
+ update_kernel_afdo.KernelVersion(1, 2),
+ ],
+ metadata_file=tmpdir / "arm/file/path.json",
+ )
+
+ self.assertEqual(
+ cfg,
+ {
+ update_kernel_afdo.Arch.AMD64: expected_amd64,
+ update_kernel_afdo.Arch.ARM: expected_arm,
+ },
+ )
+
+ def test_parse_kernel_gs_profile(self):
+ timestamp = datetime.datetime.fromtimestamp(1234, datetime.timezone.utc)
+ profile = update_kernel_afdo.KernelGsProfile.from_file_name(
+ timestamp,
+ "R124-15808.0-1710149961.gcov.xz",
+ )
+ self.assertEqual(
+ profile,
+ update_kernel_afdo.KernelGsProfile(
+ release_number=124,
+ chrome_build="15808.0",
+ cwp_timestamp=1710149961,
+ suffix=".gcov.xz",
+ gs_timestamp=timestamp,
+ ),
+ )
+
+ def test_kernel_gs_profile_file_name(self):
+ timestamp = datetime.datetime.fromtimestamp(1234, datetime.timezone.utc)
+ profile = update_kernel_afdo.KernelGsProfile.from_file_name(
+ timestamp,
+ "R124-15808.0-1710149961.gcov.xz",
+ )
+ self.assertEqual(profile.file_name_no_suffix, "R124-15808.0-1710149961")
+ self.assertEqual(profile.file_name, "R124-15808.0-1710149961.gcov.xz")
+
+ def test_gs_time_parsing(self):
+ self.assertEqual(
+ update_kernel_afdo.datetime_from_gs_time("2024-03-04T10:38:50Z"),
+ datetime.datetime(
+ year=2024,
+ month=3,
+ day=4,
+ hour=10,
+ minute=38,
+ second=50,
+ tzinfo=datetime.timezone.utc,
+ ),
+ )
+
+ @mock.patch.object(subprocess, "run")
+ def test_kernel_profile_fetcher_works(self, subprocess_run):
+ subprocess_run.return_value = subprocess.CompletedProcess(
+ args=[],
+ returncode=0,
+ # Don't use textwrap.dedent; linter complains about the line being
+ # too long in that case.
+ stdout="""
+753112 2024-03-04T10:38:50Z gs://here/5.4/R124-15786.10-1709548729.gcov.xz
+TOTAL: 2 objects, 1234 bytes (1.1KiB)
+""",
+ )
+
+ fetcher = update_kernel_afdo.KernelProfileFetcher()
+ results = fetcher.fetch("gs://here/5.4")
+
+ expected_results = [
+ update_kernel_afdo.KernelGsProfile.from_file_name(
+ update_kernel_afdo.datetime_from_gs_time(
+ "2024-03-04T10:38:50Z"
+ ),
+ "R124-15786.10-1709548729.gcov.xz",
+ ),
+ ]
+ self.assertEqual(results, expected_results)
+
+ @mock.patch.object(subprocess, "run")
+ def test_kernel_profile_fetcher_handles_no_profiles(self, subprocess_run):
+ subprocess_run.return_value = subprocess.CompletedProcess(
+ args=[],
+ returncode=1,
+ stderr="\nCommandException: One or more URLs matched no objects.\n",
+ )
+
+ fetcher = update_kernel_afdo.KernelProfileFetcher()
+ results = fetcher.fetch("gs://here/5.4")
+ self.assertEqual(results, [])
+
+ @mock.patch.object(subprocess, "run")
+ def test_kernel_profile_fetcher_caches_urls(self, subprocess_run):
+ subprocess_run.return_value = subprocess.CompletedProcess(
+ args=[],
+ returncode=0,
+ # Don't use textwrap.dedent; linter complains about the line being
+ # too long in that case.
+ stdout="""
+753112 2024-03-04T10:38:50Z gs://here/5.4/R124-15786.10-1709548729.gcov.xz
+TOTAL: 2 objects, 1234 bytes (1.1KiB)
+""",
+ )
+
+ fetcher = update_kernel_afdo.KernelProfileFetcher()
+ # Fetch these twice, and assert both that:
+ # - Only one fetch is performed.
+ # - Mutating the first list won't impact the later fetch.
+ result = fetcher.fetch("gs://here/5.4")
+ self.assertEqual(len(result), 1)
+ del result[:]
+ result = fetcher.fetch("gs://here/5.4")
+ self.assertEqual(len(result), 1)
+ subprocess_run.assert_called_once()
+
+ @mock.patch.object(update_kernel_afdo.KernelProfileFetcher, "fetch")
+ def test_newest_afdo_artifact_finding_works(self, fetch):
+ late = update_kernel_afdo.KernelGsProfile.from_file_name(
+ datetime.datetime.fromtimestamp(1236, datetime.timezone.utc),
+ "R124-15786.10-1709548729.gcov.xz",
+ )
+ early = update_kernel_afdo.KernelGsProfile.from_file_name(
+ datetime.datetime.fromtimestamp(1234, datetime.timezone.utc),
+ "R124-99999.99-9999999999.gcov.xz",
+ )
+ fetch.return_value = [early, late]
+
+ self.assertEqual(
+ update_kernel_afdo.find_newest_afdo_artifact(
+ update_kernel_afdo.KernelProfileFetcher(),
+ update_kernel_afdo.Arch.AMD64,
+ update_kernel_afdo.KernelVersion(5, 4),
+ release_number=124,
+ ),
+ late,
+ )
+
+ def test_afdo_descriptor_file_round_trips(self):
+ tmpdir = self.make_tempdir()
+ file_path = tmpdir / "desc-file.json"
+
+ contents = {
+ update_kernel_afdo.KernelVersion(5, 10): "file1",
+ update_kernel_afdo.KernelVersion(5, 15): "file2",
+ }
+ self.assertTrue(
+ update_kernel_afdo.write_afdo_descriptor_file(file_path, contents)
+ )
+ self.assertEqual(
+ update_kernel_afdo.read_afdo_descriptor_file(file_path),
+ contents,
+ )
+
+ def test_afdo_descriptor_file_refuses_to_rewrite_identical_contents(self):
+ tmpdir = self.make_tempdir()
+ file_path = tmpdir / "desc-file.json"
+
+ contents = {
+ update_kernel_afdo.KernelVersion(5, 10): "file1",
+ update_kernel_afdo.KernelVersion(5, 15): "file2",
+ }
+ self.assertTrue(
+ update_kernel_afdo.write_afdo_descriptor_file(file_path, contents)
+ )
+ self.assertFalse(
+ update_kernel_afdo.write_afdo_descriptor_file(file_path, contents)
+ )
+
+ def test_repo_autodetects_nothing_if_no_repo_dir(self):
+ self.assertIsNone(
+ update_kernel_afdo.find_chromeos_tree_root(
+ Path("/does/not/exist/nor/is/under/a/repo")
+ )
+ )
+
+ def test_repo_autodetects_repo_dir_correctly(self):
+ tmpdir = self.make_tempdir()
+ test_subdir = tmpdir / "a/directory/and/another/one"
+ test_subdir.mkdir(parents=True)
+ (tmpdir / ".repo").mkdir()
+ self.assertEqual(
+ tmpdir, update_kernel_afdo.find_chromeos_tree_root(test_subdir)
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
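
A minimal sketch of driving the new script, with flags taken from the
get_parser() definitions above (paths assume the command runs from the root
of a toolchain-utils checkout):

    # Fetch the latest branches, update only beta and stable, upload CLs.
    ./afdo_tools/update_kernel_afdo.py --fetch --upload beta stable

    # Update every channel but upload nothing; each commit is left in a
    # temporary worktree and its SHA is logged.
    ./afdo_tools/update_kernel_afdo.py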