aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKelvin Zhang <zhangkelvin@google.com>2022-04-07 04:16:17 +0000
committerAutomerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>2022-04-07 04:16:17 +0000
commita83d45a9fa2139ee1290d78c1f88be413bde60bb (patch)
treebe8d52d51062da9d63478e17b5c7fed58e9406a5
parente737675ba4c5bd8a8d883c76361728174ad7de7e (diff)
parentfec3837e1bd649ca3faf26677fe22547d28559f0 (diff)
downloadupdate_engine-a83d45a9fa2139ee1290d78c1f88be413bde60bb.tar.gz
Support incremental OTA in ota_extractor am: 91e839cf19 am: fec3837e1b
Original change: https://android-review.googlesource.com/c/platform/system/update_engine/+/2056749

Change-Id: Iad99f91d5077a5a7f7e209cfebf1512aa790647d
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--aosp/ota_extractor.cc115
-rw-r--r--payload_consumer/install_operation_executor.cc1
-rw-r--r--payload_consumer/install_plan.cc79
-rw-r--r--payload_consumer/install_plan.h2
-rw-r--r--payload_consumer/verity_writer_android.cc5
5 files changed, 152 insertions, 50 deletions
diff --git a/aosp/ota_extractor.cc b/aosp/ota_extractor.cc
index 80bb74bf..7492bc5a 100644
--- a/aosp/ota_extractor.cc
+++ b/aosp/ota_extractor.cc
@@ -14,13 +14,13 @@
// limitations under the License.
//
-#include <fcntl.h>
-
+#include <array>
#include <cstdint>
#include <cstdio>
#include <iterator>
#include <memory>
+#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
@@ -28,16 +28,22 @@
#include <base/files/file_path.h>
#include <gflags/gflags.h>
#include <unistd.h>
+#include <xz.h>
#include "update_engine/common/utils.h"
#include "update_engine/common/hash_calculator.h"
#include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/payload_consumer/file_descriptor_utils.h"
#include "update_engine/payload_consumer/install_operation_executor.h"
#include "update_engine/payload_consumer/payload_metadata.h"
+#include "update_engine/payload_consumer/verity_writer_android.h"
#include "update_engine/update_metadata.pb.h"
-#include "xz.h"
DEFINE_string(payload, "", "Path to payload.bin");
+DEFINE_string(
+ input_dir,
+ "",
+ "Directory to read input images. Only required for incremental OTAs");
DEFINE_string(output_dir, "", "Directory to put output images");
DEFINE_int64(payload_offset,
0,
@@ -53,18 +59,55 @@ using chromeos_update_engine::PayloadMetadata;
namespace chromeos_update_engine {
+void WriteVerity(const PartitionUpdate& partition,
+ FileDescriptorPtr fd,
+ const size_t block_size) {
+ // 512KB buffer, arbitrary value. Larger buffers may improve performance.
+ static constexpr size_t BUFFER_SIZE = 1024 * 512;
+ if (partition.hash_tree_extent().num_blocks() == 0 &&
+ partition.fec_extent().num_blocks() == 0) {
+ return;
+ }
+ InstallPlan::Partition install_part;
+ install_part.block_size = block_size;
+ CHECK(install_part.ParseVerityConfig(partition));
+ VerityWriterAndroid writer;
+ CHECK(writer.Init(install_part));
+ std::array<uint8_t, BUFFER_SIZE> buffer;
+ const auto data_size =
+ install_part.hash_tree_data_offset + install_part.hash_tree_data_size;
+ size_t offset = 0;
+ while (offset < data_size) {
+ const auto bytes_to_read =
+ static_cast<ssize_t>(std::min(BUFFER_SIZE, data_size - offset));
+ ssize_t bytes_read;
+ CHECK(
+ utils::ReadAll(fd, buffer.data(), bytes_to_read, offset, &bytes_read));
+ CHECK_EQ(bytes_read, bytes_to_read)
+ << " Failed to read at offset " << offset << " "
+ << android::base::ErrnoNumberAsString(errno);
+ writer.Update(offset, buffer.data(), bytes_read);
+ offset += bytes_read;
+ }
+ CHECK(writer.Finalize(fd.get(), fd.get()));
+ return;
+}
+
bool ExtractImagesFromOTA(const DeltaArchiveManifest& manifest,
const PayloadMetadata& metadata,
int payload_fd,
size_t payload_offset,
+ std::string_view input_dir,
std::string_view output_dir,
const std::set<std::string>& partitions) {
InstallOperationExecutor executor(manifest.block_size());
const size_t data_begin = metadata.GetMetadataSize() +
metadata.GetMetadataSignatureSize() +
payload_offset;
- const base::FilePath path(
+ const base::FilePath output_dir_path(
base::StringPiece(output_dir.data(), output_dir.size()));
+ const base::FilePath input_dir_path(
+ base::StringPiece(input_dir.data(), input_dir.size()));
std::vector<unsigned char> blob;
for (const auto& partition : manifest.partitions()) {
if (!partitions.empty() &&
@@ -74,18 +117,39 @@ bool ExtractImagesFromOTA(const DeltaArchiveManifest& manifest,
LOG(INFO) << "Extracting partition " << partition.partition_name()
<< " size: " << partition.new_partition_info().size();
const auto output_path =
- path.Append(partition.partition_name() + ".img").value();
- auto fd =
+ output_dir_path.Append(partition.partition_name() + ".img").value();
+ const auto input_path =
+ input_dir_path.Append(partition.partition_name() + ".img").value();
+ auto out_fd =
std::make_shared<chromeos_update_engine::EintrSafeFileDescriptor>();
TEST_AND_RETURN_FALSE_ERRNO(
- fd->Open(output_path.c_str(), O_RDWR | O_CREAT, 0644));
+ out_fd->Open(output_path.c_str(), O_RDWR | O_CREAT, 0644));
+ auto in_fd =
+ std::make_shared<chromeos_update_engine::EintrSafeFileDescriptor>();
+ TEST_AND_RETURN_FALSE_ERRNO(in_fd->Open(input_path.c_str(), O_RDONLY));
+
for (const auto& op : partition.operations()) {
+ if (op.has_src_sha256_hash()) {
+ brillo::Blob actual_hash;
+ TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
+ in_fd, op.src_extents(), manifest.block_size(), &actual_hash));
+ CHECK_EQ(HexEncode(ToStringView(actual_hash)),
+ HexEncode(op.src_sha256_hash()));
+ }
+
blob.resize(op.data_length());
const auto op_data_offset = data_begin + op.data_offset();
ssize_t bytes_read = 0;
TEST_AND_RETURN_FALSE(utils::PReadAll(
payload_fd, blob.data(), blob.size(), op_data_offset, &bytes_read));
- auto direct_writer = std::make_unique<DirectExtentWriter>(fd);
+ if (op.has_data_sha256_hash()) {
+ brillo::Blob actual_hash;
+ TEST_AND_RETURN_FALSE(
+ HashCalculator::RawHashOfData(blob, &actual_hash));
+ CHECK_EQ(HexEncode(ToStringView(actual_hash)),
+ HexEncode(op.data_sha256_hash()));
+ }
+ auto direct_writer = std::make_unique<DirectExtentWriter>(out_fd);
if (op.type() == InstallOperation::ZERO) {
TEST_AND_RETURN_FALSE(executor.ExecuteZeroOrDiscardOperation(
op, std::move(direct_writer)));
@@ -94,12 +158,15 @@ bool ExtractImagesFromOTA(const DeltaArchiveManifest& manifest,
op.type() == InstallOperation::REPLACE_XZ) {
TEST_AND_RETURN_FALSE(executor.ExecuteReplaceOperation(
op, std::move(direct_writer), blob.data(), blob.size()));
+ } else if (op.type() == InstallOperation::SOURCE_COPY) {
+ TEST_AND_RETURN_FALSE(executor.ExecuteSourceCopyOperation(
+ op, std::move(direct_writer), in_fd));
} else {
- LOG(ERROR) << "Unsupported operation type: " << op.type() << ", "
- << InstallOperation::Type_Name(op.type());
- return false;
+ TEST_AND_RETURN_FALSE(executor.ExecuteDiffOperation(
+ op, std::move(direct_writer), in_fd, blob.data(), blob.size()));
}
}
+ WriteVerity(partition, out_fd, manifest.block_size());
int err =
truncate64(output_path.c_str(), partition.new_partition_info().size());
if (err) {
@@ -110,14 +177,32 @@ bool ExtractImagesFromOTA(const DeltaArchiveManifest& manifest,
TEST_AND_RETURN_FALSE(
HashCalculator::RawHashOfFile(output_path, &actual_hash));
CHECK_EQ(HexEncode(ToStringView(actual_hash)),
- HexEncode(partition.new_partition_info().hash()));
+ HexEncode(partition.new_partition_info().hash()))
+ << " Partition " << partition.partition_name()
+ << " hash mismatches. Either the source image or OTA package is "
+ "corrupted.";
}
return true;
}
} // namespace chromeos_update_engine
+namespace {
+
+bool IsIncrementalOTA(const DeltaArchiveManifest& manifest) {
+ for (const auto& part : manifest.partitions()) {
+ if (part.has_old_partition_info()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
int main(int argc, char* argv[]) {
+ gflags::SetUsageMessage(
+ "A tool to extract device images from Android OTA packages");
gflags::ParseCommandLineFlags(&argc, &argv, true);
xz_crc32_init();
auto tokens = android::base::Tokenize(FLAGS_partitions, ",");
@@ -172,10 +257,16 @@ int main(int argc, char* argv[]) {
LOG(ERROR) << "Failed to parse manifest!";
return 1;
}
+ if (IsIncrementalOTA(manifest) && FLAGS_input_dir.empty()) {
+ LOG(ERROR) << FLAGS_payload
+ << " is an incremental OTA, --input_dir parameter is required.";
+ return 1;
+ }
return !ExtractImagesFromOTA(manifest,
payload_metadata,
payload_fd,
FLAGS_payload_offset,
+ FLAGS_input_dir,
FLAGS_output_dir,
partitions);
}
diff --git a/payload_consumer/install_operation_executor.cc b/payload_consumer/install_operation_executor.cc
index 69ef9c19..cd6546f4 100644
--- a/payload_consumer/install_operation_executor.cc
+++ b/payload_consumer/install_operation_executor.cc
@@ -255,6 +255,7 @@ bool InstallOperationExecutor::ExecuteDiffOperation(
operation, std::move(writer), source_fd, data, count);
default:
LOG(ERROR) << "Unexpected operation type when executing diff ops "
+ << operation.type() << " "
<< operation.Type_Name(operation.type());
return false;
}
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc
index db0af4e2..91eb53b3 100644
--- a/payload_consumer/install_plan.cc
+++ b/payload_consumer/install_plan.cc
@@ -187,6 +187,44 @@ bool InstallPlan::Partition::operator==(
postinstall_optional == that.postinstall_optional);
}
+bool InstallPlan::Partition::ParseVerityConfig(
+ const PartitionUpdate& partition) {
+ if (partition.has_hash_tree_extent()) {
+ Extent extent = partition.hash_tree_data_extent();
+ hash_tree_data_offset = extent.start_block() * block_size;
+ hash_tree_data_size = extent.num_blocks() * block_size;
+ extent = partition.hash_tree_extent();
+ hash_tree_offset = extent.start_block() * block_size;
+ hash_tree_size = extent.num_blocks() * block_size;
+ uint64_t hash_tree_data_end = hash_tree_data_offset + hash_tree_data_size;
+ if (hash_tree_offset < hash_tree_data_end) {
+ LOG(ERROR) << "Invalid hash tree extents, hash tree data ends at "
+ << hash_tree_data_end << ", but hash tree starts at "
+ << hash_tree_offset;
+ return false;
+ }
+ hash_tree_algorithm = partition.hash_tree_algorithm();
+ hash_tree_salt.assign(partition.hash_tree_salt().begin(),
+ partition.hash_tree_salt().end());
+ }
+ if (partition.has_fec_extent()) {
+ Extent extent = partition.fec_data_extent();
+ fec_data_offset = extent.start_block() * block_size;
+ fec_data_size = extent.num_blocks() * block_size;
+ extent = partition.fec_extent();
+ fec_offset = extent.start_block() * block_size;
+ fec_size = extent.num_blocks() * block_size;
+ uint64_t fec_data_end = fec_data_offset + fec_data_size;
+ if (fec_offset < fec_data_end) {
+ LOG(ERROR) << "Invalid fec extents, fec data ends at " << fec_data_end
+ << ", but fec starts at " << fec_offset;
+ return false;
+ }
+ fec_roots = partition.fec_roots();
+ }
+ return true;
+}
+
template <typename PartitinoUpdateArray>
bool InstallPlan::ParseManifestToInstallPlan(
const PartitinoUpdateArray& partitions,
@@ -226,42 +264,11 @@ bool InstallPlan::ParseManifestToInstallPlan(
install_part.target_hash.assign(info.hash().begin(), info.hash().end());
install_part.block_size = block_size;
- if (partition.has_hash_tree_extent()) {
- Extent extent = partition.hash_tree_data_extent();
- install_part.hash_tree_data_offset = extent.start_block() * block_size;
- install_part.hash_tree_data_size = extent.num_blocks() * block_size;
- extent = partition.hash_tree_extent();
- install_part.hash_tree_offset = extent.start_block() * block_size;
- install_part.hash_tree_size = extent.num_blocks() * block_size;
- uint64_t hash_tree_data_end =
- install_part.hash_tree_data_offset + install_part.hash_tree_data_size;
- if (install_part.hash_tree_offset < hash_tree_data_end) {
- LOG(ERROR) << "Invalid hash tree extents, hash tree data ends at "
- << hash_tree_data_end << ", but hash tree starts at "
- << install_part.hash_tree_offset;
- *error = ErrorCode::kDownloadNewPartitionInfoError;
- return false;
- }
- install_part.hash_tree_algorithm = partition.hash_tree_algorithm();
- install_part.hash_tree_salt.assign(partition.hash_tree_salt().begin(),
- partition.hash_tree_salt().end());
- }
- if (partition.has_fec_extent()) {
- Extent extent = partition.fec_data_extent();
- install_part.fec_data_offset = extent.start_block() * block_size;
- install_part.fec_data_size = extent.num_blocks() * block_size;
- extent = partition.fec_extent();
- install_part.fec_offset = extent.start_block() * block_size;
- install_part.fec_size = extent.num_blocks() * block_size;
- uint64_t fec_data_end =
- install_part.fec_data_offset + install_part.fec_data_size;
- if (install_part.fec_offset < fec_data_end) {
- LOG(ERROR) << "Invalid fec extents, fec data ends at " << fec_data_end
- << ", but fec starts at " << install_part.fec_offset;
- *error = ErrorCode::kDownloadNewPartitionInfoError;
- return false;
- }
- install_part.fec_roots = partition.fec_roots();
+ if (!install_part.ParseVerityConfig(partition)) {
+ *error = ErrorCode::kDownloadNewPartitionInfoError;
+ LOG(INFO) << "Failed to parse partition `" << partition.partition_name()
+ << "` verity configs";
+ return false;
}
install_plan->partitions.push_back(install_part);
diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h
index 0278ea51..883aa60c 100644
--- a/payload_consumer/install_plan.h
+++ b/payload_consumer/install_plan.h
@@ -156,6 +156,8 @@ struct InstallPlan {
uint64_t fec_offset{0};
uint64_t fec_size{0};
uint32_t fec_roots{0};
+
+ bool ParseVerityConfig(const PartitionUpdate&);
};
std::vector<Partition> partitions;
diff --git a/payload_consumer/verity_writer_android.cc b/payload_consumer/verity_writer_android.cc
index ffa29446..91efa3e2 100644
--- a/payload_consumer/verity_writer_android.cc
+++ b/payload_consumer/verity_writer_android.cc
@@ -116,7 +116,8 @@ bool VerityWriterAndroid::Finalize(FileDescriptor* read_fd,
return false;
}
// All hash tree data blocks has been hashed, write hash tree to disk.
- LOG(INFO) << "Writing verity hash tree to " << partition_->target_path;
+ LOG(INFO) << "Writing verity hash tree to "
+ << partition_->readonly_target_path;
if (hash_tree_builder_) {
TEST_AND_RETURN_FALSE(hash_tree_builder_->BuildHashTree());
TEST_AND_RETURN_FALSE_ERRNO(
@@ -130,7 +131,7 @@ bool VerityWriterAndroid::Finalize(FileDescriptor* read_fd,
hash_tree_builder_.reset();
}
if (partition_->fec_size != 0) {
- LOG(INFO) << "Writing verity FEC to " << partition_->target_path;
+ LOG(INFO) << "Writing verity FEC to " << partition_->readonly_target_path;
TEST_AND_RETURN_FALSE(EncodeFEC(read_fd,
write_fd,
partition_->fec_data_offset,