author    Daniel Zheng <zhengdaniel@google.com>  2024-03-14 16:21:46 -0700
committer Daniel Zheng <zhengdaniel@google.com>  2024-03-21 10:52:37 -0700
commit    c60d2fc5665d7438bafc89e7f8008a338baff61e (patch)
tree      391265db9f44e2fda8440410a791052420905e00 /fs_mgr
parent    a8c6a92f4389604cdb400a4eb56ae98012625cf8 (diff)
download  core-c60d2fc5665d7438bafc89e7f8008a338baff61e.tar.gz
libsnapshot: chunk iov writes
Currently, if the iovec array we are trying to write has more than 1024 entries, the write fails with "invalid argument". This is because the pwritev() system call accepts at most IOV_MAX iovec entries per call (a device-dependent limit, 1024 here). With our increased cache size of 1 MiB or more (or if the user configures a large batch size), the iovec count can exceed IOV_MAX, and the write fails with an unhelpful error. We should chunk these writes to ensure they succeed.

Bug: 322279333
Test: cow_api_test + manual testing with large iov write sizes
Change-Id: Ia1fb53cbfc743cfcdfc7256ff9df075ad0c2dd38
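For reference, a minimal standalone sketch of the chunking approach described above. This is not the libsnapshot implementation; ChunkedPwritev and its parameters are hypothetical names used only for illustration.

    // Sketch: split one logical pwritev() into calls of at most IOV_MAX
    // iovec entries each, advancing the file offset by the bytes written.
    #include <limits.h>
    #include <sys/uio.h>

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    bool ChunkedPwritev(int fd, const std::vector<struct iovec>& iov, off_t offset) {
        size_t i = 0;
        off_t pos = offset;
        while (i < iov.size()) {
            // Never pass more than IOV_MAX entries to a single pwritev() call.
            const size_t chunk = std::min(iov.size() - i, static_cast<size_t>(IOV_MAX));
            // Byte count expected from this chunk, used to detect short writes.
            ssize_t expected = 0;
            for (size_t j = i; j < i + chunk; j++) {
                expected += static_cast<ssize_t>(iov[j].iov_len);
            }
            const ssize_t ret = pwritev(fd, iov.data() + i, static_cast<int>(chunk), pos);
            if (ret != expected) {
                // Treat failed or short writes as errors, as the change below does.
                return false;
            }
            pos += ret;
            i += chunk;
        }
        return true;
    }

On Linux, the limit can also be queried at runtime with sysconf(_SC_IOV_MAX) if relying on the compile-time IOV_MAX constant is not appropriate.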
Diffstat (limited to 'fs_mgr')
-rw-r--r--  fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp | 20
1 file changed, 17 insertions, 3 deletions
diff --git a/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp b/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp
index ea1da4b53..8cc9964bf 100644
--- a/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp
+++ b/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp
@@ -717,13 +717,27 @@ bool CowWriterV3::WriteOperation(std::span<const CowOperationV3> ops,
return false;
}
if (!data.empty()) {
- const auto ret = pwritev(fd_, data.data(), data.size(), next_data_pos_);
- if (ret != total_data_size) {
+ int total_written = 0;
+ int i = 0;
+ while (i < data.size()) {
+ int chunk = std::min(static_cast<int>(data.size() - i), IOV_MAX);
+
+ const auto ret = pwritev(fd_, data.data() + i, chunk, next_data_pos_ + total_written);
+ if (ret < 0) {
+ PLOG(ERROR) << "write failed chunk size of: " << chunk
+ << " at offset: " << next_data_pos_ + total_written << " " << errno;
+ return false;
+ }
+ total_written += ret;
+ i += chunk;
+ }
+ if (total_written != total_data_size) {
PLOG(ERROR) << "write failed for data of size: " << data.size()
- << " at offset: " << next_data_pos_ << " " << ret;
+ << " at offset: " << next_data_pos_ << " " << errno;
return false;
}
}
+
header_.op_count += ops.size();
next_data_pos_ += total_data_size;