author     Carlos Chinchilla <cachinchilla@google.com>  2022-02-23 14:14:19 -0800
committer  CQ Bot Account <pigweed-scoped@luci-project-accounts.iam.gserviceaccount.com>  2022-03-03 20:20:22 +0000
commit     e1b7e0ce5b5e21db9a32998848359bcb04b5231d (patch)
tree       8207e159c3e6930a82c093f52635e9a45d39cfa0 /pw_log_rpc
parent     251aaed336480e8808d5104163e6fe587dabbae9 (diff)
pw_log_rpc: Make log entries checks more readable
Use the same order in which entries are added to the MultiSink to verify the
entries received in unit tests. This avoids reversing the order of the
messages, which was needed before. Update unit tests to use the more
understandable order.

Change-Id: I2f6a183d25483eebb363572641ca06cb3f2f7f36
Reviewed-on: https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/85460
Reviewed-by: Armando Montanez <amontanez@google.com>
Commit-Queue: Carlos Chinchilla <cachinchilla@google.com>
Pigweed-Auto-Submit: Carlos Chinchilla <cachinchilla@google.com>
Diffstat (limited to 'pw_log_rpc')
-rw-r--r--  pw_log_rpc/log_service_test.cc              184
-rw-r--r--  pw_log_rpc/pw_log_rpc_private/test_utils.h   11
-rw-r--r--  pw_log_rpc/rpc_log_drain_test.cc             75
-rw-r--r--  pw_log_rpc/test_utils.cc                     28
4 files changed, 145 insertions, 153 deletions
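
For context, a minimal sketch of the verification pattern this change introduces, before the raw diff. The helper name VerifyResponsesInInsertionOrder and the `responses` parameter are illustrative only; TestLogEntry and VerifyLogEntries come from pw_log_rpc_private/test_utils.h as modified below, and kSampleMetadata, kSampleTimestamp, and kMessage stand in for the constants defined in log_service_test.cc. This is a sketch of how callers use the new signature, not code from the commit.

// Sketch only: assumes the pw_log_rpc test utilities shown in the diff below.
#include <span>
#include <string_view>

#include "gtest/gtest.h"
#include "pw_bytes/span.h"
#include "pw_containers/vector.h"
#include "pw_log_rpc_private/test_utils.h"
#include "pw_protobuf/decoder.h"

namespace pw::log_rpc {

// Hypothetical helper: `responses` stands in for context.responses() in the
// tests below.
void VerifyResponsesInInsertionOrder(std::span<const ConstByteSpan> responses) {
  // Expected entries are built in the same order they were added to the
  // MultiSink; no reversal step is needed anymore.
  Vector<TestLogEntry, 3> expected_messages;
  for (size_t i = 0; i < 3; ++i) {
    expected_messages.push_back(
        {.metadata = kSampleMetadata,
         .timestamp = kSampleTimestamp,
         .tokenized_data =
             std::as_bytes(std::span(std::string_view(kMessage)))});
  }

  // VerifyLogEntries() now reports the running entry count through an output
  // parameter instead of a return value and resumes comparison at
  // expected_messages[entries_found], so the same counter also serves as the
  // expected first-entry sequence ID.
  size_t entries_found = 0;
  uint32_t drop_count_found = 0;
  for (const ConstByteSpan& response : responses) {
    protobuf::Decoder entry_decoder(response);
    VerifyLogEntries(entry_decoder,
                     expected_messages,
                     /*expected_first_entry_sequence_id=*/entries_found,
                     entries_found,
                     drop_count_found);
  }
  EXPECT_EQ(entries_found, 3u);
  EXPECT_EQ(drop_count_found, 0u);
}

}  // namespace pw::log_rpc
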
diff --git a/pw_log_rpc/log_service_test.cc b/pw_log_rpc/log_service_test.cc
index 3afb5f482..33bb47c5b 100644
--- a/pw_log_rpc/log_service_test.cc
+++ b/pw_log_rpc/log_service_test.cc
@@ -217,19 +217,22 @@ TEST_F(LogServiceTest, StartAndEndStream) {
EXPECT_GE(context.responses().size(), 1u);
// Verify data in responses.
- Vector<TestLogEntry, total_entries> message_stack;
+ Vector<TestLogEntry, total_entries> expected_messages;
for (size_t i = 0; i < total_entries; ++i) {
- message_stack.push_back({.metadata = kSampleMetadata,
- .timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(
- std::span(std::string_view(kMessage)))});
+ expected_messages.push_back({.metadata = kSampleMetadata,
+ .timestamp = kSampleTimestamp,
+ .tokenized_data = std::as_bytes(
+ std::span(std::string_view(kMessage)))});
}
size_t entries_found = 0;
uint32_t drop_count_found = 0;
for (auto& response : context.responses()) {
protobuf::Decoder entry_decoder(response);
- entries_found += VerifyLogEntries(
- entry_decoder, message_stack, entries_found, drop_count_found);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found,
+ entries_found,
+ drop_count_found);
}
EXPECT_EQ(entries_found, total_entries);
EXPECT_EQ(drop_count_found, 0u);
@@ -263,23 +266,21 @@ TEST_F(LogServiceTest, HandleDropped) {
// There is at least 1 response with multiple log entries packed.
ASSERT_GE(context.responses().size(), 1u);
- // Add create expected messages in a stack to match the order they arrive
- // in.
- Vector<TestLogEntry, total_entries + 1> message_stack;
- size_t i = total_entries;
- for (; i > entries_before_drop; --i) {
- message_stack.push_back({.metadata = kSampleMetadata,
- .timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(
- std::span(std::string_view(kMessage)))});
+ Vector<TestLogEntry, total_entries + 1> expected_messages;
+ size_t i = 0;
+ for (; i < entries_before_drop; ++i) {
+ expected_messages.push_back({.metadata = kSampleMetadata,
+ .timestamp = kSampleTimestamp,
+ .tokenized_data = std::as_bytes(
+ std::span(std::string_view(kMessage)))});
}
- message_stack.push_back(
+ expected_messages.push_back(
{.metadata = kDropMessageMetadata, .dropped = total_drop_count});
- for (; i > 0; --i) {
- message_stack.push_back({.metadata = kSampleMetadata,
- .timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(
- std::span(std::string_view(kMessage)))});
+ for (; i < total_entries; ++i) {
+ expected_messages.push_back({.metadata = kSampleMetadata,
+ .timestamp = kSampleTimestamp,
+ .tokenized_data = std::as_bytes(
+ std::span(std::string_view(kMessage)))});
}
// Verify data in responses.
@@ -287,8 +288,11 @@ TEST_F(LogServiceTest, HandleDropped) {
uint32_t drop_count_found = 0;
for (auto& response : context.responses()) {
protobuf::Decoder entry_decoder(response);
- entries_found += VerifyLogEntries(
- entry_decoder, message_stack, entries_found, drop_count_found);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found,
+ entries_found,
+ drop_count_found);
}
EXPECT_EQ(entries_found, total_entries);
EXPECT_EQ(drop_count_found, total_drop_count);
@@ -328,22 +332,24 @@ TEST_F(LogServiceTest, HandleDroppedBetweenFilteredOutLogs) {
// There is at least 1 response with multiple log entries packed.
ASSERT_GE(context.responses().size(), 1u);
- // Add in the reverse order they are received.
- Vector<TestLogEntry, 2> message_stack;
- message_stack.push_back(
+ Vector<TestLogEntry, 2> expected_messages;
+ expected_messages.push_back(
+ {.metadata = kDropMessageMetadata, .dropped = total_drop_count});
+ expected_messages.push_back(
{.metadata = metadata,
.timestamp = kSampleTimestamp,
.tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))});
- message_stack.push_back(
- {.metadata = kDropMessageMetadata, .dropped = total_drop_count});
// Verify data in responses.
size_t entries_found = 0;
uint32_t drop_count_found = 0;
for (auto& response : context.responses()) {
protobuf::Decoder entry_decoder(response);
- entries_found += VerifyLogEntries(
- entry_decoder, message_stack, entries_found, drop_count_found);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found,
+ entries_found,
+ drop_count_found);
}
EXPECT_EQ(entries_found, 1u);
EXPECT_EQ(drop_count_found, total_drop_count);
@@ -372,22 +378,23 @@ TEST_F(LogServiceTest, HandleSmallLogEntryBuffer) {
ASSERT_EQ(context.status(), OkStatus());
ASSERT_EQ(context.responses().size(), 1u);
- // Add in the reverse order they are received.
- Vector<TestLogEntry, 2> message_stack;
- message_stack.push_back(
+ Vector<TestLogEntry, 2> expected_messages{
+ {.metadata = kDropMessageMetadata, .dropped = total_drop_count},
{.metadata = kSampleMetadata,
.timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))});
- message_stack.push_back(
- {.metadata = kDropMessageMetadata, .dropped = total_drop_count});
+ .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))},
+ };
// Verify data in responses.
size_t entries_found = 0;
uint32_t drop_count_found = 0;
for (auto& response : context.responses()) {
protobuf::Decoder entry_decoder(response);
- entries_found += VerifyLogEntries(
- entry_decoder, message_stack, entries_found, drop_count_found);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found,
+ entries_found,
+ drop_count_found);
}
// No messages fit the buffer, expect a drop message.
EXPECT_EQ(entries_found, 1u);
@@ -503,19 +510,22 @@ TEST_F(LogServiceTest, InterruptedLogStreamSendsDropCount) {
ASSERT_EQ(output.payloads<Logs::Listen>().size(), successful_packets_sent);
// Verify data in responses.
- Vector<TestLogEntry, max_entries> message_stack;
+ Vector<TestLogEntry, max_entries> expected_messages;
for (size_t i = 0; i < total_entries; ++i) {
- message_stack.push_back({.metadata = kSampleMetadata,
- .timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(
- std::span(std::string_view(kMessage)))});
+ expected_messages.push_back({.metadata = kSampleMetadata,
+ .timestamp = kSampleTimestamp,
+ .tokenized_data = std::as_bytes(
+ std::span(std::string_view(kMessage)))});
}
size_t entries_found = 0;
uint32_t drop_count_found = 0;
for (auto& response : output.payloads<Logs::Listen>()) {
protobuf::Decoder entry_decoder(response);
- entries_found += VerifyLogEntries(
- entry_decoder, message_stack, entries_found, drop_count_found);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found,
+ entries_found,
+ drop_count_found);
}
// Verify that not all the entries were sent.
@@ -531,30 +541,34 @@ TEST_F(LogServiceTest, InterruptedLogStreamSendsDropCount) {
EXPECT_EQ(drain.value()->Open(writer), OkStatus());
EXPECT_EQ(drain.value()->Flush(encoding_buffer_), OkStatus());
- // Add expected messages to the stack in the reverse order they are
- // received.
- message_stack.clear();
// One full packet was dropped. Since all messages are the same length,
// there are entries_found / successful_packets_sent per packet.
const uint32_t total_drop_count = entries_found / successful_packets_sent;
+ Vector<TestLogEntry, max_entries> expected_messages_after_reset;
+ expected_messages_after_reset.push_back(
+ {.metadata = kDropMessageMetadata, .dropped = total_drop_count});
+
const uint32_t remaining_entries = total_entries - total_drop_count;
for (size_t i = 0; i < remaining_entries; ++i) {
- message_stack.push_back({.metadata = kSampleMetadata,
- .timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(
- std::span(std::string_view(kMessage)))});
+ expected_messages_after_reset.push_back(
+ {.metadata = kSampleMetadata,
+ .timestamp = kSampleTimestamp,
+ .tokenized_data =
+ std::as_bytes(std::span(std::string_view(kMessage)))});
}
- message_stack.push_back(
- {.metadata = kDropMessageMetadata, .dropped = total_drop_count});
+ size_t entries_found_after_reset = 0;
for (auto& response : output.payloads<Logs::Listen>()) {
protobuf::Decoder entry_decoder(response);
- entries_found += VerifyLogEntries(entry_decoder,
- message_stack,
- entries_found + total_drop_count,
- drop_count_found);
+ uint32_t expected_sequence_id =
+ entries_found + entries_found_after_reset + total_drop_count;
+ VerifyLogEntries(entry_decoder,
+ expected_messages_after_reset,
+ expected_sequence_id,
+ entries_found_after_reset,
+ drop_count_found);
}
- EXPECT_EQ(entries_found, remaining_entries);
+ EXPECT_EQ(entries_found + entries_found_after_reset, remaining_entries);
EXPECT_EQ(drop_count_found, total_drop_count);
}
@@ -609,12 +623,12 @@ TEST_F(LogServiceTest, InterruptedLogStreamIgnoresErrors) {
// Verify that all messages were sent.
const uint32_t total_drop_count = total_entries - entries_found;
- Vector<TestLogEntry, max_entries> message_stack;
+ Vector<TestLogEntry, max_entries> expected_messages;
for (size_t i = 0; i < entries_found; ++i) {
- message_stack.push_back({.metadata = kSampleMetadata,
- .timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(
- std::span(std::string_view(kMessage)))});
+ expected_messages.push_back({.metadata = kSampleMetadata,
+ .timestamp = kSampleTimestamp,
+ .tokenized_data = std::as_bytes(
+ std::span(std::string_view(kMessage)))});
}
entries_found = 0;
@@ -622,15 +636,19 @@ TEST_F(LogServiceTest, InterruptedLogStreamIgnoresErrors) {
uint32_t i = 0;
for (; i < error_on_packet_count; ++i) {
protobuf::Decoder entry_decoder(output.payloads<Logs::Listen>()[i]);
- entries_found += VerifyLogEntries(
- entry_decoder, message_stack, entries_found, drop_count_found);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found,
+ entries_found,
+ drop_count_found);
}
for (; i < output.payloads<Logs::Listen>().size(); ++i) {
protobuf::Decoder entry_decoder(output.payloads<Logs::Listen>()[i]);
- entries_found += VerifyLogEntries(entry_decoder,
- message_stack,
- entries_found + total_drop_count,
- drop_count_found);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found + total_drop_count,
+ entries_found,
+ drop_count_found);
}
// This drain ignores errors and thus doesn't report drops on its own.
EXPECT_EQ(drop_count_found, 0u);
@@ -675,20 +693,17 @@ TEST_F(LogServiceTest, FilterLogs) {
ASSERT_TRUE(
AddLogEntry(kMessage, different_module_metadata, kSampleTimestamp).ok());
- // Add messages to the stack in the reverse order they are sent.
- Vector<TestLogEntry, 3> message_stack;
- message_stack.push_back(
- {.metadata = error_metadata,
+ Vector<TestLogEntry, 3> expected_messages{
+ {.metadata = info_metadata,
.timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))});
- message_stack.push_back(
+ .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))},
{.metadata = warn_metadata,
.timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))});
- message_stack.push_back(
- {.metadata = info_metadata,
+ .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))},
+ {.metadata = error_metadata,
.timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))});
+ .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))},
+ };
// Set up filter rules for drain at drains_[1].
RpcLogDrain& drain = drains_[1];
@@ -719,8 +734,11 @@ TEST_F(LogServiceTest, FilterLogs) {
uint32_t drop_count_found = 0;
for (auto& response : context.responses()) {
protobuf::Decoder entry_decoder(response);
- entries_found += VerifyLogEntries(
- entry_decoder, message_stack, entries_found, drop_count_found);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found,
+ entries_found,
+ drop_count_found);
}
EXPECT_EQ(entries_found, 3u);
EXPECT_EQ(drop_count_found, 0u);
diff --git a/pw_log_rpc/pw_log_rpc_private/test_utils.h b/pw_log_rpc/pw_log_rpc_private/test_utils.h
index bdc901167..d358d4ab8 100644
--- a/pw_log_rpc/pw_log_rpc_private/test_utils.h
+++ b/pw_log_rpc/pw_log_rpc_private/test_utils.h
@@ -35,11 +35,12 @@ void VerifyLogEntry(protobuf::Decoder& entry_decoder,
const TestLogEntry& expected_entry,
uint32_t& drop_count_out);
-// Verifies a stream of log entries and updates the total drop count found.
-size_t VerifyLogEntries(protobuf::Decoder& entries_decoder,
- Vector<TestLogEntry>& expected_entries_stack,
- uint32_t expected_first_entry_sequence_id,
- uint32_t& drop_count_out);
+// Verifies a stream of log entries and updates the total entry and drop counts.
+void VerifyLogEntries(protobuf::Decoder& entries_decoder,
+ const Vector<TestLogEntry>& expected_entries,
+ uint32_t expected_first_entry_sequence_id,
+ size_t& entries_count_out,
+ uint32_t& drop_count_out);
size_t CountLogEntries(protobuf::Decoder& entries_decoder);
diff --git a/pw_log_rpc/rpc_log_drain_test.cc b/pw_log_rpc/rpc_log_drain_test.cc
index 83918a7ac..b17d2707c 100644
--- a/pw_log_rpc/rpc_log_drain_test.cc
+++ b/pw_log_rpc/rpc_log_drain_test.cc
@@ -43,48 +43,6 @@ namespace {
static constexpr size_t kBufferSize =
RpcLogDrain::kMinEntrySizeWithoutPayload + 32;
-// Verifies a stream of log entries and updates the total drop count found.
-// expected_entries is expected to be in the same order that messages were
-// added to the multisink.
-void VerifyLogEntriesInCorrectOrder(
- protobuf::Decoder& entries_decoder,
- const Vector<TestLogEntry>& expected_entries,
- uint32_t expected_first_entry_sequence_id,
- uint32_t& drop_count_out) {
- size_t entries_found = 0;
-
- while (entries_decoder.Next().ok()) {
- if (static_cast<pw::log::LogEntries::Fields>(
- entries_decoder.FieldNumber()) ==
- log::LogEntries::Fields::ENTRIES) {
- ConstByteSpan entry;
- EXPECT_EQ(entries_decoder.ReadBytes(&entry), OkStatus());
- protobuf::Decoder entry_decoder(entry);
- if (expected_entries.empty()) {
- break;
- }
-
- ASSERT_LT(entries_found, expected_entries.size());
-
- // Keep track of entries and drops respective counts.
- uint32_t current_drop_count = 0;
- VerifyLogEntry(
- entry_decoder, expected_entries[entries_found], current_drop_count);
- drop_count_out += current_drop_count;
- if (current_drop_count == 0) {
- ++entries_found;
- }
- } else if (static_cast<pw::log::LogEntries::Fields>(
- entries_decoder.FieldNumber()) ==
- log::LogEntries::Fields::FIRST_ENTRY_SEQUENCE_ID) {
- uint32_t first_entry_sequence_id = 0;
- EXPECT_EQ(entries_decoder.ReadUint32(&first_entry_sequence_id),
- OkStatus());
- EXPECT_EQ(expected_first_entry_sequence_id, first_entry_sequence_id);
- }
- }
-}
-
TEST(RpcLogDrain, TryFlushDrainWithClosedWriter) {
// Drain without a writer.
const uint32_t drain_id = 1;
@@ -270,8 +228,6 @@ class TrickleTest : public ::testing::Test {
multisink_.HandleEntry(encoded_log_result.value());
}
- // VerifyLogEntriesInCorrectOrder() expects logs to be in the opposite
- // direction compared to when they were added to the multisink.
void AddLogEntries(const Vector<TestLogEntry>& entries) {
for (const TestLogEntry& entry : entries) {
AddLogEntry(entry);
@@ -325,11 +281,13 @@ TEST_F(TrickleTest, EntriesAreFlushedToSinglePayload) {
EXPECT_EQ(payloads.size(), 1u);
uint32_t drop_count = 0;
+ size_t entries_count = 0;
protobuf::Decoder payload_decoder(payloads[0]);
payload_decoder.Reset(payloads[0]);
- VerifyLogEntriesInCorrectOrder(
- payload_decoder, kExpectedEntries, 0, drop_count);
+ VerifyLogEntries(
+ payload_decoder, kExpectedEntries, 0, entries_count, drop_count);
EXPECT_EQ(drop_count, 0u);
+ EXPECT_EQ(entries_count, 3u);
}
TEST_F(TrickleTest, ManyLogsOverflowToNextPayload) {
@@ -361,16 +319,20 @@ TEST_F(TrickleTest, ManyLogsOverflowToNextPayload) {
ASSERT_EQ(payloads.size(), 2u);
uint32_t drop_count = 0;
+ size_t entries_count = 0;
protobuf::Decoder payload_decoder(payloads[0]);
payload_decoder.Reset(payloads[0]);
- VerifyLogEntriesInCorrectOrder(
- payload_decoder, kFirstFlushedBundle, 0, drop_count);
+ VerifyLogEntries(
+ payload_decoder, kFirstFlushedBundle, 0, entries_count, drop_count);
EXPECT_EQ(drop_count, 0u);
+ EXPECT_EQ(entries_count, 3u);
+ entries_count = 0;
payload_decoder.Reset(payloads[1]);
- VerifyLogEntriesInCorrectOrder(
- payload_decoder, kSecondFlushedBundle, 3, drop_count);
+ VerifyLogEntries(
+ payload_decoder, kSecondFlushedBundle, 3, entries_count, drop_count);
EXPECT_EQ(drop_count, 0u);
+ EXPECT_EQ(entries_count, 3u);
}
TEST_F(TrickleTest, LimitedFlushOverflowsToNextPayload) {
@@ -406,22 +368,27 @@ TEST_F(TrickleTest, LimitedFlushOverflowsToNextPayload) {
output_.payloads<log::pw_rpc::raw::Logs::Listen>(kDrainChannelId);
ASSERT_EQ(first_flush_payloads.size(), 1u);
uint32_t drop_count = 0;
+ size_t entries_count = 0;
protobuf::Decoder payload_decoder(first_flush_payloads[0]);
payload_decoder.Reset(first_flush_payloads[0]);
- VerifyLogEntriesInCorrectOrder(
- payload_decoder, kFirstFlushedBundle, 0, drop_count);
+ VerifyLogEntries(
+ payload_decoder, kFirstFlushedBundle, 0, entries_count, drop_count);
+ EXPECT_EQ(entries_count, 3u);
// An additional flush should produce another payload.
min_delay = drains_[0].Trickle(channel_encode_buffer_);
EXPECT_EQ(min_delay.has_value(), false);
drop_count = 0;
+ entries_count = 0;
+
rpc::PayloadsView second_flush_payloads =
output_.payloads<log::pw_rpc::raw::Logs::Listen>(kDrainChannelId);
ASSERT_EQ(second_flush_payloads.size(), 2u);
payload_decoder.Reset(second_flush_payloads[1]);
- VerifyLogEntriesInCorrectOrder(
- payload_decoder, kSecondFlushedBundle, 3, drop_count);
+ VerifyLogEntries(
+ payload_decoder, kSecondFlushedBundle, 3, entries_count, drop_count);
EXPECT_EQ(drop_count, 0u);
+ EXPECT_EQ(entries_count, 3u);
}
} // namespace
diff --git a/pw_log_rpc/test_utils.cc b/pw_log_rpc/test_utils.cc
index 452156b4b..df84670e5 100644
--- a/pw_log_rpc/test_utils.cc
+++ b/pw_log_rpc/test_utils.cc
@@ -100,12 +100,16 @@ void VerifyLogEntry(protobuf::Decoder& entry_decoder,
}
}
-// Verifies a stream of log entries and updates the total drop count found.
-size_t VerifyLogEntries(protobuf::Decoder& entries_decoder,
- Vector<TestLogEntry>& expected_entries_stack,
- uint32_t expected_first_entry_sequence_id,
- uint32_t& drop_count_out) {
- size_t entries_found = 0;
+// Compares an encoded LogEntry's fields against the expected sequence ID and
+// LogEntries, and updates the total entry and drop counts. Starts comparing at
+// `expected_entries[entries_count_out]`. `expected_entries` must be in the same
+// order that messages were added to the MultiSink.
+void VerifyLogEntries(protobuf::Decoder& entries_decoder,
+ const Vector<TestLogEntry>& expected_entries,
+ uint32_t expected_first_entry_sequence_id,
+ size_t& entries_count_out,
+ uint32_t& drop_count_out) {
+ size_t entry_index = entries_count_out;
while (entries_decoder.Next().ok()) {
if (static_cast<pw::log::LogEntries::Fields>(
entries_decoder.FieldNumber()) ==
@@ -113,18 +117,21 @@ size_t VerifyLogEntries(protobuf::Decoder& entries_decoder,
ConstByteSpan entry;
EXPECT_EQ(entries_decoder.ReadBytes(&entry), OkStatus());
protobuf::Decoder entry_decoder(entry);
- if (expected_entries_stack.empty()) {
+ if (expected_entries.empty()) {
break;
}
+
+ ASSERT_LT(entry_index, expected_entries.size());
+
// Keep track of entries and drops respective counts.
uint32_t current_drop_count = 0;
VerifyLogEntry(
- entry_decoder, expected_entries_stack.back(), current_drop_count);
+ entry_decoder, expected_entries[entry_index], current_drop_count);
+ ++entry_index;
drop_count_out += current_drop_count;
if (current_drop_count == 0) {
- ++entries_found;
+ ++entries_count_out;
}
- expected_entries_stack.pop_back();
} else if (static_cast<pw::log::LogEntries::Fields>(
entries_decoder.FieldNumber()) ==
log::LogEntries::Fields::FIRST_ENTRY_SEQUENCE_ID) {
@@ -134,7 +141,6 @@ size_t VerifyLogEntries(protobuf::Decoder& entries_decoder,
EXPECT_EQ(expected_first_entry_sequence_id, first_entry_sequence_id);
}
}
- return entries_found;
}
size_t CountLogEntries(protobuf::Decoder& entries_decoder) {