summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTorne (Richard Coles) <torne@google.com>2014-02-21 11:18:28 +0000
committerTorne (Richard Coles) <torne@google.com>2014-02-21 11:18:28 +0000
commitf63254a40b1d7bf1e6ba3631d2be69cbab5d537e (patch)
treef229823a6954d713f2f8fb3ce537b9465885f192
parent60aae0634a605dcb5c97e8661fd8c8d1cea50bc4 (diff)
parent3e4cdeca70f8a9499fb746fa4e9a094a449dda86 (diff)
downloadwebrtc-f63254a40b1d7bf1e6ba3631d2be69cbab5d537e.tar.gz
Merge third_party/webrtc from https://chromium.googlesource.com/external/webrtc/trunk/webrtc.git at 3e4cdeca70f8a9499fb746fa4e9a094a449dda86
This commit was generated by merge_from_chromium.py. Change-Id: I661b548f23f598e1cb97e5fd3898c7835a1428d6
-rw-r--r--modules/rtp_rtcp/interface/rtp_rtcp_defines.h3
-rw-r--r--modules/rtp_rtcp/source/fec_test_helper.h3
-rw-r--r--modules/rtp_rtcp/source/forward_error_correction.cc4
-rw-r--r--modules/rtp_rtcp/source/rtp_sender.cc6
-rw-r--r--modules/rtp_rtcp/source/rtp_sender_video.cc10
-rw-r--r--test/configurable_frame_size_encoder.cc4
-rw-r--r--video/video_send_stream_tests.cc168
-rw-r--r--video_engine/overuse_frame_detector.cc34
-rw-r--r--video_engine/overuse_frame_detector.h19
-rw-r--r--video_engine/overuse_frame_detector_unittest.cc19
10 files changed, 224 insertions, 46 deletions
diff --git a/modules/rtp_rtcp/interface/rtp_rtcp_defines.h b/modules/rtp_rtcp/interface/rtp_rtcp_defines.h
index b66e927a..6f99f938 100644
--- a/modules/rtp_rtcp/interface/rtp_rtcp_defines.h
+++ b/modules/rtp_rtcp/interface/rtp_rtcp_defines.h
@@ -27,6 +27,9 @@ namespace webrtc {
const int kVideoPayloadTypeFrequency = 90000;
+// Minimum RTP header size in bytes.
+const uint8_t kRtpHeaderSize = 12;
+
struct AudioPayload
{
uint32_t frequency;
diff --git a/modules/rtp_rtcp/source/fec_test_helper.h b/modules/rtp_rtcp/source/fec_test_helper.h
index e3c3581b..e6426ea7 100644
--- a/modules/rtp_rtcp/source/fec_test_helper.h
+++ b/modules/rtp_rtcp/source/fec_test_helper.h
@@ -17,9 +17,6 @@
namespace webrtc {
enum {
- kRtpHeaderSize = 12
-};
-enum {
kFecPayloadType = 96
};
enum {
diff --git a/modules/rtp_rtcp/source/forward_error_correction.cc b/modules/rtp_rtcp/source/forward_error_correction.cc
index 189e1b05..af2cb9e8 100644
--- a/modules/rtp_rtcp/source/forward_error_correction.cc
+++ b/modules/rtp_rtcp/source/forward_error_correction.cc
@@ -17,15 +17,13 @@
#include <algorithm>
#include <iterator>
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/forward_error_correction_internal.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/system_wrappers/interface/trace.h"
namespace webrtc {
-// Minimum RTP header size in bytes.
-const uint8_t kRtpHeaderSize = 12;
-
// FEC header size in bytes.
const uint8_t kFecHeaderSize = 10;
diff --git a/modules/rtp_rtcp/source/rtp_sender.cc b/modules/rtp_rtcp/source/rtp_sender.cc
index 0929fd96..0711356e 100644
--- a/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/modules/rtp_rtcp/source/rtp_sender.cc
@@ -301,9 +301,9 @@ uint16_t RTPSender::MaxDataPayloadLength() const {
if (audio_configured_) {
return max_payload_length_ - RTPHeaderLength();
} else {
- return max_payload_length_ - RTPHeaderLength() -
- video_->FECPacketOverhead() - ((rtx_) ? 2 : 0);
- // Include the FEC/ULP/RED overhead.
+ return max_payload_length_ - RTPHeaderLength() // RTP overhead.
+ - video_->FECPacketOverhead() // FEC/ULP/RED overhead.
+ - ((rtx_) ? 2 : 0); // RTX overhead.
}
}
diff --git a/modules/rtp_rtcp/source/rtp_sender_video.cc b/modules/rtp_rtcp/source/rtp_sender_video.cc
index 7b36f7cc..10bc252b 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -14,6 +14,7 @@
#include <stdlib.h>
#include <string.h>
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/producer_fec.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h"
@@ -253,8 +254,13 @@ RTPSenderVideo::FECPacketOverhead() const
{
if (_fecEnabled)
{
- return ForwardErrorCorrection::PacketOverhead() +
- REDForFECHeaderLength;
+ // Overhead is FEC headers plus RED for FEC header plus anything in RTP
+      // header beyond the 12-byte base header (CSRC list, extensions...).
+      // The reason for the header extensions to be included here is that
+ // from an FEC viewpoint, they are part of the payload to be protected.
+ // (The base RTP header is already protected by the FEC header.)
+ return ForwardErrorCorrection::PacketOverhead() + REDForFECHeaderLength +
+ (_rtpSender.RTPHeaderLength() - kRtpHeaderSize);
}
return 0;
}
diff --git a/test/configurable_frame_size_encoder.cc b/test/configurable_frame_size_encoder.cc
index 0046f561..b246da35 100644
--- a/test/configurable_frame_size_encoder.cc
+++ b/test/configurable_frame_size_encoder.cc
@@ -49,7 +49,9 @@ int32_t ConfigurableFrameSizeEncoder::Encode(
encodedImage._timeStamp = inputImage.timestamp();
encodedImage.capture_time_ms_ = inputImage.render_time_ms();
RTPFragmentationHeader* fragmentation = NULL;
- callback_->Encoded(encodedImage, codecSpecificInfo, fragmentation);
+ CodecSpecificInfo specific;
+ memset(&specific, 0, sizeof(specific));
+ callback_->Encoded(encodedImage, &specific, fragmentation);
return WEBRTC_VIDEO_CODEC_OK;
}
diff --git a/video/video_send_stream_tests.cc b/video/video_send_stream_tests.cc
index d3333e0f..f0c190ea 100644
--- a/video/video_send_stream_tests.cc
+++ b/video/video_send_stream_tests.cc
@@ -33,6 +33,8 @@
namespace webrtc {
+enum VideoFormat { kGeneric, kVP8, };
+
class VideoSendStreamTest : public ::testing::Test {
public:
VideoSendStreamTest()
@@ -75,6 +77,8 @@ class VideoSendStreamTest : public ::testing::Test {
uint8_t retransmit_payload_type,
bool enable_pacing);
+ void TestPacketFragmentationSize(VideoFormat format, bool with_fec);
+
void SendsSetSsrcs(size_t num_ssrcs, bool send_single_ssrc_first);
enum { kNumSendSsrcs = 3 };
@@ -587,44 +591,84 @@ TEST_F(VideoSendStreamTest, RetransmitsNackOverRtxWithPacing) {
TestNackRetransmission(kSendRtxSsrc, kSendRtxPayloadType, true);
}
-TEST_F(VideoSendStreamTest, FragmentsAccordingToMaxPacketSize) {
+void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format,
+ bool with_fec) {
+ static const int kRedPayloadType = 118;
+ static const int kUlpfecPayloadType = 119;
// Observer that verifies that the expected number of packets and bytes
// arrive for each frame size, from start_size to stop_size.
class FrameFragmentationObserver : public test::RtpRtcpObserver,
public EncodedFrameObserver {
public:
- FrameFragmentationObserver(size_t max_packet_size,
+ FrameFragmentationObserver(uint32_t max_packet_size,
uint32_t start_size,
uint32_t stop_size,
- test::ConfigurableFrameSizeEncoder* encoder)
- : RtpRtcpObserver(30 * 1000),
+ test::ConfigurableFrameSizeEncoder* encoder,
+ bool test_generic_packetization,
+ bool use_fec)
+ : RtpRtcpObserver(120 * 1000), // Timeout after two minutes.
+ transport_adapter_(SendTransport()),
+ encoder_(encoder),
max_packet_size_(max_packet_size),
+ stop_size_(stop_size),
+ test_generic_packetization_(test_generic_packetization),
+ use_fec_(use_fec),
+ packet_count_(0),
accumulated_size_(0),
accumulated_payload_(0),
- stop_size_(stop_size),
+ fec_packet_received_(false),
current_size_rtp_(start_size),
- current_size_frame_(start_size),
- encoder_(encoder) {
+ current_size_frame_(start_size) {
// Fragmentation required, this test doesn't make sense without it.
assert(stop_size > max_packet_size);
+ transport_adapter_.Enable();
}
- virtual Action OnSendRtp(const uint8_t* packet, size_t length) OVERRIDE {
+ virtual Action OnSendRtp(const uint8_t* packet, size_t size) OVERRIDE {
+ uint32_t length = static_cast<int>(size);
RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, static_cast<int>(length), &header));
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
EXPECT_LE(length, max_packet_size_);
+ if (use_fec_) {
+ uint8_t payload_type = packet[header.headerLength];
+ bool is_fec = header.payloadType == kRedPayloadType &&
+ payload_type == kUlpfecPayloadType;
+ if (is_fec) {
+ fec_packet_received_ = true;
+ return SEND_PACKET;
+ }
+ }
+
accumulated_size_ += length;
- // Payload size = packet size - minus RTP header, padding and one byte
- // generic header.
- accumulated_payload_ +=
- length - (header.headerLength + header.paddingLength + 1);
+
+ if (use_fec_)
+ TriggerLossReport(header);
+
+ if (test_generic_packetization_) {
+ uint32_t overhead = header.headerLength + header.paddingLength +
+ (1 /* Generic header */);
+ if (use_fec_)
+ overhead += 1; // RED for FEC header.
+ accumulated_payload_ += length - overhead;
+ }
// Marker bit set indicates last packet of a frame.
if (header.markerBit) {
+ if (use_fec_ && accumulated_payload_ == current_size_rtp_ - 1) {
+ // With FEC enabled, frame size is incremented asynchronously, so
+ // "old" frames one byte too small may arrive. Accept, but don't
+ // increase expected frame size.
+ accumulated_size_ = 0;
+ accumulated_payload_ = 0;
+ return SEND_PACKET;
+ }
+
EXPECT_GE(accumulated_size_, current_size_rtp_);
- EXPECT_EQ(accumulated_payload_, current_size_rtp_);
+ if (test_generic_packetization_) {
+ EXPECT_EQ(current_size_rtp_, accumulated_payload_);
+ }
// Last packet of frame; reset counters.
accumulated_size_ = 0;
@@ -633,32 +677,68 @@ TEST_F(VideoSendStreamTest, FragmentsAccordingToMaxPacketSize) {
// Done! (Don't increase size again, might arrive more @ stop_size).
observation_complete_->Set();
} else {
- // Increase next expected frame size.
- ++current_size_rtp_;
+ // Increase next expected frame size. If testing with FEC, make sure
+ // a FEC packet has been received for this frame size before
+ // proceeding, to make sure that redundancy packets don't exceed
+ // size limit.
+ if (!use_fec_) {
+ ++current_size_rtp_;
+ } else if (fec_packet_received_) {
+ fec_packet_received_ = false;
+ ++current_size_rtp_;
+ ++current_size_frame_;
+ }
}
}
return SEND_PACKET;
}
+ void TriggerLossReport(const RTPHeader& header) {
+ // Send lossy receive reports to trigger FEC enabling.
+ if (packet_count_++ % 2 != 0) {
+ // Receive statistics reporting having lost 50% of the packets.
+ FakeReceiveStatistics lossy_receive_stats(
+ kSendSsrc, header.sequenceNumber, packet_count_ / 2, 127);
+ RTCPSender rtcp_sender(
+ 0, false, Clock::GetRealTimeClock(), &lossy_receive_stats);
+ EXPECT_EQ(0, rtcp_sender.RegisterSendTransport(&transport_adapter_));
+
+ rtcp_sender.SetRTCPStatus(kRtcpNonCompound);
+ rtcp_sender.SetRemoteSSRC(kSendSsrc);
+
+ RTCPSender::FeedbackState feedback_state;
+
+ EXPECT_EQ(0, rtcp_sender.SendRTCP(feedback_state, kRtcpRr));
+ }
+ }
+
virtual void EncodedFrameCallback(const EncodedFrame& encoded_frame) {
// Increase frame size for next encoded frame, in the context of the
// encoder thread.
- if (current_size_frame_ < stop_size_) {
+ if (!use_fec_ &&
+ current_size_frame_.Value() < static_cast<int32_t>(stop_size_)) {
++current_size_frame_;
}
- encoder_->SetFrameSize(current_size_frame_);
+ encoder_->SetFrameSize(current_size_frame_.Value());
}
private:
- size_t max_packet_size_;
- size_t accumulated_size_;
- size_t accumulated_payload_;
+ internal::TransportAdapter transport_adapter_;
+ test::ConfigurableFrameSizeEncoder* const encoder_;
+
+ const uint32_t max_packet_size_;
+ const uint32_t stop_size_;
+ const bool test_generic_packetization_;
+ const bool use_fec_;
+
+ uint32_t packet_count_;
+ uint32_t accumulated_size_;
+ uint32_t accumulated_payload_;
+ bool fec_packet_received_;
- uint32_t stop_size_;
uint32_t current_size_rtp_;
- uint32_t current_size_frame_;
- test::ConfigurableFrameSizeEncoder* encoder_;
+ Atomic32 current_size_frame_;
};
// Use a fake encoder to output a frame of every size in the range [90, 290],
@@ -668,21 +748,59 @@ TEST_F(VideoSendStreamTest, FragmentsAccordingToMaxPacketSize) {
static const uint32_t start = 90;
static const uint32_t stop = 290;
+ // Don't auto increment if FEC is used; continue sending frame size until
+ // a FEC packet has been received.
test::ConfigurableFrameSizeEncoder encoder(stop);
encoder.SetFrameSize(start);
- FrameFragmentationObserver observer(kMaxPacketSize, start, stop, &encoder);
+ FrameFragmentationObserver observer(
+ kMaxPacketSize, start, stop, &encoder, format == kGeneric, with_fec);
Call::Config call_config(observer.SendTransport());
scoped_ptr<Call> call(Call::Create(call_config));
+ observer.SetReceivers(call->Receiver(), NULL);
+
VideoSendStream::Config send_config = GetSendTestConfig(call.get(), 1);
+ if (with_fec) {
+ send_config.rtp.fec.red_payload_type = kRedPayloadType;
+ send_config.rtp.fec.ulpfec_payload_type = kUlpfecPayloadType;
+ }
+
+ if (format == kVP8) {
+ strcpy(send_config.codec.plName, "VP8");
+ send_config.codec.codecType = kVideoCodecVP8;
+ }
+ send_config.pacing = false;
send_config.encoder = &encoder;
send_config.rtp.max_packet_size = kMaxPacketSize;
send_config.post_encode_callback = &observer;
+ // Add an extension header, to make the RTP header larger than the base
+ // length of 12 bytes.
+ static const uint8_t kAbsSendTimeExtensionId = 13;
+ send_config.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAbsSendTime, kAbsSendTimeExtensionId));
+
RunSendTest(call.get(), send_config, &observer);
}
+// TODO(sprang): Is there any way of speeding up these tests?
+TEST_F(VideoSendStreamTest, FragmentsGenericAccordingToMaxPacketSize) {
+ TestPacketFragmentationSize(kGeneric, false);
+}
+
+TEST_F(VideoSendStreamTest, FragmentsGenericAccordingToMaxPacketSizeWithFec) {
+ TestPacketFragmentationSize(kGeneric, true);
+}
+
+TEST_F(VideoSendStreamTest, FragmentsVp8AccordingToMaxPacketSize) {
+ TestPacketFragmentationSize(kVP8, false);
+}
+
+TEST_F(VideoSendStreamTest, FragmentsVp8AccordingToMaxPacketSizeWithFec) {
+ TestPacketFragmentationSize(kVP8, true);
+}
+
TEST_F(VideoSendStreamTest, CanChangeSendCodec) {
static const uint8_t kFirstPayloadType = 121;
static const uint8_t kSecondPayloadType = 122;
diff --git a/video_engine/overuse_frame_detector.cc b/video_engine/overuse_frame_detector.cc
index a5e2d6f5..21aa7690 100644
--- a/video_engine/overuse_frame_detector.cc
+++ b/video_engine/overuse_frame_detector.cc
@@ -29,11 +29,16 @@ namespace webrtc {
namespace {
const int64_t kProcessIntervalMs = 5000;
+// Number of initial process times before reporting.
+const int64_t kMinProcessCountBeforeReporting = 3;
+
+const int64_t kFrameTimeoutIntervalMs = 1500;
+
// Consecutive checks above threshold to trigger overuse.
const int kConsecutiveChecksAboveThreshold = 2;
// Minimum samples required to perform a check.
-const size_t kMinFrameSampleCount = 15;
+const size_t kMinFrameSampleCount = 120;
// Weight factor to apply to the standard deviation.
const float kWeightFactor = 0.997f;
@@ -238,9 +243,11 @@ OveruseFrameDetector::OveruseFrameDetector(Clock* clock,
: crit_(CriticalSectionWrapper::CreateCriticalSection()),
normaluse_stddev_ms_(normaluse_stddev_ms),
overuse_stddev_ms_(overuse_stddev_ms),
+ min_process_count_before_reporting_(kMinProcessCountBeforeReporting),
observer_(NULL),
clock_(clock),
next_process_time_(clock_->TimeInMilliseconds()),
+ num_process_times_(0),
last_capture_time_(0),
last_overuse_time_(0),
checks_above_threshold_(0),
@@ -288,26 +295,34 @@ int32_t OveruseFrameDetector::TimeUntilNextProcess() {
return next_process_time_ - clock_->TimeInMilliseconds();
}
+bool OveruseFrameDetector::DetectFrameTimeout(int64_t now) const {
+ if (last_capture_time_ == 0) {
+ return false;
+ }
+ return (now - last_capture_time_) > kFrameTimeoutIntervalMs;
+}
+
void OveruseFrameDetector::FrameCaptured(int width, int height) {
CriticalSectionScoped cs(crit_.get());
+ int64_t now = clock_->TimeInMilliseconds();
int num_pixels = width * height;
- if (num_pixels != num_pixels_) {
+ if (num_pixels != num_pixels_ || DetectFrameTimeout(now)) {
// Frame size changed, reset statistics.
num_pixels_ = num_pixels;
capture_deltas_.Reset();
last_capture_time_ = 0;
capture_queue_delay_->ClearFrames();
+ num_process_times_ = 0;
}
- int64_t time = clock_->TimeInMilliseconds();
if (last_capture_time_ != 0) {
- capture_deltas_.AddSample(time - last_capture_time_);
- encode_usage_->AddSample(time - last_capture_time_);
+ capture_deltas_.AddSample(now - last_capture_time_);
+ encode_usage_->AddSample(now - last_capture_time_);
}
- last_capture_time_ = time;
+ last_capture_time_ = now;
- capture_queue_delay_->FrameCaptured(time);
+ capture_queue_delay_->FrameCaptured(now);
}
void OveruseFrameDetector::FrameProcessingStarted() {
@@ -342,6 +357,7 @@ int32_t OveruseFrameDetector::Process() {
int64_t diff_ms = now - next_process_time_ + kProcessIntervalMs;
next_process_time_ = now + kProcessIntervalMs;
+ ++num_process_times_;
// Don't trigger overuse unless we've seen a certain number of frames.
if (capture_deltas_.Count() < kMinFrameSampleCount)
@@ -349,6 +365,10 @@ int32_t OveruseFrameDetector::Process() {
capture_queue_delay_->CalculateDelayChange(diff_ms);
+ if (num_process_times_ <= min_process_count_before_reporting_) {
+ return 0;
+ }
+
if (IsOverusing()) {
// If the last thing we did was going up, and now have to back down, we need
// to check if this peak was short. If so we should back off to avoid going
diff --git a/video_engine/overuse_frame_detector.h b/video_engine/overuse_frame_detector.h
index 5dbb48a2..c9f691cc 100644
--- a/video_engine/overuse_frame_detector.h
+++ b/video_engine/overuse_frame_detector.h
@@ -14,6 +14,7 @@
#include "webrtc/modules/interface/module.h"
#include "webrtc/system_wrappers/interface/constructor_magic.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/test/testsupport/gtest_prod_util.h"
namespace webrtc {
@@ -104,6 +105,19 @@ class OveruseFrameDetector : public Module {
virtual int32_t Process() OVERRIDE;
private:
+ FRIEND_TEST_ALL_PREFIXES(OveruseFrameDetectorTest, TriggerOveruse);
+ FRIEND_TEST_ALL_PREFIXES(OveruseFrameDetectorTest, OveruseAndRecover);
+ FRIEND_TEST_ALL_PREFIXES(OveruseFrameDetectorTest, DoubleOveruseAndRecover);
+ FRIEND_TEST_ALL_PREFIXES(
+ OveruseFrameDetectorTest, TriggerNormalUsageWithMinProcessCount);
+ FRIEND_TEST_ALL_PREFIXES(
+ OveruseFrameDetectorTest, ConstantOveruseGivesNoNormalUsage);
+ FRIEND_TEST_ALL_PREFIXES(OveruseFrameDetectorTest, LastCaptureJitter);
+
+ void set_min_process_count_before_reporting(int64_t count) {
+ min_process_count_before_reporting_ = count;
+ }
+
class EncodeTimeAvg;
class EncodeUsage;
class CaptureQueueDelay;
@@ -111,6 +125,8 @@ class OveruseFrameDetector : public Module {
bool IsOverusing();
bool IsUnderusing(int64_t time_now);
+ bool DetectFrameTimeout(int64_t now) const;
+
// Protecting all members.
scoped_ptr<CriticalSectionWrapper> crit_;
@@ -118,11 +134,14 @@ class OveruseFrameDetector : public Module {
const float normaluse_stddev_ms_;
const float overuse_stddev_ms_;
+ int64_t min_process_count_before_reporting_;
+
// Observer getting overuse reports.
CpuOveruseObserver* observer_;
Clock* clock_;
int64_t next_process_time_;
+ int64_t num_process_times_;
Statistics capture_deltas_;
int64_t last_capture_time_;
diff --git a/video_engine/overuse_frame_detector_unittest.cc b/video_engine/overuse_frame_detector_unittest.cc
index f974f288..8d45fdb2 100644
--- a/video_engine/overuse_frame_detector_unittest.cc
+++ b/video_engine/overuse_frame_detector_unittest.cc
@@ -50,11 +50,11 @@ class OveruseFrameDetectorTest : public ::testing::Test {
EXPECT_CALL(*(observer_.get()), OveruseDetected()).Times(1);
- InsertFramesWithInterval(50, regular_frame_interval_ms);
+ InsertFramesWithInterval(200, regular_frame_interval_ms);
InsertFramesWithInterval(50, 110);
overuse_detector_->Process();
- InsertFramesWithInterval(50, regular_frame_interval_ms);
+ InsertFramesWithInterval(200, regular_frame_interval_ms);
InsertFramesWithInterval(50, 110);
overuse_detector_->Process();
}
@@ -74,21 +74,35 @@ class OveruseFrameDetectorTest : public ::testing::Test {
};
TEST_F(OveruseFrameDetectorTest, TriggerOveruse) {
+ overuse_detector_->set_min_process_count_before_reporting(0);
TriggerOveruse();
}
TEST_F(OveruseFrameDetectorTest, OveruseAndRecover) {
+ overuse_detector_->set_min_process_count_before_reporting(0);
TriggerOveruse();
TriggerNormalUsage();
}
TEST_F(OveruseFrameDetectorTest, DoubleOveruseAndRecover) {
+ overuse_detector_->set_min_process_count_before_reporting(0);
TriggerOveruse();
TriggerOveruse();
TriggerNormalUsage();
}
+TEST_F(OveruseFrameDetectorTest, TriggerNormalUsageWithMinProcessCount) {
+ overuse_detector_->set_min_process_count_before_reporting(1);
+ InsertFramesWithInterval(900, 33);
+ overuse_detector_->Process();
+ EXPECT_EQ(-1, overuse_detector_->last_capture_jitter_ms());
+ clock_->AdvanceTimeMilliseconds(5000);
+ overuse_detector_->Process();
+ EXPECT_GT(overuse_detector_->last_capture_jitter_ms(), 0);
+}
+
TEST_F(OveruseFrameDetectorTest, ConstantOveruseGivesNoNormalUsage) {
+ overuse_detector_->set_min_process_count_before_reporting(0);
EXPECT_CALL(*(observer_.get()), NormalUsage()).Times(0);
for(size_t i = 0; i < 64; ++i)
@@ -96,6 +110,7 @@ TEST_F(OveruseFrameDetectorTest, ConstantOveruseGivesNoNormalUsage) {
}
TEST_F(OveruseFrameDetectorTest, LastCaptureJitter) {
+ overuse_detector_->set_min_process_count_before_reporting(0);
EXPECT_EQ(-1, overuse_detector_->last_capture_jitter_ms());
TriggerOveruse();
EXPECT_GT(overuse_detector_->last_capture_jitter_ms(), 0);