author     philipel <philipel@webrtc.org>          2015-12-21 08:23:20 -0800
committer  Commit bot <commit-bot@chromium.org>    2015-12-21 16:23:29 +0000
commit     5908c71128aea207e42f86468aedb0a6fce3cccb (patch)
tree       7ee8ead02e7623e14c6b5d2c14cd381038238c7e /webrtc/modules
parent     f5b1abf5b002d74a61e43e80782a544446b27617 (diff)
download   webrtc-5908c71128aea207e42f86468aedb0a6fce3cccb.tar.gz
Lint fix for webrtc/modules/video_coding PART 3!
Trying to submit all changes at once proved impossible since there were too many changes in too many files. The changes to PRESUBMIT.py will be uploaded in the last CL. (original CL: https://codereview.webrtc.org/1528503003/)

BUG=webrtc:5309
TBR=mflodman@webrtc.org

Review URL: https://codereview.webrtc.org/1540243002

Cr-Commit-Position: refs/heads/master@{#11105}
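For readers skimming the diff below: the changes are style-only cpplint/clang-format fixes with no behavior change. A minimal standalone sketch of the recurring patterns (a hypothetical Counter class, not part of WebRTC) — single-argument constructors marked explicit, long parameter lists wrapped one per line, two-space indentation for access specifiers, and two spaces before trailing comments such as namespace and header-guard closers:

```cpp
#include <cstdint>
#include <string>

namespace demo {

// Hypothetical class illustrating the style rules applied throughout this CL.
class Counter {
 public:
  // cpplint: single-argument constructors must be marked explicit.
  explicit Counter(int64_t start) : count_(start) {}

  // clang-format: when a declaration does not fit on one line, each
  // parameter goes on its own line, aligned with the first parameter.
  void Add(int64_t value,
           const std::string& label,
           bool log_to_console) {
    count_ += value;
    (void)label;
    (void)log_to_console;
  }

 private:
  int64_t count_;
};

}  // namespace demo
```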
Diffstat (limited to 'webrtc/modules')
-rw-r--r--  webrtc/modules/video_coding/test/receiver_tests.h                |  10
-rw-r--r--  webrtc/modules/video_coding/test/rtp_player.cc                   |  49
-rw-r--r--  webrtc/modules/video_coding/test/rtp_player.h                    |  17
-rw-r--r--  webrtc/modules/video_coding/test/stream_generator.cc             |  15
-rw-r--r--  webrtc/modules/video_coding/test/test_util.cc                    |  33
-rw-r--r--  webrtc/modules/video_coding/test/test_util.h                     |  12
-rw-r--r--  webrtc/modules/video_coding/test/tester_main.cc                  |  49
-rw-r--r--  webrtc/modules/video_coding/test/vcm_payload_sink_factory.cc     |  12
-rw-r--r--  webrtc/modules/video_coding/test/vcm_payload_sink_factory.h      |   6
-rw-r--r--  webrtc/modules/video_coding/test/video_rtp_play.cc               |   8
-rw-r--r--  webrtc/modules/video_coding/test/video_source.h                  | 117
-rw-r--r--  webrtc/modules/video_coding/timestamp_map.cc                     |   8
-rw-r--r--  webrtc/modules/video_coding/timestamp_map.h                      |   2
-rw-r--r--  webrtc/modules/video_coding/timing.cc                            |  47
-rw-r--r--  webrtc/modules/video_coding/timing.h                             |   3
-rw-r--r--  webrtc/modules/video_coding/timing_unittest.cc                   |  22
-rw-r--r--  webrtc/modules/video_coding/utility/frame_dropper.cc             | 527
-rw-r--r--  webrtc/modules/video_coding/utility/frame_dropper.h              | 114
-rw-r--r--  webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h    |  21
-rw-r--r--  webrtc/modules/video_coding/utility/moving_average.h             |  16
-rw-r--r--  webrtc/modules/video_coding/utility/quality_scaler.cc            |  16
-rw-r--r--  webrtc/modules/video_coding/utility/quality_scaler_unittest.cc   |  32
-rw-r--r--  webrtc/modules/video_coding/utility/vp8_header_parser.cc         |  22
-rw-r--r--  webrtc/modules/video_coding/utility/vp8_header_parser.h          |  54
-rw-r--r--  webrtc/modules/video_coding/video_coding_impl.cc                 |  37
-rw-r--r--  webrtc/modules/video_coding/video_coding_robustness_unittest.cc  |  92
-rw-r--r--  webrtc/modules/video_coding/video_sender.cc                      |  22
-rw-r--r--  webrtc/modules/video_coding/video_sender_unittest.cc             |  29
28 files changed, 643 insertions(+), 749 deletions(-)
diff --git a/webrtc/modules/video_coding/test/receiver_tests.h b/webrtc/modules/video_coding/test/receiver_tests.h
index 9b9b377e08..d6bac07392 100644
--- a/webrtc/modules/video_coding/test/receiver_tests.h
+++ b/webrtc/modules/video_coding/test/receiver_tests.h
@@ -11,6 +11,9 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_RECEIVER_TESTS_H_
#define WEBRTC_MODULES_VIDEO_CODING_TEST_RECEIVER_TESTS_H_
+#include <stdio.h>
+#include <string>
+
#include "webrtc/common_types.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
@@ -19,12 +22,9 @@
#include "webrtc/modules/video_coding/test/video_source.h"
#include "webrtc/typedefs.h"
-#include <stdio.h>
-#include <string>
-
class RtpDataCallback : public webrtc::NullRtpData {
public:
- RtpDataCallback(webrtc::VideoCodingModule* vcm) : vcm_(vcm) {}
+ explicit RtpDataCallback(webrtc::VideoCodingModule* vcm) : vcm_(vcm) {}
virtual ~RtpDataCallback() {}
int32_t OnReceivedPayloadData(
@@ -40,4 +40,4 @@ class RtpDataCallback : public webrtc::NullRtpData {
int RtpPlay(const CmdArgs& args);
-#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RECEIVER_TESTS_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RECEIVER_TESTS_H_
diff --git a/webrtc/modules/video_coding/test/rtp_player.cc b/webrtc/modules/video_coding/test/rtp_player.cc
index c9af450f57..9b6490618c 100644
--- a/webrtc/modules/video_coding/test/rtp_player.cc
+++ b/webrtc/modules/video_coding/test/rtp_player.cc
@@ -26,9 +26,9 @@
#include "webrtc/test/rtp_file_reader.h"
#if 1
-# define DEBUG_LOG1(text, arg)
+#define DEBUG_LOG1(text, arg)
#else
-# define DEBUG_LOG1(text, arg) (printf(text "\n", arg))
+#define DEBUG_LOG1(text, arg) (printf(text "\n", arg))
#endif
namespace webrtc {
@@ -41,7 +41,9 @@ enum {
class RawRtpPacket {
public:
- RawRtpPacket(const uint8_t* data, size_t length, uint32_t ssrc,
+ RawRtpPacket(const uint8_t* data,
+ size_t length,
+ uint32_t ssrc,
uint16_t seq_num)
: data_(new uint8_t[length]),
length_(length),
@@ -140,7 +142,7 @@ class LostPackets {
CriticalSectionScoped cs(crit_sect_.get());
int count = 0;
for (ConstRtpPacketIterator it = packets_.begin(); it != packets_.end();
- ++it) {
+ ++it) {
if ((*it)->resend_time_ms() >= 0) {
count++;
}
@@ -164,7 +166,7 @@ class LostPackets {
printf("Packets still lost: %zd\n", packets_.size());
printf("Sequence numbers:\n");
for (ConstRtpPacketIterator it = packets_.begin(); it != packets_.end();
- ++it) {
+ ++it) {
printf("%u, ", (*it)->seq_num());
}
printf("\n");
@@ -231,17 +233,14 @@ class SsrcHandlers {
kDefaultTransmissionTimeOffsetExtensionId);
for (PayloadTypesIterator it = payload_types_.begin();
- it != payload_types_.end(); ++it) {
+ it != payload_types_.end(); ++it) {
VideoCodec codec;
memset(&codec, 0, sizeof(codec));
- strncpy(codec.plName, it->name().c_str(), sizeof(codec.plName)-1);
+ strncpy(codec.plName, it->name().c_str(), sizeof(codec.plName) - 1);
codec.plType = it->payload_type();
codec.codecType = it->codec_type();
- if (handler->rtp_module_->RegisterReceivePayload(codec.plName,
- codec.plType,
- 90000,
- 0,
- codec.maxBitrate) < 0) {
+ if (handler->rtp_module_->RegisterReceivePayload(
+ codec.plName, codec.plType, 90000, 0, codec.maxBitrate) < 0) {
return -1;
}
}
@@ -267,7 +266,8 @@ class SsrcHandlers {
private:
class Handler : public RtpStreamInterface {
public:
- Handler(uint32_t ssrc, const PayloadTypes& payload_types,
+ Handler(uint32_t ssrc,
+ const PayloadTypes& payload_types,
LostPackets* lost_packets)
: rtp_header_parser_(RtpHeaderParser::Create()),
rtp_payload_registry_(new RTPPayloadRegistry(
@@ -290,9 +290,7 @@ class SsrcHandlers {
}
virtual uint32_t ssrc() const { return ssrc_; }
- virtual const PayloadTypes& payload_types() const {
- return payload_types_;
- }
+ virtual const PayloadTypes& payload_types() const { return payload_types_; }
rtc::scoped_ptr<RtpHeaderParser> rtp_header_parser_;
rtc::scoped_ptr<RTPPayloadRegistry> rtp_payload_registry_;
@@ -351,8 +349,7 @@ class RtpPlayerImpl : public RtpPlayerInterface {
virtual int NextPacket(int64_t time_now) {
// Send any packets ready to be resent.
for (RawRtpPacket* packet = lost_packets_.NextPacketToResend(time_now);
- packet != NULL;
- packet = lost_packets_.NextPacketToResend(time_now)) {
+ packet != NULL; packet = lost_packets_.NextPacketToResend(time_now)) {
int ret = SendPacket(packet->data(), packet->length());
if (ret > 0) {
printf("Resend: %08x:%u\n", packet->ssrc(), packet->seq_num());
@@ -392,8 +389,7 @@ class RtpPlayerImpl : public RtpPlayerInterface {
if (!packet_source_->NextPacket(&next_packet_)) {
end_of_file_ = true;
return 0;
- }
- else if (next_packet_.length == 0) {
+ } else if (next_packet_.length == 0) {
return 0;
}
}
@@ -406,7 +402,7 @@ class RtpPlayerImpl : public RtpPlayerInterface {
virtual uint32_t TimeUntilNextPacket() const {
int64_t time_left = (next_rtp_time_ - first_packet_rtp_time_) -
- (clock_->TimeInMilliseconds() - first_packet_time_ms_);
+ (clock_->TimeInMilliseconds() - first_packet_time_ms_);
if (time_left < 0) {
return 0;
}
@@ -438,7 +434,7 @@ class RtpPlayerImpl : public RtpPlayerInterface {
if (no_loss_startup_ > 0) {
no_loss_startup_--;
- } else if ((rand() + 1.0)/(RAND_MAX + 1.0) < loss_rate_) {
+ } else if ((rand() + 1.0) / (RAND_MAX + 1.0) < loss_rate_) { // NOLINT
uint16_t seq_num = header.sequenceNumber;
lost_packets_.AddPacket(new RawRtpPacket(data, length, ssrc, seq_num));
DEBUG_LOG1("Dropped packet: %d!", header.header.sequenceNumber);
@@ -470,9 +466,12 @@ class RtpPlayerImpl : public RtpPlayerInterface {
};
RtpPlayerInterface* Create(const std::string& input_filename,
- PayloadSinkFactoryInterface* payload_sink_factory, Clock* clock,
- const PayloadTypes& payload_types, float loss_rate, int64_t rtt_ms,
- bool reordering) {
+ PayloadSinkFactoryInterface* payload_sink_factory,
+ Clock* clock,
+ const PayloadTypes& payload_types,
+ float loss_rate,
+ int64_t rtt_ms,
+ bool reordering) {
rtc::scoped_ptr<test::RtpFileReader> packet_source(
test::RtpFileReader::Create(test::RtpFileReader::kRtpDump,
input_filename));
diff --git a/webrtc/modules/video_coding/test/rtp_player.h b/webrtc/modules/video_coding/test/rtp_player.h
index c227f1c589..e50fb9ac70 100644
--- a/webrtc/modules/video_coding/test/rtp_player.h
+++ b/webrtc/modules/video_coding/test/rtp_player.h
@@ -24,12 +24,12 @@ namespace rtpplayer {
class PayloadCodecTuple {
public:
- PayloadCodecTuple(uint8_t payload_type, const std::string& codec_name,
+ PayloadCodecTuple(uint8_t payload_type,
+ const std::string& codec_name,
VideoCodecType codec_type)
: name_(codec_name),
payload_type_(payload_type),
- codec_type_(codec_type) {
- }
+ codec_type_(codec_type) {}
const std::string& name() const { return name_; }
uint8_t payload_type() const { return payload_type_; }
@@ -87,11 +87,14 @@ class RtpPlayerInterface {
};
RtpPlayerInterface* Create(const std::string& inputFilename,
- PayloadSinkFactoryInterface* payloadSinkFactory, Clock* clock,
- const PayloadTypes& payload_types, float lossRate, int64_t rttMs,
- bool reordering);
+ PayloadSinkFactoryInterface* payloadSinkFactory,
+ Clock* clock,
+ const PayloadTypes& payload_types,
+ float lossRate,
+ int64_t rttMs,
+ bool reordering);
} // namespace rtpplayer
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RTP_PLAYER_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RTP_PLAYER_H_
diff --git a/webrtc/modules/video_coding/test/stream_generator.cc b/webrtc/modules/video_coding/test/stream_generator.cc
index 304fe2e013..167d55faff 100644
--- a/webrtc/modules/video_coding/test/stream_generator.cc
+++ b/webrtc/modules/video_coding/test/stream_generator.cc
@@ -22,8 +22,7 @@
namespace webrtc {
StreamGenerator::StreamGenerator(uint16_t start_seq_num, int64_t current_time)
- : packets_(), sequence_number_(start_seq_num), start_time_(current_time) {
-}
+ : packets_(), sequence_number_(start_seq_num), start_time_(current_time) {}
void StreamGenerator::Init(uint16_t start_seq_num, int64_t current_time) {
packets_.clear();
@@ -41,8 +40,8 @@ void StreamGenerator::GenerateFrame(FrameType type,
const int packet_size =
(kFrameSize + num_media_packets / 2) / num_media_packets;
bool marker_bit = (i == num_media_packets - 1);
- packets_.push_back(GeneratePacket(
- sequence_number_, timestamp, packet_size, (i == 0), marker_bit, type));
+ packets_.push_back(GeneratePacket(sequence_number_, timestamp, packet_size,
+ (i == 0), marker_bit, type));
++sequence_number_;
}
for (int i = 0; i < num_empty_packets; ++i) {
@@ -104,7 +103,9 @@ bool StreamGenerator::NextPacket(VCMPacket* packet) {
return true;
}
-void StreamGenerator::DropLastPacket() { packets_.pop_back(); }
+void StreamGenerator::DropLastPacket() {
+ packets_.pop_back();
+}
uint16_t StreamGenerator::NextSequenceNumber() const {
if (packets_.empty())
@@ -112,7 +113,9 @@ uint16_t StreamGenerator::NextSequenceNumber() const {
return packets_.front().seqNum;
}
-int StreamGenerator::PacketsRemaining() const { return packets_.size(); }
+int StreamGenerator::PacketsRemaining() const {
+ return packets_.size();
+}
std::list<VCMPacket>::iterator StreamGenerator::GetPacketIterator(int index) {
std::list<VCMPacket>::iterator it = packets_.begin();
diff --git a/webrtc/modules/video_coding/test/test_util.cc b/webrtc/modules/video_coding/test/test_util.cc
index fc670ada93..7ff663e395 100644
--- a/webrtc/modules/video_coding/test/test_util.cc
+++ b/webrtc/modules/video_coding/test/test_util.cc
@@ -28,12 +28,12 @@ CmdArgs::CmdArgs()
rtt(0),
inputFile(webrtc::test::ProjectRootPath() + "/resources/foreman_cif.yuv"),
outputFile(webrtc::test::OutputPath() +
- "video_coding_test_output_352x288.yuv") {
-}
+ "video_coding_test_output_352x288.yuv") {}
namespace {
-void SplitFilename(const std::string& filename, std::string* basename,
+void SplitFilename(const std::string& filename,
+ std::string* basename,
std::string* extension) {
assert(basename);
assert(extension);
@@ -41,7 +41,7 @@ void SplitFilename(const std::string& filename, std::string* basename,
std::string::size_type idx;
idx = filename.rfind('.');
- if(idx != std::string::npos) {
+ if (idx != std::string::npos) {
*basename = filename.substr(0, idx);
*extension = filename.substr(idx + 1);
} else {
@@ -50,21 +50,24 @@ void SplitFilename(const std::string& filename, std::string* basename,
}
}
-std::string AppendWidthHeightCount(const std::string& filename, int width,
- int height, int count) {
+std::string AppendWidthHeightCount(const std::string& filename,
+ int width,
+ int height,
+ int count) {
std::string basename;
std::string extension;
SplitFilename(filename, &basename, &extension);
std::stringstream ss;
- ss << basename << "_" << count << "." << width << "_" << height << "." <<
- extension;
+ ss << basename << "_" << count << "." << width << "_" << height << "."
+ << extension;
return ss.str();
}
} // namespace
FileOutputFrameReceiver::FileOutputFrameReceiver(
- const std::string& base_out_filename, uint32_t ssrc)
+ const std::string& base_out_filename,
+ uint32_t ssrc)
: out_filename_(),
out_file_(NULL),
timing_file_(NULL),
@@ -80,8 +83,8 @@ FileOutputFrameReceiver::FileOutputFrameReceiver(
SplitFilename(base_out_filename, &basename, &extension);
}
std::stringstream ss;
- ss << basename << "_" << std::hex << std::setw(8) << std::setfill('0') <<
- ssrc << "." << extension;
+ ss << basename << "_" << std::hex << std::setw(8) << std::setfill('0') << ssrc
+ << "." << extension;
out_filename_ = ss.str();
}
@@ -113,8 +116,8 @@ int32_t FileOutputFrameReceiver::FrameToRender(
printf("New size: %dx%d\n", video_frame.width(), video_frame.height());
width_ = video_frame.width();
height_ = video_frame.height();
- std::string filename_with_width_height = AppendWidthHeightCount(
- out_filename_, width_, height_, count_);
+ std::string filename_with_width_height =
+ AppendWidthHeightCount(out_filename_, width_, height_, count_);
++count_;
out_file_ = fopen(filename_with_width_height.c_str(), "wb");
if (out_file_ == NULL) {
@@ -122,7 +125,7 @@ int32_t FileOutputFrameReceiver::FrameToRender(
}
}
fprintf(timing_file_, "%u, %u\n", video_frame.timestamp(),
- webrtc::MaskWord64ToUWord32(video_frame.render_time_ms()));
+ webrtc::MaskWord64ToUWord32(video_frame.render_time_ms()));
if (PrintVideoFrame(video_frame, out_file_) < 0) {
return -1;
}
@@ -130,7 +133,7 @@ int32_t FileOutputFrameReceiver::FrameToRender(
}
webrtc::RtpVideoCodecTypes ConvertCodecType(const char* plname) {
- if (strncmp(plname,"VP8" , 3) == 0) {
+ if (strncmp(plname, "VP8", 3) == 0) {
return webrtc::kRtpVideoVp8;
} else {
// Default value.
diff --git a/webrtc/modules/video_coding/test/test_util.h b/webrtc/modules/video_coding/test/test_util.h
index 30f337d2bb..45b88b9b50 100644
--- a/webrtc/modules/video_coding/test/test_util.h
+++ b/webrtc/modules/video_coding/test/test_util.h
@@ -33,11 +33,13 @@ class NullEvent : public webrtc::EventWrapper {
virtual bool Reset() { return true; }
- virtual webrtc::EventTypeWrapper Wait(unsigned long max_time) {
+ virtual webrtc::EventTypeWrapper Wait(unsigned long max_time) { // NOLINT
return webrtc::kEventTimeout;
}
- virtual bool StartTimer(bool periodic, unsigned long time) { return true; }
+ virtual bool StartTimer(bool periodic, unsigned long time) { // NOLINT
+ return true;
+ }
virtual bool StopTimer() { return true; }
};
@@ -46,9 +48,7 @@ class NullEventFactory : public webrtc::EventFactory {
public:
virtual ~NullEventFactory() {}
- virtual webrtc::EventWrapper* CreateEvent() {
- return new NullEvent;
- }
+ virtual webrtc::EventWrapper* CreateEvent() { return new NullEvent; }
};
class FileOutputFrameReceiver : public webrtc::VCMReceiveCallback {
@@ -57,7 +57,7 @@ class FileOutputFrameReceiver : public webrtc::VCMReceiveCallback {
virtual ~FileOutputFrameReceiver();
// VCMReceiveCallback
- virtual int32_t FrameToRender(webrtc::VideoFrame& video_frame);
+ virtual int32_t FrameToRender(webrtc::VideoFrame& video_frame); // NOLINT
private:
std::string out_filename_;
diff --git a/webrtc/modules/video_coding/test/tester_main.cc b/webrtc/modules/video_coding/test/tester_main.cc
index 0184ff18cc..33ca82007d 100644
--- a/webrtc/modules/video_coding/test/tester_main.cc
+++ b/webrtc/modules/video_coding/test/tester_main.cc
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include <stdlib.h>
#include <string.h>
@@ -21,12 +20,15 @@ DEFINE_string(codec, "VP8", "Codec to use (VP8 or I420).");
DEFINE_int32(width, 352, "Width in pixels of the frames in the input file.");
DEFINE_int32(height, 288, "Height in pixels of the frames in the input file.");
DEFINE_int32(rtt, 0, "RTT (round-trip time), in milliseconds.");
-DEFINE_string(input_filename, webrtc::test::ProjectRootPath() +
- "/resources/foreman_cif.yuv", "Input file.");
-DEFINE_string(output_filename, webrtc::test::OutputPath() +
- "video_coding_test_output_352x288.yuv", "Output file.");
+DEFINE_string(input_filename,
+ webrtc::test::ProjectRootPath() + "/resources/foreman_cif.yuv",
+ "Input file.");
+DEFINE_string(output_filename,
+ webrtc::test::OutputPath() +
+ "video_coding_test_output_352x288.yuv",
+ "Output file.");
-using namespace webrtc;
+namespace webrtc {
/*
* Build with EVENT_DEBUG defined
@@ -36,36 +38,37 @@ using namespace webrtc;
int vcmMacrosTests = 0;
int vcmMacrosErrors = 0;
-int ParseArguments(CmdArgs& args) {
- args.width = FLAGS_width;
- args.height = FLAGS_height;
- if (args.width < 1 || args.height < 1) {
+int ParseArguments(CmdArgs* args) {
+ args->width = FLAGS_width;
+ args->height = FLAGS_height;
+ if (args->width < 1 || args->height < 1) {
return -1;
}
- args.codecName = FLAGS_codec;
- if (args.codecName == "VP8") {
- args.codecType = kVideoCodecVP8;
- } else if (args.codecName == "VP9") {
- args.codecType = kVideoCodecVP9;
- } else if (args.codecName == "I420") {
- args.codecType = kVideoCodecI420;
+ args->codecName = FLAGS_codec;
+ if (args->codecName == "VP8") {
+ args->codecType = kVideoCodecVP8;
+ } else if (args->codecName == "VP9") {
+ args->codecType = kVideoCodecVP9;
+ } else if (args->codecName == "I420") {
+ args->codecType = kVideoCodecI420;
} else {
- printf("Invalid codec: %s\n", args.codecName.c_str());
+ printf("Invalid codec: %s\n", args->codecName.c_str());
return -1;
}
- args.inputFile = FLAGS_input_filename;
- args.outputFile = FLAGS_output_filename;
- args.rtt = FLAGS_rtt;
+ args->inputFile = FLAGS_input_filename;
+ args->outputFile = FLAGS_output_filename;
+ args->rtt = FLAGS_rtt;
return 0;
}
+} // namespace webrtc
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
// Initialize WebRTC fileutils.h so paths to resources can be resolved.
webrtc::test::SetExecutablePath(argv[0]);
google::ParseCommandLineFlags(&argc, &argv, true);
CmdArgs args;
- if (ParseArguments(args) != 0) {
+ if (webrtc::ParseArguments(&args) != 0) {
printf("Unable to parse input arguments\n");
return -1;
}
diff --git a/webrtc/modules/video_coding/test/vcm_payload_sink_factory.cc b/webrtc/modules/video_coding/test/vcm_payload_sink_factory.cc
index 26a0d019bf..d4ee93f7ad 100644
--- a/webrtc/modules/video_coding/test/vcm_payload_sink_factory.cc
+++ b/webrtc/modules/video_coding/test/vcm_payload_sink_factory.cc
@@ -22,9 +22,8 @@
namespace webrtc {
namespace rtpplayer {
-class VcmPayloadSinkFactory::VcmPayloadSink
- : public PayloadSinkInterface,
- public VCMPacketRequestCallback {
+class VcmPayloadSinkFactory::VcmPayloadSink : public PayloadSinkInterface,
+ public VCMPacketRequestCallback {
public:
VcmPayloadSink(VcmPayloadSinkFactory* factory,
RtpStreamInterface* stream,
@@ -43,9 +42,7 @@ class VcmPayloadSinkFactory::VcmPayloadSink
vcm_->RegisterReceiveCallback(frame_receiver_.get());
}
- virtual ~VcmPayloadSink() {
- factory_->Remove(this);
- }
+ virtual ~VcmPayloadSink() { factory_->Remove(this); }
// PayloadSinkInterface
int32_t OnReceivedPayloadData(const uint8_t* payload_data,
@@ -136,8 +133,7 @@ PayloadSinkInterface* VcmPayloadSinkFactory::Create(
}
const PayloadTypes& plt = stream->payload_types();
- for (PayloadTypesIterator it = plt.begin(); it != plt.end();
- ++it) {
+ for (PayloadTypesIterator it = plt.begin(); it != plt.end(); ++it) {
if (it->codec_type() != kVideoCodecULPFEC &&
it->codec_type() != kVideoCodecRED) {
VideoCodec codec;
diff --git a/webrtc/modules/video_coding/test/vcm_payload_sink_factory.h b/webrtc/modules/video_coding/test/vcm_payload_sink_factory.h
index 1de1cd6975..dae53b0c08 100644
--- a/webrtc/modules/video_coding/test/vcm_payload_sink_factory.h
+++ b/webrtc/modules/video_coding/test/vcm_payload_sink_factory.h
@@ -29,9 +29,11 @@ namespace rtpplayer {
class VcmPayloadSinkFactory : public PayloadSinkFactoryInterface {
public:
VcmPayloadSinkFactory(const std::string& base_out_filename,
- Clock* clock, bool protection_enabled,
+ Clock* clock,
+ bool protection_enabled,
VCMVideoProtection protection_method,
- int64_t rtt_ms, uint32_t render_delay_ms,
+ int64_t rtt_ms,
+ uint32_t render_delay_ms,
uint32_t min_playout_delay_ms);
virtual ~VcmPayloadSinkFactory();
diff --git a/webrtc/modules/video_coding/test/video_rtp_play.cc b/webrtc/modules/video_coding/test/video_rtp_play.cc
index 0a6b7d13e8..cb092e381e 100644
--- a/webrtc/modules/video_coding/test/video_rtp_play.cc
+++ b/webrtc/modules/video_coding/test/video_rtp_play.cc
@@ -48,9 +48,9 @@ int RtpPlay(const CmdArgs& args) {
output_file = webrtc::test::OutputPath() + "RtpPlay_decoded.yuv";
webrtc::SimulatedClock clock(0);
- webrtc::rtpplayer::VcmPayloadSinkFactory factory(output_file, &clock,
- kConfigProtectionEnabled, kConfigProtectionMethod, kConfigRttMs,
- kConfigRenderDelayMs, kConfigMinPlayoutDelayMs);
+ webrtc::rtpplayer::VcmPayloadSinkFactory factory(
+ output_file, &clock, kConfigProtectionEnabled, kConfigProtectionMethod,
+ kConfigRttMs, kConfigRenderDelayMs, kConfigMinPlayoutDelayMs);
rtc::scoped_ptr<webrtc::rtpplayer::RtpPlayerInterface> rtp_player(
webrtc::rtpplayer::Create(args.inputFile, &factory, &clock, payload_types,
kConfigLossRate, kConfigRttMs,
@@ -63,7 +63,7 @@ int RtpPlay(const CmdArgs& args) {
while ((ret = rtp_player->NextPacket(clock.TimeInMilliseconds())) == 0) {
ret = factory.DecodeAndProcessAll(true);
if (ret < 0 || (kConfigMaxRuntimeMs > -1 &&
- clock.TimeInMilliseconds() >= kConfigMaxRuntimeMs)) {
+ clock.TimeInMilliseconds() >= kConfigMaxRuntimeMs)) {
break;
}
clock.AdvanceTimeMilliseconds(1);
diff --git a/webrtc/modules/video_coding/test/video_source.h b/webrtc/modules/video_coding/test/video_source.h
index 6b878e55d2..19d7f50b26 100644
--- a/webrtc/modules/video_coding/test/video_source.h
+++ b/webrtc/modules/video_coding/test/video_source.h
@@ -11,72 +11,75 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
#define WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/typedefs.h"
-
#include <string>
-enum VideoSize
- {
- kUndefined,
- kSQCIF, // 128*96 = 12 288
- kQQVGA, // 160*120 = 19 200
- kQCIF, // 176*144 = 25 344
- kCGA, // 320*200 = 64 000
- kQVGA, // 320*240 = 76 800
- kSIF, // 352*240 = 84 480
- kWQVGA, // 400*240 = 96 000
- kCIF, // 352*288 = 101 376
- kW288p, // 512*288 = 147 456 (WCIF)
- k448p, // 576*448 = 281 088
- kVGA, // 640*480 = 307 200
- k432p, // 720*432 = 311 040
- kW432p, // 768*432 = 331 776
- k4SIF, // 704*480 = 337 920
- kW448p, // 768*448 = 344 064
- kNTSC, // 720*480 = 345 600
- kFW448p, // 800*448 = 358 400
- kWVGA, // 800*480 = 384 000
- k4CIF, // 704*576 = 405 504
- kSVGA, // 800*600 = 480 000
- kW544p, // 960*544 = 522 240
- kW576p, // 1024*576 = 589 824 (W4CIF)
- kHD, // 960*720 = 691 200
- kXGA, // 1024*768 = 786 432
- kWHD, // 1280*720 = 921 600
- kFullHD, // 1440*1080 = 1 555 200
- kWFullHD, // 1920*1080 = 2 073 600
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/typedefs.h"
- kNumberOfVideoSizes
- };
+enum VideoSize {
+ kUndefined,
+ kSQCIF, // 128*96 = 12 288
+ kQQVGA, // 160*120 = 19 200
+ kQCIF, // 176*144 = 25 344
+ kCGA, // 320*200 = 64 000
+ kQVGA, // 320*240 = 76 800
+ kSIF, // 352*240 = 84 480
+ kWQVGA, // 400*240 = 96 000
+ kCIF, // 352*288 = 101 376
+ kW288p, // 512*288 = 147 456 (WCIF)
+ k448p, // 576*448 = 281 088
+ kVGA, // 640*480 = 307 200
+ k432p, // 720*432 = 311 040
+ kW432p, // 768*432 = 331 776
+ k4SIF, // 704*480 = 337 920
+ kW448p, // 768*448 = 344 064
+ kNTSC, // 720*480 = 345 600
+ kFW448p, // 800*448 = 358 400
+ kWVGA, // 800*480 = 384 000
+ k4CIF, // 704*576 = 405 504
+ kSVGA, // 800*600 = 480 000
+ kW544p, // 960*544 = 522 240
+ kW576p, // 1024*576 = 589 824 (W4CIF)
+ kHD, // 960*720 = 691 200
+ kXGA, // 1024*768 = 786 432
+ kWHD, // 1280*720 = 921 600
+ kFullHD, // 1440*1080 = 1 555 200
+ kWFullHD, // 1920*1080 = 2 073 600
+ kNumberOfVideoSizes
+};
-class VideoSource
-{
-public:
+class VideoSource {
+ public:
VideoSource();
- VideoSource(std::string fileName, VideoSize size, float frameRate, webrtc::VideoType type = webrtc::kI420);
- VideoSource(std::string fileName, uint16_t width, uint16_t height,
- float frameRate = 30, webrtc::VideoType type = webrtc::kI420);
+ VideoSource(std::string fileName,
+ VideoSize size,
+ float frameRate,
+ webrtc::VideoType type = webrtc::kI420);
+ VideoSource(std::string fileName,
+ uint16_t width,
+ uint16_t height,
+ float frameRate = 30,
+ webrtc::VideoType type = webrtc::kI420);
- std::string GetFileName() const { return _fileName; }
- uint16_t GetWidth() const { return _width; }
- uint16_t GetHeight() const { return _height; }
- webrtc::VideoType GetType() const { return _type; }
- float GetFrameRate() const { return _frameRate; }
- int GetWidthHeight( VideoSize size);
+ std::string GetFileName() const { return _fileName; }
+ uint16_t GetWidth() const { return _width; }
+ uint16_t GetHeight() const { return _height; }
+ webrtc::VideoType GetType() const { return _type; }
+ float GetFrameRate() const { return _frameRate; }
+ int GetWidthHeight(VideoSize size);
- // Returns the filename with the path (including the leading slash) removed.
- std::string GetName() const;
+ // Returns the filename with the path (including the leading slash) removed.
+ std::string GetName() const;
- size_t GetFrameLength() const;
+ size_t GetFrameLength() const;
-private:
- std::string _fileName;
- uint16_t _width;
- uint16_t _height;
- webrtc::VideoType _type;
- float _frameRate;
+ private:
+ std::string _fileName;
+ uint16_t _width;
+ uint16_t _height;
+ webrtc::VideoType _type;
+ float _frameRate;
};
-#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
diff --git a/webrtc/modules/video_coding/timestamp_map.cc b/webrtc/modules/video_coding/timestamp_map.cc
index 14b16cd60c..97d2777658 100644
--- a/webrtc/modules/video_coding/timestamp_map.cc
+++ b/webrtc/modules/video_coding/timestamp_map.cc
@@ -20,11 +20,9 @@ VCMTimestampMap::VCMTimestampMap(size_t capacity)
: ring_buffer_(new TimestampDataTuple[capacity]),
capacity_(capacity),
next_add_idx_(0),
- next_pop_idx_(0) {
-}
+ next_pop_idx_(0) {}
-VCMTimestampMap::~VCMTimestampMap() {
-}
+VCMTimestampMap::~VCMTimestampMap() {}
void VCMTimestampMap::Add(uint32_t timestamp, VCMFrameInformation* data) {
ring_buffer_[next_add_idx_].timestamp = timestamp;
@@ -62,4 +60,4 @@ VCMFrameInformation* VCMTimestampMap::Pop(uint32_t timestamp) {
bool VCMTimestampMap::IsEmpty() const {
return (next_add_idx_ == next_pop_idx_);
}
-}
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/timestamp_map.h b/webrtc/modules/video_coding/timestamp_map.h
index 3d6f1bca0f..435d05895c 100644
--- a/webrtc/modules/video_coding/timestamp_map.h
+++ b/webrtc/modules/video_coding/timestamp_map.h
@@ -44,4 +44,4 @@ class VCMTimestampMap {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
diff --git a/webrtc/modules/video_coding/timing.cc b/webrtc/modules/video_coding/timing.cc
index d2563a4775..08dc307524 100644
--- a/webrtc/modules/video_coding/timing.cc
+++ b/webrtc/modules/video_coding/timing.cc
@@ -10,17 +10,17 @@
#include "webrtc/modules/video_coding/timing.h"
+#include <algorithm>
+
#include "webrtc/modules/video_coding/internal_defines.h"
#include "webrtc/modules/video_coding/jitter_buffer_common.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/metrics.h"
#include "webrtc/system_wrappers/include/timestamp_extrapolator.h"
-
namespace webrtc {
-VCMTiming::VCMTiming(Clock* clock,
- VCMTiming* master_timing)
+VCMTiming::VCMTiming(Clock* clock, VCMTiming* master_timing)
: crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
clock_(clock),
master_(false),
@@ -120,8 +120,8 @@ void VCMTiming::UpdateCurrentDelay(uint32_t frame_timestamp) {
// Not initialized, set current delay to target.
current_delay_ms_ = target_delay_ms;
} else if (target_delay_ms != current_delay_ms_) {
- int64_t delay_diff_ms = static_cast<int64_t>(target_delay_ms) -
- current_delay_ms_;
+ int64_t delay_diff_ms =
+ static_cast<int64_t>(target_delay_ms) - current_delay_ms_;
// Never change the delay with more than 100 ms every second. If we're
// changing the delay in too large steps we will get noticeable freezes. By
// limiting the change we can increase the delay in smaller steps, which
@@ -130,11 +130,13 @@ void VCMTiming::UpdateCurrentDelay(uint32_t frame_timestamp) {
int64_t max_change_ms = 0;
if (frame_timestamp < 0x0000ffff && prev_frame_timestamp_ > 0xffff0000) {
// wrap
- max_change_ms = kDelayMaxChangeMsPerS * (frame_timestamp +
- (static_cast<int64_t>(1) << 32) - prev_frame_timestamp_) / 90000;
+ max_change_ms = kDelayMaxChangeMsPerS *
+ (frame_timestamp + (static_cast<int64_t>(1) << 32) -
+ prev_frame_timestamp_) /
+ 90000;
} else {
max_change_ms = kDelayMaxChangeMsPerS *
- (frame_timestamp - prev_frame_timestamp_) / 90000;
+ (frame_timestamp - prev_frame_timestamp_) / 90000;
}
if (max_change_ms <= 0) {
// Any changes less than 1 ms are truncated and
@@ -155,7 +157,7 @@ void VCMTiming::UpdateCurrentDelay(int64_t render_time_ms,
CriticalSectionScoped cs(crit_sect_);
uint32_t target_delay_ms = TargetDelayInternal();
int64_t delayed_ms = actual_decode_time_ms -
- (render_time_ms - MaxDecodeTimeMs() - render_delay_ms_);
+ (render_time_ms - MaxDecodeTimeMs() - render_delay_ms_);
if (delayed_ms < 0) {
return;
}
@@ -193,8 +195,8 @@ void VCMTiming::IncomingTimestamp(uint32_t time_stamp, int64_t now_ms) {
ts_extrapolator_->Update(now_ms, time_stamp);
}
-int64_t VCMTiming::RenderTimeMs(uint32_t frame_timestamp, int64_t now_ms)
- const {
+int64_t VCMTiming::RenderTimeMs(uint32_t frame_timestamp,
+ int64_t now_ms) const {
CriticalSectionScoped cs(crit_sect_);
const int64_t render_time_ms = RenderTimeMsInternal(frame_timestamp, now_ms);
return render_time_ms;
@@ -203,7 +205,7 @@ int64_t VCMTiming::RenderTimeMs(uint32_t frame_timestamp, int64_t now_ms)
int64_t VCMTiming::RenderTimeMsInternal(uint32_t frame_timestamp,
int64_t now_ms) const {
int64_t estimated_complete_time_ms =
- ts_extrapolator_->ExtrapolateLocalTime(frame_timestamp);
+ ts_extrapolator_->ExtrapolateLocalTime(frame_timestamp);
if (estimated_complete_time_ms == -1) {
estimated_complete_time_ms = now_ms;
}
@@ -214,19 +216,19 @@ int64_t VCMTiming::RenderTimeMsInternal(uint32_t frame_timestamp,
}
// Must be called from inside a critical section.
-int32_t VCMTiming::MaxDecodeTimeMs(FrameType frame_type /*= kVideoFrameDelta*/)
- const {
+int32_t VCMTiming::MaxDecodeTimeMs(
+ FrameType frame_type /*= kVideoFrameDelta*/) const {
const int32_t decode_time_ms = codec_timer_.RequiredDecodeTimeMs(frame_type);
assert(decode_time_ms >= 0);
return decode_time_ms;
}
-uint32_t VCMTiming::MaxWaitingTime(int64_t render_time_ms, int64_t now_ms)
- const {
+uint32_t VCMTiming::MaxWaitingTime(int64_t render_time_ms,
+ int64_t now_ms) const {
CriticalSectionScoped cs(crit_sect_);
- const int64_t max_wait_time_ms = render_time_ms - now_ms -
- MaxDecodeTimeMs() - render_delay_ms_;
+ const int64_t max_wait_time_ms =
+ render_time_ms - now_ms - MaxDecodeTimeMs() - render_delay_ms_;
if (max_wait_time_ms < 0) {
return 0;
@@ -234,8 +236,8 @@ uint32_t VCMTiming::MaxWaitingTime(int64_t render_time_ms, int64_t now_ms)
return static_cast<uint32_t>(max_wait_time_ms);
}
-bool VCMTiming::EnoughTimeToDecode(uint32_t available_processing_time_ms)
- const {
+bool VCMTiming::EnoughTimeToDecode(
+ uint32_t available_processing_time_ms) const {
CriticalSectionScoped cs(crit_sect_);
int32_t max_decode_time_ms = MaxDecodeTimeMs();
if (max_decode_time_ms < 0) {
@@ -248,7 +250,8 @@ bool VCMTiming::EnoughTimeToDecode(uint32_t available_processing_time_ms)
max_decode_time_ms = 1;
}
return static_cast<int32_t>(available_processing_time_ms) -
- max_decode_time_ms > 0;
+ max_decode_time_ms >
+ 0;
}
uint32_t VCMTiming::TargetVideoDelay() const {
@@ -258,7 +261,7 @@ uint32_t VCMTiming::TargetVideoDelay() const {
uint32_t VCMTiming::TargetDelayInternal() const {
return std::max(min_playout_delay_ms_,
- jitter_delay_ms_ + MaxDecodeTimeMs() + render_delay_ms_);
+ jitter_delay_ms_ + MaxDecodeTimeMs() + render_delay_ms_);
}
void VCMTiming::GetTimings(int* decode_ms,
diff --git a/webrtc/modules/video_coding/timing.h b/webrtc/modules/video_coding/timing.h
index 46681a3a07..a4d0cf4543 100644
--- a/webrtc/modules/video_coding/timing.h
+++ b/webrtc/modules/video_coding/timing.h
@@ -25,8 +25,7 @@ class VCMTiming {
public:
// The primary timing component should be passed
// if this is the dual timing component.
- VCMTiming(Clock* clock,
- VCMTiming* master_timing = NULL);
+ explicit VCMTiming(Clock* clock, VCMTiming* master_timing = NULL);
~VCMTiming();
// Resets the timing to the initial state.
diff --git a/webrtc/modules/video_coding/timing_unittest.cc b/webrtc/modules/video_coding/timing_unittest.cc
index 02bd88ddb2..2e8df83683 100644
--- a/webrtc/modules/video_coding/timing_unittest.cc
+++ b/webrtc/modules/video_coding/timing_unittest.cc
@@ -55,8 +55,9 @@ TEST(ReceiverTiming, Tests) {
clock.AdvanceTimeMilliseconds(1000);
timing.SetJitterDelay(jitterDelayMs);
timing.UpdateCurrentDelay(timeStamp);
- waitTime = timing.MaxWaitingTime(timing.RenderTimeMs(
- timeStamp, clock.TimeInMilliseconds()), clock.TimeInMilliseconds());
+ waitTime = timing.MaxWaitingTime(
+ timing.RenderTimeMs(timeStamp, clock.TimeInMilliseconds()),
+ clock.TimeInMilliseconds());
// Since we gradually increase the delay we only get 100 ms every second.
EXPECT_EQ(jitterDelayMs - 10, waitTime);
@@ -85,11 +86,10 @@ TEST(ReceiverTiming, Tests) {
for (int i = 0; i < 10; i++) {
int64_t startTimeMs = clock.TimeInMilliseconds();
clock.AdvanceTimeMilliseconds(10);
- timing.StopDecodeTimer(timeStamp,
- clock.TimeInMilliseconds() - startTimeMs,
- clock.TimeInMilliseconds(),
- timing.RenderTimeMs(
- timeStamp, clock.TimeInMilliseconds()));
+ timing.StopDecodeTimer(
+ timeStamp, clock.TimeInMilliseconds() - startTimeMs,
+ clock.TimeInMilliseconds(),
+ timing.RenderTimeMs(timeStamp, clock.TimeInMilliseconds()));
timeStamp += 90000 / 25;
clock.AdvanceTimeMilliseconds(1000 / 25 - 10);
timing.IncomingTimestamp(timeStamp, clock.TimeInMilliseconds());
@@ -107,7 +107,7 @@ TEST(ReceiverTiming, Tests) {
uint32_t minTotalDelayMs = 200;
timing.set_min_playout_delay(minTotalDelayMs);
clock.AdvanceTimeMilliseconds(5000);
- timeStamp += 5*90000;
+ timeStamp += 5 * 90000;
timing.UpdateCurrentDelay(timeStamp);
const int kRenderDelayMs = 10;
timing.set_render_delay(kRenderDelayMs);
@@ -123,7 +123,7 @@ TEST(ReceiverTiming, Tests) {
// Reset playout delay.
timing.set_min_playout_delay(0);
clock.AdvanceTimeMilliseconds(5000);
- timeStamp += 5*90000;
+ timeStamp += 5 * 90000;
timing.UpdateCurrentDelay(timeStamp);
}
@@ -137,8 +137,8 @@ TEST(ReceiverTiming, WrapAround) {
timing.IncomingTimestamp(timestamp, clock.TimeInMilliseconds());
clock.AdvanceTimeMilliseconds(1000 / kFramerate);
timestamp += 90000 / kFramerate;
- int64_t render_time = timing.RenderTimeMs(0xFFFFFFFFu,
- clock.TimeInMilliseconds());
+ int64_t render_time =
+ timing.RenderTimeMs(0xFFFFFFFFu, clock.TimeInMilliseconds());
EXPECT_EQ(3 * 1000 / kFramerate, render_time);
render_time = timing.RenderTimeMs(89u, // One second later in 90 kHz.
clock.TimeInMilliseconds());
diff --git a/webrtc/modules/video_coding/utility/frame_dropper.cc b/webrtc/modules/video_coding/utility/frame_dropper.cc
index dfa7df841f..a0aa67be4e 100644
--- a/webrtc/modules/video_coding/utility/frame_dropper.cc
+++ b/webrtc/modules/video_coding/utility/frame_dropper.cc
@@ -12,8 +12,7 @@
#include "webrtc/system_wrappers/include/trace.h"
-namespace webrtc
-{
+namespace webrtc {
const float kDefaultKeyFrameSizeAvgKBits = 0.9f;
const float kDefaultKeyFrameRatio = 0.99f;
@@ -22,339 +21,266 @@ const float kDefaultDropRatioMax = 0.96f;
const float kDefaultMaxTimeToDropFrames = 4.0f; // In seconds.
FrameDropper::FrameDropper()
-:
-_keyFrameSizeAvgKbits(kDefaultKeyFrameSizeAvgKBits),
-_keyFrameRatio(kDefaultKeyFrameRatio),
-_dropRatio(kDefaultDropRatioAlpha, kDefaultDropRatioMax),
-_enabled(true),
-_max_time_drops(kDefaultMaxTimeToDropFrames)
-{
- Reset();
+ : _keyFrameSizeAvgKbits(kDefaultKeyFrameSizeAvgKBits),
+ _keyFrameRatio(kDefaultKeyFrameRatio),
+ _dropRatio(kDefaultDropRatioAlpha, kDefaultDropRatioMax),
+ _enabled(true),
+ _max_time_drops(kDefaultMaxTimeToDropFrames) {
+ Reset();
}
FrameDropper::FrameDropper(float max_time_drops)
-:
-_keyFrameSizeAvgKbits(kDefaultKeyFrameSizeAvgKBits),
-_keyFrameRatio(kDefaultKeyFrameRatio),
-_dropRatio(kDefaultDropRatioAlpha, kDefaultDropRatioMax),
-_enabled(true),
-_max_time_drops(max_time_drops)
-{
- Reset();
+ : _keyFrameSizeAvgKbits(kDefaultKeyFrameSizeAvgKBits),
+ _keyFrameRatio(kDefaultKeyFrameRatio),
+ _dropRatio(kDefaultDropRatioAlpha, kDefaultDropRatioMax),
+ _enabled(true),
+ _max_time_drops(max_time_drops) {
+ Reset();
}
-void
-FrameDropper::Reset()
-{
- _keyFrameRatio.Reset(0.99f);
- _keyFrameRatio.Apply(1.0f, 1.0f/300.0f); // 1 key frame every 10th second in 30 fps
- _keyFrameSizeAvgKbits.Reset(0.9f);
- _keyFrameCount = 0;
- _accumulator = 0.0f;
- _accumulatorMax = 150.0f; // assume 300 kb/s and 0.5 s window
- _targetBitRate = 300.0f;
- _incoming_frame_rate = 30;
- _keyFrameSpreadFrames = 0.5f * _incoming_frame_rate;
- _dropNext = false;
- _dropRatio.Reset(0.9f);
- _dropRatio.Apply(0.0f, 0.0f); // Initialize to 0
- _dropCount = 0;
- _windowSize = 0.5f;
- _wasBelowMax = true;
- _fastMode = false; // start with normal (non-aggressive) mode
- // Cap for the encoder buffer level/accumulator, in secs.
- _cap_buffer_size = 3.0f;
- // Cap on maximum amount of dropped frames between kept frames, in secs.
- _max_time_drops = 4.0f;
+void FrameDropper::Reset() {
+ _keyFrameRatio.Reset(0.99f);
+ _keyFrameRatio.Apply(
+ 1.0f, 1.0f / 300.0f); // 1 key frame every 10th second in 30 fps
+ _keyFrameSizeAvgKbits.Reset(0.9f);
+ _keyFrameCount = 0;
+ _accumulator = 0.0f;
+ _accumulatorMax = 150.0f; // assume 300 kb/s and 0.5 s window
+ _targetBitRate = 300.0f;
+ _incoming_frame_rate = 30;
+ _keyFrameSpreadFrames = 0.5f * _incoming_frame_rate;
+ _dropNext = false;
+ _dropRatio.Reset(0.9f);
+ _dropRatio.Apply(0.0f, 0.0f); // Initialize to 0
+ _dropCount = 0;
+ _windowSize = 0.5f;
+ _wasBelowMax = true;
+ _fastMode = false; // start with normal (non-aggressive) mode
+ // Cap for the encoder buffer level/accumulator, in secs.
+ _cap_buffer_size = 3.0f;
+ // Cap on maximum amount of dropped frames between kept frames, in secs.
+ _max_time_drops = 4.0f;
}
-void
-FrameDropper::Enable(bool enable)
-{
- _enabled = enable;
+void FrameDropper::Enable(bool enable) {
+ _enabled = enable;
}
-void
-FrameDropper::Fill(size_t frameSizeBytes, bool deltaFrame)
-{
- if (!_enabled)
- {
- return;
- }
- float frameSizeKbits = 8.0f * static_cast<float>(frameSizeBytes) / 1000.0f;
- if (!deltaFrame && !_fastMode) // fast mode does not treat key-frames any different
- {
- _keyFrameSizeAvgKbits.Apply(1, frameSizeKbits);
- _keyFrameRatio.Apply(1.0, 1.0);
- if (frameSizeKbits > _keyFrameSizeAvgKbits.filtered())
- {
- // Remove the average key frame size since we
- // compensate for key frames when adding delta
- // frames.
- frameSizeKbits -= _keyFrameSizeAvgKbits.filtered();
- }
- else
- {
- // Shouldn't be negative, so zero is the lower bound.
- frameSizeKbits = 0;
- }
- if (_keyFrameRatio.filtered() > 1e-5 &&
- 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames)
- {
- // We are sending key frames more often than our upper bound for
- // how much we allow the key frame compensation to be spread
- // out in time. Therefor we must use the key frame ratio rather
- // than keyFrameSpreadFrames.
- _keyFrameCount =
- static_cast<int32_t>(1 / _keyFrameRatio.filtered() + 0.5);
- }
- else
- {
- // Compensate for the key frame the following frames
- _keyFrameCount = static_cast<int32_t>(_keyFrameSpreadFrames + 0.5);
- }
+void FrameDropper::Fill(size_t frameSizeBytes, bool deltaFrame) {
+ if (!_enabled) {
+ return;
+ }
+ float frameSizeKbits = 8.0f * static_cast<float>(frameSizeBytes) / 1000.0f;
+ if (!deltaFrame &&
+ !_fastMode) { // fast mode does not treat key-frames any different
+ _keyFrameSizeAvgKbits.Apply(1, frameSizeKbits);
+ _keyFrameRatio.Apply(1.0, 1.0);
+ if (frameSizeKbits > _keyFrameSizeAvgKbits.filtered()) {
+ // Remove the average key frame size since we
+ // compensate for key frames when adding delta
+ // frames.
+ frameSizeKbits -= _keyFrameSizeAvgKbits.filtered();
+ } else {
+ // Shouldn't be negative, so zero is the lower bound.
+ frameSizeKbits = 0;
}
- else
- {
- // Decrease the keyFrameRatio
- _keyFrameRatio.Apply(1.0, 0.0);
+ if (_keyFrameRatio.filtered() > 1e-5 &&
+ 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames) {
+ // We are sending key frames more often than our upper bound for
+ // how much we allow the key frame compensation to be spread
+ // out in time. Therefor we must use the key frame ratio rather
+ // than keyFrameSpreadFrames.
+ _keyFrameCount =
+ static_cast<int32_t>(1 / _keyFrameRatio.filtered() + 0.5);
+ } else {
+ // Compensate for the key frame the following frames
+ _keyFrameCount = static_cast<int32_t>(_keyFrameSpreadFrames + 0.5);
}
- // Change the level of the accumulator (bucket)
- _accumulator += frameSizeKbits;
- CapAccumulator();
+ } else {
+ // Decrease the keyFrameRatio
+ _keyFrameRatio.Apply(1.0, 0.0);
+ }
+ // Change the level of the accumulator (bucket)
+ _accumulator += frameSizeKbits;
+ CapAccumulator();
}
-void
-FrameDropper::Leak(uint32_t inputFrameRate)
-{
- if (!_enabled)
- {
- return;
- }
- if (inputFrameRate < 1)
- {
- return;
- }
- if (_targetBitRate < 0.0f)
- {
- return;
- }
- _keyFrameSpreadFrames = 0.5f * inputFrameRate;
- // T is the expected bits per frame (target). If all frames were the same size,
- // we would get T bits per frame. Notice that T is also weighted to be able to
- // force a lower frame rate if wanted.
- float T = _targetBitRate / inputFrameRate;
- if (_keyFrameCount > 0)
- {
- // Perform the key frame compensation
- if (_keyFrameRatio.filtered() > 0 &&
- 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames)
- {
- T -= _keyFrameSizeAvgKbits.filtered() * _keyFrameRatio.filtered();
- }
- else
- {
- T -= _keyFrameSizeAvgKbits.filtered() / _keyFrameSpreadFrames;
- }
- _keyFrameCount--;
- }
- _accumulator -= T;
- if (_accumulator < 0.0f)
- {
- _accumulator = 0.0f;
+void FrameDropper::Leak(uint32_t inputFrameRate) {
+ if (!_enabled) {
+ return;
+ }
+ if (inputFrameRate < 1) {
+ return;
+ }
+ if (_targetBitRate < 0.0f) {
+ return;
+ }
+ _keyFrameSpreadFrames = 0.5f * inputFrameRate;
+ // T is the expected bits per frame (target). If all frames were the same
+ // size,
+ // we would get T bits per frame. Notice that T is also weighted to be able to
+ // force a lower frame rate if wanted.
+ float T = _targetBitRate / inputFrameRate;
+ if (_keyFrameCount > 0) {
+ // Perform the key frame compensation
+ if (_keyFrameRatio.filtered() > 0 &&
+ 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames) {
+ T -= _keyFrameSizeAvgKbits.filtered() * _keyFrameRatio.filtered();
+ } else {
+ T -= _keyFrameSizeAvgKbits.filtered() / _keyFrameSpreadFrames;
}
- UpdateRatio();
+ _keyFrameCount--;
+ }
+ _accumulator -= T;
+ if (_accumulator < 0.0f) {
+ _accumulator = 0.0f;
+ }
+ UpdateRatio();
}
-void
-FrameDropper::UpdateNack(uint32_t nackBytes)
-{
- if (!_enabled)
- {
- return;
- }
- _accumulator += static_cast<float>(nackBytes) * 8.0f / 1000.0f;
+void FrameDropper::UpdateNack(uint32_t nackBytes) {
+ if (!_enabled) {
+ return;
+ }
+ _accumulator += static_cast<float>(nackBytes) * 8.0f / 1000.0f;
}
-void
-FrameDropper::FillBucket(float inKbits, float outKbits)
-{
- _accumulator += (inKbits - outKbits);
+void FrameDropper::FillBucket(float inKbits, float outKbits) {
+ _accumulator += (inKbits - outKbits);
}
-void
-FrameDropper::UpdateRatio()
-{
- if (_accumulator > 1.3f * _accumulatorMax)
- {
- // Too far above accumulator max, react faster
- _dropRatio.UpdateBase(0.8f);
+void FrameDropper::UpdateRatio() {
+ if (_accumulator > 1.3f * _accumulatorMax) {
+ // Too far above accumulator max, react faster
+ _dropRatio.UpdateBase(0.8f);
+ } else {
+ // Go back to normal reaction
+ _dropRatio.UpdateBase(0.9f);
+ }
+ if (_accumulator > _accumulatorMax) {
+ // We are above accumulator max, and should ideally
+ // drop a frame. Increase the dropRatio and drop
+ // the frame later.
+ if (_wasBelowMax) {
+ _dropNext = true;
}
- else
- {
- // Go back to normal reaction
- _dropRatio.UpdateBase(0.9f);
+ if (_fastMode) {
+ // always drop in aggressive mode
+ _dropNext = true;
}
- if (_accumulator > _accumulatorMax)
- {
- // We are above accumulator max, and should ideally
- // drop a frame. Increase the dropRatio and drop
- // the frame later.
- if (_wasBelowMax)
- {
- _dropNext = true;
- }
- if (_fastMode)
- {
- // always drop in aggressive mode
- _dropNext = true;
- }
- _dropRatio.Apply(1.0f, 1.0f);
- _dropRatio.UpdateBase(0.9f);
- }
- else
- {
- _dropRatio.Apply(1.0f, 0.0f);
- }
- _wasBelowMax = _accumulator < _accumulatorMax;
+ _dropRatio.Apply(1.0f, 1.0f);
+ _dropRatio.UpdateBase(0.9f);
+ } else {
+ _dropRatio.Apply(1.0f, 0.0f);
+ }
+ _wasBelowMax = _accumulator < _accumulatorMax;
}
-// This function signals when to drop frames to the caller. It makes use of the dropRatio
+// This function signals when to drop frames to the caller. It makes use of the
+// dropRatio
// to smooth out the drops over time.
-bool
-FrameDropper::DropFrame()
-{
- if (!_enabled)
- {
- return false;
+bool FrameDropper::DropFrame() {
+ if (!_enabled) {
+ return false;
+ }
+ if (_dropNext) {
+ _dropNext = false;
+ _dropCount = 0;
+ }
+
+ if (_dropRatio.filtered() >= 0.5f) { // Drops per keep
+ // limit is the number of frames we should drop between each kept frame
+ // to keep our drop ratio. limit is positive in this case.
+ float denom = 1.0f - _dropRatio.filtered();
+ if (denom < 1e-5) {
+ denom = 1e-5f;
+ }
+ int32_t limit = static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
+ // Put a bound on the max amount of dropped frames between each kept
+ // frame, in terms of frame rate and window size (secs).
+ int max_limit = static_cast<int>(_incoming_frame_rate * _max_time_drops);
+ if (limit > max_limit) {
+ limit = max_limit;
}
- if (_dropNext)
- {
- _dropNext = false;
+ if (_dropCount < 0) {
+ // Reset the _dropCount since it was negative and should be positive.
+ if (_dropRatio.filtered() > 0.4f) {
+ _dropCount = -_dropCount;
+ } else {
_dropCount = 0;
+ }
}
-
- if (_dropRatio.filtered() >= 0.5f) // Drops per keep
- {
- // limit is the number of frames we should drop between each kept frame
- // to keep our drop ratio. limit is positive in this case.
- float denom = 1.0f - _dropRatio.filtered();
- if (denom < 1e-5)
- {
- denom = (float)1e-5;
- }
- int32_t limit = static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
- // Put a bound on the max amount of dropped frames between each kept
- // frame, in terms of frame rate and window size (secs).
- int max_limit = static_cast<int>(_incoming_frame_rate *
- _max_time_drops);
- if (limit > max_limit) {
- limit = max_limit;
- }
- if (_dropCount < 0)
- {
- // Reset the _dropCount since it was negative and should be positive.
- if (_dropRatio.filtered() > 0.4f)
- {
- _dropCount = -_dropCount;
- }
- else
- {
- _dropCount = 0;
- }
- }
- if (_dropCount < limit)
- {
- // As long we are below the limit we should drop frames.
- _dropCount++;
- return true;
- }
- else
- {
- // Only when we reset _dropCount a frame should be kept.
- _dropCount = 0;
- return false;
- }
+ if (_dropCount < limit) {
+ // As long we are below the limit we should drop frames.
+ _dropCount++;
+ return true;
+ } else {
+ // Only when we reset _dropCount a frame should be kept.
+ _dropCount = 0;
+ return false;
}
- else if (_dropRatio.filtered() > 0.0f &&
- _dropRatio.filtered() < 0.5f) // Keeps per drop
- {
- // limit is the number of frames we should keep between each drop
- // in order to keep the drop ratio. limit is negative in this case,
- // and the _dropCount is also negative.
- float denom = _dropRatio.filtered();
- if (denom < 1e-5)
- {
- denom = (float)1e-5;
- }
- int32_t limit = -static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
- if (_dropCount > 0)
- {
- // Reset the _dropCount since we have a positive
- // _dropCount, and it should be negative.
- if (_dropRatio.filtered() < 0.6f)
- {
- _dropCount = -_dropCount;
- }
- else
- {
- _dropCount = 0;
- }
- }
- if (_dropCount > limit)
- {
- if (_dropCount == 0)
- {
- // Drop frames when we reset _dropCount.
- _dropCount--;
- return true;
- }
- else
- {
- // Keep frames as long as we haven't reached limit.
- _dropCount--;
- return false;
- }
- }
- else
- {
- _dropCount = 0;
- return false;
- }
+ } else if (_dropRatio.filtered() > 0.0f &&
+ _dropRatio.filtered() < 0.5f) { // Keeps per drop
+ // limit is the number of frames we should keep between each drop
+ // in order to keep the drop ratio. limit is negative in this case,
+ // and the _dropCount is also negative.
+ float denom = _dropRatio.filtered();
+ if (denom < 1e-5) {
+ denom = 1e-5f;
}
- _dropCount = 0;
- return false;
+ int32_t limit = -static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
+ if (_dropCount > 0) {
+ // Reset the _dropCount since we have a positive
+ // _dropCount, and it should be negative.
+ if (_dropRatio.filtered() < 0.6f) {
+ _dropCount = -_dropCount;
+ } else {
+ _dropCount = 0;
+ }
+ }
+ if (_dropCount > limit) {
+ if (_dropCount == 0) {
+ // Drop frames when we reset _dropCount.
+ _dropCount--;
+ return true;
+ } else {
+ // Keep frames as long as we haven't reached limit.
+ _dropCount--;
+ return false;
+ }
+ } else {
+ _dropCount = 0;
+ return false;
+ }
+ }
+ _dropCount = 0;
+ return false;
- // A simpler version, unfiltered and quicker
- //bool dropNext = _dropNext;
- //_dropNext = false;
- //return dropNext;
+ // A simpler version, unfiltered and quicker
+ // bool dropNext = _dropNext;
+ // _dropNext = false;
+ // return dropNext;
}
-void
-FrameDropper::SetRates(float bitRate, float incoming_frame_rate)
-{
- // Bit rate of -1 means infinite bandwidth.
- _accumulatorMax = bitRate * _windowSize; // bitRate * windowSize (in seconds)
- if (_targetBitRate > 0.0f && bitRate < _targetBitRate && _accumulator > _accumulatorMax)
- {
- // Rescale the accumulator level if the accumulator max decreases
- _accumulator = bitRate / _targetBitRate * _accumulator;
- }
- _targetBitRate = bitRate;
- CapAccumulator();
- _incoming_frame_rate = incoming_frame_rate;
+void FrameDropper::SetRates(float bitRate, float incoming_frame_rate) {
+ // Bit rate of -1 means infinite bandwidth.
+ _accumulatorMax = bitRate * _windowSize; // bitRate * windowSize (in seconds)
+ if (_targetBitRate > 0.0f && bitRate < _targetBitRate &&
+ _accumulator > _accumulatorMax) {
+ // Rescale the accumulator level if the accumulator max decreases
+ _accumulator = bitRate / _targetBitRate * _accumulator;
+ }
+ _targetBitRate = bitRate;
+ CapAccumulator();
+ _incoming_frame_rate = incoming_frame_rate;
}
-float
-FrameDropper::ActualFrameRate(uint32_t inputFrameRate) const
-{
- if (!_enabled)
- {
- return static_cast<float>(inputFrameRate);
- }
- return inputFrameRate * (1.0f - _dropRatio.filtered());
+float FrameDropper::ActualFrameRate(uint32_t inputFrameRate) const {
+ if (!_enabled) {
+ return static_cast<float>(inputFrameRate);
+ }
+ return inputFrameRate * (1.0f - _dropRatio.filtered());
}
// Put a cap on the accumulator, i.e., don't let it grow beyond some level.
@@ -366,5 +292,4 @@ void FrameDropper::CapAccumulator() {
_accumulator = max_accumulator;
}
}
-
-}
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/utility/frame_dropper.h b/webrtc/modules/video_coding/utility/frame_dropper.h
index 2293cefed2..7ec85ea880 100644
--- a/webrtc/modules/video_coding/utility/frame_dropper.h
+++ b/webrtc/modules/video_coding/utility/frame_dropper.h
@@ -23,72 +23,72 @@ namespace webrtc {
// over use when the encoder can't keep its bit rate.
class FrameDropper {
public:
- FrameDropper();
- explicit FrameDropper(float max_time_drops);
- virtual ~FrameDropper() {}
+ FrameDropper();
+ explicit FrameDropper(float max_time_drops);
+ virtual ~FrameDropper() {}
- // Resets the FrameDropper to its initial state.
- // This means that the frameRateWeight is set to its
- // default value as well.
- virtual void Reset();
+ // Resets the FrameDropper to its initial state.
+ // This means that the frameRateWeight is set to its
+ // default value as well.
+ virtual void Reset();
- virtual void Enable(bool enable);
- // Answers the question if it's time to drop a frame
- // if we want to reach a given frame rate. Must be
- // called for every frame.
- //
- // Return value : True if we should drop the current frame
- virtual bool DropFrame();
- // Updates the FrameDropper with the size of the latest encoded
- // frame. The FrameDropper calculates a new drop ratio (can be
- // seen as the probability to drop a frame) and updates its
- // internal statistics.
- //
- // Input:
- // - frameSizeBytes : The size of the latest frame
- // returned from the encoder.
- // - deltaFrame : True if the encoder returned
- // a key frame.
- virtual void Fill(size_t frameSizeBytes, bool deltaFrame);
+ virtual void Enable(bool enable);
+ // Answers the question if it's time to drop a frame
+ // if we want to reach a given frame rate. Must be
+ // called for every frame.
+ //
+ // Return value : True if we should drop the current frame
+ virtual bool DropFrame();
+ // Updates the FrameDropper with the size of the latest encoded
+ // frame. The FrameDropper calculates a new drop ratio (can be
+ // seen as the probability to drop a frame) and updates its
+ // internal statistics.
+ //
+ // Input:
+ // - frameSizeBytes : The size of the latest frame
+ // returned from the encoder.
+ // - deltaFrame : True if the encoder returned
+ // a key frame.
+ virtual void Fill(size_t frameSizeBytes, bool deltaFrame);
- virtual void Leak(uint32_t inputFrameRate);
+ virtual void Leak(uint32_t inputFrameRate);
- void UpdateNack(uint32_t nackBytes);
+ void UpdateNack(uint32_t nackBytes);
- // Sets the target bit rate and the frame rate produced by
- // the camera.
- //
- // Input:
- // - bitRate : The target bit rate
- virtual void SetRates(float bitRate, float incoming_frame_rate);
+ // Sets the target bit rate and the frame rate produced by
+ // the camera.
+ //
+ // Input:
+ // - bitRate : The target bit rate
+ virtual void SetRates(float bitRate, float incoming_frame_rate);
- // Return value : The current average frame rate produced
- // if the DropFrame() function is used as
- // instruction of when to drop frames.
- virtual float ActualFrameRate(uint32_t inputFrameRate) const;
+ // Return value : The current average frame rate produced
+ // if the DropFrame() function is used as
+ // instruction of when to drop frames.
+ virtual float ActualFrameRate(uint32_t inputFrameRate) const;
private:
- void FillBucket(float inKbits, float outKbits);
- void UpdateRatio();
- void CapAccumulator();
+ void FillBucket(float inKbits, float outKbits);
+ void UpdateRatio();
+ void CapAccumulator();
- rtc::ExpFilter _keyFrameSizeAvgKbits;
- rtc::ExpFilter _keyFrameRatio;
- float _keyFrameSpreadFrames;
- int32_t _keyFrameCount;
- float _accumulator;
- float _accumulatorMax;
- float _targetBitRate;
- bool _dropNext;
- rtc::ExpFilter _dropRatio;
- int32_t _dropCount;
- float _windowSize;
- float _incoming_frame_rate;
- bool _wasBelowMax;
- bool _enabled;
- bool _fastMode;
- float _cap_buffer_size;
- float _max_time_drops;
+ rtc::ExpFilter _keyFrameSizeAvgKbits;
+ rtc::ExpFilter _keyFrameRatio;
+ float _keyFrameSpreadFrames;
+ int32_t _keyFrameCount;
+ float _accumulator;
+ float _accumulatorMax;
+ float _targetBitRate;
+ bool _dropNext;
+ rtc::ExpFilter _dropRatio;
+ int32_t _dropCount;
+ float _windowSize;
+ float _incoming_frame_rate;
+ bool _wasBelowMax;
+ bool _enabled;
+ bool _fastMode;
+ float _cap_buffer_size;
+ float _max_time_drops;
};  // end of FrameDropper class
} // namespace webrtc
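
The FrameDropper declared above is consulted once per frame: DropFrame() answers whether the current frame should be skipped, Fill() reports the size the encoder actually produced, Leak() drains the internal bucket based on the incoming frame rate, and SetRates() updates the targets. The sketch below shows one plausible call order inferred from the header comments; EncodeFrameAndGetBytes() is a placeholder invented for the illustration, and the loop is not how the module's real callers are structured.

#include <cstddef>
#include <cstdint>

#include "webrtc/modules/video_coding/utility/frame_dropper.h"

// Placeholder encoder call invented for this sketch: one large key frame
// followed by small delta frames.
static size_t EncodeFrameAndGetBytes(uint32_t frame_index) {
  return frame_index == 0 ? 12000 : 3000;
}

void EncodeLoopSketch(float target_bitrate_kbps, uint32_t input_fps) {
  webrtc::FrameDropper dropper;
  dropper.SetRates(target_bitrate_kbps, static_cast<float>(input_fps));
  for (uint32_t i = 0; i < 100; ++i) {
    dropper.Leak(input_fps);  // Drain the bucket at the incoming frame rate.
    if (dropper.DropFrame())
      continue;  // Skip this frame to protect the target bit rate.
    const size_t encoded_bytes = EncodeFrameAndGetBytes(i);
    const bool delta_frame = (i != 0);  // Only the first frame is a key frame.
    dropper.Fill(encoded_bytes, delta_frame);
  }
}
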
diff --git a/webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h b/webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h
index dee00ba0b5..b68a4b8d5d 100644
--- a/webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h
+++ b/webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h
@@ -20,20 +20,13 @@ namespace webrtc {
class MockFrameDropper : public FrameDropper {
public:
- MOCK_METHOD0(Reset,
- void());
- MOCK_METHOD1(Enable,
- void(bool enable));
- MOCK_METHOD0(DropFrame,
- bool());
- MOCK_METHOD2(Fill,
- void(size_t frameSizeBytes, bool deltaFrame));
- MOCK_METHOD1(Leak,
- void(uint32_t inputFrameRate));
- MOCK_METHOD2(SetRates,
- void(float bitRate, float incoming_frame_rate));
- MOCK_CONST_METHOD1(ActualFrameRate,
- float(uint32_t inputFrameRate));
+ MOCK_METHOD0(Reset, void());
+ MOCK_METHOD1(Enable, void(bool enable));
+ MOCK_METHOD0(DropFrame, bool());
+ MOCK_METHOD2(Fill, void(size_t frameSizeBytes, bool deltaFrame));
+ MOCK_METHOD1(Leak, void(uint32_t inputFrameRate));
+ MOCK_METHOD2(SetRates, void(float bitRate, float incoming_frame_rate));
+ MOCK_CONST_METHOD1(ActualFrameRate, float(uint32_t inputFrameRate));
};
} // namespace webrtc
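
Each MOCK_METHODn line above generates a gmock stub whose behaviour is scripted per test with EXPECT_CALL. A minimal, illustration-only test follows; the gtest/gmock include paths are assumed to match the ones used by the other video_coding unit tests.

#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h"

using ::testing::Return;

TEST(MockFrameDropperSketch, ScriptsDropDecision) {
  webrtc::MockFrameDropper dropper;
  // Expect exactly one rate update and script the next drop decision.
  EXPECT_CALL(dropper, SetRates(300.0f, 30.0f)).Times(1);
  EXPECT_CALL(dropper, DropFrame()).WillOnce(Return(true));

  dropper.SetRates(300.0f, 30.0f);
  EXPECT_TRUE(dropper.DropFrame());  // Returns the scripted value.
}
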
diff --git a/webrtc/modules/video_coding/utility/moving_average.h b/webrtc/modules/video_coding/utility/moving_average.h
index 8de1dd2a43..494bfd51fb 100644
--- a/webrtc/modules/video_coding/utility/moving_average.h
+++ b/webrtc/modules/video_coding/utility/moving_average.h
@@ -16,7 +16,7 @@
#include "webrtc/typedefs.h"
namespace webrtc {
-template<class T>
+template <class T>
class MovingAverage {
public:
MovingAverage();
@@ -30,17 +30,17 @@ class MovingAverage {
std::list<T> samples_;
};
-template<class T>
-MovingAverage<T>::MovingAverage() : sum_(static_cast<T>(0)) {
-}
+template <class T>
+MovingAverage<T>::MovingAverage()
+ : sum_(static_cast<T>(0)) {}
-template<class T>
+template <class T>
void MovingAverage<T>::AddSample(T sample) {
samples_.push_back(sample);
sum_ += sample;
}
-template<class T>
+template <class T>
bool MovingAverage<T>::GetAverage(size_t num_samples, T* avg) {
if (num_samples > samples_.size())
return false;
@@ -55,13 +55,13 @@ bool MovingAverage<T>::GetAverage(size_t num_samples, T* avg) {
return true;
}
-template<class T>
+template <class T>
void MovingAverage<T>::Reset() {
sum_ = static_cast<T>(0);
samples_.clear();
}
-template<class T>
+template <class T>
int MovingAverage<T>::size() {
return samples_.size();
}
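
For reference, a short usage sketch of the MovingAverage<T> template above. The guard at the top of GetAverage() makes it fail when fewer than num_samples samples have been added; the sketch assumes, as the name suggests, that the average is taken over the most recent num_samples entries.

#include <cstdio>

#include "webrtc/modules/video_coding/utility/moving_average.h"

int main() {
  webrtc::MovingAverage<int> qp_average;
  int avg = 0;
  if (!qp_average.GetAverage(1, &avg))
    std::printf("no samples yet\n");  // Fails: requested more samples than stored.

  const int samples[] = {30, 32, 34, 36};
  for (int qp : samples)
    qp_average.AddSample(qp);

  if (qp_average.GetAverage(4, &avg))
    std::printf("average qp = %d\n", avg);  // (30 + 32 + 34 + 36) / 4 = 33.

  qp_average.Reset();  // Clears the samples and the running sum.
  std::printf("samples after reset = %d\n", qp_average.size());  // 0.
  return 0;
}
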
diff --git a/webrtc/modules/video_coding/utility/quality_scaler.cc b/webrtc/modules/video_coding/utility/quality_scaler.cc
index 9aae17c6b6..76bf9f5b03 100644
--- a/webrtc/modules/video_coding/utility/quality_scaler.cc
+++ b/webrtc/modules/video_coding/utility/quality_scaler.cc
@@ -26,8 +26,7 @@ QualityScaler::QualityScaler()
downscale_shift_(0),
framerate_down_(false),
min_width_(kDefaultMinDownscaleDimension),
- min_height_(kDefaultMinDownscaleDimension) {
-}
+ min_height_(kDefaultMinDownscaleDimension) {}
void QualityScaler::Init(int low_qp_threshold,
int high_qp_threshold,
@@ -91,7 +90,7 @@ void QualityScaler::OnEncodeFrame(const VideoFrame& frame) {
AdjustScale(false);
}
} else if (average_qp_.GetAverage(num_samples_, &avg_qp) &&
- avg_qp <= low_qp_threshold_) {
+ avg_qp <= low_qp_threshold_) {
if (use_framerate_reduction_ && framerate_down_) {
target_framerate_ = -1;
framerate_down_ = false;
@@ -104,7 +103,7 @@ void QualityScaler::OnEncodeFrame(const VideoFrame& frame) {
assert(downscale_shift_ >= 0);
for (int shift = downscale_shift_;
shift > 0 && (res_.width / 2 >= min_width_) &&
- (res_.height / 2 >= min_height_);
+ (res_.height / 2 >= min_height_);
--shift) {
res_.width /= 2;
res_.height /= 2;
@@ -124,13 +123,8 @@ const VideoFrame& QualityScaler::GetScaledFrame(const VideoFrame& frame) {
if (res.width == frame.width())
return frame;
- scaler_.Set(frame.width(),
- frame.height(),
- res.width,
- res.height,
- kI420,
- kI420,
- kScaleBox);
+ scaler_.Set(frame.width(), frame.height(), res.width, res.height, kI420,
+ kI420, kScaleBox);
if (scaler_.Scale(frame, &scaled_frame_) != 0)
return frame;
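
The loop reformatted in OnEncodeFrame() above converts the accumulated downscale_shift_ into a concrete resolution: each step halves both dimensions, and the loop stops early once another halving would drop below the configured minimum. A stand-alone restatement, with names invented for the sketch:

#include <cassert>
#include <cstdio>

struct Resolution {
  int width;
  int height;
};

// Applies up to downscale_shift halvings while both dimensions stay at or
// above the minimum, mirroring the bound in the loop above.
Resolution ApplyDownscaleShift(Resolution res,
                               int downscale_shift,
                               int min_width,
                               int min_height) {
  assert(downscale_shift >= 0);
  for (int shift = downscale_shift;
       shift > 0 && (res.width / 2 >= min_width) &&
       (res.height / 2 >= min_height);
       --shift) {
    res.width /= 2;
    res.height /= 2;
  }
  return res;
}

int main() {
  const Resolution res = ApplyDownscaleShift({1280, 720}, 3, 160, 90);
  std::printf("%dx%d\n", res.width, res.height);  // 160x90 after three halvings.
  return 0;
}
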
diff --git a/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc b/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc
index 58f7bee484..bad73a748c 100644
--- a/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc
+++ b/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc
@@ -33,6 +33,7 @@ class QualityScalerTest : public ::testing::Test {
int width;
int height;
};
+
protected:
enum ScaleDirection {
kKeepScaleAtHighQp,
@@ -43,8 +44,8 @@ class QualityScalerTest : public ::testing::Test {
enum BadQualityMetric { kDropFrame, kReportLowQP };
QualityScalerTest() {
- input_frame_.CreateEmptyFrame(
- kWidth, kHeight, kWidth, kHalfWidth, kHalfWidth);
+ input_frame_.CreateEmptyFrame(kWidth, kHeight, kWidth, kHalfWidth,
+ kHalfWidth);
qs_.Init(kMaxQp / QualityScaler::kDefaultLowQpDenominator, kHighQp, false);
qs_.ReportFramerate(kFramerate);
qs_.OnEncodeFrame(input_frame_);
@@ -97,7 +98,8 @@ class QualityScalerTest : public ::testing::Test {
int num_second,
int initial_framerate);
- void VerifyQualityAdaptation(int initial_framerate, int seconds,
+ void VerifyQualityAdaptation(int initial_framerate,
+ int seconds,
bool expect_spatial_resize,
bool expect_framerate_reduction);
@@ -183,8 +185,8 @@ TEST_F(QualityScalerTest, DoesNotDownscaleAfterHalfFramedrop) {
void QualityScalerTest::ContinuouslyDownscalesByHalfDimensionsAndBackUp() {
const int initial_min_dimension = input_frame_.width() < input_frame_.height()
- ? input_frame_.width()
- : input_frame_.height();
+ ? input_frame_.width()
+ : input_frame_.height();
int min_dimension = initial_min_dimension;
int current_shift = 0;
// Drop all frames to force-trigger downscaling.
@@ -229,14 +231,14 @@ TEST_F(QualityScalerTest,
const int kOddWidth = 517;
const int kHalfOddWidth = (kOddWidth + 1) / 2;
const int kOddHeight = 1239;
- input_frame_.CreateEmptyFrame(
- kOddWidth, kOddHeight, kOddWidth, kHalfOddWidth, kHalfOddWidth);
+ input_frame_.CreateEmptyFrame(kOddWidth, kOddHeight, kOddWidth, kHalfOddWidth,
+ kHalfOddWidth);
ContinuouslyDownscalesByHalfDimensionsAndBackUp();
}
void QualityScalerTest::DoesNotDownscaleFrameDimensions(int width, int height) {
- input_frame_.CreateEmptyFrame(
- width, height, width, (width + 1) / 2, (width + 1) / 2);
+ input_frame_.CreateEmptyFrame(width, height, width, (width + 1) / 2,
+ (width + 1) / 2);
for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
qs_.ReportDroppedFrame();
@@ -259,7 +261,9 @@ TEST_F(QualityScalerTest, DoesNotDownscaleFrom1Px) {
}
QualityScalerTest::Resolution QualityScalerTest::TriggerResolutionChange(
- BadQualityMetric dropframe_lowqp, int num_second, int initial_framerate) {
+ BadQualityMetric dropframe_lowqp,
+ int num_second,
+ int initial_framerate) {
QualityScalerTest::Resolution res;
res.framerate = initial_framerate;
qs_.OnEncodeFrame(input_frame_);
@@ -288,7 +292,9 @@ QualityScalerTest::Resolution QualityScalerTest::TriggerResolutionChange(
}
void QualityScalerTest::VerifyQualityAdaptation(
- int initial_framerate, int seconds, bool expect_spatial_resize,
+ int initial_framerate,
+ int seconds,
+ bool expect_spatial_resize,
bool expect_framerate_reduction) {
const int kDisabledBadQpThreshold = kMaxQp + 1;
qs_.Init(kMaxQp / QualityScaler::kDefaultLowQpDenominator,
@@ -298,8 +304,8 @@ void QualityScalerTest::VerifyQualityAdaptation(
int init_height = qs_.GetScaledResolution().height;
// Test reducing framerate by dropping frame continuously.
- QualityScalerTest::Resolution res = TriggerResolutionChange(
- kDropFrame, seconds, initial_framerate);
+ QualityScalerTest::Resolution res =
+ TriggerResolutionChange(kDropFrame, seconds, initial_framerate);
if (expect_framerate_reduction) {
EXPECT_LT(res.framerate, initial_framerate);
diff --git a/webrtc/modules/video_coding/utility/vp8_header_parser.cc b/webrtc/modules/video_coding/utility/vp8_header_parser.cc
index 13d1616210..631385d0f2 100644
--- a/webrtc/modules/video_coding/utility/vp8_header_parser.cc
+++ b/webrtc/modules/video_coding/utility/vp8_header_parser.cc
@@ -43,12 +43,12 @@ static void VP8LoadNewBytes(VP8BitReader* const br) {
const uint32_t in_bits = *(const uint32_t*)(br->buf_);
br->buf_ += BITS >> 3;
#if defined(WEBRTC_ARCH_BIG_ENDIAN)
- bits = static_cast<uint32_t>(in_bits);
- if (BITS != 8 * sizeof(uint32_t))
- bits >>= (8 * sizeof(uint32_t) - BITS);
+ bits = static_cast<uint32_t>(in_bits);
+ if (BITS != 8 * sizeof(uint32_t))
+ bits >>= (8 * sizeof(uint32_t) - BITS);
#else
- bits = BSwap32(in_bits);
- bits >>= 32 - BITS;
+ bits = BSwap32(in_bits);
+ bits >>= 32 - BITS;
#endif
br->value_ = bits | (br->value_ << BITS);
br->bits_ += BITS;
@@ -60,12 +60,12 @@ static void VP8LoadNewBytes(VP8BitReader* const br) {
static void VP8InitBitReader(VP8BitReader* const br,
const uint8_t* const start,
const uint8_t* const end) {
- br->range_ = 255 - 1;
- br->buf_ = start;
+ br->range_ = 255 - 1;
+ br->buf_ = start;
br->buf_end_ = end;
- br->value_ = 0;
- br->bits_ = -8; // To load the very first 8bits.
- br->eof_ = 0;
+ br->value_ = 0;
+ br->bits_ = -8; // To load the very first 8bits.
+ br->eof_ = 0;
VP8LoadNewBytes(br);
}
@@ -122,7 +122,7 @@ static void ParseSegmentHeader(VP8BitReader* br) {
int s;
VP8Get(br);
for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
- VP8Get(br) ? VP8GetSignedValue(br, 7) : 0;
+ VP8Get(br) ? VP8GetSignedValue(br, 7) : 0;
}
for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
VP8Get(br) ? VP8GetSignedValue(br, 6) : 0;
diff --git a/webrtc/modules/video_coding/utility/vp8_header_parser.h b/webrtc/modules/video_coding/utility/vp8_header_parser.h
index b4b1d3356f..b0c684c578 100644
--- a/webrtc/modules/video_coding/utility/vp8_header_parser.h
+++ b/webrtc/modules/video_coding/utility/vp8_header_parser.h
@@ -28,46 +28,34 @@ enum {
typedef struct VP8BitReader VP8BitReader;
struct VP8BitReader {
// Boolean decoder.
- uint32_t value_; // Current value.
- uint32_t range_; // Current range minus 1. In [127, 254] interval.
- int bits_; // Number of valid bits left.
+ uint32_t value_; // Current value.
+ uint32_t range_; // Current range minus 1. In [127, 254] interval.
+ int bits_; // Number of valid bits left.
// Read buffer.
- const uint8_t* buf_; // Next byte to be read.
- const uint8_t* buf_end_; // End of read buffer.
- int eof_; // True if input is exhausted.
+ const uint8_t* buf_; // Next byte to be read.
+ const uint8_t* buf_end_; // End of read buffer.
+ int eof_; // True if input is exhausted.
};
const uint8_t kVP8Log2Range[128] = {
- 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 0
-};
+ 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0};
// range = ((range - 1) << kVP8Log2Range[range]) + 1
const uint8_t kVP8NewRange[128] = {
- 127, 127, 191, 127, 159, 191, 223, 127,
- 143, 159, 175, 191, 207, 223, 239, 127,
- 135, 143, 151, 159, 167, 175, 183, 191,
- 199, 207, 215, 223, 231, 239, 247, 127,
- 131, 135, 139, 143, 147, 151, 155, 159,
- 163, 167, 171, 175, 179, 183, 187, 191,
- 195, 199, 203, 207, 211, 215, 219, 223,
- 227, 231, 235, 239, 243, 247, 251, 127,
- 129, 131, 133, 135, 137, 139, 141, 143,
- 145, 147, 149, 151, 153, 155, 157, 159,
- 161, 163, 165, 167, 169, 171, 173, 175,
- 177, 179, 181, 183, 185, 187, 189, 191,
- 193, 195, 197, 199, 201, 203, 205, 207,
- 209, 211, 213, 215, 217, 219, 221, 223,
- 225, 227, 229, 231, 233, 235, 237, 239,
- 241, 243, 245, 247, 249, 251, 253, 127
-};
+ 127, 127, 191, 127, 159, 191, 223, 127, 143, 159, 175, 191, 207, 223, 239,
+ 127, 135, 143, 151, 159, 167, 175, 183, 191, 199, 207, 215, 223, 231, 239,
+ 247, 127, 131, 135, 139, 143, 147, 151, 155, 159, 163, 167, 171, 175, 179,
+ 183, 187, 191, 195, 199, 203, 207, 211, 215, 219, 223, 227, 231, 235, 239,
+ 243, 247, 251, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149,
+ 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179,
+ 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209,
+ 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239,
+ 241, 243, 245, 247, 249, 251, 253, 127};
// Gets the QP, QP range: [0, 127].
// Returns true on success, false otherwise.
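
The two tables above drive the boolean decoder's renormalization. One consistent reading of the formula quoted in the comment, expressed on the stored range-minus-one values, is: for a post-split value r in [0, 127], kVP8Log2Range[r] is the shift that brings the range back into the [127, 254] interval noted in the VP8BitReader comment, and kVP8NewRange[r] equals ((r + 1) << kVP8Log2Range[r]) - 1. The stand-alone spot checks below reproduce entries that can be read directly off the tables; this is offered as illustration, not as part of the parser.

#include <cassert>
#include <cstdint>

// Number of left shifts needed for the range (r + 1) to reach at least 128.
static uint8_t Log2Shift(uint32_t r) {
  uint8_t shift = 0;
  for (uint32_t range = r + 1; range < 128; range <<= 1)
    ++shift;
  return shift;
}

// New stored range-minus-one value after renormalization.
static uint8_t Renormalize(uint32_t r) {
  return static_cast<uint8_t>(((r + 1) << Log2Shift(r)) - 1);
}

int main() {
  assert(Log2Shift(0) == 7 && Renormalize(0) == 127);      // First table entries.
  assert(Log2Shift(2) == 6 && Renormalize(2) == 191);
  assert(Log2Shift(4) == 5 && Renormalize(4) == 159);
  assert(Log2Shift(127) == 0 && Renormalize(127) == 127);  // Last table entries.
  return 0;
}
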
diff --git a/webrtc/modules/video_coding/video_coding_impl.cc b/webrtc/modules/video_coding/video_coding_impl.cc
index 64cc090a70..c471ddaf7e 100644
--- a/webrtc/modules/video_coding/video_coding_impl.cc
+++ b/webrtc/modules/video_coding/video_coding_impl.cc
@@ -8,33 +8,33 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "webrtc/modules/video_coding/video_coding_impl.h"
+
+#include <algorithm>
+
#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/encoded_frame.h"
#include "webrtc/modules/video_coding/jitter_buffer.h"
#include "webrtc/modules/video_coding/packet.h"
-#include "webrtc/modules/video_coding/video_coding_impl.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
namespace vcm {
-int64_t
-VCMProcessTimer::Period() const {
- return _periodMs;
+int64_t VCMProcessTimer::Period() const {
+ return _periodMs;
}
-int64_t
-VCMProcessTimer::TimeUntilProcess() const {
- const int64_t time_since_process = _clock->TimeInMilliseconds() - _latestMs;
- const int64_t time_until_process = _periodMs - time_since_process;
- return std::max<int64_t>(time_until_process, 0);
+int64_t VCMProcessTimer::TimeUntilProcess() const {
+ const int64_t time_since_process = _clock->TimeInMilliseconds() - _latestMs;
+ const int64_t time_until_process = _periodMs - time_since_process;
+ return std::max<int64_t>(time_until_process, 0);
}
-void
-VCMProcessTimer::Processed() {
- _latestMs = _clock->TimeInMilliseconds();
+void VCMProcessTimer::Processed() {
+ _latestMs = _clock->TimeInMilliseconds();
}
} // namespace vcm
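
The VCMProcessTimer methods reformatted above implement a simple time-until-next-run pattern: TimeUntilProcess() reports how many milliseconds remain in the current period (clamped at zero once overdue) and Processed() restarts the period. A stand-alone restatement, driven by an explicit millisecond timestamp instead of webrtc::Clock, with names local to the sketch:

#include <algorithm>
#include <cstdint>
#include <cstdio>

class ProcessTimerSketch {
 public:
  ProcessTimerSketch(int64_t period_ms, int64_t now_ms)
      : period_ms_(period_ms), latest_ms_(now_ms) {}

  int64_t TimeUntilProcess(int64_t now_ms) const {
    const int64_t time_since_process = now_ms - latest_ms_;
    return std::max<int64_t>(period_ms_ - time_since_process, 0);
  }

  void Processed(int64_t now_ms) { latest_ms_ = now_ms; }

 private:
  const int64_t period_ms_;
  int64_t latest_ms_;
};

int main() {
  ProcessTimerSketch timer(1000, 0);
  // 600 ms left in the first 1000 ms period.
  std::printf("%lld\n", static_cast<long long>(timer.TimeUntilProcess(400)));
  timer.Processed(400);
  // Overdue at t = 1600 ms, so the remaining time is clamped to 0.
  std::printf("%lld\n", static_cast<long long>(timer.TimeUntilProcess(1600)));
  return 0;
}
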
@@ -59,8 +59,8 @@ class EncodedImageCallbackWrapper : public EncodedImageCallback {
const RTPFragmentationHeader* fragmentation) {
CriticalSectionScoped cs(cs_.get());
if (callback_)
- return callback_->Encoded(
- encoded_image, codec_specific_info, fragmentation);
+ return callback_->Encoded(encoded_image, codec_specific_info,
+ fragmentation);
return 0;
}
@@ -84,9 +84,7 @@ class VideoCodingModuleImpl : public VideoCodingModule {
receiver_(clock, event_factory),
own_event_factory_(owns_event_factory ? event_factory : NULL) {}
- virtual ~VideoCodingModuleImpl() {
- own_event_factory_.reset();
- }
+ virtual ~VideoCodingModuleImpl() { own_event_factory_.reset(); }
int64_t TimeUntilNextProcess() override {
int64_t sender_time = sender_.TimeUntilNextProcess();
@@ -321,9 +319,8 @@ VideoCodingModule* VideoCodingModule::Create(
encoder_rate_observer, qm_settings_callback);
}
-VideoCodingModule* VideoCodingModule::Create(
- Clock* clock,
- EventFactory* event_factory) {
+VideoCodingModule* VideoCodingModule::Create(Clock* clock,
+ EventFactory* event_factory) {
assert(clock);
assert(event_factory);
return new VideoCodingModuleImpl(clock, event_factory, false, nullptr,
diff --git a/webrtc/modules/video_coding/video_coding_robustness_unittest.cc b/webrtc/modules/video_coding/video_coding_robustness_unittest.cc
index 4111109e60..94c62dc38e 100644
--- a/webrtc/modules/video_coding/video_coding_robustness_unittest.cc
+++ b/webrtc/modules/video_coding/video_coding_robustness_unittest.cc
@@ -47,9 +47,7 @@ class VCMRobustnessTest : public ::testing::Test {
vcm_->RegisterExternalDecoder(&decoder_, video_codec_.plType);
}
- virtual void TearDown() {
- VideoCodingModule::Destroy(vcm_);
- }
+ virtual void TearDown() { VideoCodingModule::Destroy(vcm_); }
void InsertPacket(uint32_t timestamp,
uint16_t seq_no,
@@ -87,19 +85,17 @@ TEST_F(VCMRobustnessTest, TestHardNack) {
.With(Args<0, 1>(ElementsAre(6, 7)))
.Times(1);
for (int ts = 0; ts <= 6000; ts += 3000) {
- EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, ts),
- Field(&EncodedImage::_length,
- kPayloadLen * 3),
- Field(&EncodedImage::_completeFrame,
- true)),
- false, _, _, _))
+ EXPECT_CALL(decoder_,
+ Decode(AllOf(Field(&EncodedImage::_timeStamp, ts),
+ Field(&EncodedImage::_length, kPayloadLen * 3),
+ Field(&EncodedImage::_completeFrame, true)),
+ false, _, _, _))
.Times(1)
.InSequence(s);
}
ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
- VideoCodingModule::kHardNack,
- kNoErrors));
+ VideoCodingModule::kHardNack, kNoErrors));
InsertPacket(0, 0, true, false, kVideoFrameKey);
InsertPacket(0, 1, false, false, kVideoFrameKey);
@@ -136,14 +132,11 @@ TEST_F(VCMRobustnessTest, TestHardNack) {
}
TEST_F(VCMRobustnessTest, TestHardNackNoneDecoded) {
- EXPECT_CALL(request_callback_, ResendPackets(_, _))
- .Times(0);
- EXPECT_CALL(frame_type_callback_, RequestKeyFrame())
- .Times(1);
+ EXPECT_CALL(request_callback_, ResendPackets(_, _)).Times(0);
+ EXPECT_CALL(frame_type_callback_, RequestKeyFrame()).Times(1);
ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
- VideoCodingModule::kHardNack,
- kNoErrors));
+ VideoCodingModule::kHardNack, kNoErrors));
InsertPacket(3000, 3, true, false, kVideoFrameDelta);
InsertPacket(3000, 4, false, false, kVideoFrameDelta);
@@ -166,46 +159,43 @@ TEST_F(VCMRobustnessTest, TestModeNoneWithErrors) {
.With(Args<0, 1>(ElementsAre(4)))
.Times(0);
- EXPECT_CALL(decoder_, Copy())
- .Times(0);
- EXPECT_CALL(decoderCopy_, Copy())
- .Times(0);
+ EXPECT_CALL(decoder_, Copy()).Times(0);
+ EXPECT_CALL(decoderCopy_, Copy()).Times(0);
// Decode operations
- EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 0),
- Field(&EncodedImage::_completeFrame,
- true)),
- false, _, _, _))
- .Times(1)
- .InSequence(s1);
- EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 3000),
- Field(&EncodedImage::_completeFrame,
- false)),
- false, _, _, _))
- .Times(1)
- .InSequence(s1);
- EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 6000),
- Field(&EncodedImage::_completeFrame,
- true)),
- false, _, _, _))
- .Times(1)
- .InSequence(s1);
- EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 9000),
- Field(&EncodedImage::_completeFrame,
- true)),
- false, _, _, _))
- .Times(1)
- .InSequence(s1);
-
- ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
- VideoCodingModule::kNone,
- kWithErrors));
+ EXPECT_CALL(decoder_,
+ Decode(AllOf(Field(&EncodedImage::_timeStamp, 0),
+ Field(&EncodedImage::_completeFrame, true)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+ EXPECT_CALL(decoder_,
+ Decode(AllOf(Field(&EncodedImage::_timeStamp, 3000),
+ Field(&EncodedImage::_completeFrame, false)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+ EXPECT_CALL(decoder_,
+ Decode(AllOf(Field(&EncodedImage::_timeStamp, 6000),
+ Field(&EncodedImage::_completeFrame, true)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+ EXPECT_CALL(decoder_,
+ Decode(AllOf(Field(&EncodedImage::_timeStamp, 9000),
+ Field(&EncodedImage::_completeFrame, true)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+
+ ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(VideoCodingModule::kNone,
+ kWithErrors));
InsertPacket(0, 0, true, false, kVideoFrameKey);
InsertPacket(0, 1, false, false, kVideoFrameKey);
InsertPacket(0, 2, false, true, kVideoFrameKey);
EXPECT_EQ(VCM_OK, vcm_->Decode(33)); // Decode timestamp 0.
- EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
+ EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
clock_->AdvanceTimeMilliseconds(33);
InsertPacket(3000, 3, true, false, kVideoFrameDelta);
@@ -223,7 +213,7 @@ TEST_F(VCMRobustnessTest, TestModeNoneWithErrors) {
clock_->AdvanceTimeMilliseconds(10);
EXPECT_EQ(VCM_OK, vcm_->Decode(23)); // Decode timestamp 6000 complete.
- EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
+ EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
clock_->AdvanceTimeMilliseconds(23);
InsertPacket(3000, 4, false, false, kVideoFrameDelta);
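
The expectations reformatted above rely on gmock matcher composition: Field() constrains a single member of the argument, AllOf() conjoins several such matchers, and Times()/InSequence() bound how often and in what order the call may occur. The minimal sketch below uses a struct and mock invented for the illustration; it is not WebRTC code.

#include <cstdint>

#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

using ::testing::AllOf;
using ::testing::Field;

struct FakeImage {
  uint32_t timestamp;
  bool complete;
};

class MockSink {
 public:
  MOCK_METHOD1(Deliver, void(const FakeImage& image));
};

TEST(MatcherCompositionSketch, MatchesOnSelectedFields) {
  MockSink sink;
  // Accept exactly one delivery whose timestamp is 3000 and which is complete.
  EXPECT_CALL(sink, Deliver(AllOf(Field(&FakeImage::timestamp, 3000u),
                                  Field(&FakeImage::complete, true))))
      .Times(1);
  sink.Deliver({3000u, true});
}
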
diff --git a/webrtc/modules/video_coding/video_sender.cc b/webrtc/modules/video_coding/video_sender.cc
index 7d8e97b58d..3bc33812a2 100644
--- a/webrtc/modules/video_coding/video_sender.cc
+++ b/webrtc/modules/video_coding/video_sender.cc
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/common_types.h"
#include <algorithm> // std::max
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
+#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/encoded_frame.h"
@@ -126,14 +126,10 @@ int32_t VideoSender::RegisterSendCodec(const VideoCodec* sendCodec,
_nextFrameTypes.resize(VCM_MAX(sendCodec->numberOfSimulcastStreams, 1),
kVideoFrameDelta);
- _mediaOpt.SetEncodingData(sendCodec->codecType,
- sendCodec->maxBitrate * 1000,
- sendCodec->startBitrate * 1000,
- sendCodec->width,
- sendCodec->height,
- sendCodec->maxFramerate,
- numLayers,
- maxPayloadSize);
+ _mediaOpt.SetEncodingData(sendCodec->codecType, sendCodec->maxBitrate * 1000,
+ sendCodec->startBitrate * 1000, sendCodec->width,
+ sendCodec->height, sendCodec->maxFramerate,
+ numLayers, maxPayloadSize);
return VCM_OK;
}
@@ -158,8 +154,8 @@ VideoCodecType VideoSender::SendCodecBlocking() const {
// Register an external encoder object.
// This cannot be used together with external encoder callbacks.
void VideoSender::RegisterExternalEncoder(VideoEncoder* externalEncoder,
- uint8_t payloadType,
- bool internalSource /*= false*/) {
+ uint8_t payloadType,
+ bool internalSource /*= false*/) {
RTC_DCHECK(main_thread_.CalledOnValidThread());
rtc::CritScope lock(&send_crit_);
@@ -174,8 +170,8 @@ void VideoSender::RegisterExternalEncoder(VideoEncoder* externalEncoder,
}
return;
}
- _codecDataBase.RegisterExternalEncoder(
- externalEncoder, payloadType, internalSource);
+ _codecDataBase.RegisterExternalEncoder(externalEncoder, payloadType,
+ internalSource);
}
// Get encode bitrate
diff --git a/webrtc/modules/video_coding/video_sender_unittest.cc b/webrtc/modules/video_coding/video_sender_unittest.cc
index 3f5c3cdde2..2daa9d7b2d 100644
--- a/webrtc/modules/video_coding/video_sender_unittest.cc
+++ b/webrtc/modules/video_coding/video_sender_unittest.cc
@@ -41,9 +41,7 @@ using webrtc::test::FrameGenerator;
namespace webrtc {
namespace vcm {
namespace {
-enum {
- kMaxNumberOfTemporalLayers = 3
-};
+enum { kMaxNumberOfTemporalLayers = 3 };
struct Vp8StreamInfo {
float framerate_fps[kMaxNumberOfTemporalLayers];
@@ -87,7 +85,7 @@ class EmptyFrameGenerator : public FrameGenerator {
class PacketizationCallback : public VCMPacketizationCallback {
public:
- PacketizationCallback(Clock* clock)
+ explicit PacketizationCallback(Clock* clock)
: clock_(clock), start_time_ms_(clock_->TimeInMilliseconds()) {}
virtual ~PacketizationCallback() {}
@@ -211,16 +209,12 @@ class TestVideoSenderWithMockEncoder : public TestVideoSender {
memset(&settings_, 0, sizeof(settings_));
EXPECT_EQ(0, VideoCodingModule::Codec(kVideoCodecVP8, &settings_));
settings_.numberOfSimulcastStreams = kNumberOfStreams;
- ConfigureStream(kDefaultWidth / 4,
- kDefaultHeight / 4,
- 100,
+ ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, 100,
&settings_.simulcastStream[0]);
- ConfigureStream(kDefaultWidth / 2,
- kDefaultHeight / 2,
- 500,
+ ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, 500,
&settings_.simulcastStream[1]);
- ConfigureStream(
- kDefaultWidth, kDefaultHeight, 1200, &settings_.simulcastStream[2]);
+ ConfigureStream(kDefaultWidth, kDefaultHeight, 1200,
+ &settings_.simulcastStream[2]);
settings_.plType = kUnusedPayloadType; // Use the mocked encoder.
generator_.reset(
new EmptyFrameGenerator(settings_.width, settings_.height));
@@ -244,12 +238,11 @@ class TestVideoSenderWithMockEncoder : public TestVideoSender {
assert(stream < kNumberOfStreams);
std::vector<FrameType> frame_types(kNumberOfStreams, kVideoFrameDelta);
frame_types[stream] = kVideoFrameKey;
- EXPECT_CALL(
- encoder_,
- Encode(_,
- _,
- Pointee(ElementsAreArray(&frame_types[0], frame_types.size()))))
- .Times(1).WillRepeatedly(Return(0));
+ EXPECT_CALL(encoder_,
+ Encode(_, _, Pointee(ElementsAreArray(&frame_types[0],
+ frame_types.size()))))
+ .Times(1)
+ .WillRepeatedly(Return(0));
}
static void ConfigureStream(int width,