Diffstat (limited to 'webrtc/modules/video_coding/codecs')
-rw-r--r--  webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc | 19
-rw-r--r--  webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h | 2
-rw-r--r--  webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc | 43
-rw-r--r--  webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h | 2
-rw-r--r--  webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc | 9
-rw-r--r--  webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h | 17
-rw-r--r--  webrtc/modules/video_coding/codecs/h264/include/h264.h | 2
-rw-r--r--  webrtc/modules/video_coding/codecs/i420/i420.cc | 55
-rw-r--r--  webrtc/modules/video_coding/codecs/i420/include/i420.h | 145
-rw-r--r--  webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h | 43
-rw-r--r--  webrtc/modules/video_coding/codecs/interface/video_codec_interface.h | 28
-rw-r--r--  webrtc/modules/video_coding/codecs/interface/video_error_codes.h | 9
-rw-r--r--  webrtc/modules/video_coding/codecs/test/packet_manipulator.cc | 8
-rw-r--r--  webrtc/modules/video_coding/codecs/test/packet_manipulator.h | 13
-rw-r--r--  webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc | 27
-rw-r--r--  webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.cc | 17
-rw-r--r--  webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h | 1
-rw-r--r--  webrtc/modules/video_coding/codecs/test/stats.cc | 69
-rw-r--r--  webrtc/modules/video_coding/codecs/test/stats.h | 2
-rw-r--r--  webrtc/modules/video_coding/codecs/test/stats_unittest.cc | 16
-rw-r--r--  webrtc/modules/video_coding/codecs/test/videoprocessor.cc | 67
-rw-r--r--  webrtc/modules/video_coding/codecs/test/videoprocessor.h | 15
-rw-r--r--  webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc | 346
-rw-r--r--  webrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc | 44
-rw-r--r--  webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc | 320
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc | 26
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc | 157
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/include/vp8.h | 9
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h | 14
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc | 43
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc | 20
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc | 40
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc | 6
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h | 2
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc | 4
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc | 58
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h | 10
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc | 33
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc | 14
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h | 282
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/temporal_layers.h | 5
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc | 22
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/vp8_factory.h | 1
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc | 262
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/vp8_impl.h | 24
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc | 93
-rw-r--r--  webrtc/modules/video_coding/codecs/vp9/include/vp9.h | 3
-rw-r--r--  webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc | 93
-rw-r--r--  webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h | 66
-rw-r--r--  webrtc/modules/video_coding/codecs/vp9/screenshare_layers_unittest.cc | 323
-rw-r--r--  webrtc/modules/video_coding/codecs/vp9/vp9.gyp | 32
-rw-r--r--  webrtc/modules/video_coding/codecs/vp9/vp9_dummy_impl.cc | 19
-rw-r--r--  webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc | 2
-rw-r--r--  webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc | 411
-rw-r--r--  webrtc/modules/video_coding/codecs/vp9/vp9_impl.h | 49
55 files changed, 2042 insertions(+), 1400 deletions(-)
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
index 61ef80bbf1..6fee2e6f36 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
@@ -16,7 +16,7 @@
#include "libyuv/convert.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
-#include "webrtc/common_video/interface/video_frame_buffer.h"
+#include "webrtc/common_video/include/video_frame_buffer.h"
#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h"
#include "webrtc/video_frame.h"
@@ -106,8 +106,7 @@ namespace webrtc {
H264VideoToolboxDecoder::H264VideoToolboxDecoder()
: callback_(nullptr),
video_format_(nullptr),
- decompression_session_(nullptr) {
-}
+ decompression_session_(nullptr) {}
H264VideoToolboxDecoder::~H264VideoToolboxDecoder() {
DestroyDecompressionSession();
@@ -129,8 +128,7 @@ int H264VideoToolboxDecoder::Decode(
CMSampleBufferRef sample_buffer = nullptr;
if (!H264AnnexBBufferToCMSampleBuffer(input_image._buffer,
- input_image._length,
- video_format_,
+ input_image._length, video_format_,
&sample_buffer)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -206,11 +204,8 @@ int H264VideoToolboxDecoder::ResetDecompressionSession() {
int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
CFNumberRef pixel_format =
CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
- CFTypeRef values[attributes_size] = {
- kCFBooleanTrue,
- io_surface_value,
- pixel_format
- };
+ CFTypeRef values[attributes_size] = {kCFBooleanTrue, io_surface_value,
+ pixel_format};
CFDictionaryRef attributes =
internal::CreateCFDictionary(keys, values, attributes_size);
if (io_surface_value) {
@@ -266,6 +261,10 @@ void H264VideoToolboxDecoder::SetVideoFormat(
}
}
+const char* H264VideoToolboxDecoder::ImplementationName() const {
+ return "VideoToolbox";
+}
+
} // namespace webrtc
#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
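Note: the decoder (and, below, the encoder) gains an ImplementationName() accessor returning "VideoToolbox". A minimal caller-side sketch; the scoped_ptr setup and log line are illustrative assumptions, not part of this change:

    // Hypothetical: report which decoder implementation is active.
    rtc::scoped_ptr<webrtc::VideoDecoder> decoder(
        new webrtc::H264VideoToolboxDecoder());
    LOG(LS_INFO) << "decoder implementation: " << decoder->ImplementationName();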
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h
index f54ddb9efd..6d64307a82 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h
@@ -45,6 +45,8 @@ class H264VideoToolboxDecoder : public H264Decoder {
int Reset() override;
+ const char* ImplementationName() const override;
+
private:
int ResetDecompressionSession();
void ConfigureDecompressionSession();
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
index d677f8b812..7df4ec74ba 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
@@ -99,11 +99,7 @@ struct FrameEncodeParams {
int32_t h,
int64_t rtms,
uint32_t ts)
- : callback(cb),
- width(w),
- height(h),
- render_time_ms(rtms),
- timestamp(ts) {
+ : callback(cb), width(w), height(h), render_time_ms(rtms), timestamp(ts) {
if (csi) {
codec_specific_info = *csi;
} else {
@@ -146,9 +142,8 @@ bool CopyVideoFrameToPixelBuffer(const webrtc::VideoFrame& frame,
int ret = libyuv::I420ToNV12(
frame.buffer(webrtc::kYPlane), frame.stride(webrtc::kYPlane),
frame.buffer(webrtc::kUPlane), frame.stride(webrtc::kUPlane),
- frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane),
- dst_y, dst_stride_y, dst_uv, dst_stride_uv,
- frame.width(), frame.height());
+ frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane), dst_y,
+ dst_stride_y, dst_uv, dst_stride_uv, frame.width(), frame.height());
CVPixelBufferUnlockBaseAddress(pixel_buffer, 0);
if (ret) {
LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
@@ -188,10 +183,8 @@ void VTCompressionOutputCallback(void* encoder,
// TODO(tkchin): Allocate buffers through a pool.
rtc::scoped_ptr<rtc::Buffer> buffer(new rtc::Buffer());
rtc::scoped_ptr<webrtc::RTPFragmentationHeader> header;
- if (!H264CMSampleBufferToAnnexBBuffer(sample_buffer,
- is_keyframe,
- buffer.get(),
- header.accept())) {
+ if (!H264CMSampleBufferToAnnexBBuffer(sample_buffer, is_keyframe,
+ buffer.get(), header.accept())) {
return;
}
webrtc::EncodedImage frame(buffer->data(), buffer->size(), buffer->size());
@@ -215,8 +208,7 @@ void VTCompressionOutputCallback(void* encoder,
namespace webrtc {
H264VideoToolboxEncoder::H264VideoToolboxEncoder()
- : callback_(nullptr), compression_session_(nullptr) {
-}
+ : callback_(nullptr), compression_session_(nullptr) {}
H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
DestroyCompressionSession();
@@ -289,8 +281,8 @@ int H264VideoToolboxEncoder::Encode(
CMTimeMake(input_image.render_time_ms(), 1000);
CFDictionaryRef frame_properties = nullptr;
if (is_keyframe_required) {
- CFTypeRef keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
- CFTypeRef values[] = { kCFBooleanTrue };
+ CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
+ CFTypeRef values[] = {kCFBooleanTrue};
frame_properties = internal::CreateCFDictionary(keys, values, 1);
}
rtc::scoped_ptr<internal::FrameEncodeParams> encode_params;
@@ -359,11 +351,8 @@ int H264VideoToolboxEncoder::ResetCompressionSession() {
int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
CFNumberRef pixel_format =
CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
- CFTypeRef values[attributes_size] = {
- kCFBooleanTrue,
- io_surface_value,
- pixel_format
- };
+ CFTypeRef values[attributes_size] = {kCFBooleanTrue, io_surface_value,
+ pixel_format};
CFDictionaryRef source_attributes =
internal::CreateCFDictionary(keys, values, attributes_size);
if (io_surface_value) {
@@ -376,15 +365,11 @@ int H264VideoToolboxEncoder::ResetCompressionSession() {
}
OSStatus status = VTCompressionSessionCreate(
nullptr, // use default allocator
- width_,
- height_,
- kCMVideoCodecType_H264,
+ width_, height_, kCMVideoCodecType_H264,
nullptr, // use default encoder
source_attributes,
nullptr, // use default compressed data allocator
- internal::VTCompressionOutputCallback,
- this,
- &compression_session_);
+ internal::VTCompressionOutputCallback, this, &compression_session_);
if (source_attributes) {
CFRelease(source_attributes);
source_attributes = nullptr;
@@ -434,6 +419,10 @@ void H264VideoToolboxEncoder::DestroyCompressionSession() {
}
}
+const char* H264VideoToolboxEncoder::ImplementationName() const {
+ return "VideoToolbox";
+}
+
} // namespace webrtc
#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
index f4fb86fa04..269e0411b2 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
@@ -48,6 +48,8 @@ class H264VideoToolboxEncoder : public H264Encoder {
int Release() override;
+ const char* ImplementationName() const override;
+
private:
int ResetCompressionSession();
void ConfigureCompressionSession();
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
index caca96d3d8..322c213f7b 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
@@ -154,11 +154,10 @@ bool H264CMSampleBufferToAnnexBBuffer(
return true;
}
-bool H264AnnexBBufferToCMSampleBuffer(
- const uint8_t* annexb_buffer,
- size_t annexb_buffer_size,
- CMVideoFormatDescriptionRef video_format,
- CMSampleBufferRef* out_sample_buffer) {
+bool H264AnnexBBufferToCMSampleBuffer(const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size,
+ CMVideoFormatDescriptionRef video_format,
+ CMSampleBufferRef* out_sample_buffer) {
RTC_DCHECK(annexb_buffer);
RTC_DCHECK(out_sample_buffer);
*out_sample_buffer = nullptr;
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h
index 230dea94a0..31ef525816 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h
@@ -9,8 +9,8 @@
*
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H_
#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
@@ -19,7 +19,7 @@
#include <CoreMedia/CoreMedia.h>
#include "webrtc/base/buffer.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
@@ -39,11 +39,10 @@ bool H264CMSampleBufferToAnnexBBuffer(
// If |is_keyframe| is true then |video_format| is ignored since the format will
// be read from the buffer. Otherwise |video_format| must be provided.
// Caller is responsible for releasing the created sample buffer.
-bool H264AnnexBBufferToCMSampleBuffer(
- const uint8_t* annexb_buffer,
- size_t annexb_buffer_size,
- CMVideoFormatDescriptionRef video_format,
- CMSampleBufferRef* out_sample_buffer);
+bool H264AnnexBBufferToCMSampleBuffer(const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size,
+ CMVideoFormatDescriptionRef video_format,
+ CMSampleBufferRef* out_sample_buffer);
// Helper class for reading NALUs from an RTP Annex B buffer.
class AnnexBBufferReader final {
@@ -97,4 +96,4 @@ class AvccBufferWriter final {
} // namespace webrtc
#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H_
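As the header comment above states, the caller owns the sample buffer this helper creates. A hedged usage sketch (the input variables annexb_data, annexb_size, and video_format are assumptions for illustration):

    // Hypothetical caller: wrap an Annex B access unit for VideoToolbox.
    CMSampleBufferRef sample_buffer = nullptr;
    if (webrtc::H264AnnexBBufferToCMSampleBuffer(
            annexb_data, annexb_size, video_format, &sample_buffer)) {
      // ... hand |sample_buffer| to a decompression session ...
      CFRelease(sample_buffer);  // caller is responsible for releasing it
    }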
diff --git a/webrtc/modules/video_coding/codecs/h264/include/h264.h b/webrtc/modules/video_coding/codecs/h264/include/h264.h
index 3f52839a6c..50ca57c1c9 100644
--- a/webrtc/modules/video_coding/codecs/h264/include/h264.h
+++ b/webrtc/modules/video_coding/codecs/h264/include/h264.h
@@ -23,7 +23,7 @@
#endif // defined(WEBRTC_IOS) || defined(WEBRTC_MAC)
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
diff --git a/webrtc/modules/video_coding/codecs/i420/i420.cc b/webrtc/modules/video_coding/codecs/i420/i420.cc
index cf546a07a1..7f06b4cf7d 100644
--- a/webrtc/modules/video_coding/codecs/i420/i420.cc
+++ b/webrtc/modules/video_coding/codecs/i420/i420.cc
@@ -21,20 +21,19 @@ const size_t kI420HeaderSize = 4;
namespace webrtc {
-I420Encoder::I420Encoder() : _inited(false), _encodedImage(),
- _encodedCompleteCallback(NULL) {
-}
+I420Encoder::I420Encoder()
+ : _inited(false), _encodedImage(), _encodedCompleteCallback(NULL) {}
I420Encoder::~I420Encoder() {
_inited = false;
- delete [] _encodedImage._buffer;
+ delete[] _encodedImage._buffer;
}
int I420Encoder::Release() {
// Should allocate an encoded frame and then release it here, for that we
// actually need an init flag.
if (_encodedImage._buffer != NULL) {
- delete [] _encodedImage._buffer;
+ delete[] _encodedImage._buffer;
_encodedImage._buffer = NULL;
}
_inited = false;
@@ -53,7 +52,7 @@ int I420Encoder::InitEncode(const VideoCodec* codecSettings,
// Allocating encoded memory.
if (_encodedImage._buffer != NULL) {
- delete [] _encodedImage._buffer;
+ delete[] _encodedImage._buffer;
_encodedImage._buffer = NULL;
_encodedImage._size = 0;
}
@@ -101,18 +100,18 @@ int I420Encoder::Encode(const VideoFrame& inputImage,
kI420HeaderSize;
if (_encodedImage._size > req_length) {
// Reallocate buffer.
- delete [] _encodedImage._buffer;
+ delete[] _encodedImage._buffer;
_encodedImage._buffer = new uint8_t[req_length];
_encodedImage._size = req_length;
}
- uint8_t *buffer = _encodedImage._buffer;
+ uint8_t* buffer = _encodedImage._buffer;
buffer = InsertHeader(buffer, width, height);
- int ret_length = ExtractBuffer(inputImage, req_length - kI420HeaderSize,
- buffer);
+ int ret_length =
+ ExtractBuffer(inputImage, req_length - kI420HeaderSize, buffer);
if (ret_length < 0)
return WEBRTC_VIDEO_CODEC_MEMORY;
_encodedImage._length = ret_length + kI420HeaderSize;
@@ -121,7 +120,8 @@ int I420Encoder::Encode(const VideoFrame& inputImage,
return WEBRTC_VIDEO_CODEC_OK;
}
-uint8_t* I420Encoder::InsertHeader(uint8_t *buffer, uint16_t width,
+uint8_t* I420Encoder::InsertHeader(uint8_t* buffer,
+ uint16_t width,
uint16_t height) {
*buffer++ = static_cast<uint8_t>(width >> 8);
*buffer++ = static_cast<uint8_t>(width & 0xFF);
@@ -130,30 +130,29 @@ uint8_t* I420Encoder::InsertHeader(uint8_t *buffer, uint16_t width,
return buffer;
}
-int
-I420Encoder::RegisterEncodeCompleteCallback(EncodedImageCallback* callback) {
+int I420Encoder::RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) {
_encodedCompleteCallback = callback;
return WEBRTC_VIDEO_CODEC_OK;
}
-
-I420Decoder::I420Decoder() : _decodedImage(), _width(0), _height(0),
- _inited(false), _decodeCompleteCallback(NULL) {
-}
+I420Decoder::I420Decoder()
+ : _decodedImage(),
+ _width(0),
+ _height(0),
+ _inited(false),
+ _decodeCompleteCallback(NULL) {}
I420Decoder::~I420Decoder() {
Release();
}
-int
-I420Decoder::Reset() {
+int I420Decoder::Reset() {
return WEBRTC_VIDEO_CODEC_OK;
}
-
-int
-I420Decoder::InitDecode(const VideoCodec* codecSettings,
- int /*numberOfCores */) {
+int I420Decoder::InitDecode(const VideoCodec* codecSettings,
+ int /*numberOfCores */) {
if (codecSettings == NULL) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
} else if (codecSettings->width < 1 || codecSettings->height < 1) {
@@ -165,7 +164,8 @@ I420Decoder::InitDecode(const VideoCodec* codecSettings,
return WEBRTC_VIDEO_CODEC_OK;
}
-int I420Decoder::Decode(const EncodedImage& inputImage, bool /*missingFrames*/,
+int I420Decoder::Decode(const EncodedImage& inputImage,
+ bool /*missingFrames*/,
const RTPFragmentationHeader* /*fragmentation*/,
const CodecSpecificInfo* /*codecSpecificInfo*/,
int64_t /*renderTimeMs*/) {
@@ -203,8 +203,8 @@ int I420Decoder::Decode(const EncodedImage& inputImage, bool /*missingFrames*/,
}
// Set decoded image parameters.
int half_width = (_width + 1) / 2;
- _decodedImage.CreateEmptyFrame(_width, _height,
- _width, half_width, half_width);
+ _decodedImage.CreateEmptyFrame(_width, _height, _width, half_width,
+ half_width);
// Converting from buffer to plane representation.
int ret = ConvertToI420(kI420, buffer, 0, 0, _width, _height, 0,
kVideoRotation_0, &_decodedImage);
@@ -218,7 +218,8 @@ int I420Decoder::Decode(const EncodedImage& inputImage, bool /*missingFrames*/,
}
const uint8_t* I420Decoder::ExtractHeader(const uint8_t* buffer,
- uint16_t* width, uint16_t* height) {
+ uint16_t* width,
+ uint16_t* height) {
*width = static_cast<uint16_t>(*buffer++) << 8;
*width |= *buffer++;
*height = static_cast<uint16_t>(*buffer++) << 8;
diff --git a/webrtc/modules/video_coding/codecs/i420/include/i420.h b/webrtc/modules/video_coding/codecs/i420/include/i420.h
index 8990ccf878..9f77845e96 100644
--- a/webrtc/modules/video_coding/codecs/i420/include/i420.h
+++ b/webrtc/modules/video_coding/codecs/i420/include/i420.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_MAIN_INTERFACE_I420_H_
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_MAIN_INTERFACE_I420_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_INCLUDE_I420_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_INCLUDE_I420_H_
#include <vector>
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -24,45 +24,45 @@ class I420Encoder : public VideoEncoder {
virtual ~I420Encoder();
-// Initialize the encoder with the information from the VideoCodec.
-//
-// Input:
-// - codecSettings : Codec settings.
-// - numberOfCores : Number of cores available for the encoder.
-// - maxPayloadSize : The maximum size each payload is allowed
-// to have. Usually MTU - overhead.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK.
-// <0 - Error
+ // Initialize the encoder with the information from the VideoCodec.
+ //
+ // Input:
+ // - codecSettings : Codec settings.
+ // - numberOfCores : Number of cores available for the encoder.
+ // - maxPayloadSize : The maximum size each payload is allowed
+ // to have. Usually MTU - overhead.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK.
+ // <0 - Error
int InitEncode(const VideoCodec* codecSettings,
int /*numberOfCores*/,
size_t /*maxPayloadSize*/) override;
-// "Encode" an I420 image (as a part of a video stream). The encoded image
-// will be returned to the user via the encode complete callback.
-//
-// Input:
-// - inputImage : Image to be encoded.
-// - codecSpecificInfo : Pointer to codec specific data.
-// - frameType : Frame type to be sent (Key /Delta).
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK.
-// <0 - Error
+ // "Encode" an I420 image (as a part of a video stream). The encoded image
+ // will be returned to the user via the encode complete callback.
+ //
+ // Input:
+ // - inputImage : Image to be encoded.
+ // - codecSpecificInfo : Pointer to codec specific data.
+ // - frameType : Frame type to be sent (Key /Delta).
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK.
+ // <0 - Error
int Encode(const VideoFrame& inputImage,
const CodecSpecificInfo* /*codecSpecificInfo*/,
const std::vector<FrameType>* /*frame_types*/) override;
-// Register an encode complete callback object.
-//
-// Input:
-// - callback : Callback object which handles encoded images.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
+ // Register an encode complete callback object.
+ //
+ // Input:
+ // - callback : Callback object which handles encoded images.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
-// Free encoder memory.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
+ // Free encoder memory.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
int Release() override;
int SetRates(uint32_t /*newBitRate*/, uint32_t /*frameRate*/) override {
@@ -76,12 +76,13 @@ class I420Encoder : public VideoEncoder {
void OnDroppedFrame() override {}
private:
- static uint8_t* InsertHeader(uint8_t* buffer, uint16_t width,
+ static uint8_t* InsertHeader(uint8_t* buffer,
+ uint16_t width,
uint16_t height);
- bool _inited;
- EncodedImage _encodedImage;
- EncodedImageCallback* _encodedCompleteCallback;
+ bool _inited;
+ EncodedImage _encodedImage;
+ EncodedImageCallback* _encodedCompleteCallback;
}; // class I420Encoder
class I420Decoder : public VideoDecoder {
@@ -90,50 +91,50 @@ class I420Decoder : public VideoDecoder {
virtual ~I420Decoder();
-// Initialize the decoder.
-// The user must notify the codec of width and height values.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK.
-// <0 - Errors
+ // Initialize the decoder.
+ // The user must notify the codec of width and height values.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK.
+ // <0 - Errors
int InitDecode(const VideoCodec* codecSettings,
int /*numberOfCores*/) override;
-// Decode encoded image (as a part of a video stream). The decoded image
-// will be returned to the user through the decode complete callback.
-//
-// Input:
-// - inputImage : Encoded image to be decoded
-// - missingFrames : True if one or more frames have been lost
-// since the previous decode call.
-// - codecSpecificInfo : pointer to specific codec data
-// - renderTimeMs : Render time in Ms
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK
-// <0 - Error
+ // Decode encoded image (as a part of a video stream). The decoded image
+ // will be returned to the user through the decode complete callback.
+ //
+ // Input:
+ // - inputImage : Encoded image to be decoded
+ // - missingFrames : True if one or more frames have been lost
+ // since the previous decode call.
+ // - codecSpecificInfo : pointer to specific codec data
+ // - renderTimeMs : Render time in Ms
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK
+ // <0 - Error
int Decode(const EncodedImage& inputImage,
bool missingFrames,
const RTPFragmentationHeader* /*fragmentation*/,
const CodecSpecificInfo* /*codecSpecificInfo*/,
int64_t /*renderTimeMs*/) override;
-// Register a decode complete callback object.
-//
-// Input:
-// - callback : Callback object which handles decoded images.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
+ // Register a decode complete callback object.
+ //
+ // Input:
+ // - callback : Callback object which handles decoded images.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
-// Free decoder memory.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK.
-// <0 - Error
+ // Free decoder memory.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK.
+ // <0 - Error
int Release() override;
-// Reset decoder state and prepare for a new call.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK.
-// <0 - Error
+ // Reset decoder state and prepare for a new call.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK.
+ // <0 - Error
int Reset() override;
private:
@@ -142,12 +143,12 @@ class I420Decoder : public VideoDecoder {
uint16_t* height);
VideoFrame _decodedImage;
- int _width;
- int _height;
- bool _inited;
- DecodedImageCallback* _decodeCompleteCallback;
+ int _width;
+ int _height;
+ bool _inited;
+ DecodedImageCallback* _decodeCompleteCallback;
}; // class I420Decoder
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_MAIN_INTERFACE_I420_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_INCLUDE_I420_H_
diff --git a/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h b/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h
index 6c926d4794..d727e896ad 100644
--- a/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h
+++ b/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h
@@ -11,27 +11,32 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
+#pragma message("WARNING: video_coding/codecs/interface is DEPRECATED; "
+ "use video_coding/include")
#include <string>
+#include <vector>
#include "testing/gmock/include/gmock/gmock.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class MockEncodedImageCallback : public EncodedImageCallback {
public:
- MOCK_METHOD3(Encoded, int32_t(const EncodedImage& encodedImage,
- const CodecSpecificInfo* codecSpecificInfo,
- const RTPFragmentationHeader* fragmentation));
+ MOCK_METHOD3(Encoded,
+ int32_t(const EncodedImage& encodedImage,
+ const CodecSpecificInfo* codecSpecificInfo,
+ const RTPFragmentationHeader* fragmentation));
};
class MockVideoEncoder : public VideoEncoder {
public:
- MOCK_CONST_METHOD2(Version, int32_t(int8_t *version, int32_t length));
- MOCK_METHOD3(InitEncode, int32_t(const VideoCodec* codecSettings,
- int32_t numberOfCores,
- size_t maxPayloadSize));
+ MOCK_CONST_METHOD2(Version, int32_t(int8_t* version, int32_t length));
+ MOCK_METHOD3(InitEncode,
+ int32_t(const VideoCodec* codecSettings,
+ int32_t numberOfCores,
+ size_t maxPayloadSize));
MOCK_METHOD3(Encode,
int32_t(const VideoFrame& inputImage,
const CodecSpecificInfo* codecSpecificInfo,
@@ -47,22 +52,24 @@ class MockVideoEncoder : public VideoEncoder {
class MockDecodedImageCallback : public DecodedImageCallback {
public:
- MOCK_METHOD1(Decoded, int32_t(VideoFrame& decodedImage));
+ MOCK_METHOD1(Decoded, int32_t(const VideoFrame& decodedImage));
+ MOCK_METHOD2(Decoded,
+ int32_t(const VideoFrame& decodedImage, int64_t decode_time_ms));
MOCK_METHOD1(ReceivedDecodedReferenceFrame,
int32_t(const uint64_t pictureId));
- MOCK_METHOD1(ReceivedDecodedFrame,
- int32_t(const uint64_t pictureId));
+ MOCK_METHOD1(ReceivedDecodedFrame, int32_t(const uint64_t pictureId));
};
class MockVideoDecoder : public VideoDecoder {
public:
- MOCK_METHOD2(InitDecode, int32_t(const VideoCodec* codecSettings,
- int32_t numberOfCores));
- MOCK_METHOD5(Decode, int32_t(const EncodedImage& inputImage,
- bool missingFrames,
- const RTPFragmentationHeader* fragmentation,
- const CodecSpecificInfo* codecSpecificInfo,
- int64_t renderTimeMs));
+ MOCK_METHOD2(InitDecode,
+ int32_t(const VideoCodec* codecSettings, int32_t numberOfCores));
+ MOCK_METHOD5(Decode,
+ int32_t(const EncodedImage& inputImage,
+ bool missingFrames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codecSpecificInfo,
+ int64_t renderTimeMs));
MOCK_METHOD1(RegisterDecodeCompleteCallback,
int32_t(DecodedImageCallback* callback));
MOCK_METHOD0(Release, int32_t());
diff --git a/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h b/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
index 6363ab7332..6bcfa909bd 100644
--- a/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
+++ b/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
@@ -8,23 +8,24 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H_
+#pragma message("WARNING: video_coding/codecs/interface is DEPRECATED; "
+ "use video_coding/include")
#include <vector>
#include "webrtc/common_types.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_error_codes.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_error_codes.h"
#include "webrtc/typedefs.h"
#include "webrtc/video_decoder.h"
#include "webrtc/video_encoder.h"
#include "webrtc/video_frame.h"
-namespace webrtc
-{
+namespace webrtc {
-class RTPFragmentationHeader; // forward declaration
+class RTPFragmentationHeader; // forward declaration
// Note: if any pointers are added to this struct, it must be fitted
// with a copy-constructor. See below.
@@ -68,6 +69,10 @@ struct CodecSpecificInfoVP9 {
uint16_t width[kMaxVp9NumberOfSpatialLayers];
uint16_t height[kMaxVp9NumberOfSpatialLayers];
GofInfoVP9 gof;
+
+ // Frame reference data.
+ uint8_t num_ref_pics;
+ uint8_t p_diff[kMaxVp9RefPics];
};
struct CodecSpecificInfoGeneric {
@@ -86,12 +91,11 @@ union CodecSpecificInfoUnion {
// Note: if any pointers are added to this struct or its sub-structs, it
// must be fitted with a copy-constructor. This is because it is copied
// in the copy-constructor of VCMEncodedFrame.
-struct CodecSpecificInfo
-{
- VideoCodecType codecType;
- CodecSpecificInfoUnion codecSpecific;
+struct CodecSpecificInfo {
+ VideoCodecType codecType;
+ CodecSpecificInfoUnion codecSpecific;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H_
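The new VP9 fields added above carry per-frame reference information. A hedged sketch of how an encoder wrapper might fill them; the picture-id deltas are invented for illustration:

    webrtc::CodecSpecificInfo info;
    info.codecType = webrtc::kVideoCodecVP9;
    info.codecSpecific.VP9.num_ref_pics = 2;  // frame uses two references
    info.codecSpecific.VP9.p_diff[0] = 1;     // one picture id back
    info.codecSpecific.VP9.p_diff[1] = 3;     // three picture ids back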
diff --git a/webrtc/modules/video_coding/codecs/interface/video_error_codes.h b/webrtc/modules/video_coding/codecs/interface/video_error_codes.h
index 28e5a32d43..ea8829df80 100644
--- a/webrtc/modules/video_coding/codecs/interface/video_error_codes.h
+++ b/webrtc/modules/video_coding/codecs/interface/video_error_codes.h
@@ -8,8 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H_
+
+#pragma message("WARNING: video_coding/codecs/interface is DEPRECATED; "
+ "use video_coding/include")
// NOTE: in sync with video_coding_module_defines.h
@@ -29,4 +32,4 @@
#define WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE -13
#define WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT -14
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H_
diff --git a/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc b/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc
index 36ba0e8272..b554b4e9ae 100644
--- a/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc
+++ b/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc
@@ -57,7 +57,7 @@ int PacketManipulatorImpl::ManipulatePackets(
active_burst_packets_--;
nbr_packets_dropped++;
} else if (RandomUniform() < config_.packet_loss_probability ||
- packet_loss_has_occurred) {
+ packet_loss_has_occurred) {
packet_loss_has_occurred = true;
nbr_packets_dropped++;
if (config_.packet_loss_mode == kBurst) {
@@ -91,9 +91,9 @@ inline double PacketManipulatorImpl::RandomUniform() {
// get the same behavior as long as we're using a fixed initial seed.
critsect_->Enter();
srand(random_seed_);
- random_seed_ = rand();
+ random_seed_ = rand(); // NOLINT (rand_r instead of rand)
critsect_->Leave();
- return (random_seed_ + 1.0)/(RAND_MAX + 1.0);
+ return (random_seed_ + 1.0) / (RAND_MAX + 1.0);
}
const char* PacketLossModeToStr(PacketLossMode e) {
@@ -109,4 +109,4 @@ const char* PacketLossModeToStr(PacketLossMode e) {
}
} // namespace test
-} // namespace webrtcc
+} // namespace webrtc
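The reseed-with-previous-output pattern in RandomUniform() above keeps the sequence deterministic for a fixed initial seed. A standalone sketch of the same idea (assumed free function, needs <cstdlib>; not patch code):

    unsigned int seed = 1;  // fixed initial seed => reproducible sequence
    double NextUniform() {
      srand(seed);   // NOLINT (matches the style used in the patch)
      seed = rand(); // NOLINT
      return (seed + 1.0) / (RAND_MAX + 1.0);  // uniform in (0.0, 1.0]
    }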
diff --git a/webrtc/modules/video_coding/codecs/test/packet_manipulator.h b/webrtc/modules/video_coding/codecs/test/packet_manipulator.h
index 16a9dc22ef..3334be072b 100644
--- a/webrtc/modules/video_coding/codecs/test/packet_manipulator.h
+++ b/webrtc/modules/video_coding/codecs/test/packet_manipulator.h
@@ -13,7 +13,7 @@
#include <stdlib.h>
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/test/testsupport/packet_reader.h"
@@ -36,10 +36,11 @@ const char* PacketLossModeToStr(PacketLossMode e);
// scenarios caused by network interference.
struct NetworkingConfig {
NetworkingConfig()
- : packet_size_in_bytes(1500), max_payload_size_in_bytes(1440),
- packet_loss_mode(kUniform), packet_loss_probability(0.0),
- packet_loss_burst_length(1) {
- }
+ : packet_size_in_bytes(1500),
+ max_payload_size_in_bytes(1440),
+ packet_loss_mode(kUniform),
+ packet_loss_probability(0.0),
+ packet_loss_burst_length(1) {}
// Packet size in bytes. Default: 1500 bytes.
size_t packet_size_in_bytes;
@@ -93,9 +94,11 @@ class PacketManipulatorImpl : public PacketManipulator {
virtual ~PacketManipulatorImpl();
int ManipulatePackets(webrtc::EncodedImage* encoded_image) override;
virtual void InitializeRandomSeed(unsigned int seed);
+
protected:
// Returns a uniformly distributed random value between 0.0 and 1.0
virtual double RandomUniform();
+
private:
PacketReader* packet_reader_;
const NetworkingConfig& config_;
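A hedged example of configuring the struct above for a lossy run; the values are illustrative, not the defaults, and the enum is assumed to live alongside the struct in webrtc::test:

    webrtc::test::NetworkingConfig config;           // 1500/1440-byte defaults
    config.packet_loss_mode = webrtc::test::kBurst;  // default is kUniform
    config.packet_loss_probability = 0.1;            // 10% loss
    config.packet_loss_burst_length = 3;             // drop bursts of 3 packets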
diff --git a/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc b/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc
index ace7bc0507..8c3d30dc0d 100644
--- a/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc
@@ -13,7 +13,7 @@
#include <queue>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h"
#include "webrtc/test/testsupport/unittest_utils.h"
#include "webrtc/typedefs.h"
@@ -25,7 +25,7 @@ const double kNeverDropProbability = 0.0;
const double kAlwaysDropProbability = 1.0;
const int kBurstLength = 1;
-class PacketManipulatorTest: public PacketRelatedTest {
+class PacketManipulatorTest : public PacketRelatedTest {
protected:
PacketReader packet_reader_;
EncodedImage image_;
@@ -50,19 +50,15 @@ class PacketManipulatorTest: public PacketRelatedTest {
virtual ~PacketManipulatorTest() {}
- void SetUp() {
- PacketRelatedTest::SetUp();
- }
+ void SetUp() { PacketRelatedTest::SetUp(); }
- void TearDown() {
- PacketRelatedTest::TearDown();
- }
+ void TearDown() { PacketRelatedTest::TearDown(); }
void VerifyPacketLoss(int expected_nbr_packets_dropped,
int actual_nbr_packets_dropped,
size_t expected_packet_data_length,
uint8_t* expected_packet_data,
- EncodedImage& actual_image) {
+ const EncodedImage& actual_image) {
EXPECT_EQ(expected_nbr_packets_dropped, actual_nbr_packets_dropped);
EXPECT_EQ(expected_packet_data_length, image_._length);
EXPECT_EQ(0, memcmp(expected_packet_data, actual_image._buffer,
@@ -75,10 +71,10 @@ TEST_F(PacketManipulatorTest, Constructor) {
}
TEST_F(PacketManipulatorTest, DropNone) {
- PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
+ PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
- VerifyPacketLoss(0, nbr_packets_dropped, kPacketDataLength,
- packet_data_, image_);
+ VerifyPacketLoss(0, nbr_packets_dropped, kPacketDataLength, packet_data_,
+ image_);
}
TEST_F(PacketManipulatorTest, UniformDropNoneSmallFrame) {
@@ -87,15 +83,14 @@ TEST_F(PacketManipulatorTest, UniformDropNoneSmallFrame) {
PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
- VerifyPacketLoss(0, nbr_packets_dropped, data_length,
- packet_data_, image_);
+ VerifyPacketLoss(0, nbr_packets_dropped, data_length, packet_data_, image_);
}
TEST_F(PacketManipulatorTest, UniformDropAll) {
PacketManipulatorImpl manipulator(&packet_reader_, drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
- VerifyPacketLoss(kPacketDataNumberOfPackets, nbr_packets_dropped,
- 0, packet_data_, image_);
+ VerifyPacketLoss(kPacketDataNumberOfPackets, nbr_packets_dropped, 0,
+ packet_data_, image_);
}
// Use our customized test class to make the second packet being lost
diff --git a/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.cc b/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.cc
index c92cfa48a7..9eba205a88 100644
--- a/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.cc
+++ b/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.cc
@@ -19,13 +19,11 @@ namespace webrtc {
namespace test {
PredictivePacketManipulator::PredictivePacketManipulator(
- PacketReader* packet_reader, const NetworkingConfig& config)
- : PacketManipulatorImpl(packet_reader, config, false) {
-}
-
-PredictivePacketManipulator::~PredictivePacketManipulator() {
-}
+ PacketReader* packet_reader,
+ const NetworkingConfig& config)
+ : PacketManipulatorImpl(packet_reader, config, false) {}
+PredictivePacketManipulator::~PredictivePacketManipulator() {}
void PredictivePacketManipulator::AddRandomResult(double result) {
assert(result >= 0.0 && result <= 1.0);
@@ -33,8 +31,9 @@ void PredictivePacketManipulator::AddRandomResult(double result) {
}
double PredictivePacketManipulator::RandomUniform() {
- if(random_results_.size() == 0u) {
- fprintf(stderr, "No more stored results, please make sure AddRandomResult()"
+ if (random_results_.size() == 0u) {
+ fprintf(stderr,
+ "No more stored results, please make sure AddRandomResult()"
"is called same amount of times you're going to invoke the "
"RandomUniform() function, i.e. once per packet.\n");
assert(false);
@@ -45,4 +44,4 @@ double PredictivePacketManipulator::RandomUniform() {
}
} // namespace test
-} // namespace webrtcc
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h b/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h
index 082712d870..45c7848c67 100644
--- a/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h
+++ b/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h
@@ -31,6 +31,7 @@ class PredictivePacketManipulator : public PacketManipulatorImpl {
// FIFO queue so they will be returned in the same order they were added.
// Result parameter must be 0.0 to 1.0.
void AddRandomResult(double result);
+
protected:
// Returns a uniformly distributed random value between 0.0 and 1.0
double RandomUniform() override;
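Test-side sketch of the contract documented above: queue exactly one result per expected RandomUniform() call, in FIFO order (the packet_reader_ and drop_config_ names are assumed test fixtures):

    PredictivePacketManipulator manipulator(&packet_reader_, drop_config_);
    manipulator.AddRandomResult(1.0);  // >= loss probability: packet kept
    manipulator.AddRandomResult(0.0);  // < loss probability: packet dropped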
diff --git a/webrtc/modules/video_coding/codecs/test/stats.cc b/webrtc/modules/video_coding/codecs/test/stats.cc
index f87407d223..478b2f4901 100644
--- a/webrtc/modules/video_coding/codecs/test/stats.cc
+++ b/webrtc/modules/video_coding/codecs/test/stats.cc
@@ -39,19 +39,19 @@ Stats::Stats() {}
Stats::~Stats() {}
bool LessForEncodeTime(const FrameStatistic& s1, const FrameStatistic& s2) {
- return s1.encode_time_in_us < s2.encode_time_in_us;
+ return s1.encode_time_in_us < s2.encode_time_in_us;
}
bool LessForDecodeTime(const FrameStatistic& s1, const FrameStatistic& s2) {
- return s1.decode_time_in_us < s2.decode_time_in_us;
+ return s1.decode_time_in_us < s2.decode_time_in_us;
}
bool LessForEncodedSize(const FrameStatistic& s1, const FrameStatistic& s2) {
- return s1.encoded_frame_length_in_bytes < s2.encoded_frame_length_in_bytes;
+ return s1.encoded_frame_length_in_bytes < s2.encoded_frame_length_in_bytes;
}
bool LessForBitRate(const FrameStatistic& s1, const FrameStatistic& s2) {
- return s1.bit_rate_in_kbps < s2.bit_rate_in_kbps;
+ return s1.bit_rate_in_kbps < s2.bit_rate_in_kbps;
}
FrameStatistic& Stats::NewFrame(int frame_number) {
@@ -78,8 +78,7 @@ void Stats::PrintSummary() {
size_t nbr_keyframes = 0;
size_t nbr_nonkeyframes = 0;
- for (FrameStatisticsIterator it = stats_.begin();
- it != stats_.end(); ++it) {
+ for (FrameStatisticsIterator it = stats_.begin(); it != stats_.end(); ++it) {
total_encoding_time_in_us += it->encode_time_in_us;
total_decoding_time_in_us += it->decode_time_in_us;
total_encoded_frames_lengths += it->encoded_frame_length_in_bytes;
@@ -96,15 +95,13 @@ void Stats::PrintSummary() {
// ENCODING
printf("Encoding time:\n");
- frame = std::min_element(stats_.begin(),
- stats_.end(), LessForEncodeTime);
- printf(" Min : %7d us (frame %d)\n",
- frame->encode_time_in_us, frame->frame_number);
+ frame = std::min_element(stats_.begin(), stats_.end(), LessForEncodeTime);
+ printf(" Min : %7d us (frame %d)\n", frame->encode_time_in_us,
+ frame->frame_number);
- frame = std::max_element(stats_.begin(),
- stats_.end(), LessForEncodeTime);
- printf(" Max : %7d us (frame %d)\n",
- frame->encode_time_in_us, frame->frame_number);
+ frame = std::max_element(stats_.begin(), stats_.end(), LessForEncodeTime);
+ printf(" Max : %7d us (frame %d)\n", frame->encode_time_in_us,
+ frame->frame_number);
printf(" Average : %7d us\n",
static_cast<int>(total_encoding_time_in_us / stats_.size()));
@@ -115,7 +112,7 @@ void Stats::PrintSummary() {
// failures)
std::vector<FrameStatistic> decoded_frames;
for (std::vector<FrameStatistic>::iterator it = stats_.begin();
- it != stats_.end(); ++it) {
+ it != stats_.end(); ++it) {
if (it->decoding_successful) {
decoded_frames.push_back(*it);
}
@@ -123,15 +120,15 @@ void Stats::PrintSummary() {
if (decoded_frames.size() == 0) {
printf("No successfully decoded frames exist in this statistics.\n");
} else {
- frame = std::min_element(decoded_frames.begin(),
- decoded_frames.end(), LessForDecodeTime);
- printf(" Min : %7d us (frame %d)\n",
- frame->decode_time_in_us, frame->frame_number);
+ frame = std::min_element(decoded_frames.begin(), decoded_frames.end(),
+ LessForDecodeTime);
+ printf(" Min : %7d us (frame %d)\n", frame->decode_time_in_us,
+ frame->frame_number);
- frame = std::max_element(decoded_frames.begin(),
- decoded_frames.end(), LessForDecodeTime);
- printf(" Max : %7d us (frame %d)\n",
- frame->decode_time_in_us, frame->frame_number);
+ frame = std::max_element(decoded_frames.begin(), decoded_frames.end(),
+ LessForDecodeTime);
+ printf(" Max : %7d us (frame %d)\n", frame->decode_time_in_us,
+ frame->frame_number);
printf(" Average : %7d us\n",
static_cast<int>(total_decoding_time_in_us / decoded_frames.size()));
@@ -141,13 +138,11 @@ void Stats::PrintSummary() {
// SIZE
printf("Frame sizes:\n");
- frame = std::min_element(stats_.begin(),
- stats_.end(), LessForEncodedSize);
+ frame = std::min_element(stats_.begin(), stats_.end(), LessForEncodedSize);
printf(" Min : %7" PRIuS " bytes (frame %d)\n",
frame->encoded_frame_length_in_bytes, frame->frame_number);
- frame = std::max_element(stats_.begin(),
- stats_.end(), LessForEncodedSize);
+ frame = std::max_element(stats_.begin(), stats_.end(), LessForEncodedSize);
printf(" Max : %7" PRIuS " bytes (frame %d)\n",
frame->encoded_frame_length_in_bytes, frame->frame_number);
@@ -167,21 +162,17 @@ void Stats::PrintSummary() {
// BIT RATE
printf("Bit rates:\n");
- frame = std::min_element(stats_.begin(),
- stats_.end(), LessForBitRate);
- printf(" Min bit rate: %7d kbps (frame %d)\n",
- frame->bit_rate_in_kbps, frame->frame_number);
+ frame = std::min_element(stats_.begin(), stats_.end(), LessForBitRate);
+ printf(" Min bit rate: %7d kbps (frame %d)\n", frame->bit_rate_in_kbps,
+ frame->frame_number);
- frame = std::max_element(stats_.begin(),
- stats_.end(), LessForBitRate);
- printf(" Max bit rate: %7d kbps (frame %d)\n",
- frame->bit_rate_in_kbps, frame->frame_number);
+ frame = std::max_element(stats_.begin(), stats_.end(), LessForBitRate);
+ printf(" Max bit rate: %7d kbps (frame %d)\n", frame->bit_rate_in_kbps,
+ frame->frame_number);
printf("\n");
- printf("Total encoding time : %7d ms.\n",
- total_encoding_time_in_us / 1000);
- printf("Total decoding time : %7d ms.\n",
- total_decoding_time_in_us / 1000);
+ printf("Total encoding time : %7d ms.\n", total_encoding_time_in_us / 1000);
+ printf("Total decoding time : %7d ms.\n", total_decoding_time_in_us / 1000);
printf("Total processing time: %7d ms.\n",
(total_encoding_time_in_us + total_decoding_time_in_us) / 1000);
}
diff --git a/webrtc/modules/video_coding/codecs/test/stats.h b/webrtc/modules/video_coding/codecs/test/stats.h
index 83ba108bb7..9092631ca1 100644
--- a/webrtc/modules/video_coding/codecs/test/stats.h
+++ b/webrtc/modules/video_coding/codecs/test/stats.h
@@ -13,7 +13,7 @@
#include <vector>
-#include "webrtc/common_video/interface/video_image.h"
+#include "webrtc/common_video/include/video_image.h"
namespace webrtc {
namespace test {
diff --git a/webrtc/modules/video_coding/codecs/test/stats_unittest.cc b/webrtc/modules/video_coding/codecs/test/stats_unittest.cc
index a2d27e71d6..0403ccfdb3 100644
--- a/webrtc/modules/video_coding/codecs/test/stats_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/test/stats_unittest.cc
@@ -16,21 +16,15 @@
namespace webrtc {
namespace test {
-class StatsTest: public testing::Test {
+class StatsTest : public testing::Test {
protected:
- StatsTest() {
- }
+ StatsTest() {}
- virtual ~StatsTest() {
- }
+ virtual ~StatsTest() {}
- void SetUp() {
- stats_ = new Stats();
- }
+ void SetUp() { stats_ = new Stats(); }
- void TearDown() {
- delete stats_;
- }
+ void TearDown() { delete stats_; }
Stats* stats_;
};
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
index c814dfe0e7..7376000bd5 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
@@ -93,14 +93,18 @@ bool VideoProcessorImpl::Init() {
int32_t register_result =
encoder_->RegisterEncodeCompleteCallback(encode_callback_);
if (register_result != WEBRTC_VIDEO_CODEC_OK) {
- fprintf(stderr, "Failed to register encode complete callback, return code: "
- "%d\n", register_result);
+ fprintf(stderr,
+ "Failed to register encode complete callback, return code: "
+ "%d\n",
+ register_result);
return false;
}
register_result = decoder_->RegisterDecodeCompleteCallback(decode_callback_);
if (register_result != WEBRTC_VIDEO_CODEC_OK) {
- fprintf(stderr, "Failed to register decode complete callback, return code: "
- "%d\n", register_result);
+ fprintf(stderr,
+ "Failed to register decode complete callback, return code: "
+ "%d\n",
+ register_result);
return false;
}
// Init the encoder and decoder
@@ -146,13 +150,14 @@ VideoProcessorImpl::~VideoProcessorImpl() {
delete decode_callback_;
}
-
void VideoProcessorImpl::SetRates(int bit_rate, int frame_rate) {
int set_rates_result = encoder_->SetRates(bit_rate, frame_rate);
assert(set_rates_result >= 0);
if (set_rates_result < 0) {
- fprintf(stderr, "Failed to update encoder with new rate %d, "
- "return code: %d\n", bit_rate, set_rates_result);
+ fprintf(stderr,
+ "Failed to update encoder with new rate %d, "
+ "return code: %d\n",
+ bit_rate, set_rates_result);
}
num_dropped_frames_ = 0;
num_spatial_resizes_ = 0;
@@ -175,7 +180,7 @@ int VideoProcessorImpl::NumberSpatialResizes() {
}
bool VideoProcessorImpl::ProcessFrame(int frame_number) {
- assert(frame_number >=0);
+ assert(frame_number >= 0);
if (!initialized_) {
fprintf(stderr, "Attempting to use uninitialized VideoProcessor!\n");
return false;
@@ -186,10 +191,8 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) {
}
if (frame_reader_->ReadFrame(source_buffer_)) {
// Copy the source frame to the newly read frame data.
- source_frame_.CreateFrame(source_buffer_,
- config_.codec_settings->width,
- config_.codec_settings->height,
- kVideoRotation_0);
+ source_frame_.CreateFrame(source_buffer_, config_.codec_settings->width,
+ config_.codec_settings->height, kVideoRotation_0);
// Ensure we have a new statistics data object we can fill:
FrameStatistic& stat = stats_->NewFrame(frame_number);
@@ -224,10 +227,10 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) {
void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
// Timestamp is frame number, so this gives us #dropped frames.
- int num_dropped_from_prev_encode = encoded_image._timeStamp -
- prev_time_stamp_ - 1;
- num_dropped_frames_ += num_dropped_from_prev_encode;
- prev_time_stamp_ = encoded_image._timeStamp;
+ int num_dropped_from_prev_encode =
+ encoded_image._timeStamp - prev_time_stamp_ - 1;
+ num_dropped_frames_ += num_dropped_from_prev_encode;
+ prev_time_stamp_ = encoded_image._timeStamp;
if (num_dropped_from_prev_encode > 0) {
// For dropped frames, we write out the last decoded frame to avoid getting
// out of sync for the computation of PSNR and SSIM.
@@ -244,15 +247,16 @@ void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
TickTime encode_stop = TickTime::Now();
int frame_number = encoded_image._timeStamp;
FrameStatistic& stat = stats_->stats_[frame_number];
- stat.encode_time_in_us = GetElapsedTimeMicroseconds(encode_start_,
- encode_stop);
+ stat.encode_time_in_us =
+ GetElapsedTimeMicroseconds(encode_start_, encode_stop);
stat.encoding_successful = true;
stat.encoded_frame_length_in_bytes = encoded_image._length;
stat.frame_number = encoded_image._timeStamp;
stat.frame_type = encoded_image._frameType;
stat.bit_rate_in_kbps = encoded_image._length * bit_rate_factor_;
- stat.total_packets = encoded_image._length /
- config_.networking_config.packet_size_in_bytes + 1;
+ stat.total_packets =
+ encoded_image._length / config_.networking_config.packet_size_in_bytes +
+ 1;
// Perform packet loss if criteria is fullfilled:
bool exclude_this_frame = false;
@@ -280,7 +284,7 @@ void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
copied_image._buffer = copied_buffer.get();
if (!exclude_this_frame) {
stat.packets_dropped =
- packet_manipulator_->ManipulatePackets(&copied_image);
+ packet_manipulator_->ManipulatePackets(&copied_image);
}
// Keep track of if frames are lost due to packet loss so we can tell
@@ -305,26 +309,25 @@ void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) {
int frame_number = image.timestamp();
// Report stats
FrameStatistic& stat = stats_->stats_[frame_number];
- stat.decode_time_in_us = GetElapsedTimeMicroseconds(decode_start_,
- decode_stop);
+ stat.decode_time_in_us =
+ GetElapsedTimeMicroseconds(decode_start_, decode_stop);
stat.decoding_successful = true;
// Check for resize action (either down or up):
if (static_cast<int>(image.width()) != last_encoder_frame_width_ ||
- static_cast<int>(image.height()) != last_encoder_frame_height_ ) {
+ static_cast<int>(image.height()) != last_encoder_frame_height_) {
++num_spatial_resizes_;
last_encoder_frame_width_ = image.width();
last_encoder_frame_height_ = image.height();
}
// Check if codec size is different from native/original size, and if so,
// upsample back to original size: needed for PSNR and SSIM computations.
- if (image.width() != config_.codec_settings->width ||
+ if (image.width() != config_.codec_settings->width ||
image.height() != config_.codec_settings->height) {
VideoFrame up_image;
- int ret_val = scaler_.Set(image.width(), image.height(),
- config_.codec_settings->width,
- config_.codec_settings->height,
- kI420, kI420, kScaleBilinear);
+ int ret_val = scaler_.Set(
+ image.width(), image.height(), config_.codec_settings->width,
+ config_.codec_settings->height, kI420, kI420, kScaleBilinear);
assert(ret_val >= 0);
if (ret_val < 0) {
fprintf(stderr, "Failed to set scalar for frame: %d, return code: %d\n",
@@ -366,7 +369,8 @@ void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) {
}
int VideoProcessorImpl::GetElapsedTimeMicroseconds(
- const webrtc::TickTime& start, const webrtc::TickTime& stop) {
+ const webrtc::TickTime& start,
+ const webrtc::TickTime& stop) {
uint64_t encode_time = (stop - start).Microseconds();
assert(encode_time <
static_cast<unsigned int>(std::numeric_limits<int>::max()));
@@ -404,8 +408,7 @@ const char* VideoCodecTypeToStr(webrtc::VideoCodecType e) {
}
// Callbacks
-int32_t
-VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
+int32_t VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
const EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info,
const webrtc::RTPFragmentationHeader* fragmentation) {
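Worked example of the timestamp bookkeeping in FrameEncoded() above, where timestamps double as frame numbers:

    int prev_time_stamp = 7;   // last frame that reached the encode callback
    int current = 10;          // timestamp of the newly encoded frame
    int num_dropped = current - prev_time_stamp - 1;  // == 2 dropped frames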
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.h b/webrtc/modules/video_coding/codecs/test/videoprocessor.h
index 0b094ae73e..3ee08fd46a 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.h
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.h
@@ -13,9 +13,10 @@
#include <string>
+#include "webrtc/base/checks.h"
#include "webrtc/common_video/libyuv/include/scaler.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/test/packet_manipulator.h"
#include "webrtc/modules/video_coding/codecs/test/stats.h"
#include "webrtc/system_wrappers/include/tick_util.h"
@@ -242,12 +243,16 @@ class VideoProcessorImpl : public VideoProcessor {
// Callback class required to implement according to the VideoDecoder API.
class VideoProcessorDecodeCompleteCallback
- : public webrtc::DecodedImageCallback {
+ : public webrtc::DecodedImageCallback {
public:
- explicit VideoProcessorDecodeCompleteCallback(VideoProcessorImpl* vp)
- : video_processor_(vp) {
+ explicit VideoProcessorDecodeCompleteCallback(VideoProcessorImpl* vp)
+ : video_processor_(vp) {}
+ int32_t Decoded(webrtc::VideoFrame& image) override;
+ int32_t Decoded(webrtc::VideoFrame& image,
+ int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
}
- int32_t Decoded(webrtc::VideoFrame& image) override;
private:
VideoProcessorImpl* video_processor_;
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
index 3d6aedb22a..7b92616e1b 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
@@ -12,17 +12,16 @@
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/test/packet_manipulator.h"
#include "webrtc/modules/video_coding/codecs/test/videoprocessor.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/frame_reader.h"
#include "webrtc/test/testsupport/frame_writer.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
#include "webrtc/test/testsupport/metrics/video_metrics.h"
#include "webrtc/test/testsupport/packet_reader.h"
#include "webrtc/typedefs.h"
@@ -81,7 +80,6 @@ struct RateControlMetrics {
int num_key_frames;
};
-
// Sequence used is foreman (CIF): may be better to use VGA for resize test.
const int kCIFWidth = 352;
const int kCIFHeight = 288;
@@ -101,7 +99,7 @@ const float kScaleKeyFrameSize = 0.5f;
// dropping/spatial resize, and temporal layers. The limits for the rate
// control metrics are set to be fairly conservative, so failure should only
// happen when some significant regression or breakdown occurs.
-class VideoProcessorIntegrationTest: public testing::Test {
+class VideoProcessorIntegrationTest : public testing::Test {
protected:
VideoEncoder* encoder_;
VideoDecoder* decoder_;
@@ -148,7 +146,6 @@ class VideoProcessorIntegrationTest: public testing::Test {
bool frame_dropper_on_;
bool spatial_resize_on_;
-
VideoProcessorIntegrationTest() {}
virtual ~VideoProcessorIntegrationTest() {}
@@ -165,14 +162,13 @@ class VideoProcessorIntegrationTest: public testing::Test {
// CIF is currently used for all tests below.
// Setup the TestConfig struct for processing of a clip in CIF resolution.
- config_.input_filename =
- webrtc::test::ResourcePath("foreman_cif", "yuv");
+ config_.input_filename = webrtc::test::ResourcePath("foreman_cif", "yuv");
// Generate an output filename in a safe way.
config_.output_filename = webrtc::test::TempFilename(
webrtc::test::OutputPath(), "videoprocessor_integrationtest");
- config_.frame_length_in_bytes = CalcBufferSize(kI420,
- kCIFWidth, kCIFHeight);
+ config_.frame_length_in_bytes =
+ CalcBufferSize(kI420, kCIFWidth, kCIFHeight);
config_.verbose = false;
// Only allow encoder/decoder to use single core, for predictability.
config_.use_single_core = true;
@@ -188,52 +184,46 @@ class VideoProcessorIntegrationTest: public testing::Test {
// These features may be set depending on the test.
switch (config_.codec_settings->codecType) {
- case kVideoCodecVP8:
- config_.codec_settings->codecSpecific.VP8.errorConcealmentOn =
- error_concealment_on_;
- config_.codec_settings->codecSpecific.VP8.denoisingOn =
- denoising_on_;
- config_.codec_settings->codecSpecific.VP8.numberOfTemporalLayers =
- num_temporal_layers_;
- config_.codec_settings->codecSpecific.VP8.frameDroppingOn =
- frame_dropper_on_;
- config_.codec_settings->codecSpecific.VP8.automaticResizeOn =
- spatial_resize_on_;
- config_.codec_settings->codecSpecific.VP8.keyFrameInterval =
- kBaseKeyFrameInterval;
- break;
- case kVideoCodecVP9:
- config_.codec_settings->codecSpecific.VP9.denoisingOn =
- denoising_on_;
- config_.codec_settings->codecSpecific.VP9.numberOfTemporalLayers =
- num_temporal_layers_;
- config_.codec_settings->codecSpecific.VP9.frameDroppingOn =
- frame_dropper_on_;
- config_.codec_settings->codecSpecific.VP9.automaticResizeOn =
- spatial_resize_on_;
- config_.codec_settings->codecSpecific.VP9.keyFrameInterval =
- kBaseKeyFrameInterval;
- break;
- default:
- assert(false);
- break;
- }
- frame_reader_ =
- new webrtc::test::FrameReaderImpl(config_.input_filename,
- config_.frame_length_in_bytes);
- frame_writer_ =
- new webrtc::test::FrameWriterImpl(config_.output_filename,
- config_.frame_length_in_bytes);
+ case kVideoCodecVP8:
+ config_.codec_settings->codecSpecific.VP8.errorConcealmentOn =
+ error_concealment_on_;
+ config_.codec_settings->codecSpecific.VP8.denoisingOn = denoising_on_;
+ config_.codec_settings->codecSpecific.VP8.numberOfTemporalLayers =
+ num_temporal_layers_;
+ config_.codec_settings->codecSpecific.VP8.frameDroppingOn =
+ frame_dropper_on_;
+ config_.codec_settings->codecSpecific.VP8.automaticResizeOn =
+ spatial_resize_on_;
+ config_.codec_settings->codecSpecific.VP8.keyFrameInterval =
+ kBaseKeyFrameInterval;
+ break;
+ case kVideoCodecVP9:
+ config_.codec_settings->codecSpecific.VP9.denoisingOn = denoising_on_;
+ config_.codec_settings->codecSpecific.VP9.numberOfTemporalLayers =
+ num_temporal_layers_;
+ config_.codec_settings->codecSpecific.VP9.frameDroppingOn =
+ frame_dropper_on_;
+ config_.codec_settings->codecSpecific.VP9.automaticResizeOn =
+ spatial_resize_on_;
+ config_.codec_settings->codecSpecific.VP9.keyFrameInterval =
+ kBaseKeyFrameInterval;
+ break;
+ default:
+ assert(false);
+ break;
+ }
+ frame_reader_ = new webrtc::test::FrameReaderImpl(
+ config_.input_filename, config_.frame_length_in_bytes);
+ frame_writer_ = new webrtc::test::FrameWriterImpl(
+ config_.output_filename, config_.frame_length_in_bytes);
ASSERT_TRUE(frame_reader_->Init());
ASSERT_TRUE(frame_writer_->Init());
packet_manipulator_ = new webrtc::test::PacketManipulatorImpl(
&packet_reader_, config_.networking_config, config_.verbose);
- processor_ = new webrtc::test::VideoProcessorImpl(encoder_, decoder_,
- frame_reader_,
- frame_writer_,
- packet_manipulator_,
- config_, &stats_);
+ processor_ = new webrtc::test::VideoProcessorImpl(
+ encoder_, decoder_, frame_reader_, frame_writer_, packet_manipulator_,
+ config_, &stats_);
ASSERT_TRUE(processor_->Init());
}
@@ -247,7 +237,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
encoding_bitrate_[i] = 0.0f;
// Update layer per-frame-bandwidth.
per_frame_bandwidth_[i] = static_cast<float>(bit_rate_layer_[i]) /
- static_cast<float>(frame_rate_layer_[i]);
+ static_cast<float>(frame_rate_layer_[i]);
}
// Set maximum size of key frames, following setting in the VP8 wrapper.
float max_key_size = kScaleKeyFrameSize * kOptimalBufferSize * frame_rate_;
@@ -274,28 +264,28 @@ class VideoProcessorIntegrationTest: public testing::Test {
// Update rate mismatch relative to per-frame bandwidth for delta frames.
if (frame_type == kVideoFrameDelta) {
// TODO(marpan): Should we count dropped (zero size) frames in mismatch?
- sum_frame_size_mismatch_[layer_] += fabs(encoded_size_kbits -
- per_frame_bandwidth_[layer_]) /
- per_frame_bandwidth_[layer_];
+ sum_frame_size_mismatch_[layer_] +=
+ fabs(encoded_size_kbits - per_frame_bandwidth_[layer_]) /
+ per_frame_bandwidth_[layer_];
} else {
- float target_size = (frame_num == 1) ? target_size_key_frame_initial_ :
- target_size_key_frame_;
- sum_key_frame_size_mismatch_ += fabs(encoded_size_kbits - target_size) /
- target_size;
+ float target_size = (frame_num == 1) ? target_size_key_frame_initial_
+ : target_size_key_frame_;
+ sum_key_frame_size_mismatch_ +=
+ fabs(encoded_size_kbits - target_size) / target_size;
num_key_frames_ += 1;
}
sum_encoded_frame_size_[layer_] += encoded_size_kbits;
// Encoding bitrate per layer: from the start of the update/run to the
// current frame.
encoding_bitrate_[layer_] = sum_encoded_frame_size_[layer_] *
- frame_rate_layer_[layer_] /
- num_frames_per_update_[layer_];
+ frame_rate_layer_[layer_] /
+ num_frames_per_update_[layer_];
// Total encoding rate: from the start of the update/run to current frame.
sum_encoded_frame_size_total_ += encoded_size_kbits;
- encoding_bitrate_total_ = sum_encoded_frame_size_total_ * frame_rate_ /
- num_frames_total_;
- perc_encoding_rate_mismatch_ = 100 * fabs(encoding_bitrate_total_ -
- bit_rate_) / bit_rate_;
+ encoding_bitrate_total_ =
+ sum_encoded_frame_size_total_ * frame_rate_ / num_frames_total_;
+ perc_encoding_rate_mismatch_ =
+ 100 * fabs(encoding_bitrate_total_ - bit_rate_) / bit_rate_;
if (perc_encoding_rate_mismatch_ < kPercTargetvsActualMismatch &&
!encoding_rate_within_target_) {
num_frames_to_hit_target_ = num_frames_total_;
@@ -314,34 +304,38 @@ class VideoProcessorIntegrationTest: public testing::Test {
int num_key_frames) {
int num_dropped_frames = processor_->NumberDroppedFrames();
int num_resize_actions = processor_->NumberSpatialResizes();
- printf("For update #: %d,\n "
+ printf(
+ "For update #: %d,\n "
" Target Bitrate: %d,\n"
" Encoding bitrate: %f,\n"
" Frame rate: %d \n",
update_index, bit_rate_, encoding_bitrate_total_, frame_rate_);
- printf(" Number of frames to approach target rate = %d, \n"
- " Number of dropped frames = %d, \n"
- " Number of spatial resizes = %d, \n",
- num_frames_to_hit_target_, num_dropped_frames, num_resize_actions);
+ printf(
+ " Number of frames to approach target rate = %d, \n"
+ " Number of dropped frames = %d, \n"
+ " Number of spatial resizes = %d, \n",
+ num_frames_to_hit_target_, num_dropped_frames, num_resize_actions);
EXPECT_LE(perc_encoding_rate_mismatch_, max_encoding_rate_mismatch);
if (num_key_frames_ > 0) {
- int perc_key_frame_size_mismatch = 100 * sum_key_frame_size_mismatch_ /
- num_key_frames_;
- printf(" Number of Key frames: %d \n"
- " Key frame rate mismatch: %d \n",
- num_key_frames_, perc_key_frame_size_mismatch);
+ int perc_key_frame_size_mismatch =
+ 100 * sum_key_frame_size_mismatch_ / num_key_frames_;
+ printf(
+ " Number of Key frames: %d \n"
+ " Key frame rate mismatch: %d \n",
+ num_key_frames_, perc_key_frame_size_mismatch);
EXPECT_LE(perc_key_frame_size_mismatch, max_key_frame_size_mismatch);
}
printf("\n");
printf("Rates statistics for Layer data \n");
- for (int i = 0; i < num_temporal_layers_ ; i++) {
+ for (int i = 0; i < num_temporal_layers_; i++) {
printf("Layer #%d \n", i);
- int perc_frame_size_mismatch = 100 * sum_frame_size_mismatch_[i] /
- num_frames_per_update_[i];
- int perc_encoding_rate_mismatch = 100 * fabs(encoding_bitrate_[i] -
- bit_rate_layer_[i]) /
- bit_rate_layer_[i];
- printf(" Target Layer Bit rate: %f \n"
+ int perc_frame_size_mismatch =
+ 100 * sum_frame_size_mismatch_[i] / num_frames_per_update_[i];
+ int perc_encoding_rate_mismatch =
+ 100 * fabs(encoding_bitrate_[i] - bit_rate_layer_[i]) /
+ bit_rate_layer_[i];
+ printf(
+ " Target Layer Bit rate: %f \n"
" Layer frame rate: %f, \n"
" Layer per frame bandwidth: %f, \n"
" Layer Encoding bit rate: %f, \n"
@@ -366,13 +360,13 @@ class VideoProcessorIntegrationTest: public testing::Test {
if (num_temporal_layers_ == 1) {
layer_ = 0;
} else if (num_temporal_layers_ == 2) {
- // layer 0: 0 2 4 ...
- // layer 1: 1 3
- if (frame_number % 2 == 0) {
- layer_ = 0;
- } else {
- layer_ = 1;
- }
+ // layer 0: 0 2 4 ...
+ // layer 1: 1 3
+ if (frame_number % 2 == 0) {
+ layer_ = 0;
+ } else {
+ layer_ = 1;
+ }
} else if (num_temporal_layers_ == 3) {
// layer 0: 0 4 8 ...
// layer 1: 2 6
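
The mapping spelled out in these comments (and continued past this hunk) is just frame_number modulo the layer period. A compact stand-alone sketch of the same assignment; the function name is illustrative:

// Sketch: temporal-layer index for a frame, for 1-3 layers, matching the
// comment patterns above (2 layers: 0,1,0,1,...; 3 layers: period-4
// pattern giving layer 0 on frames 0,4,8..., layer 1 on 2,6..., and
// layer 2 on odd frames).
int LayerIndexForFrameSketch(int frame_number, int num_temporal_layers) {
  if (num_temporal_layers == 1)
    return 0;
  if (num_temporal_layers == 2)
    return frame_number % 2;
  // Three layers:
  switch (frame_number % 4) {
    case 0:
      return 0;
    case 2:
      return 1;
    default:
      return 2;  // Frames 1, 3, 5, 7, ...
  }
}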
@@ -391,20 +385,20 @@ class VideoProcessorIntegrationTest: public testing::Test {
// Set the bitrate and frame rate per layer, for up to 3 layers.
void SetLayerRates() {
- assert(num_temporal_layers_<= 3);
+ assert(num_temporal_layers_ <= 3);
for (int i = 0; i < num_temporal_layers_; i++) {
float bit_rate_ratio =
kVp8LayerRateAlloction[num_temporal_layers_ - 1][i];
if (i > 0) {
- float bit_rate_delta_ratio = kVp8LayerRateAlloction
- [num_temporal_layers_ - 1][i] -
+ float bit_rate_delta_ratio =
+ kVp8LayerRateAlloction[num_temporal_layers_ - 1][i] -
kVp8LayerRateAlloction[num_temporal_layers_ - 1][i - 1];
bit_rate_layer_[i] = bit_rate_ * bit_rate_delta_ratio;
} else {
bit_rate_layer_[i] = bit_rate_ * bit_rate_ratio;
}
- frame_rate_layer_[i] = frame_rate_ / static_cast<float>(
- 1 << (num_temporal_layers_ - 1));
+ frame_rate_layer_[i] =
+ frame_rate_ / static_cast<float>(1 << (num_temporal_layers_ - 1));
}
if (num_temporal_layers_ == 3) {
frame_rate_layer_[2] = frame_rate_ / 2.0f;
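
As a worked example of the allocation above, assuming the 3-layer row of kVp8LayerRateAlloction is {0.4, 0.6, 1.0}: with bit_rate_ = 600 kbps and frame_rate_ = 30 fps, layer 0 gets 600 * 0.4 = 240 kbps, layer 1 gets the delta 600 * (0.6 - 0.4) = 120 kbps, and layer 2 gets 600 * (1.0 - 0.6) = 240 kbps. The loop first sets every layer's frame rate to 30 / (1 << 2) = 7.5 fps, and the 3-layer special case then overrides layer 2 to 30 / 2 = 15 fps.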
@@ -437,12 +431,12 @@ class VideoProcessorIntegrationTest: public testing::Test {
spatial_resize_on_ = process.spatial_resize_on;
SetUpCodecConfig();
// Update the layers and the codec with the initial rates.
- bit_rate_ = rate_profile.target_bit_rate[0];
+ bit_rate_ = rate_profile.target_bit_rate[0];
frame_rate_ = rate_profile.input_frame_rate[0];
SetLayerRates();
// Set the initial target size for key frame.
- target_size_key_frame_initial_ = 0.5 * kInitialBufferSize *
- bit_rate_layer_[0];
+ target_size_key_frame_initial_ =
+ 0.5 * kInitialBufferSize * bit_rate_layer_[0];
processor_->SetRates(bit_rate_, frame_rate_);
// Process each frame, up to |num_frames|.
int num_frames = rate_profile.num_frames;
@@ -452,7 +446,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
int frame_number = 0;
FrameType frame_type = kVideoFrameDelta;
while (processor_->ProcessFrame(frame_number) &&
- frame_number < num_frames) {
+ frame_number < num_frames) {
// Get the layer index for the frame |frame_number|.
LayerIndexForFrame(frame_number);
// Get the frame_type.
@@ -468,8 +462,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
if (frame_number ==
rate_profile.frame_index_rate_update[update_index + 1]) {
VerifyRateControl(
- update_index,
- rc_metrics[update_index].max_key_frame_size_mismatch,
+ update_index, rc_metrics[update_index].max_key_frame_size_mismatch,
rc_metrics[update_index].max_delta_frame_size_mismatch,
rc_metrics[update_index].max_encoding_rate_mismatch,
rc_metrics[update_index].max_time_hit_target,
@@ -478,23 +471,22 @@ class VideoProcessorIntegrationTest: public testing::Test {
rc_metrics[update_index].num_key_frames);
// Update layer rates and the codec with new rates.
++update_index;
- bit_rate_ = rate_profile.target_bit_rate[update_index];
+ bit_rate_ = rate_profile.target_bit_rate[update_index];
frame_rate_ = rate_profile.input_frame_rate[update_index];
SetLayerRates();
- ResetRateControlMetrics(rate_profile.
- frame_index_rate_update[update_index + 1]);
+ ResetRateControlMetrics(
+ rate_profile.frame_index_rate_update[update_index + 1]);
processor_->SetRates(bit_rate_, frame_rate_);
}
}
- VerifyRateControl(
- update_index,
- rc_metrics[update_index].max_key_frame_size_mismatch,
- rc_metrics[update_index].max_delta_frame_size_mismatch,
- rc_metrics[update_index].max_encoding_rate_mismatch,
- rc_metrics[update_index].max_time_hit_target,
- rc_metrics[update_index].max_num_dropped_frames,
- rc_metrics[update_index].num_spatial_resizes,
- rc_metrics[update_index].num_key_frames);
+ VerifyRateControl(update_index,
+ rc_metrics[update_index].max_key_frame_size_mismatch,
+ rc_metrics[update_index].max_delta_frame_size_mismatch,
+ rc_metrics[update_index].max_encoding_rate_mismatch,
+ rc_metrics[update_index].max_time_hit_target,
+ rc_metrics[update_index].max_num_dropped_frames,
+ rc_metrics[update_index].num_spatial_resizes,
+ rc_metrics[update_index].num_key_frames);
EXPECT_EQ(num_frames, frame_number);
EXPECT_EQ(num_frames + 1, static_cast<int>(stats_.stats_.size()));
@@ -507,16 +499,14 @@ class VideoProcessorIntegrationTest: public testing::Test {
// TODO(marpan): should compute these quality metrics per SetRates update.
webrtc::test::QualityMetricsResult psnr_result, ssim_result;
- EXPECT_EQ(0, webrtc::test::I420MetricsFromFiles(
- config_.input_filename.c_str(),
- config_.output_filename.c_str(),
- config_.codec_settings->width,
- config_.codec_settings->height,
- &psnr_result,
- &ssim_result));
+ EXPECT_EQ(
+ 0, webrtc::test::I420MetricsFromFiles(
+ config_.input_filename.c_str(), config_.output_filename.c_str(),
+ config_.codec_settings->width, config_.codec_settings->height,
+ &psnr_result, &ssim_result));
printf("PSNR avg: %f, min: %f SSIM avg: %f, min: %f\n",
- psnr_result.average, psnr_result.min,
- ssim_result.average, ssim_result.min);
+ psnr_result.average, psnr_result.min, ssim_result.average,
+ ssim_result.min);
stats_.PrintSummary();
EXPECT_GT(psnr_result.average, quality_metrics.minimum_avg_psnr);
EXPECT_GT(psnr_result.min, quality_metrics.minimum_min_psnr);
@@ -549,7 +539,7 @@ void SetCodecParameters(CodecConfigPars* process_settings,
bool spatial_resize_on) {
process_settings->codec_type = codec_type;
process_settings->packet_loss = packet_loss;
- process_settings->key_frame_interval = key_frame_interval;
+ process_settings->key_frame_interval = key_frame_interval;
process_settings->num_temporal_layers = num_temporal_layers;
process_settings->error_concealment_on = error_concealment_on;
process_settings->denoising_on = denoising_on;
@@ -608,9 +598,7 @@ TEST_F(VideoProcessorIntegrationTest, Process0PercentPacketLossVP9) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -632,13 +620,10 @@ TEST_F(VideoProcessorIntegrationTest, Process5PercentPacketLossVP9) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
-
// VP9: Run with no packet loss, with varying bitrate (3 rate updates):
// low to high to medium. Check that quality and encoder response to the new
// target rate/per-frame bandwidth (for each rate update) is within limits.
@@ -657,15 +642,13 @@ TEST_F(VideoProcessorIntegrationTest, ProcessNoLossChangeBitRateVP9) {
false, true, false);
// Metrics for expected quality.
QualityMetrics quality_metrics;
- SetQualityMetrics(&quality_metrics, 35.9, 30.0, 0.90, 0.85);
+ SetQualityMetrics(&quality_metrics, 35.7, 30.0, 0.90, 0.85);
// Metrics for rate control.
RateControlMetrics rc_metrics[3];
SetRateControlMetrics(rc_metrics, 0, 0, 30, 20, 20, 30, 0, 1);
SetRateControlMetrics(rc_metrics, 1, 2, 0, 20, 20, 60, 0, 0);
SetRateControlMetrics(rc_metrics, 2, 0, 0, 25, 20, 40, 0, 0);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -695,12 +678,10 @@ TEST_F(VideoProcessorIntegrationTest,
SetQualityMetrics(&quality_metrics, 31.5, 18.0, 0.80, 0.44);
// Metrics for rate control.
RateControlMetrics rc_metrics[3];
- SetRateControlMetrics(rc_metrics, 0, 35, 50, 70, 15, 45, 0, 1);
+ SetRateControlMetrics(rc_metrics, 0, 38, 50, 75, 15, 45, 0, 1);
SetRateControlMetrics(rc_metrics, 1, 10, 0, 40, 10, 30, 0, 0);
SetRateControlMetrics(rc_metrics, 2, 5, 0, 30, 5, 20, 0, 0);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -721,19 +702,13 @@ TEST_F(VideoProcessorIntegrationTest, ProcessNoLossDenoiserOnVP9) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
// Run with no packet loss, at low bitrate.
-// spatial_resize is on, and for this low bitrate expect two resizes during the
-// sequence; first resize is 3/4, second is 1/2 (from original).
+// spatial_resize is on; at this low bitrate expect one resize in the sequence.
// Resize happens on delta frame. Expect only one key frame (first frame).
-// Disable for msan, see
-// https://code.google.com/p/webrtc/issues/detail?id=5110 for details.
-#if !defined(MEMORY_SANITIZER)
TEST_F(VideoProcessorIntegrationTest, ProcessNoLossSpatialResizeFrameDropVP9) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
@@ -743,20 +718,17 @@ TEST_F(VideoProcessorIntegrationTest, ProcessNoLossSpatialResizeFrameDropVP9) {
rate_profile.num_frames = kNbrFramesLong;
// Codec/network settings.
CodecConfigPars process_settings;
- SetCodecParameters(&process_settings, kVideoCodecVP9, 0.0f, -1,
- 1, false, false, true, true);
+ SetCodecParameters(&process_settings, kVideoCodecVP9, 0.0f, -1, 1, false,
+ false, true, true);
// Metrics for expected quality.
QualityMetrics quality_metrics;
- SetQualityMetrics(&quality_metrics, 25.0, 13.0, 0.70, 0.40);
+ SetQualityMetrics(&quality_metrics, 24.0, 13.0, 0.65, 0.37);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
- SetRateControlMetrics(rc_metrics, 0, 180, 70, 130, 15, 80, 2, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ SetRateControlMetrics(rc_metrics, 0, 228, 70, 160, 15, 80, 1, 1);
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
-#endif
// TODO(marpan): Add temporal layer test for VP9, once changes are in
// vp9 wrapper for this.
@@ -780,9 +752,7 @@ TEST_F(VideoProcessorIntegrationTest, ProcessZeroPacketLoss) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -804,9 +774,7 @@ TEST_F(VideoProcessorIntegrationTest, Process5PercentPacketLoss) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -828,9 +796,7 @@ TEST_F(VideoProcessorIntegrationTest, Process10PercentPacketLoss) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -847,8 +813,13 @@ TEST_F(VideoProcessorIntegrationTest, Process10PercentPacketLoss) {
// low to high to medium. Check that quality and encoder response to the new
// target rate/per-frame bandwidth (for each rate update) is within limits.
// One key frame (first frame only) in sequence.
-TEST_F(VideoProcessorIntegrationTest,
- DISABLED_ON_ANDROID(ProcessNoLossChangeBitRateVP8)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ProcessNoLossChangeBitRateVP8 \
+ DISABLED_ProcessNoLossChangeBitRateVP8
+#else
+#define MAYBE_ProcessNoLossChangeBitRateVP8 ProcessNoLossChangeBitRateVP8
+#endif
+TEST_F(VideoProcessorIntegrationTest, MAYBE_ProcessNoLossChangeBitRateVP8) {
// Bitrate and frame rate profile.
RateProfile rate_profile;
SetRateProfilePars(&rate_profile, 0, 200, 30, 0);
@@ -868,9 +839,7 @@ TEST_F(VideoProcessorIntegrationTest,
SetRateControlMetrics(rc_metrics, 0, 0, 45, 20, 10, 15, 0, 1);
SetRateControlMetrics(rc_metrics, 1, 0, 0, 25, 20, 10, 0, 0);
SetRateControlMetrics(rc_metrics, 2, 0, 0, 25, 15, 10, 0, 0);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
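
The #if/#define blocks above replace the removed DISABLED_ON_ANDROID() helper with the plain gtest idiom: a test whose registered name begins with DISABLED_ is compiled and registered but skipped at run time (it can still be forced with --gtest_also_run_disabled_tests). A self-contained sketch of the pattern with placeholder names:

#include "testing/gtest/include/gtest/gtest.h"

#if defined(WEBRTC_ANDROID)
#define MAYBE_RunsOffAndroid DISABLED_RunsOffAndroid
#else
#define MAYBE_RunsOffAndroid RunsOffAndroid
#endif

TEST(PlatformGateSketch, MAYBE_RunsOffAndroid) {
  // On Android the registered name starts with DISABLED_, so gtest skips
  // the test unless --gtest_also_run_disabled_tests is passed.
  EXPECT_TRUE(true);
}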
@@ -881,8 +850,15 @@ TEST_F(VideoProcessorIntegrationTest,
// for the rate control metrics can be lower. One key frame (first frame only).
// Note: quality after update should be higher but we currently compute quality
// metrics averaged over the whole sequence run.
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ProcessNoLossChangeFrameRateFrameDropVP8 \
+ DISABLED_ProcessNoLossChangeFrameRateFrameDropVP8
+#else
+#define MAYBE_ProcessNoLossChangeFrameRateFrameDropVP8 \
+ ProcessNoLossChangeFrameRateFrameDropVP8
+#endif
TEST_F(VideoProcessorIntegrationTest,
- DISABLED_ON_ANDROID(ProcessNoLossChangeFrameRateFrameDropVP8)) {
+ MAYBE_ProcessNoLossChangeFrameRateFrameDropVP8) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
RateProfile rate_profile;
@@ -903,16 +879,21 @@ TEST_F(VideoProcessorIntegrationTest,
SetRateControlMetrics(rc_metrics, 0, 40, 20, 75, 15, 60, 0, 1);
SetRateControlMetrics(rc_metrics, 1, 10, 0, 25, 10, 35, 0, 0);
SetRateControlMetrics(rc_metrics, 2, 0, 0, 20, 10, 15, 0, 0);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
// Run with no packet loss, at low bitrate. During this time we should've
// resized once. Expect 2 key frames generated (first and one for resize).
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ProcessNoLossSpatialResizeFrameDropVP8 \
+ DISABLED_ProcessNoLossSpatialResizeFrameDropVP8
+#else
+#define MAYBE_ProcessNoLossSpatialResizeFrameDropVP8 \
+ ProcessNoLossSpatialResizeFrameDropVP8
+#endif
TEST_F(VideoProcessorIntegrationTest,
- DISABLED_ON_ANDROID(ProcessNoLossSpatialResizeFrameDropVP8)) {
+ MAYBE_ProcessNoLossSpatialResizeFrameDropVP8) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
RateProfile rate_profile;
@@ -921,17 +902,15 @@ TEST_F(VideoProcessorIntegrationTest,
rate_profile.num_frames = kNbrFramesLong;
// Codec/network settings.
CodecConfigPars process_settings;
- SetCodecParameters(&process_settings, kVideoCodecVP8, 0.0f, -1,
- 1, false, true, true, true);
+ SetCodecParameters(&process_settings, kVideoCodecVP8, 0.0f, -1, 1, false,
+ true, true, true);
// Metrics for expected quality.
QualityMetrics quality_metrics;
SetQualityMetrics(&quality_metrics, 25.0, 15.0, 0.70, 0.40);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 160, 60, 120, 20, 70, 1, 2);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -940,8 +919,13 @@ TEST_F(VideoProcessorIntegrationTest,
// encoding rate mismatch are applied to each layer.
// No dropped frames in this test, and internal spatial resizer is off.
// One key frame (first frame only) in sequence, so no spatial resizing.
-TEST_F(VideoProcessorIntegrationTest,
- DISABLED_ON_ANDROID(ProcessNoLossTemporalLayersVP8)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ProcessNoLossTemporalLayersVP8 \
+ DISABLED_ProcessNoLossTemporalLayersVP8
+#else
+#define MAYBE_ProcessNoLossTemporalLayersVP8 ProcessNoLossTemporalLayersVP8
+#endif
+TEST_F(VideoProcessorIntegrationTest, MAYBE_ProcessNoLossTemporalLayersVP8) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
RateProfile rate_profile;
@@ -960,9 +944,7 @@ TEST_F(VideoProcessorIntegrationTest,
RateControlMetrics rc_metrics[2];
SetRateControlMetrics(rc_metrics, 0, 0, 20, 30, 10, 10, 0, 1);
SetRateControlMetrics(rc_metrics, 1, 0, 0, 30, 15, 10, 0, 0);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc
index 88b5467f1f..148d8dc74a 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc
@@ -10,10 +10,10 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/test/mock/mock_packet_manipulator.h"
#include "webrtc/modules/video_coding/codecs/test/videoprocessor.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
#include "webrtc/test/testsupport/mock/mock_frame_reader.h"
#include "webrtc/test/testsupport/mock/mock_frame_writer.h"
#include "webrtc/test/testsupport/packet_reader.h"
@@ -29,7 +29,7 @@ namespace test {
// Very basic testing for VideoProcessor. It's mostly tested by running the
// video_quality_measurement program.
-class VideoProcessorTest: public testing::Test {
+class VideoProcessorTest : public testing::Test {
protected:
MockVideoEncoder encoder_mock_;
MockVideoDecoder decoder_mock_;
@@ -53,44 +53,34 @@ class VideoProcessorTest: public testing::Test {
void TearDown() {}
void ExpectInit() {
- EXPECT_CALL(encoder_mock_, InitEncode(_, _, _))
- .Times(1);
+ EXPECT_CALL(encoder_mock_, InitEncode(_, _, _)).Times(1);
EXPECT_CALL(encoder_mock_, RegisterEncodeCompleteCallback(_))
- .Times(AtLeast(1));
- EXPECT_CALL(decoder_mock_, InitDecode(_, _))
- .Times(1);
+ .Times(AtLeast(1));
+ EXPECT_CALL(decoder_mock_, InitDecode(_, _)).Times(1);
EXPECT_CALL(decoder_mock_, RegisterDecodeCompleteCallback(_))
- .Times(AtLeast(1));
- EXPECT_CALL(frame_reader_mock_, NumberOfFrames())
- .WillOnce(Return(1));
- EXPECT_CALL(frame_reader_mock_, FrameLength())
- .WillOnce(Return(152064));
+ .Times(AtLeast(1));
+ EXPECT_CALL(frame_reader_mock_, NumberOfFrames()).WillOnce(Return(1));
+ EXPECT_CALL(frame_reader_mock_, FrameLength()).WillOnce(Return(152064));
}
};
TEST_F(VideoProcessorTest, Init) {
ExpectInit();
- VideoProcessorImpl video_processor(&encoder_mock_, &decoder_mock_,
- &frame_reader_mock_,
- &frame_writer_mock_,
- &packet_manipulator_mock_, config_,
- &stats_);
+ VideoProcessorImpl video_processor(
+ &encoder_mock_, &decoder_mock_, &frame_reader_mock_, &frame_writer_mock_,
+ &packet_manipulator_mock_, config_, &stats_);
ASSERT_TRUE(video_processor.Init());
}
TEST_F(VideoProcessorTest, ProcessFrame) {
ExpectInit();
- EXPECT_CALL(encoder_mock_, Encode(_, _, _))
- .Times(1);
- EXPECT_CALL(frame_reader_mock_, ReadFrame(_))
- .WillOnce(Return(true));
+ EXPECT_CALL(encoder_mock_, Encode(_, _, _)).Times(1);
+ EXPECT_CALL(frame_reader_mock_, ReadFrame(_)).WillOnce(Return(true));
// Since we don't return any callback from the mock, the decoder will not
// be more than initialized...
- VideoProcessorImpl video_processor(&encoder_mock_, &decoder_mock_,
- &frame_reader_mock_,
- &frame_writer_mock_,
- &packet_manipulator_mock_, config_,
- &stats_);
+ VideoProcessorImpl video_processor(
+ &encoder_mock_, &decoder_mock_, &frame_reader_mock_, &frame_writer_mock_,
+ &packet_manipulator_mock_, config_, &stats_);
ASSERT_TRUE(video_processor.Init());
video_processor.ProcessFrame(0);
}
diff --git a/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc b/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc
index 22be5a83cc..37fad483f7 100644
--- a/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc
+++ b/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc
@@ -16,7 +16,7 @@
#include <sys/stat.h> // To check for directory existence.
#ifndef S_ISDIR // Not defined in stat.h on Windows.
-#define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR)
+#define S_ISDIR(mode) (((mode)&S_IFMT) == S_IFDIR)
#endif
#include "gflags/gflags.h"
@@ -26,7 +26,7 @@
#include "webrtc/modules/video_coding/codecs/test/stats.h"
#include "webrtc/modules/video_coding/codecs/test/videoprocessor.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/frame_reader.h"
#include "webrtc/test/testsupport/frame_writer.h"
@@ -34,68 +34,102 @@
#include "webrtc/test/testsupport/packet_reader.h"
DEFINE_string(test_name, "Quality test", "The name of the test to run. ");
-DEFINE_string(test_description, "", "A more detailed description about what "
+DEFINE_string(test_description,
+ "",
+ "A more detailed description about what "
"the current test is about.");
-DEFINE_string(input_filename, "", "Input file. "
+DEFINE_string(input_filename,
+ "",
+ "Input file. "
"The source video file to be encoded and decoded. Must be in "
".yuv format");
DEFINE_int32(width, -1, "Width in pixels of the frames in the input file.");
DEFINE_int32(height, -1, "Height in pixels of the frames in the input file.");
-DEFINE_int32(framerate, 30, "Frame rate of the input file, in FPS "
+DEFINE_int32(framerate,
+ 30,
+ "Frame rate of the input file, in FPS "
"(frames-per-second). ");
-DEFINE_string(output_dir, ".", "Output directory. "
+DEFINE_string(output_dir,
+ ".",
+ "Output directory. "
"The directory where the output file will be put. Must already "
"exist.");
-DEFINE_bool(use_single_core, false, "Force using a single core. If set to "
+DEFINE_bool(use_single_core,
+ false,
+ "Force using a single core. If set to "
"true, only one core will be used for processing. Using a single "
"core is necessary to get a deterministic behavior for the"
"encoded frames - using multiple cores will produce different "
"encoded frames since multiple cores are competing to consume the "
"byte budget for each frame in parallel. If set to false, "
"the maximum detected number of cores will be used. ");
-DEFINE_bool(disable_fixed_random_seed , false, "Set this flag to disable the"
+DEFINE_bool(disable_fixed_random_seed,
+ false,
+ "Set this flag to disable the"
"usage of a fixed random seed for the random generator used "
"for packet loss. Disabling this will cause consecutive runs "
"loose packets at different locations, which is bad for "
"reproducibility.");
-DEFINE_string(output_filename, "", "Output file. "
+DEFINE_string(output_filename,
+ "",
+ "Output file. "
"The name of the output video file resulting of the processing "
"of the source file. By default this is the same name as the "
"input file with '_out' appended before the extension.");
DEFINE_int32(bitrate, 500, "Bit rate in kilobits/second.");
-DEFINE_int32(keyframe_interval, 0, "Forces a keyframe every Nth frame. "
+DEFINE_int32(keyframe_interval,
+ 0,
+ "Forces a keyframe every Nth frame. "
"0 means the encoder decides when to insert keyframes. Note that "
"the encoder may create a keyframe in other locations in addition "
"to the interval that is set using this parameter.");
-DEFINE_int32(temporal_layers, 0, "The number of temporal layers to use "
+DEFINE_int32(temporal_layers,
+ 0,
+ "The number of temporal layers to use "
"(VP8 specific codec setting). Must be 0-4.");
-DEFINE_int32(packet_size, 1500, "Simulated network packet size in bytes (MTU). "
+DEFINE_int32(packet_size,
+ 1500,
+ "Simulated network packet size in bytes (MTU). "
"Used for packet loss simulation.");
-DEFINE_int32(max_payload_size, 1440, "Max payload size in bytes for the "
+DEFINE_int32(max_payload_size,
+ 1440,
+ "Max payload size in bytes for the "
"encoder.");
-DEFINE_string(packet_loss_mode, "uniform", "Packet loss mode. Two different "
+DEFINE_string(packet_loss_mode,
+ "uniform",
+ "Packet loss mode. Two different "
"packet loss models are supported: uniform or burst. This "
"setting has no effect unless packet_loss_rate is >0. ");
-DEFINE_double(packet_loss_probability, 0.0, "Packet loss probability. A value "
+DEFINE_double(packet_loss_probability,
+ 0.0,
+ "Packet loss probability. A value "
"between 0.0 and 1.0 that defines the probability of a packet "
"being lost. 0.1 means 10% and so on.");
-DEFINE_int32(packet_loss_burst_length, 1, "Packet loss burst length. Defines "
+DEFINE_int32(packet_loss_burst_length,
+ 1,
+ "Packet loss burst length. Defines "
"how many packets will be lost in a burst when a packet has been "
"decided to be lost. Must be >=1.");
-DEFINE_bool(csv, false, "CSV output. Enabling this will output all frame "
+DEFINE_bool(csv,
+ false,
+ "CSV output. Enabling this will output all frame "
"statistics at the end of execution. Recommended to run combined "
"with --noverbose to avoid mixing output.");
-DEFINE_bool(python, false, "Python output. Enabling this will output all frame "
+DEFINE_bool(python,
+ false,
+ "Python output. Enabling this will output all frame "
"statistics as a Python script at the end of execution. "
"Recommended to run combine with --noverbose to avoid mixing "
"output.");
-DEFINE_bool(verbose, true, "Verbose mode. Prints a lot of debugging info. "
+DEFINE_bool(verbose,
+ true,
+ "Verbose mode. Prints a lot of debugging info. "
"Suitable for tracking progress but not for capturing output. "
"Disable with --noverbose flag.");
// Custom log method that only prints if the verbose flag is given.
// Supports all the standard printf parameters and formatting (just forwarded).
-int Log(const char *format, ...) {
+int Log(const char* format, ...) {
int result = 0;
if (FLAGS_verbose) {
va_list args;
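
These reflowed DEFINE_ blocks and the Log() helper follow one pattern: declare a gflag, then read FLAGS_<name> at run time and forward variadic arguments only when it is set. A self-contained sketch under the same gflags API (the flag name here is made up):

#include <cstdarg>
#include <cstdio>

#include "gflags/gflags.h"

DEFINE_bool(chatty, true, "Print progress output; disable with --nochatty.");

// Variadic logger that forwards to vprintf only when the flag is set,
// mirroring the Log() helper above.
int MaybeLog(const char* format, ...) {
  int result = 0;
  if (FLAGS_chatty) {
    va_list args;
    va_start(args, format);
    result = vprintf(format, args);
    va_end(args);
  }
  return result;
}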
@@ -132,9 +166,9 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {
// Verify the output dir exists.
struct stat dir_info;
if (!(stat(FLAGS_output_dir.c_str(), &dir_info) == 0 &&
- S_ISDIR(dir_info.st_mode))) {
+ S_ISDIR(dir_info.st_mode))) {
fprintf(stderr, "Cannot find output directory: %s\n",
- FLAGS_output_dir.c_str());
+ FLAGS_output_dir.c_str());
return 3;
}
config->output_dir = FLAGS_output_dir;
@@ -148,16 +182,16 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {
startIndex = 0;
}
FLAGS_output_filename =
- FLAGS_input_filename.substr(startIndex,
- FLAGS_input_filename.find_last_of(".")
- - startIndex) + "_out.yuv";
+ FLAGS_input_filename.substr(
+ startIndex, FLAGS_input_filename.find_last_of(".") - startIndex) +
+ "_out.yuv";
}
// Verify output file can be written.
if (FLAGS_output_dir == ".") {
config->output_filename = FLAGS_output_filename;
} else {
- config->output_filename = FLAGS_output_dir + "/"+ FLAGS_output_filename;
+ config->output_filename = FLAGS_output_dir + "/" + FLAGS_output_filename;
}
test_file = fopen(config->output_filename.c_str(), "wb");
if (test_file == NULL) {
@@ -232,27 +266,32 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {
// Check packet loss settings
if (FLAGS_packet_loss_mode != "uniform" &&
FLAGS_packet_loss_mode != "burst") {
- fprintf(stderr, "Unsupported packet loss mode, must be 'uniform' or "
+ fprintf(stderr,
+ "Unsupported packet loss mode, must be 'uniform' or "
"'burst'\n.");
return 10;
}
config->networking_config.packet_loss_mode = webrtc::test::kUniform;
if (FLAGS_packet_loss_mode == "burst") {
- config->networking_config.packet_loss_mode = webrtc::test::kBurst;
+ config->networking_config.packet_loss_mode = webrtc::test::kBurst;
}
if (FLAGS_packet_loss_probability < 0.0 ||
FLAGS_packet_loss_probability > 1.0) {
- fprintf(stderr, "Invalid packet loss probability. Must be 0.0 - 1.0, "
- "was: %f\n", FLAGS_packet_loss_probability);
+ fprintf(stderr,
+ "Invalid packet loss probability. Must be 0.0 - 1.0, "
+ "was: %f\n",
+ FLAGS_packet_loss_probability);
return 11;
}
config->networking_config.packet_loss_probability =
FLAGS_packet_loss_probability;
if (FLAGS_packet_loss_burst_length < 1) {
- fprintf(stderr, "Invalid packet loss burst length, must be >=1, "
- "was: %d\n", FLAGS_packet_loss_burst_length);
+ fprintf(stderr,
+ "Invalid packet loss burst length, must be >=1, "
+ "was: %d\n",
+ FLAGS_packet_loss_burst_length);
return 12;
}
config->networking_config.packet_loss_burst_length =
@@ -264,10 +303,9 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {
void CalculateSsimVideoMetrics(webrtc::test::TestConfig* config,
webrtc::test::QualityMetricsResult* result) {
Log("Calculating SSIM...\n");
- I420SSIMFromFiles(config->input_filename.c_str(),
- config->output_filename.c_str(),
- config->codec_settings->width,
- config->codec_settings->height, result);
+ I420SSIMFromFiles(
+ config->input_filename.c_str(), config->output_filename.c_str(),
+ config->codec_settings->width, config->codec_settings->height, result);
Log(" Average: %3.2f\n", result->average);
Log(" Min : %3.2f (frame %d)\n", result->min, result->min_frame_number);
Log(" Max : %3.2f (frame %d)\n", result->max, result->max_frame_number);
@@ -276,10 +314,9 @@ void CalculateSsimVideoMetrics(webrtc::test::TestConfig* config,
void CalculatePsnrVideoMetrics(webrtc::test::TestConfig* config,
webrtc::test::QualityMetricsResult* result) {
Log("Calculating PSNR...\n");
- I420PSNRFromFiles(config->input_filename.c_str(),
- config->output_filename.c_str(),
- config->codec_settings->width,
- config->codec_settings->height, result);
+ I420PSNRFromFiles(
+ config->input_filename.c_str(), config->output_filename.c_str(),
+ config->codec_settings->width, config->codec_settings->height, result);
Log(" Average: %3.2f\n", result->average);
Log(" Min : %3.2f (frame %d)\n", result->min, result->min_frame_number);
Log(" Max : %3.2f (frame %d)\n", result->max, result->max_frame_number);
@@ -309,9 +346,11 @@ void PrintConfigurationSummary(const webrtc::test::TestConfig& config) {
void PrintCsvOutput(const webrtc::test::Stats& stats,
const webrtc::test::QualityMetricsResult& ssim_result,
const webrtc::test::QualityMetricsResult& psnr_result) {
- Log("\nCSV output (recommended to run with --noverbose to skip the "
- "above output)\n");
- printf("frame_number encoding_successful decoding_successful "
+ Log(
+ "\nCSV output (recommended to run with --noverbose to skip the "
+ "above output)\n");
+ printf(
+ "frame_number encoding_successful decoding_successful "
"encode_return_code decode_return_code "
"encode_time_in_us decode_time_in_us "
"bit_rate_in_kbps encoded_frame_length_in_bytes frame_type "
@@ -322,22 +361,13 @@ void PrintCsvOutput(const webrtc::test::Stats& stats,
const webrtc::test::FrameStatistic& f = stats.stats_[i];
const webrtc::test::FrameResult& ssim = ssim_result.frames[i];
const webrtc::test::FrameResult& psnr = psnr_result.frames[i];
- printf("%4d, %d, %d, %2d, %2d, %6d, %6d, %5d, %7" PRIuS ", %d, %2d, %2"
- PRIuS ", %5.3f, %5.2f\n",
- f.frame_number,
- f.encoding_successful,
- f.decoding_successful,
- f.encode_return_code,
- f.decode_return_code,
- f.encode_time_in_us,
- f.decode_time_in_us,
- f.bit_rate_in_kbps,
- f.encoded_frame_length_in_bytes,
- f.frame_type,
- f.packets_dropped,
- f.total_packets,
- ssim.value,
- psnr.value);
+ printf("%4d, %d, %d, %2d, %2d, %6d, %6d, %5d, %7" PRIuS
+ ", %d, %2d, %2" PRIuS ", %5.3f, %5.2f\n",
+ f.frame_number, f.encoding_successful, f.decoding_successful,
+ f.encode_return_code, f.decode_return_code, f.encode_time_in_us,
+ f.decode_time_in_us, f.bit_rate_in_kbps,
+ f.encoded_frame_length_in_bytes, f.frame_type, f.packets_dropped,
+ f.total_packets, ssim.value, psnr.value);
}
}
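
The bare %" PRIuS " fragments above rely on C's adjacent-string-literal concatenation to splice a size_t conversion specifier into the larger format string; PRIuS is WebRTC's size_t analogue of the <cinttypes> PRIu64 macros. A stand-alone sketch of the idiom, assuming a POSIX-style "zu" expansion (the macro below is defined locally for illustration, not taken from WebRTC):

#include <cstddef>
#include <cstdio>

// Illustrative stand-in for WebRTC's PRIuS; on POSIX it expands to "zu".
#define PRIUS_SKETCH "zu"

int main() {
  size_t encoded_frame_length = 42;
  // Adjacent literals concatenate at compile time into one format string.
  printf("encoded_frame_length: %" PRIUS_SKETCH " bytes\n",
         encoded_frame_length);
  return 0;
}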
@@ -345,91 +375,85 @@ void PrintPythonOutput(const webrtc::test::TestConfig& config,
const webrtc::test::Stats& stats,
const webrtc::test::QualityMetricsResult& ssim_result,
const webrtc::test::QualityMetricsResult& psnr_result) {
- Log("\nPython output (recommended to run with --noverbose to skip the "
- "above output)\n");
- printf("test_configuration = ["
- "{'name': 'name', 'value': '%s'},\n"
- "{'name': 'description', 'value': '%s'},\n"
- "{'name': 'test_number', 'value': '%d'},\n"
- "{'name': 'input_filename', 'value': '%s'},\n"
- "{'name': 'output_filename', 'value': '%s'},\n"
- "{'name': 'output_dir', 'value': '%s'},\n"
- "{'name': 'packet_size_in_bytes', 'value': '%" PRIuS "'},\n"
- "{'name': 'max_payload_size_in_bytes', 'value': '%" PRIuS "'},\n"
- "{'name': 'packet_loss_mode', 'value': '%s'},\n"
- "{'name': 'packet_loss_probability', 'value': '%f'},\n"
- "{'name': 'packet_loss_burst_length', 'value': '%d'},\n"
- "{'name': 'exclude_frame_types', 'value': '%s'},\n"
- "{'name': 'frame_length_in_bytes', 'value': '%" PRIuS "'},\n"
- "{'name': 'use_single_core', 'value': '%s'},\n"
- "{'name': 'keyframe_interval;', 'value': '%d'},\n"
- "{'name': 'video_codec_type', 'value': '%s'},\n"
- "{'name': 'width', 'value': '%d'},\n"
- "{'name': 'height', 'value': '%d'},\n"
- "{'name': 'bit_rate_in_kbps', 'value': '%d'},\n"
- "]\n",
- config.name.c_str(),
- config.description.c_str(),
- config.test_number,
- config.input_filename.c_str(),
- config.output_filename.c_str(),
- config.output_dir.c_str(),
- config.networking_config.packet_size_in_bytes,
- config.networking_config.max_payload_size_in_bytes,
- PacketLossModeToStr(config.networking_config.packet_loss_mode),
- config.networking_config.packet_loss_probability,
- config.networking_config.packet_loss_burst_length,
- ExcludeFrameTypesToStr(config.exclude_frame_types),
- config.frame_length_in_bytes,
- config.use_single_core ? "True " : "False",
- config.keyframe_interval,
- webrtc::test::VideoCodecTypeToStr(config.codec_settings->codecType),
- config.codec_settings->width,
- config.codec_settings->height,
- config.codec_settings->startBitrate);
- printf("frame_data_types = {"
- "'frame_number': ('number', 'Frame number'),\n"
- "'encoding_successful': ('boolean', 'Encoding successful?'),\n"
- "'decoding_successful': ('boolean', 'Decoding successful?'),\n"
- "'encode_time': ('number', 'Encode time (us)'),\n"
- "'decode_time': ('number', 'Decode time (us)'),\n"
- "'encode_return_code': ('number', 'Encode return code'),\n"
- "'decode_return_code': ('number', 'Decode return code'),\n"
- "'bit_rate': ('number', 'Bit rate (kbps)'),\n"
- "'encoded_frame_length': "
- "('number', 'Encoded frame length (bytes)'),\n"
- "'frame_type': ('string', 'Frame type'),\n"
- "'packets_dropped': ('number', 'Packets dropped'),\n"
- "'total_packets': ('number', 'Total packets'),\n"
- "'ssim': ('number', 'SSIM'),\n"
- "'psnr': ('number', 'PSNR (dB)'),\n"
- "}\n");
+ Log(
+ "\nPython output (recommended to run with --noverbose to skip the "
+ "above output)\n");
+ printf(
+ "test_configuration = ["
+ "{'name': 'name', 'value': '%s'},\n"
+ "{'name': 'description', 'value': '%s'},\n"
+ "{'name': 'test_number', 'value': '%d'},\n"
+ "{'name': 'input_filename', 'value': '%s'},\n"
+ "{'name': 'output_filename', 'value': '%s'},\n"
+ "{'name': 'output_dir', 'value': '%s'},\n"
+ "{'name': 'packet_size_in_bytes', 'value': '%" PRIuS
+ "'},\n"
+ "{'name': 'max_payload_size_in_bytes', 'value': '%" PRIuS
+ "'},\n"
+ "{'name': 'packet_loss_mode', 'value': '%s'},\n"
+ "{'name': 'packet_loss_probability', 'value': '%f'},\n"
+ "{'name': 'packet_loss_burst_length', 'value': '%d'},\n"
+ "{'name': 'exclude_frame_types', 'value': '%s'},\n"
+ "{'name': 'frame_length_in_bytes', 'value': '%" PRIuS
+ "'},\n"
+ "{'name': 'use_single_core', 'value': '%s'},\n"
+ "{'name': 'keyframe_interval;', 'value': '%d'},\n"
+ "{'name': 'video_codec_type', 'value': '%s'},\n"
+ "{'name': 'width', 'value': '%d'},\n"
+ "{'name': 'height', 'value': '%d'},\n"
+ "{'name': 'bit_rate_in_kbps', 'value': '%d'},\n"
+ "]\n",
+ config.name.c_str(), config.description.c_str(), config.test_number,
+ config.input_filename.c_str(), config.output_filename.c_str(),
+ config.output_dir.c_str(), config.networking_config.packet_size_in_bytes,
+ config.networking_config.max_payload_size_in_bytes,
+ PacketLossModeToStr(config.networking_config.packet_loss_mode),
+ config.networking_config.packet_loss_probability,
+ config.networking_config.packet_loss_burst_length,
+ ExcludeFrameTypesToStr(config.exclude_frame_types),
+ config.frame_length_in_bytes, config.use_single_core ? "True " : "False",
+ config.keyframe_interval,
+ webrtc::test::VideoCodecTypeToStr(config.codec_settings->codecType),
+ config.codec_settings->width, config.codec_settings->height,
+ config.codec_settings->startBitrate);
+ printf(
+ "frame_data_types = {"
+ "'frame_number': ('number', 'Frame number'),\n"
+ "'encoding_successful': ('boolean', 'Encoding successful?'),\n"
+ "'decoding_successful': ('boolean', 'Decoding successful?'),\n"
+ "'encode_time': ('number', 'Encode time (us)'),\n"
+ "'decode_time': ('number', 'Decode time (us)'),\n"
+ "'encode_return_code': ('number', 'Encode return code'),\n"
+ "'decode_return_code': ('number', 'Decode return code'),\n"
+ "'bit_rate': ('number', 'Bit rate (kbps)'),\n"
+ "'encoded_frame_length': "
+ "('number', 'Encoded frame length (bytes)'),\n"
+ "'frame_type': ('string', 'Frame type'),\n"
+ "'packets_dropped': ('number', 'Packets dropped'),\n"
+ "'total_packets': ('number', 'Total packets'),\n"
+ "'ssim': ('number', 'SSIM'),\n"
+ "'psnr': ('number', 'PSNR (dB)'),\n"
+ "}\n");
printf("frame_data = [");
for (unsigned int i = 0; i < stats.stats_.size(); ++i) {
const webrtc::test::FrameStatistic& f = stats.stats_[i];
const webrtc::test::FrameResult& ssim = ssim_result.frames[i];
const webrtc::test::FrameResult& psnr = psnr_result.frames[i];
- printf("{'frame_number': %d, "
- "'encoding_successful': %s, 'decoding_successful': %s, "
- "'encode_time': %d, 'decode_time': %d, "
- "'encode_return_code': %d, 'decode_return_code': %d, "
- "'bit_rate': %d, 'encoded_frame_length': %" PRIuS ", "
- "'frame_type': %s, 'packets_dropped': %d, "
- "'total_packets': %" PRIuS ", 'ssim': %f, 'psnr': %f},\n",
- f.frame_number,
- f.encoding_successful ? "True " : "False",
- f.decoding_successful ? "True " : "False",
- f.encode_time_in_us,
- f.decode_time_in_us,
- f.encode_return_code,
- f.decode_return_code,
- f.bit_rate_in_kbps,
- f.encoded_frame_length_in_bytes,
- f.frame_type == webrtc::kVideoFrameDelta ? "'Delta'" : "'Other'",
- f.packets_dropped,
- f.total_packets,
- ssim.value,
- psnr.value);
+ printf(
+ "{'frame_number': %d, "
+ "'encoding_successful': %s, 'decoding_successful': %s, "
+ "'encode_time': %d, 'decode_time': %d, "
+ "'encode_return_code': %d, 'decode_return_code': %d, "
+ "'bit_rate': %d, 'encoded_frame_length': %" PRIuS
+ ", "
+ "'frame_type': %s, 'packets_dropped': %d, "
+ "'total_packets': %" PRIuS ", 'ssim': %f, 'psnr': %f},\n",
+ f.frame_number, f.encoding_successful ? "True " : "False",
+ f.decoding_successful ? "True " : "False", f.encode_time_in_us,
+ f.decode_time_in_us, f.encode_return_code, f.decode_return_code,
+ f.bit_rate_in_kbps, f.encoded_frame_length_in_bytes,
+ f.frame_type == webrtc::kVideoFrameDelta ? "'Delta'" : "'Other'",
+ f.packets_dropped, f.total_packets, ssim.value, psnr.value);
}
printf("]\n");
}
@@ -438,10 +462,14 @@ void PrintPythonOutput(const webrtc::test::TestConfig& config,
// The input file must be in YUV format.
int main(int argc, char* argv[]) {
std::string program_name = argv[0];
- std::string usage = "Quality test application for video comparisons.\n"
- "Run " + program_name + " --helpshort for usage.\n"
- "Example usage:\n" + program_name +
- " --input_filename=filename.yuv --width=352 --height=288\n";
+ std::string usage =
+ "Quality test application for video comparisons.\n"
+ "Run " +
+ program_name +
+ " --helpshort for usage.\n"
+ "Example usage:\n" +
+ program_name +
+ " --input_filename=filename.yuv --width=352 --height=288\n";
google::SetUsageMessage(usage);
google::ParseCommandLineFlags(&argc, &argv, true);
@@ -478,10 +506,8 @@ int main(int argc, char* argv[]) {
packet_manipulator.InitializeRandomSeed(time(NULL));
}
webrtc::test::VideoProcessor* processor =
- new webrtc::test::VideoProcessorImpl(encoder, decoder,
- &frame_reader,
- &frame_writer,
- &packet_manipulator,
+ new webrtc::test::VideoProcessorImpl(encoder, decoder, &frame_reader,
+ &frame_writer, &packet_manipulator,
config, &stats);
processor->Init();
diff --git a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
index da6008ba3d..9226fa774c 100644
--- a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
@@ -13,8 +13,8 @@
#include <stdlib.h>
#include <string.h>
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "vpx/vpx_encoder.h"
@@ -41,7 +41,7 @@ int DefaultTemporalLayers::CurrentLayerId() const {
int index = pattern_idx_ % temporal_ids_length_;
assert(index >= 0);
return temporal_ids_[index];
- }
+}
bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
int max_bitrate_kbit,
@@ -56,8 +56,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_periodicity = temporal_ids_length_;
cfg->ts_target_bitrate[0] = bitrateKbit;
cfg->ts_rate_decimator[0] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 1;
temporal_pattern_[0] = kTemporalUpdateLastRefAll;
@@ -74,8 +73,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_target_bitrate[1] = bitrateKbit;
cfg->ts_rate_decimator[0] = 2;
cfg->ts_rate_decimator[1] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 8;
temporal_pattern_[0] = kTemporalUpdateLastAndGoldenRefAltRef;
@@ -103,8 +101,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_rate_decimator[0] = 4;
cfg->ts_rate_decimator[1] = 2;
cfg->ts_rate_decimator[2] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 8;
temporal_pattern_[0] = kTemporalUpdateLastAndGoldenRefAltRef;
@@ -138,8 +135,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_rate_decimator[1] = 4;
cfg->ts_rate_decimator[2] = 2;
cfg->ts_rate_decimator[3] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 16;
temporal_pattern_[0] = kTemporalUpdateLast;
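
For orientation, the ts_* assignments in this function are libvpx's temporal-scalability configuration on vpx_codec_enc_cfg_t: ts_periodicity is the length of the repeating layer-id pattern, ts_rate_decimator[i] divides the input frame rate for layer i, and ts_target_bitrate[i] is the cumulative target through layer i. A minimal 2-layer sketch in the same spirit (the 60% base-layer split is illustrative):

#include "vpx/vpx_encoder.h"

// Sketch: configure 2 temporal layers on a libvpx encoder config.
void ConfigureTwoLayersSketch(vpx_codec_enc_cfg_t* cfg, int bitrate_kbps) {
  cfg->ts_number_layers = 2;
  cfg->ts_periodicity = 2;  // Layer-id pattern repeats every 2 frames.
  cfg->ts_target_bitrate[0] = bitrate_kbps * 6 / 10;  // Base layer: ~60%.
  cfg->ts_target_bitrate[1] = bitrate_kbps;           // Base + enhancement.
  cfg->ts_rate_decimator[0] = 2;  // Base layer runs at half frame rate.
  cfg->ts_rate_decimator[1] = 1;
  cfg->ts_layer_id[0] = 0;  // Even frames -> layer 0.
  cfg->ts_layer_id[1] = 1;  // Odd frames  -> layer 1.
}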
@@ -243,7 +239,7 @@ int DefaultTemporalLayers::EncodeFlags(uint32_t timestamp) {
void DefaultTemporalLayers::PopulateCodecSpecific(
bool base_layer_sync,
- CodecSpecificInfoVP8 *vp8_info,
+ CodecSpecificInfoVP8* vp8_info,
uint32_t timestamp) {
assert(number_of_temporal_layers_ > 0);
assert(0 < temporal_ids_length_);
@@ -254,8 +250,8 @@ void DefaultTemporalLayers::PopulateCodecSpecific(
vp8_info->tl0PicIdx = kNoTl0PicIdx;
} else {
if (base_layer_sync) {
- vp8_info->temporalIdx = 0;
- vp8_info->layerSync = true;
+ vp8_info->temporalIdx = 0;
+ vp8_info->layerSync = true;
} else {
vp8_info->temporalIdx = CurrentLayerId();
TemporalReferences temporal_reference =
@@ -267,7 +263,7 @@ void DefaultTemporalLayers::PopulateCodecSpecific(
kTemporalUpdateGoldenWithoutDependencyRefAltRef ||
temporal_reference == kTemporalUpdateNoneNoRefGoldenRefAltRef ||
(temporal_reference == kTemporalUpdateNone &&
- number_of_temporal_layers_ == 4)) {
+ number_of_temporal_layers_ == 4)) {
vp8_info->layerSync = true;
} else {
vp8_info->layerSync = false;
diff --git a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
index 34121cbcf6..461ba69a72 100644
--- a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
@@ -8,9 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.h"
#include "vpx/vpx_encoder.h"
@@ -19,47 +18,36 @@
namespace webrtc {
enum {
- kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF |
+ kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_REF_ARF,
- kTemporalUpdateGoldenWithoutDependency = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateGolden = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateAltrefWithoutDependency = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateAltref = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF |
+ kTemporalUpdateGoldenWithoutDependency =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateGolden =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateAltrefWithoutDependency =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateAltref = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
- kTemporalUpdateNoneNoRefAltRef = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_GF |
+ kTemporalUpdateNoneNoRefAltRef = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
- kTemporalUpdateNoneNoRefGolden = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_GF |
+ kTemporalUpdateNoneNoRefGolden = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateLastRefAltRef = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF,
- kTemporalUpdateLastAndGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateLastRefAltRef =
+ VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF,
+ kTemporalUpdateLastAndGoldenRefAltRef =
+ VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF,
};
TEST(TemporalLayersTest, 2Layers) {
@@ -68,29 +56,30 @@ TEST(TemporalLayersTest, 2Layers) {
CodecSpecificInfoVP8 vp8_info;
tl.ConfigureBitrates(500, 500, 30, &cfg);
- int expected_flags[16] = { kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- };
- int expected_temporal_idx[16] =
- { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 };
-
- bool expected_layer_sync[16] =
- { false, true, false, false, false, false, false, false,
- false, true, false, false, false, false, false, false };
+ int expected_flags[16] = {
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ };
+ int expected_temporal_idx[16] = {0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1};
+
+ bool expected_layer_sync[16] = {false, true, false, false, false, false,
+ false, false, false, true, false, false,
+ false, false, false, false};
uint32_t timestamp = 0;
for (int i = 0; i < 16; ++i) {
@@ -108,29 +97,30 @@ TEST(TemporalLayersTest, 3Layers) {
CodecSpecificInfoVP8 vp8_info;
tl.ConfigureBitrates(500, 500, 30, &cfg);
- int expected_flags[16] = { kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateNoneNoRefGolden,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateNoneNoRefGolden,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateNone,
+ int expected_flags[16] = {
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateNoneNoRefGolden,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateNoneNoRefGolden,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateNone,
};
- int expected_temporal_idx[16] =
- { 0, 2, 1, 2, 0, 2, 1, 2, 0, 2, 1, 2, 0, 2, 1, 2 };
+ int expected_temporal_idx[16] = {0, 2, 1, 2, 0, 2, 1, 2,
+ 0, 2, 1, 2, 0, 2, 1, 2};
- bool expected_layer_sync[16] =
- { false, true, true, false, false, false, false, false,
- false, true, true, false, false, false, false, false };
+ bool expected_layer_sync[16] = {false, true, true, false, false, false,
+ false, false, false, true, true, false,
+ false, false, false, false};
unsigned int timestamp = 0;
for (int i = 0; i < 16; ++i) {
@@ -165,12 +155,12 @@ TEST(TemporalLayersTest, 4Layers) {
kTemporalUpdateAltref,
kTemporalUpdateNone,
};
- int expected_temporal_idx[16] =
- { 0, 3, 2, 3, 1, 3, 2, 3, 0, 3, 2, 3, 1, 3, 2, 3 };
+ int expected_temporal_idx[16] = {0, 3, 2, 3, 1, 3, 2, 3,
+ 0, 3, 2, 3, 1, 3, 2, 3};
- bool expected_layer_sync[16] =
- { false, true, true, true, true, true, false, true,
- false, true, false, true, false, true, false, true };
+ bool expected_layer_sync[16] = {false, true, true, true, true, true,
+ false, true, false, true, false, true,
+ false, true, false, true};
uint32_t timestamp = 0;
for (int i = 0; i < 16; ++i) {
@@ -198,8 +188,7 @@ TEST(TemporalLayersTest, KeyFrame) {
kTemporalUpdateGoldenRefAltRef,
kTemporalUpdateNone,
};
- int expected_temporal_idx[8] =
- { 0, 0, 0, 0, 0, 0, 0, 2};
+ int expected_temporal_idx[8] = {0, 0, 0, 0, 0, 0, 0, 2};
uint32_t timestamp = 0;
for (int i = 0; i < 7; ++i) {
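A reading aid for the expectation arrays in these tests: with two layers the pattern repeats every 8 frames, temporal_idx alternates 0/1, and a TL1 frame is a sync point only immediately after the pattern restarts. A sketch that reproduces the expected_layer_sync values checked above:

// Sketch: matches expected_layer_sync in the 2-layer test, which is true
// only at frames 1 and 9 of the 16 frames checked.
bool IsTwoLayerSyncFrame(int frame_index) {
  return (frame_index % 8) == 1;
}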
diff --git a/webrtc/modules/video_coding/codecs/vp8/include/vp8.h b/webrtc/modules/video_coding/codecs/vp8/include/vp8.h
index f5dae471d2..dd3514235d 100644
--- a/webrtc/modules/video_coding/codecs/vp8/include/vp8.h
+++ b/webrtc/modules/video_coding/codecs/vp8/include/vp8.h
@@ -13,7 +13,7 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
@@ -21,16 +21,15 @@ class VP8Encoder : public VideoEncoder {
public:
static VP8Encoder* Create();
- virtual ~VP8Encoder() {};
+ virtual ~VP8Encoder() {}
}; // end of VP8Encoder class
-
class VP8Decoder : public VideoDecoder {
public:
static VP8Decoder* Create();
- virtual ~VP8Decoder() {};
+ virtual ~VP8Decoder() {}
}; // end of VP8Decoder class
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
diff --git a/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h b/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h
index c2cefdd94e..7a27e4429a 100644
--- a/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h
+++ b/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_COMMON_TYPES_H_
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_COMMON_TYPES_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
#include "webrtc/common_types.h"
@@ -19,11 +19,11 @@ namespace webrtc {
// Values as required for the VP8 codec (accumulating).
static const float
kVp8LayerRateAlloction[kMaxTemporalStreams][kMaxTemporalStreams] = {
- {1.0f, 1.0f, 1.0f, 1.0f}, // 1 layer
- {0.6f, 1.0f, 1.0f, 1.0f}, // 2 layers {60%, 40%}
- {0.4f, 0.6f, 1.0f, 1.0f}, // 3 layers {40%, 20%, 40%}
- {0.25f, 0.4f, 0.6f, 1.0f} // 4 layers {25%, 15%, 20%, 40%}
+ {1.0f, 1.0f, 1.0f, 1.0f}, // 1 layer
+ {0.6f, 1.0f, 1.0f, 1.0f}, // 2 layers {60%, 40%}
+ {0.4f, 0.6f, 1.0f, 1.0f}, // 3 layers {40%, 20%, 40%}
+ {0.25f, 0.4f, 0.6f, 1.0f} // 4 layers {25%, 15%, 20%, 40%}
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_COMMON_TYPES_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
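The table is cumulative: row n-1 gives, for n layers in use, the fraction of the total bitrate consumed by temporal layers 0 through i, so an individual layer's share is the difference between adjacent entries. (The "Alloction" spelling is the identifier's actual name in the tree.) A worked example for the 3-layer row at a total of 1000 kbps:

// Reading kVp8LayerRateAlloction for 3 layers at 1000 kbps total.
const float* row = kVp8LayerRateAlloction[2];              // 3-layer row.
const int total_kbps = 1000;
const int tl0 = static_cast<int>(row[0] * total_kbps);     // 400 kbps (40%).
const int tl0_1 = static_cast<int>(row[1] * total_kbps);   // 600; TL1 adds 200.
const int tl0_2 = static_cast<int>(row[2] * total_kbps);   // 1000; TL2 adds 400.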
diff --git a/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc b/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc
index 15b5af9200..d22601358f 100644
--- a/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc
@@ -12,7 +12,7 @@
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
@@ -23,7 +23,8 @@ namespace webrtc {
namespace {
enum {
kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF,
+ VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_REF_ARF,
kTemporalUpdateGolden =
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
@@ -37,13 +38,15 @@ enum {
kTemporalUpdateAltref | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF,
kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY,
kTemporalUpdateNoneNoRefAltref = kTemporalUpdateNone | VP8_EFLAG_NO_REF_ARF,
kTemporalUpdateNoneNoRefGoldenRefAltRef =
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY,
kTemporalUpdateGoldenWithoutDependencyRefAltRef =
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
@@ -133,12 +136,14 @@ class RealTimeTemporalLayers : public TemporalLayers {
layer_ids_length_ = sizeof(layer_ids) / sizeof(*layer_ids);
static const int encode_flags[] = {
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateNone
- };
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone};
encode_flags_length_ = sizeof(encode_flags) / sizeof(*layer_ids);
encode_flags_ = encode_flags;
@@ -153,12 +158,14 @@ class RealTimeTemporalLayers : public TemporalLayers {
layer_ids_length_ = sizeof(layer_ids) / sizeof(*layer_ids);
static const int encode_flags[] = {
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateNoneNoRefGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef, kTemporalUpdateNone,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateNone,
- kTemporalUpdateGoldenRefAltRef, kTemporalUpdateNone
- };
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateNoneNoRefGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateNone};
encode_flags_length_ = sizeof(encode_flags) / sizeof(*layer_ids);
encode_flags_ = encode_flags;
@@ -172,8 +179,8 @@ class RealTimeTemporalLayers : public TemporalLayers {
assert(false);
return false;
}
- memcpy(
- cfg->ts_layer_id, layer_ids_, sizeof(unsigned int) * layer_ids_length_);
+ memcpy(cfg->ts_layer_id, layer_ids_,
+ sizeof(unsigned int) * layer_ids_length_);
return true;
}
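One detail worth flagging in the context lines above: encode_flags_length_ is computed as sizeof(encode_flags) / sizeof(*layer_ids). The result is correct only because both arrays happen to have int elements; dividing by sizeof(*encode_flags) would state the intent directly. A small helper of the arraysize() kind makes the mismatch impossible; a sketch:

#include <stddef.h>

// Sketch: deriving the element count from the array itself means the
// divisor can never name the wrong array.
template <typename T, size_t N>
constexpr size_t ArraySize(T (&)[N]) {
  return N;
}
// e.g. encode_flags_length_ = ArraySize(encode_flags);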
diff --git a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc
index a922e35712..1838e32eb7 100644
--- a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc
@@ -25,8 +25,7 @@ ReferencePictureSelection::ReferencePictureSelection()
last_sent_ref_update_time_(0),
established_ref_picture_id_(0),
last_refresh_time_(0),
- rtt_(0) {
-}
+ rtt_(0) {}
void ReferencePictureSelection::Init() {
update_golden_next_ = true;
@@ -62,7 +61,8 @@ bool ReferencePictureSelection::ReceivedSLI(uint32_t now_ts) {
return send_refresh;
}
-int ReferencePictureSelection::EncodeFlags(int picture_id, bool send_refresh,
+int ReferencePictureSelection::EncodeFlags(int picture_id,
+ bool send_refresh,
uint32_t now_ts) {
int flags = 0;
// We can't refresh the decoder until we have established the key frame.
@@ -87,12 +87,12 @@ int ReferencePictureSelection::EncodeFlags(int picture_id, bool send_refresh,
received_ack_) {
flags |= VP8_EFLAG_NO_REF_LAST; // Don't reference the last frame.
if (update_golden_next_) {
- flags |= VP8_EFLAG_FORCE_GF; // Update the golden reference.
+ flags |= VP8_EFLAG_FORCE_GF; // Update the golden reference.
flags |= VP8_EFLAG_NO_UPD_ARF; // Don't update alt-ref.
- flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
+ flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
} else {
- flags |= VP8_EFLAG_FORCE_ARF; // Update the alt-ref reference.
- flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
+ flags |= VP8_EFLAG_FORCE_ARF; // Update the alt-ref reference.
+ flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
flags |= VP8_EFLAG_NO_REF_ARF; // Don't reference the alt-ref frame.
}
last_sent_ref_picture_id_ = picture_id;
@@ -103,9 +103,9 @@ int ReferencePictureSelection::EncodeFlags(int picture_id, bool send_refresh,
if (established_golden_)
flags |= VP8_EFLAG_NO_REF_ARF; // Don't reference the alt-ref frame.
else
- flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
- flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
- flags |= VP8_EFLAG_NO_UPD_ARF; // Don't update the alt-ref frame.
+ flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
+ flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
+ flags |= VP8_EFLAG_NO_UPD_ARF; // Don't update the alt-ref frame.
}
return flags;
}
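The value EncodeFlags() assembles above is a plain bitmask of VP8_EFLAG_* constants that is ultimately handed to libvpx for the frame being encoded. A minimal sketch of that hand-off, where the setup of |encoder|, |raw_image|, |pts| and the RTP timestamp is assumed:

// Sketch: consuming ReferencePictureSelection's per-frame flags.
const int flags = rps.EncodeFlags(picture_id, send_refresh, now_rtp_ts);
const vpx_codec_err_t err = vpx_codec_encode(&encoder, raw_image, pts,
                                             1 /* duration */, flags,
                                             VPX_DL_REALTIME);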
diff --git a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc
index c6474e5bd1..742bb96e91 100644
--- a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc
@@ -22,25 +22,19 @@ static const uint32_t kMinUpdateInterval = 10;
// Should match the values set in reference_picture_selection.h
static const int kRtt = 10;
-static const int kNoPropagationGolden = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF;
-static const int kNoPropagationAltRef = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF;
-static const int kPropagateGolden = VP8_EFLAG_FORCE_GF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_REF_LAST;
-static const int kPropagateAltRef = VP8_EFLAG_FORCE_ARF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_REF_LAST;
-static const int kRefreshFromGolden = VP8_EFLAG_NO_REF_LAST |
- VP8_EFLAG_NO_REF_ARF;
-static const int kRefreshFromAltRef = VP8_EFLAG_NO_REF_LAST |
- VP8_EFLAG_NO_REF_GF;
-
+static const int kNoPropagationGolden =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
+static const int kNoPropagationAltRef =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
+static const int kPropagateGolden = VP8_EFLAG_FORCE_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST;
+static const int kPropagateAltRef = VP8_EFLAG_FORCE_ARF | VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_REF_ARF |
+ VP8_EFLAG_NO_REF_LAST;
+static const int kRefreshFromGolden =
+ VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_ARF;
+static const int kRefreshFromAltRef =
+ VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF;
class TestRPS : public ::testing::Test {
protected:
@@ -84,15 +78,15 @@ TEST_F(TestRPS, TestDecoderRefresh) {
EXPECT_EQ(rps_.ReceivedSLI(90 * time), true);
  // Enough time has elapsed since the previous reference propagation, so we
  // will get both a refresh from golden and a propagation of alt-ref.
- EXPECT_EQ(rps_.EncodeFlags(5, true, 90 * time), kRefreshFromGolden |
- kPropagateAltRef);
+ EXPECT_EQ(rps_.EncodeFlags(5, true, 90 * time),
+ kRefreshFromGolden | kPropagateAltRef);
rps_.ReceivedRPSI(5);
time += kRtt + 1;
// Enough time for a new refresh, but not enough time for a reference
// propagation.
EXPECT_EQ(rps_.ReceivedSLI(90 * time), true);
- EXPECT_EQ(rps_.EncodeFlags(6, true, 90 * time), kRefreshFromAltRef |
- kNoPropagationAltRef);
+ EXPECT_EQ(rps_.EncodeFlags(6, true, 90 * time),
+ kRefreshFromAltRef | kNoPropagationAltRef);
}
TEST_F(TestRPS, TestWrap) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
index 0fbb2a6c40..536587a13e 100644
--- a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
@@ -11,10 +11,12 @@
#include <stdlib.h>
+#include <algorithm>
+
#include "webrtc/base/checks.h"
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
@@ -188,7 +190,7 @@ void ScreenshareLayers::FrameEncoded(unsigned int size,
}
void ScreenshareLayers::PopulateCodecSpecific(bool base_layer_sync,
- CodecSpecificInfoVP8 *vp8_info,
+ CodecSpecificInfoVP8* vp8_info,
uint32_t timestamp) {
int64_t unwrapped_timestamp = time_wrap_handler_.Unwrap(timestamp);
if (number_of_temporal_layers_ == 1) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
index 90a8b1b883..7628758209 100644
--- a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
+++ b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
@@ -15,7 +15,7 @@
#include "webrtc/base/timeutils.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
-#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
+#include "webrtc/modules/video_coding/utility/frame_dropper.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
index 628e336568..f31ed5e4d8 100644
--- a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
@@ -12,9 +12,9 @@
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h"
-#include "webrtc/modules/video_coding/utility/include/mock/mock_frame_dropper.h"
+#include "webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h"
using ::testing::_;
using ::testing::NiceMock;
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
index 5dc4ac78f1..40e438f7e4 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
@@ -215,9 +215,7 @@ int SimulcastEncoderAdapter::InitEncode(const VideoCodec* inst,
}
VideoEncoder* encoder = factory_->Create();
- ret = encoder->InitEncode(&stream_codec,
- number_of_cores,
- max_payload_size);
+ ret = encoder->InitEncode(&stream_codec, number_of_cores, max_payload_size);
if (ret < 0) {
Release();
return ret;
@@ -284,35 +282,25 @@ int SimulcastEncoderAdapter::Encode(
// scale it to match what the encoder expects (below).
if ((dst_width == src_width && dst_height == src_height) ||
input_image.IsZeroSize()) {
- streaminfos_[stream_idx].encoder->Encode(input_image,
- codec_specific_info,
+ streaminfos_[stream_idx].encoder->Encode(input_image, codec_specific_info,
&stream_frame_types);
} else {
VideoFrame dst_frame;
      // Make sure the destination frame is of sufficient size.
      // Stride values are aligned based on the width.
- dst_frame.CreateEmptyFrame(dst_width, dst_height,
- dst_width, (dst_width + 1) / 2,
- (dst_width + 1) / 2);
- libyuv::I420Scale(input_image.buffer(kYPlane),
- input_image.stride(kYPlane),
- input_image.buffer(kUPlane),
- input_image.stride(kUPlane),
- input_image.buffer(kVPlane),
- input_image.stride(kVPlane),
- src_width, src_height,
- dst_frame.buffer(kYPlane),
- dst_frame.stride(kYPlane),
- dst_frame.buffer(kUPlane),
- dst_frame.stride(kUPlane),
- dst_frame.buffer(kVPlane),
- dst_frame.stride(kVPlane),
- dst_width, dst_height,
- libyuv::kFilterBilinear);
+ dst_frame.CreateEmptyFrame(dst_width, dst_height, dst_width,
+ (dst_width + 1) / 2, (dst_width + 1) / 2);
+ libyuv::I420Scale(
+ input_image.buffer(kYPlane), input_image.stride(kYPlane),
+ input_image.buffer(kUPlane), input_image.stride(kUPlane),
+ input_image.buffer(kVPlane), input_image.stride(kVPlane), src_width,
+ src_height, dst_frame.buffer(kYPlane), dst_frame.stride(kYPlane),
+ dst_frame.buffer(kUPlane), dst_frame.stride(kUPlane),
+ dst_frame.buffer(kVPlane), dst_frame.stride(kVPlane), dst_width,
+ dst_height, libyuv::kFilterBilinear);
dst_frame.set_timestamp(input_image.timestamp());
dst_frame.set_render_time_ms(input_image.render_time_ms());
- streaminfos_[stream_idx].encoder->Encode(dst_frame,
- codec_specific_info,
+ streaminfos_[stream_idx].encoder->Encode(dst_frame, codec_specific_info,
&stream_frame_types);
}
}
@@ -426,16 +414,17 @@ uint32_t SimulcastEncoderAdapter::GetStreamBitrate(
// current stream's |targetBitrate|, otherwise it's capped by |maxBitrate|.
if (stream_idx < codec_.numberOfSimulcastStreams - 1) {
unsigned int max_rate = codec_.simulcastStream[stream_idx].maxBitrate;
- if (new_bitrate_kbit >= SumStreamTargetBitrate(stream_idx + 1, codec_) +
- codec_.simulcastStream[stream_idx + 1].minBitrate) {
+ if (new_bitrate_kbit >=
+ SumStreamTargetBitrate(stream_idx + 1, codec_) +
+ codec_.simulcastStream[stream_idx + 1].minBitrate) {
max_rate = codec_.simulcastStream[stream_idx].targetBitrate;
}
return std::min(new_bitrate_kbit - sum_target_lower_streams, max_rate);
} else {
- // For the highest stream (highest resolution), the |targetBitRate| and
- // |maxBitrate| are not used. Any excess bitrate (above the targets of
- // all lower streams) is given to this (highest resolution) stream.
- return new_bitrate_kbit - sum_target_lower_streams;
+ // For the highest stream (highest resolution), the |targetBitRate| and
+ // |maxBitrate| are not used. Any excess bitrate (above the targets of
+ // all lower streams) is given to this (highest resolution) stream.
+ return new_bitrate_kbit - sum_target_lower_streams;
}
} else {
// Not enough bitrate for this stream.
@@ -507,4 +496,11 @@ bool SimulcastEncoderAdapter::SupportsNativeHandle() const {
return streaminfos_[0].encoder->SupportsNativeHandle();
}
+const char* SimulcastEncoderAdapter::ImplementationName() const {
+ // We should not be calling this method before streaminfos_ are configured.
+ RTC_DCHECK(!streaminfos_.empty());
+ // TODO(pbos): Support multiple implementation names for different encoders.
+ return streaminfos_[0].encoder->ImplementationName();
+}
+
} // namespace webrtc
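The comments in the GetStreamBitrate hunk encode the allocation rule: a lower stream is capped at its |targetBitrate| once the remaining bitrate also covers the next stream's |minBitrate| (otherwise at its |maxBitrate|), and the highest stream absorbs all excess above the lower streams' targets. A worked example with the per-stream targets {100, 450, 1000} and minimums {50, 150, 600} kbps used by the simulcast tests later in this diff:

  Total 1149 kbps (= 100 + 450 + 600 - 1): stream 2 cannot reach its minimum
  on top of streams 0-1's targets, so it is disabled; stream 0 is capped at
  its target and stream 1 falls back to its |maxBitrate| cap.

  Total 1150 kbps: the threshold is met, so streams 0 and 1 are capped at
  their targets and stream 2 receives the excess, 1150 - (100 + 450) = 600.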
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
index afec024abc..05a96c7336 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
@@ -59,6 +59,7 @@ class SimulcastEncoderAdapter : public VP8Encoder {
int GetTargetFramerate() override;
bool SupportsNativeHandle() const override;
+ const char* ImplementationName() const override;
private:
struct StreamInfo {
@@ -71,8 +72,8 @@ class SimulcastEncoderAdapter : public VP8Encoder {
send_stream(true) {}
StreamInfo(VideoEncoder* encoder,
EncodedImageCallback* callback,
- unsigned short width,
- unsigned short height,
+ uint16_t width,
+ uint16_t height,
bool send_stream)
: encoder(encoder),
callback(callback),
@@ -83,8 +84,8 @@ class SimulcastEncoderAdapter : public VP8Encoder {
// Deleted by SimulcastEncoderAdapter::Release().
VideoEncoder* encoder;
EncodedImageCallback* callback;
- unsigned short width;
- unsigned short height;
+ uint16_t width;
+ uint16_t height;
bool key_frame_request;
bool send_stream;
};
@@ -118,4 +119,3 @@ class SimulcastEncoderAdapter : public VP8Encoder {
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_ENCODER_ADAPTER_H_
-
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
index 218b5e2d1a..86b8e0b345 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
@@ -11,7 +11,7 @@
#include <vector>
#include "testing/gmock/include/gmock/gmock.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h"
#include "webrtc/modules/video_coding/codecs/vp8/vp8_factory.h"
@@ -27,12 +27,10 @@ static VP8Encoder* CreateTestEncoderAdapter() {
class TestSimulcastEncoderAdapter : public TestVp8Simulcast {
public:
TestSimulcastEncoderAdapter()
- : TestVp8Simulcast(CreateTestEncoderAdapter(),
- VP8Decoder::Create()) {}
+ : TestVp8Simulcast(CreateTestEncoderAdapter(), VP8Decoder::Create()) {}
+
protected:
- virtual void SetUp() {
- TestVp8Simulcast::SetUp();
- }
+ virtual void SetUp() { TestVp8Simulcast::SetUp(); }
virtual void TearDown() {
TestVp8Simulcast::TearDown();
VP8EncoderFactoryConfig::set_use_simulcast_adapter(false);
@@ -97,8 +95,7 @@ TEST_F(TestSimulcastEncoderAdapter, TestSpatioTemporalLayers321PatternEncoder) {
// TODO(ronghuawu): Enable this test when SkipEncodingUnusedStreams option is
// implemented for SimulcastEncoderAdapter.
-TEST_F(TestSimulcastEncoderAdapter,
- DISABLED_TestSkipEncodingUnusedStreams) {
+TEST_F(TestSimulcastEncoderAdapter, DISABLED_TestSkipEncodingUnusedStreams) {
TestVp8Simulcast::TestSkipEncodingUnusedStreams();
}
@@ -127,23 +124,17 @@ class MockVideoEncoder : public VideoEncoder {
return 0;
}
- int32_t Release() override {
- return 0;
- }
+ int32_t Release() override { return 0; }
int32_t SetRates(uint32_t newBitRate, uint32_t frameRate) override {
return 0;
}
- MOCK_METHOD2(SetChannelParameters,
- int32_t(uint32_t packetLoss, int64_t rtt));
+ MOCK_METHOD2(SetChannelParameters, int32_t(uint32_t packetLoss, int64_t rtt));
- bool SupportsNativeHandle() const override {
- return supports_native_handle_;
- }
+ bool SupportsNativeHandle() const override { return supports_native_handle_; }
- virtual ~MockVideoEncoder() {
- }
+ virtual ~MockVideoEncoder() {}
const VideoCodec& codec() const { return codec_; }
@@ -200,7 +191,8 @@ class TestSimulcastEncoderAdapterFakeHelper {
EXPECT_TRUE(!factory_->encoders().empty());
for (size_t i = 0; i < factory_->encoders().size(); ++i) {
EXPECT_CALL(*factory_->encoders()[i],
- SetChannelParameters(packetLoss, rtt)).Times(1);
+ SetChannelParameters(packetLoss, rtt))
+ .Times(1);
}
}
@@ -249,8 +241,7 @@ class TestSimulcastEncoderAdapterFake : public ::testing::Test,
void SetupCodec() {
TestVp8Simulcast::DefaultSettings(
- &codec_,
- static_cast<const int*>(kTestTemporalLayerProfile));
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile));
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
adapter_->RegisterEncodeCompleteCallback(this);
}
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc
index 373a55237f..f23affee41 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc
@@ -13,18 +13,14 @@
namespace webrtc {
namespace testing {
-class TestVp8Impl
- : public TestVp8Simulcast {
+class TestVp8Impl : public TestVp8Simulcast {
public:
TestVp8Impl()
- : TestVp8Simulcast(VP8Encoder::Create(), VP8Decoder::Create()) {}
+ : TestVp8Simulcast(VP8Encoder::Create(), VP8Decoder::Create()) {}
+
protected:
- virtual void SetUp() {
- TestVp8Simulcast::SetUp();
- }
- virtual void TearDown() {
- TestVp8Simulcast::TearDown();
- }
+ virtual void SetUp() { TestVp8Simulcast::SetUp(); }
+ virtual void TearDown() { TestVp8Simulcast::TearDown(); }
};
TEST_F(TestVp8Impl, TestKeyFrameRequestsOnAllStreams) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
index e4fc986545..7a7a2c253b 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
@@ -14,10 +14,11 @@
#include <algorithm>
#include <vector>
+#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
#include "webrtc/video_frame.h"
@@ -43,10 +44,8 @@ const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};
-template<typename T> void SetExpectedValues3(T value0,
- T value1,
- T value2,
- T* expected_values) {
+template <typename T>
+void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
expected_values[0] = value0;
expected_values[1] = value1;
expected_values[2] = value2;
@@ -54,15 +53,14 @@ template<typename T> void SetExpectedValues3(T value0,
class Vp8TestEncodedImageCallback : public EncodedImageCallback {
public:
- Vp8TestEncodedImageCallback()
- : picture_id_(-1) {
+ Vp8TestEncodedImageCallback() : picture_id_(-1) {
memset(temporal_layer_, -1, sizeof(temporal_layer_));
memset(layer_sync_, false, sizeof(layer_sync_));
}
~Vp8TestEncodedImageCallback() {
- delete [] encoded_key_frame_._buffer;
- delete [] encoded_frame_._buffer;
+ delete[] encoded_key_frame_._buffer;
+ delete[] encoded_frame_._buffer;
}
virtual int32_t Encoded(const EncodedImage& encoded_image,
@@ -71,22 +69,20 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
// Only store the base layer.
if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
if (encoded_image._frameType == kVideoFrameKey) {
- delete [] encoded_key_frame_._buffer;
+ delete[] encoded_key_frame_._buffer;
encoded_key_frame_._buffer = new uint8_t[encoded_image._size];
encoded_key_frame_._size = encoded_image._size;
encoded_key_frame_._length = encoded_image._length;
encoded_key_frame_._frameType = kVideoFrameKey;
encoded_key_frame_._completeFrame = encoded_image._completeFrame;
- memcpy(encoded_key_frame_._buffer,
- encoded_image._buffer,
+ memcpy(encoded_key_frame_._buffer, encoded_image._buffer,
encoded_image._length);
} else {
- delete [] encoded_frame_._buffer;
+ delete[] encoded_frame_._buffer;
encoded_frame_._buffer = new uint8_t[encoded_image._size];
encoded_frame_._size = encoded_image._size;
encoded_frame_._length = encoded_image._length;
- memcpy(encoded_frame_._buffer,
- encoded_image._buffer,
+ memcpy(encoded_frame_._buffer, encoded_image._buffer,
encoded_image._length);
}
}
@@ -97,8 +93,10 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
codec_specific_info->codecSpecific.VP8.temporalIdx;
return 0;
}
- void GetLastEncodedFrameInfo(int* picture_id, int* temporal_layer,
- bool* layer_sync, int stream) {
+ void GetLastEncodedFrameInfo(int* picture_id,
+ int* temporal_layer,
+ bool* layer_sync,
+ int stream) {
*picture_id = picture_id_;
*temporal_layer = temporal_layer_[stream];
*layer_sync = layer_sync_[stream];
@@ -120,10 +118,8 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
class Vp8TestDecodedImageCallback : public DecodedImageCallback {
public:
- Vp8TestDecodedImageCallback()
- : decoded_frames_(0) {
- }
- virtual int32_t Decoded(VideoFrame& decoded_image) {
+ Vp8TestDecodedImageCallback() : decoded_frames_(0) {}
+ int32_t Decoded(VideoFrame& decoded_image) override {
for (int i = 0; i < decoded_image.width(); ++i) {
EXPECT_NEAR(kColorY, decoded_image.buffer(kYPlane)[i], 1);
}
@@ -136,9 +132,11 @@ class Vp8TestDecodedImageCallback : public DecodedImageCallback {
decoded_frames_++;
return 0;
}
- int DecodedFrames() {
- return decoded_frames_;
+ int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
}
+ int DecodedFrames() { return decoded_frames_; }
private:
int decoded_frames_;
@@ -161,8 +159,7 @@ class SkipEncodingUnusedStreamsTest {
std::vector<unsigned int> configured_bitrates;
for (std::vector<TemporalLayers*>::const_iterator it =
spy_factory->spying_layers_.begin();
- it != spy_factory->spying_layers_.end();
- ++it) {
+ it != spy_factory->spying_layers_.end(); ++it) {
configured_bitrates.push_back(
static_cast<SpyingTemporalLayers*>(*it)->configured_bitrate_);
}
@@ -185,8 +182,8 @@ class SkipEncodingUnusedStreamsTest {
int framerate,
vpx_codec_enc_cfg_t* cfg) override {
configured_bitrate_ = bitrate_kbit;
- return layers_->ConfigureBitrates(
- bitrate_kbit, max_bitrate_kbit, framerate, cfg);
+ return layers_->ConfigureBitrates(bitrate_kbit, max_bitrate_kbit,
+ framerate, cfg);
}
void PopulateCodecSpecific(bool base_layer_sync,
@@ -228,16 +225,15 @@ class SkipEncodingUnusedStreamsTest {
class TestVp8Simulcast : public ::testing::Test {
public:
TestVp8Simulcast(VP8Encoder* encoder, VP8Decoder* decoder)
- : encoder_(encoder),
- decoder_(decoder) {}
+ : encoder_(encoder), decoder_(decoder) {}
  // Creates a VideoFrame from |plane_colors|.
static void CreateImage(VideoFrame* frame, int plane_colors[kNumOfPlanes]) {
for (int plane_num = 0; plane_num < kNumOfPlanes; ++plane_num) {
- int width = (plane_num != kYPlane ? (frame->width() + 1) / 2 :
- frame->width());
- int height = (plane_num != kYPlane ? (frame->height() + 1) / 2 :
- frame->height());
+ int width =
+ (plane_num != kYPlane ? (frame->width() + 1) / 2 : frame->width());
+ int height =
+ (plane_num != kYPlane ? (frame->height() + 1) / 2 : frame->height());
PlaneType plane_type = static_cast<PlaneType>(plane_num);
uint8_t* data = frame->buffer(plane_type);
// Setting allocated area to zero - setting only image size to
@@ -267,24 +263,15 @@ class TestVp8Simulcast : public ::testing::Test {
settings->height = kDefaultHeight;
settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
ASSERT_EQ(3, kNumberOfSimulcastStreams);
- ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4,
- kMaxBitrates[0],
- kMinBitrates[0],
- kTargetBitrates[0],
- &settings->simulcastStream[0],
- temporal_layer_profile[0]);
- ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2,
- kMaxBitrates[1],
- kMinBitrates[1],
- kTargetBitrates[1],
- &settings->simulcastStream[1],
- temporal_layer_profile[1]);
- ConfigureStream(kDefaultWidth, kDefaultHeight,
- kMaxBitrates[2],
- kMinBitrates[2],
- kTargetBitrates[2],
- &settings->simulcastStream[2],
- temporal_layer_profile[2]);
+ ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
+ kMinBitrates[0], kTargetBitrates[0],
+ &settings->simulcastStream[0], temporal_layer_profile[0]);
+ ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
+ kMinBitrates[1], kTargetBitrates[1],
+ &settings->simulcastStream[1], temporal_layer_profile[1]);
+ ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
+ kMinBitrates[2], kTargetBitrates[2],
+ &settings->simulcastStream[2], temporal_layer_profile[2]);
settings->codecSpecific.VP8.resilience = kResilientStream;
settings->codecSpecific.VP8.denoisingOn = true;
settings->codecSpecific.VP8.errorConcealmentOn = false;
@@ -312,9 +299,7 @@ class TestVp8Simulcast : public ::testing::Test {
}
protected:
- virtual void SetUp() {
- SetUpCodec(kDefaultTemporalLayerProfile);
- }
+ virtual void SetUp() { SetUpCodec(kDefaultTemporalLayerProfile); }
virtual void SetUpCodec(const int* temporal_layer_profile) {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
@@ -323,14 +308,14 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1));
int half_width = (kDefaultWidth + 1) / 2;
- input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight,
- kDefaultWidth, half_width, half_width);
+ input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, kDefaultWidth,
+ half_width, half_width);
memset(input_frame_.buffer(kYPlane), 0,
- input_frame_.allocated_size(kYPlane));
+ input_frame_.allocated_size(kYPlane));
memset(input_frame_.buffer(kUPlane), 0,
- input_frame_.allocated_size(kUPlane));
+ input_frame_.allocated_size(kUPlane));
memset(input_frame_.buffer(kVPlane), 0,
- input_frame_.allocated_size(kVPlane));
+ input_frame_.allocated_size(kVPlane));
}
virtual void TearDown() {
@@ -342,28 +327,34 @@ class TestVp8Simulcast : public ::testing::Test {
ASSERT_GE(expected_video_streams, 0);
ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
if (expected_video_streams >= 1) {
- EXPECT_CALL(encoder_callback_, Encoded(
- AllOf(Field(&EncodedImage::_frameType, frame_type),
- Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
- Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)), _, _)
- )
+ EXPECT_CALL(
+ encoder_callback_,
+ Encoded(
+ AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
+ _, _))
.Times(1)
.WillRepeatedly(Return(0));
}
if (expected_video_streams >= 2) {
- EXPECT_CALL(encoder_callback_, Encoded(
- AllOf(Field(&EncodedImage::_frameType, frame_type),
- Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
- Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)), _, _)
- )
+ EXPECT_CALL(
+ encoder_callback_,
+ Encoded(
+ AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
+ _, _))
.Times(1)
.WillRepeatedly(Return(0));
}
if (expected_video_streams >= 3) {
- EXPECT_CALL(encoder_callback_, Encoded(
- AllOf(Field(&EncodedImage::_frameType, frame_type),
- Field(&EncodedImage::_encodedWidth, kDefaultWidth),
- Field(&EncodedImage::_encodedHeight, kDefaultHeight)), _, _))
+ EXPECT_CALL(
+ encoder_callback_,
+ Encoded(AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
+ _, _))
.Times(1)
.WillRepeatedly(Return(0));
}
@@ -477,8 +468,8 @@ class TestVp8Simulcast : public ::testing::Test {
void TestPaddingOneStreamTwoMaxedOut() {
    // We are just below the limit for sending the third stream, so we should
    // get the first stream's rate maxed out at |targetBitrate| and the second
    // at |maxBitrate|.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2] - 1, 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 2);
@@ -491,8 +482,8 @@ class TestVp8Simulcast : public ::testing::Test {
void TestSendAllStreams() {
// We have just enough to send all streams.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2], 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 3);
@@ -505,8 +496,7 @@ class TestVp8Simulcast : public ::testing::Test {
void TestDisablingStreams() {
// We should get three media streams.
- encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] +
- kMaxBitrates[2], 30);
+ encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 3);
@@ -517,8 +507,8 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
// We should only get two streams and padding for one.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2] / 2, 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
ExpectStreams(kVideoFrameDelta, 2);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
@@ -537,16 +527,16 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
// We should only get two streams and padding for one.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2] / 2, 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
// We get a key frame because a new stream is being enabled.
ExpectStreams(kVideoFrameKey, 2);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
// We should get all three streams.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kTargetBitrates[2], 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
// We get a key frame because a new stream is being enabled.
ExpectStreams(kVideoFrameKey, 3);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
@@ -571,20 +561,20 @@ class TestVp8Simulcast : public ::testing::Test {
input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
settings_.width, half_width, half_width);
memset(input_frame_.buffer(kYPlane), 0,
- input_frame_.allocated_size(kYPlane));
+ input_frame_.allocated_size(kYPlane));
memset(input_frame_.buffer(kUPlane), 0,
- input_frame_.allocated_size(kUPlane));
+ input_frame_.allocated_size(kUPlane));
memset(input_frame_.buffer(kVPlane), 0,
- input_frame_.allocated_size(kVPlane));
+ input_frame_.allocated_size(kVPlane));
// The for loop above did not set the bitrate of the highest layer.
- settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
- maxBitrate = 0;
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1]
+ .maxBitrate = 0;
// The highest layer has to correspond to the non-simulcast resolution.
- settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
- width = settings_.width;
- settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
- height = settings_.height;
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
+ settings_.width;
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
+ settings_.height;
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
// Encode one frame and verify.
@@ -612,21 +602,17 @@ class TestVp8Simulcast : public ::testing::Test {
input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
settings_.width, half_width, half_width);
memset(input_frame_.buffer(kYPlane), 0,
- input_frame_.allocated_size(kYPlane));
+ input_frame_.allocated_size(kYPlane));
memset(input_frame_.buffer(kUPlane), 0,
- input_frame_.allocated_size(kUPlane));
+ input_frame_.allocated_size(kUPlane));
memset(input_frame_.buffer(kVPlane), 0,
- input_frame_.allocated_size(kVPlane));
+ input_frame_.allocated_size(kVPlane));
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
}
- void TestSwitchingToOneStream() {
- SwitchingToOneStream(1024, 768);
- }
+ void TestSwitchingToOneStream() { SwitchingToOneStream(1024, 768); }
- void TestSwitchingToOneOddStream() {
- SwitchingToOneStream(1023, 769);
- }
+ void TestSwitchingToOneOddStream() { SwitchingToOneStream(1023, 769); }
void TestRPSIEncoder() {
Vp8TestEncodedImageCallback encoder_callback;
@@ -777,67 +763,55 @@ class TestVp8Simulcast : public ::testing::Test {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams.
- int expected_temporal_idx[3] = { -1, -1, -1};
+ int expected_temporal_idx[3] = {-1, -1, -1};
bool expected_layer_sync[3] = {false, false, false};
// First frame: #0.
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #1.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #2.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #3.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #4.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #5.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
}
// Test the layer pattern and sync flag for various spatial-temporal patterns.
@@ -858,67 +832,55 @@ class TestVp8Simulcast : public ::testing::Test {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams.
- int expected_temporal_idx[3] = { -1, -1, -1};
+ int expected_temporal_idx[3] = {-1, -1, -1};
bool expected_layer_sync[3] = {false, false, false};
// First frame: #0.
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #1.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #2.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #3.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #4.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #5.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
}
void TestStrideEncodeDecode() {
@@ -932,8 +894,8 @@ class TestVp8Simulcast : public ::testing::Test {
// 1. stride > width 2. stride_y != stride_uv/2
int stride_y = kDefaultWidth + 20;
int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
- input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight,
- stride_y, stride_uv, stride_uv);
+ input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, stride_y,
+ stride_uv, stride_uv);
// Set color.
int plane_offset[kNumOfPlanes];
plane_offset[kYPlane] = kColorY;
@@ -963,10 +925,9 @@ class TestVp8Simulcast : public ::testing::Test {
void TestSkipEncodingUnusedStreams() {
SkipEncodingUnusedStreamsTest test;
std::vector<unsigned int> configured_bitrate =
- test.RunTest(encoder_.get(),
- &settings_,
- 1); // Target bit rate 1, to force all streams but the
- // base one to be exceeding bandwidth constraints.
+ test.RunTest(encoder_.get(), &settings_,
+ 1); // Target bit rate 1, to force all streams but the
+                        // base one to exceed bandwidth constraints.
EXPECT_EQ(static_cast<size_t>(kNumberOfSimulcastStreams),
configured_bitrate.size());
@@ -975,8 +936,7 @@ class TestVp8Simulcast : public ::testing::Test {
int stream = 0;
for (std::vector<unsigned int>::const_iterator it =
configured_bitrate.begin();
- it != configured_bitrate.end();
- ++it) {
+ it != configured_bitrate.end(); ++it) {
if (stream == 0) {
EXPECT_EQ(min_bitrate, *it);
} else {
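The frame-by-frame expectations in the temporal-layer tests above (temporal_idx 0, 2, 1, 2, 0, 2 across frames #0-#5, with layer_sync true only while each layer first appears) are simply the 3-layer cadence read out one frame at a time. A sketch of deriving the expected layer id from the frame counter, assuming the periodicity-4 pattern:

// Sketch: matches SetExpectedValues3's per-frame values for frames #0-#5.
const int kThreeLayerPattern[4] = {0, 2, 1, 2};
int ExpectedTemporalIdx(int frame_index) {
  return kThreeLayerPattern[frame_index % 4];
}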
diff --git a/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h b/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h
index 7607210d5c..47112c64aa 100644
--- a/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h
+++ b/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h
@@ -14,7 +14,8 @@
#include "vpx/vpx_encoder.h"
-#include "webrtc/common_video/interface/video_image.h"
+#include "webrtc/common.h"
+#include "webrtc/common_video/include/video_image.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -30,6 +31,8 @@ class TemporalLayers {
virtual ~Factory() {}
virtual TemporalLayers* Create(int temporal_layers,
uint8_t initial_tl0_pic_idx) const;
+ static const ConfigOptionID identifier =
+ ConfigOptionID::kTemporalLayersFactory;
};
virtual ~TemporalLayers() {}
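The new identifier member ties TemporalLayers::Factory into webrtc::Config (hence the added webrtc/common.h include): Config stores one option instance per ConfigOptionID and looks it up by that key. A sketch of the expected usage, assuming the Config::Set/Get API declared in webrtc/common.h:

// Sketch: registering and retrieving a factory through webrtc::Config.
webrtc::Config config;
config.Set(new webrtc::TemporalLayers::Factory());  // Config takes ownership.
const webrtc::TemporalLayers::Factory& factory =
    config.Get<webrtc::TemporalLayers::Factory>();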
diff --git a/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index 5ec674f16a..c3d77da063 100644
--- a/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -11,12 +11,12 @@
#include <stdio.h>
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
@@ -78,7 +78,11 @@ class Vp8UnitTestDecodeCompleteCallback : public webrtc::DecodedImageCallback {
public:
explicit Vp8UnitTestDecodeCompleteCallback(VideoFrame* frame)
: decoded_frame_(frame), decode_complete(false) {}
- int Decoded(webrtc::VideoFrame& frame);
+ int32_t Decoded(VideoFrame& frame) override;
+ int32_t Decoded(VideoFrame& frame, int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
+ }
bool DecodeComplete();
private:
@@ -216,7 +220,12 @@ TEST_F(TestVp8Impl, EncoderParameterTest) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->InitDecode(&codec_inst_, 1));
}
-TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(AlignedStrideEncodeDecode)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_AlignedStrideEncodeDecode DISABLED_AlignedStrideEncodeDecode
+#else
+#define MAYBE_AlignedStrideEncodeDecode AlignedStrideEncodeDecode
+#endif
+TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
SetUpEncodeDecode();
encoder_->Encode(input_frame_, NULL, NULL);
EXPECT_GT(WaitForEncodedFrame(), 0u);
@@ -232,7 +241,12 @@ TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(AlignedStrideEncodeDecode)) {
EXPECT_EQ(kTestNtpTimeMs, decoded_frame_.ntp_time_ms());
}
-TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(DecodeWithACompleteKeyFrame)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_DecodeWithACompleteKeyFrame DISABLED_DecodeWithACompleteKeyFrame
+#else
+#define MAYBE_DecodeWithACompleteKeyFrame DecodeWithACompleteKeyFrame
+#endif
+TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
SetUpEncodeDecode();
encoder_->Encode(input_frame_, NULL, NULL);
EXPECT_GT(WaitForEncodedFrame(), 0u);
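The two hunks above replace the old DISABLED_ON_ANDROID() wrapper from gtest_disable.h with gtest's conventional MAYBE_ idiom, one macro pair per test. The shape generalizes to any platform guard; a sketch with a hypothetical test name:

#if defined(WEBRTC_ANDROID)
#define MAYBE_SomeTest DISABLED_SomeTest
#else
#define MAYBE_SomeTest SomeTest
#endif
// gtest skips any test whose final name carries the DISABLED_ prefix, so
// SomeTest runs everywhere except Android.
TEST_F(TestVp8Impl, MAYBE_SomeTest) {}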
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h b/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h
index 84745ea5a1..52f8aa30b8 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h
@@ -32,4 +32,3 @@ class VP8EncoderFactoryConfig {
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_FACTORY_H_
-
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
index 029ccd1f27..5a04f6a43d 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -16,7 +16,7 @@
#include <algorithm>
// NOTE(ajm): Path provided by gyp.
-#include "libyuv/scale.h" // NOLINT
+#include "libyuv/scale.h" // NOLINT
#include "libyuv/convert.h" // NOLINT
#include "webrtc/base/checks.h"
@@ -24,8 +24,8 @@
#include "webrtc/common.h"
#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
@@ -68,10 +68,9 @@ std::vector<int> GetStreamBitratesKbps(const VideoCodec& codec,
std::vector<int> bitrates_kbps(codec.numberOfSimulcastStreams);
// Allocate min -> target bitrates as long as we have bitrate to spend.
size_t last_active_stream = 0;
- for (size_t i = 0;
- i < static_cast<size_t>(codec.numberOfSimulcastStreams) &&
- bitrate_to_allocate_kbps >=
- static_cast<int>(codec.simulcastStream[i].minBitrate);
+ for (size_t i = 0; i < static_cast<size_t>(codec.numberOfSimulcastStreams) &&
+ bitrate_to_allocate_kbps >=
+ static_cast<int>(codec.simulcastStream[i].minBitrate);
++i) {
last_active_stream = i;
int allocated_bitrate_kbps =
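For reference, a simplified sketch (not the verbatim WebRTC function) of the
allocation policy this loop implements: streams are activated in order as
long as the remaining budget covers their minimum bitrate, each active stream
is given up to its target, and the remainder goes to the last active stream:

    #include <algorithm>
    #include <vector>

    struct StreamCfg { int min_kbps; int target_kbps; };

    std::vector<int> AllocateKbps(const std::vector<StreamCfg>& streams,
                                  int budget_kbps) {
      std::vector<int> alloc(streams.size(), 0);
      size_t last_active = 0;
      for (size_t i = 0;
           i < streams.size() && budget_kbps >= streams[i].min_kbps; ++i) {
        last_active = i;
        alloc[i] = std::min(streams[i].target_kbps, budget_kbps);
        budget_kbps -= alloc[i];
      }
      // Any remainder is spent on the last stream that could be activated.
      alloc[last_active] += budget_kbps;
      return alloc;
    }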
@@ -132,7 +131,7 @@ bool ValidSimulcastResolutions(const VideoCodec& codec, int num_streams) {
return true;
}
-int NumStreamsDisabled(std::vector<bool>& streams) {
+int NumStreamsDisabled(const std::vector<bool>& streams) {
int num_disabled = 0;
for (bool stream : streams) {
if (!stream)
@@ -183,7 +182,7 @@ int VP8EncoderImpl::Release() {
while (!encoded_images_.empty()) {
EncodedImage& image = encoded_images_.back();
- delete [] image._buffer;
+ delete[] image._buffer;
encoded_images_.pop_back();
}
while (!encoders_.empty()) {
@@ -289,10 +288,8 @@ int VP8EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
target_bitrate = tl0_bitrate;
}
configurations_[i].rc_target_bitrate = target_bitrate;
- temporal_layers_[stream_idx]->ConfigureBitrates(target_bitrate,
- max_bitrate,
- framerate,
- &configurations_[i]);
+ temporal_layers_[stream_idx]->ConfigureBitrates(
+ target_bitrate, max_bitrate, framerate, &configurations_[i]);
if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -301,6 +298,10 @@ int VP8EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
return WEBRTC_VIDEO_CODEC_OK;
}
+const char* VP8EncoderImpl::ImplementationName() const {
+ return "libvpx";
+}
+
void VP8EncoderImpl::SetStreamState(bool send_stream,
int stream_idx) {
if (send_stream && !send_stream_[stream_idx]) {
@@ -311,8 +312,8 @@ void VP8EncoderImpl::SetStreamState(bool send_stream,
}
void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
- int num_temporal_layers,
- const VideoCodec& codec) {
+ int num_temporal_layers,
+ const VideoCodec& codec) {
const Config default_options;
const TemporalLayers::Factory& tl_factory =
(codec.extra_options ? codec.extra_options : &default_options)
@@ -330,15 +331,16 @@ void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
for (int i = 0; i < num_streams; ++i) {
// TODO(andresp): crash if layers is invalid.
int layers = codec.simulcastStream[i].numberOfTemporalLayers;
- if (layers < 1) layers = 1;
+ if (layers < 1)
+ layers = 1;
temporal_layers_.push_back(tl_factory.Create(layers, rand()));
}
}
}
int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
- int number_of_cores,
- size_t /*maxPayloadSize */) {
+ int number_of_cores,
+ size_t /*maxPayloadSize */) {
if (inst == NULL) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
@@ -375,12 +377,13 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
- int num_temporal_layers = doing_simulcast ?
- inst->simulcastStream[0].numberOfTemporalLayers :
- inst->codecSpecific.VP8.numberOfTemporalLayers;
+ int num_temporal_layers =
+ doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers
+ : inst->codecSpecific.VP8.numberOfTemporalLayers;
// TODO(andresp): crash if num temporal layers is bananas.
- if (num_temporal_layers < 1) num_temporal_layers = 1;
+ if (num_temporal_layers < 1)
+ num_temporal_layers = 1;
SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst);
feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;
@@ -410,7 +413,7 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
int idx = number_of_streams - 1;
for (int i = 0; i < (number_of_streams - 1); ++i, --idx) {
int gcd = GCD(inst->simulcastStream[idx].width,
- inst->simulcastStream[idx-1].width);
+ inst->simulcastStream[idx - 1].width);
downsampling_factors_[i].num = inst->simulcastStream[idx].width / gcd;
downsampling_factors_[i].den = inst->simulcastStream[idx - 1].width / gcd;
send_stream_[i] = false;
@@ -422,20 +425,20 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
}
for (int i = 0; i < number_of_streams; ++i) {
// Random start, 16 bits is enough.
- picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF;
+ picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF; // NOLINT
last_key_frame_picture_id_[i] = -1;
// allocate memory for encoded image
if (encoded_images_[i]._buffer != NULL) {
- delete [] encoded_images_[i]._buffer;
+ delete[] encoded_images_[i]._buffer;
}
- encoded_images_[i]._size = CalcBufferSize(kI420,
- codec_.width, codec_.height);
+ encoded_images_[i]._size =
+ CalcBufferSize(kI420, codec_.width, codec_.height);
encoded_images_[i]._buffer = new uint8_t[encoded_images_[i]._size];
encoded_images_[i]._completeFrame = true;
}
// populate encoder configuration with default values
- if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(),
- &configurations_[0], 0)) {
+ if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &configurations_[0],
+ 0)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
// setting the time base of the codec
@@ -459,8 +462,8 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
break;
case kResilientFrames:
#ifdef INDEPENDENT_PARTITIONS
- configurations_[0]-g_error_resilient = VPX_ERROR_RESILIENT_DEFAULT |
- VPX_ERROR_RESILIENT_PARTITIONS;
+      configurations_[0].g_error_resilient =
+          VPX_ERROR_RESILIENT_DEFAULT | VPX_ERROR_RESILIENT_PARTITIONS;
break;
#else
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; // Not supported
@@ -536,20 +539,18 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
// Determine number of threads based on the image size and #cores.
// TODO(fbarchard): Consider number of Simulcast layers.
- configurations_[0].g_threads = NumberOfThreads(configurations_[0].g_w,
- configurations_[0].g_h,
- number_of_cores);
+ configurations_[0].g_threads = NumberOfThreads(
+ configurations_[0].g_w, configurations_[0].g_h, number_of_cores);
// Creating a wrapper to the image - setting image data to NULL.
// Actual pointer will be set in encode. Setting align to 1, as it
// is meaningless (no memory allocation is done here).
- vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height,
- 1, NULL);
+ vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1,
+ NULL);
if (encoders_.size() == 1) {
configurations_[0].rc_target_bitrate = inst->startBitrate;
- temporal_layers_[0]->ConfigureBitrates(inst->startBitrate,
- inst->maxBitrate,
+ temporal_layers_[0]->ConfigureBitrates(inst->startBitrate, inst->maxBitrate,
inst->maxFramerate,
&configurations_[0]);
} else {
@@ -641,20 +642,15 @@ int VP8EncoderImpl::InitAndSetControlSettings() {
flags |= VPX_CODEC_USE_OUTPUT_PARTITION;
if (encoders_.size() > 1) {
- int error = vpx_codec_enc_init_multi(&encoders_[0],
- vpx_codec_vp8_cx(),
- &configurations_[0],
- encoders_.size(),
- flags,
- &downsampling_factors_[0]);
+ int error = vpx_codec_enc_init_multi(&encoders_[0], vpx_codec_vp8_cx(),
+ &configurations_[0], encoders_.size(),
+ flags, &downsampling_factors_[0]);
if (error) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
} else {
- if (vpx_codec_enc_init(&encoders_[0],
- vpx_codec_vp8_cx(),
- &configurations_[0],
- flags)) {
+ if (vpx_codec_enc_init(&encoders_[0], vpx_codec_vp8_cx(),
+ &configurations_[0], flags)) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
}
@@ -671,13 +667,13 @@ int VP8EncoderImpl::InitAndSetControlSettings() {
#else
denoiser_state = kDenoiserOnAdaptive;
#endif
- vpx_codec_control(&encoders_[0], VP8E_SET_NOISE_SENSITIVITY,
- codec_.codecSpecific.VP8.denoisingOn ?
- denoiser_state : kDenoiserOff);
+ vpx_codec_control(
+ &encoders_[0], VP8E_SET_NOISE_SENSITIVITY,
+ codec_.codecSpecific.VP8.denoisingOn ? denoiser_state : kDenoiserOff);
if (encoders_.size() > 2) {
- vpx_codec_control(&encoders_[1], VP8E_SET_NOISE_SENSITIVITY,
- codec_.codecSpecific.VP8.denoisingOn ?
- denoiser_state : kDenoiserOff);
+ vpx_codec_control(
+ &encoders_[1], VP8E_SET_NOISE_SENSITIVITY,
+ codec_.codecSpecific.VP8.denoisingOn ? denoiser_state : kDenoiserOff);
}
for (size_t i = 0; i < encoders_.size(); ++i) {
// Allow more screen content to be detected as static.
@@ -710,14 +706,12 @@ uint32_t VP8EncoderImpl::MaxIntraTarget(uint32_t optimalBuffersize) {
// Don't go below 3 times the per frame bandwidth.
const uint32_t minIntraTh = 300;
- return (targetPct < minIntraTh) ? minIntraTh: targetPct;
+ return (targetPct < minIntraTh) ? minIntraTh : targetPct;
}
int VP8EncoderImpl::Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
- TRACE_EVENT1("webrtc", "VP8::Encode", "timestamp", frame.timestamp());
-
if (!inited_)
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
if (frame.IsZeroSize())
@@ -731,7 +725,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
quality_scaler_enabled_ ? quality_scaler_.GetScaledFrame(frame) : frame;
if (quality_scaler_enabled_ && (input_image.width() != codec_.width ||
- input_image.height() != codec_.height)) {
+ input_image.height() != codec_.height)) {
int ret = UpdateCodecFrameSize(input_image);
if (ret < 0)
return ret;
@@ -747,11 +741,11 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
// Image in vpx_image_t format.
// Input image is const. VP8's raw image is not defined as const.
raw_images_[0].planes[VPX_PLANE_Y] =
- const_cast<uint8_t*>(input_image.buffer(kYPlane));
+ const_cast<uint8_t*>(input_image.buffer(kYPlane));
raw_images_[0].planes[VPX_PLANE_U] =
- const_cast<uint8_t*>(input_image.buffer(kUPlane));
+ const_cast<uint8_t*>(input_image.buffer(kUPlane));
raw_images_[0].planes[VPX_PLANE_V] =
- const_cast<uint8_t*>(input_image.buffer(kVPlane));
+ const_cast<uint8_t*>(input_image.buffer(kVPlane));
raw_images_[0].stride[VPX_PLANE_Y] = input_image.stride(kYPlane);
raw_images_[0].stride[VPX_PLANE_U] = input_image.stride(kUPlane);
@@ -760,17 +754,17 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
for (size_t i = 1; i < encoders_.size(); ++i) {
// Scale the image down a number of times by downsampling factor
libyuv::I420Scale(
- raw_images_[i-1].planes[VPX_PLANE_Y],
- raw_images_[i-1].stride[VPX_PLANE_Y],
- raw_images_[i-1].planes[VPX_PLANE_U],
- raw_images_[i-1].stride[VPX_PLANE_U],
- raw_images_[i-1].planes[VPX_PLANE_V],
- raw_images_[i-1].stride[VPX_PLANE_V],
- raw_images_[i-1].d_w, raw_images_[i-1].d_h,
- raw_images_[i].planes[VPX_PLANE_Y], raw_images_[i].stride[VPX_PLANE_Y],
- raw_images_[i].planes[VPX_PLANE_U], raw_images_[i].stride[VPX_PLANE_U],
- raw_images_[i].planes[VPX_PLANE_V], raw_images_[i].stride[VPX_PLANE_V],
- raw_images_[i].d_w, raw_images_[i].d_h, libyuv::kFilterBilinear);
+ raw_images_[i - 1].planes[VPX_PLANE_Y],
+ raw_images_[i - 1].stride[VPX_PLANE_Y],
+ raw_images_[i - 1].planes[VPX_PLANE_U],
+ raw_images_[i - 1].stride[VPX_PLANE_U],
+ raw_images_[i - 1].planes[VPX_PLANE_V],
+ raw_images_[i - 1].stride[VPX_PLANE_V], raw_images_[i - 1].d_w,
+ raw_images_[i - 1].d_h, raw_images_[i].planes[VPX_PLANE_Y],
+ raw_images_[i].stride[VPX_PLANE_Y], raw_images_[i].planes[VPX_PLANE_U],
+ raw_images_[i].stride[VPX_PLANE_U], raw_images_[i].planes[VPX_PLANE_V],
+ raw_images_[i].stride[VPX_PLANE_V], raw_images_[i].d_w,
+ raw_images_[i].d_h, libyuv::kFilterBilinear);
}
vpx_enc_frame_flags_t flags[kMaxSimulcastStreams];
for (size_t i = 0; i < encoders_.size(); ++i) {
@@ -805,8 +799,8 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
if (send_key_frame) {
// Adapt the size of the key frame when in screenshare with 1 temporal
// layer.
- if (encoders_.size() == 1 && codec_.mode == kScreensharing
- && codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) {
+ if (encoders_.size() == 1 && codec_.mode == kScreensharing &&
+ codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) {
const uint32_t forceKeyFrameIntraTh = 100;
vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
forceKeyFrameIntraTh);
@@ -818,13 +812,12 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
}
std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
} else if (codec_specific_info &&
- codec_specific_info->codecType == kVideoCodecVP8) {
+ codec_specific_info->codecType == kVideoCodecVP8) {
if (feedback_mode_) {
// Handle RPSI and SLI messages and set up the appropriate encode flags.
bool sendRefresh = false;
if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) {
- rps_.ReceivedRPSI(
- codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
+ rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
}
if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) {
sendRefresh = rps_.ReceivedSLI(input_image.timestamp());
@@ -876,8 +869,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
}
vpx_codec_control(&encoders_[i], VP8E_SET_FRAME_FLAGS, flags[stream_idx]);
- vpx_codec_control(&encoders_[i],
- VP8E_SET_TEMPORAL_LAYER_ID,
+ vpx_codec_control(&encoders_[i], VP8E_SET_TEMPORAL_LAYER_ID,
temporal_layers_[stream_idx]->CurrentLayerId());
}
// TODO(holmer): Ideally the duration should be the timestamp diff of this
@@ -895,7 +887,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
// Reset specific intra frame thresholds, following the key frame.
if (send_key_frame) {
vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
- rc_max_intra_target_);
+ rc_max_intra_target_);
}
if (error)
return WEBRTC_VIDEO_CODEC_ERROR;
@@ -913,8 +905,7 @@ int VP8EncoderImpl::UpdateCodecFrameSize(const VideoFrame& input_image) {
codec_.simulcastStream[0].height = input_image.height();
}
// Update the cpu_speed setting for resolution change.
- vpx_codec_control(&(encoders_[0]),
- VP8E_SET_CPUUSED,
+ vpx_codec_control(&(encoders_[0]), VP8E_SET_CPUUSED,
SetCpuSpeed(codec_.width, codec_.height));
raw_images_[0].w = codec_.width;
raw_images_[0].h = codec_.height;
@@ -947,13 +938,12 @@ void VP8EncoderImpl::PopulateCodecSpecific(
}
vp8Info->simulcastIdx = stream_idx;
vp8Info->keyIdx = kNoKeyIdx; // TODO(hlundin) populate this
- vp8Info->nonReference = (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) ?
- true : false;
+ vp8Info->nonReference =
+ (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) ? true : false;
bool base_layer_sync_point = (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ||
- only_predicting_from_key_frame;
+ only_predicting_from_key_frame;
temporal_layers_[stream_idx]->PopulateCodecSpecific(base_layer_sync_point,
- vp8Info,
- timestamp);
+ vp8Info, timestamp);
// Prepare next.
picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF;
}
@@ -966,27 +956,26 @@ int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
int stream_idx = static_cast<int>(encoders_.size()) - 1;
int result = WEBRTC_VIDEO_CODEC_OK;
for (size_t encoder_idx = 0; encoder_idx < encoders_.size();
- ++encoder_idx, --stream_idx) {
+ ++encoder_idx, --stream_idx) {
vpx_codec_iter_t iter = NULL;
int part_idx = 0;
encoded_images_[encoder_idx]._length = 0;
encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
RTPFragmentationHeader frag_info;
// token_partitions_ is number of bits used.
- frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_)
- + 1);
+ frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_) +
+ 1);
CodecSpecificInfo codec_specific;
- const vpx_codec_cx_pkt_t *pkt = NULL;
- while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx],
- &iter)) != NULL) {
+ const vpx_codec_cx_pkt_t* pkt = NULL;
+ while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx], &iter)) !=
+ NULL) {
switch (pkt->kind) {
case VPX_CODEC_CX_FRAME_PKT: {
uint32_t length = encoded_images_[encoder_idx]._length;
memcpy(&encoded_images_[encoder_idx]._buffer[length],
- pkt->data.frame.buf,
- pkt->data.frame.sz);
+ pkt->data.frame.buf, pkt->data.frame.sz);
frag_info.fragmentationOffset[part_idx] = length;
- frag_info.fragmentationLength[part_idx] = pkt->data.frame.sz;
+ frag_info.fragmentationLength[part_idx] = pkt->data.frame.sz;
frag_info.fragmentationPlType[part_idx] = 0; // not known here
frag_info.fragmentationTimeDiff[part_idx] = 0;
encoded_images_[encoder_idx]._length += pkt->data.frame.sz;
@@ -1063,7 +1052,6 @@ int VP8EncoderImpl::RegisterEncodeCompleteCallback(
return WEBRTC_VIDEO_CODEC_OK;
}
-
VP8DecoderImpl::VP8DecoderImpl()
: decode_complete_callback_(NULL),
inited_(false),
@@ -1075,8 +1063,7 @@ VP8DecoderImpl::VP8DecoderImpl()
propagation_cnt_(-1),
last_frame_width_(0),
last_frame_height_(0),
- key_frame_required_(true) {
-}
+ key_frame_required_(true) {}
VP8DecoderImpl::~VP8DecoderImpl() {
inited_ = true; // in order to do the actual release
@@ -1092,8 +1079,7 @@ int VP8DecoderImpl::Reset() {
return WEBRTC_VIDEO_CODEC_OK;
}
-int VP8DecoderImpl::InitDecode(const VideoCodec* inst,
- int number_of_cores) {
+int VP8DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
int ret_val = Release();
if (ret_val < 0) {
return ret_val;
@@ -1104,12 +1090,12 @@ int VP8DecoderImpl::InitDecode(const VideoCodec* inst,
if (inst && inst->codecType == kVideoCodecVP8) {
feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;
}
- vpx_codec_dec_cfg_t cfg;
+ vpx_codec_dec_cfg_t cfg;
// Setting number of threads to a constant value (1)
cfg.threads = 1;
cfg.h = cfg.w = 0; // set after decode
-vpx_codec_flags_t flags = 0;
+ vpx_codec_flags_t flags = 0;
#if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64)
flags = VPX_CODEC_USE_POSTPROC;
#ifdef INDEPENDENT_PARTITIONS
@@ -1134,10 +1120,10 @@ vpx_codec_flags_t flags = 0;
}
int VP8DecoderImpl::Decode(const EncodedImage& input_image,
- bool missing_frames,
- const RTPFragmentationHeader* fragmentation,
- const CodecSpecificInfo* codec_specific_info,
- int64_t /*render_time_ms*/) {
+ bool missing_frames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codec_specific_info,
+ int64_t /*render_time_ms*/) {
if (!inited_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
@@ -1188,9 +1174,9 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
if (input_image._frameType == kVideoFrameKey &&
input_image._completeFrame) {
propagation_cnt_ = -1;
- // Start count on first loss.
+ // Start count on first loss.
} else if ((!input_image._completeFrame || missing_frames) &&
- propagation_cnt_ == -1) {
+ propagation_cnt_ == -1) {
propagation_cnt_ = 0;
}
if (propagation_cnt_ >= 0) {
@@ -1242,15 +1228,15 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
if (input_image._frameType == kVideoFrameKey && input_image._buffer != NULL) {
const uint32_t bytes_to_copy = input_image._length;
if (last_keyframe_._size < bytes_to_copy) {
- delete [] last_keyframe_._buffer;
+ delete[] last_keyframe_._buffer;
last_keyframe_._buffer = NULL;
last_keyframe_._size = 0;
}
uint8_t* temp_buffer = last_keyframe_._buffer; // Save buffer ptr.
- uint32_t temp_size = last_keyframe_._size; // Save size.
- last_keyframe_ = input_image; // Shallow copy.
- last_keyframe_._buffer = temp_buffer; // Restore buffer ptr.
- last_keyframe_._size = temp_size; // Restore buffer size.
+ uint32_t temp_size = last_keyframe_._size; // Save size.
+ last_keyframe_ = input_image; // Shallow copy.
+ last_keyframe_._buffer = temp_buffer; // Restore buffer ptr.
+ last_keyframe_._size = temp_size; // Restore buffer size.
if (!last_keyframe_._buffer) {
// Allocate memory.
last_keyframe_._size = bytes_to_copy;
@@ -1300,7 +1286,8 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
}
if (picture_id > -1) {
if (((reference_updates & VP8_GOLD_FRAME) ||
- (reference_updates & VP8_ALTR_FRAME)) && !corrupted) {
+ (reference_updates & VP8_ALTR_FRAME)) &&
+ !corrupted) {
decode_complete_callback_->ReceivedDecodedReferenceFrame(picture_id);
}
decode_complete_callback_->ReceivedDecodedFrame(picture_id);
@@ -1323,14 +1310,10 @@ int VP8DecoderImpl::DecodePartitions(
const EncodedImage& input_image,
const RTPFragmentationHeader* fragmentation) {
for (int i = 0; i < fragmentation->fragmentationVectorSize; ++i) {
- const uint8_t* partition = input_image._buffer +
- fragmentation->fragmentationOffset[i];
- const uint32_t partition_length =
- fragmentation->fragmentationLength[i];
- if (vpx_codec_decode(decoder_,
- partition,
- partition_length,
- 0,
+ const uint8_t* partition =
+ input_image._buffer + fragmentation->fragmentationOffset[i];
+ const uint32_t partition_length = fragmentation->fragmentationLength[i];
+ if (vpx_codec_decode(decoder_, partition, partition_length, 0,
VPX_DL_REALTIME)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -1343,8 +1326,8 @@ int VP8DecoderImpl::DecodePartitions(
}
int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
- uint32_t timestamp,
- int64_t ntp_time_ms) {
+ uint32_t timestamp,
+ int64_t ntp_time_ms) {
if (img == NULL) {
// Decoder OK and NULL image => No show frame
return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
@@ -1354,14 +1337,13 @@ int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
// Allocate memory for decoded image.
VideoFrame decoded_image(buffer_pool_.CreateBuffer(img->d_w, img->d_h),
timestamp, 0, kVideoRotation_0);
- libyuv::I420Copy(
- img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
- img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
- img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
- decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane),
- decoded_image.buffer(kUPlane), decoded_image.stride(kUPlane),
- decoded_image.buffer(kVPlane), decoded_image.stride(kVPlane),
- img->d_w, img->d_h);
+ libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
+ img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
+ img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+ decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane),
+ decoded_image.buffer(kUPlane), decoded_image.stride(kUPlane),
+ decoded_image.buffer(kVPlane), decoded_image.stride(kVPlane),
+ img->d_w, img->d_h);
decoded_image.set_ntp_time_ms(ntp_time_ms);
int ret = decode_complete_callback_->Decoded(decoded_image);
if (ret != 0)
@@ -1380,7 +1362,7 @@ int VP8DecoderImpl::RegisterDecodeCompleteCallback(
int VP8DecoderImpl::Release() {
if (last_keyframe_._buffer != NULL) {
- delete [] last_keyframe_._buffer;
+ delete[] last_keyframe_._buffer;
last_keyframe_._buffer = NULL;
}
if (decoder_ != NULL) {
@@ -1400,15 +1382,19 @@ int VP8DecoderImpl::Release() {
return WEBRTC_VIDEO_CODEC_OK;
}
+const char* VP8DecoderImpl::ImplementationName() const {
+ return "libvpx";
+}
+
int VP8DecoderImpl::CopyReference(VP8DecoderImpl* copy) {
// The type of frame to copy should be set in ref_frame_->frame_type
// before the call to this function.
- if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_)
- != VPX_CODEC_OK) {
+ if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_) !=
+ VPX_CODEC_OK) {
return -1;
}
- if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_)
- != VPX_CODEC_OK) {
+ if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) !=
+ VPX_CODEC_OK) {
return -1;
}
return 0;
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
index ba14ed5841..9d5fb713a4 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
@@ -22,12 +22,12 @@
#include "vpx/vp8cx.h"
#include "vpx/vp8dx.h"
-#include "webrtc/common_video/interface/i420_buffer_pool.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/common_video/include/i420_buffer_pool.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.h"
-#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
-#include "webrtc/modules/video_coding/utility/include/quality_scaler.h"
+#include "webrtc/modules/video_coding/utility/frame_dropper.h"
+#include "webrtc/modules/video_coding/utility/quality_scaler.h"
#include "webrtc/video_frame.h"
namespace webrtc {
@@ -58,8 +58,11 @@ class VP8EncoderImpl : public VP8Encoder {
void OnDroppedFrame() override {}
+ const char* ImplementationName() const override;
+
private:
- void SetupTemporalLayers(int num_streams, int num_temporal_layers,
+ void SetupTemporalLayers(int num_streams,
+ int num_temporal_layers,
const VideoCodec& codec);
// Set the cpu_speed setting for encoder based on resolution and/or platform.
@@ -126,15 +129,17 @@ class VP8DecoderImpl : public VP8Decoder {
int InitDecode(const VideoCodec* inst, int number_of_cores) override;
int Decode(const EncodedImage& input_image,
- bool missing_frames,
- const RTPFragmentationHeader* fragmentation,
- const CodecSpecificInfo* codec_specific_info,
- int64_t /*render_time_ms*/) override;
+ bool missing_frames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codec_specific_info,
+ int64_t /*render_time_ms*/) override;
int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
int Release() override;
int Reset() override;
+ const char* ImplementationName() const override;
+
private:
// Copy reference image from this _decoder to the _decoder in copyTo. Set
// which frame type to copy in _refFrame->frame_type before the call to
@@ -165,4 +170,3 @@ class VP8DecoderImpl : public VP8Decoder {
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_IMPL_H_
-
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
index 5843d83fa7..9e546653db 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
@@ -1,4 +1,4 @@
- /*
+/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
@@ -9,8 +9,9 @@
*/
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/common_video/interface/video_image.h"
+#include "webrtc/common_video/include/video_image.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/system_wrappers/include/tick_util.h"
@@ -22,8 +23,7 @@
class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
public:
explicit Vp8SequenceCoderEncodeCallback(FILE* encoded_file)
- : encoded_file_(encoded_file),
- encoded_bytes_(0) {}
+ : encoded_file_(encoded_file), encoded_bytes_(0) {}
~Vp8SequenceCoderEncodeCallback();
int Encoded(const webrtc::EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codecSpecificInfo,
@@ -31,6 +31,7 @@ class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
// Returns the encoded image.
webrtc::EncodedImage encoded_image() { return encoded_image_; }
size_t encoded_bytes() { return encoded_bytes_; }
+
private:
webrtc::EncodedImage encoded_image_;
FILE* encoded_file_;
@@ -38,7 +39,7 @@ class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
};
Vp8SequenceCoderEncodeCallback::~Vp8SequenceCoderEncodeCallback() {
- delete [] encoded_image_._buffer;
+ delete[] encoded_image_._buffer;
encoded_image_._buffer = NULL;
}
int Vp8SequenceCoderEncodeCallback::Encoded(
@@ -46,7 +47,7 @@ int Vp8SequenceCoderEncodeCallback::Encoded(
const webrtc::CodecSpecificInfo* codecSpecificInfo,
const webrtc::RTPFragmentationHeader* fragmentation) {
if (encoded_image_._size < encoded_image._size) {
- delete [] encoded_image_._buffer;
+ delete[] encoded_image_._buffer;
encoded_image_._buffer = NULL;
encoded_image_._buffer = new uint8_t[encoded_image._size];
encoded_image_._size = encoded_image._size;
@@ -68,7 +69,11 @@ class Vp8SequenceCoderDecodeCallback : public webrtc::DecodedImageCallback {
public:
explicit Vp8SequenceCoderDecodeCallback(FILE* decoded_file)
: decoded_file_(decoded_file) {}
- int Decoded(webrtc::VideoFrame& frame);
+ int32_t Decoded(webrtc::VideoFrame& frame) override;
+ int32_t Decoded(webrtc::VideoFrame& frame, int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
+ }
bool DecodeComplete();
private:
@@ -80,16 +85,16 @@ int Vp8SequenceCoderDecodeCallback::Decoded(webrtc::VideoFrame& image) {
return 0;
}
-int SequenceCoder(webrtc::test::CommandLineParser& parser) {
- int width = strtol((parser.GetFlag("w")).c_str(), NULL, 10);
- int height = strtol((parser.GetFlag("h")).c_str(), NULL, 10);
- int framerate = strtol((parser.GetFlag("f")).c_str(), NULL, 10);
+int SequenceCoder(webrtc::test::CommandLineParser* parser) {
+ int width = strtol((parser->GetFlag("w")).c_str(), NULL, 10);
+ int height = strtol((parser->GetFlag("h")).c_str(), NULL, 10);
+ int framerate = strtol((parser->GetFlag("f")).c_str(), NULL, 10);
if (width <= 0 || height <= 0 || framerate <= 0) {
fprintf(stderr, "Error: Resolution cannot be <= 0!\n");
return -1;
}
- int target_bitrate = strtol((parser.GetFlag("b")).c_str(), NULL, 10);
+ int target_bitrate = strtol((parser->GetFlag("b")).c_str(), NULL, 10);
if (target_bitrate <= 0) {
fprintf(stderr, "Error: Bit-rate cannot be <= 0!\n");
return -1;
@@ -97,20 +102,20 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
// SetUp
// Open input file.
- std::string encoded_file_name = parser.GetFlag("encoded_file");
+ std::string encoded_file_name = parser->GetFlag("encoded_file");
FILE* encoded_file = fopen(encoded_file_name.c_str(), "wb");
if (encoded_file == NULL) {
fprintf(stderr, "Error: Cannot open encoded file\n");
return -1;
}
- std::string input_file_name = parser.GetFlag("input_file");
+ std::string input_file_name = parser->GetFlag("input_file");
FILE* input_file = fopen(input_file_name.c_str(), "rb");
if (input_file == NULL) {
fprintf(stderr, "Error: Cannot open input file\n");
return -1;
}
// Open output file.
- std::string output_file_name = parser.GetFlag("output_file");
+ std::string output_file_name = parser->GetFlag("output_file");
FILE* output_file = fopen(output_file_name.c_str(), "wb");
if (output_file == NULL) {
fprintf(stderr, "Error: Cannot open output file\n");
@@ -118,8 +123,8 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
}
// Get range of frames: will encode num_frames following start_frame).
- int start_frame = strtol((parser.GetFlag("start_frame")).c_str(), NULL, 10);
- int num_frames = strtol((parser.GetFlag("num_frames")).c_str(), NULL, 10);
+ int start_frame = strtol((parser->GetFlag("start_frame")).c_str(), NULL, 10);
+ int num_frames = strtol((parser->GetFlag("num_frames")).c_str(), NULL, 10);
// Codec SetUp.
webrtc::VideoCodec inst;
@@ -157,8 +162,8 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
int frames_processed = 0;
input_frame.CreateEmptyFrame(width, height, width, half_width, half_width);
while (!feof(input_file) &&
- (num_frames == -1 || frames_processed < num_frames)) {
- if (fread(frame_buffer.get(), 1, length, input_file) != length)
+ (num_frames == -1 || frames_processed < num_frames)) {
+ if (fread(frame_buffer.get(), 1, length, input_file) != length)
continue;
if (frame_cnt >= start_frame) {
webrtc::ConvertToI420(webrtc::kI420, frame_buffer.get(), 0, 0, width,
@@ -179,33 +184,35 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
printf("Actual bitrate: %f kbps\n", actual_bit_rate / 1000);
webrtc::test::QualityMetricsResult psnr_result, ssim_result;
EXPECT_EQ(0, webrtc::test::I420MetricsFromFiles(
- input_file_name.c_str(), output_file_name.c_str(),
- inst.width, inst.height,
- &psnr_result, &ssim_result));
+ input_file_name.c_str(), output_file_name.c_str(),
+ inst.width, inst.height, &psnr_result, &ssim_result));
printf("PSNR avg: %f[dB], min: %f[dB]\nSSIM avg: %f, min: %f\n",
- psnr_result.average, psnr_result.min,
- ssim_result.average, ssim_result.min);
+ psnr_result.average, psnr_result.min, ssim_result.average,
+ ssim_result.min);
return frame_cnt;
}
int main(int argc, char** argv) {
std::string program_name = argv[0];
- std::string usage = "Encode and decodes a video sequence, and writes"
- "results to a file.\n"
- "Example usage:\n" + program_name + " functionality"
- " --w=352 --h=288 --input_file=input.yuv --output_file=output.yuv "
- " Command line flags:\n"
- " - width(int): The width of the input file. Default: 352\n"
- " - height(int): The height of the input file. Default: 288\n"
- " - input_file(string): The YUV file to encode."
- " Default: foreman.yuv\n"
- " - encoded_file(string): The vp8 encoded file (encoder output)."
- " Default: vp8_encoded.vp8\n"
- " - output_file(string): The yuv decoded file (decoder output)."
- " Default: vp8_decoded.yuv\n."
- " - start_frame - frame number in which encoding will begin. Default: 0"
- " - num_frames - Number of frames to be processed. "
- " Default: -1 (entire sequence).";
+ std::string usage =
+      "Encodes and decodes a video sequence, and writes "
+ "results to a file.\n"
+ "Example usage:\n" +
+ program_name +
+ " functionality"
+ " --w=352 --h=288 --input_file=input.yuv --output_file=output.yuv "
+ " Command line flags:\n"
+ " - width(int): The width of the input file. Default: 352\n"
+ " - height(int): The height of the input file. Default: 288\n"
+ " - input_file(string): The YUV file to encode."
+ " Default: foreman.yuv\n"
+ " - encoded_file(string): The vp8 encoded file (encoder output)."
+ " Default: vp8_encoded.vp8\n"
+ " - output_file(string): The yuv decoded file (decoder output)."
+      " Default: vp8_decoded.yuv\n"
+      " - start_frame - Frame number at which encoding begins. Default: 0\n"
+ " - num_frames - Number of frames to be processed. "
+ " Default: -1 (entire sequence).";
webrtc::test::CommandLineParser parser;
@@ -223,8 +230,8 @@ int main(int argc, char** argv) {
parser.SetFlag("output_file", webrtc::test::OutputPath() + "vp8_decoded.yuv");
parser.SetFlag("encoded_file",
webrtc::test::OutputPath() + "vp8_encoded.vp8");
- parser.SetFlag("input_file", webrtc::test::ResourcePath("foreman_cif",
- "yuv"));
+ parser.SetFlag("input_file",
+ webrtc::test::ResourcePath("foreman_cif", "yuv"));
parser.SetFlag("help", "false");
parser.ProcessFlags();
@@ -234,5 +241,5 @@ int main(int argc, char** argv) {
}
parser.PrintEnteredFlags();
- return SequenceCoder(parser);
+ return SequenceCoder(&parser);
}
diff --git a/webrtc/modules/video_coding/codecs/vp9/include/vp9.h b/webrtc/modules/video_coding/codecs/vp9/include/vp9.h
index cd77f72dcb..3bcbe46b3a 100644
--- a/webrtc/modules/video_coding/codecs/vp9/include/vp9.h
+++ b/webrtc/modules/video_coding/codecs/vp9/include/vp9.h
@@ -12,7 +12,7 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_H_
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
@@ -23,7 +23,6 @@ class VP9Encoder : public VideoEncoder {
virtual ~VP9Encoder() {}
};
-
class VP9Decoder : public VideoDecoder {
public:
static VP9Decoder* Create();
diff --git a/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc
new file mode 100644
index 0000000000..c7ed78a192
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc
@@ -0,0 +1,93 @@
+/* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+*
+* Use of this source code is governed by a BSD-style license
+* that can be found in the LICENSE file in the root of the source
+* tree. An additional intellectual property rights grant can be found
+* in the file PATENTS. All contributing project authors may
+* be found in the AUTHORS file in the root of the source tree.
+*/
+
+#include <algorithm>
+#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
+#include "webrtc/base/checks.h"
+
+namespace webrtc {
+
+ScreenshareLayersVP9::ScreenshareLayersVP9(uint8_t num_layers)
+ : num_layers_(num_layers),
+ start_layer_(0),
+ last_timestamp_(0),
+ timestamp_initialized_(false) {
+ RTC_DCHECK_GT(num_layers, 0);
+ RTC_DCHECK_LE(num_layers, kMaxVp9NumberOfSpatialLayers);
+ memset(bits_used_, 0, sizeof(bits_used_));
+ memset(threshold_kbps_, 0, sizeof(threshold_kbps_));
+}
+
+uint8_t ScreenshareLayersVP9::GetStartLayer() const {
+ return start_layer_;
+}
+
+void ScreenshareLayersVP9::ConfigureBitrate(int threshold_kbps,
+ uint8_t layer_id) {
+  // The upper layer is always the layer we spill frames into when the
+  // bitrate becomes too high, so setting a max limit on it is not
+  // allowed. The top layer bitrate is never used either, so configuring
+  // it makes no difference.
+ RTC_DCHECK_LT(layer_id, num_layers_ - 1);
+ threshold_kbps_[layer_id] = threshold_kbps;
+}
+
+void ScreenshareLayersVP9::LayerFrameEncoded(unsigned int size_bytes,
+ uint8_t layer_id) {
+ RTC_DCHECK_LT(layer_id, num_layers_);
+ bits_used_[layer_id] += size_bytes * 8;
+}
+
+VP9EncoderImpl::SuperFrameRefSettings
+ScreenshareLayersVP9::GetSuperFrameSettings(uint32_t timestamp,
+ bool is_keyframe) {
+ VP9EncoderImpl::SuperFrameRefSettings settings;
+ if (!timestamp_initialized_) {
+ last_timestamp_ = timestamp;
+ timestamp_initialized_ = true;
+ }
+ float time_diff = (timestamp - last_timestamp_) / 90.f;
+ float total_bits_used = 0;
+ float total_threshold_kbps = 0;
+ start_layer_ = 0;
+
+ // Up to (num_layers - 1) because we only have
+ // (num_layers - 1) thresholds to check.
+ for (int layer_id = 0; layer_id < num_layers_ - 1; ++layer_id) {
+ bits_used_[layer_id] = std::max(
+ 0.f, bits_used_[layer_id] - time_diff * threshold_kbps_[layer_id]);
+ total_bits_used += bits_used_[layer_id];
+ total_threshold_kbps += threshold_kbps_[layer_id];
+
+ // If this is a keyframe then there should be no
+ // references to any previous frames.
+ if (!is_keyframe) {
+ settings.layer[layer_id].ref_buf1 = layer_id;
+ if (total_bits_used > total_threshold_kbps * 1000)
+ start_layer_ = layer_id + 1;
+ }
+
+ settings.layer[layer_id].upd_buf = layer_id;
+ }
+  // Since the above loop does not iterate over the last layer, its
+  // reference has to be set after the loop; if this is a keyframe
+  // there should be no references to any previous frames.
+ if (!is_keyframe)
+ settings.layer[num_layers_ - 1].ref_buf1 = num_layers_ - 1;
+
+ settings.layer[num_layers_ - 1].upd_buf = num_layers_ - 1;
+ settings.is_keyframe = is_keyframe;
+ settings.start_layer = start_layer_;
+ settings.stop_layer = num_layers_ - 1;
+ last_timestamp_ = timestamp;
+ return settings;
+}
+
+} // namespace webrtc
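A hypothetical usage sketch of the new class (the constants below are made
up, not from this patch): the caller configures a cumulative threshold for
each lower layer, reports every encoded frame's size, and asks for the
reference settings of the next superframe; once a layer's smoothed bitrate
exceeds its threshold, start_layer moves up and the lower layers are skipped.

    webrtc::ScreenshareLayersVP9 layers(2);
    layers.ConfigureBitrate(/*threshold_kbps=*/150, /*layer_id=*/0);

    uint32_t rtp_timestamp = 0;
    for (int i = 0; i < 5; ++i) {
      webrtc::VP9EncoderImpl::SuperFrameRefSettings settings =
          layers.GetSuperFrameSettings(rtp_timestamp,
                                       /*is_keyframe=*/(i == 0));
      // Encode layers settings.start_layer .. settings.stop_layer, then
      // report the produced size, e.g. 5000 bytes on layer 0:
      layers.LayerFrameEncoded(/*size_bytes=*/5000, /*layer_id=*/0);
      rtp_timestamp += 200 * 90;  // 200 ms at the 90 kHz RTP clock.
    }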
diff --git a/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h
new file mode 100644
index 0000000000..5a901ae359
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+*
+* Use of this source code is governed by a BSD-style license
+* that can be found in the LICENSE file in the root of the source
+* tree. An additional intellectual property rights grant can be found
+* in the file PATENTS. All contributing project authors may
+* be found in the AUTHORS file in the root of the source tree.
+*/
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
+
+#include "webrtc/modules/video_coding/codecs/vp9/vp9_impl.h"
+
+namespace webrtc {
+
+class ScreenshareLayersVP9 {
+ public:
+ explicit ScreenshareLayersVP9(uint8_t num_layers);
+
+  // Set the cumulative bitrate threshold for the layer with id layer_id.
+ void ConfigureBitrate(int threshold_kbps, uint8_t layer_id);
+
+ // The current start layer.
+ uint8_t GetStartLayer() const;
+
+ // Update the layer with the size of the layer frame.
+ void LayerFrameEncoded(unsigned int size_bytes, uint8_t layer_id);
+
+ // Get the layer settings for the next superframe.
+ //
+  // In short, each time GetSuperFrameSettings() is called, the bitrate
+  // of every layer is calculated, and if the cumulative bitrate up to
+  // and including a layer exceeds the cumulative threshold configured
+  // for it (via ConfigureBitrate()), the resulting encoding settings
+  // for the superframe will only encode the layers above it.
+ VP9EncoderImpl::SuperFrameRefSettings GetSuperFrameSettings(
+ uint32_t timestamp,
+ bool is_keyframe);
+
+ private:
+  // Number of layers in use.
+ uint8_t num_layers_;
+
+ // The index of the first layer to encode.
+ uint8_t start_layer_;
+
+  // Cumulative target kbps for the different layers.
+ float threshold_kbps_[kMaxVp9NumberOfSpatialLayers - 1];
+
+  // How many bits have been used for a certain layer. Increased in
+  // LayerFrameEncoded() by the size of the encoded frame and decreased in
+ // GetSuperFrameSettings() depending on the time between frames.
+ float bits_used_[kMaxVp9NumberOfSpatialLayers];
+
+ // Timestamp of last frame.
+ uint32_t last_timestamp_;
+
+ // If the last_timestamp_ has been set.
+ bool timestamp_initialized_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
diff --git a/webrtc/modules/video_coding/codecs/vp9/screenshare_layers_unittest.cc b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers_unittest.cc
new file mode 100644
index 0000000000..5eb7b237ac
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers_unittest.cc
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "vpx/vp8cx.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
+#include "webrtc/modules/video_coding/codecs/vp9/vp9_impl.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+typedef VP9EncoderImpl::SuperFrameRefSettings Settings;
+
+const uint32_t kTickFrequency = 90000;
+
+class ScreenshareLayerTestVP9 : public ::testing::Test {
+ protected:
+ ScreenshareLayerTestVP9() : clock_(0) {}
+ virtual ~ScreenshareLayerTestVP9() {}
+
+ void InitScreenshareLayers(int layers) {
+ layers_.reset(new ScreenshareLayersVP9(layers));
+ }
+
+ void ConfigureBitrateForLayer(int kbps, uint8_t layer_id) {
+ layers_->ConfigureBitrate(kbps, layer_id);
+ }
+
+ void AdvanceTime(int64_t milliseconds) {
+ clock_.AdvanceTimeMilliseconds(milliseconds);
+ }
+
+ void AddKilobitsToLayer(int kilobits, uint8_t layer_id) {
+ layers_->LayerFrameEncoded(kilobits * 1000 / 8, layer_id);
+ }
+
+ void EqualRefsForLayer(const Settings& actual, uint8_t layer_id) {
+ EXPECT_EQ(expected_.layer[layer_id].upd_buf,
+ actual.layer[layer_id].upd_buf);
+ EXPECT_EQ(expected_.layer[layer_id].ref_buf1,
+ actual.layer[layer_id].ref_buf1);
+ EXPECT_EQ(expected_.layer[layer_id].ref_buf2,
+ actual.layer[layer_id].ref_buf2);
+ EXPECT_EQ(expected_.layer[layer_id].ref_buf3,
+ actual.layer[layer_id].ref_buf3);
+ }
+
+ void EqualRefs(const Settings& actual) {
+ for (unsigned int layer_id = 0; layer_id < kMaxVp9NumberOfSpatialLayers;
+ ++layer_id) {
+ EqualRefsForLayer(actual, layer_id);
+ }
+ }
+
+ void EqualStartStopKeyframe(const Settings& actual) {
+ EXPECT_EQ(expected_.start_layer, actual.start_layer);
+ EXPECT_EQ(expected_.stop_layer, actual.stop_layer);
+ EXPECT_EQ(expected_.is_keyframe, actual.is_keyframe);
+ }
+
+  // Check that the settings returned by GetSuperFrameSettings() are
+  // equal to the expected_ settings.
+ void EqualToExpected() {
+ uint32_t frame_timestamp_ =
+ clock_.TimeInMilliseconds() * (kTickFrequency / 1000);
+ Settings actual =
+ layers_->GetSuperFrameSettings(frame_timestamp_, expected_.is_keyframe);
+ EqualRefs(actual);
+ EqualStartStopKeyframe(actual);
+ }
+
+ Settings expected_;
+ SimulatedClock clock_;
+ rtc::scoped_ptr<ScreenshareLayersVP9> layers_;
+};
+
+TEST_F(ScreenshareLayerTestVP9, NoRefsOnKeyFrame) {
+ const int kNumLayers = kMaxVp9NumberOfSpatialLayers;
+ InitScreenshareLayers(kNumLayers);
+ expected_.start_layer = 0;
+ expected_.stop_layer = kNumLayers - 1;
+
+ for (int l = 0; l < kNumLayers; ++l) {
+ expected_.layer[l].upd_buf = l;
+ }
+ expected_.is_keyframe = true;
+ EqualToExpected();
+
+ for (int l = 0; l < kNumLayers; ++l) {
+ expected_.layer[l].ref_buf1 = l;
+ }
+ expected_.is_keyframe = false;
+ EqualToExpected();
+}
+
+// Test that it is not possible to send at a high bitrate (over the
+// threshold) after a longer period of low bitrate, i.e. that unused
+// bits do not accumulate over time.
+TEST_F(ScreenshareLayerTestVP9, DontAccumulateAvailableBitsOverTime) {
+ InitScreenshareLayers(2);
+ ConfigureBitrateForLayer(100, 0);
+
+ expected_.layer[0].upd_buf = 0;
+ expected_.layer[0].ref_buf1 = 0;
+ expected_.layer[1].upd_buf = 1;
+ expected_.layer[1].ref_buf1 = 1;
+ expected_.start_layer = 0;
+ expected_.stop_layer = 1;
+
+ // Send 10 frames at a low bitrate (50 kbps)
+ for (int i = 0; i < 10; ++i) {
+ AdvanceTime(200);
+ EqualToExpected();
+ AddKilobitsToLayer(10, 0);
+ }
+
+ AdvanceTime(200);
+ EqualToExpected();
+ AddKilobitsToLayer(301, 0);
+
+ // Send 10 frames at a high bitrate (200 kbps)
+ expected_.start_layer = 1;
+ for (int i = 0; i < 10; ++i) {
+ AdvanceTime(200);
+ EqualToExpected();
+ AddKilobitsToLayer(40, 1);
+ }
+}
+
+// Test that used bits are accumulated over layers, as they should be.
+TEST_F(ScreenshareLayerTestVP9, AccumulateUsedBitsOverLayers) {
+ const int kNumLayers = kMaxVp9NumberOfSpatialLayers;
+ InitScreenshareLayers(kNumLayers);
+ for (int l = 0; l < kNumLayers - 1; ++l)
+ ConfigureBitrateForLayer(100, l);
+ for (int l = 0; l < kNumLayers; ++l) {
+ expected_.layer[l].upd_buf = l;
+ expected_.layer[l].ref_buf1 = l;
+ }
+
+ expected_.start_layer = 0;
+ expected_.stop_layer = kNumLayers - 1;
+ EqualToExpected();
+
+ for (int layer = 0; layer < kNumLayers - 1; ++layer) {
+ expected_.start_layer = layer;
+ EqualToExpected();
+ AddKilobitsToLayer(101, layer);
+ }
+}
+
+// General testing of the bitrate controller.
+TEST_F(ScreenshareLayerTestVP9, 2LayerBitrate) {
+ InitScreenshareLayers(2);
+ ConfigureBitrateForLayer(100, 0);
+
+ expected_.layer[0].upd_buf = 0;
+ expected_.layer[1].upd_buf = 1;
+ expected_.layer[0].ref_buf1 = -1;
+ expected_.layer[1].ref_buf1 = -1;
+ expected_.start_layer = 0;
+ expected_.stop_layer = 1;
+
+ expected_.is_keyframe = true;
+ EqualToExpected();
+ AddKilobitsToLayer(100, 0);
+
+ expected_.layer[0].ref_buf1 = 0;
+ expected_.layer[1].ref_buf1 = 1;
+ expected_.is_keyframe = false;
+ AdvanceTime(199);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 0);
+
+ expected_.start_layer = 1;
+ for (int frame = 0; frame < 3; ++frame) {
+ AdvanceTime(200);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 1);
+ }
+
+ // Just before enough bits become available for L0 @0.999 seconds.
+ AdvanceTime(199);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 1);
+
+ // Just after enough bits become available for L0 @1.0001 seconds.
+ expected_.start_layer = 0;
+ AdvanceTime(2);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 0);
+
+ // Keyframes always encode all layers, even if it is over budget.
+ expected_.layer[0].ref_buf1 = -1;
+ expected_.layer[1].ref_buf1 = -1;
+ expected_.is_keyframe = true;
+ AdvanceTime(499);
+ EqualToExpected();
+ expected_.layer[0].ref_buf1 = 0;
+ expected_.layer[1].ref_buf1 = 1;
+ expected_.start_layer = 1;
+ expected_.is_keyframe = false;
+ EqualToExpected();
+ AddKilobitsToLayer(100, 0);
+
+  // 400 kb in L0 --> @3 second mark to fall below the threshold.
+  // Just before @2.999 seconds.
+ expected_.is_keyframe = false;
+ AdvanceTime(1499);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 1);
+
+  // Just after @3.001 seconds.
+ expected_.start_layer = 0;
+ AdvanceTime(2);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 0);
+}
+
+// General testing of the bitrate controller.
+TEST_F(ScreenshareLayerTestVP9, 3LayerBitrate) {
+ InitScreenshareLayers(3);
+ ConfigureBitrateForLayer(100, 0);
+ ConfigureBitrateForLayer(100, 1);
+
+ for (int l = 0; l < 3; ++l) {
+ expected_.layer[l].upd_buf = l;
+ expected_.layer[l].ref_buf1 = l;
+ }
+ expected_.start_layer = 0;
+ expected_.stop_layer = 2;
+
+ EqualToExpected();
+ AddKilobitsToLayer(105, 0);
+ AddKilobitsToLayer(30, 1);
+
+ AdvanceTime(199);
+ EqualToExpected();
+ AddKilobitsToLayer(105, 0);
+ AddKilobitsToLayer(30, 1);
+
+ expected_.start_layer = 1;
+ AdvanceTime(200);
+ EqualToExpected();
+ AddKilobitsToLayer(130, 1);
+
+ expected_.start_layer = 2;
+ AdvanceTime(200);
+ EqualToExpected();
+
+ // 400 kb in L1 --> @1.0 second mark to fall below threshold.
+ // 210 kb in L0 --> @1.1 second mark to fall below threshold.
+ // Just before L1 @0.999 seconds.
+ AdvanceTime(399);
+ EqualToExpected();
+
+ // Just after L1 @1.001 seconds.
+ expected_.start_layer = 1;
+ AdvanceTime(2);
+ EqualToExpected();
+
+ // Just before L0 @1.099 seconds.
+ AdvanceTime(99);
+ EqualToExpected();
+
+ // Just after L0 @1.101 seconds.
+ expected_.start_layer = 0;
+ AdvanceTime(2);
+ EqualToExpected();
+
+ // @1.1 seconds
+ AdvanceTime(99);
+ EqualToExpected();
+ AddKilobitsToLayer(200, 1);
+
+ expected_.is_keyframe = true;
+ for (int l = 0; l < 3; ++l)
+ expected_.layer[l].ref_buf1 = -1;
+ AdvanceTime(200);
+ EqualToExpected();
+
+ expected_.is_keyframe = false;
+ expected_.start_layer = 2;
+ for (int l = 0; l < 3; ++l)
+ expected_.layer[l].ref_buf1 = l;
+ AdvanceTime(200);
+ EqualToExpected();
+}
+
+// Test that the bitrate calculations are
+// correct when the timestamp wraps.
+TEST_F(ScreenshareLayerTestVP9, TimestampWrap) {
+ InitScreenshareLayers(2);
+ ConfigureBitrateForLayer(100, 0);
+
+ expected_.layer[0].upd_buf = 0;
+ expected_.layer[0].ref_buf1 = 0;
+ expected_.layer[1].upd_buf = 1;
+ expected_.layer[1].ref_buf1 = 1;
+ expected_.start_layer = 0;
+ expected_.stop_layer = 1;
+
+ // Advance time to just before the timestamp wraps.
+ AdvanceTime(std::numeric_limits<uint32_t>::max() / (kTickFrequency / 1000));
+ EqualToExpected();
+ AddKilobitsToLayer(200, 0);
+
+ // Wrap
+ expected_.start_layer = 1;
+ AdvanceTime(1);
+ EqualToExpected();
+}
+
+} // namespace webrtc
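The tests above map the simulated clock's milliseconds into the 90 kHz RTP
timestamp domain via kTickFrequency. A small sketch of that conversion and of
why the TimestampWrap case works (helper names are illustrative, not from
this patch):

    #include <cstdint>

    const uint32_t kTicksPerMs = 90000 / 1000;  // 90 kHz RTP clock.

    uint32_t MsToRtpTimestamp(int64_t time_ms) {
      return static_cast<uint32_t>(time_ms * kTicksPerMs);
    }

    int64_t ElapsedMs(uint32_t old_ts, uint32_t new_ts) {
      // uint32_t subtraction is modulo 2^32, so the difference stays
      // correct across a wrap, as in GetSuperFrameSettings() above.
      return static_cast<uint32_t>(new_ts - old_ts) / kTicksPerMs;
    }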
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9.gyp b/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
index 752521c5cb..8993d79bd7 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
@@ -14,30 +14,26 @@
{
'target_name': 'webrtc_vp9',
'type': 'static_library',
- 'dependencies': [
- '<(webrtc_root)/common_video/common_video.gyp:common_video',
- '<(webrtc_root)/modules/video_coding/utility/video_coding_utility.gyp:video_coding_utility',
- '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
- ],
'conditions': [
['build_libvpx==1', {
'dependencies': [
'<(libvpx_dir)/libvpx.gyp:libvpx_new',
],
}],
- ['build_vp9==1', {
- 'sources': [
- 'include/vp9.h',
- 'vp9_frame_buffer_pool.cc',
- 'vp9_frame_buffer_pool.h',
- 'vp9_impl.cc',
- 'vp9_impl.h',
- ],
- }, {
- 'sources': [
- 'vp9_dummy_impl.cc',
- ],
- }],
+ ],
+ 'dependencies': [
+ '<(webrtc_root)/common_video/common_video.gyp:common_video',
+ '<(webrtc_root)/modules/video_coding/utility/video_coding_utility.gyp:video_coding_utility',
+ '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
+ ],
+ 'sources': [
+ 'include/vp9.h',
+ 'screenshare_layers.cc',
+ 'screenshare_layers.h',
+ 'vp9_frame_buffer_pool.cc',
+ 'vp9_frame_buffer_pool.h',
+ 'vp9_impl.cc',
+ 'vp9_impl.h',
],
},
],
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_dummy_impl.cc b/webrtc/modules/video_coding/codecs/vp9/vp9_dummy_impl.cc
deleted file mode 100644
index 491ccbe79c..0000000000
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_dummy_impl.cc
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- *
- */
-
-// This file contains an implementation of empty webrtc VP9 encoder/decoder
-// factories so it is possible to build webrtc without linking with vp9.
-#include "webrtc/modules/video_coding/codecs/vp9/vp9_impl.h"
-
-namespace webrtc {
-VP9Encoder* VP9Encoder::Create() { return nullptr; }
-VP9Decoder* VP9Decoder::Create() { return nullptr; }
-}
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc b/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
index bedbe68ca8..62c05d34fa 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
@@ -16,7 +16,7 @@
#include "vpx/vpx_frame_buffer.h"
#include "webrtc/base/checks.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/base/logging.h"
namespace webrtc {
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
index 0ca7eeabe9..e554795519 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -21,36 +21,31 @@
#include "vpx/vp8cx.h"
#include "vpx/vp8dx.h"
-#include "webrtc/base/bind.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/keep_ref_until_done.h"
+#include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h"
#include "webrtc/common.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
#include "webrtc/system_wrappers/include/tick_util.h"
-namespace {
-
-// VP9DecoderImpl::ReturnFrame helper function used with WrappedI420Buffer.
-static void WrappedI420BufferNoLongerUsedCb(
- webrtc::Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer) {
- img_buffer->Release();
-}
-
-} // anonymous namespace
-
namespace webrtc {
// Only positive speeds, range for real-time coding currently is: 5 - 8.
// Lower means slower/better quality, higher means fastest/lower quality.
int GetCpuSpeed(int width, int height) {
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ return 8;
+#else
// For smaller resolutions, use lower speed setting (get some coding gain at
// the cost of increased encoding complexity).
if (width * height <= 352 * 288)
return 5;
else
return 7;
+#endif
}
VP9Encoder* VP9Encoder::Create() {
@@ -59,7 +54,7 @@ VP9Encoder* VP9Encoder::Create() {
void VP9EncoderImpl::EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
void* user_data) {
- VP9EncoderImpl* enc = (VP9EncoderImpl*)(user_data);
+ VP9EncoderImpl* enc = static_cast<VP9EncoderImpl*>(user_data);
enc->GetEncodedLayerFrame(pkt);
}
@@ -76,9 +71,12 @@ VP9EncoderImpl::VP9EncoderImpl()
raw_(NULL),
input_image_(NULL),
tl0_pic_idx_(0),
- gof_idx_(0),
+ frames_since_kf_(0),
num_temporal_layers_(0),
- num_spatial_layers_(0) {
+ num_spatial_layers_(0),
+ frames_encoded_(0),
+ // Use two spatial layers when screensharing with flexible mode.
+ spatial_layer_(new ScreenshareLayersVP9(2)) {
memset(&codec_, 0, sizeof(codec_));
uint32_t seed = static_cast<uint32_t>(TickTime::MillisecondTimestamp());
srand(seed);
@@ -90,7 +88,7 @@ VP9EncoderImpl::~VP9EncoderImpl() {
int VP9EncoderImpl::Release() {
if (encoded_image_._buffer != NULL) {
- delete [] encoded_image_._buffer;
+ delete[] encoded_image_._buffer;
encoded_image_._buffer = NULL;
}
if (encoder_ != NULL) {
@@ -112,42 +110,72 @@ int VP9EncoderImpl::Release() {
return WEBRTC_VIDEO_CODEC_OK;
}
+bool VP9EncoderImpl::ExplicitlyConfiguredSpatialLayers() const {
+ // We check target_bitrate_bps of the 0th layer to see if the spatial layers
+ // (i.e. bitrates) were explicitly configured.
+ return num_spatial_layers_ > 1 &&
+ codec_.spatialLayers[0].target_bitrate_bps > 0;
+}
+
bool VP9EncoderImpl::SetSvcRates() {
- float rate_ratio[VPX_MAX_LAYERS] = {0};
- float total = 0;
uint8_t i = 0;
- for (i = 0; i < num_spatial_layers_; ++i) {
- if (svc_internal_.svc_params.scaling_factor_num[i] <= 0 ||
- svc_internal_.svc_params.scaling_factor_den[i] <= 0) {
+ if (ExplicitlyConfiguredSpatialLayers()) {
+ if (num_temporal_layers_ > 1) {
+ LOG(LS_ERROR) << "Multiple temporal layers when manually specifying "
+ "spatial layers not implemented yet!";
return false;
}
- rate_ratio[i] = static_cast<float>(
- svc_internal_.svc_params.scaling_factor_num[i]) /
- svc_internal_.svc_params.scaling_factor_den[i];
- total += rate_ratio[i];
- }
-
- for (i = 0; i < num_spatial_layers_; ++i) {
- config_->ss_target_bitrate[i] = static_cast<unsigned int>(
- config_->rc_target_bitrate * rate_ratio[i] / total);
- if (num_temporal_layers_ == 1) {
- config_->layer_target_bitrate[i] = config_->ss_target_bitrate[i];
- } else if (num_temporal_layers_ == 2) {
- config_->layer_target_bitrate[i * num_temporal_layers_] =
- config_->ss_target_bitrate[i] * 2 / 3;
- config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
- config_->ss_target_bitrate[i];
- } else if (num_temporal_layers_ == 3) {
- config_->layer_target_bitrate[i * num_temporal_layers_] =
- config_->ss_target_bitrate[i] / 2;
- config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
- config_->layer_target_bitrate[i * num_temporal_layers_] +
- (config_->ss_target_bitrate[i] / 4);
- config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
- config_->ss_target_bitrate[i];
- } else {
- return false;
+ int total_bitrate_bps = 0;
+ for (i = 0; i < num_spatial_layers_; ++i)
+ total_bitrate_bps += codec_.spatialLayers[i].target_bitrate_bps;
+ // If the total bitrate now differs from the originally specified one,
+ // rescale each layer's bitrate so that the original ratios are kept.
+ for (i = 0; i < num_spatial_layers_; ++i) {
+ config_->ss_target_bitrate[i] = config_->layer_target_bitrate[i] =
+ static_cast<int>(static_cast<int64_t>(config_->rc_target_bitrate) *
+ codec_.spatialLayers[i].target_bitrate_bps /
+ total_bitrate_bps);
+ }
+ } else {
+ float rate_ratio[VPX_MAX_LAYERS] = {0};
+ float total = 0;
+
+ for (i = 0; i < num_spatial_layers_; ++i) {
+ if (svc_internal_.svc_params.scaling_factor_num[i] <= 0 ||
+ svc_internal_.svc_params.scaling_factor_den[i] <= 0) {
+ LOG(LS_ERROR) << "Scaling factors not specified!";
+ return false;
+ }
+ rate_ratio[i] =
+ static_cast<float>(svc_internal_.svc_params.scaling_factor_num[i]) /
+ svc_internal_.svc_params.scaling_factor_den[i];
+ total += rate_ratio[i];
+ }
+
+ for (i = 0; i < num_spatial_layers_; ++i) {
+ config_->ss_target_bitrate[i] = static_cast<unsigned int>(
+ config_->rc_target_bitrate * rate_ratio[i] / total);
+ if (num_temporal_layers_ == 1) {
+ config_->layer_target_bitrate[i] = config_->ss_target_bitrate[i];
+ } else if (num_temporal_layers_ == 2) {
+ config_->layer_target_bitrate[i * num_temporal_layers_] =
+ config_->ss_target_bitrate[i] * 2 / 3;
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+ config_->ss_target_bitrate[i];
+ } else if (num_temporal_layers_ == 3) {
+ config_->layer_target_bitrate[i * num_temporal_layers_] =
+ config_->ss_target_bitrate[i] / 2;
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+ config_->layer_target_bitrate[i * num_temporal_layers_] +
+ (config_->ss_target_bitrate[i] / 4);
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
+ config_->ss_target_bitrate[i];
+ } else {
+ LOG(LS_ERROR) << "Unsupported number of temporal layers: "
+ << num_temporal_layers_;
+ return false;
+ }
}
}
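As a worked example of the two branches above (hypothetical numbers, not taken from this change): two explicitly configured spatial layers at 300 and 900 kbps keep their 1:3 ratio when rc_target_bitrate later becomes 600 kbps, and the default branch splits each spatial target cumulatively across temporal layers.

    // Explicit branch: 600 * 300/1200 = 150 kbps, 600 * 900/1200 = 450 kbps.
    // Default branch with three temporal layers, ss_target_bitrate[i] == 400:
    //   TL0             = 400 / 2       = 200
    //   TL0 + TL1       = 200 + 400 / 4 = 300
    //   TL0 + TL1 + TL2                 = 400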
@@ -178,6 +206,7 @@ int VP9EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
}
config_->rc_target_bitrate = new_bitrate_kbit;
codec_.maxFramerate = new_framerate;
+ spatial_layer_->ConfigureBitrate(new_bitrate_kbit, 0);
if (!SetSvcRates()) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
@@ -216,6 +245,7 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
if (inst->codecSpecific.VP9.numberOfSpatialLayers > 2) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
+
int retVal = Release();
if (retVal < 0) {
return retVal;
@@ -237,10 +267,10 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
num_temporal_layers_ = 1;
// Random start; 16 bits is enough.
- picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF;
+ picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF; // NOLINT
// Allocate memory for encoded image
if (encoded_image_._buffer != NULL) {
- delete [] encoded_image_._buffer;
+ delete[] encoded_image_._buffer;
}
encoded_image_._size = CalcBufferSize(kI420, codec_.width, codec_.height);
encoded_image_._buffer = new uint8_t[encoded_image_._size];
@@ -248,8 +278,8 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
// Create a wrapper for the image, with the image data set to NULL; the
// actual pointer is set in Encode(). Align is set to 1 since it is
// meaningless (no actual memory is allocated).
- raw_ = vpx_img_wrap(NULL, VPX_IMG_FMT_I420, codec_.width, codec_.height,
- 1, NULL);
+ raw_ = vpx_img_wrap(NULL, VPX_IMG_FMT_I420, codec_.width, codec_.height, 1,
+ NULL);
// Populate encoder configuration with default values.
if (vpx_codec_enc_config_default(vpx_codec_vp9_cx(), config_, 0)) {
return WEBRTC_VIDEO_CODEC_ERROR;
@@ -264,8 +294,8 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
config_->g_lag_in_frames = 0; // 0- no frame lagging
config_->g_threads = 1;
// Rate control settings.
- config_->rc_dropframe_thresh = inst->codecSpecific.VP9.frameDroppingOn ?
- 30 : 0;
+ config_->rc_dropframe_thresh =
+ inst->codecSpecific.VP9.frameDroppingOn ? 30 : 0;
config_->rc_end_usage = VPX_CBR;
config_->g_pass = VPX_RC_ONE_PASS;
config_->rc_min_quantizer = 2;
@@ -277,24 +307,32 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
config_->rc_buf_sz = 1000;
// Set the maximum target size of any key-frame.
rc_max_intra_target_ = MaxIntraTarget(config_->rc_buf_optimal_sz);
- if (inst->codecSpecific.VP9.keyFrameInterval > 0) {
+ if (inst->codecSpecific.VP9.keyFrameInterval > 0) {
config_->kf_mode = VPX_KF_AUTO;
config_->kf_max_dist = inst->codecSpecific.VP9.keyFrameInterval;
+ // Needs to be set (in SVC mode) to get the correct periodic key frame
+ // interval (has no effect in non-SVC mode).
+ config_->kf_min_dist = config_->kf_max_dist;
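+ // E.g. a (hypothetical) keyFrameInterval of 3000 then yields a keyframe
+ // exactly every 3000 frames in SVC mode rather than "at most" every 3000.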
} else {
config_->kf_mode = VPX_KF_DISABLED;
}
- config_->rc_resize_allowed = inst->codecSpecific.VP9.automaticResizeOn ?
- 1 : 0;
+ config_->rc_resize_allowed =
+ inst->codecSpecific.VP9.automaticResizeOn ? 1 : 0;
// Determine number of threads based on the image size and #cores.
- config_->g_threads = NumberOfThreads(config_->g_w,
- config_->g_h,
- number_of_cores);
+ config_->g_threads =
+ NumberOfThreads(config_->g_w, config_->g_h, number_of_cores);
cpu_speed_ = GetCpuSpeed(config_->g_w, config_->g_h);
// TODO(asapersson): Check configuration of temporal switch up and increase
// pattern length.
- if (num_temporal_layers_ == 1) {
+ is_flexible_mode_ = inst->codecSpecific.VP9.flexibleMode;
+ if (is_flexible_mode_) {
+ config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
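+ // In bypass mode libvpx applies no temporal-layering pattern of its own;
+ // each frame is steered explicitly via VP9E_SET_SVC_LAYER_ID and
+ // VP9E_SET_SVC_REF_FRAME_CONFIG (see Encode() below).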
+ config_->ts_number_layers = num_temporal_layers_;
+ if (codec_.mode == kScreensharing)
+ spatial_layer_->ConfigureBitrate(inst->startBitrate, 0);
+ } else if (num_temporal_layers_ == 1) {
gof_.SetGofInfoVP9(kTemporalStructureMode1);
config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING;
config_->ts_number_layers = 1;
@@ -326,7 +364,7 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
- tl0_pic_idx_ = static_cast<uint8_t>(rand());
+ tl0_pic_idx_ = static_cast<uint8_t>(rand()); // NOLINT
return InitAndSetControlSettings(inst);
}
@@ -347,16 +385,28 @@ int VP9EncoderImpl::NumberOfThreads(int width,
}
int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
- config_->ss_number_layers = num_spatial_layers_;
-
- int scaling_factor_num = 256;
- for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
+ // Set QP-min/max per spatial and temporal layer.
+ int tot_num_layers = num_spatial_layers_ * num_temporal_layers_;
+ for (int i = 0; i < tot_num_layers; ++i) {
svc_internal_.svc_params.max_quantizers[i] = config_->rc_max_quantizer;
svc_internal_.svc_params.min_quantizers[i] = config_->rc_min_quantizer;
- // 1:2 scaling in each dimension.
- svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num;
- svc_internal_.svc_params.scaling_factor_den[i] = 256;
- scaling_factor_num /= 2;
+ }
+ config_->ss_number_layers = num_spatial_layers_;
+ if (ExplicitlyConfiguredSpatialLayers()) {
+ for (int i = 0; i < num_spatial_layers_; ++i) {
+ const auto& layer = codec_.spatialLayers[i];
+ svc_internal_.svc_params.scaling_factor_num[i] = layer.scaling_factor_num;
+ svc_internal_.svc_params.scaling_factor_den[i] = layer.scaling_factor_den;
+ }
+ } else {
+ int scaling_factor_num = 256;
+ for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
+ // 1:2 scaling in each dimension.
+ svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num;
+ svc_internal_.svc_params.scaling_factor_den[i] = 256;
+ if (codec_.mode != kScreensharing)
+ scaling_factor_num /= 2;
+ }
}
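+ // Net effect with three spatial layers (hypothetical count): scaling
+ // factors 64/256, 128/256 and 256/256, i.e. quarter, half and full
+ // resolution per dimension; in screenshare mode all layers stay 256/256.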
if (!SetSvcRates()) {
@@ -381,8 +431,10 @@ int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
}
// Register callback for getting each spatial layer.
vpx_codec_priv_output_cx_pkt_cb_pair_t cbp = {
- VP9EncoderImpl::EncoderOutputCodedPacketCallback, (void*)(this)};
- vpx_codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK, (void*)(&cbp));
+ VP9EncoderImpl::EncoderOutputCodedPacketCallback,
+ reinterpret_cast<void*>(this)};
+ vpx_codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK,
+ reinterpret_cast<void*>(&cbp));
// Control function to set the number of column tiles in encoding a frame, in
// log2 unit: e.g., 0 = 1 tile column, 1 = 2 tile columns, 2 = 4 tile columns.
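Given the log2 convention just described, a helper that derives this setting from a thread count might look like the following sketch (TileColumnsLog2 is a made-up name, not code from this change):

    static int TileColumnsLog2(int threads) {
      int log2 = 0;
      // Largest power of two that does not exceed |threads|.
      while ((1 << (log2 + 1)) <= threads)
        ++log2;  // 1 thread -> 0, 2 -> 1, 4 -> 2 (i.e. 4 tile columns).
      return log2;
    }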
@@ -417,7 +469,7 @@ uint32_t VP9EncoderImpl::MaxIntraTarget(uint32_t optimal_buffer_size) {
optimal_buffer_size * scale_par * codec_.maxFramerate / 10;
// Don't go below 3 times the per frame bandwidth.
const uint32_t min_intra_size = 300;
- return (target_pct < min_intra_size) ? min_intra_size: target_pct;
+ return (target_pct < min_intra_size) ? min_intra_size : target_pct;
}
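+// Worked numbers (hypothetical; scale_par is set earlier in the function):
+// with optimal_buffer_size == 500, scale_par == 0.5 and maxFramerate == 30,
+// target_pct == 500 * 0.5 * 30 / 10 == 750, i.e. a keyframe may spend up to
+// 7.5x the average per-frame bandwidth, floored at 3x (the 300 above).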
int VP9EncoderImpl::Encode(const VideoFrame& input_image,
@@ -455,12 +507,35 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane);
raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane);
- int flags = 0;
+ vpx_enc_frame_flags_t flags = 0;
bool send_keyframe = (frame_type == kVideoFrameKey);
if (send_keyframe) {
// Key frame request from caller.
flags = VPX_EFLAG_FORCE_KF;
}
+
+ if (is_flexible_mode_) {
+ SuperFrameRefSettings settings;
+
+ // These structs are copied when calling vpx_codec_control,
+ // therefore it is ok for them to go out of scope.
+ vpx_svc_ref_frame_config enc_layer_conf;
+ vpx_svc_layer_id layer_id;
+
+ if (codec_.mode == kRealtimeVideo) {
+ // Real-time video is not yet implemented in flexible mode.
+ RTC_NOTREACHED();
+ } else {
+ settings = spatial_layer_->GetSuperFrameSettings(input_image.timestamp(),
+ send_keyframe);
+ }
+ enc_layer_conf = GenerateRefsAndFlags(settings);
+ layer_id.temporal_layer_id = 0;
+ layer_id.spatial_layer_id = settings.start_layer;
+ vpx_codec_control(encoder_, VP9E_SET_SVC_LAYER_ID, &layer_id);
+ vpx_codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG, &enc_layer_conf);
+ }
+
assert(codec_.maxFramerate > 0);
uint32_t duration = 90000 / codec_.maxFramerate;
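+ // 90000 is the 90 kHz RTP video clock: at maxFramerate == 30 each frame
+ // advances the timestamp by 90000 / 30 == 3000 ticks.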
if (vpx_codec_encode(encoder_, raw_, timestamp_, duration, flags,
@@ -473,12 +548,12 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
}
void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
- const vpx_codec_cx_pkt& pkt,
- uint32_t timestamp) {
+ const vpx_codec_cx_pkt& pkt,
+ uint32_t timestamp) {
assert(codec_specific != NULL);
codec_specific->codecType = kVideoCodecVP9;
- CodecSpecificInfoVP9 *vp9_info = &(codec_specific->codecSpecific.VP9);
- // TODO(asapersson): Set correct values.
+ CodecSpecificInfoVP9* vp9_info = &(codec_specific->codecSpecific.VP9);
+ // TODO(asapersson): Set correct value.
vp9_info->inter_pic_predicted =
(pkt.data.frame.flags & VPX_FRAME_IS_KEY) ? false : true;
vp9_info->flexible_mode = codec_.codecSpecific.VP9.flexibleMode;
@@ -486,9 +561,6 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
!codec_.codecSpecific.VP9.flexibleMode)
? true
: false;
- if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) {
- gof_idx_ = 0;
- }
vpx_svc_layer_id_t layer_id = {0};
vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
@@ -511,25 +583,31 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
vp9_info->ss_data_available = false;
}
- if (vp9_info->flexible_mode) {
- vp9_info->gof_idx = kNoGofIdx;
+ // TODO(asapersson): this info has to be obtained from the encoder.
+ vp9_info->temporal_up_switch = false;
+
+ bool is_first_frame = false;
+ if (is_flexible_mode_) {
+ is_first_frame =
+ layer_id.spatial_layer_id == spatial_layer_->GetStartLayer();
} else {
- vp9_info->gof_idx =
- static_cast<uint8_t>(gof_idx_++ % gof_.num_frames_in_gof);
+ is_first_frame = layer_id.spatial_layer_id == 0;
}
- // TODO(asapersson): this info has to be obtained from the encoder.
- vp9_info->temporal_up_switch = true;
-
- if (layer_id.spatial_layer_id == 0) {
+ if (is_first_frame) {
picture_id_ = (picture_id_ + 1) & 0x7FFF;
// TODO(asapersson): this info has to be obtained from the encoder.
vp9_info->inter_layer_predicted = false;
+ ++frames_since_kf_;
} else {
// TODO(asapersson): this info has to be obtained from the encoder.
vp9_info->inter_layer_predicted = true;
}
+ if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) {
+ frames_since_kf_ = 0;
+ }
+
vp9_info->picture_id = picture_id_;
if (!vp9_info->flexible_mode) {
@@ -542,6 +620,20 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
// Always populate this, so that the packetizer can properly set the marker
// bit.
vp9_info->num_spatial_layers = num_spatial_layers_;
+
+ vp9_info->num_ref_pics = 0;
+ if (vp9_info->flexible_mode) {
+ vp9_info->gof_idx = kNoGofIdx;
+ vp9_info->num_ref_pics = num_ref_pics_[layer_id.spatial_layer_id];
+ for (int i = 0; i < num_ref_pics_[layer_id.spatial_layer_id]; ++i) {
+ vp9_info->p_diff[i] = p_diff_[layer_id.spatial_layer_id][i];
+ }
+ } else {
+ vp9_info->gof_idx =
+ static_cast<uint8_t>(frames_since_kf_ % gof_.num_frames_in_gof);
+ vp9_info->temporal_up_switch = gof_.temporal_up_switch[vp9_info->gof_idx];
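+ // E.g. with kTemporalStructureMode3 (a 4-frame GOF, assuming that mode's
+ // definition) frames_since_kf_ of 0,1,2,3,4,... maps to gof_idx
+ // 0,1,2,3,0,... and a keyframe resets the cycle via frames_since_kf_ = 0.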
+ }
+
if (vp9_info->ss_data_available) {
vp9_info->spatial_layer_resolution_present = true;
for (size_t i = 0; i < vp9_info->num_spatial_layers; ++i) {
@@ -577,6 +669,14 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
frag_info.fragmentationPlType[part_idx] = 0;
frag_info.fragmentationTimeDiff[part_idx] = 0;
encoded_image_._length += static_cast<uint32_t>(pkt->data.frame.sz);
+
+ vpx_svc_layer_id_t layer_id = {0};
+ vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
+ if (is_flexible_mode_ && codec_.mode == kScreensharing)
+ spatial_layer_->LayerFrameEncoded(
+ static_cast<unsigned int>(encoded_image_._length),
+ layer_id.spatial_layer_id);
+
assert(encoded_image_._length <= encoded_image_._size);
// End of frame.
@@ -598,6 +698,108 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
return WEBRTC_VIDEO_CODEC_OK;
}
+vpx_svc_ref_frame_config VP9EncoderImpl::GenerateRefsAndFlags(
+ const SuperFrameRefSettings& settings) {
+ static const vpx_enc_frame_flags_t kAllFlags =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST |
+ VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_GF;
+ vpx_svc_ref_frame_config sf_conf = {};
+ if (settings.is_keyframe) {
+ // Used later on to make sure we don't make any invalid references.
+ memset(buffer_updated_at_frame_, -1, sizeof(buffer_updated_at_frame_));
+ for (int layer = settings.start_layer; layer <= settings.stop_layer;
+ ++layer) {
+ num_ref_pics_[layer] = 0;
+ buffer_updated_at_frame_[settings.layer[layer].upd_buf] = frames_encoded_;
+ // When encoding a keyframe, only the alt_fb_idx is used to specify
+ // which layer ends up in which buffer.
+ sf_conf.alt_fb_idx[layer] = settings.layer[layer].upd_buf;
+ }
+ } else {
+ for (int layer_idx = settings.start_layer; layer_idx <= settings.stop_layer;
+ ++layer_idx) {
+ vpx_enc_frame_flags_t layer_flags = kAllFlags;
+ num_ref_pics_[layer_idx] = 0;
+ int8_t refs[3] = {settings.layer[layer_idx].ref_buf1,
+ settings.layer[layer_idx].ref_buf2,
+ settings.layer[layer_idx].ref_buf3};
+
+ for (unsigned int ref_idx = 0; ref_idx < kMaxVp9RefPics; ++ref_idx) {
+ if (refs[ref_idx] == -1)
+ continue;
+
+ RTC_DCHECK_GE(refs[ref_idx], 0);
+ RTC_DCHECK_LE(refs[ref_idx], 7);
+ // It is easier to start from kAllFlags and clear bits than to build
+ // the flags up from 0.
+ switch (num_ref_pics_[layer_idx]) {
+ case 0: {
+ sf_conf.lst_fb_idx[layer_idx] = refs[ref_idx];
+ layer_flags &= ~VP8_EFLAG_NO_REF_LAST;
+ break;
+ }
+ case 1: {
+ sf_conf.gld_fb_idx[layer_idx] = refs[ref_idx];
+ layer_flags &= ~VP8_EFLAG_NO_REF_GF;
+ break;
+ }
+ case 2: {
+ sf_conf.alt_fb_idx[layer_idx] = refs[ref_idx];
+ layer_flags &= ~VP8_EFLAG_NO_REF_ARF;
+ break;
+ }
+ }
+ // Make sure we don't reference a buffer that hasn't been
+ // used at all or hasn't been used since a keyframe.
+ RTC_DCHECK_NE(buffer_updated_at_frame_[refs[ref_idx]], -1);
+
+ p_diff_[layer_idx][num_ref_pics_[layer_idx]] =
+ frames_encoded_ - buffer_updated_at_frame_[refs[ref_idx]];
+ num_ref_pics_[layer_idx]++;
+ }
+
+ bool upd_buf_same_as_a_ref = false;
+ if (settings.layer[layer_idx].upd_buf != -1) {
+ for (unsigned int ref_idx = 0; ref_idx < kMaxVp9RefPics; ++ref_idx) {
+ if (settings.layer[layer_idx].upd_buf == refs[ref_idx]) {
+ switch (ref_idx) {
+ case 0: {
+ layer_flags &= ~VP8_EFLAG_NO_UPD_LAST;
+ break;
+ }
+ case 1: {
+ layer_flags &= ~VP8_EFLAG_NO_UPD_GF;
+ break;
+ }
+ case 2: {
+ layer_flags &= ~VP8_EFLAG_NO_UPD_ARF;
+ break;
+ }
+ }
+ upd_buf_same_as_a_ref = true;
+ break;
+ }
+ }
+ if (!upd_buf_same_as_a_ref) {
+ // If we have three references and a buffer is specified to be
+ // updated, then that buffer must be the same as one of the
+ // three references.
+ RTC_CHECK_LT(num_ref_pics_[layer_idx], kMaxVp9RefPics);
+
+ sf_conf.alt_fb_idx[layer_idx] = settings.layer[layer_idx].upd_buf;
+ layer_flags ^= VP8_EFLAG_NO_UPD_ARF;
+ }
+
+ int updated_buffer = settings.layer[layer_idx].upd_buf;
+ buffer_updated_at_frame_[updated_buffer] = frames_encoded_;
+ sf_conf.frame_flags[layer_idx] = layer_flags;
+ }
+ }
+ }
+ ++frames_encoded_;
+ return sf_conf;
+}
+
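+// Usage sketch (hypothetical buffer numbers): for a two-layer delta frame
+// where SL0 refreshes buffer 0 and SL1 predicts from buffer 0 plus its own
+// buffer 1:
+//   SuperFrameRefSettings s;
+//   s.start_layer = 0; s.stop_layer = 1;
+//   s.layer[0].ref_buf1 = 0; s.layer[0].upd_buf = 0;
+//   s.layer[1].ref_buf1 = 0; s.layer[1].ref_buf2 = 1; s.layer[1].upd_buf = 1;
+// GenerateRefsAndFlags(s) clears the matching VP8_EFLAG_NO_REF_*/NO_UPD_*
+// bits for each layer and records p_diff_ for the RTP packetizer.
+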
int VP9EncoderImpl::SetChannelParameters(uint32_t packet_loss, int64_t rtt) {
return WEBRTC_VIDEO_CODEC_OK;
}
@@ -608,6 +810,10 @@ int VP9EncoderImpl::RegisterEncodeCompleteCallback(
return WEBRTC_VIDEO_CODEC_OK;
}
+const char* VP9EncoderImpl::ImplementationName() const {
+ return "libvpx";
+}
+
VP9Decoder* VP9Decoder::Create() {
return new VP9DecoderImpl();
}
@@ -652,7 +858,7 @@ int VP9DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
if (decoder_ == NULL) {
decoder_ = new vpx_codec_ctx_t;
}
- vpx_codec_dec_cfg_t cfg;
+ vpx_codec_dec_cfg_t cfg;
// Setting number of threads to a constant value (1)
cfg.threads = 1;
cfg.h = cfg.w = 0; // set after decode
@@ -705,10 +911,8 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
}
// During decode libvpx may get and release buffers from |frame_buffer_pool_|.
// In practice libvpx keeps a few (~3-4) buffers alive at a time.
- if (vpx_codec_decode(decoder_,
- buffer,
- static_cast<unsigned int>(input_image._length),
- 0,
+ if (vpx_codec_decode(decoder_, buffer,
+ static_cast<unsigned int>(input_image._length), 0,
VPX_DL_REALTIME)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -730,24 +934,22 @@ int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img, uint32_t timestamp) {
}
// This buffer contains all of |img|'s image data, a reference counted
- // Vp9FrameBuffer. Performing AddRef/Release ensures it is not released and
- // recycled during use (libvpx is done with the buffers after a few
+ // Vp9FrameBuffer. (libvpx is done with the buffers after a few
// vpx_codec_decode calls or vpx_codec_destroy).
Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer =
static_cast<Vp9FrameBufferPool::Vp9FrameBuffer*>(img->fb_priv);
- img_buffer->AddRef();
// The buffer can be used directly by the VideoFrame (without copy) by
// using a WrappedI420Buffer.
rtc::scoped_refptr<WrappedI420Buffer> img_wrapped_buffer(
new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
- img->d_w, img->d_h,
- img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
- img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
- img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+ img->d_w, img->d_h, img->planes[VPX_PLANE_Y],
+ img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U],
+ img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V],
+ img->stride[VPX_PLANE_V],
// WrappedI420Buffer releases its frame buffer through a callback
// function; this is where |img_buffer| should be released.
- rtc::Bind(&WrappedI420BufferNoLongerUsedCb, img_buffer)));
+ rtc::KeepRefUntilDone(img_buffer)));
VideoFrame decoded_image;
decoded_image.set_video_frame_buffer(img_wrapped_buffer);
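rtc::KeepRefUntilDone replaces the manual AddRef/Release pair that was removed above: it binds a scoped_refptr into a callback that does nothing, so the reference is held until WrappedI420Buffer destroys the callback. A simplified sketch of the idea (the real helper lives in webrtc/base/keep_ref_until_done.h; the names below are illustrative):

    template <class T>
    static void HoldRef(rtc::scoped_refptr<T>) {}  // Deliberately empty.

    template <class T>
    rtc::Callback0<void> KeepRef(T* object) {
      // The bound scoped_refptr keeps |object| alive until the returned
      // callback, stored inside WrappedI420Buffer, is destroyed.
      return rtc::Bind(&HoldRef<T>, rtc::scoped_refptr<T>(object));
    }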
@@ -781,4 +983,9 @@ int VP9DecoderImpl::Release() {
inited_ = false;
return WEBRTC_VIDEO_CODEC_OK;
}
+
+const char* VP9DecoderImpl::ImplementationName() const {
+ return "libvpx";
+}
+
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
index f9c123079e..bfa4540304 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
@@ -9,8 +9,10 @@
*
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_IMPL_H_
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_IMPL_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_
+
+#include <vector>
#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
#include "webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
@@ -21,6 +23,8 @@
namespace webrtc {
+class ScreenshareLayersVP9;
+
class VP9EncoderImpl : public VP9Encoder {
public:
VP9EncoderImpl();
@@ -45,6 +49,22 @@ class VP9EncoderImpl : public VP9Encoder {
void OnDroppedFrame() override {}
+ const char* ImplementationName() const override;
+
+ struct LayerFrameRefSettings {
+ int8_t upd_buf = -1; // -1 - no update, 0..7 - update buffer 0..7
+ int8_t ref_buf1 = -1; // -1 - no reference, 0..7 - reference buffer 0..7
+ int8_t ref_buf2 = -1; // -1 - no reference, 0..7 - reference buffer 0..7
+ int8_t ref_buf3 = -1; // -1 - no reference, 0..7 - reference buffer 0..7
+ };
+
+ struct SuperFrameRefSettings {
+ LayerFrameRefSettings layer[kMaxVp9NumberOfSpatialLayers];
+ uint8_t start_layer = 0; // The first spatial layer to be encoded.
+ uint8_t stop_layer = 0; // The last spatial layer to be encoded.
+ bool is_keyframe = false;
+ };
+
private:
// Determine number of encoder threads to use.
int NumberOfThreads(int width, int height, int number_of_cores);
@@ -56,8 +76,18 @@ class VP9EncoderImpl : public VP9Encoder {
const vpx_codec_cx_pkt& pkt,
uint32_t timestamp);
+ bool ExplicitlyConfiguredSpatialLayers() const;
bool SetSvcRates();
+ // Used for flexible mode to set the flags and buffer references used
+ // by the encoder. Also calculates the references used by the RTP
+ // packetizer.
+ //
+ // Has to be called for every frame (keyframes included) to update the
+ // state used to calculate references.
+ vpx_svc_ref_frame_config GenerateRefsAndFlags(
+ const SuperFrameRefSettings& settings);
+
virtual int GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt);
// Callback function for outputting packets per spatial layer.
@@ -88,11 +118,18 @@ class VP9EncoderImpl : public VP9Encoder {
GofInfoVP9 gof_; // Contains each frame's temporal information for
// non-flexible mode.
uint8_t tl0_pic_idx_; // Only used in non-flexible mode.
- size_t gof_idx_; // Only used in non-flexible mode.
+ size_t frames_since_kf_;
uint8_t num_temporal_layers_;
uint8_t num_spatial_layers_;
-};
+ // Used for flexible mode.
+ bool is_flexible_mode_;
+ int64_t buffer_updated_at_frame_[kNumVp9Buffers];
+ int64_t frames_encoded_;
+ uint8_t num_ref_pics_[kMaxVp9NumberOfSpatialLayers];
+ uint8_t p_diff_[kMaxVp9NumberOfSpatialLayers][kMaxVp9RefPics];
+ rtc::scoped_ptr<ScreenshareLayersVP9> spatial_layer_;
+};
class VP9DecoderImpl : public VP9Decoder {
public:
@@ -114,6 +151,8 @@ class VP9DecoderImpl : public VP9Decoder {
int Reset() override;
+ const char* ImplementationName() const override;
+
private:
int ReturnFrame(const vpx_image_t* img, uint32_t timeStamp);
@@ -127,4 +166,4 @@ class VP9DecoderImpl : public VP9Decoder {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_IMPL_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_