Diffstat (limited to 'webrtc/modules/video_coding/codecs/vp8')
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc               26
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc     157
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/include/vp8.h                             9
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h               14
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc              43
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc           20
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc  40
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc                     6
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h                      2
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc            4
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc             58
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h              10
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc    33
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc                    14
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h                    282
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/temporal_layers.h                         5
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc                22
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/vp8_factory.h                             1
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc                              262
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/vp8_impl.h                                24
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc                    93
21 files changed, 534 insertions, 591 deletions
diff --git a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
index da6008ba3d..9226fa774c 100644
--- a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
@@ -13,8 +13,8 @@
#include <stdlib.h>
#include <string.h>
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "vpx/vpx_encoder.h"
@@ -41,7 +41,7 @@ int DefaultTemporalLayers::CurrentLayerId() const {
int index = pattern_idx_ % temporal_ids_length_;
assert(index >= 0);
return temporal_ids_[index];
- }
+}
bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
int max_bitrate_kbit,
@@ -56,8 +56,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_periodicity = temporal_ids_length_;
cfg->ts_target_bitrate[0] = bitrateKbit;
cfg->ts_rate_decimator[0] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 1;
temporal_pattern_[0] = kTemporalUpdateLastRefAll;
@@ -74,8 +73,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_target_bitrate[1] = bitrateKbit;
cfg->ts_rate_decimator[0] = 2;
cfg->ts_rate_decimator[1] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 8;
temporal_pattern_[0] = kTemporalUpdateLastAndGoldenRefAltRef;
@@ -103,8 +101,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_rate_decimator[0] = 4;
cfg->ts_rate_decimator[1] = 2;
cfg->ts_rate_decimator[2] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 8;
temporal_pattern_[0] = kTemporalUpdateLastAndGoldenRefAltRef;
@@ -138,8 +135,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_rate_decimator[1] = 4;
cfg->ts_rate_decimator[2] = 2;
cfg->ts_rate_decimator[3] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 16;
temporal_pattern_[0] = kTemporalUpdateLast;
@@ -243,7 +239,7 @@ int DefaultTemporalLayers::EncodeFlags(uint32_t timestamp) {
void DefaultTemporalLayers::PopulateCodecSpecific(
bool base_layer_sync,
- CodecSpecificInfoVP8 *vp8_info,
+ CodecSpecificInfoVP8* vp8_info,
uint32_t timestamp) {
assert(number_of_temporal_layers_ > 0);
assert(0 < temporal_ids_length_);
@@ -254,8 +250,8 @@ void DefaultTemporalLayers::PopulateCodecSpecific(
vp8_info->tl0PicIdx = kNoTl0PicIdx;
} else {
if (base_layer_sync) {
- vp8_info->temporalIdx = 0;
- vp8_info->layerSync = true;
+ vp8_info->temporalIdx = 0;
+ vp8_info->layerSync = true;
} else {
vp8_info->temporalIdx = CurrentLayerId();
TemporalReferences temporal_reference =
@@ -267,7 +263,7 @@ void DefaultTemporalLayers::PopulateCodecSpecific(
kTemporalUpdateGoldenWithoutDependencyRefAltRef ||
temporal_reference == kTemporalUpdateNoneNoRefGoldenRefAltRef ||
(temporal_reference == kTemporalUpdateNone &&
- number_of_temporal_layers_ == 4)) {
+ number_of_temporal_layers_ == 4)) {
vp8_info->layerSync = true;
} else {
vp8_info->layerSync = false;
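
[Editor's aside] The temporal-layer bookkeeping being reformatted above is easier to follow outside the diff. The standalone sketch below (illustrative only; temporal_ids and pattern_length are local names, not WebRTC's) reproduces the 2-layer mapping from frame index to temporal id and layer-sync flag that the unit test in the next file asserts: ids alternate {0, 1, 0, 1, ...} and a base-layer sync point falls on the first enhancement frame of each 8-frame flag cycle.

    // Sketch, not the WebRTC implementation: 2-layer temporal pattern.
    #include <cstdio>

    int main() {
      const int temporal_ids[2] = {0, 1};  // 2-layer id pattern
      const int pattern_length = 8;        // encode flags repeat every 8 frames
      for (int frame = 0; frame < 16; ++frame) {
        int layer_id = temporal_ids[frame % 2];
        // The golden frame encoded without depending on earlier enhancement
        // frames (frame 1 of each cycle) is the layer-sync point.
        bool layer_sync = (frame % pattern_length) == 1;
        std::printf("frame %2d: temporal_idx=%d layer_sync=%d\n",
                    frame, layer_id, layer_sync);
      }
      return 0;
    }

Running this prints temporal ids 0,1,0,1,... with layer_sync set only at frames 1 and 9, matching expected_temporal_idx and expected_layer_sync in the 2Layers test below.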
diff --git a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
index 34121cbcf6..461ba69a72 100644
--- a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
@@ -8,9 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.h"
#include "vpx/vpx_encoder.h"
@@ -19,47 +18,36 @@
namespace webrtc {
enum {
- kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF |
+ kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_REF_ARF,
- kTemporalUpdateGoldenWithoutDependency = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateGolden = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateAltrefWithoutDependency = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateAltref = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF |
+ kTemporalUpdateGoldenWithoutDependency =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateGolden =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateAltrefWithoutDependency =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateAltref = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
- kTemporalUpdateNoneNoRefAltRef = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_GF |
+ kTemporalUpdateNoneNoRefAltRef = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
- kTemporalUpdateNoneNoRefGolden = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_GF |
+ kTemporalUpdateNoneNoRefGolden = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateLastRefAltRef = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF,
- kTemporalUpdateLastAndGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateLastRefAltRef =
+ VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF,
+ kTemporalUpdateLastAndGoldenRefAltRef =
+ VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF,
};
TEST(TemporalLayersTest, 2Layers) {
@@ -68,29 +56,30 @@ TEST(TemporalLayersTest, 2Layers) {
CodecSpecificInfoVP8 vp8_info;
tl.ConfigureBitrates(500, 500, 30, &cfg);
- int expected_flags[16] = { kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- };
- int expected_temporal_idx[16] =
- { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 };
-
- bool expected_layer_sync[16] =
- { false, true, false, false, false, false, false, false,
- false, true, false, false, false, false, false, false };
+ int expected_flags[16] = {
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ };
+ int expected_temporal_idx[16] = {0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1};
+
+ bool expected_layer_sync[16] = {false, true, false, false, false, false,
+ false, false, false, true, false, false,
+ false, false, false, false};
uint32_t timestamp = 0;
for (int i = 0; i < 16; ++i) {
@@ -108,29 +97,30 @@ TEST(TemporalLayersTest, 3Layers) {
CodecSpecificInfoVP8 vp8_info;
tl.ConfigureBitrates(500, 500, 30, &cfg);
- int expected_flags[16] = { kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateNoneNoRefGolden,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateNoneNoRefGolden,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateNone,
+ int expected_flags[16] = {
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateNoneNoRefGolden,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateNoneNoRefGolden,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateNone,
};
- int expected_temporal_idx[16] =
- { 0, 2, 1, 2, 0, 2, 1, 2, 0, 2, 1, 2, 0, 2, 1, 2 };
+ int expected_temporal_idx[16] = {0, 2, 1, 2, 0, 2, 1, 2,
+ 0, 2, 1, 2, 0, 2, 1, 2};
- bool expected_layer_sync[16] =
- { false, true, true, false, false, false, false, false,
- false, true, true, false, false, false, false, false };
+ bool expected_layer_sync[16] = {false, true, true, false, false, false,
+ false, false, false, true, true, false,
+ false, false, false, false};
unsigned int timestamp = 0;
for (int i = 0; i < 16; ++i) {
@@ -165,12 +155,12 @@ TEST(TemporalLayersTest, 4Layers) {
kTemporalUpdateAltref,
kTemporalUpdateNone,
};
- int expected_temporal_idx[16] =
- { 0, 3, 2, 3, 1, 3, 2, 3, 0, 3, 2, 3, 1, 3, 2, 3 };
+ int expected_temporal_idx[16] = {0, 3, 2, 3, 1, 3, 2, 3,
+ 0, 3, 2, 3, 1, 3, 2, 3};
- bool expected_layer_sync[16] =
- { false, true, true, true, true, true, false, true,
- false, true, false, true, false, true, false, true };
+ bool expected_layer_sync[16] = {false, true, true, true, true, true,
+ false, true, false, true, false, true,
+ false, true, false, true};
uint32_t timestamp = 0;
for (int i = 0; i < 16; ++i) {
@@ -198,8 +188,7 @@ TEST(TemporalLayersTest, KeyFrame) {
kTemporalUpdateGoldenRefAltRef,
kTemporalUpdateNone,
};
- int expected_temporal_idx[8] =
- { 0, 0, 0, 0, 0, 0, 0, 2};
+ int expected_temporal_idx[8] = {0, 0, 0, 0, 0, 0, 0, 2};
uint32_t timestamp = 0;
for (int i = 0; i < 7; ++i) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/include/vp8.h b/webrtc/modules/video_coding/codecs/vp8/include/vp8.h
index f5dae471d2..dd3514235d 100644
--- a/webrtc/modules/video_coding/codecs/vp8/include/vp8.h
+++ b/webrtc/modules/video_coding/codecs/vp8/include/vp8.h
@@ -13,7 +13,7 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
@@ -21,16 +21,15 @@ class VP8Encoder : public VideoEncoder {
public:
static VP8Encoder* Create();
- virtual ~VP8Encoder() {};
+ virtual ~VP8Encoder() {}
}; // end of VP8Encoder class
-
class VP8Decoder : public VideoDecoder {
public:
static VP8Decoder* Create();
- virtual ~VP8Decoder() {};
+ virtual ~VP8Decoder() {}
}; // end of VP8Decoder class
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
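
[Editor's aside] For context on the factory functions declared in this header, here is a minimal usage sketch. CreateAndDestroy is a hypothetical helper for illustration; only Create() and the virtual destructors are taken from the header itself.

    // Sketch: creating and destroying codec instances via the factories.
    #include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"

    void CreateAndDestroy() {
      webrtc::VP8Encoder* encoder = webrtc::VP8Encoder::Create();
      webrtc::VP8Decoder* decoder = webrtc::VP8Decoder::Create();
      // ... InitEncode()/InitDecode() and Encode()/Decode() calls go here ...
      delete encoder;  // Both classes declare virtual destructors, so deleting
      delete decoder;  // through the base pointer is safe.
    }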
diff --git a/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h b/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h
index c2cefdd94e..7a27e4429a 100644
--- a/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h
+++ b/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_COMMON_TYPES_H_
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_COMMON_TYPES_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
#include "webrtc/common_types.h"
@@ -19,11 +19,11 @@ namespace webrtc {
// Values as required for the VP8 codec (accumulating).
static const float
kVp8LayerRateAlloction[kMaxTemporalStreams][kMaxTemporalStreams] = {
- {1.0f, 1.0f, 1.0f, 1.0f}, // 1 layer
- {0.6f, 1.0f, 1.0f, 1.0f}, // 2 layers {60%, 40%}
- {0.4f, 0.6f, 1.0f, 1.0f}, // 3 layers {40%, 20%, 40%}
- {0.25f, 0.4f, 0.6f, 1.0f} // 4 layers {25%, 15%, 20%, 40%}
+ {1.0f, 1.0f, 1.0f, 1.0f}, // 1 layer
+ {0.6f, 1.0f, 1.0f, 1.0f}, // 2 layers {60%, 40%}
+ {0.4f, 0.6f, 1.0f, 1.0f}, // 3 layers {40%, 20%, 40%}
+ {0.25f, 0.4f, 0.6f, 1.0f} // 4 layers {25%, 15%, 20%, 40%}
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_COMMON_TYPES_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
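
[Editor's aside] The table above stores accumulating (cumulative) fractions, so a layer's own share is the difference between adjacent entries. A worked example (a sketch, not WebRTC code; kCumulative is the 3-layer row, kTotalKbps an arbitrary total):

    #include <cstdio>

    int main() {
      const float kCumulative[4] = {0.4f, 0.6f, 1.0f, 1.0f};  // 3-layer row
      const int kTotalKbps = 1000;
      float prev = 0.0f;
      for (int layer = 0; layer < 3; ++layer) {
        int cumulative_kbps = static_cast<int>(kCumulative[layer] * kTotalKbps);
        int own_kbps = static_cast<int>((kCumulative[layer] - prev) * kTotalKbps);
        std::printf("layer %d: cumulative %d kbps, own share %d kbps\n",
                    layer, cumulative_kbps, own_kbps);
        prev = kCumulative[layer];
      }
      // Prints 400/400, 600/200, 1000/400 -- the {40%, 20%, 40%} split noted
      // in the table's comments.
      return 0;
    }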
diff --git a/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc b/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc
index 15b5af9200..d22601358f 100644
--- a/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc
@@ -12,7 +12,7 @@
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
@@ -23,7 +23,8 @@ namespace webrtc {
namespace {
enum {
kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF,
+ VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_REF_ARF,
kTemporalUpdateGolden =
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
@@ -37,13 +38,15 @@ enum {
kTemporalUpdateAltref | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF,
kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY,
kTemporalUpdateNoneNoRefAltref = kTemporalUpdateNone | VP8_EFLAG_NO_REF_ARF,
kTemporalUpdateNoneNoRefGoldenRefAltRef =
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY,
kTemporalUpdateGoldenWithoutDependencyRefAltRef =
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
@@ -133,12 +136,14 @@ class RealTimeTemporalLayers : public TemporalLayers {
layer_ids_length_ = sizeof(layer_ids) / sizeof(*layer_ids);
static const int encode_flags[] = {
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateNone
- };
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone};
encode_flags_length_ = sizeof(encode_flags) / sizeof(*layer_ids);
encode_flags_ = encode_flags;
@@ -153,12 +158,14 @@ class RealTimeTemporalLayers : public TemporalLayers {
layer_ids_length_ = sizeof(layer_ids) / sizeof(*layer_ids);
static const int encode_flags[] = {
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateNoneNoRefGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef, kTemporalUpdateNone,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateNone,
- kTemporalUpdateGoldenRefAltRef, kTemporalUpdateNone
- };
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateNoneNoRefGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateNone};
encode_flags_length_ = sizeof(encode_flags) / sizeof(*layer_ids);
encode_flags_ = encode_flags;
@@ -172,8 +179,8 @@ class RealTimeTemporalLayers : public TemporalLayers {
assert(false);
return false;
}
- memcpy(
- cfg->ts_layer_id, layer_ids_, sizeof(unsigned int) * layer_ids_length_);
+ memcpy(cfg->ts_layer_id, layer_ids_,
+ sizeof(unsigned int) * layer_ids_length_);
return true;
}
diff --git a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc
index a922e35712..1838e32eb7 100644
--- a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc
@@ -25,8 +25,7 @@ ReferencePictureSelection::ReferencePictureSelection()
last_sent_ref_update_time_(0),
established_ref_picture_id_(0),
last_refresh_time_(0),
- rtt_(0) {
-}
+ rtt_(0) {}
void ReferencePictureSelection::Init() {
update_golden_next_ = true;
@@ -62,7 +61,8 @@ bool ReferencePictureSelection::ReceivedSLI(uint32_t now_ts) {
return send_refresh;
}
-int ReferencePictureSelection::EncodeFlags(int picture_id, bool send_refresh,
+int ReferencePictureSelection::EncodeFlags(int picture_id,
+ bool send_refresh,
uint32_t now_ts) {
int flags = 0;
// We can't refresh the decoder until we have established the key frame.
@@ -87,12 +87,12 @@ int ReferencePictureSelection::EncodeFlags(int picture_id, bool send_refresh,
received_ack_) {
flags |= VP8_EFLAG_NO_REF_LAST; // Don't reference the last frame.
if (update_golden_next_) {
- flags |= VP8_EFLAG_FORCE_GF; // Update the golden reference.
+ flags |= VP8_EFLAG_FORCE_GF; // Update the golden reference.
flags |= VP8_EFLAG_NO_UPD_ARF; // Don't update alt-ref.
- flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
+ flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
} else {
- flags |= VP8_EFLAG_FORCE_ARF; // Update the alt-ref reference.
- flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
+ flags |= VP8_EFLAG_FORCE_ARF; // Update the alt-ref reference.
+ flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
flags |= VP8_EFLAG_NO_REF_ARF; // Don't reference the alt-ref frame.
}
last_sent_ref_picture_id_ = picture_id;
@@ -103,9 +103,9 @@ int ReferencePictureSelection::EncodeFlags(int picture_id, bool send_refresh,
if (established_golden_)
flags |= VP8_EFLAG_NO_REF_ARF; // Don't reference the alt-ref frame.
else
- flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
- flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
- flags |= VP8_EFLAG_NO_UPD_ARF; // Don't update the alt-ref frame.
+ flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
+ flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
+ flags |= VP8_EFLAG_NO_UPD_ARF; // Don't update the alt-ref frame.
}
return flags;
}
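
[Editor's aside] The alternation between golden and alt-ref propagation in EncodeFlags() above composes standard libvpx encode flags. The sketch below mirrors that branch in isolation; PropagationFlags is a hypothetical helper, not part of the WebRTC class.

    #include "vpx/vp8cx.h"

    int PropagationFlags(bool update_golden_next) {
      int flags = VP8_EFLAG_NO_REF_LAST;  // Don't reference the last frame.
      if (update_golden_next) {
        flags |= VP8_EFLAG_FORCE_GF;      // Update the golden reference.
        flags |= VP8_EFLAG_NO_UPD_ARF;    // Don't update alt-ref.
        flags |= VP8_EFLAG_NO_REF_GF;     // Don't reference the golden frame.
      } else {
        flags |= VP8_EFLAG_FORCE_ARF;     // Update the alt-ref reference.
        flags |= VP8_EFLAG_NO_UPD_GF;     // Don't update the golden frame.
        flags |= VP8_EFLAG_NO_REF_ARF;    // Don't reference the alt-ref frame.
      }
      return flags;
    }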
diff --git a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc
index c6474e5bd1..742bb96e91 100644
--- a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc
@@ -22,25 +22,19 @@ static const uint32_t kMinUpdateInterval = 10;
// Should match the values set in reference_picture_selection.h
static const int kRtt = 10;
-static const int kNoPropagationGolden = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF;
-static const int kNoPropagationAltRef = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF;
-static const int kPropagateGolden = VP8_EFLAG_FORCE_GF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_REF_LAST;
-static const int kPropagateAltRef = VP8_EFLAG_FORCE_ARF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_REF_LAST;
-static const int kRefreshFromGolden = VP8_EFLAG_NO_REF_LAST |
- VP8_EFLAG_NO_REF_ARF;
-static const int kRefreshFromAltRef = VP8_EFLAG_NO_REF_LAST |
- VP8_EFLAG_NO_REF_GF;
-
+static const int kNoPropagationGolden =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
+static const int kNoPropagationAltRef =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
+static const int kPropagateGolden = VP8_EFLAG_FORCE_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST;
+static const int kPropagateAltRef = VP8_EFLAG_FORCE_ARF | VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_REF_ARF |
+ VP8_EFLAG_NO_REF_LAST;
+static const int kRefreshFromGolden =
+ VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_ARF;
+static const int kRefreshFromAltRef =
+ VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF;
class TestRPS : public ::testing::Test {
protected:
@@ -84,15 +78,15 @@ TEST_F(TestRPS, TestDecoderRefresh) {
EXPECT_EQ(rps_.ReceivedSLI(90 * time), true);
// Enough time have elapsed since the previous reference propagation, we will
// therefore get both a refresh from golden and a propagation of alt-ref.
- EXPECT_EQ(rps_.EncodeFlags(5, true, 90 * time), kRefreshFromGolden |
- kPropagateAltRef);
+ EXPECT_EQ(rps_.EncodeFlags(5, true, 90 * time),
+ kRefreshFromGolden | kPropagateAltRef);
rps_.ReceivedRPSI(5);
time += kRtt + 1;
// Enough time for a new refresh, but not enough time for a reference
// propagation.
EXPECT_EQ(rps_.ReceivedSLI(90 * time), true);
- EXPECT_EQ(rps_.EncodeFlags(6, true, 90 * time), kRefreshFromAltRef |
- kNoPropagationAltRef);
+ EXPECT_EQ(rps_.EncodeFlags(6, true, 90 * time),
+ kRefreshFromAltRef | kNoPropagationAltRef);
}
TEST_F(TestRPS, TestWrap) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
index 0fbb2a6c40..536587a13e 100644
--- a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
@@ -11,10 +11,12 @@
#include <stdlib.h>
+#include <algorithm>
+
#include "webrtc/base/checks.h"
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
@@ -188,7 +190,7 @@ void ScreenshareLayers::FrameEncoded(unsigned int size,
}
void ScreenshareLayers::PopulateCodecSpecific(bool base_layer_sync,
- CodecSpecificInfoVP8 *vp8_info,
+ CodecSpecificInfoVP8* vp8_info,
uint32_t timestamp) {
int64_t unwrapped_timestamp = time_wrap_handler_.Unwrap(timestamp);
if (number_of_temporal_layers_ == 1) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
index 90a8b1b883..7628758209 100644
--- a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
+++ b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
@@ -15,7 +15,7 @@
#include "webrtc/base/timeutils.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
-#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
+#include "webrtc/modules/video_coding/utility/frame_dropper.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
index 628e336568..f31ed5e4d8 100644
--- a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
@@ -12,9 +12,9 @@
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h"
-#include "webrtc/modules/video_coding/utility/include/mock/mock_frame_dropper.h"
+#include "webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h"
using ::testing::_;
using ::testing::NiceMock;
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
index 5dc4ac78f1..40e438f7e4 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
@@ -215,9 +215,7 @@ int SimulcastEncoderAdapter::InitEncode(const VideoCodec* inst,
}
VideoEncoder* encoder = factory_->Create();
- ret = encoder->InitEncode(&stream_codec,
- number_of_cores,
- max_payload_size);
+ ret = encoder->InitEncode(&stream_codec, number_of_cores, max_payload_size);
if (ret < 0) {
Release();
return ret;
@@ -284,35 +282,25 @@ int SimulcastEncoderAdapter::Encode(
// scale it to match what the encoder expects (below).
if ((dst_width == src_width && dst_height == src_height) ||
input_image.IsZeroSize()) {
- streaminfos_[stream_idx].encoder->Encode(input_image,
- codec_specific_info,
+ streaminfos_[stream_idx].encoder->Encode(input_image, codec_specific_info,
&stream_frame_types);
} else {
VideoFrame dst_frame;
// Making sure that destination frame is of sufficient size.
// Aligning stride values based on width.
- dst_frame.CreateEmptyFrame(dst_width, dst_height,
- dst_width, (dst_width + 1) / 2,
- (dst_width + 1) / 2);
- libyuv::I420Scale(input_image.buffer(kYPlane),
- input_image.stride(kYPlane),
- input_image.buffer(kUPlane),
- input_image.stride(kUPlane),
- input_image.buffer(kVPlane),
- input_image.stride(kVPlane),
- src_width, src_height,
- dst_frame.buffer(kYPlane),
- dst_frame.stride(kYPlane),
- dst_frame.buffer(kUPlane),
- dst_frame.stride(kUPlane),
- dst_frame.buffer(kVPlane),
- dst_frame.stride(kVPlane),
- dst_width, dst_height,
- libyuv::kFilterBilinear);
+ dst_frame.CreateEmptyFrame(dst_width, dst_height, dst_width,
+ (dst_width + 1) / 2, (dst_width + 1) / 2);
+ libyuv::I420Scale(
+ input_image.buffer(kYPlane), input_image.stride(kYPlane),
+ input_image.buffer(kUPlane), input_image.stride(kUPlane),
+ input_image.buffer(kVPlane), input_image.stride(kVPlane), src_width,
+ src_height, dst_frame.buffer(kYPlane), dst_frame.stride(kYPlane),
+ dst_frame.buffer(kUPlane), dst_frame.stride(kUPlane),
+ dst_frame.buffer(kVPlane), dst_frame.stride(kVPlane), dst_width,
+ dst_height, libyuv::kFilterBilinear);
dst_frame.set_timestamp(input_image.timestamp());
dst_frame.set_render_time_ms(input_image.render_time_ms());
- streaminfos_[stream_idx].encoder->Encode(dst_frame,
- codec_specific_info,
+ streaminfos_[stream_idx].encoder->Encode(dst_frame, codec_specific_info,
&stream_frame_types);
}
}
@@ -426,16 +414,17 @@ uint32_t SimulcastEncoderAdapter::GetStreamBitrate(
// current stream's |targetBitrate|, otherwise it's capped by |maxBitrate|.
if (stream_idx < codec_.numberOfSimulcastStreams - 1) {
unsigned int max_rate = codec_.simulcastStream[stream_idx].maxBitrate;
- if (new_bitrate_kbit >= SumStreamTargetBitrate(stream_idx + 1, codec_) +
- codec_.simulcastStream[stream_idx + 1].minBitrate) {
+ if (new_bitrate_kbit >=
+ SumStreamTargetBitrate(stream_idx + 1, codec_) +
+ codec_.simulcastStream[stream_idx + 1].minBitrate) {
max_rate = codec_.simulcastStream[stream_idx].targetBitrate;
}
return std::min(new_bitrate_kbit - sum_target_lower_streams, max_rate);
} else {
- // For the highest stream (highest resolution), the |targetBitRate| and
- // |maxBitrate| are not used. Any excess bitrate (above the targets of
- // all lower streams) is given to this (highest resolution) stream.
- return new_bitrate_kbit - sum_target_lower_streams;
+ // For the highest stream (highest resolution), the |targetBitRate| and
+ // |maxBitrate| are not used. Any excess bitrate (above the targets of
+ // all lower streams) is given to this (highest resolution) stream.
+ return new_bitrate_kbit - sum_target_lower_streams;
}
} else {
// Not enough bitrate for this stream.
@@ -507,4 +496,11 @@ bool SimulcastEncoderAdapter::SupportsNativeHandle() const {
return streaminfos_[0].encoder->SupportsNativeHandle();
}
+const char* SimulcastEncoderAdapter::ImplementationName() const {
+ // We should not be calling this method before streaminfos_ are configured.
+ RTC_DCHECK(!streaminfos_.empty());
+ // TODO(pbos): Support multiple implementation names for different encoders.
+ return streaminfos_[0].encoder->ImplementationName();
+}
+
} // namespace webrtc
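
[Editor's aside] The allocation policy described in GetStreamBitrate() above can be summarized as: fill each lower stream up to its target bitrate, then give whatever remains to the highest-resolution stream. The sketch below is a simplification (the hysteresis on the next stream's minBitrate and the maxBitrate cap are omitted); kTargets mirrors the kTargetBitrates used by the simulcast tests later in this diff.

    #include <algorithm>
    #include <cstdio>

    int main() {
      const int kTargets[3] = {100, 450, 1000};  // kbps, lowest to highest
      int available = 1200;                      // total send rate, kbps
      for (int i = 0; i < 3; ++i) {
        int rate = (i < 2) ? std::min(available, kTargets[i]) : available;
        available -= rate;
        std::printf("stream %d: %d kbps\n", i, rate);
      }
      // With 1200 kbps total: 100, 450 and 650 kbps -- the excess above the
      // lower streams' targets goes to the highest-resolution stream.
      return 0;
    }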
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
index afec024abc..05a96c7336 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
@@ -59,6 +59,7 @@ class SimulcastEncoderAdapter : public VP8Encoder {
int GetTargetFramerate() override;
bool SupportsNativeHandle() const override;
+ const char* ImplementationName() const override;
private:
struct StreamInfo {
@@ -71,8 +72,8 @@ class SimulcastEncoderAdapter : public VP8Encoder {
send_stream(true) {}
StreamInfo(VideoEncoder* encoder,
EncodedImageCallback* callback,
- unsigned short width,
- unsigned short height,
+ uint16_t width,
+ uint16_t height,
bool send_stream)
: encoder(encoder),
callback(callback),
@@ -83,8 +84,8 @@ class SimulcastEncoderAdapter : public VP8Encoder {
// Deleted by SimulcastEncoderAdapter::Release().
VideoEncoder* encoder;
EncodedImageCallback* callback;
- unsigned short width;
- unsigned short height;
+ uint16_t width;
+ uint16_t height;
bool key_frame_request;
bool send_stream;
};
@@ -118,4 +119,3 @@ class SimulcastEncoderAdapter : public VP8Encoder {
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_ENCODER_ADAPTER_H_
-
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
index 218b5e2d1a..86b8e0b345 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
@@ -11,7 +11,7 @@
#include <vector>
#include "testing/gmock/include/gmock/gmock.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h"
#include "webrtc/modules/video_coding/codecs/vp8/vp8_factory.h"
@@ -27,12 +27,10 @@ static VP8Encoder* CreateTestEncoderAdapter() {
class TestSimulcastEncoderAdapter : public TestVp8Simulcast {
public:
TestSimulcastEncoderAdapter()
- : TestVp8Simulcast(CreateTestEncoderAdapter(),
- VP8Decoder::Create()) {}
+ : TestVp8Simulcast(CreateTestEncoderAdapter(), VP8Decoder::Create()) {}
+
protected:
- virtual void SetUp() {
- TestVp8Simulcast::SetUp();
- }
+ virtual void SetUp() { TestVp8Simulcast::SetUp(); }
virtual void TearDown() {
TestVp8Simulcast::TearDown();
VP8EncoderFactoryConfig::set_use_simulcast_adapter(false);
@@ -97,8 +95,7 @@ TEST_F(TestSimulcastEncoderAdapter, TestSpatioTemporalLayers321PatternEncoder) {
// TODO(ronghuawu): Enable this test when SkipEncodingUnusedStreams option is
// implemented for SimulcastEncoderAdapter.
-TEST_F(TestSimulcastEncoderAdapter,
- DISABLED_TestSkipEncodingUnusedStreams) {
+TEST_F(TestSimulcastEncoderAdapter, DISABLED_TestSkipEncodingUnusedStreams) {
TestVp8Simulcast::TestSkipEncodingUnusedStreams();
}
@@ -127,23 +124,17 @@ class MockVideoEncoder : public VideoEncoder {
return 0;
}
- int32_t Release() override {
- return 0;
- }
+ int32_t Release() override { return 0; }
int32_t SetRates(uint32_t newBitRate, uint32_t frameRate) override {
return 0;
}
- MOCK_METHOD2(SetChannelParameters,
- int32_t(uint32_t packetLoss, int64_t rtt));
+ MOCK_METHOD2(SetChannelParameters, int32_t(uint32_t packetLoss, int64_t rtt));
- bool SupportsNativeHandle() const override {
- return supports_native_handle_;
- }
+ bool SupportsNativeHandle() const override { return supports_native_handle_; }
- virtual ~MockVideoEncoder() {
- }
+ virtual ~MockVideoEncoder() {}
const VideoCodec& codec() const { return codec_; }
@@ -200,7 +191,8 @@ class TestSimulcastEncoderAdapterFakeHelper {
EXPECT_TRUE(!factory_->encoders().empty());
for (size_t i = 0; i < factory_->encoders().size(); ++i) {
EXPECT_CALL(*factory_->encoders()[i],
- SetChannelParameters(packetLoss, rtt)).Times(1);
+ SetChannelParameters(packetLoss, rtt))
+ .Times(1);
}
}
@@ -249,8 +241,7 @@ class TestSimulcastEncoderAdapterFake : public ::testing::Test,
void SetupCodec() {
TestVp8Simulcast::DefaultSettings(
- &codec_,
- static_cast<const int*>(kTestTemporalLayerProfile));
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile));
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
adapter_->RegisterEncodeCompleteCallback(this);
}
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc
index 373a55237f..f23affee41 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc
@@ -13,18 +13,14 @@
namespace webrtc {
namespace testing {
-class TestVp8Impl
- : public TestVp8Simulcast {
+class TestVp8Impl : public TestVp8Simulcast {
public:
TestVp8Impl()
- : TestVp8Simulcast(VP8Encoder::Create(), VP8Decoder::Create()) {}
+ : TestVp8Simulcast(VP8Encoder::Create(), VP8Decoder::Create()) {}
+
protected:
- virtual void SetUp() {
- TestVp8Simulcast::SetUp();
- }
- virtual void TearDown() {
- TestVp8Simulcast::TearDown();
- }
+ virtual void SetUp() { TestVp8Simulcast::SetUp(); }
+ virtual void TearDown() { TestVp8Simulcast::TearDown(); }
};
TEST_F(TestVp8Impl, TestKeyFrameRequestsOnAllStreams) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
index e4fc986545..7a7a2c253b 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
@@ -14,10 +14,11 @@
#include <algorithm>
#include <vector>
+#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
#include "webrtc/video_frame.h"
@@ -43,10 +44,8 @@ const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};
-template<typename T> void SetExpectedValues3(T value0,
- T value1,
- T value2,
- T* expected_values) {
+template <typename T>
+void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
expected_values[0] = value0;
expected_values[1] = value1;
expected_values[2] = value2;
@@ -54,15 +53,14 @@ template<typename T> void SetExpectedValues3(T value0,
class Vp8TestEncodedImageCallback : public EncodedImageCallback {
public:
- Vp8TestEncodedImageCallback()
- : picture_id_(-1) {
+ Vp8TestEncodedImageCallback() : picture_id_(-1) {
memset(temporal_layer_, -1, sizeof(temporal_layer_));
memset(layer_sync_, false, sizeof(layer_sync_));
}
~Vp8TestEncodedImageCallback() {
- delete [] encoded_key_frame_._buffer;
- delete [] encoded_frame_._buffer;
+ delete[] encoded_key_frame_._buffer;
+ delete[] encoded_frame_._buffer;
}
virtual int32_t Encoded(const EncodedImage& encoded_image,
@@ -71,22 +69,20 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
// Only store the base layer.
if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
if (encoded_image._frameType == kVideoFrameKey) {
- delete [] encoded_key_frame_._buffer;
+ delete[] encoded_key_frame_._buffer;
encoded_key_frame_._buffer = new uint8_t[encoded_image._size];
encoded_key_frame_._size = encoded_image._size;
encoded_key_frame_._length = encoded_image._length;
encoded_key_frame_._frameType = kVideoFrameKey;
encoded_key_frame_._completeFrame = encoded_image._completeFrame;
- memcpy(encoded_key_frame_._buffer,
- encoded_image._buffer,
+ memcpy(encoded_key_frame_._buffer, encoded_image._buffer,
encoded_image._length);
} else {
- delete [] encoded_frame_._buffer;
+ delete[] encoded_frame_._buffer;
encoded_frame_._buffer = new uint8_t[encoded_image._size];
encoded_frame_._size = encoded_image._size;
encoded_frame_._length = encoded_image._length;
- memcpy(encoded_frame_._buffer,
- encoded_image._buffer,
+ memcpy(encoded_frame_._buffer, encoded_image._buffer,
encoded_image._length);
}
}
@@ -97,8 +93,10 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
codec_specific_info->codecSpecific.VP8.temporalIdx;
return 0;
}
- void GetLastEncodedFrameInfo(int* picture_id, int* temporal_layer,
- bool* layer_sync, int stream) {
+ void GetLastEncodedFrameInfo(int* picture_id,
+ int* temporal_layer,
+ bool* layer_sync,
+ int stream) {
*picture_id = picture_id_;
*temporal_layer = temporal_layer_[stream];
*layer_sync = layer_sync_[stream];
@@ -120,10 +118,8 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
class Vp8TestDecodedImageCallback : public DecodedImageCallback {
public:
- Vp8TestDecodedImageCallback()
- : decoded_frames_(0) {
- }
- virtual int32_t Decoded(VideoFrame& decoded_image) {
+ Vp8TestDecodedImageCallback() : decoded_frames_(0) {}
+ int32_t Decoded(VideoFrame& decoded_image) override {
for (int i = 0; i < decoded_image.width(); ++i) {
EXPECT_NEAR(kColorY, decoded_image.buffer(kYPlane)[i], 1);
}
@@ -136,9 +132,11 @@ class Vp8TestDecodedImageCallback : public DecodedImageCallback {
decoded_frames_++;
return 0;
}
- int DecodedFrames() {
- return decoded_frames_;
+ int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
}
+ int DecodedFrames() { return decoded_frames_; }
private:
int decoded_frames_;
@@ -161,8 +159,7 @@ class SkipEncodingUnusedStreamsTest {
std::vector<unsigned int> configured_bitrates;
for (std::vector<TemporalLayers*>::const_iterator it =
spy_factory->spying_layers_.begin();
- it != spy_factory->spying_layers_.end();
- ++it) {
+ it != spy_factory->spying_layers_.end(); ++it) {
configured_bitrates.push_back(
static_cast<SpyingTemporalLayers*>(*it)->configured_bitrate_);
}
@@ -185,8 +182,8 @@ class SkipEncodingUnusedStreamsTest {
int framerate,
vpx_codec_enc_cfg_t* cfg) override {
configured_bitrate_ = bitrate_kbit;
- return layers_->ConfigureBitrates(
- bitrate_kbit, max_bitrate_kbit, framerate, cfg);
+ return layers_->ConfigureBitrates(bitrate_kbit, max_bitrate_kbit,
+ framerate, cfg);
}
void PopulateCodecSpecific(bool base_layer_sync,
@@ -228,16 +225,15 @@ class SkipEncodingUnusedStreamsTest {
class TestVp8Simulcast : public ::testing::Test {
public:
TestVp8Simulcast(VP8Encoder* encoder, VP8Decoder* decoder)
- : encoder_(encoder),
- decoder_(decoder) {}
+ : encoder_(encoder), decoder_(decoder) {}
// Creates an VideoFrame from |plane_colors|.
static void CreateImage(VideoFrame* frame, int plane_colors[kNumOfPlanes]) {
for (int plane_num = 0; plane_num < kNumOfPlanes; ++plane_num) {
- int width = (plane_num != kYPlane ? (frame->width() + 1) / 2 :
- frame->width());
- int height = (plane_num != kYPlane ? (frame->height() + 1) / 2 :
- frame->height());
+ int width =
+ (plane_num != kYPlane ? (frame->width() + 1) / 2 : frame->width());
+ int height =
+ (plane_num != kYPlane ? (frame->height() + 1) / 2 : frame->height());
PlaneType plane_type = static_cast<PlaneType>(plane_num);
uint8_t* data = frame->buffer(plane_type);
// Setting allocated area to zero - setting only image size to
@@ -267,24 +263,15 @@ class TestVp8Simulcast : public ::testing::Test {
settings->height = kDefaultHeight;
settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
ASSERT_EQ(3, kNumberOfSimulcastStreams);
- ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4,
- kMaxBitrates[0],
- kMinBitrates[0],
- kTargetBitrates[0],
- &settings->simulcastStream[0],
- temporal_layer_profile[0]);
- ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2,
- kMaxBitrates[1],
- kMinBitrates[1],
- kTargetBitrates[1],
- &settings->simulcastStream[1],
- temporal_layer_profile[1]);
- ConfigureStream(kDefaultWidth, kDefaultHeight,
- kMaxBitrates[2],
- kMinBitrates[2],
- kTargetBitrates[2],
- &settings->simulcastStream[2],
- temporal_layer_profile[2]);
+ ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
+ kMinBitrates[0], kTargetBitrates[0],
+ &settings->simulcastStream[0], temporal_layer_profile[0]);
+ ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
+ kMinBitrates[1], kTargetBitrates[1],
+ &settings->simulcastStream[1], temporal_layer_profile[1]);
+ ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
+ kMinBitrates[2], kTargetBitrates[2],
+ &settings->simulcastStream[2], temporal_layer_profile[2]);
settings->codecSpecific.VP8.resilience = kResilientStream;
settings->codecSpecific.VP8.denoisingOn = true;
settings->codecSpecific.VP8.errorConcealmentOn = false;
@@ -312,9 +299,7 @@ class TestVp8Simulcast : public ::testing::Test {
}
protected:
- virtual void SetUp() {
- SetUpCodec(kDefaultTemporalLayerProfile);
- }
+ virtual void SetUp() { SetUpCodec(kDefaultTemporalLayerProfile); }
virtual void SetUpCodec(const int* temporal_layer_profile) {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
@@ -323,14 +308,14 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1));
int half_width = (kDefaultWidth + 1) / 2;
- input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight,
- kDefaultWidth, half_width, half_width);
+ input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, kDefaultWidth,
+ half_width, half_width);
memset(input_frame_.buffer(kYPlane), 0,
- input_frame_.allocated_size(kYPlane));
+ input_frame_.allocated_size(kYPlane));
memset(input_frame_.buffer(kUPlane), 0,
- input_frame_.allocated_size(kUPlane));
+ input_frame_.allocated_size(kUPlane));
memset(input_frame_.buffer(kVPlane), 0,
- input_frame_.allocated_size(kVPlane));
+ input_frame_.allocated_size(kVPlane));
}
virtual void TearDown() {
@@ -342,28 +327,34 @@ class TestVp8Simulcast : public ::testing::Test {
ASSERT_GE(expected_video_streams, 0);
ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
if (expected_video_streams >= 1) {
- EXPECT_CALL(encoder_callback_, Encoded(
- AllOf(Field(&EncodedImage::_frameType, frame_type),
- Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
- Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)), _, _)
- )
+ EXPECT_CALL(
+ encoder_callback_,
+ Encoded(
+ AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
+ _, _))
.Times(1)
.WillRepeatedly(Return(0));
}
if (expected_video_streams >= 2) {
- EXPECT_CALL(encoder_callback_, Encoded(
- AllOf(Field(&EncodedImage::_frameType, frame_type),
- Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
- Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)), _, _)
- )
+ EXPECT_CALL(
+ encoder_callback_,
+ Encoded(
+ AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
+ _, _))
.Times(1)
.WillRepeatedly(Return(0));
}
if (expected_video_streams >= 3) {
- EXPECT_CALL(encoder_callback_, Encoded(
- AllOf(Field(&EncodedImage::_frameType, frame_type),
- Field(&EncodedImage::_encodedWidth, kDefaultWidth),
- Field(&EncodedImage::_encodedHeight, kDefaultHeight)), _, _))
+ EXPECT_CALL(
+ encoder_callback_,
+ Encoded(AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
+ _, _))
.Times(1)
.WillRepeatedly(Return(0));
}
@@ -477,8 +468,8 @@ class TestVp8Simulcast : public ::testing::Test {
void TestPaddingOneStreamTwoMaxedOut() {
// We are just below limit of sending third stream, so we should get
// first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2] - 1, 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 2);
@@ -491,8 +482,8 @@ class TestVp8Simulcast : public ::testing::Test {
void TestSendAllStreams() {
// We have just enough to send all streams.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2], 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 3);
@@ -505,8 +496,7 @@ class TestVp8Simulcast : public ::testing::Test {
void TestDisablingStreams() {
// We should get three media streams.
- encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] +
- kMaxBitrates[2], 30);
+ encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 3);
@@ -517,8 +507,8 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
// We should only get two streams and padding for one.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2] / 2, 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
ExpectStreams(kVideoFrameDelta, 2);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
@@ -537,16 +527,16 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
// We should only get two streams and padding for one.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2] / 2, 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
// We get a key frame because a new stream is being enabled.
ExpectStreams(kVideoFrameKey, 2);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
// We should get all three streams.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kTargetBitrates[2], 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
// We get a key frame because a new stream is being enabled.
ExpectStreams(kVideoFrameKey, 3);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
@@ -571,20 +561,20 @@ class TestVp8Simulcast : public ::testing::Test {
input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
settings_.width, half_width, half_width);
memset(input_frame_.buffer(kYPlane), 0,
- input_frame_.allocated_size(kYPlane));
+ input_frame_.allocated_size(kYPlane));
memset(input_frame_.buffer(kUPlane), 0,
- input_frame_.allocated_size(kUPlane));
+ input_frame_.allocated_size(kUPlane));
memset(input_frame_.buffer(kVPlane), 0,
- input_frame_.allocated_size(kVPlane));
+ input_frame_.allocated_size(kVPlane));
// The for loop above did not set the bitrate of the highest layer.
- settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
- maxBitrate = 0;
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1]
+ .maxBitrate = 0;
// The highest layer has to correspond to the non-simulcast resolution.
- settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
- width = settings_.width;
- settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
- height = settings_.height;
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
+ settings_.width;
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
+ settings_.height;
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
// Encode one frame and verify.
@@ -612,21 +602,17 @@ class TestVp8Simulcast : public ::testing::Test {
input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
settings_.width, half_width, half_width);
memset(input_frame_.buffer(kYPlane), 0,
- input_frame_.allocated_size(kYPlane));
+ input_frame_.allocated_size(kYPlane));
memset(input_frame_.buffer(kUPlane), 0,
- input_frame_.allocated_size(kUPlane));
+ input_frame_.allocated_size(kUPlane));
memset(input_frame_.buffer(kVPlane), 0,
- input_frame_.allocated_size(kVPlane));
+ input_frame_.allocated_size(kVPlane));
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
}
- void TestSwitchingToOneStream() {
- SwitchingToOneStream(1024, 768);
- }
+ void TestSwitchingToOneStream() { SwitchingToOneStream(1024, 768); }
- void TestSwitchingToOneOddStream() {
- SwitchingToOneStream(1023, 769);
- }
+ void TestSwitchingToOneOddStream() { SwitchingToOneStream(1023, 769); }
void TestRPSIEncoder() {
Vp8TestEncodedImageCallback encoder_callback;
@@ -777,67 +763,55 @@ class TestVp8Simulcast : public ::testing::Test {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams.
- int expected_temporal_idx[3] = { -1, -1, -1};
+ int expected_temporal_idx[3] = {-1, -1, -1};
bool expected_layer_sync[3] = {false, false, false};
// First frame: #0.
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #1.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #2.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #3.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #4.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #5.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
}
// Test the layer pattern and sync flag for various spatial-temporal patterns.
@@ -858,67 +832,55 @@ class TestVp8Simulcast : public ::testing::Test {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams.
- int expected_temporal_idx[3] = { -1, -1, -1};
+ int expected_temporal_idx[3] = {-1, -1, -1};
bool expected_layer_sync[3] = {false, false, false};
// First frame: #0.
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #1.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #2.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #3.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #4.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #5.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
}
void TestStrideEncodeDecode() {
@@ -932,8 +894,8 @@ class TestVp8Simulcast : public ::testing::Test {
// 1. stride > width 2. stride_y != stride_uv/2
int stride_y = kDefaultWidth + 20;
int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
- input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight,
- stride_y, stride_uv, stride_uv);
+ input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, stride_y,
+ stride_uv, stride_uv);
// Set color.
int plane_offset[kNumOfPlanes];
plane_offset[kYPlane] = kColorY;
@@ -963,10 +925,9 @@ class TestVp8Simulcast : public ::testing::Test {
void TestSkipEncodingUnusedStreams() {
SkipEncodingUnusedStreamsTest test;
std::vector<unsigned int> configured_bitrate =
- test.RunTest(encoder_.get(),
- &settings_,
- 1); // Target bit rate 1, to force all streams but the
- // base one to be exceeding bandwidth constraints.
+ test.RunTest(encoder_.get(), &settings_,
+          test.RunTest(encoder_.get(), &settings_,
+                       1);  // Target bit rate 1, to force all streams but the
+                            // base one to exceed the bandwidth constraints.
EXPECT_EQ(static_cast<size_t>(kNumberOfSimulcastStreams),
configured_bitrate.size());
@@ -975,8 +936,7 @@ class TestVp8Simulcast : public ::testing::Test {
int stream = 0;
for (std::vector<unsigned int>::const_iterator it =
configured_bitrate.begin();
- it != configured_bitrate.end();
- ++it) {
+ it != configured_bitrate.end(); ++it) {
if (stream == 0) {
EXPECT_EQ(min_bitrate, *it);
} else {
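
Aside (not part of the patch): since this file already builds as C++11, the
reformatted iterator loop above could be written as a range-based for. A
hedged equivalent, assuming configured_bitrate stays a
std::vector<unsigned int>:

    int stream = 0;
    for (unsigned int bitrate : configured_bitrate) {
      if (stream == 0) {
        EXPECT_EQ(min_bitrate, bitrate);
      }
      // ... remaining per-stream checks unchanged ...
      ++stream;
    }
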
diff --git a/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h b/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h
index 7607210d5c..47112c64aa 100644
--- a/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h
+++ b/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h
@@ -14,7 +14,8 @@
#include "vpx/vpx_encoder.h"
-#include "webrtc/common_video/interface/video_image.h"
+#include "webrtc/common.h"
+#include "webrtc/common_video/include/video_image.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -30,6 +31,8 @@ class TemporalLayers {
virtual ~Factory() {}
virtual TemporalLayers* Create(int temporal_layers,
uint8_t initial_tl0_pic_idx) const;
+ static const ConfigOptionID identifier =
+ ConfigOptionID::kTemporalLayersFactory;
};
virtual ~TemporalLayers() {}
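
Aside (not part of the patch): the new static identifier ties
TemporalLayers::Factory into webrtc::Config, which keys each stored option by
its type's ConfigOptionID rather than by a per-type static address. A hedged
sketch of the intended usage (names follow webrtc/common.h; treat this as an
illustration, not the exact implementation):

    webrtc::Config options;
    // Config takes ownership of the pointer passed to Set<T>().
    options.Set<webrtc::TemporalLayers::Factory>(
        new webrtc::TemporalLayers::Factory());
    // Get<T>() finds the value via T::identifier, returning a default
    // instance if nothing was set.
    const webrtc::TemporalLayers::Factory& factory =
        options.Get<webrtc::TemporalLayers::Factory>();
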
diff --git a/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index 5ec674f16a..c3d77da063 100644
--- a/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -11,12 +11,12 @@
#include <stdio.h>
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
@@ -78,7 +78,11 @@ class Vp8UnitTestDecodeCompleteCallback : public webrtc::DecodedImageCallback {
public:
explicit Vp8UnitTestDecodeCompleteCallback(VideoFrame* frame)
: decoded_frame_(frame), decode_complete(false) {}
- int Decoded(webrtc::VideoFrame& frame);
+ int32_t Decoded(VideoFrame& frame) override;
+ int32_t Decoded(VideoFrame& frame, int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
+ }
bool DecodeComplete();
private:
@@ -216,7 +220,12 @@ TEST_F(TestVp8Impl, EncoderParameterTest) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->InitDecode(&codec_inst_, 1));
}
-TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(AlignedStrideEncodeDecode)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_AlignedStrideEncodeDecode DISABLED_AlignedStrideEncodeDecode
+#else
+#define MAYBE_AlignedStrideEncodeDecode AlignedStrideEncodeDecode
+#endif
+TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
SetUpEncodeDecode();
encoder_->Encode(input_frame_, NULL, NULL);
EXPECT_GT(WaitForEncodedFrame(), 0u);
@@ -232,7 +241,12 @@ TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(AlignedStrideEncodeDecode)) {
EXPECT_EQ(kTestNtpTimeMs, decoded_frame_.ntp_time_ms());
}
-TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(DecodeWithACompleteKeyFrame)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_DecodeWithACompleteKeyFrame DISABLED_DecodeWithACompleteKeyFrame
+#else
+#define MAYBE_DecodeWithACompleteKeyFrame DecodeWithACompleteKeyFrame
+#endif
+TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
SetUpEncodeDecode();
encoder_->Encode(input_frame_, NULL, NULL);
EXPECT_GT(WaitForEncodedFrame(), 0u);
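
Aside (not part of the patch): the MAYBE_ blocks above replace the removed
gtest_disable.h helper with the standard googletest convention: a test whose
name starts with DISABLED_ is compiled but not run, so the macro simply
selects the effective test name per platform at preprocessing time.
SomeFixture and SomeTest below are placeholder names:

    #if defined(WEBRTC_ANDROID)
    #define MAYBE_SomeTest DISABLED_SomeTest  // Skipped on Android.
    #else
    #define MAYBE_SomeTest SomeTest           // Runs everywhere else.
    #endif
    TEST_F(SomeFixture, MAYBE_SomeTest) { /* ... */ }

Disabled tests can still be forced to run with
--gtest_also_run_disabled_tests.
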
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h b/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h
index 84745ea5a1..52f8aa30b8 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h
@@ -32,4 +32,3 @@ class VP8EncoderFactoryConfig {
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_FACTORY_H_
-
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
index 029ccd1f27..5a04f6a43d 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -16,7 +16,7 @@
#include <algorithm>
// NOTE(ajm): Path provided by gyp.
-#include "libyuv/scale.h" // NOLINT
+#include "libyuv/scale.h" // NOLINT
#include "libyuv/convert.h" // NOLINT
#include "webrtc/base/checks.h"
@@ -24,8 +24,8 @@
#include "webrtc/common.h"
#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
@@ -68,10 +68,9 @@ std::vector<int> GetStreamBitratesKbps(const VideoCodec& codec,
std::vector<int> bitrates_kbps(codec.numberOfSimulcastStreams);
// Allocate min -> target bitrates as long as we have bitrate to spend.
size_t last_active_stream = 0;
- for (size_t i = 0;
- i < static_cast<size_t>(codec.numberOfSimulcastStreams) &&
- bitrate_to_allocate_kbps >=
- static_cast<int>(codec.simulcastStream[i].minBitrate);
+ for (size_t i = 0; i < static_cast<size_t>(codec.numberOfSimulcastStreams) &&
+ bitrate_to_allocate_kbps >=
+ static_cast<int>(codec.simulcastStream[i].minBitrate);
++i) {
last_active_stream = i;
int allocated_bitrate_kbps =
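
Aside (not part of the patch): the loop above performs a greedy
min-to-target bitrate allocation across simulcast streams. A hedged,
self-contained sketch of the idea; the hunk cuts off before the per-stream
allocation, so the min/target clamping below is an assumption about the
surrounding code:

    #include <algorithm>
    #include <vector>

    struct Stream { int min_kbps; int target_kbps; };

    std::vector<int> AllocateKbps(const std::vector<Stream>& streams,
                                  int budget_kbps) {
      std::vector<int> out(streams.size(), 0);
      for (size_t i = 0;
           i < streams.size() && budget_kbps >= streams[i].min_kbps; ++i) {
        // Spend up to the stream's target, but never less than its minimum.
        int allocated = std::min(streams[i].target_kbps, budget_kbps);
        allocated = std::max(allocated, streams[i].min_kbps);
        out[i] = allocated;
        budget_kbps -= allocated;
      }
      return out;
    }
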
@@ -132,7 +131,7 @@ bool ValidSimulcastResolutions(const VideoCodec& codec, int num_streams) {
return true;
}
-int NumStreamsDisabled(std::vector<bool>& streams) {
+int NumStreamsDisabled(const std::vector<bool>& streams) {
int num_disabled = 0;
for (bool stream : streams) {
if (!stream)
@@ -183,7 +182,7 @@ int VP8EncoderImpl::Release() {
while (!encoded_images_.empty()) {
EncodedImage& image = encoded_images_.back();
- delete [] image._buffer;
+ delete[] image._buffer;
encoded_images_.pop_back();
}
while (!encoders_.empty()) {
@@ -289,10 +288,8 @@ int VP8EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
target_bitrate = tl0_bitrate;
}
configurations_[i].rc_target_bitrate = target_bitrate;
- temporal_layers_[stream_idx]->ConfigureBitrates(target_bitrate,
- max_bitrate,
- framerate,
- &configurations_[i]);
+ temporal_layers_[stream_idx]->ConfigureBitrates(
+ target_bitrate, max_bitrate, framerate, &configurations_[i]);
if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -301,6 +298,10 @@ int VP8EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
return WEBRTC_VIDEO_CODEC_OK;
}
+const char* VP8EncoderImpl::ImplementationName() const {
+ return "libvpx";
+}
+
void VP8EncoderImpl::SetStreamState(bool send_stream,
int stream_idx) {
if (send_stream && !send_stream_[stream_idx]) {
@@ -311,8 +312,8 @@ void VP8EncoderImpl::SetStreamState(bool send_stream,
}
void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
- int num_temporal_layers,
- const VideoCodec& codec) {
+ int num_temporal_layers,
+ const VideoCodec& codec) {
const Config default_options;
const TemporalLayers::Factory& tl_factory =
(codec.extra_options ? codec.extra_options : &default_options)
@@ -330,15 +331,16 @@ void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
for (int i = 0; i < num_streams; ++i) {
// TODO(andresp): crash if layers is invalid.
int layers = codec.simulcastStream[i].numberOfTemporalLayers;
- if (layers < 1) layers = 1;
+ if (layers < 1)
+ layers = 1;
temporal_layers_.push_back(tl_factory.Create(layers, rand()));
}
}
}
int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
- int number_of_cores,
- size_t /*maxPayloadSize */) {
+ int number_of_cores,
+                               size_t /*maxPayloadSize*/) {
if (inst == NULL) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
@@ -375,12 +377,13 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
- int num_temporal_layers = doing_simulcast ?
- inst->simulcastStream[0].numberOfTemporalLayers :
- inst->codecSpecific.VP8.numberOfTemporalLayers;
+ int num_temporal_layers =
+ doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers
+ : inst->codecSpecific.VP8.numberOfTemporalLayers;
// TODO(andresp): crash if num temporal layers is bananas.
- if (num_temporal_layers < 1) num_temporal_layers = 1;
+ if (num_temporal_layers < 1)
+ num_temporal_layers = 1;
SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst);
feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;
@@ -410,7 +413,7 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
int idx = number_of_streams - 1;
for (int i = 0; i < (number_of_streams - 1); ++i, --idx) {
int gcd = GCD(inst->simulcastStream[idx].width,
- inst->simulcastStream[idx-1].width);
+ inst->simulcastStream[idx - 1].width);
downsampling_factors_[i].num = inst->simulcastStream[idx].width / gcd;
downsampling_factors_[i].den = inst->simulcastStream[idx - 1].width / gcd;
send_stream_[i] = false;
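
Aside (not part of the patch): the GCD above reduces the ratio between two
neighbouring simulcast widths to the rational downsampling factor libvpx
expects. A worked example with assumed widths:

    #include <cstdio>

    int GCD(int a, int b) { return b == 0 ? a : GCD(b, a % b); }

    int main() {
      // Neighbouring simulcast widths (example values only).
      int wider = 1280, narrower = 640;
      int g = GCD(wider, narrower);
      // Prints num=2 den=1: the narrower stream is the wider one scaled 2:1.
      printf("num=%d den=%d\n", wider / g, narrower / g);
      return 0;
    }
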
@@ -422,20 +425,20 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
}
for (int i = 0; i < number_of_streams; ++i) {
// Random start, 16 bits is enough.
- picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF;
+ picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF; // NOLINT
last_key_frame_picture_id_[i] = -1;
// allocate memory for encoded image
if (encoded_images_[i]._buffer != NULL) {
- delete [] encoded_images_[i]._buffer;
+ delete[] encoded_images_[i]._buffer;
}
- encoded_images_[i]._size = CalcBufferSize(kI420,
- codec_.width, codec_.height);
+ encoded_images_[i]._size =
+ CalcBufferSize(kI420, codec_.width, codec_.height);
encoded_images_[i]._buffer = new uint8_t[encoded_images_[i]._size];
encoded_images_[i]._completeFrame = true;
}
// populate encoder configuration with default values
- if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(),
- &configurations_[0], 0)) {
+ if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &configurations_[0],
+ 0)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
// setting the time base of the codec
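
Aside (not part of the patch): the 0x7FFF mask above keeps 15 bits, matching
the extended PictureID field of the VP8 RTP payload format (RFC 7741), even
though the neighbouring comment says "16 bits". The counter wraps the same
way when it is advanced later in this file:

    picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF;
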
@@ -459,8 +462,8 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
break;
case kResilientFrames:
#ifdef INDEPENDENT_PARTITIONS
- configurations_[0]-g_error_resilient = VPX_ERROR_RESILIENT_DEFAULT |
- VPX_ERROR_RESILIENT_PARTITIONS;
+      configurations_[0].g_error_resilient =
+          VPX_ERROR_RESILIENT_DEFAULT | VPX_ERROR_RESILIENT_PARTITIONS;
break;
#else
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; // Not supported
@@ -536,20 +539,18 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
// Determine number of threads based on the image size and #cores.
// TODO(fbarchard): Consider number of Simulcast layers.
- configurations_[0].g_threads = NumberOfThreads(configurations_[0].g_w,
- configurations_[0].g_h,
- number_of_cores);
+ configurations_[0].g_threads = NumberOfThreads(
+ configurations_[0].g_w, configurations_[0].g_h, number_of_cores);
// Creating a wrapper to the image - setting image data to NULL.
// Actual pointer will be set in encode. Setting align to 1, as it
// is meaningless (no memory allocation is done here).
- vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height,
- 1, NULL);
+ vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1,
+ NULL);
if (encoders_.size() == 1) {
configurations_[0].rc_target_bitrate = inst->startBitrate;
- temporal_layers_[0]->ConfigureBitrates(inst->startBitrate,
- inst->maxBitrate,
+ temporal_layers_[0]->ConfigureBitrates(inst->startBitrate, inst->maxBitrate,
inst->maxFramerate,
&configurations_[0]);
} else {
@@ -641,20 +642,15 @@ int VP8EncoderImpl::InitAndSetControlSettings() {
flags |= VPX_CODEC_USE_OUTPUT_PARTITION;
if (encoders_.size() > 1) {
- int error = vpx_codec_enc_init_multi(&encoders_[0],
- vpx_codec_vp8_cx(),
- &configurations_[0],
- encoders_.size(),
- flags,
- &downsampling_factors_[0]);
+ int error = vpx_codec_enc_init_multi(&encoders_[0], vpx_codec_vp8_cx(),
+ &configurations_[0], encoders_.size(),
+ flags, &downsampling_factors_[0]);
if (error) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
} else {
- if (vpx_codec_enc_init(&encoders_[0],
- vpx_codec_vp8_cx(),
- &configurations_[0],
- flags)) {
+ if (vpx_codec_enc_init(&encoders_[0], vpx_codec_vp8_cx(),
+ &configurations_[0], flags)) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
}
@@ -671,13 +667,13 @@ int VP8EncoderImpl::InitAndSetControlSettings() {
#else
denoiser_state = kDenoiserOnAdaptive;
#endif
- vpx_codec_control(&encoders_[0], VP8E_SET_NOISE_SENSITIVITY,
- codec_.codecSpecific.VP8.denoisingOn ?
- denoiser_state : kDenoiserOff);
+ vpx_codec_control(
+ &encoders_[0], VP8E_SET_NOISE_SENSITIVITY,
+ codec_.codecSpecific.VP8.denoisingOn ? denoiser_state : kDenoiserOff);
if (encoders_.size() > 2) {
- vpx_codec_control(&encoders_[1], VP8E_SET_NOISE_SENSITIVITY,
- codec_.codecSpecific.VP8.denoisingOn ?
- denoiser_state : kDenoiserOff);
+ vpx_codec_control(
+ &encoders_[1], VP8E_SET_NOISE_SENSITIVITY,
+ codec_.codecSpecific.VP8.denoisingOn ? denoiser_state : kDenoiserOff);
}
for (size_t i = 0; i < encoders_.size(); ++i) {
// Allow more screen content to be detected as static.
@@ -710,14 +706,12 @@ uint32_t VP8EncoderImpl::MaxIntraTarget(uint32_t optimalBuffersize) {
// Don't go below 3 times the per frame bandwidth.
const uint32_t minIntraTh = 300;
- return (targetPct < minIntraTh) ? minIntraTh: targetPct;
+ return (targetPct < minIntraTh) ? minIntraTh : targetPct;
}
int VP8EncoderImpl::Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
- TRACE_EVENT1("webrtc", "VP8::Encode", "timestamp", frame.timestamp());
-
if (!inited_)
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
if (frame.IsZeroSize())
@@ -731,7 +725,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
quality_scaler_enabled_ ? quality_scaler_.GetScaledFrame(frame) : frame;
if (quality_scaler_enabled_ && (input_image.width() != codec_.width ||
- input_image.height() != codec_.height)) {
+ input_image.height() != codec_.height)) {
int ret = UpdateCodecFrameSize(input_image);
if (ret < 0)
return ret;
@@ -747,11 +741,11 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
// Image in vpx_image_t format.
// Input image is const. VP8's raw image is not defined as const.
raw_images_[0].planes[VPX_PLANE_Y] =
- const_cast<uint8_t*>(input_image.buffer(kYPlane));
+ const_cast<uint8_t*>(input_image.buffer(kYPlane));
raw_images_[0].planes[VPX_PLANE_U] =
- const_cast<uint8_t*>(input_image.buffer(kUPlane));
+ const_cast<uint8_t*>(input_image.buffer(kUPlane));
raw_images_[0].planes[VPX_PLANE_V] =
- const_cast<uint8_t*>(input_image.buffer(kVPlane));
+ const_cast<uint8_t*>(input_image.buffer(kVPlane));
raw_images_[0].stride[VPX_PLANE_Y] = input_image.stride(kYPlane);
raw_images_[0].stride[VPX_PLANE_U] = input_image.stride(kUPlane);
@@ -760,17 +754,17 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
for (size_t i = 1; i < encoders_.size(); ++i) {
// Scale the image down a number of times by downsampling factor
libyuv::I420Scale(
- raw_images_[i-1].planes[VPX_PLANE_Y],
- raw_images_[i-1].stride[VPX_PLANE_Y],
- raw_images_[i-1].planes[VPX_PLANE_U],
- raw_images_[i-1].stride[VPX_PLANE_U],
- raw_images_[i-1].planes[VPX_PLANE_V],
- raw_images_[i-1].stride[VPX_PLANE_V],
- raw_images_[i-1].d_w, raw_images_[i-1].d_h,
- raw_images_[i].planes[VPX_PLANE_Y], raw_images_[i].stride[VPX_PLANE_Y],
- raw_images_[i].planes[VPX_PLANE_U], raw_images_[i].stride[VPX_PLANE_U],
- raw_images_[i].planes[VPX_PLANE_V], raw_images_[i].stride[VPX_PLANE_V],
- raw_images_[i].d_w, raw_images_[i].d_h, libyuv::kFilterBilinear);
+ raw_images_[i - 1].planes[VPX_PLANE_Y],
+ raw_images_[i - 1].stride[VPX_PLANE_Y],
+ raw_images_[i - 1].planes[VPX_PLANE_U],
+ raw_images_[i - 1].stride[VPX_PLANE_U],
+ raw_images_[i - 1].planes[VPX_PLANE_V],
+ raw_images_[i - 1].stride[VPX_PLANE_V], raw_images_[i - 1].d_w,
+ raw_images_[i - 1].d_h, raw_images_[i].planes[VPX_PLANE_Y],
+ raw_images_[i].stride[VPX_PLANE_Y], raw_images_[i].planes[VPX_PLANE_U],
+ raw_images_[i].stride[VPX_PLANE_U], raw_images_[i].planes[VPX_PLANE_V],
+ raw_images_[i].stride[VPX_PLANE_V], raw_images_[i].d_w,
+ raw_images_[i].d_h, libyuv::kFilterBilinear);
}
vpx_enc_frame_flags_t flags[kMaxSimulcastStreams];
for (size_t i = 0; i < encoders_.size(); ++i) {
@@ -805,8 +799,8 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
if (send_key_frame) {
// Adapt the size of the key frame when in screenshare with 1 temporal
// layer.
- if (encoders_.size() == 1 && codec_.mode == kScreensharing
- && codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) {
+ if (encoders_.size() == 1 && codec_.mode == kScreensharing &&
+ codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) {
const uint32_t forceKeyFrameIntraTh = 100;
vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
forceKeyFrameIntraTh);
@@ -818,13 +812,12 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
}
std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
} else if (codec_specific_info &&
- codec_specific_info->codecType == kVideoCodecVP8) {
+ codec_specific_info->codecType == kVideoCodecVP8) {
if (feedback_mode_) {
// Handle RPSI and SLI messages and set up the appropriate encode flags.
bool sendRefresh = false;
if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) {
- rps_.ReceivedRPSI(
- codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
+ rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
}
if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) {
sendRefresh = rps_.ReceivedSLI(input_image.timestamp());
@@ -876,8 +869,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
}
vpx_codec_control(&encoders_[i], VP8E_SET_FRAME_FLAGS, flags[stream_idx]);
- vpx_codec_control(&encoders_[i],
- VP8E_SET_TEMPORAL_LAYER_ID,
+ vpx_codec_control(&encoders_[i], VP8E_SET_TEMPORAL_LAYER_ID,
temporal_layers_[stream_idx]->CurrentLayerId());
}
// TODO(holmer): Ideally the duration should be the timestamp diff of this
@@ -895,7 +887,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
// Reset specific intra frame thresholds, following the key frame.
if (send_key_frame) {
vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
- rc_max_intra_target_);
+ rc_max_intra_target_);
}
if (error)
return WEBRTC_VIDEO_CODEC_ERROR;
@@ -913,8 +905,7 @@ int VP8EncoderImpl::UpdateCodecFrameSize(const VideoFrame& input_image) {
codec_.simulcastStream[0].height = input_image.height();
}
// Update the cpu_speed setting for resolution change.
- vpx_codec_control(&(encoders_[0]),
- VP8E_SET_CPUUSED,
+ vpx_codec_control(&(encoders_[0]), VP8E_SET_CPUUSED,
SetCpuSpeed(codec_.width, codec_.height));
raw_images_[0].w = codec_.width;
raw_images_[0].h = codec_.height;
@@ -947,13 +938,12 @@ void VP8EncoderImpl::PopulateCodecSpecific(
}
vp8Info->simulcastIdx = stream_idx;
vp8Info->keyIdx = kNoKeyIdx; // TODO(hlundin) populate this
- vp8Info->nonReference = (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) ?
- true : false;
+    vp8Info->nonReference =
+        (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) != 0;
bool base_layer_sync_point = (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ||
- only_predicting_from_key_frame;
+ only_predicting_from_key_frame;
temporal_layers_[stream_idx]->PopulateCodecSpecific(base_layer_sync_point,
- vp8Info,
- timestamp);
+ vp8Info, timestamp);
// Prepare next.
picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF;
}
@@ -966,27 +956,26 @@ int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
int stream_idx = static_cast<int>(encoders_.size()) - 1;
int result = WEBRTC_VIDEO_CODEC_OK;
for (size_t encoder_idx = 0; encoder_idx < encoders_.size();
- ++encoder_idx, --stream_idx) {
+ ++encoder_idx, --stream_idx) {
vpx_codec_iter_t iter = NULL;
int part_idx = 0;
encoded_images_[encoder_idx]._length = 0;
encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
RTPFragmentationHeader frag_info;
// token_partitions_ is number of bits used.
- frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_)
- + 1);
+ frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_) +
+ 1);
CodecSpecificInfo codec_specific;
- const vpx_codec_cx_pkt_t *pkt = NULL;
- while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx],
- &iter)) != NULL) {
+ const vpx_codec_cx_pkt_t* pkt = NULL;
+ while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx], &iter)) !=
+ NULL) {
switch (pkt->kind) {
case VPX_CODEC_CX_FRAME_PKT: {
uint32_t length = encoded_images_[encoder_idx]._length;
memcpy(&encoded_images_[encoder_idx]._buffer[length],
- pkt->data.frame.buf,
- pkt->data.frame.sz);
+ pkt->data.frame.buf, pkt->data.frame.sz);
frag_info.fragmentationOffset[part_idx] = length;
- frag_info.fragmentationLength[part_idx] = pkt->data.frame.sz;
+ frag_info.fragmentationLength[part_idx] = pkt->data.frame.sz;
frag_info.fragmentationPlType[part_idx] = 0; // not known here
frag_info.fragmentationTimeDiff[part_idx] = 0;
encoded_images_[encoder_idx]._length += pkt->data.frame.sz;
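
Aside (not part of the patch): the fragmentation header above is sized for
VP8's partition layout, one first partition (modes and motion vectors) plus
2^token_partitions_ token partitions, since libvpx's
VP8E_SET_TOKEN_PARTITIONS control takes the log2 of the token-partition
count:

    int NumFragments(int log2_token_partitions) {
      return (1 << log2_token_partitions) + 1;  // e.g. log2 = 3 gives 9.
    }
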
@@ -1063,7 +1052,6 @@ int VP8EncoderImpl::RegisterEncodeCompleteCallback(
return WEBRTC_VIDEO_CODEC_OK;
}
-
VP8DecoderImpl::VP8DecoderImpl()
: decode_complete_callback_(NULL),
inited_(false),
@@ -1075,8 +1063,7 @@ VP8DecoderImpl::VP8DecoderImpl()
propagation_cnt_(-1),
last_frame_width_(0),
last_frame_height_(0),
- key_frame_required_(true) {
-}
+ key_frame_required_(true) {}
VP8DecoderImpl::~VP8DecoderImpl() {
inited_ = true; // in order to do the actual release
@@ -1092,8 +1079,7 @@ int VP8DecoderImpl::Reset() {
return WEBRTC_VIDEO_CODEC_OK;
}
-int VP8DecoderImpl::InitDecode(const VideoCodec* inst,
- int number_of_cores) {
+int VP8DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
int ret_val = Release();
if (ret_val < 0) {
return ret_val;
@@ -1104,12 +1090,12 @@ int VP8DecoderImpl::InitDecode(const VideoCodec* inst,
if (inst && inst->codecType == kVideoCodecVP8) {
feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;
}
- vpx_codec_dec_cfg_t cfg;
+ vpx_codec_dec_cfg_t cfg;
// Setting number of threads to a constant value (1)
cfg.threads = 1;
cfg.h = cfg.w = 0; // set after decode
-vpx_codec_flags_t flags = 0;
+ vpx_codec_flags_t flags = 0;
#if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64)
flags = VPX_CODEC_USE_POSTPROC;
#ifdef INDEPENDENT_PARTITIONS
@@ -1134,10 +1120,10 @@ vpx_codec_flags_t flags = 0;
}
int VP8DecoderImpl::Decode(const EncodedImage& input_image,
- bool missing_frames,
- const RTPFragmentationHeader* fragmentation,
- const CodecSpecificInfo* codec_specific_info,
- int64_t /*render_time_ms*/) {
+ bool missing_frames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codec_specific_info,
+ int64_t /*render_time_ms*/) {
if (!inited_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
@@ -1188,9 +1174,9 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
if (input_image._frameType == kVideoFrameKey &&
input_image._completeFrame) {
propagation_cnt_ = -1;
- // Start count on first loss.
+ // Start count on first loss.
} else if ((!input_image._completeFrame || missing_frames) &&
- propagation_cnt_ == -1) {
+ propagation_cnt_ == -1) {
propagation_cnt_ = 0;
}
if (propagation_cnt_ >= 0) {
@@ -1242,15 +1228,15 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
if (input_image._frameType == kVideoFrameKey && input_image._buffer != NULL) {
const uint32_t bytes_to_copy = input_image._length;
if (last_keyframe_._size < bytes_to_copy) {
- delete [] last_keyframe_._buffer;
+ delete[] last_keyframe_._buffer;
last_keyframe_._buffer = NULL;
last_keyframe_._size = 0;
}
uint8_t* temp_buffer = last_keyframe_._buffer; // Save buffer ptr.
- uint32_t temp_size = last_keyframe_._size; // Save size.
- last_keyframe_ = input_image; // Shallow copy.
- last_keyframe_._buffer = temp_buffer; // Restore buffer ptr.
- last_keyframe_._size = temp_size; // Restore buffer size.
+ uint32_t temp_size = last_keyframe_._size; // Save size.
+ last_keyframe_ = input_image; // Shallow copy.
+ last_keyframe_._buffer = temp_buffer; // Restore buffer ptr.
+ last_keyframe_._size = temp_size; // Restore buffer size.
if (!last_keyframe_._buffer) {
// Allocate memory.
last_keyframe_._size = bytes_to_copy;
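
Aside (not part of the patch): the save/restore sequence above exists because
EncodedImage assignment is a shallow copy; copying naively would leak the
destination's buffer and alias the source's. The pattern in miniature (sketch
only; dst and src stand in for last_keyframe_ and input_image):

    uint8_t* keep_buf = dst._buffer;  // Save the owned storage.
    uint32_t keep_size = dst._size;   // Save its capacity.
    dst = src;                        // Shallow-copy metadata and pointer.
    dst._buffer = keep_buf;           // Restore ownership.
    dst._size = keep_size;
    // Then deep-copy the payload into dst._buffer, reallocating first if
    // the saved capacity was too small, as the surrounding code does.
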
@@ -1300,7 +1286,8 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
}
if (picture_id > -1) {
if (((reference_updates & VP8_GOLD_FRAME) ||
- (reference_updates & VP8_ALTR_FRAME)) && !corrupted) {
+ (reference_updates & VP8_ALTR_FRAME)) &&
+ !corrupted) {
decode_complete_callback_->ReceivedDecodedReferenceFrame(picture_id);
}
decode_complete_callback_->ReceivedDecodedFrame(picture_id);
@@ -1323,14 +1310,10 @@ int VP8DecoderImpl::DecodePartitions(
const EncodedImage& input_image,
const RTPFragmentationHeader* fragmentation) {
for (int i = 0; i < fragmentation->fragmentationVectorSize; ++i) {
- const uint8_t* partition = input_image._buffer +
- fragmentation->fragmentationOffset[i];
- const uint32_t partition_length =
- fragmentation->fragmentationLength[i];
- if (vpx_codec_decode(decoder_,
- partition,
- partition_length,
- 0,
+ const uint8_t* partition =
+ input_image._buffer + fragmentation->fragmentationOffset[i];
+ const uint32_t partition_length = fragmentation->fragmentationLength[i];
+ if (vpx_codec_decode(decoder_, partition, partition_length, 0,
VPX_DL_REALTIME)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -1343,8 +1326,8 @@ int VP8DecoderImpl::DecodePartitions(
}
int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
- uint32_t timestamp,
- int64_t ntp_time_ms) {
+ uint32_t timestamp,
+ int64_t ntp_time_ms) {
if (img == NULL) {
// Decoder OK and NULL image => No show frame
return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
@@ -1354,14 +1337,13 @@ int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
// Allocate memory for decoded image.
VideoFrame decoded_image(buffer_pool_.CreateBuffer(img->d_w, img->d_h),
timestamp, 0, kVideoRotation_0);
- libyuv::I420Copy(
- img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
- img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
- img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
- decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane),
- decoded_image.buffer(kUPlane), decoded_image.stride(kUPlane),
- decoded_image.buffer(kVPlane), decoded_image.stride(kVPlane),
- img->d_w, img->d_h);
+ libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
+ img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
+ img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+ decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane),
+ decoded_image.buffer(kUPlane), decoded_image.stride(kUPlane),
+ decoded_image.buffer(kVPlane), decoded_image.stride(kVPlane),
+ img->d_w, img->d_h);
decoded_image.set_ntp_time_ms(ntp_time_ms);
int ret = decode_complete_callback_->Decoded(decoded_image);
if (ret != 0)
@@ -1380,7 +1362,7 @@ int VP8DecoderImpl::RegisterDecodeCompleteCallback(
int VP8DecoderImpl::Release() {
if (last_keyframe_._buffer != NULL) {
- delete [] last_keyframe_._buffer;
+ delete[] last_keyframe_._buffer;
last_keyframe_._buffer = NULL;
}
if (decoder_ != NULL) {
@@ -1400,15 +1382,19 @@ int VP8DecoderImpl::Release() {
return WEBRTC_VIDEO_CODEC_OK;
}
+const char* VP8DecoderImpl::ImplementationName() const {
+ return "libvpx";
+}
+
int VP8DecoderImpl::CopyReference(VP8DecoderImpl* copy) {
// The type of frame to copy should be set in ref_frame_->frame_type
// before the call to this function.
- if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_)
- != VPX_CODEC_OK) {
+ if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_) !=
+ VPX_CODEC_OK) {
return -1;
}
- if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_)
- != VPX_CODEC_OK) {
+ if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) !=
+ VPX_CODEC_OK) {
return -1;
}
return 0;
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
index ba14ed5841..9d5fb713a4 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
@@ -22,12 +22,12 @@
#include "vpx/vp8cx.h"
#include "vpx/vp8dx.h"
-#include "webrtc/common_video/interface/i420_buffer_pool.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/common_video/include/i420_buffer_pool.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.h"
-#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
-#include "webrtc/modules/video_coding/utility/include/quality_scaler.h"
+#include "webrtc/modules/video_coding/utility/frame_dropper.h"
+#include "webrtc/modules/video_coding/utility/quality_scaler.h"
#include "webrtc/video_frame.h"
namespace webrtc {
@@ -58,8 +58,11 @@ class VP8EncoderImpl : public VP8Encoder {
void OnDroppedFrame() override {}
+ const char* ImplementationName() const override;
+
private:
- void SetupTemporalLayers(int num_streams, int num_temporal_layers,
+ void SetupTemporalLayers(int num_streams,
+ int num_temporal_layers,
const VideoCodec& codec);
// Set the cpu_speed setting for encoder based on resolution and/or platform.
@@ -126,15 +129,17 @@ class VP8DecoderImpl : public VP8Decoder {
int InitDecode(const VideoCodec* inst, int number_of_cores) override;
int Decode(const EncodedImage& input_image,
- bool missing_frames,
- const RTPFragmentationHeader* fragmentation,
- const CodecSpecificInfo* codec_specific_info,
- int64_t /*render_time_ms*/) override;
+ bool missing_frames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codec_specific_info,
+ int64_t /*render_time_ms*/) override;
int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
int Release() override;
int Reset() override;
+ const char* ImplementationName() const override;
+
private:
// Copy reference image from this _decoder to the _decoder in copyTo. Set
// which frame type to copy in _refFrame->frame_type before the call to
@@ -165,4 +170,3 @@ class VP8DecoderImpl : public VP8Decoder {
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_IMPL_H_
-
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
index 5843d83fa7..9e546653db 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
@@ -1,4 +1,4 @@
- /*
+/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
@@ -9,8 +9,9 @@
*/
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/common_video/interface/video_image.h"
+#include "webrtc/common_video/include/video_image.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/system_wrappers/include/tick_util.h"
@@ -22,8 +23,7 @@
class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
public:
explicit Vp8SequenceCoderEncodeCallback(FILE* encoded_file)
- : encoded_file_(encoded_file),
- encoded_bytes_(0) {}
+ : encoded_file_(encoded_file), encoded_bytes_(0) {}
~Vp8SequenceCoderEncodeCallback();
int Encoded(const webrtc::EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codecSpecificInfo,
@@ -31,6 +31,7 @@ class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
// Returns the encoded image.
webrtc::EncodedImage encoded_image() { return encoded_image_; }
size_t encoded_bytes() { return encoded_bytes_; }
+
private:
webrtc::EncodedImage encoded_image_;
FILE* encoded_file_;
@@ -38,7 +39,7 @@ class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
};
Vp8SequenceCoderEncodeCallback::~Vp8SequenceCoderEncodeCallback() {
- delete [] encoded_image_._buffer;
+ delete[] encoded_image_._buffer;
encoded_image_._buffer = NULL;
}
int Vp8SequenceCoderEncodeCallback::Encoded(
@@ -46,7 +47,7 @@ int Vp8SequenceCoderEncodeCallback::Encoded(
const webrtc::CodecSpecificInfo* codecSpecificInfo,
const webrtc::RTPFragmentationHeader* fragmentation) {
if (encoded_image_._size < encoded_image._size) {
- delete [] encoded_image_._buffer;
+ delete[] encoded_image_._buffer;
encoded_image_._buffer = NULL;
encoded_image_._buffer = new uint8_t[encoded_image._size];
encoded_image_._size = encoded_image._size;
@@ -68,7 +69,11 @@ class Vp8SequenceCoderDecodeCallback : public webrtc::DecodedImageCallback {
public:
explicit Vp8SequenceCoderDecodeCallback(FILE* decoded_file)
: decoded_file_(decoded_file) {}
- int Decoded(webrtc::VideoFrame& frame);
+ int32_t Decoded(webrtc::VideoFrame& frame) override;
+ int32_t Decoded(webrtc::VideoFrame& frame, int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
+ }
bool DecodeComplete();
private:
@@ -80,16 +85,16 @@ int Vp8SequenceCoderDecodeCallback::Decoded(webrtc::VideoFrame& image) {
return 0;
}
-int SequenceCoder(webrtc::test::CommandLineParser& parser) {
- int width = strtol((parser.GetFlag("w")).c_str(), NULL, 10);
- int height = strtol((parser.GetFlag("h")).c_str(), NULL, 10);
- int framerate = strtol((parser.GetFlag("f")).c_str(), NULL, 10);
+int SequenceCoder(webrtc::test::CommandLineParser* parser) {
+ int width = strtol((parser->GetFlag("w")).c_str(), NULL, 10);
+ int height = strtol((parser->GetFlag("h")).c_str(), NULL, 10);
+ int framerate = strtol((parser->GetFlag("f")).c_str(), NULL, 10);
if (width <= 0 || height <= 0 || framerate <= 0) {
fprintf(stderr, "Error: Resolution cannot be <= 0!\n");
return -1;
}
- int target_bitrate = strtol((parser.GetFlag("b")).c_str(), NULL, 10);
+ int target_bitrate = strtol((parser->GetFlag("b")).c_str(), NULL, 10);
if (target_bitrate <= 0) {
fprintf(stderr, "Error: Bit-rate cannot be <= 0!\n");
return -1;
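
Aside (not part of the patch): changing SequenceCoder to take a pointer
follows the Google C++ style rule that a parameter the callee may mutate is
passed by pointer, so the mutation is visible at the call site:

    SequenceCoder(parser);   // Before: non-const reference, mutation hidden.
    SequenceCoder(&parser);  // After: the '&' flags the side effect.
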
@@ -97,20 +102,20 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
// SetUp
// Open input file.
- std::string encoded_file_name = parser.GetFlag("encoded_file");
+ std::string encoded_file_name = parser->GetFlag("encoded_file");
FILE* encoded_file = fopen(encoded_file_name.c_str(), "wb");
if (encoded_file == NULL) {
fprintf(stderr, "Error: Cannot open encoded file\n");
return -1;
}
- std::string input_file_name = parser.GetFlag("input_file");
+ std::string input_file_name = parser->GetFlag("input_file");
FILE* input_file = fopen(input_file_name.c_str(), "rb");
if (input_file == NULL) {
fprintf(stderr, "Error: Cannot open input file\n");
return -1;
}
// Open output file.
- std::string output_file_name = parser.GetFlag("output_file");
+ std::string output_file_name = parser->GetFlag("output_file");
FILE* output_file = fopen(output_file_name.c_str(), "wb");
if (output_file == NULL) {
fprintf(stderr, "Error: Cannot open output file\n");
@@ -118,8 +123,8 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
}
// Get range of frames: will encode num_frames following start_frame).
- int start_frame = strtol((parser.GetFlag("start_frame")).c_str(), NULL, 10);
- int num_frames = strtol((parser.GetFlag("num_frames")).c_str(), NULL, 10);
+ int start_frame = strtol((parser->GetFlag("start_frame")).c_str(), NULL, 10);
+ int num_frames = strtol((parser->GetFlag("num_frames")).c_str(), NULL, 10);
// Codec SetUp.
webrtc::VideoCodec inst;
@@ -157,8 +162,8 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
int frames_processed = 0;
input_frame.CreateEmptyFrame(width, height, width, half_width, half_width);
while (!feof(input_file) &&
- (num_frames == -1 || frames_processed < num_frames)) {
- if (fread(frame_buffer.get(), 1, length, input_file) != length)
+ (num_frames == -1 || frames_processed < num_frames)) {
+ if (fread(frame_buffer.get(), 1, length, input_file) != length)
continue;
if (frame_cnt >= start_frame) {
webrtc::ConvertToI420(webrtc::kI420, frame_buffer.get(), 0, 0, width,
@@ -179,33 +184,35 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
printf("Actual bitrate: %f kbps\n", actual_bit_rate / 1000);
webrtc::test::QualityMetricsResult psnr_result, ssim_result;
EXPECT_EQ(0, webrtc::test::I420MetricsFromFiles(
- input_file_name.c_str(), output_file_name.c_str(),
- inst.width, inst.height,
- &psnr_result, &ssim_result));
+ input_file_name.c_str(), output_file_name.c_str(),
+ inst.width, inst.height, &psnr_result, &ssim_result));
printf("PSNR avg: %f[dB], min: %f[dB]\nSSIM avg: %f, min: %f\n",
- psnr_result.average, psnr_result.min,
- ssim_result.average, ssim_result.min);
+ psnr_result.average, psnr_result.min, ssim_result.average,
+ ssim_result.min);
return frame_cnt;
}
int main(int argc, char** argv) {
std::string program_name = argv[0];
- std::string usage = "Encode and decodes a video sequence, and writes"
- "results to a file.\n"
- "Example usage:\n" + program_name + " functionality"
- " --w=352 --h=288 --input_file=input.yuv --output_file=output.yuv "
- " Command line flags:\n"
- " - width(int): The width of the input file. Default: 352\n"
- " - height(int): The height of the input file. Default: 288\n"
- " - input_file(string): The YUV file to encode."
- " Default: foreman.yuv\n"
- " - encoded_file(string): The vp8 encoded file (encoder output)."
- " Default: vp8_encoded.vp8\n"
- " - output_file(string): The yuv decoded file (decoder output)."
- " Default: vp8_decoded.yuv\n."
- " - start_frame - frame number in which encoding will begin. Default: 0"
- " - num_frames - Number of frames to be processed. "
- " Default: -1 (entire sequence).";
+  std::string usage =
+      "Encodes and decodes a video sequence, and writes "
+      "results to a file.\n"
+      "Example usage:\n" +
+      program_name +
+      " functionality"
+      " --w=352 --h=288 --input_file=input.yuv --output_file=output.yuv\n"
+      " Command line flags:\n"
+      " - width(int): The width of the input file. Default: 352\n"
+      " - height(int): The height of the input file. Default: 288\n"
+      " - input_file(string): The YUV file to encode."
+      " Default: foreman.yuv\n"
+      " - encoded_file(string): The vp8 encoded file (encoder output)."
+      " Default: vp8_encoded.vp8\n"
+      " - output_file(string): The yuv decoded file (decoder output)."
+      " Default: vp8_decoded.yuv\n"
+      " - start_frame - frame number at which encoding will begin. Default: 0\n"
+      " - num_frames - Number of frames to be processed."
+      " Default: -1 (entire sequence).";
webrtc::test::CommandLineParser parser;
@@ -223,8 +230,8 @@ int main(int argc, char** argv) {
parser.SetFlag("output_file", webrtc::test::OutputPath() + "vp8_decoded.yuv");
parser.SetFlag("encoded_file",
webrtc::test::OutputPath() + "vp8_encoded.vp8");
- parser.SetFlag("input_file", webrtc::test::ResourcePath("foreman_cif",
- "yuv"));
+ parser.SetFlag("input_file",
+ webrtc::test::ResourcePath("foreman_cif", "yuv"));
parser.SetFlag("help", "false");
parser.ProcessFlags();
@@ -234,5 +241,5 @@ int main(int argc, char** argv) {
}
parser.PrintEnteredFlags();
- return SequenceCoder(parser);
+ return SequenceCoder(&parser);
}