Diffstat (limited to 'media/base')
-rw-r--r--   media/base/constants.cc                     2
-rw-r--r--   media/base/constants.h                      3
-rw-r--r--   media/base/executablehelpers.h            100
-rw-r--r--   media/base/fakemediaengine.h                8
-rw-r--r--   media/base/filemediaengine.h                2
-rw-r--r--   media/base/filemediaengine_unittest.cc      3
-rw-r--r--   media/base/hybridvideoengine.cc           356
-rw-r--r--   media/base/hybridvideoengine.h            286
-rw-r--r--   media/base/hybridvideoengine_unittest.cc  486
-rw-r--r--   media/base/mediachannel.h                   7
-rw-r--r--   media/base/mediaengine.h                   11
-rw-r--r--   media/base/testutils.cc                    10
-rwxr-xr-x   media/base/videoadapter_unittest.cc       205
-rw-r--r--   media/base/videoengine_unittest.h          19
-rw-r--r--   media/base/videoframe.cc                   57
-rw-r--r--   media/base/videoframe_unittest.h           51
16 files changed, 327 insertions, 1279 deletions
diff --git a/media/base/constants.cc b/media/base/constants.cc
index cd10ef7..19a960f 100644
--- a/media/base/constants.cc
+++ b/media/base/constants.cc
@@ -59,6 +59,7 @@ const char kCodecParamSPropStereo[] = "sprop-stereo";
const char kCodecParamStereo[] = "stereo";
const char kCodecParamUseInbandFec[] = "useinbandfec";
const char kCodecParamMaxAverageBitrate[] = "maxaveragebitrate";
+const char kCodecParamMaxPlaybackRate[] = "maxplaybackrate";
const char kCodecParamSctpProtocol[] = "protocol";
const char kCodecParamSctpStreams[] = "streams";
@@ -72,6 +73,7 @@ const int kOpusDefaultMinPTime = 3;
const int kOpusDefaultSPropStereo = 0;
const int kOpusDefaultStereo = 0;
const int kOpusDefaultUseInbandFec = 0;
+const int kOpusDefaultMaxPlaybackRate = 48000;
const int kPreferredMaxPTime = 60;
const int kPreferredMinPTime = 10;
diff --git a/media/base/constants.h b/media/base/constants.h
index 5ac1be2..5168acb 100644
--- a/media/base/constants.h
+++ b/media/base/constants.h
@@ -62,6 +62,7 @@ extern const char kCodecParamSPropStereo[];
extern const char kCodecParamStereo[];
extern const char kCodecParamUseInbandFec[];
extern const char kCodecParamMaxAverageBitrate[];
+extern const char kCodecParamMaxPlaybackRate[];
extern const char kCodecParamSctpProtocol[];
extern const char kCodecParamSctpStreams[];
@@ -79,6 +80,8 @@ extern const int kOpusDefaultMinPTime;
extern const int kOpusDefaultSPropStereo;
extern const int kOpusDefaultStereo;
extern const int kOpusDefaultUseInbandFec;
+extern const int kOpusDefaultMaxPlaybackRate;
+
// Prefered values in this code base. Note that they may differ from the default
// values in http://tools.ietf.org/html/draft-spittka-payload-rtp-opus-03
// Only frames larger or equal to 10 ms are currently supported in this code
diff --git a/media/base/executablehelpers.h b/media/base/executablehelpers.h
new file mode 100644
index 0000000..2dde010
--- /dev/null
+++ b/media/base/executablehelpers.h
@@ -0,0 +1,100 @@
+/*
+ * libjingle
+ * Copyright 2014 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_MEDIA_BASE_EXECUTABLEHELPERS_H_
+#define TALK_MEDIA_BASE_EXECUTABLEHELPERS_H_
+
+#ifdef OSX
+#include <mach-o/dyld.h>
+#endif
+
+#include <string>
+
+#include "webrtc/base/logging.h"
+#include "webrtc/base/pathutils.h"
+
+namespace rtc {
+
+// Returns the path to the running executable or an empty path.
+// TODO(thorcarpenter): Consolidate with FluteClient::get_executable_dir.
+inline Pathname GetExecutablePath() {
+ const int32 kMaxExePathSize = 255;
+#ifdef WIN32
+ TCHAR exe_path_buffer[kMaxExePathSize];
+ DWORD copied_length = GetModuleFileName(NULL, // NULL = Current process
+ exe_path_buffer, kMaxExePathSize);
+ if (0 == copied_length) {
+ LOG(LS_ERROR) << "Copied length is zero";
+ return rtc::Pathname();
+ }
+ if (kMaxExePathSize == copied_length) {
+ LOG(LS_ERROR) << "Buffer too small";
+ return rtc::Pathname();
+ }
+#ifdef UNICODE
+ std::wstring wdir(exe_path_buffer);
+ std::string dir_tmp(wdir.begin(), wdir.end());
+ rtc::Pathname path(dir_tmp);
+#else // UNICODE
+ rtc::Pathname path(exe_path_buffer);
+#endif // UNICODE
+#elif defined(OSX) || defined(LINUX)
+ char exe_path_buffer[kMaxExePathSize];
+#ifdef OSX
+ uint32_t copied_length = kMaxExePathSize - 1;
+ if (_NSGetExecutablePath(exe_path_buffer, &copied_length) == -1) {
+ LOG(LS_ERROR) << "Buffer too small";
+ return rtc::Pathname();
+ }
+#elif defined LINUX
+ int32 copied_length = kMaxExePathSize - 1;
+ const char* kProcExeFmt = "/proc/%d/exe";
+ char proc_exe_link[40];
+ snprintf(proc_exe_link, sizeof(proc_exe_link), kProcExeFmt, getpid());
+ copied_length = readlink(proc_exe_link, exe_path_buffer, copied_length);
+ if (copied_length == -1) {
+ LOG_ERR(LS_ERROR) << "Error reading link " << proc_exe_link;
+ return rtc::Pathname();
+ }
+ if (copied_length == kMaxExePathSize - 1) {
+ LOG(LS_ERROR) << "Probably truncated result when reading link "
+ << proc_exe_link;
+ return rtc::Pathname();
+ }
+ exe_path_buffer[copied_length] = '\0';
+#endif // LINUX
+ rtc::Pathname path(exe_path_buffer);
+#else // Android || IOS
+ rtc::Pathname path;
+#endif // OSX || LINUX
+ return path;
+}
+
+} // namespace rtc
+
+#endif // TALK_MEDIA_BASE_EXECUTABLEHELPERS_H_
+
diff --git a/media/base/fakemediaengine.h b/media/base/fakemediaengine.h
index 7bc3958..a6eabef 100644
--- a/media/base/fakemediaengine.h
+++ b/media/base/fakemediaengine.h
@@ -868,7 +868,7 @@ class FakeVoiceEngine : public FakeBaseEngine {
class FakeVideoEngine : public FakeBaseEngine {
public:
- FakeVideoEngine() : renderer_(NULL), capture_(false), processor_(NULL) {
+ FakeVideoEngine() : capture_(false), processor_(NULL) {
// Add a fake video codec. Note that the name must not be "" as there are
// sanity checks against that.
codecs_.push_back(VideoCodec(0, "fake_video_codec", 0, 0, 0, 0));
@@ -926,10 +926,6 @@ class FakeVideoEngine : public FakeBaseEngine {
options_changed_ = true;
return true;
}
- bool SetLocalRenderer(VideoRenderer* r) {
- renderer_ = r;
- return true;
- }
bool SetCapture(bool capture) {
capture_ = capture;
return true;
@@ -946,7 +942,6 @@ class FakeVideoEngine : public FakeBaseEngine {
std::vector<VideoCodec> codecs_;
VideoEncoderConfig default_encoder_config_;
std::string in_device_;
- VideoRenderer* renderer_;
bool capture_;
VideoProcessor* processor_;
VideoOptions options_;
@@ -994,7 +989,6 @@ class FakeMediaEngine :
}
const std::string& audio_in_device() const { return voice_.in_device_; }
const std::string& audio_out_device() const { return voice_.out_device_; }
- VideoRenderer* local_renderer() { return video_.renderer_; }
int voice_loglevel() const { return voice_.loglevel_; }
const std::string& voice_logfilter() const { return voice_.logfilter_; }
int video_loglevel() const { return video_.loglevel_; }
diff --git a/media/base/filemediaengine.h b/media/base/filemediaengine.h
index e546328..d3e99a8 100644
--- a/media/base/filemediaengine.h
+++ b/media/base/filemediaengine.h
@@ -88,7 +88,6 @@ class FileMediaEngine : public MediaEngineInterface {
virtual SoundclipMedia* CreateSoundclip() { return NULL; }
virtual AudioOptions GetAudioOptions() const { return AudioOptions(); }
virtual bool SetAudioOptions(const AudioOptions& options) { return true; }
- virtual bool SetVideoOptions(const VideoOptions& options) { return true; }
virtual bool SetAudioDelayOffset(int offset) { return true; }
virtual bool SetDefaultVideoEncoderConfig(const VideoEncoderConfig& config) {
return true;
@@ -113,7 +112,6 @@ class FileMediaEngine : public MediaEngineInterface {
virtual bool SetOutputVolume(int level) { return true; }
virtual int GetInputLevel() { return 0; }
virtual bool SetLocalMonitor(bool enable) { return true; }
- virtual bool SetLocalRenderer(VideoRenderer* renderer) { return true; }
// TODO(whyuan): control channel send?
virtual bool SetVideoCapture(bool capture) { return true; }
virtual const std::vector<AudioCodec>& audio_codecs() {
diff --git a/media/base/filemediaengine_unittest.cc b/media/base/filemediaengine_unittest.cc
index 1f7405d..c542baf 100644
--- a/media/base/filemediaengine_unittest.cc
+++ b/media/base/filemediaengine_unittest.cc
@@ -223,8 +223,6 @@ TEST_F(FileMediaEngineTest, TestDefaultImplementation) {
EXPECT_TRUE(NULL == engine_->CreateSoundclip());
cricket::AudioOptions audio_options;
EXPECT_TRUE(engine_->SetAudioOptions(audio_options));
- cricket::VideoOptions video_options;
- EXPECT_TRUE(engine_->SetVideoOptions(video_options));
VideoEncoderConfig video_encoder_config;
EXPECT_TRUE(engine_->SetDefaultVideoEncoderConfig(video_encoder_config));
EXPECT_TRUE(engine_->SetSoundDevices(NULL, NULL));
@@ -232,7 +230,6 @@ TEST_F(FileMediaEngineTest, TestDefaultImplementation) {
EXPECT_TRUE(engine_->SetOutputVolume(0));
EXPECT_EQ(0, engine_->GetInputLevel());
EXPECT_TRUE(engine_->SetLocalMonitor(true));
- EXPECT_TRUE(engine_->SetLocalRenderer(NULL));
EXPECT_TRUE(engine_->SetVideoCapture(true));
EXPECT_EQ(0U, engine_->audio_codecs().size());
EXPECT_EQ(0U, engine_->video_codecs().size());
diff --git a/media/base/hybridvideoengine.cc b/media/base/hybridvideoengine.cc
deleted file mode 100644
index 289c4fe..0000000
--- a/media/base/hybridvideoengine.cc
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * libjingle
- * Copyright 2004 Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "talk/media/base/hybridvideoengine.h"
-
-#include "webrtc/base/logging.h"
-
-namespace cricket {
-
-HybridVideoMediaChannel::HybridVideoMediaChannel(
- HybridVideoEngineInterface* engine,
- VideoMediaChannel* channel1,
- VideoMediaChannel* channel2)
- : engine_(engine),
- channel1_(channel1),
- channel2_(channel2),
- active_channel_(NULL),
- sending_(false) {
-}
-
-HybridVideoMediaChannel::~HybridVideoMediaChannel() {
-}
-
-void HybridVideoMediaChannel::SetInterface(NetworkInterface* iface) {
- if (channel1_) {
- channel1_->SetInterface(iface);
- }
- if (channel2_) {
- channel2_->SetInterface(iface);
- }
-}
-
-bool HybridVideoMediaChannel::SetOptions(const VideoOptions &options) {
- bool ret = true;
- if (channel1_) {
- ret = channel1_->SetOptions(options);
- }
- if (channel2_ && ret) {
- ret = channel2_->SetOptions(options);
- }
- return ret;
-}
-
-bool HybridVideoMediaChannel::GetOptions(VideoOptions *options) const {
- if (active_channel_) {
- return active_channel_->GetOptions(options);
- }
- if (channel1_) {
- return channel1_->GetOptions(options);
- }
- if (channel2_) {
- return channel2_->GetOptions(options);
- }
- return false;
-}
-
-bool HybridVideoMediaChannel::SetRecvCodecs(
- const std::vector<VideoCodec>& codecs) {
- // Only give each channel the codecs it knows about.
- bool ret = true;
- std::vector<VideoCodec> codecs1, codecs2;
- SplitCodecs(codecs, &codecs1, &codecs2);
- if (channel1_) {
- ret = channel1_->SetRecvCodecs(codecs1);
- }
- if (channel2_ && ret) {
- ret = channel2_->SetRecvCodecs(codecs2);
- }
- return ret;
-}
-
-bool HybridVideoMediaChannel::SetRecvRtpHeaderExtensions(
- const std::vector<RtpHeaderExtension>& extensions) {
- bool ret = true;
- if (channel1_) {
- ret = channel1_->SetRecvRtpHeaderExtensions(extensions);
- }
- if (channel2_ && ret) {
- ret = channel2_->SetRecvRtpHeaderExtensions(extensions);
- }
- return ret;
-}
-
-bool HybridVideoMediaChannel::SetRenderer(uint32 ssrc,
- VideoRenderer* renderer) {
- bool ret = true;
- if (channel1_) {
- ret = channel1_->SetRenderer(ssrc, renderer);
- }
- if (channel2_ && ret) {
- ret = channel2_->SetRenderer(ssrc, renderer);
- }
- return ret;
-}
-
-bool HybridVideoMediaChannel::SetRender(bool render) {
- bool ret = true;
- if (channel1_) {
- ret = channel1_->SetRender(render);
- }
- if (channel2_ && ret) {
- ret = channel2_->SetRender(render);
- }
- return ret;
-}
-
-bool HybridVideoMediaChannel::MuteStream(uint32 ssrc, bool muted) {
- bool ret = true;
- if (channel1_) {
- ret = channel1_->MuteStream(ssrc, muted);
- }
- if (channel2_ && ret) {
- ret = channel2_->MuteStream(ssrc, muted);
- }
- return ret;
-}
-
-bool HybridVideoMediaChannel::SetSendCodecs(
- const std::vector<VideoCodec>& codecs) {
- // Use the input to this function to decide what impl we're going to use.
- if (!active_channel_ && !SelectActiveChannel(codecs)) {
- LOG(LS_WARNING) << "Failed to select active channel";
- return false;
- }
- // Only give the active channel the codecs it knows about.
- std::vector<VideoCodec> codecs1, codecs2;
- SplitCodecs(codecs, &codecs1, &codecs2);
- const std::vector<VideoCodec>& codecs_to_set =
- (active_channel_ == channel1_.get()) ? codecs1 : codecs2;
- bool return_value = active_channel_->SetSendCodecs(codecs_to_set);
- if (!return_value) {
- return false;
- }
- VideoCodec send_codec;
- return_value = active_channel_->GetSendCodec(&send_codec);
- if (!return_value) {
- return false;
- }
- engine_->OnNewSendResolution(send_codec.width, send_codec.height);
- active_channel_->UpdateAspectRatio(send_codec.width, send_codec.height);
- return true;
-}
-
-bool HybridVideoMediaChannel::GetSendCodec(VideoCodec* send_codec) {
- if (!active_channel_) {
- return false;
- }
- return active_channel_->GetSendCodec(send_codec);
-}
-
-bool HybridVideoMediaChannel::SetSendStreamFormat(uint32 ssrc,
- const VideoFormat& format) {
- return active_channel_ && active_channel_->SetSendStreamFormat(ssrc, format);
-}
-
-bool HybridVideoMediaChannel::SetSendRtpHeaderExtensions(
- const std::vector<RtpHeaderExtension>& extensions) {
- return active_channel_ &&
- active_channel_->SetSendRtpHeaderExtensions(extensions);
-}
-
-bool HybridVideoMediaChannel::SetStartSendBandwidth(int bps) {
- return active_channel_ && active_channel_->SetStartSendBandwidth(bps);
-}
-
-bool HybridVideoMediaChannel::SetMaxSendBandwidth(int bps) {
- return active_channel_ && active_channel_->SetMaxSendBandwidth(bps);
-}
-
-bool HybridVideoMediaChannel::SetSend(bool send) {
- if (send == sending()) {
- return true; // no action required if already set.
- }
-
- bool ret = active_channel_ &&
- active_channel_->SetSend(send);
-
- // Returns error and don't connect the signal if starting up.
- // Disconnects the signal anyway if shutting down.
- if (ret || !send) {
- // TODO(juberti): Remove this hack that connects the WebRTC channel
- // to the capturer.
- if (active_channel_ == channel1_.get()) {
- engine_->OnSendChange1(channel1_.get(), send);
- } else {
- engine_->OnSendChange2(channel2_.get(), send);
- }
- // If succeeded, remember the state as is.
- // If failed to open, sending_ should be false.
- // If failed to stop, sending_ should also be false, as we disconnect the
- // capture anyway.
- // The failure on SetSend(false) is a known issue in webrtc.
- sending_ = send;
- }
- return ret;
-}
-
-bool HybridVideoMediaChannel::SetCapturer(uint32 ssrc,
- VideoCapturer* capturer) {
- bool ret = true;
- if (channel1_.get()) {
- ret = channel1_->SetCapturer(ssrc, capturer);
- }
- if (channel2_.get() && ret) {
- ret = channel2_->SetCapturer(ssrc, capturer);
- }
- return ret;
-}
-
-bool HybridVideoMediaChannel::AddSendStream(const StreamParams& sp) {
- bool ret = true;
- if (channel1_) {
- ret = channel1_->AddSendStream(sp);
- }
- if (channel2_ && ret) {
- ret = channel2_->AddSendStream(sp);
- }
- return ret;
-}
-
-bool HybridVideoMediaChannel::RemoveSendStream(uint32 ssrc) {
- bool ret = true;
- if (channel1_) {
- ret = channel1_->RemoveSendStream(ssrc);
- }
- if (channel2_ && ret) {
- ret = channel2_->RemoveSendStream(ssrc);
- }
- return ret;
-}
-
-bool HybridVideoMediaChannel::AddRecvStream(const StreamParams& sp) {
- return active_channel_ &&
- active_channel_->AddRecvStream(sp);
-}
-
-bool HybridVideoMediaChannel::RemoveRecvStream(uint32 ssrc) {
- return active_channel_ &&
- active_channel_->RemoveRecvStream(ssrc);
-}
-
-bool HybridVideoMediaChannel::SendIntraFrame() {
- return active_channel_ &&
- active_channel_->SendIntraFrame();
-}
-
-bool HybridVideoMediaChannel::RequestIntraFrame() {
- return active_channel_ &&
- active_channel_->RequestIntraFrame();
-}
-
-bool HybridVideoMediaChannel::GetStats(
- const StatsOptions& options, VideoMediaInfo* info) {
- // TODO(juberti): Ensure that returning no stats until SetSendCodecs is OK.
- return active_channel_ &&
- active_channel_->GetStats(options, info);
-}
-
-void HybridVideoMediaChannel::OnPacketReceived(
- rtc::Buffer* packet, const rtc::PacketTime& packet_time) {
- // Eat packets until we have an active channel;
- if (active_channel_) {
- active_channel_->OnPacketReceived(packet, packet_time);
- } else {
- LOG(LS_INFO) << "HybridVideoChannel: Eating early RTP packet";
- }
-}
-
-void HybridVideoMediaChannel::OnRtcpReceived(
- rtc::Buffer* packet, const rtc::PacketTime& packet_time) {
- // Eat packets until we have an active channel;
- if (active_channel_) {
- active_channel_->OnRtcpReceived(packet, packet_time);
- } else {
- LOG(LS_INFO) << "HybridVideoChannel: Eating early RTCP packet";
- }
-}
-
-void HybridVideoMediaChannel::OnReadyToSend(bool ready) {
- if (channel1_) {
- channel1_->OnReadyToSend(ready);
- }
- if (channel2_) {
- channel2_->OnReadyToSend(ready);
- }
-}
-
-void HybridVideoMediaChannel::UpdateAspectRatio(int ratio_w, int ratio_h) {
- if (active_channel_) active_channel_->UpdateAspectRatio(ratio_w, ratio_h);
-}
-
-bool HybridVideoMediaChannel::SelectActiveChannel(
- const std::vector<VideoCodec>& codecs) {
- if (!active_channel_ && !codecs.empty()) {
- if (engine_->HasCodec1(codecs[0])) {
- channel2_.reset();
- active_channel_ = channel1_.get();
- } else if (engine_->HasCodec2(codecs[0])) {
- channel1_.reset();
- active_channel_ = channel2_.get();
- }
- }
- if (NULL == active_channel_) {
- return false;
- }
- // Connect signals from the active channel.
- active_channel_->SignalMediaError.connect(
- this,
- &HybridVideoMediaChannel::OnMediaError);
- return true;
-}
-
-void HybridVideoMediaChannel::SplitCodecs(
- const std::vector<VideoCodec>& codecs,
- std::vector<VideoCodec>* codecs1, std::vector<VideoCodec>* codecs2) {
- codecs1->clear();
- codecs2->clear();
- for (size_t i = 0; i < codecs.size(); ++i) {
- if (engine_->HasCodec1(codecs[i])) {
- codecs1->push_back(codecs[i]);
- }
- if (engine_->HasCodec2(codecs[i])) {
- codecs2->push_back(codecs[i]);
- }
- }
-}
-
-void HybridVideoMediaChannel::OnMediaError(uint32 ssrc, Error error) {
- SignalMediaError(ssrc, error);
-}
-
-} // namespace cricket
diff --git a/media/base/hybridvideoengine.h b/media/base/hybridvideoengine.h
deleted file mode 100644
index 004d3cf..0000000
--- a/media/base/hybridvideoengine.h
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * libjingle
- * Copyright 2004 Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_MEDIA_BASE_HYBRIDVIDEOENGINE_H_
-#define TALK_MEDIA_BASE_HYBRIDVIDEOENGINE_H_
-
-#include <string>
-#include <vector>
-
-#include "talk/media/base/codec.h"
-#include "talk/media/base/mediachannel.h"
-#include "talk/media/base/videocapturer.h"
-#include "talk/media/base/videocommon.h"
-#include "webrtc/base/logging.h"
-#include "webrtc/base/sigslotrepeater.h"
-
-namespace cricket {
-
-struct Device;
-struct VideoFormat;
-class HybridVideoEngineInterface;
-class VideoCapturer;
-class VideoFrame;
-class VideoRenderer;
-
-// HybridVideoMediaChannels work with a HybridVideoEngine to combine
-// two unrelated VideoMediaChannel implementations into a single class.
-class HybridVideoMediaChannel : public VideoMediaChannel {
- public:
- HybridVideoMediaChannel(HybridVideoEngineInterface* engine,
- VideoMediaChannel* channel1,
- VideoMediaChannel* channel2);
- virtual ~HybridVideoMediaChannel();
-
- // VideoMediaChannel methods
- virtual void SetInterface(NetworkInterface* iface);
- virtual bool SetOptions(const VideoOptions& options);
- virtual bool GetOptions(VideoOptions* options) const;
- virtual bool AddSendStream(const StreamParams& sp);
- virtual bool RemoveSendStream(uint32 ssrc);
- virtual bool SetRenderer(uint32 ssrc, VideoRenderer* renderer);
- virtual bool SetRender(bool render);
- virtual bool MuteStream(uint32 ssrc, bool muted);
-
- virtual bool SetRecvCodecs(const std::vector<VideoCodec>& codecs);
- virtual bool SetRecvRtpHeaderExtensions(
- const std::vector<RtpHeaderExtension>& extensions);
-
- virtual bool SetSendCodecs(const std::vector<VideoCodec>& codecs);
- virtual bool GetSendCodec(VideoCodec* codec);
- virtual bool SetSendStreamFormat(uint32 ssrc, const VideoFormat& format);
- virtual bool SetSendRtpHeaderExtensions(
- const std::vector<RtpHeaderExtension>& extensions);
- virtual bool SetStartSendBandwidth(int bps);
- virtual bool SetMaxSendBandwidth(int bps);
- virtual bool SetSend(bool send);
-
- virtual bool AddRecvStream(const StreamParams& sp);
- virtual bool RemoveRecvStream(uint32 ssrc);
- virtual bool SetCapturer(uint32 ssrc, VideoCapturer* capturer);
-
- virtual bool SendIntraFrame();
- virtual bool RequestIntraFrame();
-
- virtual bool GetStats(const StatsOptions& options, VideoMediaInfo* info);
-
- virtual void OnPacketReceived(rtc::Buffer* packet,
- const rtc::PacketTime& packet_time);
- virtual void OnRtcpReceived(rtc::Buffer* packet,
- const rtc::PacketTime& packet_time);
- virtual void OnReadyToSend(bool ready);
-
- virtual void UpdateAspectRatio(int ratio_w, int ratio_h);
-
- void OnLocalFrame(VideoCapturer*, const VideoFrame*);
- void OnLocalFrameFormat(VideoCapturer*, const VideoFormat*);
-
- bool sending() const { return sending_; }
-
- private:
- bool SelectActiveChannel(const std::vector<VideoCodec>& codecs);
- void SplitCodecs(const std::vector<VideoCodec>& codecs,
- std::vector<VideoCodec>* codecs1,
- std::vector<VideoCodec>* codecs2);
-
- void OnMediaError(uint32 ssrc, Error error);
-
- HybridVideoEngineInterface* engine_;
- rtc::scoped_ptr<VideoMediaChannel> channel1_;
- rtc::scoped_ptr<VideoMediaChannel> channel2_;
- VideoMediaChannel* active_channel_;
- bool sending_;
-};
-
-// Interface class for HybridVideoChannels to talk to the engine.
-class HybridVideoEngineInterface {
- public:
- virtual ~HybridVideoEngineInterface() {}
- virtual bool HasCodec1(const VideoCodec& codec) = 0;
- virtual bool HasCodec2(const VideoCodec& codec) = 0;
- virtual void OnSendChange1(VideoMediaChannel* channel1, bool send) = 0;
- virtual void OnSendChange2(VideoMediaChannel* channel1, bool send) = 0;
- virtual void OnNewSendResolution(int width, int height) = 0;
-};
-
-// The HybridVideoEngine class combines two unrelated VideoEngine impls
-// into a single class. It creates HybridVideoMediaChannels that also contain
-// a VideoMediaChannel implementation from each engine. Policy is then used
-// during call setup to determine which VideoMediaChannel should be used.
-// Currently, this policy is based on what codec the remote side wants to use.
-template<class VIDEO1, class VIDEO2>
-class HybridVideoEngine : public HybridVideoEngineInterface {
- public:
- HybridVideoEngine() {
- // Unify the codec lists.
- codecs_ = video1_.codecs();
- codecs_.insert(codecs_.end(), video2_.codecs().begin(),
- video2_.codecs().end());
-
- rtp_header_extensions_ = video1_.rtp_header_extensions();
- rtp_header_extensions_.insert(rtp_header_extensions_.end(),
- video2_.rtp_header_extensions().begin(),
- video2_.rtp_header_extensions().end());
-
- SignalCaptureStateChange.repeat(video2_.SignalCaptureStateChange);
- }
-
- bool Init(rtc::Thread* worker_thread) {
- if (!video1_.Init(worker_thread)) {
- LOG(LS_ERROR) << "Failed to init VideoEngine1";
- return false;
- }
- if (!video2_.Init(worker_thread)) {
- LOG(LS_ERROR) << "Failed to init VideoEngine2";
- video1_.Terminate();
- return false;
- }
- return true;
- }
- void Terminate() {
- video1_.Terminate();
- video2_.Terminate();
- }
-
- int GetCapabilities() {
- return (video1_.GetCapabilities() | video2_.GetCapabilities());
- }
- HybridVideoMediaChannel* CreateChannel(VoiceMediaChannel* channel) {
- rtc::scoped_ptr<VideoMediaChannel> channel1(
- video1_.CreateChannel(channel));
- if (!channel1) {
- LOG(LS_ERROR) << "Failed to create VideoMediaChannel1";
- return NULL;
- }
- rtc::scoped_ptr<VideoMediaChannel> channel2(
- video2_.CreateChannel(channel));
- if (!channel2) {
- LOG(LS_ERROR) << "Failed to create VideoMediaChannel2";
- return NULL;
- }
- return new HybridVideoMediaChannel(this,
- channel1.release(), channel2.release());
- }
-
- bool SetOptions(const VideoOptions& options) {
- return video1_.SetOptions(options) && video2_.SetOptions(options);
- }
- bool SetDefaultEncoderConfig(const VideoEncoderConfig& config) {
- VideoEncoderConfig conf = config;
- if (video1_.codecs().size() > 0) {
- conf.max_codec.name = video1_.codecs()[0].name;
- if (!video1_.SetDefaultEncoderConfig(conf)) {
- LOG(LS_ERROR) << "Failed to SetDefaultEncoderConfig for video1";
- return false;
- }
- }
- if (video2_.codecs().size() > 0) {
- conf.max_codec.name = video2_.codecs()[0].name;
- if (!video2_.SetDefaultEncoderConfig(conf)) {
- LOG(LS_ERROR) << "Failed to SetDefaultEncoderConfig for video2";
- return false;
- }
- }
- return true;
- }
- VideoEncoderConfig GetDefaultEncoderConfig() const {
- // This looks pretty strange, but, in practice, it'll do sane things if
- // GetDefaultEncoderConfig is only called after SetDefaultEncoderConfig,
- // since both engines should be essentially equivalent at that point. If it
- // hasn't been called, though, we'll use the first meaningful encoder
- // config, or the config from the second video engine if neither are
- // meaningful.
- VideoEncoderConfig config = video1_.GetDefaultEncoderConfig();
- if (config.max_codec.width != 0) {
- return config;
- } else {
- return video2_.GetDefaultEncoderConfig();
- }
- }
- const std::vector<VideoCodec>& codecs() const {
- return codecs_;
- }
- const std::vector<RtpHeaderExtension>& rtp_header_extensions() const {
- return rtp_header_extensions_;
- }
- void SetLogging(int min_sev, const char* filter) {
- video1_.SetLogging(min_sev, filter);
- video2_.SetLogging(min_sev, filter);
- }
-
- VideoFormat GetStartCaptureFormat() const {
- return video2_.GetStartCaptureFormat();
- }
-
- // TODO(juberti): Remove these functions after we do the capturer refactoring.
- // For now they are set to always use the second engine for capturing, which
- // is convenient given our intended use case.
- bool SetCaptureDevice(const Device* device) {
- return video2_.SetCaptureDevice(device);
- }
- VideoCapturer* GetVideoCapturer() const {
- return video2_.GetVideoCapturer();
- }
- bool SetLocalRenderer(VideoRenderer* renderer) {
- return video2_.SetLocalRenderer(renderer);
- }
- sigslot::repeater2<VideoCapturer*, CaptureState> SignalCaptureStateChange;
-
- virtual bool HasCodec1(const VideoCodec& codec) {
- return HasCodec(video1_, codec);
- }
- virtual bool HasCodec2(const VideoCodec& codec) {
- return HasCodec(video2_, codec);
- }
- template<typename VIDEO>
- bool HasCodec(const VIDEO& engine, const VideoCodec& codec) const {
- for (std::vector<VideoCodec>::const_iterator i = engine.codecs().begin();
- i != engine.codecs().end();
- ++i) {
- if (i->Matches(codec)) {
- return true;
- }
- }
- return false;
- }
- virtual void OnSendChange1(VideoMediaChannel* channel1, bool send) {
- }
- virtual void OnSendChange2(VideoMediaChannel* channel2, bool send) {
- }
- virtual void OnNewSendResolution(int width, int height) {
- }
-
- protected:
- VIDEO1 video1_;
- VIDEO2 video2_;
- std::vector<VideoCodec> codecs_;
- std::vector<RtpHeaderExtension> rtp_header_extensions_;
-};
-
-} // namespace cricket
-
-#endif // TALK_MEDIA_BASE_HYBRIDVIDEOENGINE_H_
diff --git a/media/base/hybridvideoengine_unittest.cc b/media/base/hybridvideoengine_unittest.cc
deleted file mode 100644
index 7b409ea..0000000
--- a/media/base/hybridvideoengine_unittest.cc
+++ /dev/null
@@ -1,486 +0,0 @@
-/*
- * libjingle
- * Copyright 2004 Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "talk/media/base/fakemediaengine.h"
-#include "talk/media/base/fakenetworkinterface.h"
-#include "talk/media/base/fakevideocapturer.h"
-#include "talk/media/base/hybridvideoengine.h"
-#include "talk/media/base/mediachannel.h"
-#include "talk/media/base/testutils.h"
-#include "webrtc/base/gunit.h"
-
-static const cricket::VideoCodec kGenericCodec(97, "Generic", 640, 360, 30, 0);
-static const cricket::VideoCodec kVp8Codec(100, "VP8", 640, 360, 30, 0);
-static const cricket::VideoCodec kCodecsVp8Only[] = { kVp8Codec };
-static const cricket::VideoCodec kCodecsGenericOnly[] = { kGenericCodec };
-static const cricket::VideoCodec kCodecsVp8First[] = { kVp8Codec,
- kGenericCodec };
-static const cricket::VideoCodec kCodecsGenericFirst[] = { kGenericCodec,
- kVp8Codec };
-
-using cricket::StreamParams;
-
-class FakeVp8VideoEngine : public cricket::FakeVideoEngine {
- public:
- FakeVp8VideoEngine() {
- SetCodecs(MAKE_VECTOR(kCodecsVp8Only));
- }
-};
-class FakeGenericVideoEngine : public cricket::FakeVideoEngine {
- public:
- FakeGenericVideoEngine() {
- SetCodecs(MAKE_VECTOR(kCodecsGenericOnly));
- }
-
- // For testing purposes, mimic the behavior of a media engine that throws out
- // resolutions that don't match the codec list. A width or height of 0
- // trivially will never match the codec list, so this is sufficient for
- // testing the case we want (0x0).
- virtual bool FindCodec(const cricket::VideoCodec& codec) {
- if (codec.width == 0 || codec.height == 0) {
- return false;
- } else {
- return cricket::FakeVideoEngine::FindCodec(codec);
- }
- }
-};
-class HybridVideoEngineForTest : public cricket::HybridVideoEngine<
- FakeVp8VideoEngine, FakeGenericVideoEngine> {
- public:
- HybridVideoEngineForTest()
- :
- num_ch1_send_on_(0),
- num_ch1_send_off_(0),
- send_width_(0),
- send_height_(0) { }
- cricket::FakeVideoEngine* sub_engine1() { return &video1_; }
- cricket::FakeVideoEngine* sub_engine2() { return &video2_; }
-
- // From base class HybridVideoEngine.
- void OnSendChange1(cricket::VideoMediaChannel* channel1, bool send) {
- if (send) {
- ++num_ch1_send_on_;
- } else {
- ++num_ch1_send_off_;
- }
- }
- // From base class HybridVideoEngine
- void OnNewSendResolution(int width, int height) {
- send_width_ = width;
- send_height_ = height;
- }
-
- int num_ch1_send_on() const { return num_ch1_send_on_; }
- int num_ch1_send_off() const { return num_ch1_send_off_; }
-
- int send_width() const { return send_width_; }
- int send_height() const { return send_height_; }
-
- private:
- int num_ch1_send_on_;
- int num_ch1_send_off_;
-
- int send_width_;
- int send_height_;
-};
-
-class HybridVideoEngineTest : public testing::Test {
- public:
- HybridVideoEngineTest() : sub_channel1_(NULL), sub_channel2_(NULL) {
- }
- ~HybridVideoEngineTest() {
- engine_.Terminate();
- }
- bool SetupEngine() {
- bool result = engine_.Init(rtc::Thread::Current());
- if (result) {
- channel_.reset(engine_.CreateChannel(NULL));
- result = (channel_.get() != NULL);
- sub_channel1_ = engine_.sub_engine1()->GetChannel(0);
- sub_channel2_ = engine_.sub_engine2()->GetChannel(0);
- }
- return result;
- }
- bool SetupRenderAndAddStream(const StreamParams& sp) {
- if (!SetupEngine())
- return false;
- channel_->SetInterface(transport_.get());
- return channel_->SetRecvCodecs(engine_.codecs()) &&
- channel_->AddSendStream(sp) &&
- channel_->SetRender(true);
- }
- void DeliverPacket(const void* data, int len) {
- rtc::Buffer packet(data, len);
- channel_->OnPacketReceived(&packet, rtc::CreatePacketTime(0));
- }
- void DeliverRtcp(const void* data, int len) {
- rtc::Buffer packet(data, len);
- channel_->OnRtcpReceived(&packet, rtc::CreatePacketTime(0));
- }
-
- protected:
- void TestSetSendCodecs(cricket::FakeVideoEngine* sub_engine,
- const std::vector<cricket::VideoCodec>& codecs) {
- EXPECT_TRUE(SetupRenderAndAddStream(StreamParams::CreateLegacy(1234)));
- EXPECT_TRUE(channel_->SetSendCodecs(codecs));
- cricket::FakeVideoMediaChannel* sub_channel = sub_engine->GetChannel(0);
- ASSERT_EQ(1U, sub_channel->send_codecs().size());
- EXPECT_EQ(codecs[0], sub_channel->send_codecs()[0]);
- EXPECT_TRUE(channel_->SetSend(true));
- EXPECT_TRUE(sub_channel->sending());
- }
- void TestSetSendBandwidth(cricket::FakeVideoEngine* sub_engine,
- const std::vector<cricket::VideoCodec>& codecs,
- int start_bitrate,
- int max_bitrate) {
- EXPECT_TRUE(SetupRenderAndAddStream(StreamParams::CreateLegacy(1234)));
- EXPECT_TRUE(channel_->SetSendCodecs(codecs));
- EXPECT_TRUE(channel_->SetStartSendBandwidth(start_bitrate));
- EXPECT_TRUE(channel_->SetMaxSendBandwidth(max_bitrate));
- cricket::FakeVideoMediaChannel* sub_channel = sub_engine->GetChannel(0);
- EXPECT_EQ(start_bitrate, sub_channel->start_bps());
- EXPECT_EQ(max_bitrate, sub_channel->max_bps());
- }
- HybridVideoEngineForTest engine_;
- rtc::scoped_ptr<cricket::HybridVideoMediaChannel> channel_;
- rtc::scoped_ptr<cricket::FakeNetworkInterface> transport_;
- cricket::FakeVideoMediaChannel* sub_channel1_;
- cricket::FakeVideoMediaChannel* sub_channel2_;
-};
-
-TEST_F(HybridVideoEngineTest, StartupShutdown) {
- EXPECT_TRUE(engine_.Init(rtc::Thread::Current()));
- engine_.Terminate();
-}
-
-// Tests that SetDefaultVideoEncoderConfig passes down to both engines.
-TEST_F(HybridVideoEngineTest, SetDefaultVideoEncoderConfig) {
- cricket::VideoEncoderConfig config(
- cricket::VideoCodec(105, "", 640, 400, 30, 0), 1, 2);
- EXPECT_TRUE(engine_.SetDefaultEncoderConfig(config));
-
- cricket::VideoEncoderConfig config_1 = config;
- config_1.max_codec.name = kCodecsVp8Only[0].name;
- EXPECT_EQ(config_1, engine_.sub_engine1()->default_encoder_config());
-
- cricket::VideoEncoderConfig config_2 = config;
- config_2.max_codec.name = kCodecsGenericOnly[0].name;
- EXPECT_EQ(config_2, engine_.sub_engine2()->default_encoder_config());
-}
-
-// Tests that GetDefaultVideoEncoderConfig picks a meaningful encoder config
-// based on the underlying engine config and then after a call to
-// SetDefaultEncoderConfig on the hybrid engine.
-TEST_F(HybridVideoEngineTest, SetDefaultVideoEncoderConfigDefaultValue) {
- cricket::VideoEncoderConfig blank_config;
- cricket::VideoEncoderConfig meaningful_config1(
- cricket::VideoCodec(111, "abcd", 320, 240, 30, 0), 1, 2);
- cricket::VideoEncoderConfig meaningful_config2(
- cricket::VideoCodec(111, "abcd", 1280, 720, 30, 0), 1, 2);
- cricket::VideoEncoderConfig meaningful_config3(
- cricket::VideoCodec(111, "abcd", 640, 360, 30, 0), 1, 2);
- engine_.sub_engine1()->SetDefaultEncoderConfig(blank_config);
- engine_.sub_engine2()->SetDefaultEncoderConfig(blank_config);
- EXPECT_EQ(blank_config, engine_.GetDefaultEncoderConfig());
-
- engine_.sub_engine2()->SetDefaultEncoderConfig(meaningful_config2);
- EXPECT_EQ(meaningful_config2, engine_.GetDefaultEncoderConfig());
-
- engine_.sub_engine1()->SetDefaultEncoderConfig(meaningful_config1);
- EXPECT_EQ(meaningful_config1, engine_.GetDefaultEncoderConfig());
-
- EXPECT_TRUE(engine_.SetDefaultEncoderConfig(meaningful_config3));
- // The overall config should now match, though the codec name will have been
- // rewritten for the first media engine.
- meaningful_config3.max_codec.name = kCodecsVp8Only[0].name;
- EXPECT_EQ(meaningful_config3, engine_.GetDefaultEncoderConfig());
-}
-
-// Tests that our engine has the right codecs in the right order.
-TEST_F(HybridVideoEngineTest, CheckCodecs) {
- const std::vector<cricket::VideoCodec>& c = engine_.codecs();
- ASSERT_EQ(2U, c.size());
- EXPECT_EQ(kVp8Codec, c[0]);
- EXPECT_EQ(kGenericCodec, c[1]);
-}
-
-// Tests that our engine has the right caps.
-TEST_F(HybridVideoEngineTest, CheckCaps) {
- EXPECT_EQ(cricket::VIDEO_SEND | cricket::VIDEO_RECV,
- engine_.GetCapabilities());
-}
-
-// Tests that we can create and destroy a channel.
-TEST_F(HybridVideoEngineTest, CreateChannel) {
- EXPECT_TRUE(SetupEngine());
- EXPECT_TRUE(sub_channel1_ != NULL);
- EXPECT_TRUE(sub_channel2_ != NULL);
-}
-
-// Tests that we properly handle failures in CreateChannel.
-TEST_F(HybridVideoEngineTest, CreateChannelFail) {
- engine_.sub_engine1()->set_fail_create_channel(true);
- EXPECT_FALSE(SetupEngine());
- EXPECT_TRUE(channel_.get() == NULL);
- EXPECT_TRUE(sub_channel1_ == NULL);
- EXPECT_TRUE(sub_channel2_ == NULL);
- engine_.sub_engine1()->set_fail_create_channel(false);
- engine_.sub_engine2()->set_fail_create_channel(true);
- EXPECT_FALSE(SetupEngine());
- EXPECT_TRUE(channel_.get() == NULL);
- EXPECT_TRUE(sub_channel1_ == NULL);
- EXPECT_TRUE(sub_channel2_ == NULL);
-}
-
-// Test that we set our inbound codecs and settings properly.
-TEST_F(HybridVideoEngineTest, SetLocalDescription) {
- EXPECT_TRUE(SetupEngine());
- channel_->SetInterface(transport_.get());
- EXPECT_TRUE(channel_->SetRecvCodecs(engine_.codecs()));
- ASSERT_EQ(1U, sub_channel1_->recv_codecs().size());
- ASSERT_EQ(1U, sub_channel2_->recv_codecs().size());
- EXPECT_EQ(kVp8Codec, sub_channel1_->recv_codecs()[0]);
- EXPECT_EQ(kGenericCodec, sub_channel2_->recv_codecs()[0]);
- StreamParams stream;
- stream.id = "TestStream";
- stream.ssrcs.push_back(1234);
- stream.cname = "5678";
- EXPECT_TRUE(channel_->AddSendStream(stream));
- EXPECT_EQ(1234U, sub_channel1_->send_ssrc());
- EXPECT_EQ(1234U, sub_channel2_->send_ssrc());
- EXPECT_EQ("5678", sub_channel1_->rtcp_cname());
- EXPECT_EQ("5678", sub_channel2_->rtcp_cname());
- EXPECT_TRUE(channel_->SetRender(true));
- // We've called SetRender, so we should be playing out, but not yet sending.
- EXPECT_TRUE(sub_channel1_->playout());
- EXPECT_TRUE(sub_channel2_->playout());
- EXPECT_FALSE(sub_channel1_->sending());
- EXPECT_FALSE(sub_channel2_->sending());
- // We may get SetSend(false) calls during call setup.
- // Since this causes no change in state, they should no-op and return true.
- EXPECT_TRUE(channel_->SetSend(false));
- EXPECT_FALSE(sub_channel1_->sending());
- EXPECT_FALSE(sub_channel2_->sending());
-}
-
-TEST_F(HybridVideoEngineTest, OnNewSendResolution) {
- EXPECT_TRUE(SetupEngine());
- EXPECT_TRUE(channel_->SetSendCodecs(MAKE_VECTOR(kCodecsVp8First)));
- EXPECT_EQ(640, engine_.send_width());
- EXPECT_EQ(360, engine_.send_height());
-}
-
-// Test that we converge to the active channel for engine 1.
-TEST_F(HybridVideoEngineTest, SetSendCodecs1) {
- // This will nuke the object that sub_channel2_ points to.
- TestSetSendCodecs(engine_.sub_engine1(), MAKE_VECTOR(kCodecsVp8First));
- EXPECT_TRUE(engine_.sub_engine2()->GetChannel(0) == NULL);
-}
-
-// Test that we converge to the active channel for engine 2.
-TEST_F(HybridVideoEngineTest, SetSendCodecs2) {
- // This will nuke the object that sub_channel1_ points to.
- TestSetSendCodecs(engine_.sub_engine2(), MAKE_VECTOR(kCodecsGenericFirst));
- EXPECT_TRUE(engine_.sub_engine1()->GetChannel(0) == NULL);
-}
-
-// Test that we don't accidentally eat 0x0 in SetSendCodecs
-TEST_F(HybridVideoEngineTest, SetSendCodecs0x0) {
- EXPECT_TRUE(SetupRenderAndAddStream(StreamParams::CreateLegacy(1234)));
- // Send using generic codec, but with 0x0 resolution.
- std::vector<cricket::VideoCodec> codecs(MAKE_VECTOR(kCodecsGenericFirst));
- codecs.resize(1);
- codecs[0].width = 0;
- codecs[0].height = 0;
- EXPECT_TRUE(channel_->SetSendCodecs(codecs));
-}
-
-// Test setting the send bandwidth for VP8.
-TEST_F(HybridVideoEngineTest, SetSendBandwidth1) {
- TestSetSendBandwidth(engine_.sub_engine1(),
- MAKE_VECTOR(kCodecsVp8First),
- 100000,
- 384000);
-}
-
-// Test setting the send bandwidth for a generic codec.
-TEST_F(HybridVideoEngineTest, SetSendBandwidth2) {
- TestSetSendBandwidth(engine_.sub_engine2(),
- MAKE_VECTOR(kCodecsGenericFirst),
- 100001,
- 384002);
-}
-
-// Test that we dump RTP packets that arrive early.
-TEST_F(HybridVideoEngineTest, HandleEarlyRtp) {
- static const uint8 kPacket[1024] = { 0 };
- static const uint8 kRtcp[1024] = { 1 };
- EXPECT_TRUE(SetupRenderAndAddStream(StreamParams::CreateLegacy(1234)));
- DeliverPacket(kPacket, sizeof(kPacket));
- DeliverRtcp(kRtcp, sizeof(kRtcp));
- EXPECT_TRUE(sub_channel1_->CheckNoRtp());
- EXPECT_TRUE(sub_channel2_->CheckNoRtp());
- EXPECT_TRUE(sub_channel1_->CheckNoRtcp());
- EXPECT_TRUE(sub_channel2_->CheckNoRtcp());
-}
-
-// Test that we properly pass on normal RTP packets.
-TEST_F(HybridVideoEngineTest, HandleRtp) {
- static const uint8 kPacket[1024] = { 0 };
- static const uint8 kRtcp[1024] = { 1 };
- EXPECT_TRUE(SetupRenderAndAddStream(StreamParams::CreateLegacy(1234)));
- EXPECT_TRUE(channel_->SetSendCodecs(MAKE_VECTOR(kCodecsVp8First)));
- EXPECT_TRUE(channel_->SetSend(true));
- DeliverPacket(kPacket, sizeof(kPacket));
- DeliverRtcp(kRtcp, sizeof(kRtcp));
- EXPECT_TRUE(sub_channel1_->CheckRtp(kPacket, sizeof(kPacket)));
- EXPECT_TRUE(sub_channel1_->CheckRtcp(kRtcp, sizeof(kRtcp)));
-}
-
-// Test that we properly connect media error signal.
-TEST_F(HybridVideoEngineTest, MediaErrorSignal) {
- cricket::VideoMediaErrorCatcher catcher;
-
- // Verify no signal from either channel before the active channel is set.
- EXPECT_TRUE(SetupEngine());
- channel_->SignalMediaError.connect(&catcher,
- &cricket::VideoMediaErrorCatcher::OnError);
- sub_channel1_->SignalMediaError(1, cricket::VideoMediaChannel::ERROR_OTHER);
- EXPECT_EQ(0U, catcher.ssrc());
- sub_channel2_->SignalMediaError(2,
- cricket::VideoMediaChannel::ERROR_REC_DEVICE_OPEN_FAILED);
- EXPECT_EQ(0U, catcher.ssrc());
-
- // Set vp8 as active channel and verify that a signal comes from it.
- EXPECT_TRUE(channel_->SetSendCodecs(MAKE_VECTOR(kCodecsVp8First)));
- sub_channel1_->SignalMediaError(1, cricket::VideoMediaChannel::ERROR_OTHER);
- EXPECT_EQ(cricket::VideoMediaChannel::ERROR_OTHER, catcher.error());
- EXPECT_EQ(1U, catcher.ssrc());
-
- // Set generic codec as active channel and verify that a signal comes from it.
- EXPECT_TRUE(SetupEngine());
- channel_->SignalMediaError.connect(&catcher,
- &cricket::VideoMediaErrorCatcher::OnError);
- EXPECT_TRUE(channel_->SetSendCodecs(MAKE_VECTOR(kCodecsGenericFirst)));
- sub_channel2_->SignalMediaError(2,
- cricket::VideoMediaChannel::ERROR_REC_DEVICE_OPEN_FAILED);
- EXPECT_EQ(cricket::VideoMediaChannel::ERROR_REC_DEVICE_OPEN_FAILED,
- catcher.error());
- EXPECT_EQ(2U, catcher.ssrc());
-}
-
-// Test that SetSend doesn't re-enter.
-TEST_F(HybridVideoEngineTest, RepeatSetSend) {
- EXPECT_TRUE(SetupEngine());
- EXPECT_TRUE(channel_->SetSendCodecs(MAKE_VECTOR(kCodecsVp8First)));
-
- // Verify initial status.
- EXPECT_FALSE(channel_->sending());
- EXPECT_FALSE(sub_channel1_->sending());
- EXPECT_EQ(0, engine_.num_ch1_send_on());
- EXPECT_EQ(0, engine_.num_ch1_send_off());
-
- // Verfiy SetSend(true) works correctly.
- EXPECT_TRUE(channel_->SetSend(true));
- EXPECT_TRUE(channel_->sending());
- EXPECT_TRUE(sub_channel1_->sending());
- EXPECT_EQ(1, engine_.num_ch1_send_on());
- EXPECT_EQ(0, engine_.num_ch1_send_off());
-
- // SetSend(true) again and verify nothing changes.
- EXPECT_TRUE(channel_->SetSend(true));
- EXPECT_TRUE(channel_->sending());
- EXPECT_TRUE(sub_channel1_->sending());
- EXPECT_EQ(1, engine_.num_ch1_send_on());
- EXPECT_EQ(0, engine_.num_ch1_send_off());
-
- // Verify SetSend(false) works correctly.
- EXPECT_TRUE(channel_->SetSend(false));
- EXPECT_FALSE(channel_->sending());
- EXPECT_FALSE(sub_channel1_->sending());
- EXPECT_EQ(1, engine_.num_ch1_send_on());
- EXPECT_EQ(1, engine_.num_ch1_send_off());
-
- // SetSend(false) again and verfiy nothing changes.
- EXPECT_TRUE(channel_->SetSend(false));
- EXPECT_FALSE(channel_->sending());
- EXPECT_FALSE(sub_channel1_->sending());
- EXPECT_EQ(1, engine_.num_ch1_send_on());
- EXPECT_EQ(1, engine_.num_ch1_send_off());
-}
-
-// Test that SetOptions.
-TEST_F(HybridVideoEngineTest, SetOptions) {
- cricket::VideoOptions vmo;
- vmo.video_high_bitrate.Set(true);
- vmo.system_low_adaptation_threshhold.Set(0.10f);
- EXPECT_TRUE(SetupEngine());
- EXPECT_TRUE(channel_->SetOptions(vmo));
-
- bool high_bitrate;
- float low;
- EXPECT_TRUE(sub_channel1_->GetOptions(&vmo));
- EXPECT_TRUE(vmo.video_high_bitrate.Get(&high_bitrate));
- EXPECT_TRUE(high_bitrate);
- EXPECT_TRUE(vmo.system_low_adaptation_threshhold.Get(&low));
- EXPECT_EQ(0.10f, low);
- EXPECT_TRUE(sub_channel2_->GetOptions(&vmo));
- EXPECT_TRUE(vmo.video_high_bitrate.Get(&high_bitrate));
- EXPECT_TRUE(high_bitrate);
- EXPECT_TRUE(vmo.system_low_adaptation_threshhold.Get(&low));
- EXPECT_EQ(0.10f, low);
-
- vmo.video_high_bitrate.Set(false);
- vmo.system_low_adaptation_threshhold.Set(0.50f);
-
- EXPECT_TRUE(channel_->SetOptions(vmo));
- EXPECT_TRUE(sub_channel1_->GetOptions(&vmo));
- EXPECT_TRUE(vmo.video_high_bitrate.Get(&high_bitrate));
- EXPECT_FALSE(high_bitrate);
- EXPECT_TRUE(vmo.system_low_adaptation_threshhold.Get(&low));
- EXPECT_EQ(0.50f, low);
- EXPECT_TRUE(sub_channel2_->GetOptions(&vmo));
- EXPECT_TRUE(vmo.video_high_bitrate.Get(&high_bitrate));
- EXPECT_FALSE(high_bitrate);
- EXPECT_TRUE(vmo.system_low_adaptation_threshhold.Get(&low));
- EXPECT_EQ(0.50f, low);
-}
-
-TEST_F(HybridVideoEngineTest, SetCapturer) {
- EXPECT_TRUE(SetupEngine());
- // Set vp8 as active channel and verify that capturer can be set.
- EXPECT_TRUE(channel_->SetSendCodecs(MAKE_VECTOR(kCodecsVp8First)));
- cricket::FakeVideoCapturer fake_video_capturer;
- EXPECT_TRUE(channel_->SetCapturer(0, &fake_video_capturer));
- EXPECT_TRUE(channel_->SetCapturer(0, NULL));
-
- // Set generic codec active channel and verify that capturer can be set.
- EXPECT_TRUE(SetupEngine());
- EXPECT_TRUE(channel_->SetSendCodecs(MAKE_VECTOR(kCodecsGenericFirst)));
- EXPECT_TRUE(channel_->SetCapturer(0, &fake_video_capturer));
- EXPECT_TRUE(channel_->SetCapturer(0, NULL));
-}
diff --git a/media/base/mediachannel.h b/media/base/mediachannel.h
index 62d6b61..5232e5d 100644
--- a/media/base/mediachannel.h
+++ b/media/base/mediachannel.h
@@ -182,6 +182,7 @@ struct AudioOptions {
recording_sample_rate.SetFrom(change.recording_sample_rate);
playout_sample_rate.SetFrom(change.playout_sample_rate);
dscp.SetFrom(change.dscp);
+ combined_audio_video_bwe.SetFrom(change.combined_audio_video_bwe);
}
bool operator==(const AudioOptions& o) const {
@@ -207,7 +208,8 @@ struct AudioOptions {
rx_agc_limiter == o.rx_agc_limiter &&
recording_sample_rate == o.recording_sample_rate &&
playout_sample_rate == o.playout_sample_rate &&
- dscp == o.dscp;
+ dscp == o.dscp &&
+ combined_audio_video_bwe == o.combined_audio_video_bwe;
}
std::string ToString() const {
@@ -238,6 +240,7 @@ struct AudioOptions {
ost << ToStringIfSet("recording_sample_rate", recording_sample_rate);
ost << ToStringIfSet("playout_sample_rate", playout_sample_rate);
ost << ToStringIfSet("dscp", dscp);
+ ost << ToStringIfSet("combined_audio_video_bwe", combined_audio_video_bwe);
ost << "}";
return ost.str();
}
@@ -275,6 +278,8 @@ struct AudioOptions {
Settable<uint32> playout_sample_rate;
// Set DSCP value for packet sent from audio channel.
Settable<bool> dscp;
+ // Enable combined audio+bandwidth BWE.
+ Settable<bool> combined_audio_video_bwe;
};
// Options that can be applied to a VideoMediaChannel or a VideoMediaEngine.
diff --git a/media/base/mediaengine.h b/media/base/mediaengine.h
index 6c4b740..f30e3b1 100644
--- a/media/base/mediaengine.h
+++ b/media/base/mediaengine.h
@@ -91,8 +91,6 @@ class MediaEngineInterface {
virtual AudioOptions GetAudioOptions() const = 0;
// Sets global audio options. "options" are from AudioOptions, above.
virtual bool SetAudioOptions(const AudioOptions& options) = 0;
- // Sets global video options. "options" are from VideoOptions, above.
- virtual bool SetVideoOptions(const VideoOptions& options) = 0;
// Sets the value used by the echo canceller to offset delay values obtained
// from the OS.
virtual bool SetAudioDelayOffset(int offset) = 0;
@@ -124,7 +122,6 @@ class MediaEngineInterface {
// when a VoiceMediaChannel starts sending.
virtual bool SetLocalMonitor(bool enable) = 0;
// Installs a callback for raw frames from the local camera.
- virtual bool SetLocalRenderer(VideoRenderer* renderer) = 0;
virtual const std::vector<AudioCodec>& audio_codecs() = 0;
virtual const std::vector<RtpHeaderExtension>&
@@ -214,9 +211,6 @@ class CompositeMediaEngine : public MediaEngineInterface {
virtual bool SetAudioOptions(const AudioOptions& options) {
return voice_.SetOptions(options);
}
- virtual bool SetVideoOptions(const VideoOptions& options) {
- return video_.SetOptions(options);
- }
virtual bool SetAudioDelayOffset(int offset) {
return voice_.SetDelayOffset(offset);
}
@@ -245,10 +239,6 @@ class CompositeMediaEngine : public MediaEngineInterface {
virtual bool SetLocalMonitor(bool enable) {
return voice_.SetLocalMonitor(enable);
}
- virtual bool SetLocalRenderer(VideoRenderer* renderer) {
- return video_.SetLocalRenderer(renderer);
- }
-
virtual const std::vector<AudioCodec>& audio_codecs() {
return voice_.codecs();
}
@@ -361,7 +351,6 @@ class NullVideoEngine {
bool SetDefaultEncoderConfig(const VideoEncoderConfig& config) {
return true;
}
- bool SetLocalRenderer(VideoRenderer* renderer) { return true; }
const std::vector<VideoCodec>& codecs() { return codecs_; }
const std::vector<RtpHeaderExtension>& rtp_header_extensions() {
return rtp_header_extensions_;
diff --git a/media/base/testutils.cc b/media/base/testutils.cc
index 8b79df4..84fd05c 100644
--- a/media/base/testutils.cc
+++ b/media/base/testutils.cc
@@ -29,6 +29,7 @@
#include <math.h>
+#include "talk/media/base/executablehelpers.h"
#include "talk/media/base/rtpdump.h"
#include "talk/media/base/videocapturer.h"
#include "talk/media/base/videoframe.h"
@@ -255,10 +256,15 @@ void VideoCapturerListener::OnFrameCaptured(VideoCapturer* capturer,
// Returns the absolute path to a file in the testdata/ directory.
std::string GetTestFilePath(const std::string& filename) {
// Locate test data directory.
+#ifdef ENABLE_WEBRTC
+ rtc::Pathname path = rtc::GetExecutablePath();
+ EXPECT_FALSE(path.empty());
+ path.AppendPathname("../../talk/");
+#else
rtc::Pathname path = testing::GetTalkDirectory();
EXPECT_FALSE(path.empty()); // must be run from inside "talk"
- path.AppendFolder("media");
- path.AppendFolder("testdata");
+#endif
+ path.AppendFolder("media/testdata/");
path.SetFilename(filename);
return path.pathname();
}
diff --git a/media/base/videoadapter_unittest.cc b/media/base/videoadapter_unittest.cc
index af374e0..04bf3d1 100755
--- a/media/base/videoadapter_unittest.cc
+++ b/media/base/videoadapter_unittest.cc
@@ -69,17 +69,24 @@ class VideoAdapterTest : public testing::Test {
listener_.get(), &VideoCapturerListener::OnFrameCaptured);
}
- void VerifyAdaptedResolution(int width, int height) {
- EXPECT_TRUE(NULL != listener_->adapted_frame());
- EXPECT_EQ(static_cast<size_t>(width),
- listener_->adapted_frame()->GetWidth());
- EXPECT_EQ(static_cast<size_t>(height),
- listener_->adapted_frame()->GetHeight());
+ virtual void TearDown() {
+ // Explicitly disconnect the VideoCapturer before to avoid data races
+ // (frames delivered to VideoCapturerListener while it's being destructed).
+ capturer_->SignalFrameCaptured.disconnect_all();
}
protected:
class VideoCapturerListener: public sigslot::has_slots<> {
public:
+ struct Stats {
+ int captured_frames;
+ int dropped_frames;
+ bool last_adapt_was_no_op;
+
+ int adapted_width;
+ int adapted_height;
+ };
+
explicit VideoCapturerListener(VideoAdapter* adapter)
: video_adapter_(adapter),
adapted_frame_(NULL),
@@ -95,6 +102,7 @@ class VideoAdapterTest : public testing::Test {
EXPECT_TRUE(temp_i420.Init(captured_frame,
captured_frame->width, abs(captured_frame->height)));
VideoFrame* out_frame = NULL;
+ rtc::CritScope lock(&crit_);
EXPECT_TRUE(video_adapter_->AdaptFrame(&temp_i420, &out_frame));
if (out_frame) {
if (out_frame == &temp_i420) {
@@ -112,12 +120,32 @@ class VideoAdapterTest : public testing::Test {
++captured_frames_;
}
- const VideoFrame* adapted_frame() const { return adapted_frame_; }
- int captured_frames() const { return captured_frames_; }
- int dropped_frames() const { return dropped_frames_; }
- bool last_adapt_was_no_op() const { return last_adapt_was_no_op_; }
+ Stats GetStats() {
+ rtc::CritScope lock(&crit_);
+ Stats stats;
+ stats.captured_frames = captured_frames_;
+ stats.dropped_frames = dropped_frames_;
+ stats.last_adapt_was_no_op = last_adapt_was_no_op_;
+ if (adapted_frame_ != NULL) {
+ stats.adapted_width = static_cast<int>(adapted_frame_->GetWidth());
+ stats.adapted_height = static_cast<int>(adapted_frame_->GetHeight());
+ } else {
+ stats.adapted_width = stats.adapted_height = -1;
+ }
+
+ return stats;
+ }
+
+ VideoFrame* CopyAdaptedFrame() {
+ rtc::CritScope lock(&crit_);
+ if (adapted_frame_ == NULL) {
+ return NULL;
+ }
+ return adapted_frame_->Copy();
+ }
private:
+ rtc::CriticalSection crit_;
VideoAdapter* video_adapter_;
const VideoFrame* adapted_frame_;
rtc::scoped_ptr<VideoFrame> copied_output_frame_;
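The hunks above make the listener thread-safe: OnFrameCaptured runs on the capture thread while the test body polls from the main thread, so the counters and the last adapted frame are now read as one consistent Stats snapshot under a lock, and CopyAdaptedFrame() hands back an owned copy instead of a pointer the capture thread may overwrite. A minimal sketch of the same snapshot-under-lock idea, with std::mutex standing in for the rtc::CriticalSection / rtc::CritScope pair used here (class and method names are illustrative):

#include <mutex>

class FrameStatsSketch {
 public:
  FrameStatsSketch() : captured_(0), dropped_(0) {}

  struct Snapshot {
    int captured;
    int dropped;
  };

  // Writer side: called on the capture thread for every frame.
  void OnFrame(bool dropped) {
    std::lock_guard<std::mutex> lock(mutex_);
    ++captured_;
    if (dropped)
      ++dropped_;
  }

  // Reader side: the test thread gets both counters in one locked step,
  // so they can never be observed mid-update.
  Snapshot Get() const {
    std::lock_guard<std::mutex> lock(mutex_);
    Snapshot s;
    s.captured = captured_;
    s.dropped = dropped_;
    return s;
  }

 private:
  mutable std::mutex mutex_;
  int captured_;
  int dropped_;
};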
@@ -135,6 +163,13 @@ class VideoAdapterTest : public testing::Test {
bool received_cpu_signal_;
};
+ void VerifyAdaptedResolution(const VideoCapturerListener::Stats& stats,
+ int width,
+ int height) {
+ EXPECT_EQ(width, stats.adapted_width);
+ EXPECT_EQ(height, stats.adapted_height);
+ }
+
rtc::scoped_ptr<FileVideoCapturer> capturer_;
rtc::scoped_ptr<VideoAdapter> adapter_;
rtc::scoped_ptr<VideoCapturerListener> listener_;
@@ -157,12 +192,13 @@ TEST_F(VideoAdapterTest, AdaptInactive) {
// Call Adapter with some frames.
EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 10, kWaitTimeout);
+ listener_->GetStats().captured_frames >= 10, kWaitTimeout);
// Verify no frame drop and no resolution change.
- EXPECT_GE(listener_->captured_frames(), 10);
- EXPECT_EQ(0, listener_->dropped_frames());
- VerifyAdaptedResolution(capture_format_.width, capture_format_.height);
+ VideoCapturerListener::Stats stats = listener_->GetStats();
+ EXPECT_GE(stats.captured_frames, 10);
+ EXPECT_EQ(0, stats.dropped_frames);
+ VerifyAdaptedResolution(stats, capture_format_.width, capture_format_.height);
}
// Do not adapt the frame rate or the resolution. Expect no frame drop and no
@@ -171,13 +207,14 @@ TEST_F(VideoAdapterTest, AdaptNothing) {
adapter_->SetOutputFormat(capture_format_);
EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 10, kWaitTimeout);
+ listener_->GetStats().captured_frames >= 10, kWaitTimeout);
// Verify no frame drop and no resolution change.
- EXPECT_GE(listener_->captured_frames(), 10);
- EXPECT_EQ(0, listener_->dropped_frames());
- VerifyAdaptedResolution(capture_format_.width, capture_format_.height);
- EXPECT_TRUE(listener_->last_adapt_was_no_op());
+ VideoCapturerListener::Stats stats = listener_->GetStats();
+ EXPECT_GE(stats.captured_frames, 10);
+ EXPECT_EQ(0, stats.dropped_frames);
+ VerifyAdaptedResolution(stats, capture_format_.width, capture_format_.height);
+ EXPECT_TRUE(stats.last_adapt_was_no_op);
}
TEST_F(VideoAdapterTest, AdaptZeroInterval) {
@@ -187,12 +224,13 @@ TEST_F(VideoAdapterTest, AdaptZeroInterval) {
adapter_->SetOutputFormat(format);
EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 10, kWaitTimeout);
+ listener_->GetStats().captured_frames >= 10, kWaitTimeout);
// Verify no crash and that frames aren't dropped.
- EXPECT_GE(listener_->captured_frames(), 10);
- EXPECT_EQ(0, listener_->dropped_frames());
- VerifyAdaptedResolution(capture_format_.width, capture_format_.height);
+ VideoCapturerListener::Stats stats = listener_->GetStats();
+ EXPECT_GE(stats.captured_frames, 10);
+ EXPECT_EQ(0, stats.dropped_frames);
+ VerifyAdaptedResolution(stats, capture_format_.width, capture_format_.height);
}
// Adapt the frame rate to be half of the capture rate at the beginning. Expect
@@ -203,12 +241,13 @@ TEST_F(VideoAdapterTest, AdaptFramerate) {
adapter_->SetOutputFormat(request_format);
EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 10, kWaitTimeout);
+ listener_->GetStats().captured_frames >= 10, kWaitTimeout);
// Verify frame drop and no resolution change.
- EXPECT_GE(listener_->captured_frames(), 10);
- EXPECT_EQ(listener_->captured_frames() / 2, listener_->dropped_frames());
- VerifyAdaptedResolution(capture_format_.width, capture_format_.height);
+ VideoCapturerListener::Stats stats = listener_->GetStats();
+ EXPECT_GE(stats.captured_frames, 10);
+ EXPECT_EQ(stats.captured_frames / 2, stats.dropped_frames);
+ VerifyAdaptedResolution(stats, capture_format_.width, capture_format_.height);
}
// Adapt the frame rate to be half of the capture rate at the beginning. Expect
@@ -219,13 +258,14 @@ TEST_F(VideoAdapterTest, AdaptFramerateVariable) {
adapter_->SetOutputFormat(request_format);
EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 30, kWaitTimeout);
+ listener_->GetStats().captured_frames >= 30, kWaitTimeout);
// Verify frame drop and no resolution change.
- EXPECT_GE(listener_->captured_frames(), 30);
+ VideoCapturerListener::Stats stats = listener_->GetStats();
+ EXPECT_GE(stats.captured_frames, 30);
// Verify 2 / 3 kept (20) and 1 / 3 dropped (10).
- EXPECT_EQ(listener_->captured_frames() * 1 / 3, listener_->dropped_frames());
- VerifyAdaptedResolution(capture_format_.width, capture_format_.height);
+ EXPECT_EQ(stats.captured_frames * 1 / 3, stats.dropped_frames);
+ VerifyAdaptedResolution(stats, capture_format_.width, capture_format_.height);
}
// Adapt the frame rate to be half of the capture rate after capturing no less
@@ -236,20 +276,20 @@ TEST_F(VideoAdapterTest, AdaptFramerateOntheFly) {
adapter_->SetOutputFormat(request_format);
EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 10, kWaitTimeout);
+ listener_->GetStats().captured_frames >= 10, kWaitTimeout);
// Verify no frame drop before adaptation.
- EXPECT_EQ(0, listener_->dropped_frames());
+ EXPECT_EQ(0, listener_->GetStats().dropped_frames);
// Adapt the frame rate.
request_format.interval *= 2;
adapter_->SetOutputFormat(request_format);
EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 20, kWaitTimeout);
+ listener_->GetStats().captured_frames >= 20, kWaitTimeout);
// Verify frame drop after adaptation.
- EXPECT_GT(listener_->dropped_frames(), 0);
+ EXPECT_GT(listener_->GetStats().dropped_frames, 0);
}
// Adapt the frame resolution to be a quarter of the capture resolution at the
@@ -261,11 +301,12 @@ TEST_F(VideoAdapterTest, AdaptResolution) {
adapter_->SetOutputFormat(request_format);
EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 10, kWaitTimeout);
+ listener_->GetStats().captured_frames >= 10, kWaitTimeout);
// Verify no frame drop and resolution change.
- EXPECT_EQ(0, listener_->dropped_frames());
- VerifyAdaptedResolution(request_format.width, request_format.height);
+ VideoCapturerListener::Stats stats = listener_->GetStats();
+ EXPECT_EQ(0, stats.dropped_frames);
+ VerifyAdaptedResolution(stats, request_format.width, request_format.height);
}
// Adapt the frame resolution to half width. Expect resolution change.
@@ -276,10 +317,10 @@ TEST_F(VideoAdapterTest, AdaptResolutionNarrow) {
adapter_->SetOutputFormat(request_format);
EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 10, kWaitTimeout);
+ listener_->GetStats().captured_frames >= 10, kWaitTimeout);
// Verify resolution change.
- VerifyAdaptedResolution(213, 160);
+ VerifyAdaptedResolution(listener_->GetStats(), 213, 160);
}
// Adapt the frame resolution to half height. Expect resolution change.
@@ -290,10 +331,10 @@ TEST_F(VideoAdapterTest, AdaptResolutionWide) {
adapter_->SetOutputFormat(request_format);
EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 10, kWaitTimeout);
+ listener_->GetStats().captured_frames >= 10, kWaitTimeout);
// Verify resolution change.
- VerifyAdaptedResolution(213, 160);
+ VerifyAdaptedResolution(listener_->GetStats(), 213, 160);
}
// Adapt the frame resolution to be a quarter of the capture resolution after
@@ -304,21 +345,25 @@ TEST_F(VideoAdapterTest, AdaptResolutionOnTheFly) {
adapter_->SetOutputFormat(request_format);
EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 10, kWaitTimeout);
+ listener_->GetStats().captured_frames >= 10, kWaitTimeout);
// Verify no resolution change before adaptation.
- VerifyAdaptedResolution(request_format.width, request_format.height);
+ VerifyAdaptedResolution(
+ listener_->GetStats(), request_format.width, request_format.height);
// Adapt the frame resolution.
request_format.width /= 2;
request_format.height /= 2;
adapter_->SetOutputFormat(request_format);
- EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 20, kWaitTimeout);
-
+ int captured_frames = listener_->GetStats().captured_frames;
+ EXPECT_TRUE_WAIT(
+ !capturer_->IsRunning() ||
+ listener_->GetStats().captured_frames >= captured_frames + 10,
+ kWaitTimeout);
// Verify resolution change after adaptation.
- VerifyAdaptedResolution(request_format.width, request_format.height);
+ VerifyAdaptedResolution(
+ listener_->GetStats(), request_format.width, request_format.height);
}
// Black the output frame.
@@ -326,42 +371,57 @@ TEST_F(VideoAdapterTest, BlackOutput) {
adapter_->SetOutputFormat(capture_format_);
EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 10, kWaitTimeout);
+ listener_->GetStats().captured_frames >= 10, kWaitTimeout);
// Verify that the output frame is not black.
- EXPECT_NE(16, *listener_->adapted_frame()->GetYPlane());
- EXPECT_NE(128, *listener_->adapted_frame()->GetUPlane());
- EXPECT_NE(128, *listener_->adapted_frame()->GetVPlane());
+ rtc::scoped_ptr<VideoFrame> adapted_frame(listener_->CopyAdaptedFrame());
+ EXPECT_NE(16, *adapted_frame->GetYPlane());
+ EXPECT_NE(128, *adapted_frame->GetUPlane());
+ EXPECT_NE(128, *adapted_frame->GetVPlane());
adapter_->SetBlackOutput(true);
- EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 20, kWaitTimeout);
+ int captured_frames = listener_->GetStats().captured_frames;
+ EXPECT_TRUE_WAIT(
+ !capturer_->IsRunning() ||
+ listener_->GetStats().captured_frames >= captured_frames + 10,
+ kWaitTimeout);
// Verify that the output frame is black.
- EXPECT_EQ(16, *listener_->adapted_frame()->GetYPlane());
- EXPECT_EQ(128, *listener_->adapted_frame()->GetUPlane());
- EXPECT_EQ(128, *listener_->adapted_frame()->GetVPlane());
+ adapted_frame.reset(listener_->CopyAdaptedFrame());
+ EXPECT_EQ(16, *adapted_frame->GetYPlane());
+ EXPECT_EQ(128, *adapted_frame->GetUPlane());
+ EXPECT_EQ(128, *adapted_frame->GetVPlane());
// Verify that the elapsed time and timestamp of the black frame increase.
- int64 elapsed_time = listener_->adapted_frame()->GetElapsedTime();
- int64 timestamp = listener_->adapted_frame()->GetTimeStamp();
- EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 22, kWaitTimeout);
- EXPECT_GT(listener_->adapted_frame()->GetElapsedTime(), elapsed_time);
- EXPECT_GT(listener_->adapted_frame()->GetTimeStamp(), timestamp);
+ int64 elapsed_time = adapted_frame->GetElapsedTime();
+ int64 timestamp = adapted_frame->GetTimeStamp();
+ captured_frames = listener_->GetStats().captured_frames;
+ EXPECT_TRUE_WAIT(
+ !capturer_->IsRunning() ||
+ listener_->GetStats().captured_frames >= captured_frames + 10,
+ kWaitTimeout);
+
+ adapted_frame.reset(listener_->CopyAdaptedFrame());
+ EXPECT_GT(adapted_frame->GetElapsedTime(), elapsed_time);
+ EXPECT_GT(adapted_frame->GetTimeStamp(), timestamp);
// Change the output size
VideoFormat request_format = capture_format_;
request_format.width /= 2;
request_format.height /= 2;
adapter_->SetOutputFormat(request_format);
+ captured_frames = listener_->GetStats().captured_frames;
+ EXPECT_TRUE_WAIT(
+ !capturer_->IsRunning() ||
+ listener_->GetStats().captured_frames >= captured_frames + 10,
+ kWaitTimeout);
- EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 40, kWaitTimeout);
// Verify resolution change after adaptation.
- VerifyAdaptedResolution(request_format.width, request_format.height);
+ VerifyAdaptedResolution(
+ listener_->GetStats(), request_format.width, request_format.height);
// Verify that the output frame is black.
- EXPECT_EQ(16, *listener_->adapted_frame()->GetYPlane());
- EXPECT_EQ(128, *listener_->adapted_frame()->GetUPlane());
- EXPECT_EQ(128, *listener_->adapted_frame()->GetVPlane());
+ adapted_frame.reset(listener_->CopyAdaptedFrame());
+ EXPECT_EQ(16, *adapted_frame->GetYPlane());
+ EXPECT_EQ(128, *adapted_frame->GetUPlane());
+ EXPECT_EQ(128, *adapted_frame->GetVPlane());
}
// Drop all frames.
@@ -370,11 +430,12 @@ TEST_F(VideoAdapterTest, DropAllFrames) {
adapter_->SetOutputFormat(format);
EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
- listener_->captured_frames() >= 10, kWaitTimeout);
+ listener_->GetStats().captured_frames >= 10, kWaitTimeout);
// Verify all frames are dropped.
- EXPECT_GE(listener_->captured_frames(), 10);
- EXPECT_EQ(listener_->captured_frames(), listener_->dropped_frames());
+ VideoCapturerListener::Stats stats = listener_->GetStats();
+ EXPECT_GE(stats.captured_frames, 10);
+ EXPECT_EQ(stats.captured_frames, stats.dropped_frames);
}
TEST(CoordinatedVideoAdapterTest, TestCoordinatedWithoutCpuAdaptation) {
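A second pattern in the updated tests above: rather than waiting for an absolute frame count (>= 20, >= 40) after reconfiguring the adapter, they snapshot the current count and wait for a fixed increment on top of it, which stays correct no matter how many frames were delivered before the change took effect. The shape of the idiom, using the fixture members from this file (the increment of 10 is simply the value these tests use):

int frames_before = listener_->GetStats().captured_frames;
adapter_->SetOutputFormat(request_format);  // Change takes effect mid-stream.
EXPECT_TRUE_WAIT(
    !capturer_->IsRunning() ||
        listener_->GetStats().captured_frames >= frames_before + 10,
    kWaitTimeout);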
diff --git a/media/base/videoengine_unittest.h b/media/base/videoengine_unittest.h
index 0f03c7b..8eab347 100644
--- a/media/base/videoengine_unittest.h
+++ b/media/base/videoengine_unittest.h
@@ -1593,6 +1593,25 @@ class VideoMediaChannelTest : public testing::Test,
frame_count += 2;
EXPECT_EQ_WAIT(frame_count, renderer_.num_rendered_frames(), kTimeout);
}
+ // Tests that adapted frames won't be upscaled to a higher resolution.
+ void SendsLowerResolutionOnSmallerFrames() {
+ cricket::VideoCodec codec = DefaultCodec();
+ codec.width = 320;
+ codec.height = 240;
+ EXPECT_TRUE(SetOneCodec(codec));
+ EXPECT_TRUE(SetSend(true));
+ EXPECT_TRUE(channel_->SetRender(true));
+ EXPECT_TRUE(channel_->SetRenderer(kDefaultReceiveSsrc, &renderer_));
+ EXPECT_EQ(0, renderer_.num_rendered_frames());
+ EXPECT_TRUE(SendFrame());
+ EXPECT_FRAME_WAIT(1, codec.width, codec.height, kTimeout);
+
+ // Check that we send smaller frames at the new resolution.
+ EXPECT_TRUE(rtc::Thread::Current()->ProcessMessages(33));
+ EXPECT_TRUE(video_capturer_->CaptureCustomFrame(
+ codec.width / 2, codec.height / 2, cricket::FOURCC_I420));
+ EXPECT_FRAME_WAIT(2, codec.width / 2, codec.height / 2, kTimeout);
+ }
// Tests that we can set the send stream format properly.
void SetSendStreamFormat() {
cricket::VideoCodec codec(DefaultCodec());
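SendsLowerResolutionOnSmallerFrames(), like the other bodies in videoengine_unittest.h, is a shared test routine meant to be invoked from an engine-specific unittest whose fixture derives from VideoMediaChannelTest. A typical wiring looks like the following; the fixture name is illustrative and not part of this change:

// Hypothetical engine-specific test that forwards to the shared body.
TEST_F(SomeEngineVideoMediaChannelTest, SendsLowerResolutionOnSmallerFrames) {
  SendsLowerResolutionOnSmallerFrames();
}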
diff --git a/media/base/videoframe.cc b/media/base/videoframe.cc
index 1c5cfd8..018d065 100644
--- a/media/base/videoframe.cc
+++ b/media/base/videoframe.cc
@@ -235,7 +235,7 @@ bool VideoFrame::SetToBlack() {
}
static const size_t kMaxSampleSize = 1000000000u;
-// Returns whether a sample is valid
+// Returns whether a sample is valid.
bool VideoFrame::Validate(uint32 fourcc, int w, int h,
const uint8 *sample, size_t sample_size) {
if (h < 0) {
@@ -311,6 +311,11 @@ bool VideoFrame::Validate(uint32 fourcc, int w, int h,
<< " " << sample_size;
return false;
}
+ // TODO(fbarchard): Make function to dump information about frames.
+ uint8 four_samples[4] = { 0, 0, 0, 0 };
+ for (size_t i = 0; i < ARRAY_SIZE(four_samples) && i < sample_size; ++i) {
+ four_samples[i] = sample[i];
+ }
if (sample_size < expected_size) {
LOG(LS_ERROR) << "Size field is too small."
<< " format: " << GetFourccName(format)
@@ -318,10 +323,10 @@ bool VideoFrame::Validate(uint32 fourcc, int w, int h,
<< " size: " << w << "x" << h
<< " " << sample_size
<< " expected: " << expected_size
- << " sample[0..3]: " << static_cast<int>(sample[0])
- << ", " << static_cast<int>(sample[1])
- << ", " << static_cast<int>(sample[2])
- << ", " << static_cast<int>(sample[3]);
+ << " sample[0..3]: " << static_cast<int>(four_samples[0])
+ << ", " << static_cast<int>(four_samples[1])
+ << ", " << static_cast<int>(four_samples[2])
+ << ", " << static_cast<int>(four_samples[3]);
return false;
}
if (sample_size > kMaxSampleSize) {
@@ -331,13 +336,14 @@ bool VideoFrame::Validate(uint32 fourcc, int w, int h,
<< " size: " << w << "x" << h
<< " " << sample_size
<< " expected: " << 2 * expected_size
- << " sample[0..3]: " << static_cast<int>(sample[0])
- << ", " << static_cast<int>(sample[1])
- << ", " << static_cast<int>(sample[2])
- << ", " << static_cast<int>(sample[3]);
+ << " sample[0..3]: " << static_cast<int>(four_samples[0])
+ << ", " << static_cast<int>(four_samples[1])
+ << ", " << static_cast<int>(four_samples[2])
+ << ", " << static_cast<int>(four_samples[3]);
return false;
}
// Show large size warning once every 100 frames.
+ // TODO(fbarchard): Make frame counter atomic for thread safety.
static int large_warn100 = 0;
size_t large_expected_size = expected_size * 2;
if (expected_bpp >= 8 &&
@@ -350,27 +356,14 @@ bool VideoFrame::Validate(uint32 fourcc, int w, int h,
<< " size: " << w << "x" << h
<< " bytes: " << sample_size
<< " expected: " << large_expected_size
- << " sample[0..3]: " << static_cast<int>(sample[0])
- << ", " << static_cast<int>(sample[1])
- << ", " << static_cast<int>(sample[2])
- << ", " << static_cast<int>(sample[3]);
- }
- // Scan pages to ensure they are there and don't contain a single value and
- // to generate an error.
- if (!memcmp(sample + sample_size - 8, sample + sample_size - 4, 4) &&
- !memcmp(sample, sample + 4, sample_size - 4)) {
- LOG(LS_WARNING) << "Duplicate value for all pixels."
- << " format: " << GetFourccName(format)
- << " bpp: " << expected_bpp
- << " size: " << w << "x" << h
- << " bytes: " << sample_size
- << " expected: " << expected_size
- << " sample[0..3]: " << static_cast<int>(sample[0])
- << ", " << static_cast<int>(sample[1])
- << ", " << static_cast<int>(sample[2])
- << ", " << static_cast<int>(sample[3]);
+ << " sample[0..3]: " << static_cast<int>(four_samples[0])
+ << ", " << static_cast<int>(four_samples[1])
+ << ", " << static_cast<int>(four_samples[2])
+ << ", " << static_cast<int>(four_samples[3]);
}
+ // TODO(fbarchard): Add duplicate pixel check.
+ // TODO(fbarchard): Use an atomic frame counter for thread safety.
static bool valid_once = true;
if (valid_once) {
valid_once = false;
@@ -380,10 +373,10 @@ bool VideoFrame::Validate(uint32 fourcc, int w, int h,
<< " size: " << w << "x" << h
<< " bytes: " << sample_size
<< " expected: " << expected_size
- << " sample[0..3]: " << static_cast<int>(sample[0])
- << ", " << static_cast<int>(sample[1])
- << ", " << static_cast<int>(sample[2])
- << ", " << static_cast<int>(sample[3]);
+ << " sample[0..3]: " << static_cast<int>(four_samples[0])
+ << ", " << static_cast<int>(four_samples[1])
+ << ", " << static_cast<int>(four_samples[2])
+ << ", " << static_cast<int>(four_samples[3]);
}
return true;
}
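The four_samples buffer introduced above fixes an out-of-bounds read in the diagnostics: the "Size field is too small" branch used to log sample[0..3] directly, even though that branch fires precisely when sample_size may be smaller than four bytes. A possible follow-up for the "dump information about frames" TODO could wrap the same bounded copy in a helper; the name and signature below are assumptions, not part of this change:

#include <stddef.h>
#include <stdint.h>
#include <sstream>
#include <string>

// Formats at most the first four bytes of a sample, zero-padding when the
// buffer is shorter, so logging never reads past the end.
std::string DumpLeadingBytes(const uint8_t* sample, size_t sample_size) {
  uint8_t bytes[4] = {0, 0, 0, 0};
  for (size_t i = 0; i < 4 && i < sample_size; ++i)
    bytes[i] = sample[i];
  std::ostringstream os;
  os << static_cast<int>(bytes[0]) << ", " << static_cast<int>(bytes[1])
     << ", " << static_cast<int>(bytes[2]) << ", " << static_cast<int>(bytes[3]);
  return os.str();
}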
diff --git a/media/base/videoframe_unittest.h b/media/base/videoframe_unittest.h
index c4a7a8c..483fc34 100644
--- a/media/base/videoframe_unittest.h
+++ b/media/base/videoframe_unittest.h
@@ -135,6 +135,9 @@ class VideoFrameTest : public testing::Test {
rtc::scoped_ptr<rtc::FileStream> fs(
rtc::Filesystem::OpenFile(path, "rb"));
if (!fs.get()) {
+ LOG(LS_ERROR) << "Could not open test file path: " << path.pathname()
+ << " from current dir "
+ << rtc::Filesystem::GetCurrentDirectory().pathname();
return NULL;
}
@@ -143,6 +146,7 @@ class VideoFrameTest : public testing::Test {
new rtc::MemoryStream());
rtc::StreamResult res = Flow(fs.get(), buf, sizeof(buf), ms.get());
if (res != rtc::SR_SUCCESS) {
+ LOG(LS_ERROR) << "Could not load test file path: " << path.pathname();
return NULL;
}
@@ -419,17 +423,22 @@ class VideoFrameTest : public testing::Test {
const uint8* u, uint32 upitch,
const uint8* v, uint32 vpitch,
int max_error) {
- return IsSize(frame, width, height) &&
+ return IsSize(frame,
+ static_cast<uint32>(width),
+ static_cast<uint32>(height)) &&
frame.GetPixelWidth() == pixel_width &&
frame.GetPixelHeight() == pixel_height &&
frame.GetElapsedTime() == elapsed_time &&
frame.GetTimeStamp() == time_stamp &&
IsPlaneEqual("y", frame.GetYPlane(), frame.GetYPitch(), y, ypitch,
- width, height, max_error) &&
+ static_cast<uint32>(width),
+ static_cast<uint32>(height), max_error) &&
IsPlaneEqual("u", frame.GetUPlane(), frame.GetUPitch(), u, upitch,
- (width + 1) / 2, (height + 1) / 2, max_error) &&
+ static_cast<uint32>((width + 1) / 2),
+ static_cast<uint32>((height + 1) / 2), max_error) &&
IsPlaneEqual("v", frame.GetVPlane(), frame.GetVPitch(), v, vpitch,
- (width + 1) / 2, (height + 1) / 2, max_error);
+ static_cast<uint32>((width + 1) / 2),
+ static_cast<uint32>((height + 1) / 2), max_error);
}
static bool IsEqual(const cricket::VideoFrame& frame1,
@@ -719,7 +728,7 @@ class VideoFrameTest : public testing::Test {
T frame1, frame2;
size_t out_size = kWidth * kHeight * 2;
rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment]);
- uint8 *out = ALIGNP(outbuf.get(), kAlignment);
+ uint8* out = ALIGNP(outbuf.get(), kAlignment);
T frame;
ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(cricket::FOURCC_RGBP,
@@ -735,7 +744,7 @@ class VideoFrameTest : public testing::Test {
T frame1, frame2;
size_t out_size = kWidth * kHeight * 2;
rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment]);
- uint8 *out = ALIGNP(outbuf.get(), kAlignment);
+ uint8* out = ALIGNP(outbuf.get(), kAlignment);
T frame;
ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(cricket::FOURCC_RGBO,
@@ -751,7 +760,7 @@ class VideoFrameTest : public testing::Test {
T frame1, frame2;
size_t out_size = kWidth * kHeight * 2;
rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment]);
- uint8 *out = ALIGNP(outbuf.get(), kAlignment);
+ uint8* out = ALIGNP(outbuf.get(), kAlignment);
T frame;
ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(cricket::FOURCC_R444,
@@ -771,12 +780,12 @@ class VideoFrameTest : public testing::Test {
size_t bayer_size = kWidth * kHeight; \
rtc::scoped_ptr<uint8[]> bayerbuf(new uint8[ \
bayer_size + kAlignment]); \
- uint8 *bayer = ALIGNP(bayerbuf.get(), kAlignment); \
+ uint8* bayer = ALIGNP(bayerbuf.get(), kAlignment); \
T frame1, frame2; \
rtc::scoped_ptr<rtc::MemoryStream> ms( \
CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight)); \
ASSERT_TRUE(ms.get() != NULL); \
- libyuv::ARGBToBayer##BAYER(reinterpret_cast<uint8 *>(ms->GetBuffer()), \
+ libyuv::ARGBToBayer##BAYER(reinterpret_cast<uint8*>(ms->GetBuffer()), \
kWidth * 4, \
bayer, kWidth, \
kWidth, kHeight); \
@@ -812,8 +821,8 @@ void Construct##FOURCC##Mirror() { \
reinterpret_cast<uint8*>(ms->GetBuffer()), \
data_size, \
1, 1, 0, 0, 0)); \
- int width_rotate = frame1.GetWidth(); \
- int height_rotate = frame1.GetHeight(); \
+ int width_rotate = static_cast<int>(frame1.GetWidth()); \
+ int height_rotate = static_cast<int>(frame1.GetHeight()); \
EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 1, 1, 0, 0)); \
libyuv::I420Mirror(frame2.GetYPlane(), frame2.GetYPitch(), \
frame2.GetUPlane(), frame2.GetUPitch(), \
@@ -845,8 +854,8 @@ void Construct##FOURCC##Rotate##ROTATE() { \
reinterpret_cast<uint8*>(ms->GetBuffer()), \
data_size, \
1, 1, 0, 0, 0)); \
- int width_rotate = frame1.GetWidth(); \
- int height_rotate = frame1.GetHeight(); \
+ int width_rotate = static_cast<int>(frame1.GetWidth()); \
+ int height_rotate = static_cast<int>(frame1.GetHeight()); \
EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 1, 1, 0, 0)); \
libyuv::I420Rotate(frame2.GetYPlane(), frame2.GetYPitch(), \
frame2.GetUPlane(), frame2.GetUPitch(), \
@@ -995,7 +1004,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
// Convert back to ARGB.
size_t out_size = 4;
rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment]);
- uint8 *out = ALIGNP(outbuf.get(), kAlignment);
+ uint8* out = ALIGNP(outbuf.get(), kAlignment);
EXPECT_EQ(out_size, frame.ConvertToRgbBuffer(cricket::FOURCC_ARGB,
out,
@@ -1032,7 +1041,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
// Convert back to ARGB
size_t out_size = 10 * 4;
rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment]);
- uint8 *out = ALIGNP(outbuf.get(), kAlignment);
+ uint8* out = ALIGNP(outbuf.get(), kAlignment);
EXPECT_EQ(out_size, frame.ConvertToRgbBuffer(cricket::FOURCC_ARGB,
out,
@@ -1431,8 +1440,8 @@ void Construct##FOURCC##Rotate##ROTATE() { \
size_t out_size = astride * kHeight;
rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment + 1]);
memset(outbuf.get(), 0, out_size + kAlignment + 1);
- uint8 *outtop = ALIGNP(outbuf.get(), kAlignment);
- uint8 *out = outtop;
+ uint8* outtop = ALIGNP(outbuf.get(), kAlignment);
+ uint8* out = outtop;
int stride = astride;
if (invert) {
out += (kHeight - 1) * stride; // Point to last row.
@@ -1869,7 +1878,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
size_t bayer_size = kWidth * kHeight; \
rtc::scoped_ptr<uint8[]> bayerbuf(new uint8[ \
bayer_size + kAlignment]); \
- uint8 *bayer = ALIGNP(bayerbuf.get(), kAlignment); \
+ uint8* bayer = ALIGNP(bayerbuf.get(), kAlignment); \
T frame; \
rtc::scoped_ptr<rtc::MemoryStream> ms( \
CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight)); \
@@ -1898,7 +1907,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
size_t bayer_size = kWidth * kHeight; \
rtc::scoped_ptr<uint8[]> bayerbuf(new uint8[ \
bayer_size + 1 + kAlignment]); \
- uint8 *bayer = ALIGNP(bayerbuf.get(), kAlignment) + 1; \
+ uint8* bayer = ALIGNP(bayerbuf.get(), kAlignment) + 1; \
T frame; \
rtc::scoped_ptr<rtc::MemoryStream> ms( \
CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight)); \
@@ -1935,7 +1944,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
size_t bayer_size = kWidth * kHeight; \
rtc::scoped_ptr<uint8[]> bayerbuf(new uint8[ \
bayer_size + kAlignment]); \
- uint8 *bayer1 = ALIGNP(bayerbuf.get(), kAlignment); \
+ uint8* bayer1 = ALIGNP(bayerbuf.get(), kAlignment); \
for (int i = 0; i < kWidth * kHeight; ++i) { \
bayer1[i] = static_cast<uint8>(i * 33u + 183u); \
} \
@@ -1951,7 +1960,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
} \
rtc::scoped_ptr<uint8[]> bayer2buf(new uint8[ \
bayer_size + kAlignment]); \
- uint8 *bayer2 = ALIGNP(bayer2buf.get(), kAlignment); \
+ uint8* bayer2 = ALIGNP(bayer2buf.get(), kAlignment); \
libyuv::ARGBToBayer##BAYER(reinterpret_cast<uint8*>(ms->GetBuffer()), \
kWidth * 4, \
bayer2, kWidth, \