path: root/media/cast/video_receiver
author     Torne (Richard Coles) <torne@google.com>   2013-10-18 15:46:22 +0100
committer  Torne (Richard Coles) <torne@google.com>   2013-10-18 15:46:22 +0100
commit     4e180b6a0b4720a9b8e9e959a882386f690f08ff (patch)
tree       788435d09362885908ba5ba9ef868b852ca82c0b /media/cast/video_receiver
parent     1179b92b08db0c652a0cf003ab4d89b31ce3610f (diff)
download   chromium_org-4e180b6a0b4720a9b8e9e959a882386f690f08ff.tar.gz
Merge from Chromium at DEPS revision 228962
This commit was generated by merge_to_master.py.
Change-Id: I23bd7d7766f213fd52f28ae5e1ecc6ae9df905ea
Diffstat (limited to 'media/cast/video_receiver')
-rw-r--r--  media/cast/video_receiver/codecs/vp8/vp8_decoder.cc     25
-rw-r--r--  media/cast/video_receiver/codecs/vp8/vp8_decoder.h      12
-rw-r--r--  media/cast/video_receiver/video_decoder.cc              34
-rw-r--r--  media/cast/video_receiver/video_decoder.h               25
-rw-r--r--  media/cast/video_receiver/video_decoder_unittest.cc     74
-rw-r--r--  media/cast/video_receiver/video_receiver.cc            283
-rw-r--r--  media/cast/video_receiver/video_receiver.h              46
-rw-r--r--  media/cast/video_receiver/video_receiver_unittest.cc    82
8 files changed, 322 insertions(+), 259 deletions(-)
diff --git a/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc b/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc
index 93d3eb5c4a..23125b2057 100644
--- a/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc
+++ b/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc
@@ -29,36 +29,51 @@ void Vp8Decoder::InitDecode(int number_of_cores) {
bool Vp8Decoder::Decode(const EncodedVideoFrame& input_image,
I420VideoFrame* decoded_frame) {
+ VLOG(1) << "VP8 decode frame:" << static_cast<int>(input_image.frame_id)
+ << " sized:" << input_image.data.size();
+
if (input_image.data.empty()) return false;
vpx_codec_iter_t iter = NULL;
vpx_image_t* img;
if (vpx_codec_decode(decoder_.get(),
input_image.data.data(),
- input_image.data.size(),
+ static_cast<unsigned int>(input_image.data.size()),
0,
1 /* real time*/)) {
+ VLOG(1) << "Failed to decode VP8 frame.";
return false;
}
img = vpx_codec_get_frame(decoder_.get(), &iter);
- if (img == NULL) return false;
+ if (img == NULL) {
+ VLOG(1) << "Skip rendering VP8 frame:"
+ << static_cast<int>(input_image.frame_id);
+ return false;
+ }
+ // The img is only valid until the next call to vpx_codec_decode.
// Populate the decoded image.
decoded_frame->width = img->d_w;
decoded_frame->height = img->d_h;
decoded_frame->y_plane.stride = img->stride[VPX_PLANE_Y];
decoded_frame->y_plane.length = img->stride[VPX_PLANE_Y] * img->d_h;
- decoded_frame->y_plane.data = img->planes[VPX_PLANE_Y];
+ decoded_frame->y_plane.data = new uint8[decoded_frame->y_plane.length];
+ memcpy(decoded_frame->y_plane.data, img->planes[VPX_PLANE_Y],
+ decoded_frame->y_plane.length);
decoded_frame->u_plane.stride = img->stride[VPX_PLANE_U];
decoded_frame->u_plane.length = img->stride[VPX_PLANE_U] * img->d_h;
- decoded_frame->u_plane.data = img->planes[VPX_PLANE_U];
+ decoded_frame->u_plane.data = new uint8[decoded_frame->u_plane.length];
+ memcpy(decoded_frame->u_plane.data, img->planes[VPX_PLANE_U],
+ decoded_frame->u_plane.length);
decoded_frame->v_plane.stride = img->stride[VPX_PLANE_V];
decoded_frame->v_plane.length = img->stride[VPX_PLANE_V] * img->d_h;
- decoded_frame->v_plane.data = img->planes[VPX_PLANE_V];
+ decoded_frame->v_plane.data = new uint8[decoded_frame->v_plane.length];
+ memcpy(decoded_frame->v_plane.data, img->planes[VPX_PLANE_V],
+ decoded_frame->v_plane.length);
return true;
}
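
The hunk above stops aliasing libvpx's internal buffers: each decoded plane is now deep-copied out of the vpx_image_t, because that image is only valid until the next vpx_codec_decode() call. A minimal standalone sketch of the same pattern, assuming the stock libvpx decoder API (the helper name and the use of std::vector are illustrative, not from the patch):

#include <cstdint>
#include <vector>
#include "vpx/vpx_decoder.h"  // In Chromium: third_party/libvpx/source/libvpx/vpx/vpx_decoder.h

// Decodes one VP8 payload and copies the Y plane out, since the vpx_image_t
// returned by vpx_codec_get_frame() points into decoder-owned memory that is
// reused on the next vpx_codec_decode() call.
bool DecodeAndCopyYPlane(vpx_codec_ctx_t* decoder,
                         const std::vector<uint8_t>& payload,
                         std::vector<uint8_t>* y_plane_out) {
  if (vpx_codec_decode(decoder, payload.data(),
                       static_cast<unsigned int>(payload.size()),
                       NULL, 1 /* deadline: real time */) != VPX_CODEC_OK) {
    return false;  // Failed to decode VP8 frame.
  }
  vpx_codec_iter_t iter = NULL;
  vpx_image_t* img = vpx_codec_get_frame(decoder, &iter);
  if (img == NULL)
    return false;  // No displayable frame produced for this input.
  const size_t length =
      static_cast<size_t>(img->stride[VPX_PLANE_Y]) * img->d_h;
  y_plane_out->assign(img->planes[VPX_PLANE_Y],
                      img->planes[VPX_PLANE_Y] + length);
  return true;
}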
diff --git a/media/cast/video_receiver/codecs/vp8/vp8_decoder.h b/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
index 1acdb5a3d3..c8d930bb2a 100644
--- a/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
+++ b/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
@@ -6,6 +6,7 @@
#define MEDIA_CAST_RTP_RECEVIER_CODECS_VP8_VP8_DECODER_H_
#include "base/memory/scoped_ptr.h"
+#include "base/threading/non_thread_safe.h"
#include "media/cast/cast_config.h"
#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
@@ -14,20 +15,21 @@ typedef struct vpx_codec_ctx vpx_dec_ctx_t;
namespace media {
namespace cast {
-class Vp8Decoder {
+// This class is not thread safe; it's only called from the cast video decoder
+// thread.
+class Vp8Decoder : public base::NonThreadSafe {
public:
explicit Vp8Decoder(int number_of_cores);
-
~Vp8Decoder();
- // Initialize the decoder.
- void InitDecode(int number_of_cores);
-
// Decode encoded image (as a part of a video stream).
bool Decode(const EncodedVideoFrame& input_image,
I420VideoFrame* decoded_frame);
private:
+ // Initialize the decoder.
+ void InitDecode(int number_of_cores);
+
scoped_ptr<vpx_dec_ctx_t> decoder_;
};
diff --git a/media/cast/video_receiver/video_decoder.cc b/media/cast/video_receiver/video_decoder.cc
index 238d6db0ab..4a76ae6842 100644
--- a/media/cast/video_receiver/video_decoder.cc
+++ b/media/cast/video_receiver/video_decoder.cc
@@ -12,10 +12,8 @@
namespace media {
namespace cast {
-VideoDecoder::VideoDecoder(scoped_refptr<CastThread> cast_thread,
- const VideoReceiverConfig& video_config)
- : cast_thread_(cast_thread),
- codec_(video_config.codec),
+VideoDecoder::VideoDecoder(const VideoReceiverConfig& video_config)
+ : codec_(video_config.codec),
vp8_decoder_() {
switch (video_config.codec) {
case kVp8:
@@ -33,33 +31,13 @@ VideoDecoder::VideoDecoder(scoped_refptr<CastThread> cast_thread,
VideoDecoder::~VideoDecoder() {}
-void VideoDecoder::DecodeVideoFrame(
+bool VideoDecoder::DecodeVideoFrame(
const EncodedVideoFrame* encoded_frame,
const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_callback,
- base::Closure frame_release_callback) {
- DecodeFrame(encoded_frame, render_time, frame_decoded_callback);
- // Done with the frame -> release.
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, frame_release_callback);
-}
-
-void VideoDecoder::DecodeFrame(
- const EncodedVideoFrame* encoded_frame,
- const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_callback) {
+ I420VideoFrame* video_frame) {
DCHECK(encoded_frame->codec == codec_) << "Invalid codec";
- // TODO(mikhal): Allow the application to allocate this memory.
- scoped_ptr<I420VideoFrame> video_frame(new I420VideoFrame());
-
- if (encoded_frame->data.size() > 0) {
- bool success = vp8_decoder_->Decode(*encoded_frame, video_frame.get());
- // Frame decoded - return frame to the user via callback.
- if (success) {
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
- base::Bind(frame_decoded_callback,
- base::Passed(&video_frame), render_time));
- }
- }
+ DCHECK_GT(encoded_frame->data.size(), GG_UINT64_C(0)) << "Empty video frame";
+ return vp8_decoder_->Decode(*encoded_frame, video_frame);
}
} // namespace cast
diff --git a/media/cast/video_receiver/video_decoder.h b/media/cast/video_receiver/video_decoder.h
index abf1955eb9..e98768c215 100644
--- a/media/cast/video_receiver/video_decoder.h
+++ b/media/cast/video_receiver/video_decoder.h
@@ -5,39 +5,32 @@
#ifndef MEDIA_CAST_VIDEO_RECEIVER_VIDEO_DECODER_H_
#define MEDIA_CAST_VIDEO_RECEIVER_VIDEO_DECODER_H_
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
+#include "base/threading/non_thread_safe.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_receiver.h"
-#include "media/cast/cast_thread.h"
namespace media {
namespace cast {
class Vp8Decoder;
-class VideoDecoder : public base::RefCountedThreadSafe<VideoDecoder>{
+// This class is not thread safe; it's only called from the cast video decoder
+// thread.
+class VideoDecoder : public base::NonThreadSafe {
public:
- VideoDecoder(scoped_refptr<CastThread> cast_thread,
- const VideoReceiverConfig& video_config);
- ~VideoDecoder();
-
+ explicit VideoDecoder(const VideoReceiverConfig& video_config);
+ virtual ~VideoDecoder();
// Decode a video frame. Decoded (raw) frame will be returned in the
- // frame_decoded_callback.
- void DecodeVideoFrame(const EncodedVideoFrame* encoded_frame,
+ // provided video_frame.
+ bool DecodeVideoFrame(const EncodedVideoFrame* encoded_frame,
const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_callback,
- base::Closure frame_release_callback);
+ I420VideoFrame* video_frame);
private:
- void DecodeFrame(const EncodedVideoFrame* encoded_frame,
- const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_callback);
VideoCodec codec_;
scoped_ptr<Vp8Decoder> vp8_decoder_;
- scoped_refptr<CastThread> cast_thread_;
DISALLOW_COPY_AND_ASSIGN(VideoDecoder);
};
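
With the CastThread and callback plumbing removed, DecodeVideoFrame() above is synchronous: it fills a caller-owned I420VideoFrame and reports success through its return value, and posting the result back to the main thread becomes the caller's job (see DecodeVideoFrameThread in video_receiver.cc). A hedged usage sketch under those assumptions, mirroring the configuration used in video_decoder_unittest.cc (the helper name is illustrative):

#include "base/time/time.h"
#include "media/cast/cast_config.h"
#include "media/cast/video_receiver/video_decoder.h"

namespace media {
namespace cast {

// Runs on the cast video decoder thread. Returns false if the decoder
// produced no displayable frame; DCHECKs fire on a codec mismatch or an
// empty payload.
bool DecodeOneVp8Frame(const EncodedVideoFrame& encoded_frame,
                       const base::TimeTicks& render_time,
                       I420VideoFrame* decoded_frame) {
  VideoReceiverConfig config;
  config.codec = kVp8;
  config.use_external_decoder = false;
  VideoDecoder decoder(config);
  return decoder.DecodeVideoFrame(&encoded_frame, render_time, decoded_frame);
}

}  // namespace cast
}  // namespace media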
diff --git a/media/cast/video_receiver/video_decoder_unittest.cc b/media/cast/video_receiver/video_decoder_unittest.cc
index 0b95d128b7..77f3efdefa 100644
--- a/media/cast/video_receiver/video_decoder_unittest.cc
+++ b/media/cast/video_receiver/video_decoder_unittest.cc
@@ -2,13 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/bind.h"
-#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
-#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/cast_thread.h"
-#include "media/cast/test/fake_task_runner.h"
#include "media/cast/video_receiver/video_decoder.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -20,75 +15,44 @@ using testing::_;
// Random frame size for testing.
const int kFrameSize = 2345;
-static void ReleaseFrame(const EncodedVideoFrame* encoded_frame) {
- // Empty since we in this test send in the same frame.
-}
-
-class TestVideoDecoderCallback :
- public base::RefCountedThreadSafe<TestVideoDecoderCallback> {
- public:
- TestVideoDecoderCallback()
- : num_called_(0) {}
- // TODO(mikhal): Set and check expectations.
- void DecodeComplete(scoped_ptr<I420VideoFrame> frame,
- const base::TimeTicks render_time) {
- num_called_++;
- }
-
- int number_times_called() {return num_called_;}
- private:
- int num_called_;
-};
-
class VideoDecoderTest : public ::testing::Test {
protected:
VideoDecoderTest() {
// Configure to vp8.
config_.codec = kVp8;
config_.use_external_decoder = false;
- video_decoder_callback_ = new TestVideoDecoderCallback();
+ decoder_.reset(new VideoDecoder(config_));
}
- ~VideoDecoderTest() {}
- virtual void SetUp() {
- task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
- task_runner_, task_runner_);
- decoder_ = new VideoDecoder(cast_thread_, config_);
- }
+ virtual ~VideoDecoderTest() {}
- scoped_refptr<VideoDecoder> decoder_;
+ scoped_ptr<VideoDecoder> decoder_;
VideoReceiverConfig config_;
- EncodedVideoFrame encoded_frame_;
- base::SimpleTestTickClock testing_clock_;
- scoped_refptr<test::FakeTaskRunner> task_runner_;
- scoped_refptr<CastThread> cast_thread_;
- scoped_refptr<TestVideoDecoderCallback> video_decoder_callback_;
};
-// TODO(pwestin): Test decoding a real frame.
TEST_F(VideoDecoderTest, SizeZero) {
- encoded_frame_.codec = kVp8;
+ EncodedVideoFrame encoded_frame;
+ I420VideoFrame video_frame;
base::TimeTicks render_time;
- VideoFrameDecodedCallback frame_decoded_callback =
- base::Bind(&TestVideoDecoderCallback::DecodeComplete,
- video_decoder_callback_.get());
- decoder_->DecodeVideoFrame(&encoded_frame_, render_time,
- frame_decoded_callback, base::Bind(ReleaseFrame, &encoded_frame_));
- EXPECT_EQ(0, video_decoder_callback_->number_times_called());
+ encoded_frame.codec = kVp8;
+
+ EXPECT_DEATH(
+ decoder_->DecodeVideoFrame(&encoded_frame, render_time, &video_frame),
+ "Empty video frame");
}
TEST_F(VideoDecoderTest, InvalidCodec) {
+ EncodedVideoFrame encoded_frame;
+ I420VideoFrame video_frame;
base::TimeTicks render_time;
- VideoFrameDecodedCallback frame_decoded_callback =
- base::Bind(&TestVideoDecoderCallback::DecodeComplete,
- video_decoder_callback_.get());
- encoded_frame_.data.assign(kFrameSize, 0);
- encoded_frame_.codec = kExternalVideo;
- EXPECT_DEATH(decoder_->DecodeVideoFrame(&encoded_frame_, render_time,
- frame_decoded_callback, base::Bind(ReleaseFrame, &encoded_frame_)),
- "Invalid codec");
+ encoded_frame.data.assign(kFrameSize, 0);
+ encoded_frame.codec = kExternalVideo;
+ EXPECT_DEATH(
+ decoder_->DecodeVideoFrame(&encoded_frame, render_time, &video_frame),
+ "Invalid codec");
}
+// TODO(pwestin): Test decoding a real frame.
+
} // namespace cast
} // namespace media
diff --git a/media/cast/video_receiver/video_receiver.cc b/media/cast/video_receiver/video_receiver.cc
index 4d0421cc6c..530e412f4d 100644
--- a/media/cast/video_receiver/video_receiver.cc
+++ b/media/cast/video_receiver/video_receiver.cc
@@ -16,35 +16,36 @@ namespace media {
namespace cast {
const int64 kMinSchedulingDelayMs = 1;
-static const int64 kMaxFrameWaitMs = 20;
-static const int64 kMinTimeBetweenOffsetUpdatesMs = 500;
+
+// Minimum time remaining before a frame is due to be rendered at which we
+// pull it for decode.
+static const int64 kMinFramePullMs = 20;
+static const int64 kMinTimeBetweenOffsetUpdatesMs = 2000;
static const int kTimeOffsetFilter = 8;
+static const int64_t kMinProcessIntervalMs = 5;
// Local implementation of RtpData (defined in rtp_rtcp_defines.h).
// Used to pass payload data into the video receiver.
class LocalRtpVideoData : public RtpData {
public:
- explicit LocalRtpVideoData(VideoReceiver* video_receiver)
- : video_receiver_(video_receiver),
+ explicit LocalRtpVideoData(base::TickClock* clock,
+ VideoReceiver* video_receiver)
+ : clock_(clock),
+ video_receiver_(video_receiver),
time_updated_(false),
incoming_rtp_timestamp_(0) {
}
- ~LocalRtpVideoData() {}
+ virtual ~LocalRtpVideoData() {}
virtual void OnReceivedPayloadData(const uint8* payload_data,
int payload_size,
const RtpCastHeader* rtp_header) OVERRIDE {
- {
- if (!time_updated_) {
- incoming_rtp_timestamp_ = rtp_header->webrtc.header.timestamp;
- time_incoming_packet_ = video_receiver_->clock_->NowTicks();
- time_updated_ = true;
- } else if (video_receiver_->clock_->NowTicks() > time_incoming_packet_ +
- base::TimeDelta::FromMilliseconds(kMinTimeBetweenOffsetUpdatesMs)) {
- incoming_rtp_timestamp_ = rtp_header->webrtc.header.timestamp;
- time_incoming_packet_ = video_receiver_->clock_->NowTicks();
- time_updated_ = true;
- }
+ base::TimeTicks now = clock_->NowTicks();
+ if (time_incoming_packet_.is_null() || now - time_incoming_packet_ >
+ base::TimeDelta::FromMilliseconds(kMinTimeBetweenOffsetUpdatesMs)) {
+ incoming_rtp_timestamp_ = rtp_header->webrtc.header.timestamp;
+ time_incoming_packet_ = now;
+ time_updated_ = true;
}
video_receiver_->IncomingRtpPacket(payload_data, payload_size, *rtp_header);
}
@@ -59,6 +60,7 @@ class LocalRtpVideoData : public RtpData {
}
private:
+ base::TickClock* clock_; // Not owned by this class.
VideoReceiver* video_receiver_;
bool time_updated_;
base::TimeTicks time_incoming_packet_;
@@ -73,14 +75,11 @@ class LocalRtpVideoFeedback : public RtpPayloadFeedback {
explicit LocalRtpVideoFeedback(VideoReceiver* video_receiver)
: video_receiver_(video_receiver) {
}
+
virtual void CastFeedback(const RtcpCastMessage& cast_message) OVERRIDE {
video_receiver_->CastFeedback(cast_message);
}
- virtual void RequestKeyFrame() OVERRIDE {
- video_receiver_->RequestKeyFrame();
- }
-
private:
VideoReceiver* video_receiver_;
};
@@ -107,18 +106,17 @@ class LocalRtpReceiverStatistics : public RtpReceiverStatistics {
RtpReceiver* rtp_receiver_;
};
-
-VideoReceiver::VideoReceiver(scoped_refptr<CastThread> cast_thread,
+VideoReceiver::VideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
const VideoReceiverConfig& video_config,
PacedPacketSender* const packet_sender)
- : cast_thread_(cast_thread),
+ : cast_environment_(cast_environment),
codec_(video_config.codec),
incoming_ssrc_(video_config.incoming_ssrc),
- default_tick_clock_(new base::DefaultTickClock()),
- clock_(default_tick_clock_.get()),
- incoming_payload_callback_(new LocalRtpVideoData(this)),
+ incoming_payload_callback_(
+ new LocalRtpVideoData(cast_environment_->Clock(), this)),
incoming_payload_feedback_(new LocalRtpVideoFeedback(this)),
- rtp_receiver_(NULL, &video_config, incoming_payload_callback_.get()),
+ rtp_receiver_(cast_environment_->Clock(), NULL, &video_config,
+ incoming_payload_callback_.get()),
rtp_video_receiver_statistics_(
new LocalRtpReceiverStatistics(&rtp_receiver_)),
weak_factory_(this) {
@@ -128,23 +126,26 @@ VideoReceiver::VideoReceiver(scoped_refptr<CastThread> cast_thread,
video_config.max_frame_rate / 1000;
DCHECK(max_unacked_frames) << "Invalid argument";
- framer_.reset(new Framer(incoming_payload_feedback_.get(),
+ framer_.reset(new Framer(cast_environment->Clock(),
+ incoming_payload_feedback_.get(),
video_config.incoming_ssrc,
video_config.decoder_faster_than_max_frame_rate,
max_unacked_frames));
if (!video_config.use_external_decoder) {
- video_decoder_ = new VideoDecoder(cast_thread_, video_config);
+ video_decoder_.reset(new VideoDecoder(video_config));
}
- rtcp_.reset(new Rtcp(NULL,
- packet_sender,
- NULL,
- rtp_video_receiver_statistics_.get(),
- video_config.rtcp_mode,
- base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
- false,
- video_config.feedback_ssrc,
- video_config.rtcp_c_name));
+ rtcp_.reset(
+ new Rtcp(cast_environment_->Clock(),
+ NULL,
+ packet_sender,
+ NULL,
+ rtp_video_receiver_statistics_.get(),
+ video_config.rtcp_mode,
+ base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
+ false,
+ video_config.feedback_ssrc,
+ video_config.rtcp_c_name));
rtcp_->SetRemoteSSRC(video_config.incoming_ssrc);
ScheduleNextRtcpReport();
@@ -155,76 +156,152 @@ VideoReceiver::~VideoReceiver() {}
void VideoReceiver::GetRawVideoFrame(
const VideoFrameDecodedCallback& callback) {
- DCHECK(video_decoder_);
- scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
- base::TimeTicks render_time;
- if (GetEncodedVideoFrame(encoded_frame.get(), &render_time)) {
- base::Closure frame_release_callback =
- base::Bind(&VideoReceiver::ReleaseFrame,
- weak_factory_.GetWeakPtr(), encoded_frame->frame_id);
- // Hand the ownership of the encoded frame to the decode thread.
- cast_thread_->PostTask(CastThread::VIDEO_DECODER, FROM_HERE,
- base::Bind(&VideoReceiver::DecodeVideoFrameThread,
- weak_factory_.GetWeakPtr(), encoded_frame.release(),
- render_time, callback, frame_release_callback));
- }
+ GetEncodedVideoFrame(base::Bind(&VideoReceiver::DecodeVideoFrame,
+ weak_factory_.GetWeakPtr(),
+ callback));
+}
+
+// Called when we have a frame to decode.
+void VideoReceiver::DecodeVideoFrame(
+ const VideoFrameDecodedCallback& callback,
+ scoped_ptr<EncodedVideoFrame> encoded_frame,
+ const base::TimeTicks& render_time) {
+ // Hand the ownership of the encoded frame to the decode thread.
+ cast_environment_->PostTask(CastEnvironment::VIDEO_DECODER, FROM_HERE,
+ base::Bind(&VideoReceiver::DecodeVideoFrameThread,
+ weak_factory_.GetWeakPtr(), base::Passed(&encoded_frame),
+ render_time, callback));
}
// Utility function to run the decoder on a designated decoding thread.
void VideoReceiver::DecodeVideoFrameThread(
- const EncodedVideoFrame* encoded_frame,
+ scoped_ptr<EncodedVideoFrame> encoded_frame,
const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_callback,
- base::Closure frame_release_callback) {
- video_decoder_->DecodeVideoFrame(encoded_frame, render_time,
- frame_decoded_callback, frame_release_callback);
- // Release memory.
- delete encoded_frame;
-}
+ const VideoFrameDecodedCallback& frame_decoded_callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::VIDEO_DECODER));
+ DCHECK(video_decoder_);
-bool VideoReceiver::GetEncodedVideoFrame(EncodedVideoFrame* encoded_frame,
- base::TimeTicks* render_time) {
- DCHECK(encoded_frame);
- DCHECK(render_time);
+ // TODO(mikhal): Allow the application to allocate this memory.
+ scoped_ptr<I420VideoFrame> video_frame(new I420VideoFrame());
+
+ bool success = video_decoder_->DecodeVideoFrame(encoded_frame.get(),
+ render_time, video_frame.get());
+
+ if (success) {
+ // Frame decoded - return frame to the user via callback.
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(frame_decoded_callback,
+ base::Passed(&video_frame), render_time));
+ } else {
+ // This will happen if we decide to decode but not show a frame.
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&VideoReceiver::GetRawVideoFrame,
+ weak_factory_.GetWeakPtr(), frame_decoded_callback));
+ }
+}
+// Called from the main cast thread.
+void VideoReceiver::GetEncodedVideoFrame(
+ const VideoFrameEncodedCallback& callback) {
+ scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
uint32 rtp_timestamp = 0;
bool next_frame = false;
- base::TimeTicks timeout = clock_->NowTicks() +
- base::TimeDelta::FromMilliseconds(kMaxFrameWaitMs);
- if (!framer_->GetEncodedVideoFrame(timeout,
- encoded_frame,
- &rtp_timestamp,
+ if (!framer_->GetEncodedVideoFrame(encoded_frame.get(), &rtp_timestamp,
&next_frame)) {
- return false;
+ // We have no video frames. Wait for new packet(s).
+ queued_encoded_callbacks_.push_back(callback);
+ return;
}
- base::TimeTicks now = clock_->NowTicks();
+ base::TimeTicks render_time;
+ if (PullEncodedVideoFrame(rtp_timestamp, next_frame, &encoded_frame,
+ &render_time)) {
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(callback, base::Passed(&encoded_frame), render_time));
+ } else {
+ // We have a video frame; however we are missing packets and we have time
+ // to wait for new packet(s).
+ queued_encoded_callbacks_.push_back(callback);
+ }
+}
+
+// Should we pull the encoded video frame from the framer? Yes if this is the
+// next frame, or if we are running out of time and have to pull the following
+// frame anyway.
+// If the frame is too old to be rendered we set the don't-show flag in the
+// video bitstream where possible.
+bool VideoReceiver::PullEncodedVideoFrame(uint32 rtp_timestamp,
+ bool next_frame, scoped_ptr<EncodedVideoFrame>* encoded_frame,
+ base::TimeTicks* render_time) {
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
*render_time = GetRenderTime(now, rtp_timestamp);
- base::TimeDelta max_frame_wait_delta =
- base::TimeDelta::FromMilliseconds(kMaxFrameWaitMs);
+ base::TimeDelta min_wait_delta =
+ base::TimeDelta::FromMilliseconds(kMinFramePullMs);
base::TimeDelta time_until_render = *render_time - now;
- base::TimeDelta time_until_release = time_until_render - max_frame_wait_delta;
- base::TimeDelta zero_delta = base::TimeDelta::FromMilliseconds(0);
- if (!next_frame && (time_until_release > zero_delta)) {
- // TODO(mikhal): If returning false, then the application should sleep, or
- // else which may spin here. Alternatively, we could sleep here, which will
- // be posting a delayed task to ourselves, but then can end up in getting
- // stuck as well.
+ if (!next_frame && (time_until_render > min_wait_delta)) {
+ // Example:
+ // We have decoded frame 1 and we have received the complete frame 3, but
+ // not frame 2. If we still have time before frame 3 should be rendered we
+ // will wait for 2 to arrive; however, if 2 never shows up this timer fires
+ // and we pull out frame 3 for decoding and rendering.
+ base::TimeDelta time_until_release = time_until_render - min_wait_delta;
+ cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&VideoReceiver::PlayoutTimeout, weak_factory_.GetWeakPtr()),
+ time_until_release);
+ VLOG(0) << "Wait before releasing frame "
+ << static_cast<int>((*encoded_frame)->frame_id)
+ << " time " << time_until_release.InMilliseconds();
return false;
}
- base::TimeDelta dont_show_timeout_delta = time_until_render -
+ base::TimeDelta dont_show_timeout_delta =
base::TimeDelta::FromMilliseconds(-kDontShowTimeoutMs);
if (codec_ == kVp8 && time_until_render < dont_show_timeout_delta) {
- encoded_frame->data[0] &= 0xef;
- VLOG(1) << "Don't show frame";
+ (*encoded_frame)->data[0] &= 0xef;
+ VLOG(0) << "Don't show frame "
+ << static_cast<int>((*encoded_frame)->frame_id)
+ << " time_until_render:" << time_until_render.InMilliseconds();
}
-
- encoded_frame->codec = codec_;
+ // We have a copy of the frame, release this one.
+ framer_->ReleaseFrame((*encoded_frame)->frame_id);
+ (*encoded_frame)->codec = codec_;
return true;
}
+void VideoReceiver::PlayoutTimeout() {
+ if (queued_encoded_callbacks_.empty()) return;
+
+ uint32 rtp_timestamp = 0;
+ bool next_frame = false;
+ scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
+
+ if (!framer_->GetEncodedVideoFrame(encoded_frame.get(), &rtp_timestamp,
+ &next_frame)) {
+ // We have no video frames. Wait for new packet(s).
+ // A timer should not be set unless we have a video frame; and if that frame
+ // was pulled early the callback should have been removed.
+ DCHECK(false);
+ return;
+ }
+ VLOG(1) << "PlayoutTimeout retrieved frame "
+ << static_cast<int>(encoded_frame->frame_id);
+
+ base::TimeTicks render_time;
+ if (PullEncodedVideoFrame(rtp_timestamp, next_frame, &encoded_frame,
+ &render_time)) {
+ if (!queued_encoded_callbacks_.empty()) {
+ VideoFrameEncodedCallback callback = queued_encoded_callbacks_.front();
+ queued_encoded_callbacks_.pop_front();
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(callback, base::Passed(&encoded_frame), render_time));
+ }
+ } else {
+ // We have a video frame; however we are missing packets and we have time
+ // to wait for new packet(s).
+ }
+}
+
base::TimeTicks VideoReceiver::GetRenderTime(base::TimeTicks now,
uint32 rtp_timestamp) {
// Senders time in ms when this frame was captured.
@@ -233,7 +310,7 @@ base::TimeTicks VideoReceiver::GetRenderTime(base::TimeTicks now,
base::TimeTicks time_incoming_packet;
uint32 incoming_rtp_timestamp;
- if (time_offset_.InMilliseconds()) { // was == 0
+ if (time_offset_.InMilliseconds() == 0) {
incoming_payload_callback_->GetPacketTimeInformation(
&time_incoming_packet, &incoming_rtp_timestamp);
@@ -266,29 +343,36 @@ base::TimeTicks VideoReceiver::GetRenderTime(base::TimeTicks now,
return (rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_);
}
-void VideoReceiver::IncomingPacket(const uint8* packet, int length) {
+void VideoReceiver::IncomingPacket(const uint8* packet, int length,
+ const base::Closure callback) {
if (Rtcp::IsRtcpPacket(packet, length)) {
rtcp_->IncomingRtcpPacket(packet, length);
- return;
+ } else {
+ rtp_receiver_.ReceivedPacket(packet, length);
}
- rtp_receiver_.ReceivedPacket(packet, length);
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
}
void VideoReceiver::IncomingRtpPacket(const uint8* payload_data,
int payload_size,
const RtpCastHeader& rtp_header) {
- framer_->InsertPacket(payload_data, payload_size, rtp_header);
+ bool complete = framer_->InsertPacket(payload_data, payload_size, rtp_header);
+
+ if (!complete) return; // Video frame not complete; wait for more packets.
+ if (queued_encoded_callbacks_.empty()) return; // No pending callback.
+
+ VideoFrameEncodedCallback callback = queued_encoded_callbacks_.front();
+ queued_encoded_callbacks_.pop_front();
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&VideoReceiver::GetEncodedVideoFrame,
+ weak_factory_.GetWeakPtr(), callback));
}
// Send a cast feedback message. Actual message created in the framer (cast
// message builder).
void VideoReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
rtcp_->SendRtcpCast(cast_message);
- time_last_sent_cast_message_= clock_->NowTicks();
-}
-
-void VideoReceiver::ReleaseFrame(uint8 frame_id) {
- framer_->ReleaseFrame(frame_id);
+ time_last_sent_cast_message_= cast_environment_->Clock()->NowTicks();
}
// Send a key frame request to the sender.
@@ -302,10 +386,11 @@ void VideoReceiver::ScheduleNextCastMessage() {
base::TimeTicks send_time;
framer_->TimeToSendNextCastMessage(&send_time);
- base::TimeDelta time_to_send = send_time - clock_->NowTicks();
+ base::TimeDelta time_to_send = send_time -
+ cast_environment_->Clock()->NowTicks();
time_to_send = std::max(time_to_send,
base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
- cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&VideoReceiver::SendNextCastMessage,
weak_factory_.GetWeakPtr()), time_to_send);
}
@@ -317,13 +402,13 @@ void VideoReceiver::SendNextCastMessage() {
// Schedule the next RTCP report to be sent back to the sender.
void VideoReceiver::ScheduleNextRtcpReport() {
- base::TimeDelta time_to_next =
- rtcp_->TimeToSendNextRtcpReport() - clock_->NowTicks();
+ base::TimeDelta time_to_next = rtcp_->TimeToSendNextRtcpReport() -
+ cast_environment_->Clock()->NowTicks();
time_to_next = std::max(time_to_next,
base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
- cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&VideoReceiver::SendNextRtcpReport,
weak_factory_.GetWeakPtr()), time_to_next);
}
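
The receiver above now decides per frame whether to release it for decode right away, keep waiting for an earlier missing frame (with a delayed PlayoutTimeout task as a backstop), or pull it late and set the VP8 don't-show bit. An illustrative, self-contained sketch of just that timing rule; the enum, helper name and the dont_show_timeout parameter (standing in for kDontShowTimeoutMs, which is defined outside this diff) are hypothetical, while the 20 ms value is kMinFramePullMs from the patch:

#include "base/time/time.h"

enum class PullDecision { kPullNow, kWaitForEarlierFrame };

PullDecision DecidePull(bool next_frame,
                        base::TimeTicks now,
                        base::TimeTicks render_time,
                        base::TimeDelta dont_show_timeout,
                        bool* set_dont_show_flag) {
  const base::TimeDelta min_wait_delta = base::TimeDelta::FromMilliseconds(20);
  const base::TimeDelta time_until_render = render_time - now;

  // Not the next expected frame and there is still time: keep waiting in case
  // the missing earlier frame arrives; a delayed timeout task fires if it
  // never does.
  if (!next_frame && time_until_render > min_wait_delta) {
    *set_dont_show_flag = false;
    return PullDecision::kWaitForEarlierFrame;
  }

  // Pull the frame now; if it is already too late to render, the caller sets
  // the don't-show bit in the VP8 bitstream so the frame is decoded as a
  // reference but not displayed.
  *set_dont_show_flag = (time_until_render < -dont_show_timeout);
  return PullDecision::kPullNow;
}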
diff --git a/media/cast/video_receiver/video_receiver.h b/media/cast/video_receiver/video_receiver.h
index 40d0b0320a..af23ce2db3 100644
--- a/media/cast/video_receiver/video_receiver.h
+++ b/media/cast/video_receiver/video_receiver.h
@@ -11,12 +11,11 @@
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/non_thread_safe.h"
-#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/cast_receiver.h"
-#include "media/cast/cast_thread.h"
#include "media/cast/rtcp/rtcp.h"
#include "media/cast/rtp_common/rtp_defines.h"
#include "media/cast/rtp_receiver/rtp_receiver.h"
@@ -38,8 +37,7 @@ class VideoDecoder;
class VideoReceiver : public base::NonThreadSafe,
public base::SupportsWeakPtr<VideoReceiver> {
public:
-
- VideoReceiver(scoped_refptr<CastThread> cast_thread,
+ VideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
const VideoReceiverConfig& video_config,
PacedPacketSender* const packet_sender);
@@ -48,31 +46,22 @@ class VideoReceiver : public base::NonThreadSafe,
// Request a raw frame. Will return frame via callback when available.
void GetRawVideoFrame(const VideoFrameDecodedCallback& callback);
- // Request an encoded frame. Memory allocated by application.
- bool GetEncodedVideoFrame(EncodedVideoFrame* video_frame,
- base::TimeTicks* render_time);
+ // Request an encoded frame. Will return frame via callback when available.
+ void GetEncodedVideoFrame(const VideoFrameEncodedCallback& callback);
// Insert a RTP packet to the video receiver.
- void IncomingPacket(const uint8* packet, int length);
-
- // Release frame - should be called following a GetEncodedVideoFrame call.
- // Removes frame from the frame map in the framer.
- void ReleaseFrame(uint8 frame_id);
+ void IncomingPacket(const uint8* packet, int length,
+ const base::Closure callback);
- void set_clock(base::TickClock* clock) {
- clock_ = clock;
- rtcp_->set_clock(clock);
- }
protected:
void IncomingRtpPacket(const uint8* payload_data,
int payload_size,
const RtpCastHeader& rtp_header);
void DecodeVideoFrameThread(
- const EncodedVideoFrame* encoded_frame,
+ scoped_ptr<EncodedVideoFrame> encoded_frame,
const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_callback,
- base::Closure frame_release_callback);
+ const VideoFrameDecodedCallback& frame_decoded_callback);
private:
friend class LocalRtpVideoData;
@@ -81,6 +70,17 @@ class VideoReceiver : public base::NonThreadSafe,
void CastFeedback(const RtcpCastMessage& cast_message);
void RequestKeyFrame();
+ void DecodeVideoFrame(const VideoFrameDecodedCallback& callback,
+ scoped_ptr<EncodedVideoFrame> encoded_frame,
+ const base::TimeTicks& render_time);
+
+ bool PullEncodedVideoFrame(uint32 rtp_timestamp,
+ bool next_frame,
+ scoped_ptr<EncodedVideoFrame>* encoded_frame,
+ base::TimeTicks* render_time);
+
+ void PlayoutTimeout();
+
// Returns Render time based on current time and the rtp timestamp.
base::TimeTicks GetRenderTime(base::TimeTicks now, uint32 rtp_timestamp);
@@ -94,8 +94,8 @@ class VideoReceiver : public base::NonThreadSafe,
// Actually send the next RTCP report.
void SendNextRtcpReport();
- scoped_refptr<VideoDecoder> video_decoder_;
- scoped_refptr<CastThread> cast_thread_;
+ scoped_ptr<VideoDecoder> video_decoder_;
+ scoped_refptr<CastEnvironment> cast_environment_;
scoped_ptr<Framer> framer_;
const VideoCodec codec_;
const uint32 incoming_ssrc_;
@@ -109,8 +109,7 @@ class VideoReceiver : public base::NonThreadSafe,
// Sender-receiver offset estimation.
base::TimeDelta time_offset_;
- scoped_ptr<base::TickClock> default_tick_clock_;
- base::TickClock* clock_;
+ std::list<VideoFrameEncodedCallback> queued_encoded_callbacks_;
base::WeakPtrFactory<VideoReceiver> weak_factory_;
@@ -121,4 +120,3 @@ class VideoReceiver : public base::NonThreadSafe,
} // namespace media
#endif // MEDIA_CAST_VIDEO_RECEIVER_VIDEO_RECEIVER_H_
-
diff --git a/media/cast/video_receiver/video_receiver_unittest.cc b/media/cast/video_receiver/video_receiver_unittest.cc
index b1b1c0b599..5dd33a2304 100644
--- a/media/cast/video_receiver/video_receiver_unittest.cc
+++ b/media/cast/video_receiver/video_receiver_unittest.cc
@@ -7,14 +7,14 @@
#include "base/memory/scoped_ptr.h"
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/cast_thread.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/pacing/mock_paced_packet_sender.h"
#include "media/cast/test/fake_task_runner.h"
#include "media/cast/video_receiver/video_receiver.h"
#include "testing/gmock/include/gmock/gmock.h"
static const int kPacketSize = 1500;
-static const int64 kStartMillisecond = 123456789;
+static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
namespace media {
namespace cast {
@@ -26,23 +26,38 @@ class TestVideoReceiverCallback :
public base::RefCountedThreadSafe<TestVideoReceiverCallback> {
public:
TestVideoReceiverCallback()
- :num_called_(0) {}
+ : num_called_(0) {}
+
// TODO(mikhal): Set and check expectations.
- void DecodeComplete(scoped_ptr<I420VideoFrame> frame,
- const base::TimeTicks render_time) {
+ void DecodeComplete(scoped_ptr<I420VideoFrame> video_frame,
+ const base::TimeTicks& render_time) {
++num_called_;
}
+
+ void FrameToDecode(scoped_ptr<EncodedVideoFrame> video_frame,
+ const base::TimeTicks& render_time) {
+ EXPECT_TRUE(video_frame->key_frame);
+ EXPECT_EQ(kVp8, video_frame->codec);
+ ++num_called_;
+ }
+
int number_times_called() { return num_called_;}
+
+ protected:
+ virtual ~TestVideoReceiverCallback() {}
+
private:
+ friend class base::RefCountedThreadSafe<TestVideoReceiverCallback>;
+
int num_called_;
};
class PeerVideoReceiver : public VideoReceiver {
public:
- PeerVideoReceiver(scoped_refptr<CastThread> cast_thread,
+ PeerVideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
const VideoReceiverConfig& video_config,
PacedPacketSender* const packet_sender)
- : VideoReceiver(cast_thread, video_config, packet_sender) {
+ : VideoReceiver(cast_environment, video_config, packet_sender) {
}
using VideoReceiver::IncomingRtpPacket;
};
@@ -55,17 +70,16 @@ class VideoReceiverTest : public ::testing::Test {
config_.codec = kVp8;
config_.use_external_decoder = false;
task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
- task_runner_, task_runner_);
+ cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_);
receiver_.reset(new
- PeerVideoReceiver(cast_thread_, config_, &mock_transport_));
+ PeerVideoReceiver(cast_environment_, config_, &mock_transport_));
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
video_receiver_callback_ = new TestVideoReceiverCallback();
- receiver_->set_clock(&testing_clock_);
}
- ~VideoReceiverTest() {}
+ virtual ~VideoReceiverTest() {}
virtual void SetUp() {
payload_.assign(kPacketSize, 0);
@@ -87,51 +101,65 @@ class VideoReceiverTest : public ::testing::Test {
base::SimpleTestTickClock testing_clock_;
scoped_refptr<test::FakeTaskRunner> task_runner_;
- scoped_refptr<CastThread> cast_thread_;
+ scoped_refptr<CastEnvironment> cast_environment_;
scoped_refptr<TestVideoReceiverCallback> video_receiver_callback_;
};
TEST_F(VideoReceiverTest, GetOnePacketEncodedframe) {
EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).WillRepeatedly(
testing::Return(true));
- receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
- EncodedVideoFrame video_frame;
- base::TimeTicks render_time;
- EXPECT_TRUE(receiver_->GetEncodedVideoFrame(&video_frame, &render_time));
- EXPECT_TRUE(video_frame.key_frame);
- EXPECT_EQ(kVp8, video_frame.codec);
+ receiver_->IncomingRtpPacket(payload_.data(),
+ static_cast<int>(payload_.size()), rtp_header_);
+
+ VideoFrameEncodedCallback frame_to_decode_callback =
+ base::Bind(&TestVideoReceiverCallback::FrameToDecode,
+ video_receiver_callback_);
+
+ receiver_->GetEncodedVideoFrame(frame_to_decode_callback);
task_runner_->RunTasks();
+ EXPECT_EQ(video_receiver_callback_->number_times_called(), 1);
}
TEST_F(VideoReceiverTest, MultiplePackets) {
EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).WillRepeatedly(
testing::Return(true));
rtp_header_.max_packet_id = 2;
- receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+ receiver_->IncomingRtpPacket(payload_.data(),
+ static_cast<int>(payload_.size()), rtp_header_);
++rtp_header_.packet_id;
++rtp_header_.webrtc.header.sequenceNumber;
- receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+ receiver_->IncomingRtpPacket(payload_.data(),
+ static_cast<int>(payload_.size()), rtp_header_);
++rtp_header_.packet_id;
- receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
- EncodedVideoFrame video_frame;
- base::TimeTicks render_time;
- EXPECT_TRUE(receiver_->GetEncodedVideoFrame(&video_frame, &render_time));
+ receiver_->IncomingRtpPacket(payload_.data(),
+ static_cast<int>(payload_.size()), rtp_header_);
+
+ VideoFrameEncodedCallback frame_to_decode_callback =
+ base::Bind(&TestVideoReceiverCallback::FrameToDecode,
+ video_receiver_callback_);
+
+ receiver_->GetEncodedVideoFrame(frame_to_decode_callback);
+
task_runner_->RunTasks();
+ EXPECT_EQ(video_receiver_callback_->number_times_called(), 1);
}
-// TODO(pwestin): add encoded frames.
TEST_F(VideoReceiverTest, GetOnePacketRawframe) {
EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).WillRepeatedly(
testing::Return(true));
- receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+ receiver_->IncomingRtpPacket(payload_.data(),
+ static_cast<int>(payload_.size()), rtp_header_);
// Decode error - requires legal input.
VideoFrameDecodedCallback frame_decoded_callback =
base::Bind(&TestVideoReceiverCallback::DecodeComplete,
video_receiver_callback_);
receiver_->GetRawVideoFrame(frame_decoded_callback);
task_runner_->RunTasks();
+ EXPECT_EQ(video_receiver_callback_->number_times_called(), 0);
}
+// TODO(pwestin): add encoded frames.
+
} // namespace cast
} // namespace media