diff options
author     Torne (Richard Coles) <torne@google.com>   2014-03-18 10:20:56 +0000
committer  Torne (Richard Coles) <torne@google.com>   2014-03-18 10:20:56 +0000
commit     a1401311d1ab56c4ed0a474bd38c108f75cb0cd9 (patch)
tree       3437151d9ae1ce20a1e53a0d98c19ca01c786394 /media/cast/video_receiver
parent     af5066f1e36c6579e74752647e6c584438f80f94 (diff)
download   chromium_org-a1401311d1ab56c4ed0a474bd38c108f75cb0cd9.tar.gz
Merge from Chromium at DEPS revision 257591
This commit was generated by merge_to_master.py.
Change-Id: I0010df2ec3fbb5d4947cd026de2feb150ce7a6b5
Diffstat (limited to 'media/cast/video_receiver')
-rw-r--r--  media/cast/video_receiver/codecs/vp8/vp8_decoder.cc    6
-rw-r--r--  media/cast/video_receiver/video_receiver.cc           80
-rw-r--r--  media/cast/video_receiver/video_receiver.h            17
-rw-r--r--  media/cast/video_receiver/video_receiver_unittest.cc  32
4 files changed, 95 insertions, 40 deletions
diff --git a/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc b/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc index 1792eafbdc..b5304bd15e 100644 --- a/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc +++ b/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc @@ -62,7 +62,7 @@ bool Vp8Decoder::Decode(const transport::EncodedVideoFrame* encoded_frame, const VideoFrameDecodedCallback& frame_decoded_cb) { DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::VIDEO_DECODER)); const int frame_id_int = static_cast<int>(encoded_frame->frame_id); - VLOG(1) << "VP8 decode frame:" << frame_id_int + VLOG(2) << "VP8 decode frame:" << frame_id_int << " sized:" << encoded_frame->data.size(); if (encoded_frame->data.empty()) @@ -77,7 +77,7 @@ bool Vp8Decoder::Decode(const transport::EncodedVideoFrame* encoded_frame, static_cast<unsigned int>(encoded_frame->data.size()), 0, real_time_decoding)) { - VLOG(1) << "Failed to decode VP8 frame."; + VLOG(1) << "Failed to decode VP8 frame:" << frame_id_int; return false; } @@ -117,7 +117,7 @@ bool Vp8Decoder::Decode(const transport::EncodedVideoFrame* encoded_frame, (img->d_h + 1) / 2, decoded_frame.get()); - VLOG(1) << "Decoded frame " << frame_id_int; + VLOG(2) << "Decoded frame " << frame_id_int; // Update logging from the main thread. 
cast_environment_->PostTask(CastEnvironment::MAIN, diff --git a/media/cast/video_receiver/video_receiver.cc b/media/cast/video_receiver/video_receiver.cc index 8d68431319..81f22aae4d 100644 --- a/media/cast/video_receiver/video_receiver.cc +++ b/media/cast/video_receiver/video_receiver.cc @@ -17,21 +17,11 @@ namespace { -using media::cast::kMaxIpPacketSize; -using media::cast::kRtcpCastLogHeaderSize; -using media::cast::kRtcpReceiverEventLogSize; - static const int64 kMinSchedulingDelayMs = 1; - -static const int64 kMinTimeBetweenOffsetUpdatesMs = 2000; -static const int kTimeOffsetFilter = 8; +static const int64 kMinTimeBetweenOffsetUpdatesMs = 1000; +static const int kTimeOffsetMaxCounter = 10; static const int64_t kMinProcessIntervalMs = 5; -// This is an upper bound on number of events that can fit into a single RTCP -// packet. -static const int64 kMaxEventSubscriberEntries = - (kMaxIpPacketSize - kRtcpCastLogHeaderSize) / kRtcpReceiverEventLogSize; - } // namespace namespace media { @@ -100,11 +90,11 @@ class LocalRtpReceiverStatistics : public RtpReceiverStatistics { VideoReceiver::VideoReceiver(scoped_refptr<CastEnvironment> cast_environment, const VideoReceiverConfig& video_config, - transport::PacedPacketSender* const packet_sender) + transport::PacedPacketSender* const packet_sender, + const SetTargetDelayCallback& target_delay_cb) : cast_environment_(cast_environment), - event_subscriber_( - kMaxEventSubscriberEntries, - ReceiverRtcpEventSubscriber::kVideoEventSubscriber), + event_subscriber_(kReceiverRtcpEventHistorySize, + ReceiverRtcpEventSubscriber::kVideoEventSubscriber), codec_(video_config.codec), target_delay_delta_( base::TimeDelta::FromMilliseconds(video_config.rtp_max_delay_ms)), @@ -118,9 +108,11 @@ VideoReceiver::VideoReceiver(scoped_refptr<CastEnvironment> cast_environment, incoming_payload_callback_.get()), rtp_video_receiver_statistics_( new LocalRtpReceiverStatistics(&rtp_receiver_)), + time_offset_counter_(0), decryptor_(), 
time_incoming_packet_updated_(false), incoming_rtp_timestamp_(0), + target_delay_cb_(target_delay_cb), weak_factory_(this) { int max_unacked_frames = video_config.rtp_max_delay_ms * video_config.max_frame_rate / 1000; @@ -149,7 +141,10 @@ VideoReceiver::VideoReceiver(scoped_refptr<CastEnvironment> cast_environment, video_config.feedback_ssrc, video_config.incoming_ssrc, video_config.rtcp_c_name)); + // Set the target delay that will be conveyed to the sender. + rtcp_->SetTargetDelay(target_delay_delta_); cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_); + memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_)); } VideoReceiver::~VideoReceiver() { @@ -309,7 +304,7 @@ bool VideoReceiver::PullEncodedVideoFrame( << static_cast<int>((*encoded_frame)->frame_id) << " time_until_render:" << time_until_render.InMilliseconds(); } else { - VLOG(1) << "Show frame " << static_cast<int>((*encoded_frame)->frame_id) + VLOG(2) << "Show frame " << static_cast<int>((*encoded_frame)->frame_id) << " time_until_render:" << time_until_render.InMilliseconds(); } // We have a copy of the frame, release this one. @@ -335,7 +330,7 @@ void VideoReceiver::PlayoutTimeout() { VLOG(1) << "Failed to retrieved a complete frame at this point in time"; return; } - VLOG(1) << "PlayoutTimeout retrieved frame " + VLOG(2) << "PlayoutTimeout retrieved frame " << static_cast<int>(encoded_frame->frame_id); if (decryptor_.initialized() && !DecryptVideoFrame(&encoded_frame)) { @@ -366,15 +361,15 @@ base::TimeTicks VideoReceiver::GetRenderTime(base::TimeTicks now, base::TimeTicks rtp_timestamp_in_ticks; // Compute the time offset_in_ticks based on the incoming_rtp_timestamp_. - if (time_offset_.InMilliseconds() == 0) { - if (!rtcp_->RtpTimestampInSenderTime(kVideoFrequency, + if (time_offset_counter_ == 0) { + // Check for received RTCP to sync the stream play it out asap. 
+ if (rtcp_->RtpTimestampInSenderTime(kVideoFrequency, incoming_rtp_timestamp_, &rtp_timestamp_in_ticks)) { - // We have not received any RTCP to sync the stream play it out as soon as - // possible. - return now; + + ++time_offset_counter_; } - time_offset_ = time_incoming_packet_ - rtp_timestamp_in_ticks; + return now; } else if (time_incoming_packet_updated_) { if (rtcp_->RtpTimestampInSenderTime(kVideoFrequency, incoming_rtp_timestamp_, @@ -382,8 +377,17 @@ base::TimeTicks VideoReceiver::GetRenderTime(base::TimeTicks now, // Time to update the time_offset. base::TimeDelta time_offset = time_incoming_packet_ - rtp_timestamp_in_ticks; - time_offset_ = ((kTimeOffsetFilter - 1) * time_offset_ + time_offset) / - kTimeOffsetFilter; + // Taking the minimum of the first kTimeOffsetMaxCounter values. We are + // assuming that we are looking for the minimum offset, which will occur + // when network conditions are the best. This should occur at least once + // within the first kTimeOffsetMaxCounter samples. Any drift should be + // very slow, and negligible for this use case. + if (time_offset_counter_ == 1) + time_offset_ = time_offset; + else if (time_offset_counter_ < kTimeOffsetMaxCounter) { + time_offset_ = std::min(time_offset_, time_offset); + } + ++time_offset_counter_; } } // Reset |time_incoming_packet_updated_| to enable a future measurement. @@ -394,6 +398,7 @@ base::TimeTicks VideoReceiver::GetRenderTime(base::TimeTicks now, // This can fail if we have not received any RTCP packets in a long time. 
return now; } + base::TimeTicks render_time = rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_; if (last_render_time_ > render_time) @@ -423,10 +428,17 @@ void VideoReceiver::IncomingParsedRtpPacket(const uint8* payload_data, if (time_incoming_packet_.is_null()) InitializeTimers(); incoming_rtp_timestamp_ = rtp_header.webrtc.header.timestamp; - time_incoming_packet_ = now; - time_incoming_packet_updated_ = true; + // The following incoming packet info is used for syncing sender and + // receiver clock. Use only the first packet of every frame to obtain a + // minimal value. + if (rtp_header.packet_id == 0) { + time_incoming_packet_ = now; + time_incoming_packet_updated_ = true; + } } + frame_id_to_rtp_timestamp_[rtp_header.frame_id & 0xff] = + rtp_header.webrtc.header.timestamp; cast_environment_->Logging()->InsertPacketEvent( now, kVideoPacketReceived, @@ -472,8 +484,10 @@ void VideoReceiver::CastFeedback(const RtcpCastMessage& cast_message) { DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); base::TimeTicks now = cast_environment_->Clock()->NowTicks(); - cast_environment_->Logging()->InsertGenericEvent( - now, kVideoAckSent, cast_message.ack_frame_id_); + RtpTimestamp rtp_timestamp = + frame_id_to_rtp_timestamp_[cast_message.ack_frame_id_ & 0xff]; + cast_environment_->Logging()->InsertFrameEvent( + now, kVideoAckSent, rtp_timestamp, cast_message.ack_frame_id_); rtcp_->SendRtcpFromRtpReceiver(&cast_message, &event_subscriber_); } @@ -526,5 +540,11 @@ void VideoReceiver::SendNextRtcpReport() { ScheduleNextRtcpReport(); } +void VideoReceiver::UpdateTargetDelay() { + NOTIMPLEMENTED(); + rtcp_->SetTargetDelay(target_delay_delta_); + target_delay_cb_.Run(target_delay_delta_); +} + } // namespace cast } // namespace media diff --git a/media/cast/video_receiver/video_receiver.h b/media/cast/video_receiver/video_receiver.h index 38c8bc1db4..ce9f9eb7ef 100644 --- a/media/cast/video_receiver/video_receiver.h +++ 
b/media/cast/video_receiver/video_receiver.h @@ -33,13 +33,18 @@ class Rtcp; class RtpReceiverStatistics; class VideoDecoder; +// Callback used by the video receiver to inform the audio receiver of the new +// delay used to compute the playout and render times. +typedef base::Callback<void(base::TimeDelta)> SetTargetDelayCallback; + // Should only be called from the Main cast thread. class VideoReceiver : public base::NonThreadSafe, public base::SupportsWeakPtr<VideoReceiver> { public: VideoReceiver(scoped_refptr<CastEnvironment> cast_environment, const VideoReceiverConfig& video_config, - transport::PacedPacketSender* const packet_sender); + transport::PacedPacketSender* const packet_sender, + const SetTargetDelayCallback& target_delay_cb); virtual ~VideoReceiver(); @@ -98,6 +103,10 @@ class VideoReceiver : public base::NonThreadSafe, // Actually send the next RTCP report. void SendNextRtcpReport(); + // Update the target delay based on past information. Will also update the + // rtcp module and the audio receiver. + void UpdateTargetDelay(); + scoped_ptr<VideoDecoder> video_decoder_; scoped_refptr<CastEnvironment> cast_environment_; @@ -115,12 +124,18 @@ class VideoReceiver : public base::NonThreadSafe, scoped_ptr<Rtcp> rtcp_; scoped_ptr<RtpReceiverStatistics> rtp_video_receiver_statistics_; base::TimeDelta time_offset_; // Sender-receiver offset estimation. + int time_offset_counter_; transport::TransportEncryptionHandler decryptor_; std::list<VideoFrameEncodedCallback> queued_encoded_callbacks_; bool time_incoming_packet_updated_; base::TimeTicks time_incoming_packet_; uint32 incoming_rtp_timestamp_; base::TimeTicks last_render_time_; + SetTargetDelayCallback target_delay_cb_; + + // This mapping allows us to log kVideoAckSent as a frame event. In addition + // it allows the event to be transmitted via RTCP. 
+ RtpTimestamp frame_id_to_rtp_timestamp_[256]; base::WeakPtrFactory<VideoReceiver> weak_factory_; diff --git a/media/cast/video_receiver/video_receiver_unittest.cc b/media/cast/video_receiver/video_receiver_unittest.cc index bf37a5476a..04a7f9d667 100644 --- a/media/cast/video_receiver/video_receiver_unittest.cc +++ b/media/cast/video_receiver/video_receiver_unittest.cc @@ -8,6 +8,7 @@ #include "base/test/simple_test_tick_clock.h" #include "media/cast/cast_defines.h" #include "media/cast/cast_environment.h" +#include "media/cast/logging/simple_event_subscriber.h" #include "media/cast/test/fake_single_thread_task_runner.h" #include "media/cast/transport/pacing/mock_paced_packet_sender.h" #include "media/cast/video_receiver/video_receiver.h" @@ -59,8 +60,12 @@ class PeerVideoReceiver : public VideoReceiver { public: PeerVideoReceiver(scoped_refptr<CastEnvironment> cast_environment, const VideoReceiverConfig& video_config, - transport::PacedPacketSender* const packet_sender) - : VideoReceiver(cast_environment, video_config, packet_sender) {} + transport::PacedPacketSender* const packet_sender, + const SetTargetDelayCallback& target_delay_cb) + : VideoReceiver(cast_environment, + video_config, + packet_sender, + target_delay_cb) {} using VideoReceiver::IncomingParsedRtpPacket; }; @@ -80,9 +85,9 @@ class VideoReceiverTest : public ::testing::Test { task_runner_, task_runner_, task_runner_, - GetDefaultCastReceiverLoggingConfig()); - receiver_.reset( - new PeerVideoReceiver(cast_environment_, config_, &mock_transport_)); + GetLoggingConfigWithRawEventsAndStatsEnabled()); + receiver_.reset(new PeerVideoReceiver( + cast_environment_, config_, &mock_transport_, target_delay_cb_)); testing_clock_->Advance( base::TimeDelta::FromMilliseconds(kStartMillisecond)); video_receiver_callback_ = new TestVideoReceiverCallback(); @@ -91,11 +96,12 @@ class VideoReceiverTest : public ::testing::Test { // Always start with a key frame. 
rtp_header_.is_key_frame = true; - rtp_header_.frame_id = 0; + rtp_header_.frame_id = 1234; rtp_header_.packet_id = 0; rtp_header_.max_packet_id = 0; rtp_header_.is_reference = false; rtp_header_.reference_frame_id = 0; + rtp_header_.webrtc.header.timestamp = 9000; } virtual ~VideoReceiverTest() {} @@ -110,6 +116,7 @@ class VideoReceiverTest : public ::testing::Test { scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_; scoped_refptr<CastEnvironment> cast_environment_; scoped_refptr<TestVideoReceiverCallback> video_receiver_callback_; + SetTargetDelayCallback target_delay_cb_; DISALLOW_COPY_AND_ASSIGN(VideoReceiverTest); }; @@ -129,6 +136,9 @@ TEST_F(VideoReceiverTest, GetOnePacketEncodedframe) { } TEST_F(VideoReceiverTest, MultiplePackets) { + SimpleEventSubscriber event_subscriber; + cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber); + EXPECT_CALL(mock_transport_, SendRtcpPacket(_)) .WillRepeatedly(testing::Return(true)); rtp_header_.max_packet_id = 2; @@ -149,6 +159,16 @@ TEST_F(VideoReceiverTest, MultiplePackets) { task_runner_->RunTasks(); EXPECT_EQ(video_receiver_callback_->number_times_called(), 1); + + std::vector<FrameEvent> frame_events; + event_subscriber.GetFrameEventsAndReset(&frame_events); + + ASSERT_TRUE(!frame_events.empty()); + EXPECT_EQ(kVideoAckSent, frame_events.begin()->type); + EXPECT_EQ(rtp_header_.frame_id, frame_events.begin()->frame_id); + EXPECT_EQ(rtp_header_.webrtc.header.timestamp, + frame_events.begin()->rtp_timestamp); + cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber); } TEST_F(VideoReceiverTest, GetOnePacketRawframe) { |