summaryrefslogtreecommitdiff
path: root/media/cast
diff options
context:
space:
mode:
authorTorne (Richard Coles) <torne@google.com>2014-06-03 10:58:34 +0100
committerTorne (Richard Coles) <torne@google.com>2014-06-03 10:58:34 +0100
commitcedac228d2dd51db4b79ea1e72c7f249408ee061 (patch)
treeaa4ff43d7fe316e95d12721ce5e17653a768a0dd /media/cast
parent6a869ecff032b5bed299d661b078b0555034598b (diff)
downloadchromium_org-cedac228d2dd51db4b79ea1e72c7f249408ee061.tar.gz
Merge from Chromium at DEPS revision 273901
This commit was generated by merge_to_master.py. Change-Id: I45745444894df927ffc1045ab8de88b9e52636a3
Diffstat (limited to 'media/cast')
-rw-r--r--media/cast/audio_receiver/audio_decoder.cc23
-rw-r--r--media/cast/audio_receiver/audio_decoder.h6
-rw-r--r--media/cast/audio_receiver/audio_decoder_unittest.cc25
-rw-r--r--media/cast/audio_receiver/audio_receiver.cc215
-rw-r--r--media/cast/audio_receiver/audio_receiver.h74
-rw-r--r--media/cast/audio_receiver/audio_receiver_unittest.cc141
-rw-r--r--media/cast/audio_sender/audio_encoder.cc24
-rw-r--r--media/cast/audio_sender/audio_encoder.h4
-rw-r--r--media/cast/audio_sender/audio_encoder_unittest.cc16
-rw-r--r--media/cast/audio_sender/audio_sender.cc117
-rw-r--r--media/cast/audio_sender/audio_sender.h25
-rw-r--r--media/cast/audio_sender/audio_sender_unittest.cc28
-rw-r--r--media/cast/base/clock_drift_smoother.cc58
-rw-r--r--media/cast/base/clock_drift_smoother.h52
-rw-r--r--media/cast/cast.gyp6
-rw-r--r--media/cast/cast_config.cc18
-rw-r--r--media/cast/cast_config.h89
-rw-r--r--media/cast/cast_defines.h71
-rw-r--r--media/cast/cast_receiver.h21
-rw-r--r--media/cast/cast_receiver_impl.cc12
-rw-r--r--media/cast/cast_receiver_impl.h4
-rw-r--r--media/cast/cast_testing.gypi226
-rw-r--r--media/cast/framer/cast_message_builder.cc73
-rw-r--r--media/cast/framer/cast_message_builder.h7
-rw-r--r--media/cast/framer/cast_message_builder_unittest.cc97
-rw-r--r--media/cast/framer/frame_buffer.cc47
-rw-r--r--media/cast/framer/frame_buffer.h8
-rw-r--r--media/cast/framer/frame_buffer_unittest.cc20
-rw-r--r--media/cast/framer/frame_id_map.cc60
-rw-r--r--media/cast/framer/frame_id_map.h6
-rw-r--r--media/cast/framer/framer.cc46
-rw-r--r--media/cast/framer/framer.h17
-rw-r--r--media/cast/framer/framer_unittest.cc168
-rw-r--r--media/cast/logging/encoding_event_subscriber.cc30
-rw-r--r--media/cast/logging/encoding_event_subscriber.h2
-rw-r--r--media/cast/logging/encoding_event_subscriber_unittest.cc101
-rw-r--r--media/cast/logging/log_deserializer.cc10
-rw-r--r--media/cast/logging/logging_defines.cc86
-rw-r--r--media/cast/logging/logging_defines.h72
-rw-r--r--media/cast/logging/logging_impl.cc30
-rw-r--r--media/cast/logging/logging_impl.h24
-rw-r--r--media/cast/logging/logging_impl_unittest.cc37
-rw-r--r--media/cast/logging/logging_raw.cc29
-rw-r--r--media/cast/logging/logging_raw.h35
-rw-r--r--media/cast/logging/logging_raw_unittest.cc46
-rw-r--r--media/cast/logging/proto/proto_utils.cc50
-rw-r--r--media/cast/logging/proto/raw_events.proto27
-rw-r--r--media/cast/logging/receiver_time_offset_estimator_impl.cc14
-rw-r--r--media/cast/logging/receiver_time_offset_estimator_impl.h8
-rw-r--r--media/cast/logging/receiver_time_offset_estimator_impl_unittest.cc38
-rw-r--r--media/cast/logging/serialize_deserialize_test.cc9
-rw-r--r--media/cast/logging/simple_event_subscriber_unittest.cc24
-rw-r--r--media/cast/logging/stats_event_subscriber.cc91
-rw-r--r--media/cast/logging/stats_event_subscriber.h2
-rw-r--r--media/cast/logging/stats_event_subscriber_unittest.cc31
-rw-r--r--media/cast/rtcp/mock_rtcp_receiver_feedback.h2
-rw-r--r--media/cast/rtcp/receiver_rtcp_event_subscriber.cc36
-rw-r--r--media/cast/rtcp/receiver_rtcp_event_subscriber.h22
-rw-r--r--media/cast/rtcp/receiver_rtcp_event_subscriber_unittest.cc38
-rw-r--r--media/cast/rtcp/rtcp.cc277
-rw-r--r--media/cast/rtcp/rtcp.h80
-rw-r--r--media/cast/rtcp/rtcp_defines.cc4
-rw-r--r--media/cast/rtcp/rtcp_defines.h8
-rw-r--r--media/cast/rtcp/rtcp_receiver.cc62
-rw-r--r--media/cast/rtcp/rtcp_receiver.h3
-rw-r--r--media/cast/rtcp/rtcp_receiver_unittest.cc82
-rw-r--r--media/cast/rtcp/rtcp_sender.cc14
-rw-r--r--media/cast/rtcp/rtcp_sender_unittest.cc56
-rw-r--r--media/cast/rtcp/rtcp_unittest.cc198
-rw-r--r--media/cast/rtcp/rtcp_utility.cc111
-rw-r--r--media/cast/rtcp/rtcp_utility.h12
-rw-r--r--media/cast/rtcp/sender_rtcp_event_subscriber.cc91
-rw-r--r--media/cast/rtcp/sender_rtcp_event_subscriber.h69
-rw-r--r--media/cast/rtcp/sender_rtcp_event_subscriber_unittest.cc120
-rw-r--r--media/cast/rtcp/test_rtcp_packet_builder.cc15
-rw-r--r--media/cast/rtcp/test_rtcp_packet_builder.h2
-rw-r--r--media/cast/rtp_receiver/rtp_receiver.cc4
-rw-r--r--media/cast/rtp_receiver/rtp_receiver.h4
-rw-r--r--media/cast/rtp_timestamp_helper.cc36
-rw-r--r--media/cast/rtp_timestamp_helper.h41
-rw-r--r--media/cast/test/encode_decode_test.cc136
-rw-r--r--media/cast/test/end2end_unittest.cc523
-rw-r--r--media/cast/test/fake_single_thread_task_runner.cc40
-rw-r--r--media/cast/test/fake_single_thread_task_runner.h4
-rw-r--r--media/cast/test/receiver.cc64
-rw-r--r--media/cast/test/sender.cc17
-rw-r--r--media/cast/test/skewed_single_thread_task_runner.cc53
-rw-r--r--media/cast/test/skewed_single_thread_task_runner.h58
-rw-r--r--media/cast/test/skewed_tick_clock.cc39
-rw-r--r--media/cast/test/skewed_tick_clock.h44
-rw-r--r--media/cast/test/utility/default_config.cc24
-rw-r--r--media/cast/test/utility/default_config.h8
-rw-r--r--media/cast/test/utility/in_process_receiver.cc4
-rw-r--r--media/cast/test/utility/in_process_receiver.h12
-rw-r--r--media/cast/test/utility/net_utility.cc36
-rw-r--r--media/cast/test/utility/net_utility.h18
-rw-r--r--media/cast/test/utility/udp_proxy.cc187
-rw-r--r--media/cast/test/utility/udp_proxy.h14
-rw-r--r--media/cast/test/utility/udp_proxy_main.cc24
-rw-r--r--media/cast/transport/cast_transport_config.cc16
-rw-r--r--media/cast/transport/cast_transport_config.h109
-rw-r--r--media/cast/transport/cast_transport_sender.h27
-rw-r--r--media/cast/transport/cast_transport_sender_impl.cc43
-rw-r--r--media/cast/transport/cast_transport_sender_impl.h20
-rw-r--r--media/cast/transport/pacing/mock_paced_packet_sender.h1
-rw-r--r--media/cast/transport/pacing/paced_sender.cc26
-rw-r--r--media/cast/transport/pacing/paced_sender.h2
-rw-r--r--media/cast/transport/pacing/paced_sender_unittest.cc19
-rw-r--r--media/cast/transport/rtcp/rtcp_builder.cc55
-rw-r--r--media/cast/transport/rtcp/rtcp_builder.h3
-rw-r--r--media/cast/transport/rtcp/rtcp_builder_unittest.cc54
-rw-r--r--media/cast/transport/rtp_sender/mock_rtp_sender.h35
-rw-r--r--media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc81
-rw-r--r--media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h26
-rw-r--r--media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc34
-rw-r--r--media/cast/transport/rtp_sender/rtp_sender.cc101
-rw-r--r--media/cast/transport/rtp_sender/rtp_sender.h22
-rw-r--r--media/cast/transport/transport/udp_transport_unittest.cc11
-rw-r--r--media/cast/transport/transport_audio_sender.cc25
-rw-r--r--media/cast/transport/transport_audio_sender.h14
-rw-r--r--media/cast/transport/transport_video_sender.cc36
-rw-r--r--media/cast/transport/transport_video_sender.h14
-rw-r--r--media/cast/video_receiver/codecs/vp8/vp8_decoder.h45
-rw-r--r--media/cast/video_receiver/video_decoder.cc19
-rw-r--r--media/cast/video_receiver/video_decoder.h6
-rw-r--r--media/cast/video_receiver/video_decoder_unittest.cc17
-rw-r--r--media/cast/video_receiver/video_receiver.cc225
-rw-r--r--media/cast/video_receiver/video_receiver.h70
-rw-r--r--media/cast/video_receiver/video_receiver_unittest.cc156
-rw-r--r--media/cast/video_sender/codecs/vp8/vp8_encoder.cc41
-rw-r--r--media/cast/video_sender/codecs/vp8/vp8_encoder.h2
-rw-r--r--media/cast/video_sender/external_video_encoder.cc45
-rw-r--r--media/cast/video_sender/external_video_encoder_unittest.cc36
-rw-r--r--media/cast/video_sender/fake_software_video_encoder.cc35
-rw-r--r--media/cast/video_sender/fake_software_video_encoder.h7
-rw-r--r--media/cast/video_sender/software_video_encoder.h4
-rw-r--r--media/cast/video_sender/video_encoder.h4
-rw-r--r--media/cast/video_sender/video_encoder_impl.cc19
-rw-r--r--media/cast/video_sender/video_encoder_impl.h4
-rw-r--r--media/cast/video_sender/video_encoder_impl_unittest.cc53
-rw-r--r--media/cast/video_sender/video_sender.cc192
-rw-r--r--media/cast/video_sender/video_sender.h34
-rw-r--r--media/cast/video_sender/video_sender_unittest.cc66
143 files changed, 3348 insertions, 3798 deletions
diff --git a/media/cast/audio_receiver/audio_decoder.cc b/media/cast/audio_receiver/audio_decoder.cc
index 4e75473a6b..e4e9a1453e 100644
--- a/media/cast/audio_receiver/audio_decoder.cc
+++ b/media/cast/audio_receiver/audio_decoder.cc
@@ -9,7 +9,6 @@
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
-#include "base/stl_util.h"
#include "base/sys_byteorder.h"
#include "media/cast/cast_defines.h"
#include "third_party/opus/src/include/opus.h"
@@ -40,20 +39,10 @@ class AudioDecoder::ImplBase
return cast_initialization_status_;
}
- void DecodeFrame(scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
+ void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback) {
DCHECK_EQ(cast_initialization_status_, STATUS_AUDIO_INITIALIZED);
- scoped_ptr<AudioBus> decoded_audio;
- if (encoded_frame->codec != codec_) {
- NOTREACHED();
- cast_environment_->PostTask(CastEnvironment::MAIN,
- FROM_HERE,
- base::Bind(callback,
- base::Passed(&decoded_audio),
- false));
- }
-
COMPILE_ASSERT(sizeof(encoded_frame->frame_id) == sizeof(last_frame_id_),
size_of_frame_id_types_do_not_match);
bool is_continuous = true;
@@ -68,8 +57,8 @@ class AudioDecoder::ImplBase
}
last_frame_id_ = encoded_frame->frame_id;
- decoded_audio = Decode(
- reinterpret_cast<uint8*>(string_as_array(&encoded_frame->data)),
+ scoped_ptr<AudioBus> decoded_audio = Decode(
+ encoded_frame->mutable_bytes(),
static_cast<int>(encoded_frame->data.size()));
cast_environment_->PostTask(CastEnvironment::MAIN,
FROM_HERE,
@@ -211,9 +200,9 @@ class AudioDecoder::Pcm16Impl : public AudioDecoder::ImplBase {
AudioDecoder::AudioDecoder(
const scoped_refptr<CastEnvironment>& cast_environment,
- const AudioReceiverConfig& audio_config)
+ const FrameReceiverConfig& audio_config)
: cast_environment_(cast_environment) {
- switch (audio_config.codec) {
+ switch (audio_config.codec.audio) {
case transport::kOpus:
impl_ = new OpusImpl(cast_environment,
audio_config.channels,
@@ -239,7 +228,7 @@ CastInitializationStatus AudioDecoder::InitializationResult() const {
}
void AudioDecoder::DecodeFrame(
- scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
+ scoped_ptr<transport::EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback) {
DCHECK(encoded_frame.get());
DCHECK(!callback.is_null());
diff --git a/media/cast/audio_receiver/audio_decoder.h b/media/cast/audio_receiver/audio_decoder.h
index 0e10ebaf04..a8e264dc94 100644
--- a/media/cast/audio_receiver/audio_decoder.h
+++ b/media/cast/audio_receiver/audio_decoder.h
@@ -27,11 +27,11 @@ class AudioDecoder {
bool is_continuous)> DecodeFrameCallback;
AudioDecoder(const scoped_refptr<CastEnvironment>& cast_environment,
- const AudioReceiverConfig& audio_config);
+ const FrameReceiverConfig& audio_config);
virtual ~AudioDecoder();
// Returns STATUS_AUDIO_INITIALIZED if the decoder was successfully
- // constructed from the given AudioReceiverConfig. If this method returns any
+ // constructed from the given FrameReceiverConfig. If this method returns any
// other value, calls to DecodeFrame() will not succeed.
CastInitializationStatus InitializationResult() const;
@@ -42,7 +42,7 @@ class AudioDecoder {
// monotonically-increasing by 1 for each successive call to this method.
// When it is not, the decoder will assume one or more frames have been
// dropped (e.g., due to packet loss), and will perform recovery actions.
- void DecodeFrame(scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
+ void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback);
private:
diff --git a/media/cast/audio_receiver/audio_decoder_unittest.cc b/media/cast/audio_receiver/audio_decoder_unittest.cc
index d32dbe19b7..11973ab9a0 100644
--- a/media/cast/audio_receiver/audio_decoder_unittest.cc
+++ b/media/cast/audio_receiver/audio_decoder_unittest.cc
@@ -4,7 +4,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/stl_util.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/sys_byteorder.h"
@@ -12,6 +11,7 @@
#include "media/cast/audio_receiver/audio_decoder.h"
#include "media/cast/cast_config.h"
#include "media/cast/test/utility/audio_utility.h"
+#include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/standalone_cast_environment.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/opus/src/include/opus.h"
@@ -38,11 +38,10 @@ class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
protected:
virtual void SetUp() OVERRIDE {
- AudioReceiverConfig decoder_config;
- decoder_config.use_external_decoder = false;
+ FrameReceiverConfig decoder_config = GetDefaultAudioReceiverConfig();
decoder_config.frequency = GetParam().sampling_rate;
decoder_config.channels = GetParam().num_channels;
- decoder_config.codec = GetParam().codec;
+ decoder_config.codec.audio = GetParam().codec;
audio_decoder_.reset(new AudioDecoder(cast_environment_, decoder_config));
CHECK_EQ(STATUS_AUDIO_INITIALIZED, audio_decoder_->InitializationResult());
@@ -71,15 +70,16 @@ class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
total_audio_decoded_ = base::TimeDelta();
}
- // Called from the unit test thread to create another EncodedAudioFrame and
- // push it into the decoding pipeline.
+ // Called from the unit test thread to create another EncodedFrame and push it
+ // into the decoding pipeline.
void FeedMoreAudio(const base::TimeDelta& duration,
int num_dropped_frames) {
- // Prepare a simulated EncodedAudioFrame to feed into the AudioDecoder.
- scoped_ptr<transport::EncodedAudioFrame> encoded_frame(
- new transport::EncodedAudioFrame());
- encoded_frame->codec = GetParam().codec;
+ // Prepare a simulated EncodedFrame to feed into the AudioDecoder.
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
+ encoded_frame->dependency = transport::EncodedFrame::KEY;
encoded_frame->frame_id = last_frame_id_ + 1 + num_dropped_frames;
+ encoded_frame->referenced_frame_id = encoded_frame->frame_id;
last_frame_id_ = encoded_frame->frame_id;
const scoped_ptr<AudioBus> audio_bus(
@@ -93,7 +93,7 @@ class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
if (GetParam().codec == transport::kPcm16) {
encoded_frame->data.resize(num_elements * sizeof(int16));
int16* const pcm_data =
- reinterpret_cast<int16*>(string_as_array(&encoded_frame->data));
+ reinterpret_cast<int16*>(encoded_frame->mutable_bytes());
for (size_t i = 0; i < interleaved.size(); ++i)
pcm_data[i] = static_cast<int16>(base::HostToNet16(interleaved[i]));
} else if (GetParam().codec == transport::kOpus) {
@@ -105,8 +105,7 @@ class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
opus_encode(opus_encoder,
&interleaved.front(),
audio_bus->frames(),
- reinterpret_cast<unsigned char*>(
- string_as_array(&encoded_frame->data)),
+ encoded_frame->mutable_bytes(),
encoded_frame->data.size());
CHECK_GT(payload_size, 1);
encoded_frame->data.resize(payload_size);
diff --git a/media/cast/audio_receiver/audio_receiver.cc b/media/cast/audio_receiver/audio_receiver.cc
index 94abdfffb1..1f47827ec6 100644
--- a/media/cast/audio_receiver/audio_receiver.cc
+++ b/media/cast/audio_receiver/audio_receiver.cc
@@ -14,29 +14,30 @@
namespace {
const int kMinSchedulingDelayMs = 1;
-// TODO(miu): This should go in AudioReceiverConfig.
-const int kTypicalAudioFrameDurationMs = 10;
} // namespace
namespace media {
namespace cast {
AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
+ const FrameReceiverConfig& audio_config,
transport::PacedPacketSender* const packet_sender)
: RtpReceiver(cast_environment->Clock(), &audio_config, NULL),
cast_environment_(cast_environment),
- event_subscriber_(kReceiverRtcpEventHistorySize,
- ReceiverRtcpEventSubscriber::kAudioEventSubscriber),
- codec_(audio_config.codec),
+ event_subscriber_(kReceiverRtcpEventHistorySize, AUDIO_EVENT),
+ codec_(audio_config.codec.audio),
frequency_(audio_config.frequency),
- target_delay_delta_(
+ target_playout_delay_(
base::TimeDelta::FromMilliseconds(audio_config.rtp_max_delay_ms)),
+ expected_frame_duration_(
+ base::TimeDelta::FromSeconds(1) / audio_config.max_frame_rate),
+ reports_are_scheduled_(false),
framer_(cast_environment->Clock(),
this,
audio_config.incoming_ssrc,
true,
- audio_config.rtp_max_delay_ms / kTypicalAudioFrameDurationMs),
+ audio_config.rtp_max_delay_ms * audio_config.max_frame_rate /
+ 1000),
rtcp_(cast_environment,
NULL,
NULL,
@@ -46,13 +47,16 @@ AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
audio_config.feedback_ssrc,
audio_config.incoming_ssrc,
- audio_config.rtcp_c_name),
+ audio_config.rtcp_c_name,
+ true),
is_waiting_for_consecutive_frame_(false),
+ lip_sync_drift_(ClockDriftSmoother::GetDefaultTimeConstant()),
weak_factory_(this) {
- if (!audio_config.use_external_decoder)
- audio_decoder_.reset(new AudioDecoder(cast_environment, audio_config));
+ DCHECK_GT(audio_config.rtp_max_delay_ms, 0);
+ DCHECK_GT(audio_config.max_frame_rate, 0);
+ audio_decoder_.reset(new AudioDecoder(cast_environment, audio_config));
decryptor_.Initialize(audio_config.aes_key, audio_config.aes_iv_mask);
- rtcp_.SetTargetDelay(target_delay_delta_);
+ rtcp_.SetTargetDelay(target_playout_delay_);
cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_);
memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_));
}
@@ -62,47 +66,59 @@ AudioReceiver::~AudioReceiver() {
cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_);
}
-void AudioReceiver::InitializeTimers() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- ScheduleNextRtcpReport();
- ScheduleNextCastMessage();
-}
-
void AudioReceiver::OnReceivedPayloadData(const uint8* payload_data,
size_t payload_size,
const RtpCastHeader& rtp_header) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- // TODO(pwestin): update this as video to refresh over time.
- if (time_first_incoming_packet_.is_null()) {
- InitializeTimers();
- first_incoming_rtp_timestamp_ = rtp_header.rtp_timestamp;
- time_first_incoming_packet_ = now;
- }
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
frame_id_to_rtp_timestamp_[rtp_header.frame_id & 0xff] =
rtp_header.rtp_timestamp;
cast_environment_->Logging()->InsertPacketEvent(
- now, kAudioPacketReceived, rtp_header.rtp_timestamp,
+ now, PACKET_RECEIVED, AUDIO_EVENT, rtp_header.rtp_timestamp,
rtp_header.frame_id, rtp_header.packet_id, rtp_header.max_packet_id,
payload_size);
bool duplicate = false;
const bool complete =
framer_.InsertPacket(payload_data, payload_size, rtp_header, &duplicate);
- if (duplicate) {
- cast_environment_->Logging()->InsertPacketEvent(
- now,
- kDuplicateAudioPacketReceived,
- rtp_header.rtp_timestamp,
- rtp_header.frame_id,
- rtp_header.packet_id,
- rtp_header.max_packet_id,
- payload_size);
- // Duplicate packets are ignored.
+
+ // Duplicate packets are ignored.
+ if (duplicate)
return;
+
+ // Update lip-sync values upon receiving the first packet of each frame, or if
+ // they have never been set yet.
+ if (rtp_header.packet_id == 0 || lip_sync_reference_time_.is_null()) {
+ RtpTimestamp fresh_sync_rtp;
+ base::TimeTicks fresh_sync_reference;
+ if (!rtcp_.GetLatestLipSyncTimes(&fresh_sync_rtp, &fresh_sync_reference)) {
+ // HACK: The sender should have provided Sender Reports before the first
+ // frame was sent. However, the spec does not currently require this.
+ // Therefore, when the data is missing, the local clock is used to
+ // generate reference timestamps.
+ VLOG(2) << "Lip sync info missing. Falling-back to local clock.";
+ fresh_sync_rtp = rtp_header.rtp_timestamp;
+ fresh_sync_reference = now;
+ }
+ // |lip_sync_reference_time_| is always incremented according to the time
+ // delta computed from the difference in RTP timestamps. Then,
+ // |lip_sync_drift_| accounts for clock drift and also smoothes-out any
+ // sudden/discontinuous shifts in the series of reference time values.
+ if (lip_sync_reference_time_.is_null()) {
+ lip_sync_reference_time_ = fresh_sync_reference;
+ } else {
+ lip_sync_reference_time_ += RtpDeltaToTimeDelta(
+ static_cast<int32>(fresh_sync_rtp - lip_sync_rtp_timestamp_),
+ frequency_);
+ }
+ lip_sync_rtp_timestamp_ = fresh_sync_rtp;
+ lip_sync_drift_.Update(
+ now, fresh_sync_reference - lip_sync_reference_time_);
}
+
+ // Frame not complete; wait for more packets.
if (!complete)
return;
@@ -124,15 +140,15 @@ void AudioReceiver::GetRawAudioFrame(
void AudioReceiver::DecodeEncodedAudioFrame(
const AudioFrameDecodedCallback& callback,
- scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
- const base::TimeTicks& playout_time) {
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (!encoded_frame) {
- callback.Run(make_scoped_ptr<AudioBus>(NULL), playout_time, false);
+ callback.Run(make_scoped_ptr<AudioBus>(NULL), base::TimeTicks(), false);
return;
}
const uint32 frame_id = encoded_frame->frame_id;
const uint32 rtp_timestamp = encoded_frame->rtp_timestamp;
+ const base::TimeTicks playout_time = encoded_frame->reference_time;
audio_decoder_->DecodeFrame(encoded_frame.Pass(),
base::Bind(&AudioReceiver::EmitRawAudioFrame,
cast_environment_,
@@ -155,16 +171,15 @@ void AudioReceiver::EmitRawAudioFrame(
if (audio_bus.get()) {
const base::TimeTicks now = cast_environment->Clock()->NowTicks();
cast_environment->Logging()->InsertFrameEvent(
- now, kAudioFrameDecoded, rtp_timestamp, frame_id);
+ now, FRAME_DECODED, AUDIO_EVENT, rtp_timestamp, frame_id);
cast_environment->Logging()->InsertFrameEventWithDelay(
- now, kAudioPlayoutDelay, rtp_timestamp, frame_id,
+ now, FRAME_PLAYOUT, AUDIO_EVENT, rtp_timestamp, frame_id,
playout_time - now);
}
callback.Run(audio_bus.Pass(), playout_time, is_continuous);
}
-void AudioReceiver::GetEncodedAudioFrame(
- const AudioFrameEncodedCallback& callback) {
+void AudioReceiver::GetEncodedAudioFrame(const FrameEncodedCallback& callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
frame_request_queue_.push_back(callback);
EmitAvailableEncodedFrames();
@@ -177,26 +192,36 @@ void AudioReceiver::EmitAvailableEncodedFrames() {
// Attempt to peek at the next completed frame from the |framer_|.
// TODO(miu): We should only be peeking at the metadata, and not copying the
// payload yet! Or, at least, peek using a StringPiece instead of a copy.
- scoped_ptr<transport::EncodedAudioFrame> encoded_frame(
- new transport::EncodedAudioFrame());
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
bool is_consecutively_next_frame = false;
- if (!framer_.GetEncodedAudioFrame(encoded_frame.get(),
- &is_consecutively_next_frame)) {
+ bool have_multiple_complete_frames = false;
+ if (!framer_.GetEncodedFrame(encoded_frame.get(),
+ &is_consecutively_next_frame,
+ &have_multiple_complete_frames)) {
VLOG(1) << "Wait for more audio packets to produce a completed frame.";
return; // OnReceivedPayloadData() will invoke this method in the future.
}
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ const base::TimeTicks playout_time =
+ GetPlayoutTime(encoded_frame->rtp_timestamp);
+
+ // If we have multiple decodable frames, and the current frame is
+ // too old, then skip it and decode the next frame instead.
+ if (have_multiple_complete_frames && now > playout_time) {
+ framer_.ReleaseFrame(encoded_frame->frame_id);
+ continue;
+ }
+
// If |framer_| has a frame ready that is out of sequence, examine the
// playout time to determine whether it's acceptable to continue, thereby
// skipping one or more frames. Skip if the missing frame wouldn't complete
// playing before the start of playback of the available frame.
- const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- const base::TimeTicks playout_time =
- GetPlayoutTime(now, encoded_frame->rtp_timestamp);
if (!is_consecutively_next_frame) {
// TODO(miu): Also account for expected decode time here?
const base::TimeTicks earliest_possible_end_time_of_missing_frame =
- now + base::TimeDelta::FromMilliseconds(kTypicalAudioFrameDurationMs);
+ now + expected_frame_duration_;
if (earliest_possible_end_time_of_missing_frame < playout_time) {
VLOG(1) << "Wait for next consecutive frame instead of skipping.";
if (!is_waiting_for_consecutive_frame_) {
@@ -226,14 +251,13 @@ void AudioReceiver::EmitAvailableEncodedFrames() {
encoded_frame->data.swap(decrypted_audio_data);
}
- // At this point, we have a decrypted EncodedAudioFrame ready to be emitted.
- encoded_frame->codec = codec_;
+ // At this point, we have a decrypted EncodedFrame ready to be emitted.
+ encoded_frame->reference_time = playout_time;
framer_.ReleaseFrame(encoded_frame->frame_id);
cast_environment_->PostTask(CastEnvironment::MAIN,
FROM_HERE,
base::Bind(frame_request_queue_.front(),
- base::Passed(&encoded_frame),
- playout_time));
+ base::Passed(&encoded_frame)));
frame_request_queue_.pop_front();
}
}
@@ -245,6 +269,15 @@ void AudioReceiver::EmitAvailableEncodedFramesAfterWaiting() {
EmitAvailableEncodedFrames();
}
+base::TimeTicks AudioReceiver::GetPlayoutTime(uint32 rtp_timestamp) const {
+ return lip_sync_reference_time_ +
+ lip_sync_drift_.Current() +
+ RtpDeltaToTimeDelta(
+ static_cast<int32>(rtp_timestamp - lip_sync_rtp_timestamp_),
+ frequency_) +
+ target_playout_delay_;
+}
+
void AudioReceiver::IncomingPacket(scoped_ptr<Packet> packet) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (Rtcp::IsRtcpPacket(&packet->front(), packet->size())) {
@@ -252,12 +285,11 @@ void AudioReceiver::IncomingPacket(scoped_ptr<Packet> packet) {
} else {
ReceivedPacket(&packet->front(), packet->size());
}
-}
-
-void AudioReceiver::SetTargetDelay(base::TimeDelta target_delay) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- target_delay_delta_ = target_delay;
- rtcp_.SetTargetDelay(target_delay_delta_);
+ if (!reports_are_scheduled_) {
+ ScheduleNextRtcpReport();
+ ScheduleNextCastMessage();
+ reports_are_scheduled_ = true;
+ }
}
void AudioReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
@@ -266,71 +298,14 @@ void AudioReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
RtpTimestamp rtp_timestamp =
frame_id_to_rtp_timestamp_[cast_message.ack_frame_id_ & 0xff];
cast_environment_->Logging()->InsertFrameEvent(
- now, kAudioAckSent, rtp_timestamp, cast_message.ack_frame_id_);
+ now, FRAME_ACK_SENT, AUDIO_EVENT, rtp_timestamp,
+ cast_message.ack_frame_id_);
ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
event_subscriber_.GetRtcpEventsAndReset(&rtcp_events);
rtcp_.SendRtcpFromRtpReceiver(&cast_message, &rtcp_events);
}
-base::TimeTicks AudioReceiver::GetPlayoutTime(base::TimeTicks now,
- uint32 rtp_timestamp) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- // Senders time in ms when this frame was recorded.
- // Note: the senders clock and our local clock might not be synced.
- base::TimeTicks rtp_timestamp_in_ticks;
- base::TimeTicks playout_time;
- if (time_offset_ == base::TimeDelta()) {
- if (rtcp_.RtpTimestampInSenderTime(frequency_,
- first_incoming_rtp_timestamp_,
- &rtp_timestamp_in_ticks)) {
- time_offset_ = time_first_incoming_packet_ - rtp_timestamp_in_ticks;
- // TODO(miu): As clocks drift w.r.t. each other, and other factors take
- // effect, |time_offset_| should be updated. Otherwise, we might as well
- // always compute the time offsets agnostic of RTCP's time data.
- } else {
- // We have not received any RTCP to sync the stream play it out as soon as
- // possible.
-
- // BUG: This means we're literally switching to a different timeline a
- // short time after a cast receiver has been running. Re-enable
- // End2EndTest.StartSenderBeforeReceiver once this is fixed.
- // http://crbug.com/356942
- uint32 rtp_timestamp_diff = rtp_timestamp - first_incoming_rtp_timestamp_;
-
- int frequency_khz = frequency_ / 1000;
- base::TimeDelta rtp_time_diff_delta =
- base::TimeDelta::FromMilliseconds(rtp_timestamp_diff / frequency_khz);
- base::TimeDelta time_diff_delta = now - time_first_incoming_packet_;
-
- playout_time = now + std::max(rtp_time_diff_delta - time_diff_delta,
- base::TimeDelta());
- }
- }
- if (playout_time.is_null()) {
- // This can fail if we have not received any RTCP packets in a long time.
- if (rtcp_.RtpTimestampInSenderTime(frequency_, rtp_timestamp,
- &rtp_timestamp_in_ticks)) {
- playout_time =
- rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_;
- } else {
- playout_time = now;
- }
- }
-
- // TODO(miu): This is broken since we literally switch timelines once |rtcp_|
- // can provide us the |time_offset_|. Furthermore, this "getter" method may
- // be called on frames received out-of-order, which means the playout times
- // for earlier frames will be computed incorrectly.
-#if 0
- // Don't allow the playout time to go backwards.
- if (last_playout_time_ > playout_time) playout_time = last_playout_time_;
- last_playout_time_ = playout_time;
-#endif
-
- return playout_time;
-}
-
void AudioReceiver::ScheduleNextRtcpReport() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeDelta time_to_send = rtcp_.TimeToSendNextRtcpReport() -
diff --git a/media/cast/audio_receiver/audio_receiver.h b/media/cast/audio_receiver/audio_receiver.h
index 6aae1361af..87c5147b50 100644
--- a/media/cast/audio_receiver/audio_receiver.h
+++ b/media/cast/audio_receiver/audio_receiver.h
@@ -14,6 +14,7 @@
#include "base/threading/non_thread_safe.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
+#include "media/cast/base/clock_drift_smoother.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/cast_receiver.h"
@@ -39,10 +40,6 @@ class AudioDecoder;
// each step of the pipeline (i.e., encode frame, then transmit/retransmit from
// the sender, then receive and re-order packets on the receiver, then decode
// frame) can vary in duration and is typically very hard to predict.
-// Heuristics will determine when the targeted playout delay is insufficient in
-// the current environment; and the receiver can then increase the playout
-// delay, notifying the sender, to account for the extra variance.
-// TODO(miu): Make the last sentence true. http://crbug.com/360111
//
// Two types of frames can be requested: 1) A frame of decoded audio data; or 2)
// a frame of still-encoded audio data, to be passed into an external audio
@@ -59,7 +56,7 @@ class AudioReceiver : public RtpReceiver,
public base::SupportsWeakPtr<AudioReceiver> {
public:
AudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
+ const FrameReceiverConfig& audio_config,
transport::PacedPacketSender* const packet_sender);
virtual ~AudioReceiver();
@@ -76,15 +73,11 @@ class AudioReceiver : public RtpReceiver,
//
// The given |callback| is guaranteed to be run at some point in the future,
// even if to respond with NULL at shutdown time.
- void GetEncodedAudioFrame(const AudioFrameEncodedCallback& callback);
+ void GetEncodedAudioFrame(const FrameEncodedCallback& callback);
// Deliver another packet, possibly a duplicate, and possibly out-of-order.
void IncomingPacket(scoped_ptr<Packet> packet);
- // Update target audio delay used to compute the playout time. Rtcp
- // will also be updated (will be included in all outgoing reports).
- void SetTargetDelay(base::TimeDelta target_delay);
-
protected:
friend class AudioReceiverTest; // Invokes OnReceivedPayloadData().
@@ -106,17 +99,16 @@ class AudioReceiver : public RtpReceiver,
// EmitAvailableEncodedFrames().
void EmitAvailableEncodedFramesAfterWaiting();
- // Feeds an EncodedAudioFrame into |audio_decoder_|. GetRawAudioFrame() uses
- // this as a callback for GetEncodedAudioFrame().
+ // Feeds an EncodedFrame into |audio_decoder_|. GetRawAudioFrame() uses this
+ // as a callback for GetEncodedAudioFrame().
void DecodeEncodedAudioFrame(
const AudioFrameDecodedCallback& callback,
- scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
- const base::TimeTicks& playout_time);
-
- // Return the playout time based on the current time and rtp timestamp.
- base::TimeTicks GetPlayoutTime(base::TimeTicks now, uint32 rtp_timestamp);
+ scoped_ptr<transport::EncodedFrame> encoded_frame);
- void InitializeTimers();
+ // Computes the playout time for a frame with the given |rtp_timestamp|.
+ // Because lip-sync info is refreshed regularly, calling this method with the
+ // same argument may return different results.
+ base::TimeTicks GetPlayoutTime(uint32 rtp_timestamp) const;
// Schedule the next RTCP report.
void ScheduleNextRtcpReport();
@@ -150,28 +142,62 @@ class AudioReceiver : public RtpReceiver,
// Processes raw audio events to be sent over to the cast sender via RTCP.
ReceiverRtcpEventSubscriber event_subscriber_;
+ // Configured audio codec.
const transport::AudioCodec codec_;
+
+ // RTP timebase: The number of RTP units advanced per one second. For audio,
+ // this is the sampling rate.
const int frequency_;
- base::TimeDelta target_delay_delta_;
+
+ // The total amount of time between a frame's capture/recording on the sender
+ // and its playback on the receiver (i.e., shown to a user). This is fixed as
+ // a value large enough to give the system sufficient time to encode,
+ // transmit/retransmit, receive, decode, and render; given its run-time
+ // environment (sender/receiver hardware performance, network conditions,
+ // etc.).
+ const base::TimeDelta target_playout_delay_;
+
+ // Hack: This is used in logic that determines whether to skip frames.
+ const base::TimeDelta expected_frame_duration_;
+
+ // Set to false initially, then set to true after scheduling the periodic
+ // sending of reports back to the sender. Reports are first scheduled just
+ // after receiving a first packet (since the first packet identifies the
+ // sender for the remainder of the session).
+ bool reports_are_scheduled_;
+
+ // Assembles packets into frames, providing this receiver with complete,
+ // decodable EncodedFrames.
Framer framer_;
+
+ // Decodes frames into raw audio for playback.
scoped_ptr<AudioDecoder> audio_decoder_;
+
+ // Manages sending/receiving of RTCP packets, including sender/receiver
+ // reports.
Rtcp rtcp_;
- base::TimeDelta time_offset_;
- base::TimeTicks time_first_incoming_packet_;
- uint32 first_incoming_rtp_timestamp_;
+
+ // Decrypts encrypted frames.
transport::TransportEncryptionHandler decryptor_;
// Outstanding callbacks to run to deliver on client requests for frames.
- std::list<AudioFrameEncodedCallback> frame_request_queue_;
+ std::list<FrameEncodedCallback> frame_request_queue_;
// True while there's an outstanding task to re-invoke
// EmitAvailableEncodedFrames().
bool is_waiting_for_consecutive_frame_;
- // This mapping allows us to log kAudioAckSent as a frame event. In addition
+ // This mapping allows us to log AUDIO_ACK_SENT as a frame event. In addition
// it allows the event to be transmitted via RTCP.
RtpTimestamp frame_id_to_rtp_timestamp_[256];
+ // Lip-sync values used to compute the playout time of each frame from its RTP
+ // timestamp. These are updated each time the first packet of a frame is
+ // received.
+ RtpTimestamp lip_sync_rtp_timestamp_;
+ base::TimeTicks lip_sync_reference_time_;
+ ClockDriftSmoother lip_sync_drift_;
+
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<AudioReceiver> weak_factory_;
diff --git a/media/cast/audio_receiver/audio_receiver_unittest.cc b/media/cast/audio_receiver/audio_receiver_unittest.cc
index 8b7e706dff..e53c1b9310 100644
--- a/media/cast/audio_receiver/audio_receiver_unittest.cc
+++ b/media/cast/audio_receiver/audio_receiver_unittest.cc
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <deque>
+#include <utility>
+
#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
@@ -12,47 +15,48 @@
#include "media/cast/logging/simple_event_subscriber.h"
#include "media/cast/rtcp/test_rtcp_packet_builder.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/test/utility/default_config.h"
#include "media/cast/transport/pacing/mock_paced_packet_sender.h"
#include "testing/gmock/include/gmock/gmock.h"
+using ::testing::_;
+
namespace media {
namespace cast {
-using ::testing::_;
-
namespace {
-const int64 kStartMillisecond = INT64_C(12345678900000);
const uint32 kFirstFrameId = 1234;
+const int kPlayoutDelayMillis = 300;
class FakeAudioClient {
public:
FakeAudioClient() : num_called_(0) {}
virtual ~FakeAudioClient() {}
- void SetNextExpectedResult(uint32 expected_frame_id,
- const base::TimeTicks& expected_playout_time) {
- expected_frame_id_ = expected_frame_id;
- expected_playout_time_ = expected_playout_time;
+ void AddExpectedResult(uint32 expected_frame_id,
+ const base::TimeTicks& expected_playout_time) {
+ expected_results_.push_back(
+ std::make_pair(expected_frame_id, expected_playout_time));
}
void DeliverEncodedAudioFrame(
- scoped_ptr<transport::EncodedAudioFrame> audio_frame,
- const base::TimeTicks& playout_time) {
+ scoped_ptr<transport::EncodedFrame> audio_frame) {
+ SCOPED_TRACE(::testing::Message() << "num_called_ is " << num_called_);
ASSERT_FALSE(!audio_frame)
<< "If at shutdown: There were unsatisfied requests enqueued.";
- EXPECT_EQ(expected_frame_id_, audio_frame->frame_id);
- EXPECT_EQ(transport::kPcm16, audio_frame->codec);
- EXPECT_EQ(expected_playout_time_, playout_time);
+ ASSERT_FALSE(expected_results_.empty());
+ EXPECT_EQ(expected_results_.front().first, audio_frame->frame_id);
+ EXPECT_EQ(expected_results_.front().second, audio_frame->reference_time);
+ expected_results_.pop_front();
num_called_++;
}
int number_times_called() const { return num_called_; }
private:
+ std::deque<std::pair<uint32, base::TimeTicks> > expected_results_;
int num_called_;
- uint32 expected_frame_id_;
- base::TimeTicks expected_playout_time_;
DISALLOW_COPY_AND_ASSIGN(FakeAudioClient);
};
@@ -63,15 +67,14 @@ class AudioReceiverTest : public ::testing::Test {
protected:
AudioReceiverTest() {
// Configure the audio receiver to use PCM16.
- audio_config_.rtp_payload_type = 127;
+ audio_config_ = GetDefaultAudioReceiverConfig();
+ audio_config_.rtp_max_delay_ms = kPlayoutDelayMillis;
audio_config_.frequency = 16000;
audio_config_.channels = 1;
- audio_config_.codec = transport::kPcm16;
- audio_config_.use_external_decoder = true;
- audio_config_.feedback_ssrc = 1234;
+ audio_config_.codec.audio = transport::kPcm16;
testing_clock_ = new base::SimpleTestTickClock();
- testing_clock_->Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
+ start_time_ = testing_clock_->NowTicks();
task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
cast_environment_ = new CastEnvironment(
@@ -92,7 +95,7 @@ class AudioReceiverTest : public ::testing::Test {
rtp_header_.frame_id = kFirstFrameId;
rtp_header_.packet_id = 0;
rtp_header_.max_packet_id = 0;
- rtp_header_.reference_frame_id = 0;
+ rtp_header_.reference_frame_id = rtp_header_.frame_id;
rtp_header_.rtp_timestamp = 0;
}
@@ -101,10 +104,26 @@ class AudioReceiverTest : public ::testing::Test {
payload_.data(), payload_.size(), rtp_header_);
}
- AudioReceiverConfig audio_config_;
+ void FeedLipSyncInfoIntoReceiver() {
+ const base::TimeTicks now = testing_clock_->NowTicks();
+ const int64 rtp_timestamp = (now - start_time_) *
+ audio_config_.frequency / base::TimeDelta::FromSeconds(1);
+ CHECK_LE(0, rtp_timestamp);
+ uint32 ntp_seconds;
+ uint32 ntp_fraction;
+ ConvertTimeTicksToNtp(now, &ntp_seconds, &ntp_fraction);
+ TestRtcpPacketBuilder rtcp_packet;
+ rtcp_packet.AddSrWithNtp(audio_config_.incoming_ssrc,
+ ntp_seconds, ntp_fraction,
+ static_cast<uint32>(rtp_timestamp));
+ receiver_->IncomingPacket(rtcp_packet.GetPacket().Pass());
+ }
+
+ FrameReceiverConfig audio_config_;
std::vector<uint8> payload_;
RtpCastHeader rtp_header_;
base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
+ base::TimeTicks start_time_;
transport::MockPacedPacketSender mock_transport_;
scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
scoped_refptr<CastEnvironment> cast_environment_;
@@ -115,11 +134,15 @@ class AudioReceiverTest : public ::testing::Test {
scoped_ptr<AudioReceiver> receiver_;
};
-TEST_F(AudioReceiverTest, GetOnePacketEncodedFrame) {
+TEST_F(AudioReceiverTest, ReceivesOneFrame) {
SimpleEventSubscriber event_subscriber;
cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber);
- EXPECT_CALL(mock_transport_, SendRtcpPacket(_, _)).Times(1);
+ EXPECT_CALL(mock_transport_, SendRtcpPacket(_, _))
+ .WillRepeatedly(testing::Return(true));
+
+ FeedLipSyncInfoIntoReceiver();
+ task_runner_->RunTasks();
// Enqueue a request for an audio frame.
receiver_->GetEncodedAudioFrame(
@@ -131,8 +154,10 @@ TEST_F(AudioReceiverTest, GetOnePacketEncodedFrame) {
EXPECT_EQ(0, fake_audio_client_.number_times_called());
// Deliver one audio frame to the receiver and expect to get one frame back.
- fake_audio_client_.SetNextExpectedResult(kFirstFrameId,
- testing_clock_->NowTicks());
+ const base::TimeDelta target_playout_delay =
+ base::TimeDelta::FromMilliseconds(kPlayoutDelayMillis);
+ fake_audio_client_.AddExpectedResult(
+ kFirstFrameId, testing_clock_->NowTicks() + target_playout_delay);
FeedOneFrameIntoReceiver();
task_runner_->RunTasks();
EXPECT_EQ(1, fake_audio_client_.number_times_called());
@@ -141,19 +166,29 @@ TEST_F(AudioReceiverTest, GetOnePacketEncodedFrame) {
event_subscriber.GetFrameEventsAndReset(&frame_events);
ASSERT_TRUE(!frame_events.empty());
- EXPECT_EQ(kAudioAckSent, frame_events.begin()->type);
+ EXPECT_EQ(FRAME_ACK_SENT, frame_events.begin()->type);
+ EXPECT_EQ(AUDIO_EVENT, frame_events.begin()->media_type);
EXPECT_EQ(rtp_header_.frame_id, frame_events.begin()->frame_id);
EXPECT_EQ(rtp_header_.rtp_timestamp, frame_events.begin()->rtp_timestamp);
cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber);
}
-TEST_F(AudioReceiverTest, MultiplePendingGetCalls) {
+TEST_F(AudioReceiverTest, ReceivesFramesSkippingWhenAppropriate) {
EXPECT_CALL(mock_transport_, SendRtcpPacket(_, _))
.WillRepeatedly(testing::Return(true));
+ const uint32 rtp_advance_per_frame =
+ audio_config_.frequency / audio_config_.max_frame_rate;
+ const base::TimeDelta time_advance_per_frame =
+ base::TimeDelta::FromSeconds(1) / audio_config_.max_frame_rate;
+
+ FeedLipSyncInfoIntoReceiver();
+ task_runner_->RunTasks();
+ const base::TimeTicks first_frame_capture_time = testing_clock_->NowTicks();
+
// Enqueue a request for an audio frame.
- const AudioFrameEncodedCallback frame_encoded_callback =
+ const FrameEncodedCallback frame_encoded_callback =
base::Bind(&FakeAudioClient::DeliverEncodedAudioFrame,
base::Unretained(&fake_audio_client_));
receiver_->GetEncodedAudioFrame(frame_encoded_callback);
@@ -161,24 +196,15 @@ TEST_F(AudioReceiverTest, MultiplePendingGetCalls) {
EXPECT_EQ(0, fake_audio_client_.number_times_called());
// Receive one audio frame and expect to see the first request satisfied.
- fake_audio_client_.SetNextExpectedResult(kFirstFrameId,
- testing_clock_->NowTicks());
+ const base::TimeDelta target_playout_delay =
+ base::TimeDelta::FromMilliseconds(kPlayoutDelayMillis);
+ fake_audio_client_.AddExpectedResult(
+ kFirstFrameId, first_frame_capture_time + target_playout_delay);
+ rtp_header_.rtp_timestamp = 0;
FeedOneFrameIntoReceiver();
task_runner_->RunTasks();
EXPECT_EQ(1, fake_audio_client_.number_times_called());
- TestRtcpPacketBuilder rtcp_packet;
-
- uint32 ntp_high;
- uint32 ntp_low;
- ConvertTimeTicksToNtp(testing_clock_->NowTicks(), &ntp_high, &ntp_low);
- rtcp_packet.AddSrWithNtp(audio_config_.feedback_ssrc, ntp_high, ntp_low,
- rtp_header_.rtp_timestamp);
-
- testing_clock_->Advance(base::TimeDelta::FromMilliseconds(20));
-
- receiver_->IncomingPacket(rtcp_packet.GetPacket().Pass());
-
// Enqueue a second request for an audio frame, but it should not be
// fulfilled yet.
receiver_->GetEncodedAudioFrame(frame_encoded_callback);
@@ -190,10 +216,11 @@ TEST_F(AudioReceiverTest, MultiplePendingGetCalls) {
rtp_header_.is_key_frame = false;
rtp_header_.frame_id = kFirstFrameId + 2;
rtp_header_.reference_frame_id = 0;
- rtp_header_.rtp_timestamp = 960;
- fake_audio_client_.SetNextExpectedResult(
+ rtp_header_.rtp_timestamp += 2 * rtp_advance_per_frame;
+ fake_audio_client_.AddExpectedResult(
kFirstFrameId + 2,
- testing_clock_->NowTicks() + base::TimeDelta::FromMilliseconds(100));
+ first_frame_capture_time + 2 * time_advance_per_frame +
+ target_playout_delay);
FeedOneFrameIntoReceiver();
// Frame 2 should not come out at this point in time.
@@ -205,25 +232,27 @@ TEST_F(AudioReceiverTest, MultiplePendingGetCalls) {
task_runner_->RunTasks();
EXPECT_EQ(1, fake_audio_client_.number_times_called());
- // After 100 ms has elapsed, Frame 2 is emitted (to satisfy the second
- // request) because a decision was made to skip over the no-show Frame 1.
- testing_clock_->Advance(base::TimeDelta::FromMilliseconds(100));
+ // Now, advance time forward such that the receiver is convinced it should
+ // skip Frame 2. Frame 3 is emitted (to satisfy the second request) because a
+ // decision was made to skip over the no-show Frame 2.
+ testing_clock_->Advance(2 * time_advance_per_frame + target_playout_delay);
task_runner_->RunTasks();
EXPECT_EQ(2, fake_audio_client_.number_times_called());
- // Receive Frame 3 and expect it to fulfill the third request immediately.
+ // Receive Frame 4 and expect it to fulfill the third request immediately.
rtp_header_.frame_id = kFirstFrameId + 3;
rtp_header_.reference_frame_id = rtp_header_.frame_id - 1;
- rtp_header_.rtp_timestamp = 1280;
- fake_audio_client_.SetNextExpectedResult(kFirstFrameId + 3,
- testing_clock_->NowTicks());
+ rtp_header_.rtp_timestamp += rtp_advance_per_frame;
+ fake_audio_client_.AddExpectedResult(
+ kFirstFrameId + 3, first_frame_capture_time + 3 * time_advance_per_frame +
+ target_playout_delay);
FeedOneFrameIntoReceiver();
task_runner_->RunTasks();
EXPECT_EQ(3, fake_audio_client_.number_times_called());
- // Move forward another 100 ms and run any pending tasks (there should be
- // none). Expect no additional frames where emitted.
- testing_clock_->Advance(base::TimeDelta::FromMilliseconds(100));
+ // Move forward to the playout time of an unreceived Frame 5. Expect no
+ // additional frames were emitted.
+ testing_clock_->Advance(3 * time_advance_per_frame);
task_runner_->RunTasks();
EXPECT_EQ(3, fake_audio_client_.number_times_called());
}
diff --git a/media/cast/audio_sender/audio_encoder.cc b/media/cast/audio_sender/audio_encoder.cc
index 6f317267e9..f81ad26377 100644
--- a/media/cast/audio_sender/audio_encoder.cc
+++ b/media/cast/audio_sender/audio_encoder.cc
@@ -49,7 +49,8 @@ void LogAudioFrameEncodedEvent(
return;
}
cast_environment->Logging()->InsertEncodedFrameEvent(
- event_time, kAudioFrameEncoded, rtp_timestamp, frame_id,
+ event_time, media::cast::FRAME_ENCODED, media::cast::AUDIO_EVENT,
+ rtp_timestamp, frame_id,
static_cast<int>(frame_size), /* key_frame - unused */ false,
/*target_bitrate - unused*/ 0);
}
@@ -59,7 +60,7 @@ void LogAudioFrameEncodedEvent(
// Base class that handles the common problem of feeding one or more AudioBus'
// data into a buffer and then, once the buffer is full, encoding the signal and
-// emitting an EncodedAudioFrame via the FrameEncodedCallback.
+// emitting an EncodedFrame via the FrameEncodedCallback.
//
// Subclasses complete the implementation by handling the actual encoding
// details.
@@ -80,10 +81,11 @@ class AudioEncoder::ImplBase
buffer_fill_end_(0),
frame_id_(0),
frame_rtp_timestamp_(0) {
+ // Support for max sampling rate of 48KHz, 2 channels, 100 ms duration.
+ const int kMaxSamplesTimesChannelsPerFrame = 48 * 2 * 100;
if (num_channels_ <= 0 || samples_per_frame_ <= 0 ||
sampling_rate % kFramesPerSecond != 0 ||
- samples_per_frame_ * num_channels_ >
- transport::EncodedAudioFrame::kMaxNumberOfSamples) {
+ samples_per_frame_ * num_channels_ > kMaxSamplesTimesChannelsPerFrame) {
cast_initialization_status_ = STATUS_INVALID_AUDIO_CONFIGURATION;
}
}
@@ -139,11 +141,13 @@ class AudioEncoder::ImplBase
if (buffer_fill_end_ < samples_per_frame_)
break;
- scoped_ptr<transport::EncodedAudioFrame> audio_frame(
- new transport::EncodedAudioFrame());
- audio_frame->codec = codec_;
+ scoped_ptr<transport::EncodedFrame> audio_frame(
+ new transport::EncodedFrame());
+ audio_frame->dependency = transport::EncodedFrame::KEY;
audio_frame->frame_id = frame_id_;
+ audio_frame->referenced_frame_id = frame_id_;
audio_frame->rtp_timestamp = frame_rtp_timestamp_;
+ audio_frame->reference_time = frame_capture_time_;
if (EncodeFromFilledBuffer(&audio_frame->data)) {
LogAudioFrameEncodedEvent(cast_environment_,
@@ -154,9 +158,7 @@ class AudioEncoder::ImplBase
cast_environment_->PostTask(
CastEnvironment::MAIN,
FROM_HERE,
- base::Bind(callback_,
- base::Passed(&audio_frame),
- frame_capture_time_));
+ base::Bind(callback_, base::Passed(&audio_frame)));
}
// Reset the internal buffer, frame ID, and timestamps for the next frame.
@@ -192,7 +194,7 @@ class AudioEncoder::ImplBase
// call.
int buffer_fill_end_;
- // A counter used to label EncodedAudioFrames.
+ // A counter used to label EncodedFrames.
uint32 frame_id_;
// The RTP timestamp for the next frame of encoded audio. This is defined as
diff --git a/media/cast/audio_sender/audio_encoder.h b/media/cast/audio_sender/audio_encoder.h
index d4c9e0f392..2297672b74 100644
--- a/media/cast/audio_sender/audio_encoder.h
+++ b/media/cast/audio_sender/audio_encoder.h
@@ -21,8 +21,8 @@ namespace cast {
class AudioEncoder {
public:
- typedef base::Callback<void(scoped_ptr<transport::EncodedAudioFrame>,
- const base::TimeTicks&)> FrameEncodedCallback;
+ typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+ FrameEncodedCallback;
AudioEncoder(const scoped_refptr<CastEnvironment>& cast_environment,
const AudioSenderConfig& audio_config,
diff --git a/media/cast/audio_sender/audio_encoder_unittest.cc b/media/cast/audio_sender/audio_encoder_unittest.cc
index 0ca07bb6e9..b521099243 100644
--- a/media/cast/audio_sender/audio_encoder_unittest.cc
+++ b/media/cast/audio_sender/audio_encoder_unittest.cc
@@ -40,11 +40,11 @@ class TestEncodedAudioFrameReceiver {
upper_bound_ = upper_bound;
}
- void FrameEncoded(scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
- const base::TimeTicks& recorded_time) {
- EXPECT_EQ(codec_, encoded_frame->codec);
+ void FrameEncoded(scoped_ptr<transport::EncodedFrame> encoded_frame) {
+ EXPECT_EQ(encoded_frame->dependency, transport::EncodedFrame::KEY);
EXPECT_EQ(static_cast<uint8>(frames_received_ & 0xff),
encoded_frame->frame_id);
+ EXPECT_EQ(encoded_frame->frame_id, encoded_frame->referenced_frame_id);
// RTP timestamps should be monotonically increasing and integer multiples
// of the fixed frame size.
EXPECT_LE(rtp_lower_bound_, encoded_frame->rtp_timestamp);
@@ -54,9 +54,9 @@ class TestEncodedAudioFrameReceiver {
EXPECT_EQ(0u, encoded_frame->rtp_timestamp % kSamplesPerFrame);
EXPECT_TRUE(!encoded_frame->data.empty());
- EXPECT_LE(lower_bound_, recorded_time);
- lower_bound_ = recorded_time;
- EXPECT_GT(upper_bound_, recorded_time);
+ EXPECT_LE(lower_bound_, encoded_frame->reference_time);
+ lower_bound_ = encoded_frame->reference_time;
+ EXPECT_GT(upper_bound_, encoded_frame->reference_time);
++frames_received_;
}
@@ -123,8 +123,8 @@ class AudioEncoderTest : public ::testing::TestWithParam<TestScenario> {
for (size_t i = 0; i < scenario.num_durations; ++i) {
const bool simulate_missing_data = scenario.durations_in_ms[i] < 0;
- const base::TimeDelta duration =
- base::TimeDelta::FromMilliseconds(abs(scenario.durations_in_ms[i]));
+ const base::TimeDelta duration = base::TimeDelta::FromMilliseconds(
+ std::abs(scenario.durations_in_ms[i]));
receiver_->SetCaptureTimeBounds(
testing_clock_->NowTicks() - frame_duration,
testing_clock_->NowTicks() + duration);
diff --git a/media/cast/audio_sender/audio_sender.cc b/media/cast/audio_sender/audio_sender.cc
index c0393b8023..513665a78e 100644
--- a/media/cast/audio_sender/audio_sender.cc
+++ b/media/cast/audio_sender/audio_sender.cc
@@ -13,27 +13,8 @@
namespace media {
namespace cast {
-const int64 kMinSchedulingDelayMs = 1;
-
-class LocalRtcpAudioSenderFeedback : public RtcpSenderFeedback {
- public:
- explicit LocalRtcpAudioSenderFeedback(AudioSender* audio_sender)
- : audio_sender_(audio_sender) {}
-
- virtual void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback)
- OVERRIDE {
- if (!cast_feedback.missing_frames_and_packets_.empty()) {
- audio_sender_->ResendPackets(cast_feedback.missing_frames_and_packets_);
- }
- VLOG(2) << "Received audio ACK "
- << static_cast<int>(cast_feedback.ack_frame_id_);
- }
-
- private:
- AudioSender* audio_sender_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(LocalRtcpAudioSenderFeedback);
-};
+const int kNumAggressiveReportsSentAtStart = 100;
+const int kMinSchedulingDelayMs = 1;
// TODO(mikhal): Reduce heap allocation when not needed.
AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
@@ -41,10 +22,9 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
transport::CastTransportSender* const transport_sender)
: cast_environment_(cast_environment),
transport_sender_(transport_sender),
- rtp_stats_(audio_config.frequency),
- rtcp_feedback_(new LocalRtcpAudioSenderFeedback(this)),
+ rtp_timestamp_helper_(audio_config.frequency),
rtcp_(cast_environment,
- rtcp_feedback_.get(),
+ this,
transport_sender_,
NULL, // paced sender.
NULL,
@@ -52,8 +32,9 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
audio_config.rtp_config.ssrc,
audio_config.incoming_feedback_ssrc,
- audio_config.rtcp_c_name),
- timers_initialized_(false),
+ audio_config.rtcp_c_name,
+ true),
+ num_aggressive_rtcp_reports_sent_(0),
cast_initialization_cb_(STATUS_AUDIO_UNINITIALIZED),
weak_factory_(this) {
rtcp_.SetCastReceiverEventHistorySize(kReceiverRtcpEventHistorySize);
@@ -74,21 +55,10 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
transport_config.rtp.max_outstanding_frames =
audio_config.rtp_config.max_delay_ms / 100 + 1;
transport_sender_->InitializeAudio(transport_config);
-
- transport_sender_->SubscribeAudioRtpStatsCallback(
- base::Bind(&AudioSender::StoreStatistics, weak_factory_.GetWeakPtr()));
}
AudioSender::~AudioSender() {}
-void AudioSender::InitializeTimers() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- if (!timers_initialized_) {
- timers_initialized_ = true;
- ScheduleNextRtcpReport();
- }
-}
-
void AudioSender::InsertAudio(scoped_ptr<AudioBus> audio_bus,
const base::TimeTicks& recorded_time) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
@@ -97,11 +67,28 @@ void AudioSender::InsertAudio(scoped_ptr<AudioBus> audio_bus,
}
void AudioSender::SendEncodedAudioFrame(
- scoped_ptr<transport::EncodedAudioFrame> audio_frame,
- const base::TimeTicks& recorded_time) {
+ scoped_ptr<transport::EncodedFrame> audio_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- InitializeTimers();
- transport_sender_->InsertCodedAudioFrame(audio_frame.get(), recorded_time);
+ DCHECK(!audio_frame->reference_time.is_null());
+ rtp_timestamp_helper_.StoreLatestTime(audio_frame->reference_time,
+ audio_frame->rtp_timestamp);
+
+ // At the start of the session, it's important to send reports before each
+ // frame so that the receiver can properly compute playout times. The reason
+ // more than one report is sent is because transmission is not guaranteed,
+ // only best effort, so we send enough that one should almost certainly get
+ // through.
+ if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
+ // SendRtcpReport() will schedule future reports to be made if this is the
+ // last "aggressive report."
+ ++num_aggressive_rtcp_reports_sent_;
+ const bool is_last_aggressive_report =
+ (num_aggressive_rtcp_reports_sent_ == kNumAggressiveReportsSentAtStart);
+ VLOG_IF(1, is_last_aggressive_report) << "Sending last aggressive report.";
+ SendRtcpReport(is_last_aggressive_report);
+ }
+
+ transport_sender_->InsertCodedAudioFrame(*audio_frame);
}
void AudioSender::ResendPackets(
@@ -126,25 +113,47 @@ void AudioSender::ScheduleNextRtcpReport() {
cast_environment_->PostDelayedTask(
CastEnvironment::MAIN,
FROM_HERE,
- base::Bind(&AudioSender::SendRtcpReport, weak_factory_.GetWeakPtr()),
+ base::Bind(&AudioSender::SendRtcpReport,
+ weak_factory_.GetWeakPtr(),
+ true),
time_to_next);
}
-void AudioSender::StoreStatistics(
- const transport::RtcpSenderInfo& sender_info,
- base::TimeTicks time_sent,
- uint32 rtp_timestamp) {
- rtp_stats_.Store(sender_info, time_sent, rtp_timestamp);
+void AudioSender::SendRtcpReport(bool schedule_future_reports) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ uint32 now_as_rtp_timestamp = 0;
+ if (rtp_timestamp_helper_.GetCurrentTimeAsRtpTimestamp(
+ now, &now_as_rtp_timestamp)) {
+ rtcp_.SendRtcpFromRtpSender(now, now_as_rtp_timestamp);
+ } else {
+ // |rtp_timestamp_helper_| should have stored a mapping by this point.
+ NOTREACHED();
+ }
+ if (schedule_future_reports)
+ ScheduleNextRtcpReport();
}
-void AudioSender::SendRtcpReport() {
+void AudioSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- // We don't send audio logging messages since all captured audio frames will
- // be sent.
- transport::RtcpSenderLogMessage empty_msg;
- rtp_stats_.UpdateInfo(cast_environment_->Clock()->NowTicks());
- rtcp_.SendRtcpFromRtpSender(empty_msg, rtp_stats_.sender_info());
- ScheduleNextRtcpReport();
+
+ if (rtcp_.is_rtt_available()) {
+ // Having the RTT values implies the receiver sent back a receiver report
+ // based on it having received a report from here. Therefore, ensure this
+ // sender stops aggressively sending reports.
+ if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
+ VLOG(1) << "No longer a need to send reports aggressively (sent "
+ << num_aggressive_rtcp_reports_sent_ << ").";
+ num_aggressive_rtcp_reports_sent_ = kNumAggressiveReportsSentAtStart;
+ ScheduleNextRtcpReport();
+ }
+ }
+
+ if (!cast_feedback.missing_frames_and_packets_.empty()) {
+ ResendPackets(cast_feedback.missing_frames_and_packets_);
+ }
+ VLOG(2) << "Received audio ACK "
+ << static_cast<int>(cast_feedback.ack_frame_id_);
}
} // namespace cast
diff --git a/media/cast/audio_sender/audio_sender.h b/media/cast/audio_sender/audio_sender.h
index 1a2eedd3e4..8911320b3f 100644
--- a/media/cast/audio_sender/audio_sender.h
+++ b/media/cast/audio_sender/audio_sender.h
@@ -15,17 +15,18 @@
#include "media/base/audio_bus.h"
#include "media/cast/cast_config.h"
#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_timestamp_helper.h"
#include "media/cast/transport/rtp_sender/rtp_sender.h"
namespace media {
namespace cast {
class AudioEncoder;
-class LocalRtcpAudioSenderFeedback;
// This class is not thread safe.
// It's only called from the main cast thread.
-class AudioSender : public base::NonThreadSafe,
+class AudioSender : public RtcpSenderFeedback,
+ public base::NonThreadSafe,
public base::SupportsWeakPtr<AudioSender> {
public:
AudioSender(scoped_refptr<CastEnvironment> cast_environment,
@@ -45,32 +46,24 @@ class AudioSender : public base::NonThreadSafe,
void IncomingRtcpPacket(scoped_ptr<Packet> packet);
protected:
- void SendEncodedAudioFrame(
- scoped_ptr<transport::EncodedAudioFrame> audio_frame,
- const base::TimeTicks& recorded_time);
+ void SendEncodedAudioFrame(scoped_ptr<transport::EncodedFrame> audio_frame);
private:
- friend class LocalRtcpAudioSenderFeedback;
-
void ResendPackets(
const MissingFramesAndPacketsMap& missing_frames_and_packets);
- void StoreStatistics(const transport::RtcpSenderInfo& sender_info,
- base::TimeTicks time_sent,
- uint32 rtp_timestamp);
-
void ScheduleNextRtcpReport();
- void SendRtcpReport();
+ void SendRtcpReport(bool schedule_future_reports);
- void InitializeTimers();
+ virtual void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback)
+ OVERRIDE;
scoped_refptr<CastEnvironment> cast_environment_;
transport::CastTransportSender* const transport_sender_;
scoped_ptr<AudioEncoder> audio_encoder_;
- RtpSenderStatistics rtp_stats_;
- scoped_ptr<LocalRtcpAudioSenderFeedback> rtcp_feedback_;
+ RtpTimestampHelper rtp_timestamp_helper_;
Rtcp rtcp_;
- bool timers_initialized_;
+ int num_aggressive_rtcp_reports_sent_;
CastInitializationStatus cast_initialization_cb_;
// NOTE: Weak pointers must be invalidated before all other member variables.
diff --git a/media/cast/audio_sender/audio_sender_unittest.cc b/media/cast/audio_sender/audio_sender_unittest.cc
index 047d2e4331..51edd49602 100644
--- a/media/cast/audio_sender/audio_sender_unittest.cc
+++ b/media/cast/audio_sender/audio_sender_unittest.cc
@@ -22,8 +22,6 @@
namespace media {
namespace cast {
-static const int64 kStartMillisecond = INT64_C(12345678900000);
-
class TestPacketSender : public transport::PacketSender {
public:
TestPacketSender() : number_of_rtp_packets_(0), number_of_rtcp_packets_(0) {}
@@ -33,6 +31,12 @@ class TestPacketSender : public transport::PacketSender {
if (Rtcp::IsRtcpPacket(&packet->data[0], packet->data.size())) {
++number_of_rtcp_packets_;
} else {
+ // Check that at least one RTCP packet was sent before the first RTP
+ // packet. This confirms that the receiver will have the necessary lip
+ // sync info before it has to calculate the playout time of the first
+ // frame.
+ if (number_of_rtp_packets_ == 0)
+ EXPECT_LE(1, number_of_rtcp_packets_);
++number_of_rtp_packets_;
}
return true;
@@ -54,8 +58,7 @@ class AudioSenderTest : public ::testing::Test {
AudioSenderTest() {
InitializeMediaLibraryForTesting();
testing_clock_ = new base::SimpleTestTickClock();
- testing_clock_->Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
cast_environment_ =
new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
@@ -88,7 +91,7 @@ class AudioSenderTest : public ::testing::Test {
virtual ~AudioSenderTest() {}
static void UpdateCastTransportStatus(transport::CastTransportStatus status) {
- EXPECT_EQ(status, transport::TRANSPORT_AUDIO_INITIALIZED);
+ EXPECT_EQ(transport::TRANSPORT_AUDIO_INITIALIZED, status);
}
base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
@@ -108,12 +111,10 @@ TEST_F(AudioSenderTest, Encode20ms) {
TestAudioBusFactory::kMiddleANoteFreq,
0.5f).NextAudioBus(kDuration));
- base::TimeTicks recorded_time = base::TimeTicks::Now();
- audio_sender_->InsertAudio(bus.Pass(), recorded_time);
+ audio_sender_->InsertAudio(bus.Pass(), testing_clock_->NowTicks());
task_runner_->RunTasks();
- EXPECT_GE(
- transport_.number_of_rtp_packets() + transport_.number_of_rtcp_packets(),
- 1);
+ EXPECT_LE(1, transport_.number_of_rtp_packets());
+ EXPECT_LE(1, transport_.number_of_rtcp_packets());
}
TEST_F(AudioSenderTest, RtcpTimer) {
@@ -124,8 +125,7 @@ TEST_F(AudioSenderTest, RtcpTimer) {
TestAudioBusFactory::kMiddleANoteFreq,
0.5f).NextAudioBus(kDuration));
- base::TimeTicks recorded_time = base::TimeTicks::Now();
- audio_sender_->InsertAudio(bus.Pass(), recorded_time);
+ audio_sender_->InsertAudio(bus.Pass(), testing_clock_->NowTicks());
task_runner_->RunTasks();
// Make sure that we send at least one RTCP packet.
@@ -133,8 +133,8 @@ TEST_F(AudioSenderTest, RtcpTimer) {
base::TimeDelta::FromMilliseconds(1 + kDefaultRtcpIntervalMs * 3 / 2);
testing_clock_->Advance(max_rtcp_timeout);
task_runner_->RunTasks();
- EXPECT_GE(transport_.number_of_rtp_packets(), 1);
- EXPECT_EQ(transport_.number_of_rtcp_packets(), 1);
+ EXPECT_LE(1, transport_.number_of_rtp_packets());
+ EXPECT_LE(1, transport_.number_of_rtcp_packets());
}
} // namespace cast
diff --git a/media/cast/base/clock_drift_smoother.cc b/media/cast/base/clock_drift_smoother.cc
new file mode 100644
index 0000000000..ca0380533e
--- /dev/null
+++ b/media/cast/base/clock_drift_smoother.cc
@@ -0,0 +1,58 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/base/clock_drift_smoother.h"
+
+#include "base/logging.h"
+
+namespace media {
+namespace cast {
+
+ClockDriftSmoother::ClockDriftSmoother(base::TimeDelta time_constant)
+ : time_constant_(time_constant),
+ estimate_us_(0.0) {
+ DCHECK(time_constant_ > base::TimeDelta());
+}
+
+ClockDriftSmoother::~ClockDriftSmoother() {}
+
+base::TimeDelta ClockDriftSmoother::Current() const {
+ DCHECK(!last_update_time_.is_null());
+ return base::TimeDelta::FromMicroseconds(
+ static_cast<int64>(estimate_us_ + 0.5)); // Round to nearest microsecond.
+}
+
+void ClockDriftSmoother::Reset(base::TimeTicks now,
+ base::TimeDelta measured_offset) {
+ DCHECK(!now.is_null());
+ last_update_time_ = now;
+ estimate_us_ = measured_offset.InMicroseconds();
+}
+
+void ClockDriftSmoother::Update(base::TimeTicks now,
+ base::TimeDelta measured_offset) {
+ DCHECK(!now.is_null());
+ if (last_update_time_.is_null()) {
+ Reset(now, measured_offset);
+ } else if (now < last_update_time_) {
+ // |now| is not monotonically non-decreasing.
+ NOTREACHED();
+ } else {
+ const double elapsed_us = (now - last_update_time_).InMicroseconds();
+ last_update_time_ = now;
+ const double weight =
+ elapsed_us / (elapsed_us + time_constant_.InMicroseconds());
+ estimate_us_ = weight * measured_offset.InMicroseconds() +
+ (1.0 - weight) * estimate_us_;
+ }
+}
+
+// static
+base::TimeDelta ClockDriftSmoother::GetDefaultTimeConstant() {
+ static const int kDefaultTimeConstantInSeconds = 30;
+ return base::TimeDelta::FromSeconds(kDefaultTimeConstantInSeconds);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/base/clock_drift_smoother.h b/media/cast/base/clock_drift_smoother.h
new file mode 100644
index 0000000000..67de4cb51a
--- /dev/null
+++ b/media/cast/base/clock_drift_smoother.h
@@ -0,0 +1,52 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_BASE_CLOCK_DRIFT_SMOOTHER_H_
+#define MEDIA_CAST_BASE_CLOCK_DRIFT_SMOOTHER_H_
+
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+
+// Tracks the jitter and drift between clocks, providing a smoothed offset.
+// Internally, a Simple IIR filter is used to maintain a running average that
+// moves at a rate based on the passage of time.
+class ClockDriftSmoother {
+ public:
+ // |time_constant| is the amount of time an impulse signal takes to decay by
+ // ~62.6%. Interpretation: If the value passed to several Update() calls is
+ // held constant for T seconds, then the running average will have moved
+ // towards the value by ~62.6% from where it started.
+ explicit ClockDriftSmoother(base::TimeDelta time_constant);
+ ~ClockDriftSmoother();
+
+ // Returns the current offset.
+ base::TimeDelta Current() const;
+
+ // Discard all history and reset to exactly |offset|, measured |now|.
+ void Reset(base::TimeTicks now, base::TimeDelta offset);
+
+ // Update the current offset, which was measured |now|. The weighting that
+ // |measured_offset| will have on the running average is influenced by how
+ // much time has passed since the last call to this method (or Reset()).
+ // |now| should be monotonically non-decreasing over successive calls of this
+ // method.
+ void Update(base::TimeTicks now, base::TimeDelta measured_offset);
+
+ // Returns a time constant suitable for most use cases, where the clocks
+ // are expected to drift very little with respect to each other, and the
+ // jitter caused by clock imprecision is effectively canceled out.
+ static base::TimeDelta GetDefaultTimeConstant();
+
+ private:
+ const base::TimeDelta time_constant_;
+ base::TimeTicks last_update_time_;
+ double estimate_us_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_BASE_CLOCK_DRIFT_SMOOTHER_H_
diff --git a/media/cast/cast.gyp b/media/cast/cast.gyp
index d489a6a5b0..4f0c2dd4a9 100644
--- a/media/cast/cast.gyp
+++ b/media/cast/cast.gyp
@@ -33,6 +33,8 @@
'cast_defines.h',
'cast_environment.cc',
'cast_environment.h',
+ 'base/clock_drift_smoother.cc',
+ 'base/clock_drift_smoother.h',
'logging/encoding_event_subscriber.cc',
'logging/encoding_event_subscriber.h',
'logging/log_deserializer.cc',
@@ -55,6 +57,8 @@
'logging/simple_event_subscriber.h',
'logging/stats_event_subscriber.cc',
'logging/stats_event_subscriber.h',
+ 'rtp_timestamp_helper.cc',
+ 'rtp_timestamp_helper.h',
'transport/cast_transport_config.cc',
'transport/cast_transport_config.h',
'transport/cast_transport_defines.h',
@@ -147,8 +151,6 @@
'rtcp/rtcp_sender.h',
'rtcp/rtcp_utility.cc',
'rtcp/rtcp_utility.h',
- 'rtcp/sender_rtcp_event_subscriber.cc',
- 'rtcp/sender_rtcp_event_subscriber.h',
'rtcp/receiver_rtcp_event_subscriber.cc',
        'rtcp/receiver_rtcp_event_subscriber.h',
], # source
diff --git a/media/cast/cast_config.cc b/media/cast/cast_config.cc
index c470269775..0e7953af01 100644
--- a/media/cast/cast_config.cc
+++ b/media/cast/cast_config.cc
@@ -48,28 +48,18 @@ AudioSenderConfig::AudioSenderConfig()
channels(0),
bitrate(0) {}
-AudioReceiverConfig::AudioReceiverConfig()
+FrameReceiverConfig::FrameReceiverConfig()
: feedback_ssrc(0),
incoming_ssrc(0),
rtcp_interval(kDefaultRtcpIntervalMs),
rtcp_mode(kRtcpReducedSize),
rtp_max_delay_ms(kDefaultRtpMaxDelayMs),
rtp_payload_type(0),
- use_external_decoder(false),
frequency(0),
- channels(0) {}
+ channels(0),
+ max_frame_rate(0) {}
-VideoReceiverConfig::VideoReceiverConfig()
- : feedback_ssrc(0),
- incoming_ssrc(0),
- rtcp_interval(kDefaultRtcpIntervalMs),
- rtcp_mode(kRtcpReducedSize),
- rtp_max_delay_ms(kDefaultRtpMaxDelayMs),
- rtp_payload_type(0),
- use_external_decoder(false),
- max_frame_rate(kDefaultMaxFrameRate),
- decoder_faster_than_max_frame_rate(true),
- codec(transport::kVp8) {}
+FrameReceiverConfig::~FrameReceiverConfig() {}
} // namespace cast
} // namespace media
diff --git a/media/cast/cast_config.h b/media/cast/cast_config.h
index 7172f6835c..ea25d6b6cf 100644
--- a/media/cast/cast_config.h
+++ b/media/cast/cast_config.h
@@ -27,6 +27,8 @@ enum RtcpMode {
kRtcpReducedSize, // Reduced-size RTCP mode is described by RFC 5506.
};
+// TODO(miu): Merge AudioSenderConfig and VideoSenderConfig and make their
+// naming/documentation consistent with FrameReceiverConfig.
struct AudioSenderConfig {
AudioSenderConfig();
@@ -74,53 +76,68 @@ struct VideoSenderConfig {
int number_of_encode_threads;
};
-struct AudioReceiverConfig {
- AudioReceiverConfig();
+// TODO(miu): Naming and minor type changes are badly needed in a later CL.
+struct FrameReceiverConfig {
+ FrameReceiverConfig();
+ ~FrameReceiverConfig();
- uint32 feedback_ssrc;
- uint32 incoming_ssrc;
+ // The receiver's SSRC identifier.
+ uint32 feedback_ssrc; // TODO(miu): Rename to receiver_ssrc for clarity.
- int rtcp_interval;
- std::string rtcp_c_name;
- RtcpMode rtcp_mode;
-
- // The time the receiver is prepared to wait for retransmissions.
- int rtp_max_delay_ms;
- int rtp_payload_type;
-
- bool use_external_decoder;
- int frequency;
- int channels;
- transport::AudioCodec codec;
-
- std::string aes_key; // Binary string of size kAesKeySize.
- std::string aes_iv_mask; // Binary string of size kAesKeySize.
-};
-
-struct VideoReceiverConfig {
- VideoReceiverConfig();
-
- uint32 feedback_ssrc;
- uint32 incoming_ssrc;
+ // The sender's SSRC identifier.
+ uint32 incoming_ssrc; // TODO(miu): Rename to sender_ssrc for clarity.
+ // Mean interval (in milliseconds) between RTCP reports.
+ // TODO(miu): Remove this since it's never not kDefaultRtcpIntervalMs.
int rtcp_interval;
+
+ // CNAME representing this receiver.
+ // TODO(miu): Remove this since it should be derived elsewhere (probably in
+ // the transport layer).
std::string rtcp_c_name;
+
+ // Determines amount of detail in RTCP reports.
+ // TODO(miu): Remove this since it's never anything but kRtcpReducedSize.
RtcpMode rtcp_mode;
- // The time the receiver is prepared to wait for retransmissions.
- int rtp_max_delay_ms;
+ // The total amount of time between a frame's capture/recording on the sender
+ // and its playback on the receiver (i.e., shown to a user). This is fixed as
+ // a value large enough to give the system sufficient time to encode,
+ // transmit/retransmit, receive, decode, and render; given its run-time
+ // environment (sender/receiver hardware performance, network conditions,
+ // etc.).
+ int rtp_max_delay_ms; // TODO(miu): Change to TimeDelta target_playout_delay.
+
+ // RTP payload type enum: Specifies the type/encoding of frame data.
int rtp_payload_type;
- bool use_external_decoder;
- int max_frame_rate;
+ // RTP timebase: The number of RTP units advanced per one second. For audio,
+ // this is the sampling rate. For video, by convention, this is 90 kHz.
+ int frequency; // TODO(miu): Rename to rtp_timebase for clarity.
- // Some HW decoders can not run faster than the frame rate, preventing it
- // from catching up after a glitch.
- bool decoder_faster_than_max_frame_rate;
- transport::VideoCodec codec;
+ // Number of channels. For audio, this is normally 2. For video, this must
+ // be 1 as Cast does not have support for stereoscopic video.
+ int channels;
- std::string aes_key; // Binary string of size kAesKeySize.
- std::string aes_iv_mask; // Binary string of size kAesKeySize.
+ // The target frame rate. For audio, this is normally 100 (i.e., frames have
+ // a duration of 10ms each). For video, this is normally 30, but any frame
+ // rate is supported.
+ int max_frame_rate; // TODO(miu): Rename to target_frame_rate.
+
+ // Codec used for the compression of signal data.
+ // TODO(miu): Merge the AudioCodec and VideoCodec enums into one so this union
+ // is not necessary.
+ union MergedCodecPlaceholder {
+ transport::AudioCodec audio;
+ transport::VideoCodec video;
+ MergedCodecPlaceholder() : audio(transport::kUnknownAudioCodec) {}
+ } codec;
+
+ // The AES crypto key and initialization vector. Each of these strings
+ // contains the data in binary form, of size kAesKeySize. If they are empty
+ // strings, crypto is not being used.
+ std::string aes_key;
+ std::string aes_iv_mask;
};
// import from media::cast::transport
diff --git a/media/cast/cast_defines.h b/media/cast/cast_defines.h
index b0f9370186..afb50e0156 100644
--- a/media/cast/cast_defines.h
+++ b/media/cast/cast_defines.h
@@ -135,13 +135,22 @@ inline base::TimeDelta ConvertFromNtpDiff(uint32 ntp_delay) {
return base::TimeDelta::FromMilliseconds(delay_ms);
}
-inline void ConvertTimeToFractions(int64 time_us,
+inline void ConvertTimeToFractions(int64 ntp_time_us,
uint32* seconds,
uint32* fractions) {
- DCHECK_GE(time_us, 0) << "Time must NOT be negative";
- *seconds = static_cast<uint32>(time_us / base::Time::kMicrosecondsPerSecond);
+ DCHECK_GE(ntp_time_us, 0) << "Time must NOT be negative";
+ const int64 seconds_component =
+ ntp_time_us / base::Time::kMicrosecondsPerSecond;
+ // NTP time will overflow in the year 2036. Also, make sure unit tests don't
+ // regress and use an origin past the year 2036. If this overflows here, the
+ // inverse calculation fails to compute the correct TimeTicks value, throwing
+ // off the entire system.
+ DCHECK_LT(seconds_component, INT64_C(4263431296))
+ << "One year left to fix the NTP year 2036 wrap-around issue!";
+ *seconds = static_cast<uint32>(seconds_component);
*fractions = static_cast<uint32>(
- (time_us % base::Time::kMicrosecondsPerSecond) * kMagicFractionalUnit);
+ (ntp_time_us % base::Time::kMicrosecondsPerSecond) *
+ kMagicFractionalUnit);
}
inline void ConvertTimeTicksToNtp(const base::TimeTicks& time,
@@ -169,6 +178,11 @@ inline base::TimeTicks ConvertNtpToTimeTicks(uint32 ntp_seconds,
return base::TimeTicks::UnixEpoch() + elapsed_since_unix_epoch;
}
+inline base::TimeDelta RtpDeltaToTimeDelta(int64 rtp_delta, int rtp_timebase) {
+ DCHECK_GT(rtp_timebase, 0);
+ return rtp_delta * base::TimeDelta::FromSeconds(1) / rtp_timebase;
+}
+
inline uint32 GetVideoRtpTimestamp(const base::TimeTicks& time_ticks) {
base::TimeTicks zero_time;
base::TimeDelta recorded_delta = time_ticks - zero_time;
@@ -176,55 +190,6 @@ inline uint32 GetVideoRtpTimestamp(const base::TimeTicks& time_ticks) {
return static_cast<uint32>(recorded_delta.InMilliseconds() * 90);
}
-class RtpSenderStatistics {
- public:
- explicit RtpSenderStatistics(int frequency)
- : frequency_(frequency),
- rtp_timestamp_(0) {
- memset(&sender_info_, 0, sizeof(sender_info_));
- }
-
- ~RtpSenderStatistics() {}
-
- void UpdateInfo(const base::TimeTicks& now) {
- // Update RTP timestamp and return last stored statistics.
- uint32 ntp_seconds = 0;
- uint32 ntp_fraction = 0;
- uint32 rtp_timestamp = 0;
- if (rtp_timestamp_ > 0) {
- base::TimeDelta time_since_last_send = now - time_sent_;
- rtp_timestamp = rtp_timestamp_ + time_since_last_send.InMilliseconds() *
- (frequency_ / 1000);
- // Update NTP time to current time.
- ConvertTimeTicksToNtp(now, &ntp_seconds, &ntp_fraction);
- }
- // Populate sender info.
- sender_info_.rtp_timestamp = rtp_timestamp;
- sender_info_.ntp_seconds = ntp_seconds;
- sender_info_.ntp_fraction = ntp_fraction;
- }
-
- transport::RtcpSenderInfo sender_info() const {
- return sender_info_;
- }
-
- void Store(transport::RtcpSenderInfo sender_info,
- base::TimeTicks time_sent,
- uint32 rtp_timestamp) {
- sender_info_ = sender_info;
- time_sent_ = time_sent;
- rtp_timestamp_ = rtp_timestamp;
-}
-
- private:
- int frequency_;
- transport::RtcpSenderInfo sender_info_;
- base::TimeTicks time_sent_;
- uint32 rtp_timestamp_;
-
- DISALLOW_COPY_AND_ASSIGN(RtpSenderStatistics);
-};
-
} // namespace cast
} // namespace media
diff --git a/media/cast/cast_receiver.h b/media/cast/cast_receiver.h
index fa6adace98..2d83dd263f 100644
--- a/media/cast/cast_receiver.h
+++ b/media/cast/cast_receiver.h
@@ -40,28 +40,23 @@ typedef base::Callback<void(const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& playout_time,
bool is_continuous)> VideoFrameDecodedCallback;
-// The following callbacks deliver still-encoded audio/video frame data, along
-// with the frame's corresponding play-out time. The client should examine the
-// EncodedXXXFrame::frame_id field to determine whether any frames have been
+// The following callback delivers encoded frame data and metadata. The client
+// should examine the |frame_id| field to determine whether any frames have been
// dropped (i.e., frame_id should be incrementing by one each time). Note: A
// NULL pointer can be returned on error.
-typedef base::Callback<void(scoped_ptr<transport::EncodedAudioFrame>,
- const base::TimeTicks&)> AudioFrameEncodedCallback;
-typedef base::Callback<void(scoped_ptr<transport::EncodedVideoFrame>,
- const base::TimeTicks&)> VideoFrameEncodedCallback;
+typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+ FrameEncodedCallback;
// This Class is thread safe.
class FrameReceiver : public base::RefCountedThreadSafe<FrameReceiver> {
public:
virtual void GetRawAudioFrame(const AudioFrameDecodedCallback& callback) = 0;
- virtual void GetCodedAudioFrame(
- const AudioFrameEncodedCallback& callback) = 0;
+ virtual void GetCodedAudioFrame(const FrameEncodedCallback& callback) = 0;
virtual void GetRawVideoFrame(const VideoFrameDecodedCallback& callback) = 0;
- virtual void GetEncodedVideoFrame(
- const VideoFrameEncodedCallback& callback) = 0;
+ virtual void GetEncodedVideoFrame(const FrameEncodedCallback& callback) = 0;
protected:
virtual ~FrameReceiver() {}
@@ -75,8 +70,8 @@ class CastReceiver {
public:
static scoped_ptr<CastReceiver> Create(
scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
- const VideoReceiverConfig& video_config,
+ const FrameReceiverConfig& audio_config,
+ const FrameReceiverConfig& video_config,
transport::PacketSender* const packet_sender);
// All received RTP and RTCP packets for the call should be sent to this
diff --git a/media/cast/cast_receiver_impl.cc b/media/cast/cast_receiver_impl.cc
index b38cd99667..661cbbe293 100644
--- a/media/cast/cast_receiver_impl.cc
+++ b/media/cast/cast_receiver_impl.cc
@@ -33,7 +33,7 @@ class LocalFrameReceiver : public FrameReceiver {
callback));
}
- virtual void GetEncodedVideoFrame(const VideoFrameEncodedCallback& callback)
+ virtual void GetEncodedVideoFrame(const FrameEncodedCallback& callback)
OVERRIDE {
cast_environment_->PostTask(CastEnvironment::MAIN,
FROM_HERE,
@@ -51,7 +51,7 @@ class LocalFrameReceiver : public FrameReceiver {
callback));
}
- virtual void GetCodedAudioFrame(const AudioFrameEncodedCallback& callback)
+ virtual void GetCodedAudioFrame(const FrameEncodedCallback& callback)
OVERRIDE {
cast_environment_->PostTask(CastEnvironment::MAIN,
FROM_HERE,
@@ -73,8 +73,8 @@ class LocalFrameReceiver : public FrameReceiver {
scoped_ptr<CastReceiver> CastReceiver::Create(
scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
- const VideoReceiverConfig& video_config,
+ const FrameReceiverConfig& audio_config,
+ const FrameReceiverConfig& video_config,
transport::PacketSender* const packet_sender) {
return scoped_ptr<CastReceiver>(new CastReceiverImpl(
cast_environment, audio_config, video_config, packet_sender));
@@ -82,8 +82,8 @@ scoped_ptr<CastReceiver> CastReceiver::Create(
CastReceiverImpl::CastReceiverImpl(
scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
- const VideoReceiverConfig& video_config,
+ const FrameReceiverConfig& audio_config,
+ const FrameReceiverConfig& video_config,
transport::PacketSender* const packet_sender)
: pacer_(cast_environment->Clock(),
cast_environment->Logging(),
diff --git a/media/cast/cast_receiver_impl.h b/media/cast/cast_receiver_impl.h
index 1fcb355f52..ae7d50e1ae 100644
--- a/media/cast/cast_receiver_impl.h
+++ b/media/cast/cast_receiver_impl.h
@@ -22,8 +22,8 @@ namespace cast {
class CastReceiverImpl : public CastReceiver {
public:
CastReceiverImpl(scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
- const VideoReceiverConfig& video_config,
+ const FrameReceiverConfig& audio_config,
+ const FrameReceiverConfig& video_config,
transport::PacketSender* const packet_sender);
virtual ~CastReceiverImpl();
diff --git a/media/cast/cast_testing.gypi b/media/cast/cast_testing.gypi
index f8c2553463..666b5823fe 100644
--- a/media/cast/cast_testing.gypi
+++ b/media/cast/cast_testing.gypi
@@ -3,9 +3,6 @@
# found in the LICENSE file.
{
- 'variables': {
- 'include_cast_utility_apps%': 0,
- },
'targets': [
{
'target_name': 'cast_test_utility',
@@ -23,6 +20,10 @@
'sources': [
'test/fake_single_thread_task_runner.cc',
'test/fake_single_thread_task_runner.h',
+ 'test/skewed_single_thread_task_runner.cc',
+ 'test/skewed_single_thread_task_runner.h',
+ 'test/skewed_tick_clock.cc',
+ 'test/skewed_tick_clock.h',
'test/utility/audio_utility.cc',
'test/utility/audio_utility.h',
'test/utility/barcode.cc',
@@ -33,6 +34,8 @@
'test/utility/in_process_receiver.h',
'test/utility/input_builder.cc',
'test/utility/input_builder.h',
+ 'test/utility/net_utility.cc',
+ 'test/utility/net_utility.h',
'test/utility/standalone_cast_environment.cc',
'test/utility/standalone_cast_environment.h',
'test/utility/video_utility.cc',
@@ -53,6 +56,9 @@
'cast_rtcp',
'cast_sender',
'cast_test_utility',
+ # Not a true dependency. This is here to make sure the CQ can verify
+ # the tools compile correctly.
+ 'cast_tools',
'cast_transport',
'<(DEPTH)/base/base.gyp:test_support_base',
'<(DEPTH)/net/net.gyp:net',
@@ -84,7 +90,6 @@
'rtcp/rtcp_sender_unittest.cc',
'rtcp/rtcp_unittest.cc',
'rtcp/receiver_rtcp_event_subscriber_unittest.cc',
- 'rtcp/sender_rtcp_event_subscriber_unittest.cc',
# TODO(miu): The following two are test utility modules. Rename/move the files.
'rtcp/test_rtcp_packet_builder.cc',
'rtcp/test_rtcp_packet_builder.h',
@@ -119,113 +124,118 @@
'video_sender/video_sender_unittest.cc',
], # source
},
- ], # targets
- 'conditions': [
- ['include_cast_utility_apps==1', {
- 'targets': [
- {
- 'target_name': 'cast_receiver_app',
- 'type': 'executable',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
+ {
+ # This is a target for the collection of cast development tools.
+ # They are built on bots but not shipped.
+ 'target_name': 'cast_tools',
+ 'type': 'none',
+ 'dependencies': [
+ 'cast_receiver_app',
+ 'cast_sender_app',
+ 'udp_proxy',
+ ],
+ },
+ {
+ 'target_name': 'cast_receiver_app',
+ 'type': 'executable',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_base',
+ 'cast_receiver',
+ 'cast_test_utility',
+ 'cast_transport',
+ '<(DEPTH)/net/net.gyp:net_test_support',
+ '<(DEPTH)/media/media.gyp:media',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
+ ],
+ 'sources': [
+ '<(DEPTH)/media/cast/test/receiver.cc',
+ ],
+ 'conditions': [
+ ['OS == "linux" and use_x11==1', {
'dependencies': [
- 'cast_base',
- 'cast_receiver',
- 'cast_test_utility',
- 'cast_transport',
- '<(DEPTH)/net/net.gyp:net_test_support',
- '<(DEPTH)/media/media.gyp:media',
- '<(DEPTH)/testing/gtest.gyp:gtest',
- '<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
+ '<(DEPTH)/build/linux/system.gyp:x11',
+ '<(DEPTH)/build/linux/system.gyp:xext',
],
'sources': [
- '<(DEPTH)/media/cast/test/receiver.cc',
- ],
- 'conditions': [
- ['OS == "linux" and use_x11==1', {
- 'dependencies': [
- '<(DEPTH)/build/linux/system.gyp:x11',
- '<(DEPTH)/build/linux/system.gyp:xext',
- ],
- 'sources': [
- '<(DEPTH)/media/cast/test/linux_output_window.cc',
- '<(DEPTH)/media/cast/test/linux_output_window.h',
- '<(DEPTH)/ui/gfx/gfx.gyp:gfx_geometry',
- ],
- }],
- ],
- },
- {
- 'target_name': 'cast_sender_app',
- 'type': 'executable',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
- 'dependencies': [
- 'cast_base',
- 'cast_sender',
- 'cast_test_utility',
- 'cast_transport',
- '<(DEPTH)/net/net.gyp:net_test_support',
- '<(DEPTH)/media/media.gyp:media',
- '<(DEPTH)/testing/gtest.gyp:gtest',
- '<(DEPTH)/third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
- '<(DEPTH)/third_party/opus/opus.gyp:opus',
+ '<(DEPTH)/media/cast/test/linux_output_window.cc',
+ '<(DEPTH)/media/cast/test/linux_output_window.h',
'<(DEPTH)/ui/gfx/gfx.gyp:gfx_geometry',
],
- 'sources': [
- '<(DEPTH)/media/cast/test/sender.cc',
- ],
- },
- {
- 'target_name': 'generate_barcode_video',
- 'type': 'executable',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
- 'dependencies': [
- 'cast_test_utility',
- '<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/media/media.gyp:media',
- ],
- 'sources': [
- 'test/utility/generate_barcode_video.cc',
- ],
- },
- {
- 'target_name': 'generate_timecode_audio',
- 'type': 'executable',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
- 'dependencies': [
- 'cast_base',
- 'cast_test_utility',
- 'cast_transport',
- '<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/media/media.gyp:media',
- ],
- 'sources': [
- 'test/utility/generate_timecode_audio.cc',
- ],
- },
- {
- 'target_name': 'udp_proxy',
- 'type': 'executable',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
- 'dependencies': [
- 'cast_test_utility',
- '<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/media/media.gyp:media',
- ],
- 'sources': [
- 'test/utility/udp_proxy_main.cc',
- ],
- },
- ], # targets
- }],
- ], # conditions
+ }],
+ ],
+ },
+ {
+ 'target_name': 'cast_sender_app',
+ 'type': 'executable',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_base',
+ 'cast_sender',
+ 'cast_test_utility',
+ 'cast_transport',
+ '<(DEPTH)/net/net.gyp:net_test_support',
+ '<(DEPTH)/media/media.gyp:media',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ '<(DEPTH)/third_party/opus/opus.gyp:opus',
+ '<(DEPTH)/ui/gfx/gfx.gyp:gfx_geometry',
+ ],
+ 'sources': [
+ '<(DEPTH)/media/cast/test/sender.cc',
+ ],
+ },
+ {
+ 'target_name': 'generate_barcode_video',
+ 'type': 'executable',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_test_utility',
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/media/media.gyp:media',
+ ],
+ 'sources': [
+ 'test/utility/generate_barcode_video.cc',
+ ],
+ },
+ {
+ 'target_name': 'generate_timecode_audio',
+ 'type': 'executable',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_base',
+ 'cast_test_utility',
+ 'cast_transport',
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/media/media.gyp:media',
+ ],
+ 'sources': [
+ 'test/utility/generate_timecode_audio.cc',
+ ],
+ },
+ {
+ 'target_name': 'udp_proxy',
+ 'type': 'executable',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_test_utility',
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/media/media.gyp:media',
+ ],
+ 'sources': [
+ 'test/utility/udp_proxy_main.cc',
+ ],
+ }
+ ], # targets
}
diff --git a/media/cast/framer/cast_message_builder.cc b/media/cast/framer/cast_message_builder.cc
index 5c317711be..f3473f9690 100644
--- a/media/cast/framer/cast_message_builder.cc
+++ b/media/cast/framer/cast_message_builder.cc
@@ -23,7 +23,6 @@ CastMessageBuilder::CastMessageBuilder(
decoder_faster_than_max_frame_rate_(decoder_faster_than_max_frame_rate),
max_unacked_frames_(max_unacked_frames),
cast_msg_(media_ssrc),
- waiting_for_key_frame_(true),
slowing_down_ack_(false),
acked_last_frame_(true),
last_acked_frame_id_(kStartFrameId) {
@@ -32,67 +31,61 @@ CastMessageBuilder::CastMessageBuilder(
CastMessageBuilder::~CastMessageBuilder() {}
-void CastMessageBuilder::CompleteFrameReceived(uint32 frame_id,
- bool is_key_frame) {
+void CastMessageBuilder::CompleteFrameReceived(uint32 frame_id) {
+ DCHECK_GE(static_cast<int32>(frame_id - last_acked_frame_id_), 0);
+ VLOG(2) << "CompleteFrameReceived: " << frame_id;
if (last_update_time_.is_null()) {
// Our first update.
last_update_time_ = clock_->NowTicks();
}
- if (waiting_for_key_frame_) {
- if (!is_key_frame) {
- // Ignore that we have received this complete frame since we are
- // waiting on a key frame.
- return;
- }
- waiting_for_key_frame_ = false;
- cast_msg_.missing_frames_and_packets_.clear();
- cast_msg_.ack_frame_id_ = frame_id;
- last_update_time_ = clock_->NowTicks();
- // We might have other complete frames waiting after we receive the last
- // packet in the key-frame.
- UpdateAckMessage();
- } else {
- if (!UpdateAckMessage())
- return;
-
- BuildPacketList();
+
+ if (!UpdateAckMessage(frame_id)) {
+ return;
}
+ BuildPacketList();
+
// Send cast message.
VLOG(2) << "Send cast message Ack:" << static_cast<int>(frame_id);
cast_feedback_->CastFeedback(cast_msg_);
}
-bool CastMessageBuilder::UpdateAckMessage() {
+bool CastMessageBuilder::UpdateAckMessage(uint32 frame_id) {
if (!decoder_faster_than_max_frame_rate_) {
int complete_frame_count = frame_id_map_->NumberOfCompleteFrames();
if (complete_frame_count > max_unacked_frames_) {
// We have too many frames pending in our framer; slow down ACK.
- slowing_down_ack_ = true;
+ if (!slowing_down_ack_) {
+ slowing_down_ack_ = true;
+ ack_queue_.push_back(last_acked_frame_id_);
+ }
} else if (complete_frame_count <= 1) {
// We are down to one or less frames in our framer; ACK normally.
slowing_down_ack_ = false;
+ ack_queue_.clear();
}
}
+
if (slowing_down_ack_) {
// We are slowing down acknowledgment by acknowledging every other frame.
- if (acked_last_frame_) {
- acked_last_frame_ = false;
- } else {
- acked_last_frame_ = true;
- last_acked_frame_id_++;
- // Note: frame skipping and slowdown ACK is not supported at the same
- // time; and it's not needed since we can skip frames to catch up.
- }
- } else {
- uint32 frame_id = frame_id_map_->LastContinuousFrame();
-
- // Is it a new frame?
- if (last_acked_frame_id_ == frame_id)
+ // Note: frame skipping and slowdown ACK is not supported at the same
+ // time; and it's not needed since we can skip frames to catch up.
+ if (!ack_queue_.empty() && ack_queue_.back() == frame_id) {
return false;
+ }
+ ack_queue_.push_back(frame_id);
+ if (!acked_last_frame_) {
+ ack_queue_.pop_front();
+ }
+ frame_id = ack_queue_.front();
+ }
- last_acked_frame_id_ = frame_id;
- acked_last_frame_ = true;
+ acked_last_frame_ = false;
+ // Is it a new frame?
+ if (last_acked_frame_id_ == frame_id) {
+ return false;
}
+ acked_last_frame_ = true;
+ last_acked_frame_id_ = frame_id;
cast_msg_.ack_frame_id_ = last_acked_frame_id_;
cast_msg_.missing_frames_and_packets_.clear();
last_update_time_ = clock_->NowTicks();
@@ -120,7 +113,6 @@ void CastMessageBuilder::UpdateCastMessage() {
}
void CastMessageBuilder::Reset() {
- waiting_for_key_frame_ = true;
cast_msg_.ack_frame_id_ = kStartFrameId;
cast_msg_.missing_frames_and_packets_.clear();
time_last_nacked_map_.clear();
@@ -142,7 +134,8 @@ bool CastMessageBuilder::UpdateCastMessageInternal(RtcpCastMessage* message) {
}
last_update_time_ = now;
- UpdateAckMessage(); // Needed to cover when a frame is skipped.
+ // Needed to cover when a frame is skipped.
+ UpdateAckMessage(last_acked_frame_id_);
BuildPacketList();
message->Copy(cast_msg_);
return true;
diff --git a/media/cast/framer/cast_message_builder.h b/media/cast/framer/cast_message_builder.h
index b76a196111..9db88d4a99 100644
--- a/media/cast/framer/cast_message_builder.h
+++ b/media/cast/framer/cast_message_builder.h
@@ -7,6 +7,7 @@
#ifndef MEDIA_CAST_FRAMER_CAST_MESSAGE_BUILDER_H_
#define MEDIA_CAST_FRAMER_CAST_MESSAGE_BUILDER_H_
+#include <deque>
#include <map>
#include "media/cast/framer/frame_id_map.h"
@@ -30,13 +31,13 @@ class CastMessageBuilder {
int max_unacked_frames);
~CastMessageBuilder();
- void CompleteFrameReceived(uint32 frame_id, bool is_key_frame);
+ void CompleteFrameReceived(uint32 frame_id);
bool TimeToSendNextCastMessage(base::TimeTicks* time_to_send);
void UpdateCastMessage();
void Reset();
private:
- bool UpdateAckMessage();
+ bool UpdateAckMessage(uint32 frame_id);
void BuildPacketList();
bool UpdateCastMessageInternal(RtcpCastMessage* message);
@@ -51,13 +52,13 @@ class CastMessageBuilder {
RtcpCastMessage cast_msg_;
base::TimeTicks last_update_time_;
- bool waiting_for_key_frame_;
TimeLastNackMap time_last_nacked_map_;
bool slowing_down_ack_;
bool acked_last_frame_;
uint32 last_acked_frame_id_;
+ std::deque<uint32> ack_queue_;
DISALLOW_COPY_AND_ASSIGN(CastMessageBuilder);
};
diff --git a/media/cast/framer/cast_message_builder_unittest.cc b/media/cast/framer/cast_message_builder_unittest.cc
index c84a28f888..ef75162a08 100644
--- a/media/cast/framer/cast_message_builder_unittest.cc
+++ b/media/cast/framer/cast_message_builder_unittest.cc
@@ -112,8 +112,7 @@ class CastMessageBuilderTest : public ::testing::Test {
void InsertPacket() {
PacketType packet_type = frame_id_map_.InsertPacket(rtp_header_);
if (packet_type == kNewPacketCompletingFrame) {
- cast_msg_builder_->CompleteFrameReceived(rtp_header_.frame_id,
- rtp_header_.is_key_frame);
+ cast_msg_builder_->CompleteFrameReceived(rtp_header_.frame_id);
}
cast_msg_builder_->UpdateCastMessage();
}
@@ -136,26 +135,6 @@ class CastMessageBuilderTest : public ::testing::Test {
DISALLOW_COPY_AND_ASSIGN(CastMessageBuilderTest);
};
-TEST_F(CastMessageBuilderTest, StartWithAKeyFrame) {
- SetFrameIds(3, 2);
- SetPacketId(0);
- SetMaxPacketId(0);
- InsertPacket();
- // Should not trigger ack.
- EXPECT_FALSE(feedback_.triggered());
- SetFrameIds(5, 5);
- SetPacketId(0);
- SetMaxPacketId(0);
- SetKeyFrame(true);
- InsertPacket();
- frame_id_map_.RemoveOldFrames(5); // Simulate 5 being pulled for rendering.
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- cast_msg_builder_->UpdateCastMessage();
- EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(5u, feedback_.last_frame_acked());
-}
-
TEST_F(CastMessageBuilderTest, OneFrameNackList) {
SetFrameIds(0, 0);
SetPacketId(4);
@@ -187,31 +166,6 @@ TEST_F(CastMessageBuilderTest, CompleteFrameMissing) {
EXPECT_EQ(kRtcpCastAllPacketsLost, feedback_.num_missing_packets(1));
}
-TEST_F(CastMessageBuilderTest, FastForwardAck) {
- SetFrameIds(1, 0);
- SetPacketId(0);
- SetMaxPacketId(0);
- InsertPacket();
- EXPECT_FALSE(feedback_.triggered());
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameIds(2, 1);
- SetPacketId(0);
- SetMaxPacketId(0);
- InsertPacket();
- EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(kStartFrameId, feedback_.last_frame_acked());
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameIds(0, 0);
- SetPacketId(0);
- SetMaxPacketId(0);
- SetKeyFrame(true);
- InsertPacket();
- EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(2u, feedback_.last_frame_acked());
-}
-
TEST_F(CastMessageBuilderTest, RemoveOldFrames) {
SetFrameIds(1, 0);
SetPacketId(0);
@@ -232,7 +186,7 @@ TEST_F(CastMessageBuilderTest, RemoveOldFrames) {
SetMaxPacketId(5);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(kStartFrameId, feedback_.last_frame_acked());
+ EXPECT_EQ(2u, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
SetFrameIds(5, 5);
@@ -260,42 +214,6 @@ TEST_F(CastMessageBuilderTest, RemoveOldFrames) {
EXPECT_EQ(5u, feedback_.last_frame_acked());
}
-TEST_F(CastMessageBuilderTest, WrapFastForward) {
- SetFrameIds(254, 254);
- SetPacketId(0);
- SetMaxPacketId(1);
- SetKeyFrame(true);
- InsertPacket();
- EXPECT_FALSE(feedback_.triggered());
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameIds(255, 254);
- SetPacketId(0);
- SetMaxPacketId(0);
- SetKeyFrame(false);
- InsertPacket();
- EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(253u, feedback_.last_frame_acked());
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameIds(256, 255);
- SetPacketId(0);
- SetMaxPacketId(0);
- SetKeyFrame(false);
- InsertPacket();
- EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(253u, feedback_.last_frame_acked());
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameIds(254, 254);
- SetPacketId(1);
- SetMaxPacketId(1);
- SetKeyFrame(true);
- InsertPacket();
- EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(256u, feedback_.last_frame_acked());
-}
-
TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacket) {
SetFrameIds(0, 0);
SetPacketId(0);
@@ -404,7 +322,7 @@ TEST_F(CastMessageBuilderTest, BasicRps) {
SetKeyFrame(false);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(0u, feedback_.last_frame_acked());
+ EXPECT_EQ(3u, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
frame_id_map_.RemoveOldFrames(3); // Simulate 3 being pulled for rendering.
@@ -480,16 +398,19 @@ TEST_F(CastMessageBuilderTest, SlowDownAck) {
// We should now have entered the slowdown ACK state.
uint32 expected_frame_id = 1;
for (; frame_id < 10; ++frame_id) {
- if (frame_id % 2)
+ if (frame_id % 2) {
++expected_frame_id;
- EXPECT_TRUE(feedback_.triggered());
+ EXPECT_TRUE(feedback_.triggered());
+ } else {
+ EXPECT_FALSE(feedback_.triggered());
+ }
EXPECT_EQ(expected_frame_id, feedback_.last_frame_acked());
SetFrameIds(frame_id, frame_id - 1);
InsertPacket();
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
}
- EXPECT_TRUE(feedback_.triggered());
+ EXPECT_FALSE(feedback_.triggered());
EXPECT_EQ(expected_frame_id, feedback_.last_frame_acked());
// Simulate frame_id being pulled for rendering.
diff --git a/media/cast/framer/frame_buffer.cc b/media/cast/framer/frame_buffer.cc
index 2bfdeb5166..0b6fa8332c 100644
--- a/media/cast/framer/frame_buffer.cc
+++ b/media/cast/framer/frame_buffer.cc
@@ -28,6 +28,8 @@ void FrameBuffer::InsertPacket(const uint8* payload_data,
frame_id_ = rtp_header.frame_id;
max_packet_id_ = rtp_header.max_packet_id;
is_key_frame_ = rtp_header.is_key_frame;
+ if (is_key_frame_)
+ DCHECK_EQ(rtp_header.frame_id, rtp_header.reference_frame_id);
last_referenced_frame_id_ = rtp_header.reference_frame_id;
rtp_timestamp_ = rtp_header.rtp_timestamp;
}
@@ -57,44 +59,27 @@ bool FrameBuffer::Complete() const {
return num_packets_received_ - 1 == max_packet_id_;
}
-bool FrameBuffer::GetEncodedAudioFrame(
- transport::EncodedAudioFrame* audio_frame) const {
+bool FrameBuffer::AssembleEncodedFrame(transport::EncodedFrame* frame) const {
if (!Complete())
return false;
// Frame is complete -> construct.
- audio_frame->frame_id = frame_id_;
- audio_frame->rtp_timestamp = rtp_timestamp_;
+ if (is_key_frame_)
+ frame->dependency = transport::EncodedFrame::KEY;
+ else if (frame_id_ == last_referenced_frame_id_)
+ frame->dependency = transport::EncodedFrame::INDEPENDENT;
+ else
+ frame->dependency = transport::EncodedFrame::DEPENDENT;
+ frame->frame_id = frame_id_;
+ frame->referenced_frame_id = last_referenced_frame_id_;
+ frame->rtp_timestamp = rtp_timestamp_;
// Build the data vector.
- audio_frame->data.clear();
- audio_frame->data.reserve(total_data_size_);
+ frame->data.clear();
+ frame->data.reserve(total_data_size_);
PacketMap::const_iterator it;
- for (it = packets_.begin(); it != packets_.end(); ++it) {
- audio_frame->data.insert(
- audio_frame->data.end(), it->second.begin(), it->second.end());
- }
- return true;
-}
-
-bool FrameBuffer::GetEncodedVideoFrame(
- transport::EncodedVideoFrame* video_frame) const {
- if (!Complete())
- return false;
- // Frame is complete -> construct.
- video_frame->key_frame = is_key_frame_;
- video_frame->frame_id = frame_id_;
- video_frame->last_referenced_frame_id = last_referenced_frame_id_;
- video_frame->rtp_timestamp = rtp_timestamp_;
-
- // Build the data vector.
- video_frame->data.clear();
- video_frame->data.reserve(total_data_size_);
- PacketMap::const_iterator it;
- for (it = packets_.begin(); it != packets_.end(); ++it) {
- video_frame->data.insert(
- video_frame->data.end(), it->second.begin(), it->second.end());
- }
+ for (it = packets_.begin(); it != packets_.end(); ++it)
+ frame->data.insert(frame->data.end(), it->second.begin(), it->second.end());
return true;
}
diff --git a/media/cast/framer/frame_buffer.h b/media/cast/framer/frame_buffer.h
index 65df021d25..d4d5dedbbd 100644
--- a/media/cast/framer/frame_buffer.h
+++ b/media/cast/framer/frame_buffer.h
@@ -25,9 +25,11 @@ class FrameBuffer {
const RtpCastHeader& rtp_header);
bool Complete() const;
- bool GetEncodedAudioFrame(transport::EncodedAudioFrame* audio_frame) const;
-
- bool GetEncodedVideoFrame(transport::EncodedVideoFrame* video_frame) const;
+ // If a frame is complete, sets the frame IDs and RTP timestamp in |frame|,
+ // and also copies the data from all packets into the data field in |frame|.
+ // Returns true if the frame was complete; false if incomplete and |frame|
+ // remains unchanged.
+ bool AssembleEncodedFrame(transport::EncodedFrame* frame) const;
bool is_key_frame() const { return is_key_frame_; }
diff --git a/media/cast/framer/frame_buffer_unittest.cc b/media/cast/framer/frame_buffer_unittest.cc
index c00aa2b77b..d6844f3e95 100644
--- a/media/cast/framer/frame_buffer_unittest.cc
+++ b/media/cast/framer/frame_buffer_unittest.cc
@@ -29,26 +29,26 @@ TEST_F(FrameBufferTest, OnePacketInsertSanity) {
rtp_header_.frame_id = 5;
rtp_header_.reference_frame_id = 5;
buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- transport::EncodedVideoFrame frame;
- EXPECT_TRUE(buffer_.GetEncodedVideoFrame(&frame));
+ transport::EncodedFrame frame;
+ EXPECT_TRUE(buffer_.AssembleEncodedFrame(&frame));
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(5u, frame.frame_id);
- EXPECT_TRUE(frame.key_frame);
+ EXPECT_EQ(5u, frame.referenced_frame_id);
EXPECT_EQ(3000u, frame.rtp_timestamp);
}
TEST_F(FrameBufferTest, EmptyBuffer) {
EXPECT_FALSE(buffer_.Complete());
- EXPECT_FALSE(buffer_.is_key_frame());
- transport::EncodedVideoFrame frame;
- EXPECT_FALSE(buffer_.GetEncodedVideoFrame(&frame));
+ transport::EncodedFrame frame;
+ EXPECT_FALSE(buffer_.AssembleEncodedFrame(&frame));
}
TEST_F(FrameBufferTest, DefaultOnePacketFrame) {
buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
EXPECT_TRUE(buffer_.Complete());
EXPECT_FALSE(buffer_.is_key_frame());
- transport::EncodedVideoFrame frame;
- EXPECT_TRUE(buffer_.GetEncodedVideoFrame(&frame));
+ transport::EncodedFrame frame;
+ EXPECT_TRUE(buffer_.AssembleEncodedFrame(&frame));
EXPECT_EQ(payload_.size(), frame.data.size());
}
@@ -63,8 +63,8 @@ TEST_F(FrameBufferTest, MultiplePacketFrame) {
++rtp_header_.packet_id;
EXPECT_TRUE(buffer_.Complete());
EXPECT_TRUE(buffer_.is_key_frame());
- transport::EncodedVideoFrame frame;
- EXPECT_TRUE(buffer_.GetEncodedVideoFrame(&frame));
+ transport::EncodedFrame frame;
+ EXPECT_TRUE(buffer_.AssembleEncodedFrame(&frame));
EXPECT_EQ(3 * payload_.size(), frame.data.size());
}
diff --git a/media/cast/framer/frame_id_map.cc b/media/cast/framer/frame_id_map.cc
index f93fb85125..b4389fd532 100644
--- a/media/cast/framer/frame_id_map.cc
+++ b/media/cast/framer/frame_id_map.cc
@@ -137,6 +137,22 @@ bool FrameIdMap::NextContinuousFrame(uint32* frame_id) const {
return false;
}
+bool FrameIdMap::HaveMultipleDecodableFrames() const {
+ // Find the oldest decodable frame.
+ FrameMap::const_iterator it;
+ bool found_one = false;
+ for (it = frame_map_.begin(); it != frame_map_.end(); ++it) {
+ if (it->second->Complete() && DecodableFrame(it->second.get())) {
+ if (found_one) {
+ return true;
+ } else {
+ found_one = true;
+ }
+ }
+ }
+ return false;
+}
+
uint32 FrameIdMap::LastContinuousFrame() const {
uint32 last_continuous_frame_id = last_released_frame_;
uint32 next_expected_frame = last_released_frame_;
@@ -157,43 +173,16 @@ uint32 FrameIdMap::LastContinuousFrame() const {
return last_continuous_frame_id;
}
-bool FrameIdMap::NextAudioFrameAllowingMissingFrames(uint32* frame_id) const {
- // First check if we have continuous frames.
- if (NextContinuousFrame(frame_id))
- return true;
-
- // Find the oldest frame.
- FrameMap::const_iterator it_best_match = frame_map_.end();
- FrameMap::const_iterator it;
-
- // Find first complete frame.
- for (it = frame_map_.begin(); it != frame_map_.end(); ++it) {
- if (it->second->Complete()) {
- it_best_match = it;
- break;
- }
- }
- if (it_best_match == frame_map_.end())
- return false; // No complete frame.
-
- ++it;
- for (; it != frame_map_.end(); ++it) {
- if (it->second->Complete() &&
- IsOlderFrameId(it->first, it_best_match->first)) {
- it_best_match = it;
- }
- }
- *frame_id = it_best_match->first;
- return true;
-}
-
-bool FrameIdMap::NextVideoFrameAllowingSkippingFrames(uint32* frame_id) const {
+bool FrameIdMap::NextFrameAllowingSkippingFrames(uint32* frame_id) const {
// Find the oldest decodable frame.
FrameMap::const_iterator it_best_match = frame_map_.end();
FrameMap::const_iterator it;
for (it = frame_map_.begin(); it != frame_map_.end(); ++it) {
- if (it->second->Complete() && DecodableVideoFrame(it->second.get())) {
- it_best_match = it;
+ if (it->second->Complete() && DecodableFrame(it->second.get())) {
+ if (it_best_match == frame_map_.end() ||
+ IsOlderFrameId(it->first, it_best_match->first)) {
+ it_best_match = it;
+ }
}
}
if (it_best_match == frame_map_.end())
@@ -237,11 +226,14 @@ bool FrameIdMap::ContinuousFrame(FrameInfo* frame) const {
return static_cast<uint32>(last_released_frame_ + 1) == frame->frame_id();
}
-bool FrameIdMap::DecodableVideoFrame(FrameInfo* frame) const {
+bool FrameIdMap::DecodableFrame(FrameInfo* frame) const {
if (frame->is_key_frame())
return true;
if (waiting_for_key_ && !frame->is_key_frame())
return false;
+ // Self-reference?
+ if (frame->referenced_frame_id() == frame->frame_id())
+ return true;
// Current frame is not necessarily referencing the last frame.
// Do we have the reference frame?
diff --git a/media/cast/framer/frame_id_map.h b/media/cast/framer/frame_id_map.h
index f61e251484..66e306f671 100644
--- a/media/cast/framer/frame_id_map.h
+++ b/media/cast/framer/frame_id_map.h
@@ -64,8 +64,8 @@ class FrameIdMap {
bool NextContinuousFrame(uint32* frame_id) const;
uint32 LastContinuousFrame() const;
- bool NextAudioFrameAllowingMissingFrames(uint32* frame_id) const;
- bool NextVideoFrameAllowingSkippingFrames(uint32* frame_id) const;
+ bool NextFrameAllowingSkippingFrames(uint32* frame_id) const;
+ bool HaveMultipleDecodableFrames() const;
int NumberOfCompleteFrames() const;
void GetMissingPackets(uint32 frame_id,
@@ -74,7 +74,7 @@ class FrameIdMap {
private:
bool ContinuousFrame(FrameInfo* frame) const;
- bool DecodableVideoFrame(FrameInfo* frame) const;
+ bool DecodableFrame(FrameInfo* frame) const;
FrameMap frame_map_;
bool waiting_for_key_;
diff --git a/media/cast/framer/framer.cc b/media/cast/framer/framer.cc
index d510d8b7ba..de4451a3b4 100644
--- a/media/cast/framer/framer.cc
+++ b/media/cast/framer/framer.cc
@@ -58,42 +58,15 @@ bool Framer::InsertPacket(const uint8* payload_data,
it->second->InsertPacket(payload_data, payload_size, rtp_header);
}
- bool complete = (packet_type == kNewPacketCompletingFrame);
- if (complete) {
- // ACK as soon as possible.
- VLOG(2) << "Complete frame " << static_cast<int>(rtp_header.frame_id);
- cast_msg_builder_->CompleteFrameReceived(rtp_header.frame_id,
- rtp_header.is_key_frame);
- }
- return complete;
+ return packet_type == kNewPacketCompletingFrame;
}
// This does not release the frame.
-bool Framer::GetEncodedAudioFrame(transport::EncodedAudioFrame* audio_frame,
- bool* next_frame) {
- uint32 frame_id;
- // Find frame id.
- if (frame_id_map_.NextContinuousFrame(&frame_id)) {
- // We have our next frame.
- *next_frame = true;
- } else {
- if (!frame_id_map_.NextAudioFrameAllowingMissingFrames(&frame_id)) {
- return false;
- }
- *next_frame = false;
- }
+bool Framer::GetEncodedFrame(transport::EncodedFrame* frame,
+ bool* next_frame,
+ bool* have_multiple_decodable_frames) {
+ *have_multiple_decodable_frames = frame_id_map_.HaveMultipleDecodableFrames();
- ConstFrameIterator it = frames_.find(frame_id);
- DCHECK(it != frames_.end());
- if (it == frames_.end())
- return false;
-
- return it->second->GetEncodedAudioFrame(audio_frame);
-}
-
-// This does not release the frame.
-bool Framer::GetEncodedVideoFrame(transport::EncodedVideoFrame* video_frame,
- bool* next_frame) {
uint32 frame_id;
// Find frame id.
if (frame_id_map_.NextContinuousFrame(&frame_id)) {
@@ -104,18 +77,23 @@ bool Framer::GetEncodedVideoFrame(transport::EncodedVideoFrame* video_frame,
if (!decoder_faster_than_max_frame_rate_)
return false;
- if (!frame_id_map_.NextVideoFrameAllowingSkippingFrames(&frame_id)) {
+ if (!frame_id_map_.NextFrameAllowingSkippingFrames(&frame_id)) {
return false;
}
*next_frame = false;
}
+ if (*next_frame) {
+ VLOG(2) << "ACK frame " << frame_id;
+ cast_msg_builder_->CompleteFrameReceived(frame_id);
+ }
+
ConstFrameIterator it = frames_.find(frame_id);
DCHECK(it != frames_.end());
if (it == frames_.end())
return false;
- return it->second->GetEncodedVideoFrame(video_frame);
+ return it->second->AssembleEncodedFrame(frame);
}
void Framer::Reset() {
diff --git a/media/cast/framer/framer.h b/media/cast/framer/framer.h
index eb67064a40..0b7249eff3 100644
--- a/media/cast/framer/framer.h
+++ b/media/cast/framer/framer.h
@@ -40,15 +40,14 @@ class Framer {
const RtpCastHeader& rtp_header,
bool* duplicate);
- // Extracts a complete encoded frame - will only return a complete continuous
- // frame.
- // Returns false if the frame does not exist or if the frame is not complete
- // within the given time frame.
- bool GetEncodedVideoFrame(transport::EncodedVideoFrame* video_frame,
- bool* next_frame);
-
- bool GetEncodedAudioFrame(transport::EncodedAudioFrame* audio_frame,
- bool* next_frame);
+ // Extracts a complete encoded frame - will only return a complete and
+ // decodable frame. Returns false if no such frames exist.
+ // |next_frame| will be set to true if the returned frame is the very
+ // next frame. |have_multiple_complete_frames| will be set to true
+ // if there are more decodable frames available.
+ bool GetEncodedFrame(transport::EncodedFrame* video_frame,
+ bool* next_frame,
+ bool* have_multiple_complete_frames);
void ReleaseFrame(uint32 frame_id);
diff --git a/media/cast/framer/framer_unittest.cc b/media/cast/framer/framer_unittest.cc
index 06a340ebb0..ad53ef06ee 100644
--- a/media/cast/framer/framer_unittest.cc
+++ b/media/cast/framer/framer_unittest.cc
@@ -33,39 +33,44 @@ class FramerTest : public ::testing::Test {
};
TEST_F(FramerTest, EmptyState) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ bool multiple = false;
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
}
TEST_F(FramerTest, AlwaysStartWithKey) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool complete = false;
+ bool multiple = false;
bool duplicate = false;
// Insert non key first frame.
complete = framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(complete);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
rtp_header_.frame_id = 1;
rtp_header_.reference_frame_id = 1;
rtp_header_.is_key_frame = true;
complete = framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(complete);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_TRUE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(1u, frame.frame_id);
- EXPECT_TRUE(frame.key_frame);
+ EXPECT_EQ(1u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, CompleteFrame) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool complete = false;
+ bool multiple = false;
bool duplicate = false;
// Start with a complete key frame.
@@ -73,10 +78,12 @@ TEST_F(FramerTest, CompleteFrame) {
complete = framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(complete);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
- EXPECT_TRUE(frame.key_frame);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Incomplete delta.
@@ -87,7 +94,7 @@ TEST_F(FramerTest, CompleteFrame) {
complete = framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_FALSE(complete);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
// Complete delta - can't skip, as incomplete sequence.
++rtp_header_.frame_id;
@@ -96,13 +103,14 @@ TEST_F(FramerTest, CompleteFrame) {
complete = framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(complete);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
}
TEST_F(FramerTest, DuplicatePackets) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool complete = false;
+ bool multiple = false;
bool duplicate = false;
// Start with an incomplete key frame.
@@ -113,7 +121,7 @@ TEST_F(FramerTest, DuplicatePackets) {
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_FALSE(complete);
EXPECT_FALSE(duplicate);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
// Add same packet again in incomplete key frame.
duplicate = false;
@@ -121,7 +129,7 @@ TEST_F(FramerTest, DuplicatePackets) {
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_FALSE(complete);
EXPECT_TRUE(duplicate);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
// Complete key frame.
rtp_header_.packet_id = 1;
@@ -130,8 +138,10 @@ TEST_F(FramerTest, DuplicatePackets) {
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(complete);
EXPECT_FALSE(duplicate);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
- EXPECT_EQ(0u, frame.frame_id);
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
// Add same packet again in complete key frame.
duplicate = false;
@@ -139,8 +149,11 @@ TEST_F(FramerTest, DuplicatePackets) {
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_FALSE(complete);
EXPECT_TRUE(duplicate);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Incomplete delta frame.
@@ -153,7 +166,7 @@ TEST_F(FramerTest, DuplicatePackets) {
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_FALSE(complete);
EXPECT_FALSE(duplicate);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
// Add same packet again in incomplete delta frame.
duplicate = false;
@@ -161,7 +174,7 @@ TEST_F(FramerTest, DuplicatePackets) {
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_FALSE(complete);
EXPECT_TRUE(duplicate);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
// Complete delta frame.
rtp_header_.packet_id = 1;
@@ -170,8 +183,11 @@ TEST_F(FramerTest, DuplicatePackets) {
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(complete);
EXPECT_FALSE(duplicate);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(1u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
+ EXPECT_FALSE(multiple);
// Add same packet again in complete delta frame.
duplicate = false;
@@ -179,14 +195,18 @@ TEST_F(FramerTest, DuplicatePackets) {
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_FALSE(complete);
EXPECT_TRUE(duplicate);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(1u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
+ EXPECT_FALSE(multiple);
}
TEST_F(FramerTest, ContinuousSequence) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool complete = false;
+ bool multiple = false;
bool duplicate = false;
// Start with a complete key frame.
@@ -194,10 +214,12 @@ TEST_F(FramerTest, ContinuousSequence) {
complete = framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(complete);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
- EXPECT_TRUE(frame.key_frame);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Complete - not continuous.
@@ -207,13 +229,14 @@ TEST_F(FramerTest, ContinuousSequence) {
complete = framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(complete);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
}
TEST_F(FramerTest, Wrap) {
// Insert key frame, frame_id = 255 (will jump to that)
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool multiple = true;
bool duplicate = false;
// Start with a complete key frame.
@@ -222,9 +245,12 @@ TEST_F(FramerTest, Wrap) {
rtp_header_.reference_frame_id = 255;
framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(255u, frame.frame_id);
+ EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Insert wrapped delta frame - should be continuous.
@@ -232,16 +258,20 @@ TEST_F(FramerTest, Wrap) {
rtp_header_.frame_id = 256;
framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(256u, frame.frame_id);
+ EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, Reset) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool complete = false;
+ bool multiple = true;
bool duplicate = false;
// Start with a complete key frame.
@@ -250,12 +280,13 @@ TEST_F(FramerTest, Reset) {
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(complete);
framer_.Reset();
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
}
TEST_F(FramerTest, RequireKeyAfterReset) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool multiple = false;
bool duplicate = false;
framer_.Reset();
@@ -265,19 +296,21 @@ TEST_F(FramerTest, RequireKeyAfterReset) {
rtp_header_.frame_id = 0;
framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
rtp_header_.frame_id = 1;
rtp_header_.reference_frame_id = 1;
rtp_header_.is_key_frame = true;
framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_TRUE(multiple);
}
TEST_F(FramerTest, BasicNonLastReferenceId) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool multiple = false;
bool duplicate = false;
rtp_header_.is_key_frame = true;
@@ -285,7 +318,8 @@ TEST_F(FramerTest, BasicNonLastReferenceId) {
framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+ EXPECT_FALSE(multiple);
framer_.ReleaseFrame(frame.frame_id);
rtp_header_.is_key_frame = false;
@@ -294,14 +328,16 @@ TEST_F(FramerTest, BasicNonLastReferenceId) {
framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_FALSE(next_frame);
+ EXPECT_FALSE(multiple);
}
TEST_F(FramerTest, InOrderReferenceFrameSelection) {
// Create pattern: 0, 1, 4, 5.
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool multiple = false;
bool duplicate = false;
rtp_header_.is_key_frame = true;
@@ -323,38 +359,51 @@ TEST_F(FramerTest, InOrderReferenceFrameSelection) {
rtp_header_.reference_frame_id = 0;
framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
+ EXPECT_FALSE(multiple);
framer_.ReleaseFrame(frame.frame_id);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_TRUE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(1u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_FALSE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(4u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
  // Insert remaining packet of frame #2 - should not be continuous.
rtp_header_.frame_id = 2;
rtp_header_.packet_id = 1;
framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
rtp_header_.frame_id = 5;
rtp_header_.reference_frame_id = rtp_header_.frame_id - 1;
rtp_header_.packet_id = 0;
rtp_header_.max_packet_id = 0;
framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(5u, frame.frame_id);
+ EXPECT_EQ(4u, frame.referenced_frame_id);
}
TEST_F(FramerTest, AudioWrap) {
// All audio frames are marked as key frames.
- transport::EncodedAudioFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool multiple = false;
bool duplicate = false;
rtp_header_.is_key_frame = true;
@@ -363,9 +412,12 @@ TEST_F(FramerTest, AudioWrap) {
framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(254u, frame.frame_id);
+ EXPECT_EQ(254u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
rtp_header_.frame_id = 255;
@@ -379,21 +431,28 @@ TEST_F(FramerTest, AudioWrap) {
framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_TRUE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(255u, frame.frame_id);
+ EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(256u, frame.frame_id);
+ EXPECT_EQ(256u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, AudioWrapWithMissingFrame) {
// All audio frames are marked as key frames.
- transport::EncodedAudioFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool multiple = true;
bool duplicate = false;
// Insert and get first packet.
@@ -402,9 +461,12 @@ TEST_F(FramerTest, AudioWrapWithMissingFrame) {
rtp_header_.reference_frame_id = 253;
framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(253u, frame.frame_id);
+ EXPECT_EQ(253u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Insert third and fourth packets.
@@ -418,13 +480,19 @@ TEST_F(FramerTest, AudioWrapWithMissingFrame) {
payload_.data(), payload_.size(), rtp_header_, &duplicate);
// Get third and fourth packets.
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_FALSE(next_frame);
+ EXPECT_TRUE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(255u, frame.frame_id);
+ EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(256u, frame.frame_id);
+ EXPECT_EQ(256u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
diff --git a/media/cast/logging/encoding_event_subscriber.cc b/media/cast/logging/encoding_event_subscriber.cc
index 2ccf731ba0..48cc911ba8 100644
--- a/media/cast/logging/encoding_event_subscriber.cc
+++ b/media/cast/logging/encoding_event_subscriber.cc
@@ -62,7 +62,7 @@ void EncodingEventSubscriber::OnReceiveFrameEvent(
const FrameEvent& frame_event) {
DCHECK(thread_checker_.CalledOnValidThread());
- if (!ShouldProcessEvent(frame_event.type))
+ if (event_media_type_ != frame_event.media_type)
return;
RtpTimestamp relative_rtp_timestamp =
@@ -92,14 +92,14 @@ void EncodingEventSubscriber::OnReceiveFrameEvent(
event_proto->add_event_timestamp_ms(
(frame_event.timestamp - base::TimeTicks()).InMilliseconds());
- if (frame_event.type == kAudioFrameEncoded) {
+ if (frame_event.type == FRAME_ENCODED) {
event_proto->set_encoded_frame_size(frame_event.size);
- } else if (frame_event.type == kVideoFrameEncoded) {
- event_proto->set_encoded_frame_size(frame_event.size);
- event_proto->set_key_frame(frame_event.key_frame);
- event_proto->set_target_bitrate(frame_event.target_bitrate);
- } else if (frame_event.type == kAudioPlayoutDelay ||
- frame_event.type == kVideoRenderDelay) {
+ if (frame_event.media_type == VIDEO_EVENT) {
+ event_proto->set_encoded_frame_size(frame_event.size);
+ event_proto->set_key_frame(frame_event.key_frame);
+ event_proto->set_target_bitrate(frame_event.target_bitrate);
+ }
+ } else if (frame_event.type == FRAME_PLAYOUT) {
event_proto->set_delay_millis(frame_event.delay_delta.InMilliseconds());
}
@@ -114,8 +114,9 @@ void EncodingEventSubscriber::OnReceivePacketEvent(
const PacketEvent& packet_event) {
DCHECK(thread_checker_.CalledOnValidThread());
- if (!ShouldProcessEvent(packet_event.type))
+ if (event_media_type_ != packet_event.media_type)
return;
+
RtpTimestamp relative_rtp_timestamp =
GetRelativeRtpTimestamp(packet_event.rtp_timestamp);
PacketEventMap::iterator it =
@@ -178,6 +179,13 @@ void EncodingEventSubscriber::OnReceivePacketEvent(
base_packet_event_proto->add_event_timestamp_ms(
(packet_event.timestamp - base::TimeTicks()).InMilliseconds());
+ // |base_packet_event_proto| could have been created with a receiver event
+ // which does not have the packet size and we would need to overwrite it when
+ // we see a sender event, which does have the packet size.
+ if (packet_event.size > 0) {
+ base_packet_event_proto->set_size(packet_event.size);
+ }
+
if (packet_event_map_.size() > kMaxMapSize)
TransferPacketEvents(kNumMapEntriesToTransfer);
@@ -208,10 +216,6 @@ void EncodingEventSubscriber::GetEventsAndReset(LogMetadata* metadata,
Reset();
}
-bool EncodingEventSubscriber::ShouldProcessEvent(CastLoggingEvent event) {
- return GetEventMediaType(event) == event_media_type_;
-}
-
void EncodingEventSubscriber::TransferFrameEvents(size_t max_num_entries) {
DCHECK(frame_event_map_.size() >= max_num_entries);
diff --git a/media/cast/logging/encoding_event_subscriber.h b/media/cast/logging/encoding_event_subscriber.h
index c507499b27..ca2cccb5f7 100644
--- a/media/cast/logging/encoding_event_subscriber.h
+++ b/media/cast/logging/encoding_event_subscriber.h
@@ -72,8 +72,6 @@ class EncodingEventSubscriber : public RawEventSubscriber {
linked_ptr<media::cast::proto::AggregatedPacketEvent> >
PacketEventMap;
- bool ShouldProcessEvent(CastLoggingEvent event);
-
// Transfer up to |max_num_entries| smallest entries from |frame_event_map_|
// to |frame_event_storage_|. This helps keep size of |frame_event_map_| small
// and lookup speed fast.
diff --git a/media/cast/logging/encoding_event_subscriber_unittest.cc b/media/cast/logging/encoding_event_subscriber_unittest.cc
index 264ae93032..3d77a621b7 100644
--- a/media/cast/logging/encoding_event_subscriber_unittest.cc
+++ b/media/cast/logging/encoding_event_subscriber_unittest.cc
@@ -78,11 +78,13 @@ TEST_F(EncodingEventSubscriberTest, FrameEventTruncating) {
// Entry with RTP timestamp 0 should get dropped.
for (int i = 0; i < 11; i++) {
cast_environment_->Logging()->InsertFrameEvent(now,
- kVideoFrameCaptureBegin,
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
i * 100,
/*frame_id*/ 0);
cast_environment_->Logging()->InsertFrameEvent(now,
- kVideoFrameDecoded,
+ FRAME_DECODED,
+ VIDEO_EVENT,
i * 100,
/*frame_id*/ 0);
}
@@ -102,7 +104,8 @@ TEST_F(EncodingEventSubscriberTest, PacketEventTruncating) {
// Entry with RTP timestamp 0 should get dropped.
for (int i = 0; i < 11; i++) {
cast_environment_->Logging()->InsertPacketEvent(now,
- kAudioPacketReceived,
+ PACKET_RECEIVED,
+ AUDIO_EVENT,
/*rtp_timestamp*/ i * 100,
/*frame_id*/ 0,
/*packet_id*/ i,
@@ -123,13 +126,15 @@ TEST_F(EncodingEventSubscriberTest, EventFiltering) {
base::TimeTicks now(testing_clock_->NowTicks());
RtpTimestamp rtp_timestamp = 100;
cast_environment_->Logging()->InsertFrameEvent(now,
- kVideoFrameDecoded,
+ FRAME_DECODED,
+ VIDEO_EVENT,
rtp_timestamp,
/*frame_id*/ 0);
// This is an AUDIO_EVENT and shouldn't be processed by the subscriber.
cast_environment_->Logging()->InsertFrameEvent(now,
- kAudioFrameDecoded,
+ FRAME_DECODED,
+ AUDIO_EVENT,
rtp_timestamp,
/*frame_id*/ 0);
@@ -141,7 +146,7 @@ TEST_F(EncodingEventSubscriberTest, EventFiltering) {
linked_ptr<AggregatedFrameEvent> frame_event = *it;
ASSERT_EQ(1, frame_event->event_type_size());
- EXPECT_EQ(media::cast::proto::VIDEO_FRAME_DECODED,
+ EXPECT_EQ(media::cast::proto::FRAME_DECODED,
frame_event->event_type(0));
GetEventsAndReset();
@@ -153,7 +158,8 @@ TEST_F(EncodingEventSubscriberTest, FrameEvent) {
Init(VIDEO_EVENT);
base::TimeTicks now(testing_clock_->NowTicks());
RtpTimestamp rtp_timestamp = 100;
- cast_environment_->Logging()->InsertFrameEvent(now, kVideoFrameDecoded,
+ cast_environment_->Logging()->InsertFrameEvent(now, FRAME_DECODED,
+ VIDEO_EVENT,
rtp_timestamp,
/*frame_id*/ 0);
@@ -169,7 +175,7 @@ TEST_F(EncodingEventSubscriberTest, FrameEvent) {
EXPECT_EQ(relative_rtp_timestamp, event->relative_rtp_timestamp());
ASSERT_EQ(1, event->event_type_size());
- EXPECT_EQ(media::cast::proto::VIDEO_FRAME_DECODED, event->event_type(0));
+ EXPECT_EQ(media::cast::proto::FRAME_DECODED, event->event_type(0));
ASSERT_EQ(1, event->event_timestamp_ms_size());
EXPECT_EQ(InMilliseconds(now), event->event_timestamp_ms(0));
@@ -186,7 +192,7 @@ TEST_F(EncodingEventSubscriberTest, FrameEventDelay) {
RtpTimestamp rtp_timestamp = 100;
int delay_ms = 100;
cast_environment_->Logging()->InsertFrameEventWithDelay(
- now, kAudioPlayoutDelay, rtp_timestamp,
+ now, FRAME_PLAYOUT, AUDIO_EVENT, rtp_timestamp,
/*frame_id*/ 0, base::TimeDelta::FromMilliseconds(delay_ms));
GetEventsAndReset();
@@ -201,7 +207,7 @@ TEST_F(EncodingEventSubscriberTest, FrameEventDelay) {
EXPECT_EQ(relative_rtp_timestamp, event->relative_rtp_timestamp());
ASSERT_EQ(1, event->event_type_size());
- EXPECT_EQ(media::cast::proto::AUDIO_PLAYOUT_DELAY, event->event_type(0));
+ EXPECT_EQ(media::cast::proto::FRAME_PLAYOUT, event->event_type(0));
ASSERT_EQ(1, event->event_timestamp_ms_size());
EXPECT_EQ(InMilliseconds(now), event->event_timestamp_ms(0));
@@ -218,7 +224,7 @@ TEST_F(EncodingEventSubscriberTest, FrameEventSize) {
bool key_frame = true;
int target_bitrate = 1024;
cast_environment_->Logging()->InsertEncodedFrameEvent(
- now, kVideoFrameEncoded, rtp_timestamp,
+ now, FRAME_ENCODED, VIDEO_EVENT, rtp_timestamp,
/*frame_id*/ 0, size, key_frame, target_bitrate);
GetEventsAndReset();
@@ -233,7 +239,7 @@ TEST_F(EncodingEventSubscriberTest, FrameEventSize) {
EXPECT_EQ(relative_rtp_timestamp, event->relative_rtp_timestamp());
ASSERT_EQ(1, event->event_type_size());
- EXPECT_EQ(media::cast::proto::VIDEO_FRAME_ENCODED, event->event_type(0));
+ EXPECT_EQ(media::cast::proto::FRAME_ENCODED, event->event_type(0));
ASSERT_EQ(1, event->event_timestamp_ms_size());
EXPECT_EQ(InMilliseconds(now), event->event_timestamp_ms(0));
@@ -250,20 +256,20 @@ TEST_F(EncodingEventSubscriberTest, MultipleFrameEvents) {
RtpTimestamp rtp_timestamp2 = 200;
base::TimeTicks now1(testing_clock_->NowTicks());
cast_environment_->Logging()->InsertFrameEventWithDelay(
- now1, kAudioPlayoutDelay, rtp_timestamp1,
+ now1, FRAME_PLAYOUT, AUDIO_EVENT, rtp_timestamp1,
/*frame_id*/ 0, /*delay*/ base::TimeDelta::FromMilliseconds(100));
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(20));
base::TimeTicks now2(testing_clock_->NowTicks());
cast_environment_->Logging()->InsertEncodedFrameEvent(
- now2, kAudioFrameEncoded, rtp_timestamp2,
+ now2, FRAME_ENCODED, AUDIO_EVENT, rtp_timestamp2,
/*frame_id*/ 0, /*size*/ 123, /* key_frame - unused */ false,
/*target_bitrate - unused*/ 0);
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(20));
base::TimeTicks now3(testing_clock_->NowTicks());
cast_environment_->Logging()->InsertFrameEvent(
- now3, kAudioFrameDecoded, rtp_timestamp1, /*frame_id*/ 0);
+ now3, FRAME_DECODED, AUDIO_EVENT, rtp_timestamp1, /*frame_id*/ 0);
GetEventsAndReset();
@@ -277,8 +283,8 @@ TEST_F(EncodingEventSubscriberTest, MultipleFrameEvents) {
EXPECT_EQ(relative_rtp_timestamp, event->relative_rtp_timestamp());
ASSERT_EQ(2, event->event_type_size());
- EXPECT_EQ(media::cast::proto::AUDIO_PLAYOUT_DELAY, event->event_type(0));
- EXPECT_EQ(media::cast::proto::AUDIO_FRAME_DECODED, event->event_type(1));
+ EXPECT_EQ(media::cast::proto::FRAME_PLAYOUT, event->event_type(0));
+ EXPECT_EQ(media::cast::proto::FRAME_DECODED, event->event_type(1));
ASSERT_EQ(2, event->event_timestamp_ms_size());
EXPECT_EQ(InMilliseconds(now1), event->event_timestamp_ms(0));
@@ -294,7 +300,7 @@ TEST_F(EncodingEventSubscriberTest, MultipleFrameEvents) {
EXPECT_EQ(relative_rtp_timestamp, event->relative_rtp_timestamp());
ASSERT_EQ(1, event->event_type_size());
- EXPECT_EQ(media::cast::proto::AUDIO_FRAME_ENCODED, event->event_type(0));
+ EXPECT_EQ(media::cast::proto::FRAME_ENCODED, event->event_type(0));
ASSERT_EQ(1, event->event_timestamp_ms_size());
EXPECT_EQ(InMilliseconds(now2), event->event_timestamp_ms(0));
@@ -309,7 +315,8 @@ TEST_F(EncodingEventSubscriberTest, PacketEvent) {
int packet_id = 2;
int size = 100;
cast_environment_->Logging()->InsertPacketEvent(
- now, kAudioPacketReceived, rtp_timestamp, /*frame_id*/ 0, packet_id,
+ now, PACKET_RECEIVED, AUDIO_EVENT,
+ rtp_timestamp, /*frame_id*/ 0, packet_id,
/*max_packet_id*/ 10, size);
GetEventsAndReset();
@@ -327,7 +334,7 @@ TEST_F(EncodingEventSubscriberTest, PacketEvent) {
const BasePacketEvent& base_event = event->base_packet_event(0);
EXPECT_EQ(packet_id, base_event.packet_id());
ASSERT_EQ(1, base_event.event_type_size());
- EXPECT_EQ(media::cast::proto::AUDIO_PACKET_RECEIVED,
+ EXPECT_EQ(media::cast::proto::PACKET_RECEIVED,
base_event.event_type(0));
ASSERT_EQ(1, base_event.event_timestamp_ms_size());
EXPECT_EQ(InMilliseconds(now), base_event.event_timestamp_ms(0));
@@ -344,7 +351,8 @@ TEST_F(EncodingEventSubscriberTest, MultiplePacketEventsForPacket) {
int packet_id = 2;
int size = 100;
cast_environment_->Logging()->InsertPacketEvent(now1,
- kVideoPacketSentToNetwork,
+ PACKET_SENT_TO_NETWORK,
+ VIDEO_EVENT,
rtp_timestamp,
/*frame_id*/ 0,
packet_id,
@@ -354,7 +362,8 @@ TEST_F(EncodingEventSubscriberTest, MultiplePacketEventsForPacket) {
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(20));
base::TimeTicks now2(testing_clock_->NowTicks());
cast_environment_->Logging()->InsertPacketEvent(now2,
- kVideoPacketRetransmitted,
+ PACKET_RETRANSMITTED,
+ VIDEO_EVENT,
rtp_timestamp,
/*frame_id*/ 0,
packet_id,
@@ -376,9 +385,9 @@ TEST_F(EncodingEventSubscriberTest, MultiplePacketEventsForPacket) {
const BasePacketEvent& base_event = event->base_packet_event(0);
EXPECT_EQ(packet_id, base_event.packet_id());
ASSERT_EQ(2, base_event.event_type_size());
- EXPECT_EQ(media::cast::proto::VIDEO_PACKET_SENT_TO_NETWORK,
+ EXPECT_EQ(media::cast::proto::PACKET_SENT_TO_NETWORK,
base_event.event_type(0));
- EXPECT_EQ(media::cast::proto::VIDEO_PACKET_RETRANSMITTED,
+ EXPECT_EQ(media::cast::proto::PACKET_RETRANSMITTED,
base_event.event_type(1));
ASSERT_EQ(2, base_event.event_timestamp_ms_size());
EXPECT_EQ(InMilliseconds(now1), base_event.event_timestamp_ms(0));
@@ -393,7 +402,8 @@ TEST_F(EncodingEventSubscriberTest, MultiplePacketEventsForFrame) {
int packet_id_2 = 3;
int size = 100;
cast_environment_->Logging()->InsertPacketEvent(now1,
- kVideoPacketSentToNetwork,
+ PACKET_SENT_TO_NETWORK,
+ VIDEO_EVENT,
rtp_timestamp,
/*frame_id*/ 0,
packet_id_1,
@@ -403,7 +413,8 @@ TEST_F(EncodingEventSubscriberTest, MultiplePacketEventsForFrame) {
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(20));
base::TimeTicks now2(testing_clock_->NowTicks());
cast_environment_->Logging()->InsertPacketEvent(now2,
- kVideoPacketRetransmitted,
+ PACKET_RETRANSMITTED,
+ VIDEO_EVENT,
rtp_timestamp,
/*frame_id*/ 0,
packet_id_2,
@@ -425,7 +436,7 @@ TEST_F(EncodingEventSubscriberTest, MultiplePacketEventsForFrame) {
const BasePacketEvent& base_event = event->base_packet_event(0);
EXPECT_EQ(packet_id_1, base_event.packet_id());
ASSERT_EQ(1, base_event.event_type_size());
- EXPECT_EQ(media::cast::proto::VIDEO_PACKET_SENT_TO_NETWORK,
+ EXPECT_EQ(media::cast::proto::PACKET_SENT_TO_NETWORK,
base_event.event_type(0));
ASSERT_EQ(1, base_event.event_timestamp_ms_size());
EXPECT_EQ(InMilliseconds(now1), base_event.event_timestamp_ms(0));
@@ -433,7 +444,7 @@ TEST_F(EncodingEventSubscriberTest, MultiplePacketEventsForFrame) {
const BasePacketEvent& base_event_2 = event->base_packet_event(1);
EXPECT_EQ(packet_id_2, base_event_2.packet_id());
ASSERT_EQ(1, base_event_2.event_type_size());
- EXPECT_EQ(media::cast::proto::VIDEO_PACKET_RETRANSMITTED,
+ EXPECT_EQ(media::cast::proto::PACKET_RETRANSMITTED,
base_event_2.event_type(0));
ASSERT_EQ(1, base_event_2.event_timestamp_ms_size());
EXPECT_EQ(InMilliseconds(now2), base_event_2.event_timestamp_ms(0));
@@ -448,7 +459,8 @@ TEST_F(EncodingEventSubscriberTest, MultiplePacketEvents) {
int packet_id_2 = 3;
int size = 100;
cast_environment_->Logging()->InsertPacketEvent(now1,
- kVideoPacketSentToNetwork,
+ PACKET_SENT_TO_NETWORK,
+ VIDEO_EVENT,
rtp_timestamp_1,
/*frame_id*/ 0,
packet_id_1,
@@ -458,7 +470,8 @@ TEST_F(EncodingEventSubscriberTest, MultiplePacketEvents) {
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(20));
base::TimeTicks now2(testing_clock_->NowTicks());
cast_environment_->Logging()->InsertPacketEvent(now2,
- kVideoPacketRetransmitted,
+ PACKET_RETRANSMITTED,
+ VIDEO_EVENT,
rtp_timestamp_2,
/*frame_id*/ 0,
packet_id_2,
@@ -480,7 +493,7 @@ TEST_F(EncodingEventSubscriberTest, MultiplePacketEvents) {
const BasePacketEvent& base_event = event->base_packet_event(0);
EXPECT_EQ(packet_id_1, base_event.packet_id());
ASSERT_EQ(1, base_event.event_type_size());
- EXPECT_EQ(media::cast::proto::VIDEO_PACKET_SENT_TO_NETWORK,
+ EXPECT_EQ(media::cast::proto::PACKET_SENT_TO_NETWORK,
base_event.event_type(0));
ASSERT_EQ(1, base_event.event_timestamp_ms_size());
EXPECT_EQ(InMilliseconds(now1), base_event.event_timestamp_ms(0));
@@ -496,7 +509,7 @@ TEST_F(EncodingEventSubscriberTest, MultiplePacketEvents) {
const BasePacketEvent& base_event_2 = event->base_packet_event(0);
EXPECT_EQ(packet_id_2, base_event_2.packet_id());
ASSERT_EQ(1, base_event_2.event_type_size());
- EXPECT_EQ(media::cast::proto::VIDEO_PACKET_RETRANSMITTED,
+ EXPECT_EQ(media::cast::proto::PACKET_RETRANSMITTED,
base_event_2.event_type(0));
ASSERT_EQ(1, base_event_2.event_timestamp_ms_size());
EXPECT_EQ(InMilliseconds(now2), base_event_2.event_timestamp_ms(0));
@@ -508,12 +521,14 @@ TEST_F(EncodingEventSubscriberTest, FirstRtpTimestamp) {
base::TimeTicks now(testing_clock_->NowTicks());
cast_environment_->Logging()->InsertFrameEvent(now,
- kVideoFrameCaptureBegin,
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
rtp_timestamp,
/*frame_id*/ 0);
cast_environment_->Logging()->InsertFrameEvent(now,
- kVideoFrameCaptureEnd,
+ FRAME_CAPTURE_END,
+ VIDEO_EVENT,
rtp_timestamp + 30,
/*frame_id*/ 1);
@@ -531,7 +546,8 @@ TEST_F(EncodingEventSubscriberTest, FirstRtpTimestamp) {
rtp_timestamp = 67890;
cast_environment_->Logging()->InsertFrameEvent(now,
- kVideoFrameCaptureBegin,
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
rtp_timestamp,
/*frame_id*/ 0);
GetEventsAndReset();
@@ -545,13 +561,15 @@ TEST_F(EncodingEventSubscriberTest, RelativeRtpTimestampWrapAround) {
base::TimeTicks now(testing_clock_->NowTicks());
cast_environment_->Logging()->InsertFrameEvent(now,
- kVideoFrameCaptureBegin,
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
rtp_timestamp,
/*frame_id*/ 0);
// RtpTimestamp has now wrapped around.
cast_environment_->Logging()->InsertFrameEvent(now,
- kVideoFrameCaptureEnd,
+ FRAME_CAPTURE_END,
+ VIDEO_EVENT,
rtp_timestamp + 30,
/*frame_id*/ 1);
@@ -571,7 +589,8 @@ TEST_F(EncodingEventSubscriberTest, MaxEventsPerProto) {
RtpTimestamp rtp_timestamp = 100;
for (int i = 0; i < kMaxEventsPerProto + 1; i++) {
cast_environment_->Logging()->InsertFrameEvent(testing_clock_->NowTicks(),
- kVideoAckReceived,
+ FRAME_ACK_RECEIVED,
+ VIDEO_EVENT,
rtp_timestamp,
/*frame_id*/ 0);
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(30));
@@ -590,7 +609,8 @@ TEST_F(EncodingEventSubscriberTest, MaxEventsPerProto) {
for (int i = 0; i < kMaxPacketsPerFrame + 1; i++) {
cast_environment_->Logging()->InsertPacketEvent(
testing_clock_->NowTicks(),
- kVideoPacketRetransmitted,
+ PACKET_SENT_TO_NETWORK,
+ VIDEO_EVENT,
rtp_timestamp,
/*frame_id*/ 0,
i,
@@ -618,7 +638,8 @@ TEST_F(EncodingEventSubscriberTest, MaxEventsPerProto) {
for (int j = 0; j < kMaxEventsPerProto + 1; j++) {
cast_environment_->Logging()->InsertPacketEvent(
testing_clock_->NowTicks(),
- kVideoPacketRetransmitted,
+ PACKET_SENT_TO_NETWORK,
+ VIDEO_EVENT,
rtp_timestamp,
/*frame_id*/ 0,
0,
diff --git a/media/cast/logging/log_deserializer.cc b/media/cast/logging/log_deserializer.cc
index 5621be0321..a4c79b3de9 100644
--- a/media/cast/logging/log_deserializer.cc
+++ b/media/cast/logging/log_deserializer.cc
@@ -33,7 +33,7 @@ void MergePacketEvent(const AggregatedPacketEvent& from,
const BasePacketEvent& from_base_event = from.base_packet_event(i);
bool merged = false;
for (int j = 0; j < to->base_packet_event_size(); j++) {
- BasePacketEvent* to_base_event = to->mutable_base_packet_event(i);
+ BasePacketEvent* to_base_event = to->mutable_base_packet_event(j);
if (from_base_event.packet_id() == to_base_event->packet_id()) {
to_base_event->MergeFrom(from_base_event);
merged = true;
@@ -51,12 +51,14 @@ void MergeFrameEvent(const AggregatedFrameEvent& from,
linked_ptr<AggregatedFrameEvent> to) {
to->mutable_event_type()->MergeFrom(from.event_type());
to->mutable_event_timestamp_ms()->MergeFrom(from.event_timestamp_ms());
- if (!to->has_encoded_frame_size())
+ if (!to->has_encoded_frame_size() && from.has_encoded_frame_size())
to->set_encoded_frame_size(from.encoded_frame_size());
- if (!to->has_delay_millis())
+ if (!to->has_delay_millis() && from.has_delay_millis())
to->set_delay_millis(from.delay_millis());
- if (!to->has_key_frame())
+ if (!to->has_key_frame() && from.has_key_frame())
to->set_key_frame(from.key_frame());
+ if (!to->has_target_bitrate() && from.has_target_bitrate())
+ to->set_target_bitrate(from.target_bitrate());
}
bool PopulateDeserializedLog(base::BigEndianReader* reader,
diff --git a/media/cast/logging/logging_defines.cc b/media/cast/logging/logging_defines.cc
index bcb9c376b2..d0dd5c8e57 100644
--- a/media/cast/logging/logging_defines.cc
+++ b/media/cast/logging/logging_defines.cc
@@ -7,7 +7,7 @@
#include "base/logging.h"
#define ENUM_TO_STRING(enum) \
- case k##enum: \
+ case enum: \
return #enum
namespace media {
@@ -15,80 +15,25 @@ namespace cast {
const char* CastLoggingToString(CastLoggingEvent event) {
switch (event) {
- // Can happen if the sender and receiver of RTCP log messages are not
- // aligned.
- ENUM_TO_STRING(Unknown);
- ENUM_TO_STRING(RttMs);
- ENUM_TO_STRING(PacketLoss);
- ENUM_TO_STRING(JitterMs);
- ENUM_TO_STRING(VideoAckReceived);
- ENUM_TO_STRING(RembBitrate);
- ENUM_TO_STRING(AudioAckSent);
- ENUM_TO_STRING(VideoAckSent);
- ENUM_TO_STRING(AudioFrameCaptureBegin);
- ENUM_TO_STRING(AudioFrameCaptureEnd);
- ENUM_TO_STRING(AudioFrameEncoded);
- ENUM_TO_STRING(AudioPlayoutDelay);
- ENUM_TO_STRING(AudioFrameDecoded);
- ENUM_TO_STRING(VideoFrameCaptureBegin);
- ENUM_TO_STRING(VideoFrameCaptureEnd);
- ENUM_TO_STRING(VideoFrameSentToEncoder);
- ENUM_TO_STRING(VideoFrameEncoded);
- ENUM_TO_STRING(VideoFrameDecoded);
- ENUM_TO_STRING(VideoRenderDelay);
- ENUM_TO_STRING(AudioPacketSentToNetwork);
- ENUM_TO_STRING(VideoPacketSentToNetwork);
- ENUM_TO_STRING(AudioPacketRetransmitted);
- ENUM_TO_STRING(VideoPacketRetransmitted);
- ENUM_TO_STRING(AudioPacketReceived);
- ENUM_TO_STRING(VideoPacketReceived);
- ENUM_TO_STRING(DuplicateAudioPacketReceived);
- ENUM_TO_STRING(DuplicateVideoPacketReceived);
+ ENUM_TO_STRING(UNKNOWN);
+ ENUM_TO_STRING(FRAME_CAPTURE_BEGIN);
+ ENUM_TO_STRING(FRAME_CAPTURE_END);
+ ENUM_TO_STRING(FRAME_ENCODED);
+ ENUM_TO_STRING(FRAME_ACK_RECEIVED);
+ ENUM_TO_STRING(FRAME_ACK_SENT);
+ ENUM_TO_STRING(FRAME_DECODED);
+ ENUM_TO_STRING(FRAME_PLAYOUT);
+ ENUM_TO_STRING(PACKET_SENT_TO_NETWORK);
+ ENUM_TO_STRING(PACKET_RETRANSMITTED);
+ ENUM_TO_STRING(PACKET_RECEIVED);
}
NOTREACHED();
return "";
}
-EventMediaType GetEventMediaType(CastLoggingEvent event) {
- switch (event) {
- case kUnknown:
- case kRttMs:
- case kPacketLoss:
- case kJitterMs:
- case kRembBitrate:
- return OTHER_EVENT;
- case kAudioAckSent:
- case kAudioFrameCaptureBegin:
- case kAudioFrameCaptureEnd:
- case kAudioFrameEncoded:
- case kAudioPlayoutDelay:
- case kAudioFrameDecoded:
- case kAudioPacketSentToNetwork:
- case kAudioPacketRetransmitted:
- case kAudioPacketReceived:
- case kDuplicateAudioPacketReceived:
- return AUDIO_EVENT;
- case kVideoAckReceived:
- case kVideoAckSent:
- case kVideoFrameCaptureBegin:
- case kVideoFrameCaptureEnd:
- case kVideoFrameSentToEncoder:
- case kVideoFrameEncoded:
- case kVideoFrameDecoded:
- case kVideoRenderDelay:
- case kVideoPacketSentToNetwork:
- case kVideoPacketRetransmitted:
- case kVideoPacketReceived:
- case kDuplicateVideoPacketReceived:
- return VIDEO_EVENT;
- }
- NOTREACHED();
- return OTHER_EVENT;
-}
-
FrameEvent::FrameEvent()
- : rtp_timestamp(0u), frame_id(kFrameIdUnknown), size(0u), type(kUnknown),
- key_frame(false), target_bitrate(0) {}
+ : rtp_timestamp(0u), frame_id(kFrameIdUnknown), size(0u), type(UNKNOWN),
+ media_type(UNKNOWN_EVENT), key_frame(false), target_bitrate(0) {}
FrameEvent::~FrameEvent() {}
PacketEvent::PacketEvent()
@@ -97,7 +42,8 @@ PacketEvent::PacketEvent()
max_packet_id(0),
packet_id(0),
size(0),
- type(kUnknown) {}
+ type(UNKNOWN),
+ media_type(UNKNOWN_EVENT) {}
PacketEvent::~PacketEvent() {}
} // namespace cast
diff --git a/media/cast/logging/logging_defines.h b/media/cast/logging/logging_defines.h
index f1d5c81620..b3f3841ffb 100644
--- a/media/cast/logging/logging_defines.h
+++ b/media/cast/logging/logging_defines.h
@@ -19,50 +19,33 @@ static const uint32 kFrameIdUnknown = 0xFFFFFFFF;
typedef uint32 RtpTimestamp;
enum CastLoggingEvent {
- kUnknown,
- // Generic events. These are no longer used.
- kRttMs,
- kPacketLoss,
- kJitterMs,
- kVideoAckReceived, // Sender side frame event.
- kRembBitrate, // Generic event. No longer used.
+ UNKNOWN,
+ // Sender side frame events.
+ FRAME_CAPTURE_BEGIN,
+ FRAME_CAPTURE_END,
+ FRAME_ENCODED,
+ FRAME_ACK_RECEIVED,
// Receiver side frame events.
- kAudioAckSent,
- kVideoAckSent,
- // Audio sender.
- kAudioFrameCaptureBegin,
- kAudioFrameCaptureEnd,
- kAudioFrameEncoded,
- // Audio receiver.
- kAudioFrameDecoded,
- kAudioPlayoutDelay,
- // Video sender.
- kVideoFrameCaptureBegin,
- kVideoFrameCaptureEnd,
- kVideoFrameSentToEncoder, // Deprecated
- kVideoFrameEncoded,
- // Video receiver.
- kVideoFrameDecoded,
- kVideoRenderDelay,
- // Send-side packet events.
- kAudioPacketSentToNetwork,
- kVideoPacketSentToNetwork,
- kAudioPacketRetransmitted,
- kVideoPacketRetransmitted,
- // Receive-side packet events.
- kAudioPacketReceived,
- kVideoPacketReceived,
- kDuplicateAudioPacketReceived,
- kDuplicateVideoPacketReceived,
- kNumOfLoggingEvents = kDuplicateVideoPacketReceived
+ FRAME_ACK_SENT,
+ FRAME_DECODED,
+ FRAME_PLAYOUT,
+ // Sender side packet events.
+ PACKET_SENT_TO_NETWORK,
+ PACKET_RETRANSMITTED,
+ // Receiver side packet events.
+ PACKET_RECEIVED,
+ kNumOfLoggingEvents = PACKET_RECEIVED
};
const char* CastLoggingToString(CastLoggingEvent event);
// CastLoggingEvent are classified into one of three following types.
-enum EventMediaType { AUDIO_EVENT, VIDEO_EVENT, OTHER_EVENT };
-
-EventMediaType GetEventMediaType(CastLoggingEvent event);
+enum EventMediaType {
+ AUDIO_EVENT,
+ VIDEO_EVENT,
+ UNKNOWN_EVENT,
+ EVENT_MEDIA_TYPE_LAST = UNKNOWN_EVENT
+};
struct FrameEvent {
FrameEvent();
@@ -70,7 +53,8 @@ struct FrameEvent {
RtpTimestamp rtp_timestamp;
uint32 frame_id;
- // Size of encoded frame. Only set for kVideoFrameEncoded event.
+
+ // Size of encoded frame. Only set for FRAME_ENCODED event.
size_t size;
// Time of event logged.
@@ -78,15 +62,16 @@ struct FrameEvent {
CastLoggingEvent type;
- // Render / playout delay. Only set for kAudioPlayoutDelay and
- // kVideoRenderDelay events.
+ EventMediaType media_type;
+
+ // Render / playout delay. Only set for FRAME_PLAYOUT events.
base::TimeDelta delay_delta;
- // Whether the frame is a key frame. Only set for kVideoFrameEncoded event.
+ // Whether the frame is a key frame. Only set for video FRAME_ENCODED event.
bool key_frame;
// The requested target bitrate of the encoder at the time the frame is
- // encoded. Only set for kVideoFrameEncoded event.
+ // encoded. Only set for video FRAME_ENCODED event.
int target_bitrate;
};
@@ -103,6 +88,7 @@ struct PacketEvent {
// Time of event logged.
base::TimeTicks timestamp;
CastLoggingEvent type;
+ EventMediaType media_type;
};
} // namespace cast
diff --git a/media/cast/logging/logging_impl.cc b/media/cast/logging/logging_impl.cc
index 7d975e268e..1143d1be21 100644
--- a/media/cast/logging/logging_impl.cc
+++ b/media/cast/logging/logging_impl.cc
@@ -19,33 +19,39 @@ LoggingImpl::LoggingImpl() {
LoggingImpl::~LoggingImpl() {}
void LoggingImpl::InsertFrameEvent(const base::TimeTicks& time_of_event,
- CastLoggingEvent event, uint32 rtp_timestamp,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 rtp_timestamp,
uint32 frame_id) {
DCHECK(thread_checker_.CalledOnValidThread());
- raw_.InsertFrameEvent(time_of_event, event, rtp_timestamp, frame_id);
+ raw_.InsertFrameEvent(time_of_event, event, event_media_type,
+ rtp_timestamp, frame_id);
}
void LoggingImpl::InsertEncodedFrameEvent(const base::TimeTicks& time_of_event,
CastLoggingEvent event,
+ EventMediaType event_media_type,
uint32 rtp_timestamp,
uint32 frame_id, int frame_size,
bool key_frame,
int target_bitrate) {
DCHECK(thread_checker_.CalledOnValidThread());
- raw_.InsertEncodedFrameEvent(time_of_event, event, rtp_timestamp, frame_id,
- frame_size, key_frame, target_bitrate);
+ raw_.InsertEncodedFrameEvent(time_of_event, event, event_media_type,
+ rtp_timestamp, frame_id, frame_size, key_frame, target_bitrate);
}
void LoggingImpl::InsertFrameEventWithDelay(
const base::TimeTicks& time_of_event, CastLoggingEvent event,
- uint32 rtp_timestamp, uint32 frame_id, base::TimeDelta delay) {
+ EventMediaType event_media_type, uint32 rtp_timestamp, uint32 frame_id,
+ base::TimeDelta delay) {
DCHECK(thread_checker_.CalledOnValidThread());
- raw_.InsertFrameEventWithDelay(time_of_event, event, rtp_timestamp,
- frame_id, delay);
+ raw_.InsertFrameEventWithDelay(time_of_event, event, event_media_type,
+ rtp_timestamp, frame_id, delay);
}
void LoggingImpl::InsertSinglePacketEvent(const base::TimeTicks& time_of_event,
CastLoggingEvent event,
+ EventMediaType event_media_type,
const Packet& packet) {
DCHECK(thread_checker_.CalledOnValidThread());
@@ -64,6 +70,7 @@ void LoggingImpl::InsertSinglePacketEvent(const base::TimeTicks& time_of_event,
// rtp_timestamp is enough - no need for frame_id as well.
InsertPacketEvent(time_of_event,
event,
+ event_media_type,
rtp_timestamp,
kFrameIdUnknown,
packet_id,
@@ -73,22 +80,25 @@ void LoggingImpl::InsertSinglePacketEvent(const base::TimeTicks& time_of_event,
void LoggingImpl::InsertPacketListEvent(const base::TimeTicks& time_of_event,
CastLoggingEvent event,
+ EventMediaType event_media_type,
const PacketList& packets) {
DCHECK(thread_checker_.CalledOnValidThread());
for (PacketList::const_iterator it = packets.begin(); it != packets.end();
++it) {
- InsertSinglePacketEvent(time_of_event, event, (*it)->data);
+ InsertSinglePacketEvent(time_of_event, event, event_media_type,
+ (*it)->data);
}
}
void LoggingImpl::InsertPacketEvent(const base::TimeTicks& time_of_event,
CastLoggingEvent event,
+ EventMediaType event_media_type,
uint32 rtp_timestamp, uint32 frame_id,
uint16 packet_id, uint16 max_packet_id,
size_t size) {
DCHECK(thread_checker_.CalledOnValidThread());
- raw_.InsertPacketEvent(time_of_event, event, rtp_timestamp, frame_id,
- packet_id, max_packet_id, size);
+ raw_.InsertPacketEvent(time_of_event, event, event_media_type,
+ rtp_timestamp, frame_id, packet_id, max_packet_id, size);
}
void LoggingImpl::AddRawEventSubscriber(RawEventSubscriber* subscriber) {
diff --git a/media/cast/logging/logging_impl.h b/media/cast/logging/logging_impl.h
index 2efef55b4d..ba453c8c8e 100644
--- a/media/cast/logging/logging_impl.h
+++ b/media/cast/logging/logging_impl.h
@@ -26,27 +26,35 @@ class LoggingImpl {
// Note: All methods below should be called from the same thread.
void InsertFrameEvent(const base::TimeTicks& time_of_event,
- CastLoggingEvent event, uint32 rtp_timestamp,
- uint32 frame_id);
+ CastLoggingEvent event, EventMediaType event_media_type,
+ uint32 rtp_timestamp, uint32 frame_id);
void InsertEncodedFrameEvent(const base::TimeTicks& time_of_event,
- CastLoggingEvent event, uint32 rtp_timestamp,
- uint32 frame_id, int frame_size, bool key_frame,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 rtp_timestamp, uint32 frame_id,
+ int frame_size, bool key_frame,
int target_bitrate);
void InsertFrameEventWithDelay(const base::TimeTicks& time_of_event,
- CastLoggingEvent event, uint32 rtp_timestamp,
- uint32 frame_id, base::TimeDelta delay);
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 rtp_timestamp, uint32 frame_id,
+ base::TimeDelta delay);
void InsertSinglePacketEvent(const base::TimeTicks& time_of_event,
CastLoggingEvent event,
+ EventMediaType event_media_type,
const Packet& packet);
void InsertPacketListEvent(const base::TimeTicks& time_of_event,
- CastLoggingEvent event, const PacketList& packets);
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ const PacketList& packets);
void InsertPacketEvent(const base::TimeTicks& time_of_event,
- CastLoggingEvent event, uint32 rtp_timestamp,
+ CastLoggingEvent event,
+ EventMediaType event_media_type, uint32 rtp_timestamp,
uint32 frame_id, uint16 packet_id,
uint16 max_packet_id, size_t size);
diff --git a/media/cast/logging/logging_impl_unittest.cc b/media/cast/logging/logging_impl_unittest.cc
index cced0ace53..712d76bae6 100644
--- a/media/cast/logging/logging_impl_unittest.cc
+++ b/media/cast/logging/logging_impl_unittest.cc
@@ -53,7 +53,7 @@ TEST_F(LoggingImplTest, BasicFrameLogging) {
do {
now = testing_clock_.NowTicks();
logging_.InsertFrameEvent(
- now, kAudioFrameCaptureBegin, rtp_timestamp, frame_id);
+ now, FRAME_CAPTURE_BEGIN, VIDEO_EVENT, rtp_timestamp, frame_id);
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
rtp_timestamp += kFrameIntervalMs * 90;
@@ -84,7 +84,7 @@ TEST_F(LoggingImplTest, FrameLoggingWithSize) {
base::RandInt(-kRandomSizeInterval, kRandomSizeInterval);
sum_size += static_cast<size_t>(size);
logging_.InsertEncodedFrameEvent(testing_clock_.NowTicks(),
- kVideoFrameEncoded, rtp_timestamp,
+ FRAME_ENCODED, VIDEO_EVENT, rtp_timestamp,
frame_id, size, true, target_bitrate);
testing_clock_.Advance(base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
rtp_timestamp += kFrameIntervalMs * 90;
@@ -112,7 +112,8 @@ TEST_F(LoggingImplTest, FrameLoggingWithDelay) {
base::RandInt(-kRandomSizeInterval, kRandomSizeInterval);
logging_.InsertFrameEventWithDelay(
testing_clock_.NowTicks(),
- kAudioFrameCaptureBegin,
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
rtp_timestamp,
frame_id,
base::TimeDelta::FromMilliseconds(delay));
@@ -136,21 +137,23 @@ TEST_F(LoggingImplTest, MultipleEventFrameLogging) {
uint32 num_events = 0u;
do {
logging_.InsertFrameEvent(testing_clock_.NowTicks(),
- kAudioFrameCaptureBegin,
+ FRAME_CAPTURE_END,
+ VIDEO_EVENT,
rtp_timestamp,
frame_id);
++num_events;
if (frame_id % 2) {
logging_.InsertEncodedFrameEvent(testing_clock_.NowTicks(),
- kAudioFrameEncoded, rtp_timestamp,
+ FRAME_ENCODED, AUDIO_EVENT,
+ rtp_timestamp,
frame_id, 1500, true, 0);
} else if (frame_id % 3) {
- logging_.InsertFrameEvent(testing_clock_.NowTicks(), kVideoFrameDecoded,
- rtp_timestamp, frame_id);
+ logging_.InsertFrameEvent(testing_clock_.NowTicks(), FRAME_DECODED,
+ VIDEO_EVENT, rtp_timestamp, frame_id);
} else {
logging_.InsertFrameEventWithDelay(
- testing_clock_.NowTicks(), kVideoRenderDelay, rtp_timestamp, frame_id,
- base::TimeDelta::FromMilliseconds(20));
+ testing_clock_.NowTicks(), FRAME_PLAYOUT, VIDEO_EVENT,
+ rtp_timestamp, frame_id, base::TimeDelta::FromMilliseconds(20));
}
++num_events;
@@ -185,12 +188,13 @@ TEST_F(LoggingImplTest, PacketLogging) {
latest_time = testing_clock_.NowTicks();
++num_packets;
logging_.InsertPacketEvent(latest_time,
- kDuplicateVideoPacketReceived,
- rtp_timestamp,
- frame_id,
- i,
- kNumPacketsPerFrame,
- size);
+ PACKET_RECEIVED,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ frame_id,
+ i,
+ kNumPacketsPerFrame,
+ size);
}
testing_clock_.Advance(base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
rtp_timestamp += kFrameIntervalMs * 90;
@@ -211,7 +215,8 @@ TEST_F(LoggingImplTest, MultipleRawEventSubscribers) {
logging_.AddRawEventSubscriber(&event_subscriber_2);
logging_.InsertFrameEvent(testing_clock_.NowTicks(),
- kAudioFrameCaptureBegin,
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
/*rtp_timestamp*/ 0u,
/*frame_id*/ 0u);
diff --git a/media/cast/logging/logging_raw.cc b/media/cast/logging/logging_raw.cc
index 272f734b84..229064d7b6 100644
--- a/media/cast/logging/logging_raw.cc
+++ b/media/cast/logging/logging_raw.cc
@@ -17,32 +17,39 @@ LoggingRaw::LoggingRaw() {}
LoggingRaw::~LoggingRaw() {}
void LoggingRaw::InsertFrameEvent(const base::TimeTicks& time_of_event,
- CastLoggingEvent event, uint32 rtp_timestamp,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 rtp_timestamp,
uint32 frame_id) {
- InsertBaseFrameEvent(time_of_event, event, frame_id, rtp_timestamp,
- base::TimeDelta(), 0, false, 0);
+ InsertBaseFrameEvent(time_of_event, event, event_media_type, frame_id,
+ rtp_timestamp, base::TimeDelta(), 0, false, 0);
}
void LoggingRaw::InsertEncodedFrameEvent(const base::TimeTicks& time_of_event,
CastLoggingEvent event,
+ EventMediaType event_media_type,
uint32 rtp_timestamp, uint32 frame_id,
int size, bool key_frame,
int target_bitrate) {
- InsertBaseFrameEvent(time_of_event, event, frame_id, rtp_timestamp,
- base::TimeDelta(), size, key_frame, target_bitrate);
+ InsertBaseFrameEvent(time_of_event, event, event_media_type,
+ frame_id, rtp_timestamp, base::TimeDelta(), size,
+ key_frame, target_bitrate);
}
void LoggingRaw::InsertFrameEventWithDelay(const base::TimeTicks& time_of_event,
CastLoggingEvent event,
+ EventMediaType event_media_type,
uint32 rtp_timestamp,
uint32 frame_id,
base::TimeDelta delay) {
- InsertBaseFrameEvent(time_of_event, event, frame_id, rtp_timestamp, delay,
- 0, false, 0);
+ InsertBaseFrameEvent(time_of_event, event, event_media_type, frame_id,
+ rtp_timestamp, delay, 0, false, 0);
}
void LoggingRaw::InsertBaseFrameEvent(const base::TimeTicks& time_of_event,
- CastLoggingEvent event, uint32 frame_id,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 frame_id,
uint32 rtp_timestamp,
base::TimeDelta delay, int size,
bool key_frame, int target_bitrate) {
@@ -52,6 +59,7 @@ void LoggingRaw::InsertBaseFrameEvent(const base::TimeTicks& time_of_event,
frame_event.size = size;
frame_event.timestamp = time_of_event;
frame_event.type = event;
+ frame_event.media_type = event_media_type;
frame_event.delay_delta = delay;
frame_event.key_frame = key_frame;
frame_event.target_bitrate = target_bitrate;
@@ -63,7 +71,9 @@ void LoggingRaw::InsertBaseFrameEvent(const base::TimeTicks& time_of_event,
}
void LoggingRaw::InsertPacketEvent(const base::TimeTicks& time_of_event,
- CastLoggingEvent event, uint32 rtp_timestamp,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 rtp_timestamp,
uint32 frame_id, uint16 packet_id,
uint16 max_packet_id, size_t size) {
PacketEvent packet_event;
@@ -74,6 +84,7 @@ void LoggingRaw::InsertPacketEvent(const base::TimeTicks& time_of_event,
packet_event.size = size;
packet_event.timestamp = time_of_event;
packet_event.type = event;
+ packet_event.media_type = event_media_type;
for (std::vector<RawEventSubscriber*>::const_iterator it =
subscribers_.begin();
it != subscribers_.end(); ++it) {
diff --git a/media/cast/logging/logging_raw.h b/media/cast/logging/logging_raw.h
index 98c2bfe741..8ed4a59960 100644
--- a/media/cast/logging/logging_raw.h
+++ b/media/cast/logging/logging_raw.h
@@ -27,31 +27,34 @@ class LoggingRaw : public base::NonThreadSafe {
// Inform of new event: two types of events: frame and packet.
// Frame events can be inserted with different parameters.
void InsertFrameEvent(const base::TimeTicks& time_of_event,
- CastLoggingEvent event, uint32 rtp_timestamp,
- uint32 frame_id);
+ CastLoggingEvent event, EventMediaType event_media_type,
+ uint32 rtp_timestamp, uint32 frame_id);
- // This function is only applicable for the following frame events:
- // kAudioFrameEncoded, kVideoFrameEncoded
+ // This function is only applicable for FRAME_ENCODED event.
// |size| - Size of encoded frame.
// |key_frame| - Whether the frame is a key frame. This field is only
- // applicable for kVideoFrameEncoded event.
+ // applicable for video event.
// |target_bitrate| - The target bitrate of the encoder the time the frame
- // was encoded. Only applicable for kVideoFrameEncoded event.
+ // was encoded. Only applicable for video event.
void InsertEncodedFrameEvent(const base::TimeTicks& time_of_event,
- CastLoggingEvent event, uint32 rtp_timestamp,
- uint32 frame_id, int size, bool key_frame,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 rtp_timestamp, uint32 frame_id,
+ int size, bool key_frame,
int target_bitrate);
// Render/playout delay
- // This function is only applicable for the following frame events:
- // kAudioPlayoutDelay, kVideoRenderDelay
+ // This function is only applicable for FRAME_PLAYOUT event.
void InsertFrameEventWithDelay(const base::TimeTicks& time_of_event,
- CastLoggingEvent event, uint32 rtp_timestamp,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 rtp_timestamp,
uint32 frame_id, base::TimeDelta delay);
// Insert a packet event.
void InsertPacketEvent(const base::TimeTicks& time_of_event,
- CastLoggingEvent event, uint32 rtp_timestamp,
+ CastLoggingEvent event,
+ EventMediaType event_media_type, uint32 rtp_timestamp,
uint32 frame_id, uint16 packet_id,
uint16 max_packet_id, size_t size);
@@ -68,9 +71,11 @@ class LoggingRaw : public base::NonThreadSafe {
private:
void InsertBaseFrameEvent(const base::TimeTicks& time_of_event,
- CastLoggingEvent event, uint32 frame_id,
- uint32 rtp_timestamp, base::TimeDelta delay,
- int size, bool key_frame, int target_bitrate);
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 frame_id, uint32 rtp_timestamp,
+ base::TimeDelta delay, int size, bool key_frame,
+ int target_bitrate);
// List of subscriber pointers. This class does not own the subscribers.
std::vector<RawEventSubscriber*> subscribers_;
diff --git a/media/cast/logging/logging_raw_unittest.cc b/media/cast/logging/logging_raw_unittest.cc
index 0949cd98f7..0b7c05aaac 100644
--- a/media/cast/logging/logging_raw_unittest.cc
+++ b/media/cast/logging/logging_raw_unittest.cc
@@ -25,11 +25,13 @@ class LoggingRawTest : public ::testing::Test {
};
TEST_F(LoggingRawTest, FrameEvent) {
- CastLoggingEvent event_type = kVideoFrameDecoded;
+ CastLoggingEvent event_type = FRAME_DECODED;
+ EventMediaType media_type = VIDEO_EVENT;
uint32 frame_id = 456u;
RtpTimestamp rtp_timestamp = 123u;
base::TimeTicks timestamp = base::TimeTicks();
- raw_.InsertFrameEvent(timestamp, event_type, rtp_timestamp, frame_id);
+ raw_.InsertFrameEvent(timestamp, event_type, media_type,
+ rtp_timestamp, frame_id);
event_subscriber_.GetPacketEventsAndReset(&packet_events_);
EXPECT_TRUE(packet_events_.empty());
@@ -41,19 +43,21 @@ TEST_F(LoggingRawTest, FrameEvent) {
EXPECT_EQ(0u, frame_events_[0].size);
EXPECT_EQ(timestamp, frame_events_[0].timestamp);
EXPECT_EQ(event_type, frame_events_[0].type);
+ EXPECT_EQ(media_type, frame_events_[0].media_type);
EXPECT_EQ(base::TimeDelta(), frame_events_[0].delay_delta);
}
TEST_F(LoggingRawTest, EncodedFrameEvent) {
- CastLoggingEvent event_type = kVideoFrameEncoded;
+ CastLoggingEvent event_type = FRAME_ENCODED;
+ EventMediaType media_type = VIDEO_EVENT;
uint32 frame_id = 456u;
RtpTimestamp rtp_timestamp = 123u;
base::TimeTicks timestamp = base::TimeTicks();
int size = 1024;
bool key_frame = true;
int target_bitrate = 4096;
- raw_.InsertEncodedFrameEvent(timestamp, event_type, rtp_timestamp, frame_id,
- size, key_frame, target_bitrate);
+ raw_.InsertEncodedFrameEvent(timestamp, event_type, media_type,
+ rtp_timestamp, frame_id, size, key_frame, target_bitrate);
event_subscriber_.GetPacketEventsAndReset(&packet_events_);
EXPECT_TRUE(packet_events_.empty());
@@ -65,19 +69,21 @@ TEST_F(LoggingRawTest, EncodedFrameEvent) {
EXPECT_EQ(size, static_cast<int>(frame_events_[0].size));
EXPECT_EQ(timestamp, frame_events_[0].timestamp);
EXPECT_EQ(event_type, frame_events_[0].type);
+ EXPECT_EQ(media_type, frame_events_[0].media_type);
EXPECT_EQ(base::TimeDelta(), frame_events_[0].delay_delta);
EXPECT_EQ(key_frame, frame_events_[0].key_frame);
EXPECT_EQ(target_bitrate, frame_events_[0].target_bitrate);
}
TEST_F(LoggingRawTest, FrameEventWithDelay) {
- CastLoggingEvent event_type = kVideoRenderDelay;
+ CastLoggingEvent event_type = FRAME_PLAYOUT;
+ EventMediaType media_type = VIDEO_EVENT;
uint32 frame_id = 456u;
RtpTimestamp rtp_timestamp = 123u;
base::TimeTicks timestamp = base::TimeTicks();
base::TimeDelta delay = base::TimeDelta::FromMilliseconds(20);
- raw_.InsertFrameEventWithDelay(timestamp, event_type, rtp_timestamp, frame_id,
- delay);
+ raw_.InsertFrameEventWithDelay(timestamp, event_type, media_type,
+ rtp_timestamp, frame_id, delay);
event_subscriber_.GetPacketEventsAndReset(&packet_events_);
EXPECT_TRUE(packet_events_.empty());
@@ -89,25 +95,28 @@ TEST_F(LoggingRawTest, FrameEventWithDelay) {
EXPECT_EQ(0u, frame_events_[0].size);
EXPECT_EQ(timestamp, frame_events_[0].timestamp);
EXPECT_EQ(event_type, frame_events_[0].type);
+ EXPECT_EQ(media_type, frame_events_[0].media_type);
EXPECT_EQ(delay, frame_events_[0].delay_delta);
}
TEST_F(LoggingRawTest, PacketEvent) {
- CastLoggingEvent event_type = kVideoPacketReceived;
+ CastLoggingEvent event_type = PACKET_RECEIVED;
+ EventMediaType media_type = VIDEO_EVENT;
uint32 frame_id = 456u;
uint16 packet_id = 1u;
uint16 max_packet_id = 10u;
RtpTimestamp rtp_timestamp = 123u;
base::TimeTicks timestamp = base::TimeTicks();
size_t size = 1024u;
- raw_.InsertPacketEvent(timestamp, event_type, rtp_timestamp, frame_id,
- packet_id, max_packet_id, size);
+ raw_.InsertPacketEvent(timestamp, event_type, media_type,
+ rtp_timestamp, frame_id, packet_id, max_packet_id, size);
event_subscriber_.GetFrameEventsAndReset(&frame_events_);
EXPECT_TRUE(frame_events_.empty());
event_subscriber_.GetPacketEventsAndReset(&packet_events_);
ASSERT_EQ(1u, packet_events_.size());
+
EXPECT_EQ(rtp_timestamp, packet_events_[0].rtp_timestamp);
EXPECT_EQ(frame_id, packet_events_[0].frame_id);
EXPECT_EQ(max_packet_id, packet_events_[0].max_packet_id);
@@ -115,6 +124,7 @@ TEST_F(LoggingRawTest, PacketEvent) {
EXPECT_EQ(size, packet_events_[0].size);
EXPECT_EQ(timestamp, packet_events_[0].timestamp);
EXPECT_EQ(event_type, packet_events_[0].type);
+ EXPECT_EQ(media_type, packet_events_[0].media_type);
}
TEST_F(LoggingRawTest, MultipleSubscribers) {
@@ -123,11 +133,13 @@ TEST_F(LoggingRawTest, MultipleSubscribers) {
// Now raw_ has two subscribers.
raw_.AddSubscriber(&event_subscriber_2);
- CastLoggingEvent event_type = kVideoFrameDecoded;
+ CastLoggingEvent event_type = FRAME_DECODED;
+ EventMediaType media_type = VIDEO_EVENT;
uint32 frame_id = 456u;
RtpTimestamp rtp_timestamp = 123u;
base::TimeTicks timestamp = base::TimeTicks();
- raw_.InsertFrameEvent(timestamp, event_type, rtp_timestamp, frame_id);
+ raw_.InsertFrameEvent(timestamp, event_type, media_type,
+ rtp_timestamp, frame_id);
event_subscriber_.GetPacketEventsAndReset(&packet_events_);
EXPECT_TRUE(packet_events_.empty());
@@ -139,6 +151,7 @@ TEST_F(LoggingRawTest, MultipleSubscribers) {
EXPECT_EQ(0u, frame_events_[0].size);
EXPECT_EQ(timestamp, frame_events_[0].timestamp);
EXPECT_EQ(event_type, frame_events_[0].type);
+ EXPECT_EQ(media_type, frame_events_[0].media_type);
EXPECT_EQ(base::TimeDelta(), frame_events_[0].delay_delta);
event_subscriber_2.GetPacketEventsAndReset(&packet_events_);
@@ -151,16 +164,18 @@ TEST_F(LoggingRawTest, MultipleSubscribers) {
EXPECT_EQ(0u, frame_events_[0].size);
EXPECT_EQ(timestamp, frame_events_[0].timestamp);
EXPECT_EQ(event_type, frame_events_[0].type);
+ EXPECT_EQ(media_type, frame_events_[0].media_type);
EXPECT_EQ(base::TimeDelta(), frame_events_[0].delay_delta);
// Remove event_subscriber_2, so it shouldn't receive events after this.
raw_.RemoveSubscriber(&event_subscriber_2);
- event_type = kAudioFrameDecoded;
+ media_type = AUDIO_EVENT;
frame_id = 789;
rtp_timestamp = 456;
timestamp = base::TimeTicks();
- raw_.InsertFrameEvent(timestamp, event_type, rtp_timestamp, frame_id);
+ raw_.InsertFrameEvent(timestamp, event_type, media_type,
+ rtp_timestamp, frame_id);
// |event_subscriber_| should still receive events.
event_subscriber_.GetFrameEventsAndReset(&frame_events_);
@@ -170,6 +185,7 @@ TEST_F(LoggingRawTest, MultipleSubscribers) {
EXPECT_EQ(0u, frame_events_[0].size);
EXPECT_EQ(timestamp, frame_events_[0].timestamp);
EXPECT_EQ(event_type, frame_events_[0].type);
+ EXPECT_EQ(media_type, frame_events_[0].media_type);
EXPECT_EQ(base::TimeDelta(), frame_events_[0].delay_delta);
event_subscriber_2.GetFrameEventsAndReset(&frame_events_);
diff --git a/media/cast/logging/proto/proto_utils.cc b/media/cast/logging/proto/proto_utils.cc
index 90517e1fa1..1f05616d83 100644
--- a/media/cast/logging/proto/proto_utils.cc
+++ b/media/cast/logging/proto/proto_utils.cc
@@ -6,47 +6,29 @@
#include "base/logging.h"
-#define TO_PROTO_ENUM(from_enum, to_enum) \
- case from_enum: \
- return media::cast::proto::to_enum
+#define TO_PROTO_ENUM(enum) \
+ case enum: \
+ return proto::enum
namespace media {
namespace cast {
-media::cast::proto::EventType ToProtoEventType(CastLoggingEvent event) {
+proto::EventType ToProtoEventType(CastLoggingEvent event) {
switch (event) {
- TO_PROTO_ENUM(kUnknown, UNKNOWN);
- TO_PROTO_ENUM(kRttMs, RTT_MS);
- TO_PROTO_ENUM(kPacketLoss, PACKET_LOSS);
- TO_PROTO_ENUM(kJitterMs, JITTER_MS);
- TO_PROTO_ENUM(kVideoAckReceived, VIDEO_ACK_RECEIVED);
- TO_PROTO_ENUM(kRembBitrate, REMB_BITRATE);
- TO_PROTO_ENUM(kAudioAckSent, AUDIO_ACK_SENT);
- TO_PROTO_ENUM(kVideoAckSent, VIDEO_ACK_SENT);
- TO_PROTO_ENUM(kAudioFrameCaptureEnd, AUDIO_FRAME_CAPTURE_END);
- TO_PROTO_ENUM(kAudioFrameCaptureBegin, AUDIO_FRAME_CAPTURE_BEGIN);
- TO_PROTO_ENUM(kAudioFrameEncoded, AUDIO_FRAME_ENCODED);
- TO_PROTO_ENUM(kAudioPlayoutDelay, AUDIO_PLAYOUT_DELAY);
- TO_PROTO_ENUM(kAudioFrameDecoded, AUDIO_FRAME_DECODED);
- TO_PROTO_ENUM(kVideoFrameCaptureBegin, VIDEO_FRAME_CAPTURE_BEGIN);
- TO_PROTO_ENUM(kVideoFrameCaptureEnd, VIDEO_FRAME_CAPTURE_END);
- TO_PROTO_ENUM(kVideoFrameSentToEncoder, VIDEO_FRAME_SENT_TO_ENCODER);
- TO_PROTO_ENUM(kVideoFrameEncoded, VIDEO_FRAME_ENCODED);
- TO_PROTO_ENUM(kVideoFrameDecoded, VIDEO_FRAME_DECODED);
- TO_PROTO_ENUM(kVideoRenderDelay, VIDEO_RENDER_DELAY);
- TO_PROTO_ENUM(kAudioPacketSentToNetwork, AUDIO_PACKET_SENT_TO_NETWORK);
- TO_PROTO_ENUM(kVideoPacketSentToNetwork, VIDEO_PACKET_SENT_TO_NETWORK);
- TO_PROTO_ENUM(kAudioPacketRetransmitted, AUDIO_PACKET_RETRANSMITTED);
- TO_PROTO_ENUM(kVideoPacketRetransmitted, VIDEO_PACKET_RETRANSMITTED);
- TO_PROTO_ENUM(kAudioPacketReceived, AUDIO_PACKET_RECEIVED);
- TO_PROTO_ENUM(kVideoPacketReceived, VIDEO_PACKET_RECEIVED);
- TO_PROTO_ENUM(kDuplicateAudioPacketReceived,
- DUPLICATE_AUDIO_PACKET_RECEIVED);
- TO_PROTO_ENUM(kDuplicateVideoPacketReceived,
- DUPLICATE_VIDEO_PACKET_RECEIVED);
+ TO_PROTO_ENUM(UNKNOWN);
+ TO_PROTO_ENUM(FRAME_CAPTURE_BEGIN);
+ TO_PROTO_ENUM(FRAME_CAPTURE_END);
+ TO_PROTO_ENUM(FRAME_ENCODED);
+ TO_PROTO_ENUM(FRAME_ACK_RECEIVED);
+ TO_PROTO_ENUM(FRAME_ACK_SENT);
+ TO_PROTO_ENUM(FRAME_DECODED);
+ TO_PROTO_ENUM(FRAME_PLAYOUT);
+ TO_PROTO_ENUM(PACKET_SENT_TO_NETWORK);
+ TO_PROTO_ENUM(PACKET_RETRANSMITTED);
+ TO_PROTO_ENUM(PACKET_RECEIVED);
}
NOTREACHED();
- return media::cast::proto::UNKNOWN;
+ return proto::UNKNOWN;
}
} // namespace cast
diff --git a/media/cast/logging/proto/raw_events.proto b/media/cast/logging/proto/raw_events.proto
index adbd943cad..e94aed3868 100644
--- a/media/cast/logging/proto/raw_events.proto
+++ b/media/cast/logging/proto/raw_events.proto
@@ -14,6 +14,8 @@ package media.cast.proto;
// For compatibility reasons, existing values in this enum must not be changed.
enum EventType {
UNKNOWN = 0;
+
+ // Note: 1-28 are deprecated in favor of unified event types. Do not use.
// Generic events. No longer used.
RTT_MS = 1;
PACKET_LOSS = 2;
@@ -51,6 +53,19 @@ enum EventType {
VIDEO_PACKET_RECEIVED = 26;
DUPLICATE_AUDIO_PACKET_RECEIVED = 27;
DUPLICATE_VIDEO_PACKET_RECEIVED = 28;
+
+
+ // New, unified event types.
+ FRAME_CAPTURE_BEGIN = 29;
+ FRAME_CAPTURE_END = 30;
+ FRAME_ENCODED = 31;
+ FRAME_ACK_RECEIVED = 32;
+ FRAME_ACK_SENT = 33;
+ FRAME_DECODED = 34;
+ FRAME_PLAYOUT = 35;
+ PACKET_SENT_TO_NETWORK = 36;
+ PACKET_RETRANSMITTED = 37;
+ PACKET_RECEIVED = 38;
}
// Each log will contain one |LogMetadata|.
@@ -74,6 +89,10 @@ message LogMetadata {
// of the Unix epoch. This is used for relating the timestamps in the events
// to a real time and date.
optional int64 reference_timestamp_ms_at_unix_epoch = 5;
+
+ // Extra data to attach to the log, e.g. system info or
+ // experiment tags, in key-value JSON string format.
+ optional string extra_data = 6;
}
message AggregatedFrameEvent {
@@ -86,16 +105,16 @@ message AggregatedFrameEvent {
// and date.
repeated int64 event_timestamp_ms = 3 [packed = true];
- // Only set if there is a kAudioFrameEncoded and kVideoFrameEncoded event.
+ // Only set if there is a frame encoded event.
optional int32 encoded_frame_size = 4;
- // Only set if there is a kAudioPlayoutDelay or kVideoRenderDelay event.
+ // Only set if there is a frame playout event.
optional int32 delay_millis = 5;
- // Only set if there is a kVideoFrameEncoded event.
+ // Only set if there is a video frame encoded event.
optional bool key_frame = 6;
- // Only set if there is a kVideoFrameEncoded event.
+ // Only set if there is a video frame encoded event.
optional int32 target_bitrate = 7;
};
diff --git a/media/cast/logging/receiver_time_offset_estimator_impl.cc b/media/cast/logging/receiver_time_offset_estimator_impl.cc
index 897c7e1a4b..44d5eb0b3d 100644
--- a/media/cast/logging/receiver_time_offset_estimator_impl.cc
+++ b/media/cast/logging/receiver_time_offset_estimator_impl.cc
@@ -25,9 +25,13 @@ ReceiverTimeOffsetEstimatorImpl::~ReceiverTimeOffsetEstimatorImpl() {
void ReceiverTimeOffsetEstimatorImpl::OnReceiveFrameEvent(
const FrameEvent& frame_event) {
DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (frame_event.media_type != VIDEO_EVENT)
+ return;
+
CastLoggingEvent event = frame_event.type;
- if (event != kVideoFrameEncoded && event != kVideoAckSent &&
- event != kVideoAckReceived)
+ if (event != FRAME_ENCODED && event != FRAME_ACK_SENT &&
+ event != FRAME_ACK_RECEIVED)
return;
EventTimesMap::iterator it = event_times_map_.find(frame_event.rtp_timestamp);
@@ -37,7 +41,7 @@ void ReceiverTimeOffsetEstimatorImpl::OnReceiveFrameEvent(
event_times)).first;
}
switch (event) {
- case kVideoFrameEncoded:
+ case FRAME_ENCODED:
// Encode is supposed to happen only once. If we see duplicate event,
// throw away the entry.
if (it->second.event_a_time.is_null()) {
@@ -47,7 +51,7 @@ void ReceiverTimeOffsetEstimatorImpl::OnReceiveFrameEvent(
return;
}
break;
- case kVideoAckSent:
+ case FRAME_ACK_SENT:
if (it->second.event_b_time.is_null()) {
it->second.event_b_time = frame_event.timestamp;
} else if (it->second.event_b_time != frame_event.timestamp) {
@@ -57,7 +61,7 @@ void ReceiverTimeOffsetEstimatorImpl::OnReceiveFrameEvent(
return;
}
break;
- case kVideoAckReceived:
+ case FRAME_ACK_RECEIVED:
// If there are duplicate ack received events, pick the one with the
// smallest event timestamp so we can get a better bound.
if (it->second.event_c_time.is_null()) {
diff --git a/media/cast/logging/receiver_time_offset_estimator_impl.h b/media/cast/logging/receiver_time_offset_estimator_impl.h
index 0968bed290..1d0f6c8357 100644
--- a/media/cast/logging/receiver_time_offset_estimator_impl.h
+++ b/media/cast/logging/receiver_time_offset_estimator_impl.h
@@ -13,10 +13,10 @@
namespace media {
namespace cast {
-// This implementation listens to three types of events:
-// 1. kVideoFrameEncoded (sender side)
-// 2. kVideoAckSent (receiver side)
-// 3. kVideoAckReceived (sender side)
+// This implementation listens to three types of video events:
+// 1. FRAME_ENCODED (sender side)
+// 2. FRAME_ACK_SENT (receiver side)
+// 3. FRAME_ACK_RECEIVED (sender side)
// There is a causal relationship between these events in that these events
// must happen in order. This class obtains the lower and upper bounds for
// the offset by taking the difference of timestamps (2) - (1) and (2) - (3),
diff --git a/media/cast/logging/receiver_time_offset_estimator_impl_unittest.cc b/media/cast/logging/receiver_time_offset_estimator_impl_unittest.cc
index 36985b6619..1cdbecf5de 100644
--- a/media/cast/logging/receiver_time_offset_estimator_impl_unittest.cc
+++ b/media/cast/logging/receiver_time_offset_estimator_impl_unittest.cc
@@ -65,7 +65,7 @@ TEST_F(ReceiverTimeOffsetEstimatorImplTest, EstimateOffset) {
cast_environment_->Logging()->InsertEncodedFrameEvent(
sender_clock_->NowTicks(),
- kVideoFrameEncoded,
+ FRAME_ENCODED, VIDEO_EVENT,
rtp_timestamp,
frame_id,
1234,
@@ -76,13 +76,15 @@ TEST_F(ReceiverTimeOffsetEstimatorImplTest, EstimateOffset) {
AdvanceClocks(base::TimeDelta::FromMilliseconds(10));
cast_environment_->Logging()->InsertFrameEvent(
- receiver_clock_.NowTicks(), kVideoAckSent, rtp_timestamp, frame_id);
+ receiver_clock_.NowTicks(), FRAME_ACK_SENT, VIDEO_EVENT,
+ rtp_timestamp, frame_id);
EXPECT_FALSE(estimator_.GetReceiverOffsetBounds(&lower_bound, &upper_bound));
AdvanceClocks(base::TimeDelta::FromMilliseconds(30));
cast_environment_->Logging()->InsertFrameEvent(
- sender_clock_->NowTicks(), kVideoAckReceived, rtp_timestamp, frame_id);
+ sender_clock_->NowTicks(), FRAME_ACK_RECEIVED, VIDEO_EVENT,
+ rtp_timestamp, frame_id);
EXPECT_TRUE(estimator_.GetReceiverOffsetBounds(&lower_bound, &upper_bound));
@@ -112,7 +114,7 @@ TEST_F(ReceiverTimeOffsetEstimatorImplTest, EventCArrivesBeforeEventB) {
cast_environment_->Logging()->InsertEncodedFrameEvent(
sender_clock_->NowTicks(),
- kVideoFrameEncoded,
+ FRAME_ENCODED, VIDEO_EVENT,
rtp_timestamp,
frame_id,
1234,
@@ -127,12 +129,12 @@ TEST_F(ReceiverTimeOffsetEstimatorImplTest, EventCArrivesBeforeEventB) {
base::TimeTicks event_c_time = sender_clock_->NowTicks();
cast_environment_->Logging()->InsertFrameEvent(
- event_c_time, kVideoAckReceived, rtp_timestamp, frame_id);
+ event_c_time, FRAME_ACK_RECEIVED, VIDEO_EVENT, rtp_timestamp, frame_id);
EXPECT_FALSE(estimator_.GetReceiverOffsetBounds(&lower_bound, &upper_bound));
cast_environment_->Logging()->InsertFrameEvent(
- event_b_time, kVideoAckSent, rtp_timestamp, frame_id);
+ event_b_time, FRAME_ACK_SENT, VIDEO_EVENT, rtp_timestamp, frame_id);
EXPECT_TRUE(estimator_.GetReceiverOffsetBounds(&lower_bound, &upper_bound));
@@ -166,7 +168,7 @@ TEST_F(ReceiverTimeOffsetEstimatorImplTest, MultipleIterations) {
AdvanceClocks(base::TimeDelta::FromMilliseconds(20));
cast_environment_->Logging()->InsertEncodedFrameEvent(
sender_clock_->NowTicks(),
- kVideoFrameEncoded,
+ FRAME_ENCODED, VIDEO_EVENT,
rtp_timestamp_a,
frame_id_a,
1234,
@@ -176,35 +178,39 @@ TEST_F(ReceiverTimeOffsetEstimatorImplTest, MultipleIterations) {
AdvanceClocks(base::TimeDelta::FromMilliseconds(10));
cast_environment_->Logging()->InsertEncodedFrameEvent(
sender_clock_->NowTicks(),
- kVideoFrameEncoded,
+ FRAME_ENCODED, VIDEO_EVENT,
rtp_timestamp_b,
frame_id_b,
1234,
true,
5678);
cast_environment_->Logging()->InsertFrameEvent(
- receiver_clock_.NowTicks(), kVideoAckSent, rtp_timestamp_a, frame_id_a);
+ receiver_clock_.NowTicks(), FRAME_ACK_SENT, VIDEO_EVENT,
+ rtp_timestamp_a, frame_id_a);
AdvanceClocks(base::TimeDelta::FromMilliseconds(20));
cast_environment_->Logging()->InsertFrameEvent(
- receiver_clock_.NowTicks(), kVideoAckSent, rtp_timestamp_b, frame_id_b);
+ receiver_clock_.NowTicks(), FRAME_ACK_SENT, VIDEO_EVENT,
+ rtp_timestamp_b, frame_id_b);
AdvanceClocks(base::TimeDelta::FromMilliseconds(5));
cast_environment_->Logging()->InsertFrameEvent(sender_clock_->NowTicks(),
- kVideoAckReceived,
+ FRAME_ACK_RECEIVED,
+ VIDEO_EVENT,
rtp_timestamp_b,
frame_id_b);
AdvanceClocks(base::TimeDelta::FromMilliseconds(5));
cast_environment_->Logging()->InsertFrameEvent(sender_clock_->NowTicks(),
- kVideoAckReceived,
+ FRAME_ACK_RECEIVED,
+ VIDEO_EVENT,
rtp_timestamp_a,
frame_id_a);
AdvanceClocks(base::TimeDelta::FromMilliseconds(17));
cast_environment_->Logging()->InsertEncodedFrameEvent(
sender_clock_->NowTicks(),
- kVideoFrameEncoded,
+ FRAME_ENCODED, VIDEO_EVENT,
rtp_timestamp_c,
frame_id_c,
1234,
@@ -213,11 +219,13 @@ TEST_F(ReceiverTimeOffsetEstimatorImplTest, MultipleIterations) {
AdvanceClocks(base::TimeDelta::FromMilliseconds(3));
cast_environment_->Logging()->InsertFrameEvent(
- receiver_clock_.NowTicks(), kVideoAckSent, rtp_timestamp_c, frame_id_c);
+ receiver_clock_.NowTicks(), FRAME_ACK_SENT, VIDEO_EVENT,
+ rtp_timestamp_c, frame_id_c);
AdvanceClocks(base::TimeDelta::FromMilliseconds(30));
cast_environment_->Logging()->InsertFrameEvent(sender_clock_->NowTicks(),
- kVideoAckReceived,
+ FRAME_ACK_RECEIVED,
+ VIDEO_EVENT,
rtp_timestamp_c,
frame_id_c);
diff --git a/media/cast/logging/serialize_deserialize_test.cc b/media/cast/logging/serialize_deserialize_test.cc
index b309cedf89..7e5aa7d3b5 100644
--- a/media/cast/logging/serialize_deserialize_test.cc
+++ b/media/cast/logging/serialize_deserialize_test.cc
@@ -20,18 +20,19 @@ using media::cast::proto::LogMetadata;
namespace {
const media::cast::CastLoggingEvent kVideoFrameEvents[] = {
- media::cast::kVideoFrameCaptureBegin, media::cast::kVideoFrameCaptureEnd,
- media::cast::kVideoFrameSentToEncoder, media::cast::kVideoFrameEncoded,
- media::cast::kVideoFrameDecoded, media::cast::kVideoRenderDelay};
+ media::cast::FRAME_CAPTURE_BEGIN, media::cast::FRAME_CAPTURE_END,
+ media::cast::FRAME_ENCODED, media::cast::FRAME_DECODED,
+ media::cast::FRAME_PLAYOUT };
const media::cast::CastLoggingEvent kVideoPacketEvents[] = {
- media::cast::kVideoPacketSentToNetwork, media::cast::kVideoPacketReceived};
+ media::cast::PACKET_SENT_TO_NETWORK, media::cast::PACKET_RECEIVED};
// The frame event fields cycle through these numbers.
const int kEncodedFrameSize[] = {512, 425, 399, 400, 237};
const int kDelayMillis[] = {15, 4, 8, 42, 23, 16};
const int kMaxSerializedBytes = 10000;
+
}
namespace media {
diff --git a/media/cast/logging/simple_event_subscriber_unittest.cc b/media/cast/logging/simple_event_subscriber_unittest.cc
index bce7f53dd7..311a234195 100644
--- a/media/cast/logging/simple_event_subscriber_unittest.cc
+++ b/media/cast/logging/simple_event_subscriber_unittest.cc
@@ -41,25 +41,31 @@ class SimpleEventSubscriberTest : public ::testing::Test {
TEST_F(SimpleEventSubscriberTest, GetAndResetEvents) {
// Log some frame events.
cast_environment_->Logging()->InsertEncodedFrameEvent(
- testing_clock_->NowTicks(), kAudioFrameEncoded, /*rtp_timestamp*/ 100u,
- /*frame_id*/ 0u, /*frame_size*/ 123, /*key_frame*/ false, 0);
+ testing_clock_->NowTicks(), FRAME_ENCODED, AUDIO_EVENT,
+ /*rtp_timestamp*/ 100u, /*frame_id*/ 0u, /*frame_size*/ 123,
+ /*key_frame*/ false, 0);
cast_environment_->Logging()->InsertFrameEventWithDelay(
- testing_clock_->NowTicks(), kAudioPlayoutDelay, /*rtp_timestamp*/ 100u,
+ testing_clock_->NowTicks(), FRAME_PLAYOUT, AUDIO_EVENT,
+ /*rtp_timestamp*/ 100u,
/*frame_id*/ 0u, /*delay*/ base::TimeDelta::FromMilliseconds(100));
cast_environment_->Logging()->InsertFrameEvent(
- testing_clock_->NowTicks(), kAudioFrameDecoded, /*rtp_timestamp*/ 200u,
+ testing_clock_->NowTicks(), FRAME_DECODED, AUDIO_EVENT,
+ /*rtp_timestamp*/ 200u,
/*frame_id*/ 0u);
// Log some packet events.
cast_environment_->Logging()->InsertPacketEvent(
- testing_clock_->NowTicks(), kAudioPacketReceived, /*rtp_timestamp*/ 200u,
+ testing_clock_->NowTicks(), PACKET_RECEIVED, AUDIO_EVENT,
+ /*rtp_timestamp*/ 200u,
/*frame_id*/ 0u, /*packet_id*/ 1u, /*max_packet_id*/ 5u, /*size*/ 100u);
cast_environment_->Logging()->InsertPacketEvent(
- testing_clock_->NowTicks(), kVideoFrameDecoded, /*rtp_timestamp*/ 200u,
- /*frame_id*/ 0u, /*packet_id*/ 1u, /*max_packet_id*/ 5u, /*size*/ 100u);
+ testing_clock_->NowTicks(), FRAME_DECODED, VIDEO_EVENT,
+ /*rtp_timestamp*/ 200u, /*frame_id*/ 0u, /*packet_id*/ 1u,
+ /*max_packet_id*/ 5u, /*size*/ 100u);
cast_environment_->Logging()->InsertPacketEvent(
- testing_clock_->NowTicks(), kVideoFrameDecoded, /*rtp_timestamp*/ 300u,
- /*frame_id*/ 0u, /*packet_id*/ 1u, /*max_packet_id*/ 5u, /*size*/ 100u);
+ testing_clock_->NowTicks(), FRAME_DECODED, VIDEO_EVENT,
+ /*rtp_timestamp*/ 300u, /*frame_id*/ 0u, /*packet_id*/ 1u,
+ /*max_packet_id*/ 5u, /*size*/ 100u);
std::vector<FrameEvent> frame_events;
event_subscriber_.GetFrameEventsAndReset(&frame_events);
diff --git a/media/cast/logging/stats_event_subscriber.cc b/media/cast/logging/stats_event_subscriber.cc
index e292d6682d..9e3226a216 100644
--- a/media/cast/logging/stats_event_subscriber.cc
+++ b/media/cast/logging/stats_event_subscriber.cc
@@ -22,49 +22,11 @@ using media::cast::EventMediaType;
const size_t kMaxFrameEventTimeMapSize = 100;
const size_t kMaxPacketEventTimeMapSize = 1000;
-CastLoggingEvent GetCapturedEvent(EventMediaType media_type) {
- return media_type == AUDIO_EVENT ?
- kAudioFrameCaptureBegin : kVideoFrameCaptureBegin;
-}
-
-CastLoggingEvent GetEncodedEvent(EventMediaType media_type) {
- return media_type == AUDIO_EVENT ? kAudioFrameEncoded : kVideoFrameEncoded;
-}
-
-CastLoggingEvent GetDecodedEvent(EventMediaType media_type) {
- return media_type == AUDIO_EVENT ? kAudioFrameDecoded : kVideoFrameDecoded;
-}
-
-CastLoggingEvent GetPlayoutEvent(EventMediaType media_type) {
- return media_type == AUDIO_EVENT ? kAudioPlayoutDelay : kVideoRenderDelay;
-}
-
-CastLoggingEvent GetPacketSentEvent(EventMediaType media_type) {
- return media_type == AUDIO_EVENT ? kAudioPacketSentToNetwork :
- kVideoPacketSentToNetwork;
-}
-
-CastLoggingEvent GetPacketReceivedEvent(EventMediaType media_type) {
- return media_type == AUDIO_EVENT ? kAudioPacketReceived :
- kVideoPacketReceived;
-}
-
-CastLoggingEvent GetPacketRetransmittedEvent(EventMediaType media_type) {
- return media_type == AUDIO_EVENT ? kAudioPacketRetransmitted :
- kVideoPacketRetransmitted;
-}
-
bool IsReceiverEvent(CastLoggingEvent event) {
- return event == kAudioFrameDecoded
- || event == kVideoFrameDecoded
- || event == kAudioPlayoutDelay
- || event == kVideoRenderDelay
- || event == kAudioAckSent
- || event == kVideoAckSent
- || event == kAudioPacketReceived
- || event == kVideoPacketReceived
- || event == kDuplicateAudioPacketReceived
- || event == kDuplicateVideoPacketReceived;
+ return event == FRAME_DECODED
+ || event == FRAME_PLAYOUT
+ || event == FRAME_ACK_SENT
+ || event == PACKET_RECEIVED;
}
} // namespace
@@ -92,7 +54,7 @@ void StatsEventSubscriber::OnReceiveFrameEvent(const FrameEvent& frame_event) {
DCHECK(thread_checker_.CalledOnValidThread());
CastLoggingEvent type = frame_event.type;
- if (GetEventMediaType(type) != event_media_type_)
+ if (frame_event.media_type != event_media_type_)
return;
FrameStatsMap::iterator it = frame_stats_.find(type);
@@ -108,9 +70,9 @@ void StatsEventSubscriber::OnReceiveFrameEvent(const FrameEvent& frame_event) {
it->second.sum_delay += frame_event.delay_delta;
}
- if (type == GetCapturedEvent(event_media_type_)) {
+ if (type == FRAME_CAPTURE_BEGIN) {
RecordFrameCapturedTime(frame_event);
- } else if (type == GetPlayoutEvent(event_media_type_)) {
+ } else if (type == FRAME_PLAYOUT) {
RecordE2ELatency(frame_event);
}
@@ -123,7 +85,7 @@ void StatsEventSubscriber::OnReceivePacketEvent(
DCHECK(thread_checker_.CalledOnValidThread());
CastLoggingEvent type = packet_event.type;
- if (GetEventMediaType(type) != event_media_type_)
+ if (packet_event.media_type != event_media_type_)
return;
PacketStatsMap::iterator it = packet_stats_.find(type);
@@ -137,10 +99,10 @@ void StatsEventSubscriber::OnReceivePacketEvent(
it->second.sum_size += packet_event.size;
}
- if (type == GetPacketSentEvent(event_media_type_) ||
- type == GetPacketReceivedEvent(event_media_type_)) {
+ if (type == PACKET_SENT_TO_NETWORK ||
+ type == PACKET_RECEIVED) {
RecordNetworkLatency(packet_event);
- } else if (type == GetPacketRetransmittedEvent(event_media_type_)) {
+ } else if (type == PACKET_RETRANSMITTED) {
// We only measure network latency using packets that doesn't have to be
// retransmitted as there is precisely one sent-receive timestamp pairs.
ErasePacketSentTime(packet_event);
@@ -210,19 +172,19 @@ void StatsEventSubscriber::GetStatsInternal(StatsMap* stats_map) const {
base::TimeTicks end_time = clock_->NowTicks();
PopulateFpsStat(
- end_time, GetCapturedEvent(event_media_type_), CAPTURE_FPS, stats_map);
+ end_time, FRAME_CAPTURE_BEGIN, CAPTURE_FPS, stats_map);
PopulateFpsStat(
- end_time, GetEncodedEvent(event_media_type_), ENCODE_FPS, stats_map);
+ end_time, FRAME_ENCODED, ENCODE_FPS, stats_map);
PopulateFpsStat(
- end_time, GetDecodedEvent(event_media_type_), DECODE_FPS, stats_map);
+ end_time, FRAME_DECODED, DECODE_FPS, stats_map);
PopulatePlayoutDelayStat(stats_map);
PopulateFrameBitrateStat(end_time, stats_map);
PopulatePacketBitrateStat(end_time,
- GetPacketSentEvent(event_media_type_),
+ PACKET_SENT_TO_NETWORK,
TRANSMISSION_KBPS,
stats_map);
PopulatePacketBitrateStat(end_time,
- GetPacketRetransmittedEvent(event_media_type_),
+ PACKET_RETRANSMITTED,
RETRANSMISSION_KBPS,
stats_map);
PopulatePacketLossPercentageStat(stats_map);
@@ -324,13 +286,13 @@ void StatsEventSubscriber::RecordNetworkLatency(
bool match = false;
base::TimeTicks packet_sent_time;
base::TimeTicks packet_received_time;
- if (recorded_type == GetPacketSentEvent(event_media_type_) &&
- packet_event.type == GetPacketReceivedEvent(event_media_type_)) {
+ if (recorded_type == PACKET_SENT_TO_NETWORK &&
+ packet_event.type == PACKET_RECEIVED) {
packet_sent_time = value.first;
packet_received_time = packet_event.timestamp;
match = true;
- } else if (recorded_type == GetPacketReceivedEvent(event_media_type_) &&
- packet_event.type == GetPacketSentEvent(event_media_type_)) {
+ } else if (recorded_type == PACKET_RECEIVED &&
+ packet_event.type == PACKET_SENT_TO_NETWORK) {
packet_sent_time = packet_event.timestamp;
packet_received_time = value.first;
match = true;
@@ -362,8 +324,7 @@ void StatsEventSubscriber::PopulateFpsStat(base::TimeTicks end_time,
}
void StatsEventSubscriber::PopulatePlayoutDelayStat(StatsMap* stats_map) const {
- CastLoggingEvent event = GetPlayoutEvent(event_media_type_);
- FrameStatsMap::const_iterator it = frame_stats_.find(event);
+ FrameStatsMap::const_iterator it = frame_stats_.find(FRAME_PLAYOUT);
if (it != frame_stats_.end()) {
double avg_delay_ms = 0.0;
base::TimeDelta sum_delay = it->second.sum_delay;
@@ -376,8 +337,7 @@ void StatsEventSubscriber::PopulatePlayoutDelayStat(StatsMap* stats_map) const {
void StatsEventSubscriber::PopulateFrameBitrateStat(base::TimeTicks end_time,
StatsMap* stats_map) const {
- CastLoggingEvent event = GetEncodedEvent(event_media_type_);
- FrameStatsMap::const_iterator it = frame_stats_.find(event);
+ FrameStatsMap::const_iterator it = frame_stats_.find(FRAME_ENCODED);
if (it != frame_stats_.end()) {
double kbps = 0.0;
base::TimeDelta duration = end_time - start_time_;
@@ -412,15 +372,12 @@ void StatsEventSubscriber::PopulatePacketLossPercentageStat(
// (re)transmission was lost.
// This means the percentage of packet loss is
// (# of retransmit events) / (# of transmit + retransmit events).
- CastLoggingEvent packet_sent_event = GetPacketSentEvent(event_media_type_);
- CastLoggingEvent packet_retransmitted_event =
- GetPacketRetransmittedEvent(event_media_type_);
PacketStatsMap::const_iterator sent_it =
- packet_stats_.find(packet_sent_event);
+ packet_stats_.find(PACKET_SENT_TO_NETWORK);
if (sent_it == packet_stats_.end())
return;
PacketStatsMap::const_iterator retransmitted_it =
- packet_stats_.find(packet_retransmitted_event);
+ packet_stats_.find(PACKET_RETRANSMITTED);
int sent_count = sent_it->second.event_counter;
int retransmitted_count = 0;
if (retransmitted_it != packet_stats_.end())
diff --git a/media/cast/logging/stats_event_subscriber.h b/media/cast/logging/stats_event_subscriber.h
index 4e198dadce..173378ab0b 100644
--- a/media/cast/logging/stats_event_subscriber.h
+++ b/media/cast/logging/stats_event_subscriber.h
@@ -83,7 +83,7 @@ class StatsEventSubscriber : public RawEventSubscriber {
DECODE_FPS,
// Average encode duration in milliseconds.
// TODO(imcheng): This stat is not populated yet because we do not have
- // the time when encode started. Record it in kVideoFrameEncoded event.
+ // the time when encode started. Record it in FRAME_ENCODED event.
AVG_ENCODE_TIME_MS,
// Average playout delay in milliseconds, with target delay already
// accounted for. Ideally, every frame should have a playout delay of 0.
diff --git a/media/cast/logging/stats_event_subscriber_unittest.cc b/media/cast/logging/stats_event_subscriber_unittest.cc
index 90602d5065..33faa02059 100644
--- a/media/cast/logging/stats_event_subscriber_unittest.cc
+++ b/media/cast/logging/stats_event_subscriber_unittest.cc
@@ -74,7 +74,8 @@ TEST_F(StatsEventSubscriberTest, Capture) {
base::TimeTicks start_time = sender_clock_->NowTicks();
for (int i = 0; i < num_frames; i++) {
cast_environment_->Logging()->InsertFrameEvent(sender_clock_->NowTicks(),
- kVideoFrameCaptureBegin,
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
rtp_timestamp,
frame_id);
@@ -111,7 +112,7 @@ TEST_F(StatsEventSubscriberTest, Encode) {
total_size += size;
cast_environment_->Logging()->InsertEncodedFrameEvent(
sender_clock_->NowTicks(),
- kVideoFrameEncoded,
+ FRAME_ENCODED, VIDEO_EVENT,
rtp_timestamp,
frame_id,
size,
@@ -153,7 +154,7 @@ TEST_F(StatsEventSubscriberTest, Decode) {
base::TimeTicks start_time = sender_clock_->NowTicks();
for (int i = 0; i < num_frames; i++) {
cast_environment_->Logging()->InsertFrameEvent(receiver_clock_.NowTicks(),
- kVideoFrameDecoded,
+ FRAME_DECODED, VIDEO_EVENT,
rtp_timestamp,
frame_id);
@@ -190,7 +191,8 @@ TEST_F(StatsEventSubscriberTest, PlayoutDelay) {
total_delay_ms += delay_ms;
cast_environment_->Logging()->InsertFrameEventWithDelay(
receiver_clock_.NowTicks(),
- kVideoRenderDelay,
+ FRAME_PLAYOUT,
+ VIDEO_EVENT,
rtp_timestamp,
frame_id,
delay);
@@ -220,7 +222,8 @@ TEST_F(StatsEventSubscriberTest, E2ELatency) {
base::TimeDelta total_latency;
for (int i = 0; i < num_frames; i++) {
cast_environment_->Logging()->InsertFrameEvent(sender_clock_->NowTicks(),
- kVideoFrameCaptureBegin,
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
rtp_timestamp,
frame_id);
@@ -234,7 +237,8 @@ TEST_F(StatsEventSubscriberTest, E2ELatency) {
cast_environment_->Logging()->InsertFrameEventWithDelay(
receiver_clock_.NowTicks(),
- kVideoRenderDelay,
+ FRAME_PLAYOUT,
+ VIDEO_EVENT,
rtp_timestamp,
frame_id,
delay);
@@ -274,7 +278,8 @@ TEST_F(StatsEventSubscriberTest, Packets) {
total_size += size;
cast_environment_->Logging()->InsertPacketEvent(sender_clock_->NowTicks(),
- kVideoPacketSentToNetwork,
+ PACKET_SENT_TO_NETWORK,
+ VIDEO_EVENT,
rtp_timestamp,
0,
i,
@@ -299,7 +304,8 @@ TEST_F(StatsEventSubscriberTest, Packets) {
if (i % 2 == 0) {
cast_environment_->Logging()->InsertPacketEvent(
receiver_clock_.NowTicks(),
- kVideoPacketRetransmitted,
+ PACKET_RETRANSMITTED,
+ VIDEO_EVENT,
rtp_timestamp,
0,
i,
@@ -315,7 +321,8 @@ TEST_F(StatsEventSubscriberTest, Packets) {
if (i % 4 == 0) {
cast_environment_->Logging()->InsertPacketEvent(
receiver_clock_.NowTicks(),
- kVideoPacketRetransmitted,
+ PACKET_RETRANSMITTED,
+ VIDEO_EVENT,
rtp_timestamp,
0,
i,
@@ -331,7 +338,8 @@ TEST_F(StatsEventSubscriberTest, Packets) {
if (i % 8 == 0) {
cast_environment_->Logging()->InsertPacketEvent(
receiver_clock_.NowTicks(),
- kVideoPacketRetransmitted,
+ PACKET_RETRANSMITTED,
+ VIDEO_EVENT,
rtp_timestamp,
0,
i,
@@ -343,7 +351,8 @@ TEST_F(StatsEventSubscriberTest, Packets) {
}
cast_environment_->Logging()->InsertPacketEvent(received_time,
- kVideoPacketReceived,
+ PACKET_RECEIVED,
+ VIDEO_EVENT,
rtp_timestamp,
0,
i,
diff --git a/media/cast/rtcp/mock_rtcp_receiver_feedback.h b/media/cast/rtcp/mock_rtcp_receiver_feedback.h
index b8946d972c..56fe1ca699 100644
--- a/media/cast/rtcp/mock_rtcp_receiver_feedback.h
+++ b/media/cast/rtcp/mock_rtcp_receiver_feedback.h
@@ -30,8 +30,6 @@ class MockRtcpReceiverFeedback : public RtcpReceiverFeedback {
MOCK_METHOD1(OnReceivedReceiverLog,
void(const RtcpReceiverLogMessage& receiver_log));
- MOCK_METHOD1(OnReceivedSenderLog,
- void(const transport::RtcpSenderLogMessage& sender_log));
};
class MockRtcpRttFeedback : public RtcpRttFeedback {
diff --git a/media/cast/rtcp/receiver_rtcp_event_subscriber.cc b/media/cast/rtcp/receiver_rtcp_event_subscriber.cc
index 393b6780e9..9a9c0aeeb7 100644
--- a/media/cast/rtcp/receiver_rtcp_event_subscriber.cc
+++ b/media/cast/rtcp/receiver_rtcp_event_subscriber.cc
@@ -10,10 +10,10 @@ namespace media {
namespace cast {
ReceiverRtcpEventSubscriber::ReceiverRtcpEventSubscriber(
- const size_t max_size_to_retain, Type type)
+ const size_t max_size_to_retain, EventMediaType type)
: max_size_to_retain_(max_size_to_retain), type_(type) {
DCHECK(max_size_to_retain_ > 0u);
- DCHECK(type_ == kAudioEventSubscriber || type_ == kVideoEventSubscriber);
+ DCHECK(type_ == AUDIO_EVENT || type_ == VIDEO_EVENT);
}
ReceiverRtcpEventSubscriber::~ReceiverRtcpEventSubscriber() {
@@ -24,16 +24,13 @@ void ReceiverRtcpEventSubscriber::OnReceiveFrameEvent(
const FrameEvent& frame_event) {
DCHECK(thread_checker_.CalledOnValidThread());
- if (ShouldProcessEvent(frame_event.type)) {
+ if (ShouldProcessEvent(frame_event.type, frame_event.media_type)) {
RtcpEvent rtcp_event;
switch (frame_event.type) {
- case kAudioPlayoutDelay:
- case kVideoRenderDelay:
+ case FRAME_PLAYOUT:
rtcp_event.delay_delta = frame_event.delay_delta;
- case kAudioFrameDecoded:
- case kVideoFrameDecoded:
- case kAudioAckSent:
- case kVideoAckSent:
+ case FRAME_ACK_SENT:
+ case FRAME_DECODED:
rtcp_event.type = frame_event.type;
rtcp_event.timestamp = frame_event.timestamp;
rtcp_events_.insert(
@@ -53,10 +50,9 @@ void ReceiverRtcpEventSubscriber::OnReceivePacketEvent(
const PacketEvent& packet_event) {
DCHECK(thread_checker_.CalledOnValidThread());
- if (ShouldProcessEvent(packet_event.type)) {
+ if (ShouldProcessEvent(packet_event.type, packet_event.media_type)) {
RtcpEvent rtcp_event;
- if (packet_event.type == kAudioPacketReceived ||
- packet_event.type == kVideoPacketReceived) {
+ if (packet_event.type == PACKET_RECEIVED) {
rtcp_event.type = packet_event.type;
rtcp_event.timestamp = packet_event.timestamp;
rtcp_event.packet_id = packet_event.packet_id;
@@ -90,18 +86,10 @@ void ReceiverRtcpEventSubscriber::TruncateMapIfNeeded() {
}
bool ReceiverRtcpEventSubscriber::ShouldProcessEvent(
- CastLoggingEvent event_type) {
- if (type_ == kAudioEventSubscriber) {
- return event_type == kAudioPlayoutDelay ||
- event_type == kAudioFrameDecoded || event_type == kAudioAckSent ||
- event_type == kAudioPacketReceived;
- } else if (type_ == kVideoEventSubscriber) {
- return event_type == kVideoRenderDelay ||
- event_type == kVideoFrameDecoded || event_type == kVideoAckSent ||
- event_type == kVideoPacketReceived;
- } else {
- return false;
- }
+ CastLoggingEvent event_type, EventMediaType event_media_type) {
+ return type_ == event_media_type &&
+ (event_type == FRAME_ACK_SENT || event_type == FRAME_DECODED ||
+ event_type == FRAME_PLAYOUT || event_type == PACKET_RECEIVED);
}
} // namespace cast
diff --git a/media/cast/rtcp/receiver_rtcp_event_subscriber.h b/media/cast/rtcp/receiver_rtcp_event_subscriber.h
index f2c7e5a908..84af7cbaf3 100644
--- a/media/cast/rtcp/receiver_rtcp_event_subscriber.h
+++ b/media/cast/rtcp/receiver_rtcp_event_subscriber.h
@@ -12,10 +12,6 @@
#include "media/cast/logging/raw_event_subscriber.h"
#include "media/cast/rtcp/rtcp_defines.h"
-namespace base {
-class SingleThreadTaskRunner;
-}
-
namespace media {
namespace cast {
@@ -32,20 +28,14 @@ class ReceiverRtcpEventSubscriber : public RawEventSubscriber {
public:
typedef std::multimap<RtpTimestamp, RtcpEvent> RtcpEventMultiMap;
- // Identifies whether the subscriber will process audio or video related
- // frame events.
- enum Type {
- kAudioEventSubscriber, // Only processes audio events
- kVideoEventSubscriber // Only processes video events
- };
-
// |max_size_to_retain|: The object will keep up to |max_size_to_retain|
// events
// in the map. Once threshold has been reached, an event with the smallest
// RTP timestamp will be removed.
// |type|: Determines whether the subscriber will process only audio or video
// events.
- ReceiverRtcpEventSubscriber(const size_t max_size_to_retain, Type type);
+ ReceiverRtcpEventSubscriber(const size_t max_size_to_retain,
+ EventMediaType type);
virtual ~ReceiverRtcpEventSubscriber();
@@ -63,11 +53,13 @@ class ReceiverRtcpEventSubscriber : public RawEventSubscriber {
// |max_size_to_retain_|.
void TruncateMapIfNeeded();
- // Returns |true| if events of |event_type| should be processed.
- bool ShouldProcessEvent(CastLoggingEvent event_type);
+ // Returns |true| if events of |event_type| and |media_type|
+ // should be processed.
+ bool ShouldProcessEvent(CastLoggingEvent event_type,
+ EventMediaType media_type);
const size_t max_size_to_retain_;
- Type type_;
+ EventMediaType type_;
// The key should really be something more than just a RTP timestamp in order
// to differentiate between video and audio frames, but since the
diff --git a/media/cast/rtcp/receiver_rtcp_event_subscriber_unittest.cc b/media/cast/rtcp/receiver_rtcp_event_subscriber_unittest.cc
index aa0d8faa83..e0d0f17216 100644
--- a/media/cast/rtcp/receiver_rtcp_event_subscriber_unittest.cc
+++ b/media/cast/rtcp/receiver_rtcp_event_subscriber_unittest.cc
@@ -42,7 +42,7 @@ class ReceiverRtcpEventSubscriberTest : public ::testing::Test {
}
}
- void Init(ReceiverRtcpEventSubscriber::Type type) {
+ void Init(EventMediaType type) {
event_subscriber_.reset(
new ReceiverRtcpEventSubscriber(kMaxEventEntries, type));
cast_environment_->Logging()->AddRawEventSubscriber(
@@ -52,35 +52,39 @@ class ReceiverRtcpEventSubscriberTest : public ::testing::Test {
void InsertEvents() {
// Video events
cast_environment_->Logging()->InsertFrameEventWithDelay(
- testing_clock_->NowTicks(), kVideoRenderDelay, /*rtp_timestamp*/ 100u,
- /*frame_id*/ 2u, base::TimeDelta::FromMilliseconds(kDelayMs));
+ testing_clock_->NowTicks(), FRAME_PLAYOUT, VIDEO_EVENT,
+ /*rtp_timestamp*/ 100u, /*frame_id*/ 2u,
+ base::TimeDelta::FromMilliseconds(kDelayMs));
cast_environment_->Logging()->InsertFrameEvent(
- testing_clock_->NowTicks(), kVideoFrameDecoded, /*rtp_timestamp*/ 200u,
- /*frame_id*/ 1u);
+ testing_clock_->NowTicks(), FRAME_DECODED, VIDEO_EVENT,
+ /*rtp_timestamp*/ 200u, /*frame_id*/ 1u);
cast_environment_->Logging()->InsertPacketEvent(
- testing_clock_->NowTicks(), kVideoPacketReceived,
+ testing_clock_->NowTicks(), PACKET_RECEIVED, VIDEO_EVENT,
/*rtp_timestamp */ 200u, /*frame_id*/ 2u, /*packet_id*/ 1u,
/*max_packet_id*/ 10u, /*size*/ 1024u);
// Audio events
cast_environment_->Logging()->InsertFrameEventWithDelay(
- testing_clock_->NowTicks(), kAudioPlayoutDelay, /*rtp_timestamp*/ 300u,
- /*frame_id*/ 4u, base::TimeDelta::FromMilliseconds(kDelayMs));
+ testing_clock_->NowTicks(), FRAME_PLAYOUT, AUDIO_EVENT,
+ /*rtp_timestamp*/ 300u, /*frame_id*/ 4u,
+ base::TimeDelta::FromMilliseconds(kDelayMs));
cast_environment_->Logging()->InsertFrameEvent(
- testing_clock_->NowTicks(), kAudioFrameDecoded, /*rtp_timestamp*/ 400u,
- /*frame_id*/ 3u);
+ testing_clock_->NowTicks(), FRAME_DECODED, AUDIO_EVENT,
+ /*rtp_timestamp*/ 400u, /*frame_id*/ 3u);
cast_environment_->Logging()->InsertPacketEvent(
- testing_clock_->NowTicks(), kAudioPacketReceived,
+ testing_clock_->NowTicks(), PACKET_RECEIVED, AUDIO_EVENT,
/*rtp_timestamp */ 400u, /*frame_id*/ 5u, /*packet_id*/ 1u,
/*max_packet_id*/ 10u, /*size*/ 128u);
// Unrelated events
cast_environment_->Logging()->InsertFrameEvent(testing_clock_->NowTicks(),
- kVideoFrameCaptureEnd,
+ FRAME_CAPTURE_END,
+ VIDEO_EVENT,
/*rtp_timestamp*/ 100u,
/*frame_id*/ 1u);
cast_environment_->Logging()->InsertFrameEvent(testing_clock_->NowTicks(),
- kAudioFrameCaptureEnd,
+ FRAME_CAPTURE_END,
+ AUDIO_EVENT,
/*rtp_timestamp*/ 100u,
/*frame_id*/ 1u);
}
@@ -92,7 +96,7 @@ class ReceiverRtcpEventSubscriberTest : public ::testing::Test {
};
TEST_F(ReceiverRtcpEventSubscriberTest, LogVideoEvents) {
- Init(ReceiverRtcpEventSubscriber::kVideoEventSubscriber);
+ Init(VIDEO_EVENT);
InsertEvents();
ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
@@ -101,7 +105,7 @@ TEST_F(ReceiverRtcpEventSubscriberTest, LogVideoEvents) {
}
TEST_F(ReceiverRtcpEventSubscriberTest, LogAudioEvents) {
- Init(ReceiverRtcpEventSubscriber::kAudioEventSubscriber);
+ Init(AUDIO_EVENT);
InsertEvents();
ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
@@ -110,11 +114,11 @@ TEST_F(ReceiverRtcpEventSubscriberTest, LogAudioEvents) {
}
TEST_F(ReceiverRtcpEventSubscriberTest, DropEventsWhenSizeExceeded) {
- Init(ReceiverRtcpEventSubscriber::kVideoEventSubscriber);
+ Init(VIDEO_EVENT);
for (uint32 i = 1u; i <= 10u; ++i) {
cast_environment_->Logging()->InsertFrameEvent(
- testing_clock_->NowTicks(), kVideoFrameDecoded,
+ testing_clock_->NowTicks(), FRAME_DECODED, VIDEO_EVENT,
/*rtp_timestamp*/ i * 10, /*frame_id*/ i);
}
diff --git a/media/cast/rtcp/rtcp.cc b/media/cast/rtcp/rtcp.cc
index 234a1e97e2..3aa936b135 100644
--- a/media/cast/rtcp/rtcp.cc
+++ b/media/cast/rtcp/rtcp.cc
@@ -19,10 +19,7 @@ namespace media {
namespace cast {
static const int kMaxRttMs = 10000; // 10 seconds.
-static const uint16 kMaxDelay = 2000;
-
-// Time limit for received RTCP messages when we stop using it for lip-sync.
-static const int64 kMaxDiffSinceReceivedRtcpMs = 100000; // 100 seconds.
+static const int kMaxDelay = 2000;
class LocalRtcpRttFeedback : public RtcpRttFeedback {
public:
@@ -68,85 +65,7 @@ class LocalRtcpReceiverFeedback : public RtcpReceiverFeedback {
virtual void OnReceivedReceiverLog(const RtcpReceiverLogMessage& receiver_log)
OVERRIDE {
- // Add received log messages into our log system.
- RtcpReceiverLogMessage::const_iterator it = receiver_log.begin();
-
- for (; it != receiver_log.end(); ++it) {
- uint32 rtp_timestamp = it->rtp_timestamp_;
-
- RtcpReceiverEventLogMessages::const_iterator event_it =
- it->event_log_messages_.begin();
- for (; event_it != it->event_log_messages_.end(); ++event_it) {
- switch (event_it->type) {
- case kAudioPacketReceived:
- case kVideoPacketReceived:
- case kDuplicateAudioPacketReceived:
- case kDuplicateVideoPacketReceived:
- cast_environment_->Logging()->InsertPacketEvent(
- event_it->event_timestamp, event_it->type, rtp_timestamp,
- kFrameIdUnknown, event_it->packet_id, 0, 0);
- break;
- case kAudioAckSent:
- case kVideoAckSent:
- case kAudioFrameDecoded:
- case kVideoFrameDecoded:
- cast_environment_->Logging()->InsertFrameEvent(
- event_it->event_timestamp, event_it->type, rtp_timestamp,
- kFrameIdUnknown);
- break;
- case kAudioPlayoutDelay:
- case kVideoRenderDelay:
- cast_environment_->Logging()->InsertFrameEventWithDelay(
- event_it->event_timestamp, event_it->type, rtp_timestamp,
- kFrameIdUnknown, event_it->delay_delta);
- break;
- default:
- VLOG(2) << "Received log message via RTCP that we did not expect: "
- << static_cast<int>(event_it->type);
- break;
- }
- }
- }
- }
-
- virtual void OnReceivedSenderLog(
- const transport::RtcpSenderLogMessage& sender_log) OVERRIDE {
- transport::RtcpSenderLogMessage::const_iterator it = sender_log.begin();
-
- for (; it != sender_log.end(); ++it) {
- uint32 rtp_timestamp = it->rtp_timestamp;
- CastLoggingEvent log_event = kUnknown;
-
- // These events are provided to know the status of frames that never
- // reached the receiver. The timing information for these events are not
- // relevant and is not sent over the wire.
- switch (it->frame_status) {
- case transport::kRtcpSenderFrameStatusDroppedByFlowControl:
- // A frame that have been dropped by the flow control would have
- // kVideoFrameCaptureBegin as its last event in the log.
- log_event = kVideoFrameCaptureBegin;
- break;
- case transport::kRtcpSenderFrameStatusDroppedByEncoder:
- // A frame that have been dropped by the encoder would have
- // kVideoFrameSentToEncoder as its last event in the log.
- log_event = kVideoFrameSentToEncoder;
- break;
- case transport::kRtcpSenderFrameStatusSentToNetwork:
- // A frame that have be encoded is always sent to the network. We
- // do not add a new log entry for this.
- log_event = kVideoFrameEncoded;
- break;
- default:
- continue;
- }
- // TODO(pwestin): how do we handle the truncated rtp_timestamp?
- // Add received log messages into our log system.
- // TODO(pwestin): how do we handle the time? we don't care about it but
- // we need to send in one.
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- cast_environment_->Logging()->InsertFrameEvent(
- now, log_event, rtp_timestamp, kFrameIdUnknown);
- }
+ rtcp_->OnReceivedReceiverLog(receiver_log);
}
private:
@@ -160,7 +79,7 @@ Rtcp::Rtcp(scoped_refptr<CastEnvironment> cast_environment,
transport::PacedPacketSender* paced_packet_sender,
RtpReceiverStatistics* rtp_receiver_statistics, RtcpMode rtcp_mode,
const base::TimeDelta& rtcp_interval, uint32 local_ssrc,
- uint32 remote_ssrc, const std::string& c_name)
+ uint32 remote_ssrc, const std::string& c_name, bool is_audio)
: cast_environment_(cast_environment),
transport_sender_(transport_sender),
rtcp_interval_(rtcp_interval),
@@ -173,12 +92,13 @@ Rtcp::Rtcp(scoped_refptr<CastEnvironment> cast_environment,
receiver_feedback_(new LocalRtcpReceiverFeedback(this, cast_environment)),
rtcp_sender_(new RtcpSender(cast_environment, paced_packet_sender,
local_ssrc, c_name)),
- last_report_received_(0),
- last_received_rtp_timestamp_(0),
- last_received_ntp_seconds_(0),
- last_received_ntp_fraction_(0),
+ last_report_truncated_ntp_(0),
+ local_clock_ahead_by_(ClockDriftSmoother::GetDefaultTimeConstant()),
+ lip_sync_rtp_timestamp_(0),
+ lip_sync_ntp_timestamp_(0),
min_rtt_(base::TimeDelta::FromMilliseconds(kMaxRttMs)),
- number_of_rtt_in_avg_(0) {
+ number_of_rtt_in_avg_(0),
+ is_audio_(is_audio) {
rtcp_receiver_.reset(new RtcpReceiver(cast_environment, sender_feedback,
receiver_feedback_.get(),
rtt_feedback_.get(), local_ssrc));
@@ -261,7 +181,7 @@ void Rtcp::SendRtcpFromRtpReceiver(
&report_block.extended_high_sequence_number, &report_block.jitter);
}
- report_block.last_sr = last_report_received_;
+ report_block.last_sr = last_report_truncated_ntp_;
if (!time_last_report_received_.is_null()) {
uint32 delay_seconds = 0;
uint32 delay_fraction = 0;
@@ -283,26 +203,24 @@ void Rtcp::SendRtcpFromRtpReceiver(
target_delay_ms_);
}
-void Rtcp::SendRtcpFromRtpSender(
- const transport::RtcpSenderLogMessage& sender_log_message,
- transport::RtcpSenderInfo sender_info) {
+void Rtcp::SendRtcpFromRtpSender(base::TimeTicks current_time,
+ uint32 current_time_as_rtp_timestamp) {
DCHECK(transport_sender_);
uint32 packet_type_flags = transport::kRtcpSr;
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
-
- if (sender_log_message.size()) {
- packet_type_flags |= transport::kRtcpSenderLog;
- }
-
- SaveLastSentNtpTime(now, sender_info.ntp_seconds, sender_info.ntp_fraction);
+ uint32 current_ntp_seconds = 0;
+ uint32 current_ntp_fractions = 0;
+ ConvertTimeTicksToNtp(current_time, &current_ntp_seconds,
+ &current_ntp_fractions);
+ SaveLastSentNtpTime(current_time, current_ntp_seconds,
+ current_ntp_fractions);
transport::RtcpDlrrReportBlock dlrr;
if (!time_last_report_received_.is_null()) {
packet_type_flags |= transport::kRtcpDlrr;
- dlrr.last_rr = last_report_received_;
+ dlrr.last_rr = last_report_truncated_ntp_;
uint32 delay_seconds = 0;
uint32 delay_fraction = 0;
- base::TimeDelta delta = now - time_last_report_received_;
+ base::TimeDelta delta = current_time - time_last_report_received_;
ConvertTimeToFractions(delta.InMicroseconds(), &delay_seconds,
&delay_fraction);
@@ -310,64 +228,72 @@ void Rtcp::SendRtcpFromRtpSender(
}
transport_sender_->SendRtcpFromRtpSender(
- packet_type_flags, sender_info, dlrr, sender_log_message, local_ssrc_,
- c_name_);
+ packet_type_flags, current_ntp_seconds, current_ntp_fractions,
+ current_time_as_rtp_timestamp, dlrr, local_ssrc_, c_name_);
UpdateNextTimeToSendRtcp();
}
void Rtcp::OnReceivedNtp(uint32 ntp_seconds, uint32 ntp_fraction) {
- last_report_received_ = (ntp_seconds << 16) + (ntp_fraction >> 16);
+ last_report_truncated_ntp_ = ConvertToNtpDiff(ntp_seconds, ntp_fraction);
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
time_last_report_received_ = now;
+
+ // TODO(miu): This clock offset calculation does not account for packet
+ // transit time over the network. End2EndTest.EvilNetwork confirms that this
+ // contributes a very significant source of error here. Fix this along with
+ // the RTT clean-up.
+ const base::TimeDelta measured_offset =
+ now - ConvertNtpToTimeTicks(ntp_seconds, ntp_fraction);
+ local_clock_ahead_by_.Update(now, measured_offset);
+ if (measured_offset < local_clock_ahead_by_.Current()) {
+ // Logically, the minimum offset between the clocks has to be the correct
+ // one. For example, the time it took to transmit the current report may
+ // have been lower than usual, and so some of the error introduced by the
+ // transmission time can be eliminated.
+ local_clock_ahead_by_.Reset(now, measured_offset);
+ }
+ VLOG(1) << "Local clock is ahead of the remote clock by: "
+ << "measured=" << measured_offset.InMicroseconds() << " usec, "
+ << "filtered=" << local_clock_ahead_by_.Current().InMicroseconds()
+ << " usec.";
}
void Rtcp::OnReceivedLipSyncInfo(uint32 rtp_timestamp, uint32 ntp_seconds,
uint32 ntp_fraction) {
- last_received_rtp_timestamp_ = rtp_timestamp;
- last_received_ntp_seconds_ = ntp_seconds;
- last_received_ntp_fraction_ = ntp_fraction;
-}
-
-void Rtcp::OnReceivedSendReportRequest() {
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
-
- // Trigger a new RTCP report at next timer.
- next_time_to_send_rtcp_ = now;
+ if (ntp_seconds == 0) {
+ NOTREACHED();
+ return;
+ }
+ lip_sync_rtp_timestamp_ = rtp_timestamp;
+ lip_sync_ntp_timestamp_ =
+ (static_cast<uint64>(ntp_seconds) << 32) | ntp_fraction;
}
-bool Rtcp::RtpTimestampInSenderTime(int frequency, uint32 rtp_timestamp,
- base::TimeTicks* rtp_timestamp_in_ticks)
- const {
- if (last_received_ntp_seconds_ == 0)
+bool Rtcp::GetLatestLipSyncTimes(uint32* rtp_timestamp,
+ base::TimeTicks* reference_time) const {
+ if (!lip_sync_ntp_timestamp_)
return false;
- int wrap = CheckForWrapAround(rtp_timestamp, last_received_rtp_timestamp_);
- int64 rtp_timestamp_int64 = rtp_timestamp;
- int64 last_received_rtp_timestamp_int64 = last_received_rtp_timestamp_;
+ const base::TimeTicks local_reference_time =
+ ConvertNtpToTimeTicks(static_cast<uint32>(lip_sync_ntp_timestamp_ >> 32),
+ static_cast<uint32>(lip_sync_ntp_timestamp_)) +
+ local_clock_ahead_by_.Current();
- if (wrap == 1) {
- rtp_timestamp_int64 += (1LL << 32);
- } else if (wrap == -1) {
- last_received_rtp_timestamp_int64 += (1LL << 32);
- }
- // Time since the last RTCP message.
- // Note that this can be negative since we can compare a rtp timestamp from
- // a frame older than the last received RTCP message.
- int64 rtp_timestamp_diff =
- rtp_timestamp_int64 - last_received_rtp_timestamp_int64;
+ // Sanity-check: Getting regular lip sync updates?
+ DCHECK((cast_environment_->Clock()->NowTicks() - local_reference_time) <
+ base::TimeDelta::FromMinutes(1));
- int frequency_khz = frequency / 1000;
- int64 rtp_time_diff_ms = rtp_timestamp_diff / frequency_khz;
+ *rtp_timestamp = lip_sync_rtp_timestamp_;
+ *reference_time = local_reference_time;
+ return true;
+}
- // Sanity check.
- if (std::abs(rtp_time_diff_ms) > kMaxDiffSinceReceivedRtcpMs)
- return false;
+void Rtcp::OnReceivedSendReportRequest() {
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- *rtp_timestamp_in_ticks = ConvertNtpToTimeTicks(last_received_ntp_seconds_,
- last_received_ntp_fraction_) +
- base::TimeDelta::FromMilliseconds(rtp_time_diff_ms);
- return true;
+ // Trigger a new RTCP report at next timer.
+ next_time_to_send_rtcp_ = now;
}
void Rtcp::SetCastReceiverEventHistorySize(size_t size) {
@@ -375,8 +301,8 @@ void Rtcp::SetCastReceiverEventHistorySize(size_t size) {
}
void Rtcp::SetTargetDelay(base::TimeDelta target_delay) {
+ DCHECK(target_delay.InMilliseconds() < kMaxDelay);
target_delay_ms_ = static_cast<uint16>(target_delay.InMilliseconds());
- DCHECK(target_delay_ms_ < kMaxDelay);
}
void Rtcp::OnReceivedDelaySinceLastReport(uint32 receivers_ssrc,
@@ -421,17 +347,21 @@ void Rtcp::SaveLastSentNtpTime(const base::TimeTicks& now,
void Rtcp::UpdateRtt(const base::TimeDelta& sender_delay,
const base::TimeDelta& receiver_delay) {
base::TimeDelta rtt = sender_delay - receiver_delay;
+ // TODO(miu): Find out why this must be >= 1 ms, and remove the fudge if it's
+ // bogus.
rtt = std::max(rtt, base::TimeDelta::FromMilliseconds(1));
rtt_ = rtt;
min_rtt_ = std::min(min_rtt_, rtt);
max_rtt_ = std::max(max_rtt_, rtt);
+ // TODO(miu): Replace "average for all time" with an EWMA, or suitable
+ // "average over recent past" mechanism.
if (number_of_rtt_in_avg_ != 0) {
- float ac = static_cast<float>(number_of_rtt_in_avg_);
+ const double ac = static_cast<double>(number_of_rtt_in_avg_);
avg_rtt_ms_ = ((ac / (ac + 1.0)) * avg_rtt_ms_) +
- ((1.0 / (ac + 1.0)) * rtt.InMilliseconds());
+ ((1.0 / (ac + 1.0)) * rtt.InMillisecondsF());
} else {
- avg_rtt_ms_ = rtt.InMilliseconds();
+ avg_rtt_ms_ = rtt.InMillisecondsF();
}
number_of_rtt_in_avg_++;
}
@@ -446,28 +376,12 @@ bool Rtcp::Rtt(base::TimeDelta* rtt, base::TimeDelta* avg_rtt,
if (number_of_rtt_in_avg_ == 0) return false;
*rtt = rtt_;
- *avg_rtt = base::TimeDelta::FromMilliseconds(avg_rtt_ms_);
+ *avg_rtt = base::TimeDelta::FromMillisecondsD(avg_rtt_ms_);
*min_rtt = min_rtt_;
*max_rtt = max_rtt_;
return true;
}
-int Rtcp::CheckForWrapAround(uint32 new_timestamp, uint32 old_timestamp) const {
- if (new_timestamp < old_timestamp) {
- // This difference should be less than -2^31 if we have had a wrap around
- // (e.g. |new_timestamp| = 1, |rtcp_rtp_timestamp| = 2^32 - 1). Since it is
- // cast to a int32_t, it should be positive.
- if (static_cast<int32>(new_timestamp - old_timestamp) > 0) {
- return 1; // Forward wrap around.
- }
- } else if (static_cast<int32>(old_timestamp - new_timestamp) > 0) {
- // This difference should be less than -2^31 if we have had a backward wrap
- // around. Since it is cast to a int32, it should be positive.
- return -1;
- }
- return 0;
-}
-
void Rtcp::UpdateNextTimeToSendRtcp() {
int random = base::RandInt(0, 999);
base::TimeDelta time_to_next =
@@ -477,5 +391,42 @@ void Rtcp::UpdateNextTimeToSendRtcp() {
next_time_to_send_rtcp_ = now + time_to_next;
}
+void Rtcp::OnReceivedReceiverLog(const RtcpReceiverLogMessage& receiver_log) {
+ // Add received log messages into our log system.
+ RtcpReceiverLogMessage::const_iterator it = receiver_log.begin();
+ EventMediaType media_type = is_audio_ ? AUDIO_EVENT : VIDEO_EVENT;
+ for (; it != receiver_log.end(); ++it) {
+ uint32 rtp_timestamp = it->rtp_timestamp_;
+
+ RtcpReceiverEventLogMessages::const_iterator event_it =
+ it->event_log_messages_.begin();
+ for (; event_it != it->event_log_messages_.end(); ++event_it) {
+ switch (event_it->type) {
+ case PACKET_RECEIVED:
+ cast_environment_->Logging()->InsertPacketEvent(
+ event_it->event_timestamp, event_it->type,
+ media_type, rtp_timestamp,
+ kFrameIdUnknown, event_it->packet_id, 0, 0);
+ break;
+ case FRAME_ACK_SENT:
+ case FRAME_DECODED:
+ cast_environment_->Logging()->InsertFrameEvent(
+ event_it->event_timestamp, event_it->type, media_type,
+ rtp_timestamp, kFrameIdUnknown);
+ break;
+ case FRAME_PLAYOUT:
+ cast_environment_->Logging()->InsertFrameEventWithDelay(
+ event_it->event_timestamp, event_it->type, media_type,
+ rtp_timestamp, kFrameIdUnknown, event_it->delay_delta);
+ break;
+ default:
+ VLOG(2) << "Received log message via RTCP that we did not expect: "
+ << static_cast<int>(event_it->type);
+ break;
+ }
+ }
+ }
+}
+
} // namespace cast
} // namespace media
diff --git a/media/cast/rtcp/rtcp.h b/media/cast/rtcp/rtcp.h
index 15bbe1138c..ff81bb90e0 100644
--- a/media/cast/rtcp/rtcp.h
+++ b/media/cast/rtcp/rtcp.h
@@ -5,16 +5,15 @@
#ifndef MEDIA_CAST_RTCP_RTCP_H_
#define MEDIA_CAST_RTCP_RTCP_H_
-#include <list>
#include <map>
#include <queue>
-#include <set>
#include <string>
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
+#include "media/cast/base/clock_drift_smoother.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
@@ -68,7 +67,8 @@ class Rtcp {
const base::TimeDelta& rtcp_interval,
uint32 local_ssrc,
uint32 remote_ssrc,
- const std::string& c_name);
+ const std::string& c_name,
+ bool is_audio);
virtual ~Rtcp();
@@ -77,15 +77,12 @@ class Rtcp {
static uint32 GetSsrcOfSender(const uint8* rtcp_buffer, size_t length);
base::TimeTicks TimeToSendNextRtcpReport();
- // |sender_log_message| is optional; without it no log messages will be
- // attached to the RTCP report; instead a normal RTCP send report will be
- // sent.
- // Additionally if all messages in |sender_log_message| does
- // not fit in the packet the |sender_log_message| will contain the remaining
- // unsent messages.
- void SendRtcpFromRtpSender(
- const transport::RtcpSenderLogMessage& sender_log_message,
- transport::RtcpSenderInfo sender_info);
+
+ // Send a RTCP sender report.
+ // |current_time| is the current time reported by a tick clock.
+ // |current_time_as_rtp_timestamp| is the corresponding RTP timestamp.
+ void SendRtcpFromRtpSender(base::TimeTicks current_time,
+ uint32 current_time_as_rtp_timestamp);
// |cast_message| and |rtcp_events| is optional; if |cast_message| is
// provided the RTCP receiver report will append a Cast message containing
@@ -96,24 +93,39 @@ class Rtcp {
const ReceiverRtcpEventSubscriber::RtcpEventMultiMap* rtcp_events);
void IncomingRtcpPacket(const uint8* rtcp_buffer, size_t length);
+
+ // TODO(miu): Clean up this method and downstream code: Only VideoSender uses
+ // this (for congestion control), and only the |rtt| and |avg_rtt| values, and
+ // it's not clear that any of the downstream code is doing the right thing
+ // with this data.
bool Rtt(base::TimeDelta* rtt,
base::TimeDelta* avg_rtt,
base::TimeDelta* min_rtt,
base::TimeDelta* max_rtt) const;
- bool RtpTimestampInSenderTime(int frequency,
- uint32 rtp_timestamp,
- base::TimeTicks* rtp_timestamp_in_ticks) const;
+
+ bool is_rtt_available() const { return number_of_rtt_in_avg_ > 0; }
+
+ // If available, returns true and sets the output arguments to the latest
+ // lip-sync timestamps gleaned from the sender reports. While the sender
+ // provides reference NTP times relative to its own wall clock, the
+ // |reference_time| returned here has been translated to the local
+ // CastEnvironment clock.
+ bool GetLatestLipSyncTimes(uint32* rtp_timestamp,
+ base::TimeTicks* reference_time) const;
// Set the history size to record Cast receiver events. The event history is
// used to remove duplicates. The history will store at most |size| events.
void SetCastReceiverEventHistorySize(size_t size);
- // Update the target delay. Will be added to every sender report.
+ // Update the target delay. Will be added to every report sent back to the
+ // sender.
+ // TODO(miu): Remove this deprecated functionality. The sender ignores this.
void SetTargetDelay(base::TimeDelta target_delay);
- protected:
- int CheckForWrapAround(uint32 new_timestamp, uint32 old_timestamp) const;
+ void OnReceivedReceiverLog(const RtcpReceiverLogMessage& receiver_log);
+ protected:
+ void OnReceivedNtp(uint32 ntp_seconds, uint32 ntp_fraction);
void OnReceivedLipSyncInfo(uint32 rtp_timestamp,
uint32 ntp_seconds,
uint32 ntp_fraction);
@@ -122,13 +134,6 @@ class Rtcp {
friend class LocalRtcpRttFeedback;
friend class LocalRtcpReceiverFeedback;
- void SendRtcp(const base::TimeTicks& now,
- uint32 packet_type_flags,
- uint32 media_ssrc,
- const RtcpCastMessage* cast_message);
-
- void OnReceivedNtp(uint32 ntp_seconds, uint32 ntp_fraction);
-
void OnReceivedDelaySinceLastReport(uint32 receivers_ssrc,
uint32 last_report,
uint32 delay_since_last_report);
@@ -163,19 +168,34 @@ class Rtcp {
base::TimeTicks next_time_to_send_rtcp_;
RtcpSendTimeMap last_reports_sent_map_;
RtcpSendTimeQueue last_reports_sent_queue_;
+
+ // The truncated (i.e., 64-->32-bit) NTP timestamp provided in the last report
+ // from the remote peer, along with the local time at which the report was
+ // received. These values are used for ping-pong'ing NTP timestamps between
+ // the peers so that they can estimate the network's round-trip time.
+ uint32 last_report_truncated_ntp_;
base::TimeTicks time_last_report_received_;
- uint32 last_report_received_;
- uint32 last_received_rtp_timestamp_;
- uint32 last_received_ntp_seconds_;
- uint32 last_received_ntp_fraction_;
+ // Maintains a smoothed offset between the local clock and the remote clock.
+ // Calling this member's Current() method is only valid if
+ // |time_last_report_received_| is not "null."
+ ClockDriftSmoother local_clock_ahead_by_;
+
+ // Latest "lip sync" info from the sender. The sender provides the RTP
+ // timestamp of some frame of its choosing and also a corresponding reference
+ // NTP timestamp sampled from a clock common to all media streams. It is
+ // expected that the sender will update this data regularly and in a timely
+ // manner (e.g., about once per second).
+ uint32 lip_sync_rtp_timestamp_;
+ uint64 lip_sync_ntp_timestamp_;
base::TimeDelta rtt_;
base::TimeDelta min_rtt_;
base::TimeDelta max_rtt_;
int number_of_rtt_in_avg_;
- float avg_rtt_ms_;
+ double avg_rtt_ms_;
uint16 target_delay_ms_;
+ bool is_audio_;
DISALLOW_COPY_AND_ASSIGN(Rtcp);
};
diff --git a/media/cast/rtcp/rtcp_defines.cc b/media/cast/rtcp/rtcp_defines.cc
index 858aa27ba0..214100d4d9 100644
--- a/media/cast/rtcp/rtcp_defines.cc
+++ b/media/cast/rtcp/rtcp_defines.cc
@@ -21,7 +21,7 @@ void RtcpCastMessage::Copy(const RtcpCastMessage& cast_message) {
}
RtcpReceiverEventLogMessage::RtcpReceiverEventLogMessage()
- : type(kUnknown), packet_id(0u) {}
+ : type(UNKNOWN), packet_id(0u) {}
RtcpReceiverEventLogMessage::~RtcpReceiverEventLogMessage() {}
RtcpReceiverFrameLogMessage::RtcpReceiverFrameLogMessage(uint32 timestamp)
@@ -42,7 +42,7 @@ RtcpReceiverReferenceTimeReport::RtcpReceiverReferenceTimeReport()
: remote_ssrc(0u), ntp_seconds(0u), ntp_fraction(0u) {}
RtcpReceiverReferenceTimeReport::~RtcpReceiverReferenceTimeReport() {}
-RtcpEvent::RtcpEvent() : type(kUnknown), packet_id(0u) {}
+RtcpEvent::RtcpEvent() : type(UNKNOWN), packet_id(0u) {}
RtcpEvent::~RtcpEvent() {}
} // namespace cast
diff --git a/media/cast/rtcp/rtcp_defines.h b/media/cast/rtcp/rtcp_defines.h
index 5cc4a59a07..31795648c6 100644
--- a/media/cast/rtcp/rtcp_defines.h
+++ b/media/cast/rtcp/rtcp_defines.h
@@ -17,7 +17,6 @@ namespace media {
namespace cast {
static const size_t kRtcpCastLogHeaderSize = 12;
-static const size_t kRtcpSenderFrameLogSize = 4;
static const size_t kRtcpReceiverFrameLogSize = 8;
static const size_t kRtcpReceiverEventLogSize = 4;
@@ -111,7 +110,7 @@ inline bool operator==(RtcpReceiverReferenceTimeReport lhs,
// Struct used by raw event subscribers as an intermediate format before
// sending off to the other side via RTCP.
-// (i.e., WindowedRtcpEventRtp{Sender,Receiver}Subscriber)
+// (i.e., {Sender,Receiver}RtcpEventSubscriber)
struct RtcpEvent {
RtcpEvent();
~RtcpEvent();
@@ -121,11 +120,10 @@ struct RtcpEvent {
// Time of event logged.
base::TimeTicks timestamp;
- // Render/playout delay. Only set for kAudioPlayoutDelay and
- // kVideoRenderDelay events.
+ // Render/playout delay. Only set for FRAME_PLAYOUT events.
base::TimeDelta delay_delta;
- // Only set for packet events. (kAudioPacketReceived, kVideoPacketReceived)
+ // Only set for packet events.
uint16 packet_id;
};
diff --git a/media/cast/rtcp/rtcp_receiver.cc b/media/cast/rtcp/rtcp_receiver.cc
index 9345d5850a..16395737fe 100644
--- a/media/cast/rtcp/rtcp_receiver.cc
+++ b/media/cast/rtcp/rtcp_receiver.cc
@@ -10,33 +10,6 @@
namespace {
-bool IsRtcpPacketEvent(media::cast::CastLoggingEvent event_type) {
- return event_type == media::cast::kAudioPacketReceived ||
- event_type == media::cast::kVideoPacketReceived ||
- event_type == media::cast::kDuplicateAudioPacketReceived ||
- event_type == media::cast::kDuplicateVideoPacketReceived;
-}
-
-media::cast::transport::RtcpSenderFrameStatus
-TranslateToFrameStatusFromWireFormat(uint8 status) {
- switch (status) {
- case 0:
- return media::cast::transport::kRtcpSenderFrameStatusUnknown;
- case 1:
- return media::cast::transport::kRtcpSenderFrameStatusDroppedByEncoder;
- case 2:
- return media::cast::transport::kRtcpSenderFrameStatusDroppedByFlowControl;
- case 3:
- return media::cast::transport::kRtcpSenderFrameStatusSentToNetwork;
- default:
- // If the sender adds new log messages we will end up here until we add
- // the new messages in the receiver.
- NOTREACHED();
- VLOG(1) << "Unexpected status received: " << static_cast<int>(status);
- return media::cast::transport::kRtcpSenderFrameStatusUnknown;
- }
-}
-
// A receiver frame event is identified by frame RTP timestamp, event timestamp
// and event type.
// A receiver packet event is identified by all of the above plus packet id.
@@ -128,9 +101,6 @@ void RtcpReceiver::IncomingRtcpPacket(RtcpParser* rtcp_parser) {
case kRtcpApplicationSpecificCastReceiverLogCode:
HandleApplicationSpecificCastReceiverLog(rtcp_parser);
break;
- case kRtcpApplicationSpecificCastSenderLogCode:
- HandleApplicationSpecificCastSenderLog(rtcp_parser);
- break;
case kRtcpPayloadSpecificRembCode:
case kRtcpPayloadSpecificRembItemCode:
case kRtcpPayloadSpecificCastCode:
@@ -479,7 +449,7 @@ void RtcpReceiver::HandleApplicationSpecificCastReceiverEventLog(
const uint8 event = rtcp_field.cast_receiver_log.event;
const CastLoggingEvent event_type = TranslateToLogEventFromWireFormat(event);
- uint16 packet_id = IsRtcpPacketEvent(event_type) ?
+ uint16 packet_id = event_type == PACKET_RECEIVED ?
rtcp_field.cast_receiver_log.delay_delta_or_packet_id.packet_id : 0;
const base::TimeTicks event_timestamp =
base::TimeTicks() +
@@ -518,36 +488,6 @@ void RtcpReceiver::HandleApplicationSpecificCastReceiverEventLog(
event_log_messages->push_back(event_log);
}
-void RtcpReceiver::HandleApplicationSpecificCastSenderLog(
- RtcpParser* rtcp_parser) {
- const RtcpField& rtcp_field = rtcp_parser->Field();
- uint32 remote_ssrc = rtcp_field.cast_sender_log.sender_ssrc;
-
- if (remote_ssrc_ != remote_ssrc) {
- RtcpFieldTypes field_type;
- // Message not to us. Iterate until we have passed this message.
- do {
- field_type = rtcp_parser->Iterate();
- } while (field_type == kRtcpApplicationSpecificCastSenderLogCode);
- return;
- }
- transport::RtcpSenderLogMessage sender_log;
-
- RtcpFieldTypes field_type = rtcp_parser->Iterate();
- while (field_type == kRtcpApplicationSpecificCastSenderLogCode) {
- const RtcpField& rtcp_field = rtcp_parser->Field();
- transport::RtcpSenderFrameLogMessage frame_log;
- frame_log.frame_status =
- TranslateToFrameStatusFromWireFormat(rtcp_field.cast_sender_log.status);
- frame_log.rtp_timestamp = rtcp_field.cast_sender_log.rtp_timestamp;
- sender_log.push_back(frame_log);
- field_type = rtcp_parser->Iterate();
- }
- if (receiver_feedback_) {
- receiver_feedback_->OnReceivedSenderLog(sender_log);
- }
-}
-
void RtcpReceiver::HandlePayloadSpecificCastItem(RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
RtcpCastMessage cast_message(remote_ssrc_);
diff --git a/media/cast/rtcp/rtcp_receiver.h b/media/cast/rtcp/rtcp_receiver.h
index c8b6435a84..d3cef9e57b 100644
--- a/media/cast/rtcp/rtcp_receiver.h
+++ b/media/cast/rtcp/rtcp_receiver.h
@@ -29,9 +29,6 @@ class RtcpReceiverFeedback {
virtual void OnReceivedReceiverLog(
const RtcpReceiverLogMessage& receiver_log) = 0;
- virtual void OnReceivedSenderLog(
- const transport::RtcpSenderLogMessage& sender_log) = 0;
-
virtual ~RtcpReceiverFeedback() {}
};
diff --git a/media/cast/rtcp/rtcp_receiver_unittest.cc b/media/cast/rtcp/rtcp_receiver_unittest.cc
index 74a60dc76f..f898939b26 100644
--- a/media/cast/rtcp/rtcp_receiver_unittest.cc
+++ b/media/cast/rtcp/rtcp_receiver_unittest.cc
@@ -98,7 +98,7 @@ class RtcpReceiverCastLogVerification : public RtcpReceiverFeedback {
EXPECT_EQ(event_expected_it->type, event_incoming_it->type);
EXPECT_EQ(event_expected_it->event_timestamp,
event_incoming_it->event_timestamp);
- if (event_expected_it->type == kVideoPacketReceived) {
+ if (event_expected_it->type == PACKET_RECEIVED) {
EXPECT_EQ(event_expected_it->packet_id, event_incoming_it->packet_id);
} else {
EXPECT_EQ(event_expected_it->delay_delta,
@@ -111,25 +111,6 @@ class RtcpReceiverCastLogVerification : public RtcpReceiverFeedback {
called_on_received_receiver_log_ = true;
}
- virtual void OnReceivedSenderLog(
- const transport::RtcpSenderLogMessage& sender_log) OVERRIDE {
- EXPECT_EQ(expected_sender_log_.size(), sender_log.size());
-
- transport::RtcpSenderLogMessage::const_iterator expected_it =
- expected_sender_log_.begin();
- transport::RtcpSenderLogMessage::const_iterator incoming_it =
- sender_log.begin();
- for (; expected_it != expected_sender_log_.end();
- ++expected_it, ++incoming_it) {
- EXPECT_EQ(expected_it->frame_status, incoming_it->frame_status);
- EXPECT_EQ(0xffffff & expected_it->rtp_timestamp,
- incoming_it->rtp_timestamp);
- }
- called_on_received_sender_log_ = true;
- }
-
- bool OnReceivedSenderLogCalled() { return called_on_received_sender_log_; }
-
bool OnReceivedReceiverLogCalled() {
return called_on_received_receiver_log_ && expected_receiver_log_.empty();
}
@@ -138,13 +119,8 @@ class RtcpReceiverCastLogVerification : public RtcpReceiverFeedback {
expected_receiver_log_ = receiver_log;
}
- void SetExpectedSenderLog(const transport::RtcpSenderLogMessage& sender_log) {
- expected_sender_log_ = sender_log;
- }
-
private:
RtcpReceiverLogMessage expected_receiver_log_;
- transport::RtcpSenderLogMessage expected_sender_log_;
bool called_on_received_sender_log_;
bool called_on_received_receiver_log_;
@@ -447,40 +423,6 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastVerification) {
EXPECT_TRUE(sender_feedback_cast_verification.called());
}
-TEST_F(RtcpReceiverTest, InjectSenderReportWithCastSenderLogVerification) {
- RtcpReceiverCastLogVerification cast_log_verification;
- RtcpReceiver rtcp_receiver(cast_environment_,
- &mock_sender_feedback_,
- &cast_log_verification,
- &mock_rtt_feedback_,
- kSourceSsrc);
- rtcp_receiver.SetRemoteSSRC(kSenderSsrc);
-
- transport::RtcpSenderLogMessage sender_log;
- for (int j = 0; j < 359; ++j) {
- transport::RtcpSenderFrameLogMessage sender_frame_log;
- sender_frame_log.frame_status =
- transport::kRtcpSenderFrameStatusSentToNetwork;
- sender_frame_log.rtp_timestamp = kRtpTimestamp + j * 90;
- sender_log.push_back(sender_frame_log);
- }
- cast_log_verification.SetExpectedSenderLog(sender_log);
-
- TestRtcpPacketBuilder p;
- p.AddSr(kSenderSsrc, 0);
- p.AddSdesCname(kSenderSsrc, kCName);
- p.AddSenderLog(kSenderSsrc);
-
- for (int i = 0; i < 359; ++i) {
- p.AddSenderFrameLog(transport::kRtcpSenderFrameStatusSentToNetwork,
- kRtpTimestamp + i * 90);
- }
- RtcpParser rtcp_parser(p.Data(), p.Length());
- rtcp_receiver.IncomingRtcpPacket(&rtcp_parser);
-
- EXPECT_TRUE(cast_log_verification.OnReceivedSenderLogCalled());
-}
-
TEST_F(RtcpReceiverTest, InjectReceiverReportWithReceiverLogVerificationBase) {
static const uint32 kTimeBaseMs = 12345678;
static const uint32 kTimeDelayMs = 10;
@@ -501,18 +443,18 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportWithReceiverLogVerificationBase) {
RtcpReceiverFrameLogMessage frame_log(kRtpTimestamp);
RtcpReceiverEventLogMessage event_log;
- event_log.type = kVideoAckSent;
+ event_log.type = FRAME_ACK_SENT;
event_log.event_timestamp = testing_clock.NowTicks();
event_log.delay_delta = base::TimeDelta::FromMilliseconds(kDelayDeltaMs);
frame_log.event_log_messages_.push_back(event_log);
testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
- event_log.type = kVideoPacketReceived;
+ event_log.type = PACKET_RECEIVED;
event_log.event_timestamp = testing_clock.NowTicks();
event_log.packet_id = kLostPacketId1;
frame_log.event_log_messages_.push_back(event_log);
- event_log.type = kVideoPacketReceived;
+ event_log.type = PACKET_RECEIVED;
event_log.event_timestamp = testing_clock.NowTicks();
event_log.packet_id = kLostPacketId2;
frame_log.event_log_messages_.push_back(event_log);
@@ -526,15 +468,15 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportWithReceiverLogVerificationBase) {
p.AddRb(kSourceSsrc);
p.AddReceiverLog(kSenderSsrc);
p.AddReceiverFrameLog(kRtpTimestamp, 3, kTimeBaseMs);
- p.AddReceiverEventLog(kDelayDeltaMs, kVideoAckSent, 0);
- p.AddReceiverEventLog(kLostPacketId1, kVideoPacketReceived, kTimeDelayMs);
- p.AddReceiverEventLog(kLostPacketId2, kVideoPacketReceived, kTimeDelayMs);
+ p.AddReceiverEventLog(kDelayDeltaMs, FRAME_ACK_SENT, 0);
+ p.AddReceiverEventLog(kLostPacketId1, PACKET_RECEIVED, kTimeDelayMs);
+ p.AddReceiverEventLog(kLostPacketId2, PACKET_RECEIVED, kTimeDelayMs);
// Adds duplicated receiver event.
p.AddReceiverFrameLog(kRtpTimestamp, 3, kTimeBaseMs);
- p.AddReceiverEventLog(kDelayDeltaMs, kVideoAckSent, 0);
- p.AddReceiverEventLog(kLostPacketId1, kVideoPacketReceived, kTimeDelayMs);
- p.AddReceiverEventLog(kLostPacketId2, kVideoPacketReceived, kTimeDelayMs);
+ p.AddReceiverEventLog(kDelayDeltaMs, FRAME_ACK_SENT, 0);
+ p.AddReceiverEventLog(kLostPacketId1, PACKET_RECEIVED, kTimeDelayMs);
+ p.AddReceiverEventLog(kLostPacketId2, PACKET_RECEIVED, kTimeDelayMs);
EXPECT_CALL(mock_rtt_feedback_,
OnReceivedDelaySinceLastReport(
@@ -566,7 +508,7 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportWithReceiverLogVerificationMulti) {
for (int j = 0; j < 100; ++j) {
RtcpReceiverFrameLogMessage frame_log(kRtpTimestamp);
RtcpReceiverEventLogMessage event_log;
- event_log.type = kVideoAckSent;
+ event_log.type = FRAME_ACK_SENT;
event_log.event_timestamp = testing_clock.NowTicks();
event_log.delay_delta = base::TimeDelta::FromMilliseconds(kDelayDeltaMs);
frame_log.event_log_messages_.push_back(event_log);
@@ -582,7 +524,7 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportWithReceiverLogVerificationMulti) {
p.AddReceiverLog(kSenderSsrc);
for (int i = 0; i < 100; ++i) {
p.AddReceiverFrameLog(kRtpTimestamp, 1, kTimeBaseMs + i * kTimeDelayMs);
- p.AddReceiverEventLog(kDelayDeltaMs, kVideoAckSent, 0);
+ p.AddReceiverEventLog(kDelayDeltaMs, FRAME_ACK_SENT, 0);
}
EXPECT_CALL(mock_rtt_feedback_,
diff --git a/media/cast/rtcp/rtcp_sender.cc b/media/cast/rtcp/rtcp_sender.cc
index ba1a07941b..bf7d30c84c 100644
--- a/media/cast/rtcp/rtcp_sender.cc
+++ b/media/cast/rtcp/rtcp_sender.cc
@@ -682,20 +682,14 @@ void RtcpSender::BuildReceiverLog(
event_message.type,
event_message.event_timestamp - event_timestamp_base);
switch (event_message.type) {
- case kAudioAckSent:
- case kVideoAckSent:
- case kAudioPlayoutDelay:
- case kAudioFrameDecoded:
- case kVideoFrameDecoded:
- case kVideoRenderDelay:
+ case FRAME_ACK_SENT:
+ case FRAME_PLAYOUT:
+ case FRAME_DECODED:
big_endian_writer.WriteU16(
static_cast<uint16>(event_message.delay_delta.InMilliseconds()));
big_endian_writer.WriteU16(event_type_and_timestamp_delta);
break;
- case kAudioPacketReceived:
- case kVideoPacketReceived:
- case kDuplicateAudioPacketReceived:
- case kDuplicateVideoPacketReceived:
+ case PACKET_RECEIVED:
big_endian_writer.WriteU16(event_message.packet_id);
big_endian_writer.WriteU16(event_type_and_timestamp_delta);
break;
diff --git a/media/cast/rtcp/rtcp_sender_unittest.cc b/media/cast/rtcp/rtcp_sender_unittest.cc
index 6746a68bae..2bd807f380 100644
--- a/media/cast/rtcp/rtcp_sender_unittest.cc
+++ b/media/cast/rtcp/rtcp_sender_unittest.cc
@@ -63,6 +63,10 @@ class TestRtcpTransport : public transport::PacedPacketSender {
return false;
}
+ virtual void CancelSendingPacket(
+ const transport::PacketKey& packet_key) OVERRIDE {
+ }
+
void SetExpectedRtcpPacket(scoped_ptr<Packet> packet) {
expected_packet_.swap(*packet);
}
@@ -255,8 +259,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtrCastMessageAndLog) {
cast_message.missing_frames_and_packets_[kFrameIdWithLostPackets] =
missing_packets;
- ReceiverRtcpEventSubscriber event_subscriber(
- 500, ReceiverRtcpEventSubscriber::kVideoEventSubscriber);
+ ReceiverRtcpEventSubscriber event_subscriber(500, VIDEO_EVENT);
ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
rtcp_sender_->SendRtcpFromRtpReceiver(
@@ -273,21 +276,23 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtrCastMessageAndLog) {
p.AddReceiverLog(kSendingSsrc);
p.AddReceiverFrameLog(kRtpTimestamp, 2, kTimeBaseMs);
- p.AddReceiverEventLog(0, kVideoAckSent, 0);
- p.AddReceiverEventLog(kLostPacketId1, kVideoPacketReceived, kTimeDelayMs);
+ p.AddReceiverEventLog(0, FRAME_ACK_SENT, 0);
+ p.AddReceiverEventLog(kLostPacketId1, PACKET_RECEIVED, kTimeDelayMs);
test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
FrameEvent frame_event;
frame_event.rtp_timestamp = kRtpTimestamp;
- frame_event.type = kVideoAckSent;
+ frame_event.type = FRAME_ACK_SENT;
+ frame_event.media_type = VIDEO_EVENT;
frame_event.timestamp = testing_clock.NowTicks();
event_subscriber.OnReceiveFrameEvent(frame_event);
testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
PacketEvent packet_event;
packet_event.rtp_timestamp = kRtpTimestamp;
- packet_event.type = kVideoPacketReceived;
+ packet_event.type = PACKET_RECEIVED;
+ packet_event.media_type = VIDEO_EVENT;
packet_event.timestamp = testing_clock.NowTicks();
packet_event.packet_id = kLostPacketId1;
event_subscriber.OnReceivePacketEvent(packet_event);
@@ -335,24 +340,25 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithOversizedFrameLog) {
kTimeBaseMs + (kRtcpMaxReceiverLogMessages - num_events) * kTimeDelayMs);
for (int i = 0; i < num_events; i++) {
p.AddReceiverEventLog(
- kLostPacketId1, kVideoPacketReceived,
+ kLostPacketId1, PACKET_RECEIVED,
static_cast<uint16>(kTimeDelayMs * i));
}
test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
- ReceiverRtcpEventSubscriber event_subscriber(
- 500, ReceiverRtcpEventSubscriber::kVideoEventSubscriber);
+ ReceiverRtcpEventSubscriber event_subscriber(500, VIDEO_EVENT);
FrameEvent frame_event;
frame_event.rtp_timestamp = kRtpTimestamp;
- frame_event.type = media::cast::kVideoAckSent;
+ frame_event.type = FRAME_ACK_SENT;
+ frame_event.media_type = VIDEO_EVENT;
frame_event.timestamp = testing_clock.NowTicks();
event_subscriber.OnReceiveFrameEvent(frame_event);
for (size_t i = 0; i < kRtcpMaxReceiverLogMessages; ++i) {
PacketEvent packet_event;
packet_event.rtp_timestamp = kRtpTimestamp + 2345;
- packet_event.type = kVideoPacketReceived;
+ packet_event.type = PACKET_RECEIVED;
+ packet_event.media_type = VIDEO_EVENT;
packet_event.timestamp = testing_clock.NowTicks();
packet_event.packet_id = kLostPacketId1;
event_subscriber.OnReceivePacketEvent(packet_event);
@@ -400,17 +406,17 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithTooManyLogFrames) {
i < kRtcpMaxReceiverLogMessages;
++i) {
p.AddReceiverFrameLog(kRtpTimestamp + i, 1, kTimeBaseMs + i * kTimeDelayMs);
- p.AddReceiverEventLog(0, kVideoAckSent, 0);
+ p.AddReceiverEventLog(0, FRAME_ACK_SENT, 0);
}
test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
- ReceiverRtcpEventSubscriber event_subscriber(
- 500, ReceiverRtcpEventSubscriber::kVideoEventSubscriber);
+ ReceiverRtcpEventSubscriber event_subscriber(500, VIDEO_EVENT);
for (size_t i = 0; i < kRtcpMaxReceiverLogMessages; ++i) {
FrameEvent frame_event;
frame_event.rtp_timestamp = kRtpTimestamp + static_cast<int>(i);
- frame_event.type = media::cast::kVideoAckSent;
+ frame_event.type = FRAME_ACK_SENT;
+ frame_event.media_type = VIDEO_EVENT;
frame_event.timestamp = testing_clock.NowTicks();
event_subscriber.OnReceiveFrameEvent(frame_event);
testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
@@ -451,16 +457,16 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithOldLogFrames) {
const int kTimeBetweenEventsMs = 410;
p.AddReceiverFrameLog(kRtpTimestamp, 10, kTimeBaseMs + kTimeBetweenEventsMs);
for (int i = 0; i < 10; ++i) {
- p.AddReceiverEventLog(0, kVideoAckSent, i * kTimeBetweenEventsMs);
+ p.AddReceiverEventLog(0, FRAME_ACK_SENT, i * kTimeBetweenEventsMs);
}
test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
- ReceiverRtcpEventSubscriber event_subscriber(
- 500, ReceiverRtcpEventSubscriber::kVideoEventSubscriber);
+ ReceiverRtcpEventSubscriber event_subscriber(500, VIDEO_EVENT);
for (int i = 0; i < 11; ++i) {
FrameEvent frame_event;
frame_event.rtp_timestamp = kRtpTimestamp;
- frame_event.type = media::cast::kVideoAckSent;
+ frame_event.type = FRAME_ACK_SENT;
+ frame_event.media_type = VIDEO_EVENT;
frame_event.timestamp = testing_clock.NowTicks();
event_subscriber.OnReceiveFrameEvent(frame_event);
testing_clock.Advance(
@@ -490,8 +496,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportRedundancy) {
base::SimpleTestTickClock testing_clock;
testing_clock.Advance(base::TimeDelta::FromMilliseconds(time_base_ms));
- ReceiverRtcpEventSubscriber event_subscriber(
- 500, ReceiverRtcpEventSubscriber::kVideoEventSubscriber);
+ ReceiverRtcpEventSubscriber event_subscriber(500, VIDEO_EVENT);
size_t packet_count = kReceiveLogMessageHistorySize + 10;
for (size_t i = 0; i < packet_count; i++) {
TestRtcpPacketBuilder p;
@@ -506,23 +511,24 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportRedundancy) {
kRtpTimestamp,
1,
time_base_ms - kSecondRedundancyOffset * kTimeBetweenEventsMs);
- p.AddReceiverEventLog(0, kVideoAckSent, 0);
+ p.AddReceiverEventLog(0, FRAME_ACK_SENT, 0);
}
if (i >= kFirstRedundancyOffset) {
p.AddReceiverFrameLog(
kRtpTimestamp,
1,
time_base_ms - kFirstRedundancyOffset * kTimeBetweenEventsMs);
- p.AddReceiverEventLog(0, kVideoAckSent, 0);
+ p.AddReceiverEventLog(0, FRAME_ACK_SENT, 0);
}
p.AddReceiverFrameLog(kRtpTimestamp, 1, time_base_ms);
- p.AddReceiverEventLog(0, kVideoAckSent, 0);
+ p.AddReceiverEventLog(0, FRAME_ACK_SENT, 0);
test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
FrameEvent frame_event;
frame_event.rtp_timestamp = kRtpTimestamp;
- frame_event.type = media::cast::kVideoAckSent;
+ frame_event.type = FRAME_ACK_SENT;
+ frame_event.media_type = VIDEO_EVENT;
frame_event.timestamp = testing_clock.NowTicks();
event_subscriber.OnReceiveFrameEvent(frame_event);
diff --git a/media/cast/rtcp/rtcp_unittest.cc b/media/cast/rtcp/rtcp_unittest.cc
index 2918c7579b..3cae9b9fea 100644
--- a/media/cast/rtcp/rtcp_unittest.cc
+++ b/media/cast/rtcp/rtcp_unittest.cc
@@ -26,7 +26,6 @@ static const uint32 kSenderSsrc = 0x10203;
static const uint32 kReceiverSsrc = 0x40506;
static const std::string kCName("test@10.1.1.1");
static const uint32 kRtcpIntervalMs = 500;
-static const int64 kStartMillisecond = INT64_C(12345678900000);
static const int64 kAddedDelay = 123;
static const int64 kAddedShortDelay = 100;
@@ -109,6 +108,10 @@ class LocalRtcpTransport : public transport::PacedPacketSender {
return false;
}
+ virtual void CancelSendingPacket(
+ const transport::PacketKey& packet_key) OVERRIDE {
+ }
+
private:
bool drop_packets_;
bool short_delay_;
@@ -140,9 +143,10 @@ class RtcpPeer : public Rtcp {
rtcp_interval,
local_ssrc,
remote_ssrc,
- c_name) {}
+ c_name,
+ true) {}
- using Rtcp::CheckForWrapAround;
+ using Rtcp::OnReceivedNtp;
using Rtcp::OnReceivedLipSyncInfo;
};
@@ -157,10 +161,8 @@ class RtcpTest : public ::testing::Test {
task_runner_,
task_runner_)),
sender_to_receiver_(testing_clock_),
- receiver_to_sender_(cast_environment_, testing_clock_),
- rtp_sender_stats_(kVideoFrequency) {
- testing_clock_->Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ receiver_to_sender_(cast_environment_, testing_clock_) {
+ testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
net::IPEndPoint dummy_endpoint;
transport_sender_.reset(new transport::CastTransportSenderImpl(
NULL,
@@ -171,6 +173,10 @@ class RtcpTest : public ::testing::Test {
base::TimeDelta(),
task_runner_,
&sender_to_receiver_));
+ transport::CastTransportAudioConfig config;
+ config.rtp.config.ssrc = kSenderSsrc;
+ config.rtp.max_outstanding_frames = 1;
+ transport_sender_->InitializeAudio(config);
EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(0);
}
@@ -197,14 +203,12 @@ class RtcpTest : public ::testing::Test {
scoped_ptr<transport::CastTransportSenderImpl> transport_sender_;
LocalRtcpTransport receiver_to_sender_;
MockRtcpSenderFeedback mock_sender_feedback_;
- RtpSenderStatistics rtp_sender_stats_;
DISALLOW_COPY_AND_ASSIGN(RtcpTest);
};
TEST_F(RtcpTest, TimeToSend) {
- base::TimeTicks start_time;
- start_time += base::TimeDelta::FromMilliseconds(kStartMillisecond);
+ const base::TimeTicks start_time = testing_clock_->NowTicks();
Rtcp rtcp(cast_environment_,
&mock_sender_feedback_,
transport_sender_.get(),
@@ -214,7 +218,8 @@ TEST_F(RtcpTest, TimeToSend) {
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kSenderSsrc,
kReceiverSsrc,
- kCName);
+ kCName,
+ true);
receiver_to_sender_.set_rtcp_receiver(&rtcp);
EXPECT_LE(start_time, rtcp.TimeToSendNextRtcpReport());
EXPECT_GE(
@@ -235,10 +240,10 @@ TEST_F(RtcpTest, BasicSenderReport) {
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kSenderSsrc,
kReceiverSsrc,
- kCName);
+ kCName,
+ true);
sender_to_receiver_.set_rtcp_receiver(&rtcp);
- transport::RtcpSenderLogMessage empty_sender_log;
- rtcp.SendRtcpFromRtpSender(empty_sender_log, rtp_sender_stats_.sender_info());
+ rtcp.SendRtcpFromRtpSender(base::TimeTicks(), 0);
}
TEST_F(RtcpTest, BasicReceiverReport) {
@@ -251,7 +256,8 @@ TEST_F(RtcpTest, BasicReceiverReport) {
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kSenderSsrc,
kReceiverSsrc,
- kCName);
+ kCName,
+ true);
receiver_to_sender_.set_rtcp_receiver(&rtcp);
rtcp.SendRtcpFromRtpReceiver(NULL, NULL);
}
@@ -269,7 +275,8 @@ TEST_F(RtcpTest, BasicCast) {
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kSenderSsrc,
kSenderSsrc,
- kCName);
+ kCName,
+ true);
receiver_to_sender_.set_rtcp_receiver(&rtcp);
RtcpCastMessage cast_message(kSenderSsrc);
cast_message.ack_frame_id_ = kAckFrameId;
@@ -295,7 +302,8 @@ TEST_F(RtcpTest, RttReducedSizeRtcp) {
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kReceiverSsrc,
kSenderSsrc,
- kCName);
+ kCName,
+ true);
// Media sender.
Rtcp rtcp_sender(cast_environment_,
@@ -307,7 +315,8 @@ TEST_F(RtcpTest, RttReducedSizeRtcp) {
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kSenderSsrc,
kReceiverSsrc,
- kCName);
+ kCName,
+ true);
sender_to_receiver_.set_rtcp_receiver(&rtcp_receiver);
receiver_to_sender_.set_rtcp_receiver(&rtcp_sender);
@@ -319,9 +328,7 @@ TEST_F(RtcpTest, RttReducedSizeRtcp) {
EXPECT_FALSE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- transport::RtcpSenderLogMessage empty_sender_log;
- rtcp_sender.SendRtcpFromRtpSender(empty_sender_log,
- rtp_sender_stats_.sender_info());
+ rtcp_sender.SendRtcpFromRtpSender(testing_clock_->NowTicks(), 1);
RunTasks(33);
rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
@@ -330,8 +337,7 @@ TEST_F(RtcpTest, RttReducedSizeRtcp) {
EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 2);
EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 2);
EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 2);
- rtcp_sender.SendRtcpFromRtpSender(empty_sender_log,
- rtp_sender_stats_.sender_info());
+ rtcp_sender.SendRtcpFromRtpSender(testing_clock_->NowTicks(), 2);
RunTasks(33);
EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
@@ -352,7 +358,8 @@ TEST_F(RtcpTest, Rtt) {
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kReceiverSsrc,
kSenderSsrc,
- kCName);
+ kCName,
+ true);
// Media sender.
Rtcp rtcp_sender(cast_environment_,
@@ -364,7 +371,8 @@ TEST_F(RtcpTest, Rtt) {
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kSenderSsrc,
kReceiverSsrc,
- kCName);
+ kCName,
+ true);
receiver_to_sender_.set_rtcp_receiver(&rtcp_sender);
sender_to_receiver_.set_rtcp_receiver(&rtcp_receiver);
@@ -376,9 +384,7 @@ TEST_F(RtcpTest, Rtt) {
EXPECT_FALSE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- transport::RtcpSenderLogMessage empty_sender_log;
- rtcp_sender.SendRtcpFromRtpSender(empty_sender_log,
- rtp_sender_stats_.sender_info());
+ rtcp_sender.SendRtcpFromRtpSender(testing_clock_->NowTicks(), 1);
RunTasks(33);
rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
@@ -393,8 +399,7 @@ TEST_F(RtcpTest, Rtt) {
EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 2);
EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 2);
- rtcp_sender.SendRtcpFromRtpSender(empty_sender_log,
- rtp_sender_stats_.sender_info());
+ rtcp_sender.SendRtcpFromRtpSender(testing_clock_->NowTicks(), 2);
RunTasks(33);
EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 2);
@@ -412,8 +417,7 @@ TEST_F(RtcpTest, Rtt) {
EXPECT_NEAR(kAddedDelay + kAddedShortDelay, min_rtt.InMilliseconds(), 2);
EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 2);
- rtcp_sender.SendRtcpFromRtpSender(empty_sender_log,
- rtp_sender_stats_.sender_info());
+ rtcp_sender.SendRtcpFromRtpSender(testing_clock_->NowTicks(), 3);
RunTasks(33);
EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_NEAR(2 * kAddedShortDelay, rtt.InMilliseconds(), 1);
@@ -445,9 +449,10 @@ TEST_F(RtcpTest, RttWithPacketLoss) {
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- kSenderSsrc,
kReceiverSsrc,
- kCName);
+ kSenderSsrc,
+ kCName,
+ true);
// Media sender.
Rtcp rtcp_sender(cast_environment_,
@@ -457,17 +462,16 @@ TEST_F(RtcpTest, RttWithPacketLoss) {
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- kReceiverSsrc,
kSenderSsrc,
- kCName);
+ kReceiverSsrc,
+ kCName,
+ true);
receiver_to_sender_.set_rtcp_receiver(&rtcp_sender);
sender_to_receiver_.set_rtcp_receiver(&rtcp_receiver);
rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
- transport::RtcpSenderLogMessage empty_sender_log;
- rtcp_sender.SendRtcpFromRtpSender(empty_sender_log,
- rtp_sender_stats_.sender_info());
+ rtcp_sender.SendRtcpFromRtpSender(testing_clock_->NowTicks(), 0);
RunTasks(33);
base::TimeDelta rtt;
@@ -486,8 +490,7 @@ TEST_F(RtcpTest, RttWithPacketLoss) {
receiver_to_sender_.set_drop_packets(true);
rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
- rtcp_sender.SendRtcpFromRtpSender(empty_sender_log,
- rtp_sender_stats_.sender_info());
+ rtcp_sender.SendRtcpFromRtpSender(testing_clock_->NowTicks(), 1);
RunTasks(33);
EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
@@ -499,136 +502,45 @@ TEST_F(RtcpTest, NtpAndTime) {
const int64 kSecondsbetweenYear1900and2030 = INT64_C(47481 * 24 * 60 * 60);
uint32 ntp_seconds_1 = 0;
- uint32 ntp_fractions_1 = 0;
+ uint32 ntp_fraction_1 = 0;
base::TimeTicks input_time = base::TimeTicks::Now();
- ConvertTimeTicksToNtp(input_time, &ntp_seconds_1, &ntp_fractions_1);
+ ConvertTimeTicksToNtp(input_time, &ntp_seconds_1, &ntp_fraction_1);
// Verify absolute value.
EXPECT_GT(ntp_seconds_1, kSecondsbetweenYear1900and2010);
EXPECT_LT(ntp_seconds_1, kSecondsbetweenYear1900and2030);
- base::TimeTicks out_1 = ConvertNtpToTimeTicks(ntp_seconds_1, ntp_fractions_1);
+ base::TimeTicks out_1 = ConvertNtpToTimeTicks(ntp_seconds_1, ntp_fraction_1);
EXPECT_EQ(input_time, out_1); // Verify inverse.
base::TimeDelta time_delta = base::TimeDelta::FromMilliseconds(1000);
input_time += time_delta;
uint32 ntp_seconds_2 = 0;
- uint32 ntp_fractions_2 = 0;
+ uint32 ntp_fraction_2 = 0;
- ConvertTimeTicksToNtp(input_time, &ntp_seconds_2, &ntp_fractions_2);
- base::TimeTicks out_2 = ConvertNtpToTimeTicks(ntp_seconds_2, ntp_fractions_2);
+ ConvertTimeTicksToNtp(input_time, &ntp_seconds_2, &ntp_fraction_2);
+ base::TimeTicks out_2 = ConvertNtpToTimeTicks(ntp_seconds_2, ntp_fraction_2);
EXPECT_EQ(input_time, out_2); // Verify inverse.
// Verify delta.
EXPECT_EQ((out_2 - out_1), time_delta);
EXPECT_EQ((ntp_seconds_2 - ntp_seconds_1), UINT32_C(1));
- EXPECT_NEAR(ntp_fractions_2, ntp_fractions_1, 1);
+ EXPECT_NEAR(ntp_fraction_2, ntp_fraction_1, 1);
time_delta = base::TimeDelta::FromMilliseconds(500);
input_time += time_delta;
uint32 ntp_seconds_3 = 0;
- uint32 ntp_fractions_3 = 0;
+ uint32 ntp_fraction_3 = 0;
- ConvertTimeTicksToNtp(input_time, &ntp_seconds_3, &ntp_fractions_3);
- base::TimeTicks out_3 = ConvertNtpToTimeTicks(ntp_seconds_3, ntp_fractions_3);
+ ConvertTimeTicksToNtp(input_time, &ntp_seconds_3, &ntp_fraction_3);
+ base::TimeTicks out_3 = ConvertNtpToTimeTicks(ntp_seconds_3, ntp_fraction_3);
EXPECT_EQ(input_time, out_3); // Verify inverse.
// Verify delta.
EXPECT_EQ((out_3 - out_2), time_delta);
- EXPECT_NEAR((ntp_fractions_3 - ntp_fractions_2), 0xffffffff / 2, 1);
-}
-
-TEST_F(RtcpTest, WrapAround) {
- RtcpPeer rtcp_peer(cast_environment_,
- &mock_sender_feedback_,
- transport_sender_.get(),
- NULL,
- NULL,
- kRtcpReducedSize,
- base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- kReceiverSsrc,
- kSenderSsrc,
- kCName);
- uint32 new_timestamp = 0;
- uint32 old_timestamp = 0;
- EXPECT_EQ(0, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
- new_timestamp = 1234567890;
- old_timestamp = 1234567000;
- EXPECT_EQ(0, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
- new_timestamp = 1234567000;
- old_timestamp = 1234567890;
- EXPECT_EQ(0, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
- new_timestamp = 123;
- old_timestamp = 4234567890u;
- EXPECT_EQ(1, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
- new_timestamp = 4234567890u;
- old_timestamp = 123;
- EXPECT_EQ(-1, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
-}
-
-TEST_F(RtcpTest, RtpTimestampInSenderTime) {
- RtcpPeer rtcp_peer(cast_environment_,
- &mock_sender_feedback_,
- transport_sender_.get(),
- &receiver_to_sender_,
- NULL,
- kRtcpReducedSize,
- base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- kReceiverSsrc,
- kSenderSsrc,
- kCName);
- int frequency = 32000;
- uint32 rtp_timestamp = 64000;
- base::TimeTicks rtp_timestamp_in_ticks;
-
- // Test fail before we get a OnReceivedLipSyncInfo.
- EXPECT_FALSE(rtcp_peer.RtpTimestampInSenderTime(
- frequency, rtp_timestamp, &rtp_timestamp_in_ticks));
-
- uint32 ntp_seconds = 0;
- uint32 ntp_fractions = 0;
- uint64 input_time_us = 12345678901000LL;
- base::TimeTicks input_time;
- input_time += base::TimeDelta::FromMicroseconds(input_time_us);
-
- // Test exact match.
- ConvertTimeTicksToNtp(input_time, &ntp_seconds, &ntp_fractions);
- rtcp_peer.OnReceivedLipSyncInfo(rtp_timestamp, ntp_seconds, ntp_fractions);
- EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(
- frequency, rtp_timestamp, &rtp_timestamp_in_ticks));
- EXPECT_EQ(input_time, rtp_timestamp_in_ticks);
-
- // Test older rtp_timestamp.
- rtp_timestamp = 32000;
- EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(
- frequency, rtp_timestamp, &rtp_timestamp_in_ticks));
- EXPECT_EQ(input_time - base::TimeDelta::FromMilliseconds(1000),
- rtp_timestamp_in_ticks);
-
- // Test older rtp_timestamp with wrap.
- rtp_timestamp = 4294903296u;
- EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(
- frequency, rtp_timestamp, &rtp_timestamp_in_ticks));
- EXPECT_EQ(input_time - base::TimeDelta::FromMilliseconds(4000),
- rtp_timestamp_in_ticks);
-
- // Test newer rtp_timestamp.
- rtp_timestamp = 128000;
- EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(
- frequency, rtp_timestamp, &rtp_timestamp_in_ticks));
- EXPECT_EQ(input_time + base::TimeDelta::FromMilliseconds(2000),
- rtp_timestamp_in_ticks);
-
- // Test newer rtp_timestamp with wrap.
- rtp_timestamp = 4294903296u;
- rtcp_peer.OnReceivedLipSyncInfo(rtp_timestamp, ntp_seconds, ntp_fractions);
- rtp_timestamp = 64000;
- EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(
- frequency, rtp_timestamp, &rtp_timestamp_in_ticks));
- EXPECT_EQ(input_time + base::TimeDelta::FromMilliseconds(4000),
- rtp_timestamp_in_ticks);
+ EXPECT_NEAR((ntp_fraction_3 - ntp_fraction_2), 0xffffffff / 2, 1);
}
} // namespace cast
diff --git a/media/cast/rtcp/rtcp_utility.cc b/media/cast/rtcp/rtcp_utility.cc
index da6ef9df66..e29f82e9cf 100644
--- a/media/cast/rtcp/rtcp_utility.cc
+++ b/media/cast/rtcp/rtcp_utility.cc
@@ -61,9 +61,6 @@ RtcpFieldTypes RtcpParser::Iterate() {
case kStateApplicationSpecificCastReceiverEventLog:
IterateCastReceiverLogEvent();
break;
- case kStateApplicationSpecificCastSenderLog:
- IterateCastSenderLog();
- break;
case kStateExtendedReportBlock:
IterateExtendedReportItem();
break;
@@ -243,12 +240,6 @@ void RtcpParser::IterateCastReceiverLogEvent() {
Iterate();
}
-void RtcpParser::IterateCastSenderLog() {
- bool success = ParseCastSenderLogItem();
- if (!success)
- Iterate();
-}
-
void RtcpParser::Validate() {
if (rtcp_data_ == NULL)
return; // NOT VALID
@@ -523,8 +514,7 @@ bool RtcpParser::ParseByeItem() {
bool RtcpParser::ParseApplicationDefined(uint8 subtype) {
ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
- if (length < 16 ||
- !(subtype == kSenderLogSubtype || subtype == kReceiverLogSubtype)) {
+ if (length < 16 || subtype != kReceiverLogSubtype) {
state_ = kStateTopLevel;
EndCurrentBlock();
return false;
@@ -546,11 +536,6 @@ bool RtcpParser::ParseApplicationDefined(uint8 subtype) {
}
rtcp_data_ += 12;
switch (subtype) {
- case kSenderLogSubtype:
- state_ = kStateApplicationSpecificCastSenderLog;
- field_type_ = kRtcpApplicationSpecificCastSenderLogCode;
- field_.cast_sender_log.sender_ssrc = sender_ssrc;
- break;
case kReceiverLogSubtype:
state_ = kStateApplicationSpecificCastReceiverFrameLog;
field_type_ = kRtcpApplicationSpecificCastReceiverLogCode;
@@ -623,28 +608,6 @@ bool RtcpParser::ParseCastReceiverLogEventItem() {
return true;
}
-bool RtcpParser::ParseCastSenderLogItem() {
- ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
-
- if (length < 4) {
- state_ = kStateTopLevel;
- EndCurrentBlock();
- return false;
- }
- uint32 data;
- base::BigEndianReader big_endian_reader(
- reinterpret_cast<const char*>(rtcp_data_), length);
- big_endian_reader.ReadU32(&data);
-
- rtcp_data_ += 4;
-
- field_.cast_sender_log.status = static_cast<uint8>(data >> 24);
- // We have 24 LSB of the RTP timestamp on the wire.
- field_.cast_sender_log.rtp_timestamp = data & 0xffffff;
- field_type_ = kRtcpApplicationSpecificCastSenderLogCode;
- return true;
-}
-
bool RtcpParser::ParseFeedBackCommon(const RtcpCommonHeader& header) {
DCHECK((header.PT == transport::kPacketTypeGenericRtpFeedback) ||
(header.PT == transport::kPacketTypePayloadSpecific))
@@ -1051,61 +1014,51 @@ bool RtcpParser::ParseExtendedReportDelaySinceLastReceiverReport() {
return true;
}
+// Converts a log event type to an integer value.
+// NOTE: We have only allocated 4 bits to represent the type of event over the
+// wire. Therefore, this function can only return values from 0 to 15.
uint8 ConvertEventTypeToWireFormat(CastLoggingEvent event) {
switch (event) {
- case kAudioAckSent:
- return 1;
- case kAudioPlayoutDelay:
- return 2;
- case kAudioFrameDecoded:
- return 3;
- case kAudioPacketReceived:
- return 4;
- case kVideoAckSent:
- return 5;
- case kVideoFrameDecoded:
- return 6;
- case kVideoRenderDelay:
- return 7;
- case kVideoPacketReceived:
- return 8;
- case kDuplicateAudioPacketReceived:
- return 9;
- case kDuplicateVideoPacketReceived:
- return 10;
+ case FRAME_ACK_SENT:
+ return 11;
+ case FRAME_PLAYOUT:
+ return 12;
+ case FRAME_DECODED:
+ return 13;
+ case PACKET_RECEIVED:
+ return 14;
default:
return 0; // Not an interesting event.
}
}
CastLoggingEvent TranslateToLogEventFromWireFormat(uint8 event) {
+ // TODO(imcheng): Remove the old mappings once they are no longer used.
switch (event) {
- case 1:
- return media::cast::kAudioAckSent;
- case 2:
- return media::cast::kAudioPlayoutDelay;
- case 3:
- return media::cast::kAudioFrameDecoded;
- case 4:
- return media::cast::kAudioPacketReceived;
- case 5:
- return media::cast::kVideoAckSent;
- case 6:
- return media::cast::kVideoFrameDecoded;
- case 7:
- return media::cast::kVideoRenderDelay;
- case 8:
- return media::cast::kVideoPacketReceived;
- case 9:
- return media::cast::kDuplicateAudioPacketReceived;
- case 10:
- return media::cast::kDuplicateVideoPacketReceived;
+ case 1: // AudioAckSent
+ case 5: // VideoAckSent
+ case 11: // Unified
+ return FRAME_ACK_SENT;
+ case 2: // AudioPlayoutDelay
+ case 7: // VideoRenderDelay
+ case 12: // Unified
+ return FRAME_PLAYOUT;
+ case 3: // AudioFrameDecoded
+ case 6: // VideoFrameDecoded
+ case 13: // Unified
+ return FRAME_DECODED;
+ case 4: // AudioPacketReceived
+ case 8: // VideoPacketReceived
+ case 14: // Unified
+ return PACKET_RECEIVED;
+ case 9: // DuplicateAudioPacketReceived
+ case 10: // DuplicateVideoPacketReceived
default:
// If the sender adds new log messages we will end up here until we add
// the new messages in the receiver.
VLOG(1) << "Unexpected log message received: " << static_cast<int>(event);
NOTREACHED();
- return media::cast::kUnknown;
+ return UNKNOWN;
}
}
diff --git a/media/cast/rtcp/rtcp_utility.h b/media/cast/rtcp/rtcp_utility.h
index fa8574406b..34f3f25a88 100644
--- a/media/cast/rtcp/rtcp_utility.h
+++ b/media/cast/rtcp/rtcp_utility.h
@@ -22,7 +22,6 @@ static const int kRtcpMaxNumberOfRembFeedbackSsrcs = 255;
static const uint32 kRemb = ('R' << 24) + ('E' << 16) + ('M' << 8) + 'B';
static const uint32 kCast = ('C' << 24) + ('A' << 16) + ('S' << 8) + 'T';
-static const uint8 kSenderLogSubtype = 1;
static const uint8 kReceiverLogSubtype = 2;
static const size_t kRtcpMaxReceiverLogMessages = 256;
@@ -160,12 +159,6 @@ struct RtcpFieldApplicationSpecificCastReceiverLogItem {
uint16 event_timestamp_delta;
};
-struct RtcpFieldApplicationSpecificCastSenderLogItem {
- uint32 sender_ssrc;
- uint8 status;
- uint32 rtp_timestamp;
-};
-
union RtcpField {
RtcpFieldReceiverReport receiver_report;
RtcpFieldSenderReport sender_report;
@@ -190,7 +183,6 @@ union RtcpField {
RtcpFieldPayloadSpecificCastNackItem cast_nack_item;
RtcpFieldApplicationSpecificCastReceiverLogItem cast_receiver_log;
- RtcpFieldApplicationSpecificCastSenderLogItem cast_sender_log;
};
enum RtcpFieldTypes {
@@ -225,7 +217,6 @@ enum RtcpFieldTypes {
kRtcpApplicationSpecificCastReceiverLogCode,
kRtcpApplicationSpecificCastReceiverLogFrameCode,
kRtcpApplicationSpecificCastReceiverLogEventCode,
- kRtcpApplicationSpecificCastSenderLogCode,
// RFC 5104.
kRtcpPayloadSpecificFirCode,
@@ -264,7 +255,6 @@ class RtcpParser {
kStateBye,
kStateApplicationSpecificCastReceiverFrameLog,
kStateApplicationSpecificCastReceiverEventLog,
- kStateApplicationSpecificCastSenderLog,
kStateExtendedReportBlock,
kStateExtendedReportDelaySinceLastReceiverReport,
kStateGenericRtpFeedbackNack,
@@ -286,7 +276,6 @@ class RtcpParser {
void IterateByeItem();
void IterateCastReceiverLogFrame();
void IterateCastReceiverLogEvent();
- void IterateCastSenderLog();
void IterateExtendedReportItem();
void IterateExtendedReportDelaySinceLastReceiverReportItem();
void IterateNackItem();
@@ -312,7 +301,6 @@ class RtcpParser {
bool ParseApplicationDefined(uint8 subtype);
bool ParseCastReceiverLogFrameItem();
bool ParseCastReceiverLogEventItem();
- bool ParseCastSenderLogItem();
bool ParseExtendedReport();
bool ParseExtendedReportItem();
diff --git a/media/cast/rtcp/sender_rtcp_event_subscriber.cc b/media/cast/rtcp/sender_rtcp_event_subscriber.cc
deleted file mode 100644
index 05bcf3b7a8..0000000000
--- a/media/cast/rtcp/sender_rtcp_event_subscriber.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/rtcp/sender_rtcp_event_subscriber.h"
-
-#include <utility>
-
-#include "base/logging.h"
-#include "media/cast/rtcp/rtcp_defines.h"
-
-namespace media {
-namespace cast {
-
-SenderRtcpEventSubscriber::SenderRtcpEventSubscriber(
- const size_t max_size_to_retain)
- : max_size_to_retain_(max_size_to_retain) {
- DCHECK(max_size_to_retain_ > 0u);
-}
-
-SenderRtcpEventSubscriber::~SenderRtcpEventSubscriber() {
- DCHECK(thread_checker_.CalledOnValidThread());
-}
-
-void SenderRtcpEventSubscriber::OnReceiveFrameEvent(
- const FrameEvent& frame_event) {
- DCHECK(thread_checker_.CalledOnValidThread());
- if (frame_event.type != kVideoFrameCaptureBegin &&
- frame_event.type != kVideoFrameSentToEncoder &&
- frame_event.type != kVideoFrameEncoded) {
- // Not interested in other events.
- return;
- }
-
- RtcpEventMap::iterator it = rtcp_events_.find(frame_event.rtp_timestamp);
- if (it == rtcp_events_.end()) {
- // We have not stored this frame (RTP timestamp) in our map.
- RtcpEvent rtcp_event;
- rtcp_event.type = frame_event.type;
- rtcp_event.timestamp = frame_event.timestamp;
-
- // Do not need to fill out rtcp_event.delay_delta or rtcp_event.packet_id
- // as they are not set in frame events we are interested in.
- rtcp_events_.insert(std::make_pair(frame_event.rtp_timestamp, rtcp_event));
-
- TruncateMapIfNeeded();
- } else {
- // We already have this frame (RTP timestamp) in our map.
- // Only update events that are later in the chain.
- // This is due to that events can be reordered on the wire.
- if (frame_event.type == kVideoFrameCaptureBegin) {
- return; // First event in chain can not be late by definition.
- }
-
- if (it->second.type == kVideoFrameEncoded) {
- return; // Last event in chain should not be updated.
- }
-
- // Update existing entry.
- it->second.type = frame_event.type;
- }
-
- DCHECK(rtcp_events_.size() <= max_size_to_retain_);
-}
-
-void SenderRtcpEventSubscriber::OnReceivePacketEvent(
- const PacketEvent& packet_event) {
- DCHECK(thread_checker_.CalledOnValidThread());
- // Do nothing as RTP sender is not interested in packet events for RTCP.
-}
-
-void SenderRtcpEventSubscriber::GetRtcpEventsAndReset(
- RtcpEventMap* rtcp_events) {
- DCHECK(thread_checker_.CalledOnValidThread());
- rtcp_events->swap(rtcp_events_);
- rtcp_events_.clear();
-}
-
-void SenderRtcpEventSubscriber::TruncateMapIfNeeded() {
- // If map size has exceeded |max_size_to_retain_|, remove entry with
- // the smallest RTP timestamp.
- if (rtcp_events_.size() > max_size_to_retain_) {
- DVLOG(2) << "RTCP event map exceeded size limit; "
- << "removing oldest entry";
- // This is fine since we only insert elements one at a time.
- rtcp_events_.erase(rtcp_events_.begin());
- }
-}
-
-} // namespace cast
-} // namespace media
diff --git a/media/cast/rtcp/sender_rtcp_event_subscriber.h b/media/cast/rtcp/sender_rtcp_event_subscriber.h
deleted file mode 100644
index 32c22d52c5..0000000000
--- a/media/cast/rtcp/sender_rtcp_event_subscriber.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_RTCP_SENDER_RTCP_EVENT_SUBSCRIBER_H_
-#define MEDIA_CAST_RTCP_SENDER_RTCP_EVENT_SUBSCRIBER_H_
-
-#include <map>
-
-#include "base/threading/thread_checker.h"
-#include "media/cast/logging/logging_defines.h"
-#include "media/cast/logging/raw_event_subscriber.h"
-#include "media/cast/rtcp/rtcp_defines.h"
-
-namespace media {
-namespace cast {
-
-// The key should really be something more than just a RTP timestamp in order
-// to differentiate between video and audio frames, but since the implementation
-// only process video frame events, RTP timestamp only as key is fine.
-typedef std::map<RtpTimestamp, RtcpEvent> RtcpEventMap;
-
-// A RawEventSubscriber implementation with the following properties:
-// - Only processes raw event types that are relevant for sending from cast
-// sender to cast receiver via RTCP.
-// - Captures information to be sent over to RTCP from raw event logs into the
-// more compact RtcpEvent struct.
-// - Orders events by RTP timestamp with a map.
-// - Internally, the map is capped at a maximum size configurable by the caller.
-// The subscriber only keeps the most recent events (determined by RTP
-// timestamp) up to the size limit.
-class SenderRtcpEventSubscriber : public RawEventSubscriber {
- public:
- // |max_size_to_retain|: The object will keep up to |max_size_to_retain|
- // events
- // in the map. Once threshold has been reached, an event with the smallest
- // RTP timestamp will be removed.
- SenderRtcpEventSubscriber(const size_t max_size_to_retain);
-
- virtual ~SenderRtcpEventSubscriber();
-
- // RawEventSubscriber implementation.
- virtual void OnReceiveFrameEvent(const FrameEvent& frame_event) OVERRIDE;
- virtual void OnReceivePacketEvent(const PacketEvent& packet_event) OVERRIDE;
-
- // Assigns all collected events since last invocation to |rtcp_events|, and
- // clears |rtcp_events_|.
- void GetRtcpEventsAndReset(RtcpEventMap* rtcp_events);
-
- private:
- // If |rtcp_events_.size()| exceeds |max_size_to_retain_|, remove an oldest
- // entry
- // (determined by RTP timestamp) so its size no greater than
- // |max_size_to_retain_|.
- void TruncateMapIfNeeded();
-
- const size_t max_size_to_retain_;
- RtcpEventMap rtcp_events_;
-
- // Ensures methods are only called on the main thread.
- base::ThreadChecker thread_checker_;
-
- DISALLOW_COPY_AND_ASSIGN(SenderRtcpEventSubscriber);
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_RTCP_SENDER_RTCP_EVENT_SUBSCRIBER_H_
diff --git a/media/cast/rtcp/sender_rtcp_event_subscriber_unittest.cc b/media/cast/rtcp/sender_rtcp_event_subscriber_unittest.cc
deleted file mode 100644
index 95f23064dc..0000000000
--- a/media/cast/rtcp/sender_rtcp_event_subscriber_unittest.cc
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/test/simple_test_tick_clock.h"
-#include "base/time/tick_clock.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/logging/logging_defines.h"
-#include "media/cast/rtcp/sender_rtcp_event_subscriber.h"
-#include "media/cast/test/fake_single_thread_task_runner.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-namespace cast {
-
-namespace {
-
-const size_t kMaxEventEntries = 10u;
-
-} // namespace
-
-class SenderRtcpEventSubscriberTest : public ::testing::Test {
- protected:
- SenderRtcpEventSubscriberTest()
- : testing_clock_(new base::SimpleTestTickClock()),
- task_runner_(new test::FakeSingleThreadTaskRunner(testing_clock_)),
- cast_environment_(new CastEnvironment(
- scoped_ptr<base::TickClock>(testing_clock_).Pass(),
- task_runner_,
- task_runner_,
- task_runner_)),
- event_subscriber_(kMaxEventEntries) {
- cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_);
- }
-
- virtual ~SenderRtcpEventSubscriberTest() {
- cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_);
- }
-
- base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
- scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
- scoped_refptr<CastEnvironment> cast_environment_;
- SenderRtcpEventSubscriber event_subscriber_;
-};
-
-TEST_F(SenderRtcpEventSubscriberTest, InsertEntry) {
- cast_environment_->Logging()->InsertFrameEvent(
- testing_clock_->NowTicks(), kVideoFrameCaptureBegin, 100u, 1u);
- cast_environment_->Logging()->InsertFrameEvent(
- testing_clock_->NowTicks(), kVideoFrameCaptureBegin, 200u, 2u);
- cast_environment_->Logging()->InsertFrameEvent(
- testing_clock_->NowTicks(), kVideoFrameSentToEncoder, 100u, 1u);
- cast_environment_->Logging()->InsertFrameEvent(testing_clock_->NowTicks(),
- kVideoFrameEncoded, 100u, 1u);
- cast_environment_->Logging()->InsertFrameEvent(testing_clock_->NowTicks(),
- kVideoFrameEncoded, 300u, 3u);
- cast_environment_->Logging()->InsertFrameEvent(
- testing_clock_->NowTicks(), kVideoFrameSentToEncoder, 300u, 3u);
-
- RtcpEventMap events;
- event_subscriber_.GetRtcpEventsAndReset(&events);
-
- ASSERT_EQ(3u, events.size());
-
- RtcpEventMap::iterator it = events.begin();
- EXPECT_EQ(100u, it->first);
- EXPECT_EQ(kVideoFrameEncoded, it->second.type);
-
- ++it;
- EXPECT_EQ(200u, it->first);
- EXPECT_EQ(kVideoFrameCaptureBegin, it->second.type);
-
- ++it;
- EXPECT_EQ(300u, it->first);
- EXPECT_EQ(kVideoFrameEncoded, it->second.type);
-}
-
-TEST_F(SenderRtcpEventSubscriberTest, MapReset) {
- cast_environment_->Logging()->InsertFrameEvent(
- testing_clock_->NowTicks(), kVideoFrameCaptureBegin, 100u, 1u);
-
- RtcpEventMap events;
- event_subscriber_.GetRtcpEventsAndReset(&events);
- EXPECT_EQ(1u, events.size());
-
- // Call again without any logging in between, should return empty map.
- event_subscriber_.GetRtcpEventsAndReset(&events);
- EXPECT_TRUE(events.empty());
-}
-
-TEST_F(SenderRtcpEventSubscriberTest, DropEventsWhenSizeExceeded) {
- for (uint32 i = 1u; i <= 10u; ++i) {
- cast_environment_->Logging()->InsertFrameEvent(
- testing_clock_->NowTicks(), kVideoFrameCaptureBegin, i * 10, i);
- }
-
- RtcpEventMap events;
- event_subscriber_.GetRtcpEventsAndReset(&events);
-
- ASSERT_EQ(10u, events.size());
- EXPECT_EQ(10u, events.begin()->first);
- EXPECT_EQ(100u, events.rbegin()->first);
-
- for (uint32 i = 1u; i <= 11u; ++i) {
- cast_environment_->Logging()->InsertFrameEvent(
- testing_clock_->NowTicks(), kVideoFrameCaptureBegin, i * 10, i);
- }
-
- event_subscriber_.GetRtcpEventsAndReset(&events);
-
- // Event with RTP timestamp 10 should have been dropped when 110 is inserted.
- ASSERT_EQ(10u, events.size());
- EXPECT_EQ(20u, events.begin()->first);
- EXPECT_EQ(110u, events.rbegin()->first);
-}
-
-} // namespace cast
-} // namespace media
diff --git a/media/cast/rtcp/test_rtcp_packet_builder.cc b/media/cast/rtcp/test_rtcp_packet_builder.cc
index a7c5e2c669..8d0809d928 100644
--- a/media/cast/rtcp/test_rtcp_packet_builder.cc
+++ b/media/cast/rtcp/test_rtcp_packet_builder.cc
@@ -213,21 +213,6 @@ void TestRtcpPacketBuilder::AddCast(uint32 sender_ssrc,
big_endian_writer_.WriteU8(0); // Lost packet id mask.
}
-void TestRtcpPacketBuilder::AddSenderLog(uint32 sender_ssrc) {
- AddRtcpHeader(204, 1);
- big_endian_writer_.WriteU32(sender_ssrc);
- big_endian_writer_.WriteU8('C');
- big_endian_writer_.WriteU8('A');
- big_endian_writer_.WriteU8('S');
- big_endian_writer_.WriteU8('T');
-}
-
-void TestRtcpPacketBuilder::AddSenderFrameLog(uint8 event_id,
- uint32 rtp_timestamp) {
- big_endian_writer_.WriteU32((static_cast<uint32>(event_id) << 24) +
- (rtp_timestamp & 0xffffff));
-}
-
void TestRtcpPacketBuilder::AddReceiverLog(uint32 sender_ssrc) {
AddRtcpHeader(204, 2);
big_endian_writer_.WriteU32(sender_ssrc);
diff --git a/media/cast/rtcp/test_rtcp_packet_builder.h b/media/cast/rtcp/test_rtcp_packet_builder.h
index eb4a2b7f89..d4266670ab 100644
--- a/media/cast/rtcp/test_rtcp_packet_builder.h
+++ b/media/cast/rtcp/test_rtcp_packet_builder.h
@@ -80,8 +80,6 @@ class TestRtcpPacketBuilder {
void AddRpsi(uint32 sender_ssrc, uint32 media_ssrc);
void AddRemb(uint32 sender_ssrc, uint32 media_ssrc);
void AddCast(uint32 sender_ssrc, uint32 media_ssrc, uint16 target_delay_ms);
- void AddSenderLog(uint32 sender_ssrc);
- void AddSenderFrameLog(uint8 event_id, uint32 rtp_timestamp);
void AddReceiverLog(uint32 sender_ssrc);
void AddReceiverFrameLog(uint32 rtp_timestamp,
int num_events,
diff --git a/media/cast/rtp_receiver/rtp_receiver.cc b/media/cast/rtp_receiver/rtp_receiver.cc
index 2f95bf3b66..f7ff50b375 100644
--- a/media/cast/rtp_receiver/rtp_receiver.cc
+++ b/media/cast/rtp_receiver/rtp_receiver.cc
@@ -14,8 +14,8 @@ namespace media {
namespace cast {
RtpReceiver::RtpReceiver(base::TickClock* clock,
- const AudioReceiverConfig* audio_config,
- const VideoReceiverConfig* video_config) :
+ const FrameReceiverConfig* audio_config,
+ const FrameReceiverConfig* video_config) :
packet_parser_(audio_config ? audio_config->incoming_ssrc :
(video_config ? video_config->incoming_ssrc : 0),
audio_config ? audio_config->rtp_payload_type :
diff --git a/media/cast/rtp_receiver/rtp_receiver.h b/media/cast/rtp_receiver/rtp_receiver.h
index 9d5194c148..35c7c933a2 100644
--- a/media/cast/rtp_receiver/rtp_receiver.h
+++ b/media/cast/rtp_receiver/rtp_receiver.h
@@ -22,8 +22,8 @@ namespace cast {
class RtpReceiver {
public:
RtpReceiver(base::TickClock* clock,
- const AudioReceiverConfig* audio_config,
- const VideoReceiverConfig* video_config);
+ const FrameReceiverConfig* audio_config,
+ const FrameReceiverConfig* video_config);
virtual ~RtpReceiver();
static uint32 GetSsrcOfSender(const uint8* rtcp_buffer, size_t length);
diff --git a/media/cast/rtp_timestamp_helper.cc b/media/cast/rtp_timestamp_helper.cc
new file mode 100644
index 0000000000..3349e7b33f
--- /dev/null
+++ b/media/cast/rtp_timestamp_helper.cc
@@ -0,0 +1,36 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_timestamp_helper.h"
+
+namespace media {
+namespace cast {
+
+RtpTimestampHelper::RtpTimestampHelper(int frequency)
+ : frequency_(frequency),
+ last_rtp_timestamp_(0) {
+}
+
+RtpTimestampHelper::~RtpTimestampHelper() {
+}
+
+bool RtpTimestampHelper::GetCurrentTimeAsRtpTimestamp(
+ const base::TimeTicks& now, uint32* rtp_timestamp) const {
+ if (last_capture_time_.is_null())
+ return false;
+ const base::TimeDelta elapsed_time = now - last_capture_time_;
+ const int64 rtp_delta =
+ elapsed_time * frequency_ / base::TimeDelta::FromSeconds(1);
+ *rtp_timestamp = last_rtp_timestamp_ + static_cast<uint32>(rtp_delta);
+ return true;
+}
+
+void RtpTimestampHelper::StoreLatestTime(
+ base::TimeTicks capture_time, uint32 rtp_timestamp) {
+ last_capture_time_ = capture_time;
+ last_rtp_timestamp_ = rtp_timestamp;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/rtp_timestamp_helper.h b/media/cast/rtp_timestamp_helper.h
new file mode 100644
index 0000000000..b9c650c506
--- /dev/null
+++ b/media/cast/rtp_timestamp_helper.h
@@ -0,0 +1,41 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTP_TIMESTAMP_HELPER_H_
+#define MEDIA_CAST_RTP_TIMESTAMP_HELPER_H_
+
+#include "base/basictypes.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+
+// A helper class used to convert current time ticks into RTP timestamp.
+class RtpTimestampHelper {
+ public:
+ explicit RtpTimestampHelper(int frequency);
+ ~RtpTimestampHelper();
+
+ // Compute a RTP timestamp using current time, last encoded time and
+ // last encoded RTP timestamp.
+ // Return true if |rtp_timestamp| is computed.
+ bool GetCurrentTimeAsRtpTimestamp(const base::TimeTicks& now,
+ uint32* rtp_timestamp) const;
+
+ // Store the capture time and the corresponding RTP timestamp for the
+ // last encoded frame.
+ void StoreLatestTime(base::TimeTicks capture_time, uint32 rtp_timestamp);
+
+ private:
+ int frequency_;
+ base::TimeTicks last_capture_time_;
+ uint32 last_rtp_timestamp_;
+
+ DISALLOW_COPY_AND_ASSIGN(RtpTimestampHelper);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif  // MEDIA_CAST_RTP_TIMESTAMP_HELPER_H_
diff --git a/media/cast/test/encode_decode_test.cc b/media/cast/test/encode_decode_test.cc
deleted file mode 100644
index 67edbc89d0..0000000000
--- a/media/cast/test/encode_decode_test.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Joint encoder and decoder testing.
-// These tests operate directly on the VP8 encoder and decoder, not the
-// transport layer, and are targeted at validating the bit stream.
-
-#include <gtest/gtest.h>
-#include <stdint.h>
-
-#include "base/bind.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/video_frame.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/test/fake_single_thread_task_runner.h"
-#include "media/cast/test/utility/video_utility.h"
-#include "media/cast/video_receiver/codecs/vp8/vp8_decoder.h"
-#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
-
-namespace media {
-namespace cast {
-
-static const int64 kStartMillisecond = INT64_C(1245);
-static const int kWidth = 1280;
-static const int kHeight = 720;
-static const int kStartbitrate = 4000000;
-static const int kMaxQp = 54;
-static const int kMinQp = 4;
-static const int kMaxFrameRate = 30;
-
-namespace {
-class EncodeDecodeTestFrameCallback
- : public base::RefCountedThreadSafe<EncodeDecodeTestFrameCallback> {
- public:
- EncodeDecodeTestFrameCallback() : num_called_(0) {
- gfx::Size size(kWidth, kHeight);
- original_frame_ = media::VideoFrame::CreateFrame(
- VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
- }
-
- void SetFrameStartValue(int start_value) {
- PopulateVideoFrame(original_frame_.get(), start_value);
- }
-
- void DecodeComplete(const scoped_refptr<media::VideoFrame>& decoded_frame,
- const base::TimeTicks& render_time) {
- ++num_called_;
- // Compare resolution.
- EXPECT_EQ(original_frame_->coded_size().width(),
- decoded_frame->coded_size().width());
- EXPECT_EQ(original_frame_->coded_size().height(),
- decoded_frame->coded_size().height());
- // Compare data.
- EXPECT_GT(I420PSNR(original_frame_, decoded_frame), 40.0);
- }
-
- int num_called() const { return num_called_; }
-
- protected:
- virtual ~EncodeDecodeTestFrameCallback() {}
-
- private:
- friend class base::RefCountedThreadSafe<EncodeDecodeTestFrameCallback>;
-
- int num_called_;
- scoped_refptr<media::VideoFrame> original_frame_;
-};
-} // namespace
-
-class EncodeDecodeTest : public ::testing::Test {
- protected:
- EncodeDecodeTest()
- : testing_clock_(new base::SimpleTestTickClock()),
- task_runner_(new test::FakeSingleThreadTaskRunner(testing_clock_)),
- // CastEnvironment will only be used by the vp8 decoder; Enable only the
- // video and main threads.
- cast_environment_(new CastEnvironment(
- scoped_ptr<base::TickClock>(testing_clock_).Pass(),
- task_runner_,
- NULL,
- task_runner_)),
- test_callback_(new EncodeDecodeTestFrameCallback()) {
- testing_clock_->Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
- encoder_config_.max_number_of_video_buffers_used = 1;
- encoder_config_.width = kWidth;
- encoder_config_.height = kHeight;
- encoder_config_.start_bitrate = kStartbitrate;
- encoder_config_.min_qp = kMaxQp;
- encoder_config_.min_qp = kMinQp;
- encoder_config_.max_frame_rate = kMaxFrameRate;
- int max_unacked_frames = 1;
- encoder_.reset(new Vp8Encoder(encoder_config_, max_unacked_frames));
- // Initialize to use one core.
- decoder_.reset(new Vp8Decoder(cast_environment_));
- }
-
- virtual ~EncodeDecodeTest() {}
-
- virtual void SetUp() OVERRIDE {
- // Create test frame.
- int start_value = 10; // Random value to start from.
- gfx::Size size(encoder_config_.width, encoder_config_.height);
- video_frame_ = media::VideoFrame::CreateFrame(
- VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
- PopulateVideoFrame(video_frame_, start_value);
- test_callback_->SetFrameStartValue(start_value);
- }
-
- VideoSenderConfig encoder_config_;
- scoped_ptr<Vp8Encoder> encoder_;
- scoped_ptr<Vp8Decoder> decoder_;
- scoped_refptr<media::VideoFrame> video_frame_;
- base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
- scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
- scoped_refptr<CastEnvironment> cast_environment_;
- scoped_refptr<EncodeDecodeTestFrameCallback> test_callback_;
-};
-
-TEST_F(EncodeDecodeTest, BasicEncodeDecode) {
- transport::EncodedVideoFrame encoded_frame;
- encoder_->Initialize();
- // Encode frame.
- encoder_->Encode(video_frame_, &encoded_frame);
- EXPECT_GT(encoded_frame.data.size(), UINT64_C(0));
- // Decode frame.
- decoder_->Decode(&encoded_frame,
- base::TimeTicks(),
- base::Bind(&EncodeDecodeTestFrameCallback::DecodeComplete,
- test_callback_));
- task_runner_->RunTasks();
-}
-
-} // namespace cast
-} // namespace media
diff --git a/media/cast/test/end2end_unittest.cc b/media/cast/test/end2end_unittest.cc
index 4b8f6fe585..119067c7b2 100644
--- a/media/cast/test/end2end_unittest.cc
+++ b/media/cast/test/end2end_unittest.cc
@@ -18,7 +18,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/sys_byteorder.h"
#include "base/test/simple_test_tick_clock.h"
@@ -31,6 +30,8 @@
#include "media/cast/cast_sender.h"
#include "media/cast/logging/simple_event_subscriber.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/test/skewed_single_thread_task_runner.h"
+#include "media/cast/test/skewed_tick_clock.h"
#include "media/cast/test/utility/audio_utility.h"
#include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/udp_proxy.h"
@@ -62,16 +63,11 @@ static const double kVideoAcceptedPSNR = 38.0;
// The tests are commonly implemented with |kFrameTimerMs| RunTask function;
// a normal video is 30 fps hence the 33 ms between frames.
+//
+// TODO(miu): The errors in timing will add up significantly. Find an
+// alternative approach that eliminates use of this constant.
static const int kFrameTimerMs = 33;
-// The packets pass through the pacer which can delay the beginning of the
-// frame by 10 ms if there is packets belonging to the previous frame being
-// retransmitted.
-// In addition, audio packets are sent in 10mS intervals in audio_encoder.cc,
-// although we send an audio frame every 33mS, which adds an extra delay.
-// A TODO was added in the code to resolve this.
-static const int kTimerErrorMs = 20;
-
// Start the video synthetic start value to medium range value, to avoid edge
// effects cause by encoding and quantization.
static const int kVideoStart = 100;
@@ -80,6 +76,14 @@ static const int kVideoStart = 100;
// chunks of this size.
static const int kAudioFrameDurationMs = 10;
+// The amount of time between frame capture on the sender and playout on the
+// receiver.
+static const int kTargetPlayoutDelayMs = 100;
+
+// The maximum amount of deviation expected in the playout times emitted by the
+// receiver.
+static const int kMaxAllowedPlayoutErrorMs = 30;
+
std::string ConvertFromBase16String(const std::string base_16) {
std::string compressed;
DCHECK_EQ(base_16.size() % 2, 0u) << "Must be a multiple of 2";
@@ -109,7 +113,7 @@ void VideoInitializationStatus(CastInitializationStatus status) {
// This is wrapped in a struct because it needs to be put into a std::map.
typedef struct {
- int counter[kNumOfLoggingEvents];
+ int counter[kNumOfLoggingEvents+1];
} LoggingEventCounts;
// Constructs a map from each frame (RTP timestamp) to counts of each event
@@ -158,14 +162,10 @@ std::map<uint16, LoggingEventCounts> GetEventCountForPacketEvents(
return event_counter_for_packet;
}
-void CountVideoFrame(int* counter,
- const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& render_time, bool continuous) {
- ++*counter;
-}
-
} // namespace
+// Shim that forwards packets from a test::PacketPipe to a
+// PacketReceiverCallback.
class LoopBackPacketPipe : public test::PacketPipe {
public:
LoopBackPacketPipe(const transport::PacketReceiverCallback& packet_receiver)
@@ -192,7 +192,9 @@ class LoopBackTransport : public transport::PacketSender {
cast_environment_(cast_environment) {}
void SetPacketReceiver(
- const transport::PacketReceiverCallback& packet_receiver) {
+ const transport::PacketReceiverCallback& packet_receiver,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ base::TickClock* clock) {
scoped_ptr<test::PacketPipe> loopback_pipe(
new LoopBackPacketPipe(packet_receiver));
if (packet_pipe_) {
@@ -200,6 +202,7 @@ class LoopBackTransport : public transport::PacketSender {
} else {
packet_pipe_ = loopback_pipe.Pass();
}
+ packet_pipe_->InitOnIOThread(task_runner, clock);
}
virtual bool SendPacket(transport::PacketRef packet,
@@ -244,7 +247,7 @@ class TestReceiverAudioCallback
public:
struct ExpectedAudioFrame {
scoped_ptr<AudioBus> audio_bus;
- base::TimeTicks record_time;
+ base::TimeTicks playout_time;
};
TestReceiverAudioCallback() : num_called_(0) {}
@@ -254,13 +257,13 @@ class TestReceiverAudioCallback
}
void AddExpectedResult(const AudioBus& audio_bus,
- const base::TimeTicks& record_time) {
+ const base::TimeTicks& playout_time) {
scoped_ptr<ExpectedAudioFrame> expected_audio_frame(
new ExpectedAudioFrame());
expected_audio_frame->audio_bus =
AudioBus::Create(audio_bus.channels(), audio_bus.frames()).Pass();
audio_bus.CopyTo(expected_audio_frame->audio_bus.get());
- expected_audio_frame->record_time = record_time;
+ expected_audio_frame->playout_time = playout_time;
expected_frames_.push_back(expected_audio_frame.release());
}
@@ -292,22 +295,19 @@ class TestReceiverAudioCallback
1);
}
- // TODO(miu): This is a "fuzzy" way to check the timestamps. We should be
- // able to compute exact offsets with "omnipotent" knowledge of the system.
- const base::TimeTicks upper_bound =
- expected_audio_frame->record_time +
- base::TimeDelta::FromMilliseconds(kDefaultRtpMaxDelayMs +
- kTimerErrorMs);
- EXPECT_GE(upper_bound, playout_time)
- << "playout_time - upper_bound == "
- << (playout_time - upper_bound).InMicroseconds() << " usec";
+ EXPECT_NEAR(
+ (playout_time - expected_audio_frame->playout_time).InMillisecondsF(),
+ 0.0,
+ kMaxAllowedPlayoutErrorMs);
+ VLOG_IF(1, !last_playout_time_.is_null())
+ << "Audio frame playout time delta (compared to last frame) is "
+ << (playout_time - last_playout_time_).InMicroseconds() << " usec.";
+ last_playout_time_ = playout_time;
EXPECT_TRUE(is_continuous);
}
- void CheckCodedAudioFrame(
- scoped_ptr<transport::EncodedAudioFrame> audio_frame,
- const base::TimeTicks& playout_time) {
+ void CheckCodedAudioFrame(scoped_ptr<transport::EncodedFrame> audio_frame) {
ASSERT_TRUE(!!audio_frame);
ASSERT_FALSE(expected_frames_.empty());
const ExpectedAudioFrame& expected_audio_frame =
@@ -322,7 +322,7 @@ class TestReceiverAudioCallback
expected_audio_frame.audio_bus->frames(),
num_elements);
int16* const pcm_data =
- reinterpret_cast<int16*>(string_as_array(&audio_frame->data));
+ reinterpret_cast<int16*>(audio_frame->mutable_bytes());
for (int i = 0; i < num_elements; ++i)
pcm_data[i] = static_cast<int16>(base::NetToHost16(pcm_data[i]));
scoped_ptr<AudioBus> audio_bus(
@@ -331,7 +331,7 @@ class TestReceiverAudioCallback
audio_bus->FromInterleaved(pcm_data, audio_bus->frames(), sizeof(int16));
// Delegate the checking from here...
- CheckAudioFrame(audio_bus.Pass(), playout_time, true);
+ CheckAudioFrame(audio_bus.Pass(), audio_frame->reference_time, true);
}
int number_times_called() const { return num_called_; }
@@ -347,6 +347,7 @@ class TestReceiverAudioCallback
int num_called_;
int expected_sampling_frequency_;
std::list<ExpectedAudioFrame*> expected_frames_;
+ base::TimeTicks last_playout_time_;
};
// Class that verifies the video frames coming out of the receiver.
@@ -357,7 +358,7 @@ class TestReceiverVideoCallback
int start_value;
int width;
int height;
- base::TimeTicks capture_time;
+ base::TimeTicks playout_time;
bool should_be_continuous;
};
@@ -366,19 +367,19 @@ class TestReceiverVideoCallback
void AddExpectedResult(int start_value,
int width,
int height,
- const base::TimeTicks& capture_time,
+ const base::TimeTicks& playout_time,
bool should_be_continuous) {
ExpectedVideoFrame expected_video_frame;
expected_video_frame.start_value = start_value;
expected_video_frame.width = width;
expected_video_frame.height = height;
- expected_video_frame.capture_time = capture_time;
+ expected_video_frame.playout_time = playout_time;
expected_video_frame.should_be_continuous = should_be_continuous;
expected_frame_.push_back(expected_video_frame);
}
void CheckVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& render_time,
+ const base::TimeTicks& playout_time,
bool is_continuous) {
++num_called_;
@@ -387,21 +388,6 @@ class TestReceiverVideoCallback
ExpectedVideoFrame expected_video_frame = expected_frame_.front();
expected_frame_.pop_front();
- base::TimeDelta time_since_capture =
- render_time - expected_video_frame.capture_time;
- const base::TimeDelta upper_bound = base::TimeDelta::FromMilliseconds(
- kDefaultRtpMaxDelayMs + kTimerErrorMs);
-
- // TODO(miu): This is a "fuzzy" way to check the timestamps. We should be
- // able to compute exact offsets with "omnipotent" knowledge of the system.
- EXPECT_GE(upper_bound, time_since_capture)
- << "time_since_capture - upper_bound == "
- << (time_since_capture - upper_bound).InMicroseconds() << " usec";
- // TODO(miu): I broke the concept of 100 ms target delay timing on the
- // receiver side, but the logic for computing playout time really isn't any
- // more broken than it was. This only affects the receiver, and is to be
- // rectified in an soon-upcoming change. http://crbug.com/356942
- // EXPECT_LE(expected_video_frame.capture_time, render_time);
EXPECT_EQ(expected_video_frame.width, video_frame->visible_rect().width());
EXPECT_EQ(expected_video_frame.height,
video_frame->visible_rect().height());
@@ -414,6 +400,15 @@ class TestReceiverVideoCallback
EXPECT_GE(I420PSNR(expected_I420_frame, video_frame), kVideoAcceptedPSNR);
+ EXPECT_NEAR(
+ (playout_time - expected_video_frame.playout_time).InMillisecondsF(),
+ 0.0,
+ kMaxAllowedPlayoutErrorMs);
+ VLOG_IF(1, !last_playout_time_.is_null())
+ << "Video frame playout time delta (compared to last frame) is "
+ << (playout_time - last_playout_time_).InMicroseconds() << " usec.";
+ last_playout_time_ = playout_time;
+
EXPECT_EQ(expected_video_frame.should_be_continuous, is_continuous);
}
@@ -427,6 +422,7 @@ class TestReceiverVideoCallback
int num_called_;
std::list<ExpectedVideoFrame> expected_frame_;
+ base::TimeTicks last_playout_time_;
};
// The actual test class, generate synthetic data for both audio and video and
@@ -435,27 +431,28 @@ class End2EndTest : public ::testing::Test {
protected:
End2EndTest()
: start_time_(),
- testing_clock_sender_(new base::SimpleTestTickClock()),
- testing_clock_receiver_(new base::SimpleTestTickClock()),
- task_runner_(
- new test::FakeSingleThreadTaskRunner(testing_clock_sender_)),
+ task_runner_(new test::FakeSingleThreadTaskRunner(&testing_clock_)),
+ testing_clock_sender_(new test::SkewedTickClock(&testing_clock_)),
+ task_runner_sender_(
+ new test::SkewedSingleThreadTaskRunner(task_runner_)),
+ testing_clock_receiver_(new test::SkewedTickClock(&testing_clock_)),
+ task_runner_receiver_(
+ new test::SkewedSingleThreadTaskRunner(task_runner_)),
cast_environment_sender_(new CastEnvironment(
scoped_ptr<base::TickClock>(testing_clock_sender_).Pass(),
- task_runner_,
- task_runner_,
- task_runner_)),
+ task_runner_sender_,
+ task_runner_sender_,
+ task_runner_sender_)),
cast_environment_receiver_(new CastEnvironment(
scoped_ptr<base::TickClock>(testing_clock_receiver_).Pass(),
- task_runner_,
- task_runner_,
- task_runner_)),
+ task_runner_receiver_,
+ task_runner_receiver_,
+ task_runner_receiver_)),
receiver_to_sender_(cast_environment_receiver_),
sender_to_receiver_(cast_environment_sender_),
test_receiver_audio_callback_(new TestReceiverAudioCallback()),
test_receiver_video_callback_(new TestReceiverVideoCallback()) {
- testing_clock_sender_->Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
- testing_clock_receiver_->Advance(
+ testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
cast_environment_sender_->Logging()->AddRawEventSubscriber(
&event_subscriber_sender_);
@@ -464,9 +461,9 @@ class End2EndTest : public ::testing::Test {
void Configure(transport::VideoCodec video_codec,
transport::AudioCodec audio_codec,
int audio_sampling_frequency,
- bool external_audio_decoder,
int max_number_of_video_buffers_used) {
audio_sender_config_.rtp_config.ssrc = 1;
+ audio_sender_config_.rtp_config.max_delay_ms = kTargetPlayoutDelayMs;
audio_sender_config_.incoming_feedback_ssrc = 2;
audio_sender_config_.rtp_config.payload_type = 96;
audio_sender_config_.use_external_encoder = false;
@@ -478,25 +475,27 @@ class End2EndTest : public ::testing::Test {
audio_receiver_config_.feedback_ssrc =
audio_sender_config_.incoming_feedback_ssrc;
audio_receiver_config_.incoming_ssrc = audio_sender_config_.rtp_config.ssrc;
+ audio_receiver_config_.rtp_max_delay_ms = kTargetPlayoutDelayMs;
audio_receiver_config_.rtp_payload_type =
audio_sender_config_.rtp_config.payload_type;
- audio_receiver_config_.use_external_decoder = external_audio_decoder;
audio_receiver_config_.frequency = audio_sender_config_.frequency;
audio_receiver_config_.channels = kAudioChannels;
- audio_receiver_config_.codec = audio_sender_config_.codec;
+ audio_receiver_config_.max_frame_rate = 100;
+ audio_receiver_config_.codec.audio = audio_sender_config_.codec;
test_receiver_audio_callback_->SetExpectedSamplingFrequency(
audio_receiver_config_.frequency);
video_sender_config_.rtp_config.ssrc = 3;
+ video_sender_config_.rtp_config.max_delay_ms = kTargetPlayoutDelayMs;
video_sender_config_.incoming_feedback_ssrc = 4;
video_sender_config_.rtp_config.payload_type = 97;
video_sender_config_.use_external_encoder = false;
video_sender_config_.width = kVideoHdWidth;
video_sender_config_.height = kVideoHdHeight;
- video_sender_config_.max_bitrate = 5000000;
- video_sender_config_.min_bitrate = 1000000;
- video_sender_config_.start_bitrate = 5000000;
+ video_sender_config_.max_bitrate = 50000;
+ video_sender_config_.min_bitrate = 10000;
+ video_sender_config_.start_bitrate = 10000;
video_sender_config_.max_qp = 30;
video_sender_config_.min_qp = 4;
video_sender_config_.max_frame_rate = 30;
@@ -507,22 +506,45 @@ class End2EndTest : public ::testing::Test {
video_receiver_config_.feedback_ssrc =
video_sender_config_.incoming_feedback_ssrc;
video_receiver_config_.incoming_ssrc = video_sender_config_.rtp_config.ssrc;
+ video_receiver_config_.rtp_max_delay_ms = kTargetPlayoutDelayMs;
video_receiver_config_.rtp_payload_type =
video_sender_config_.rtp_config.payload_type;
- video_receiver_config_.use_external_decoder = false;
- video_receiver_config_.codec = video_sender_config_.codec;
+ video_receiver_config_.frequency = kVideoFrequency;
+ video_receiver_config_.channels = 1;
+ video_receiver_config_.max_frame_rate = video_sender_config_.max_frame_rate;
+ video_receiver_config_.codec.video = video_sender_config_.codec;
+ }
+
+ void SetReceiverSkew(double skew, base::TimeDelta offset) {
+ testing_clock_receiver_->SetSkew(skew, offset);
+ task_runner_receiver_->SetSkew(1.0 / skew);
+ }
+
+ // Specify the minimum/maximum difference in playout times between two
+ // consecutive frames. Also, specify the maximum absolute rate of change over
+ // each three consecutive frames.
+ void SetExpectedVideoPlayoutSmoothness(base::TimeDelta min_delta,
+ base::TimeDelta max_delta,
+ base::TimeDelta max_curvature) {
+ min_video_playout_delta_ = min_delta;
+ max_video_playout_delta_ = max_delta;
+ max_video_playout_curvature_ = max_curvature;
}
void FeedAudioFrames(int count, bool will_be_checked) {
for (int i = 0; i < count; ++i) {
scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs)));
- const base::TimeTicks send_time =
+ const base::TimeTicks capture_time =
testing_clock_sender_->NowTicks() +
i * base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs);
- if (will_be_checked)
- test_receiver_audio_callback_->AddExpectedResult(*audio_bus, send_time);
- audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
+ if (will_be_checked) {
+ test_receiver_audio_callback_->AddExpectedResult(
+ *audio_bus,
+ capture_time +
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs));
+ }
+ audio_frame_input_->InsertAudio(audio_bus.Pass(), capture_time);
}
}
@@ -531,12 +553,14 @@ class End2EndTest : public ::testing::Test {
for (int i = 0; i < count; ++i) {
scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs)));
- const base::TimeTicks send_time =
+ const base::TimeTicks capture_time =
testing_clock_sender_->NowTicks() +
i * base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs);
- test_receiver_audio_callback_->AddExpectedResult(*audio_bus,
- send_time + delay);
- audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
+ test_receiver_audio_callback_->AddExpectedResult(
+ *audio_bus,
+ capture_time + delay +
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs));
+ audio_frame_input_->InsertAudio(audio_bus.Pass(), capture_time);
}
}
@@ -554,6 +578,7 @@ class End2EndTest : public ::testing::Test {
audio_receiver_config_,
video_receiver_config_,
&receiver_to_sender_);
+
net::IPEndPoint dummy_endpoint;
transport_sender_.reset(new transport::CastTransportSenderImpl(
NULL,
@@ -562,7 +587,7 @@ class End2EndTest : public ::testing::Test {
base::Bind(&UpdateCastTransportStatus),
base::Bind(&End2EndTest::LogRawEvents, base::Unretained(this)),
base::TimeDelta::FromSeconds(1),
- task_runner_,
+ task_runner_sender_,
&sender_to_receiver_));
cast_sender_ =
@@ -576,8 +601,12 @@ class End2EndTest : public ::testing::Test {
CreateDefaultVideoEncodeAcceleratorCallback(),
CreateDefaultVideoEncodeMemoryCallback());
- receiver_to_sender_.SetPacketReceiver(cast_sender_->packet_receiver());
- sender_to_receiver_.SetPacketReceiver(cast_receiver_->packet_receiver());
+ receiver_to_sender_.SetPacketReceiver(cast_sender_->packet_receiver(),
+ task_runner_,
+ &testing_clock_);
+ sender_to_receiver_.SetPacketReceiver(cast_receiver_->packet_receiver(),
+ task_runner_,
+ &testing_clock_);
audio_frame_input_ = cast_sender_->audio_frame_input();
video_frame_input_ = cast_sender_->video_frame_input();
@@ -621,13 +650,69 @@ class End2EndTest : public ::testing::Test {
media::VideoFrame::CreateBlackFrame(gfx::Size(2, 2)), capture_time);
}
- void RunTasks(int during_ms) {
- for (int i = 0; i < during_ms; ++i) {
- // Call process the timers every 1 ms.
- testing_clock_sender_->Advance(base::TimeDelta::FromMilliseconds(1));
- testing_clock_receiver_->Advance(base::TimeDelta::FromMilliseconds(1));
- task_runner_->RunTasks();
+ void RunTasks(int ms) {
+ task_runner_->Sleep(base::TimeDelta::FromMilliseconds(ms));
+ }
+
+ void BasicPlayerGotVideoFrame(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& playout_time, bool continuous) {
+ // The following tests that the sender and receiver clocks can be
+    // out-of-sync, drift, and jitter with respect to one another; and despite
+ // this, the receiver will produce smoothly-progressing playout times.
+ // Both first-order and second-order effects are tested.
+ if (!last_video_playout_time_.is_null() &&
+ min_video_playout_delta_ > base::TimeDelta()) {
+ const base::TimeDelta delta = playout_time - last_video_playout_time_;
+ VLOG(1) << "Video frame playout time delta (compared to last frame) is "
+ << delta.InMicroseconds() << " usec.";
+ EXPECT_LE(min_video_playout_delta_.InMicroseconds(),
+ delta.InMicroseconds());
+ EXPECT_GE(max_video_playout_delta_.InMicroseconds(),
+ delta.InMicroseconds());
+ if (last_video_playout_delta_ > base::TimeDelta()) {
+ base::TimeDelta abs_curvature = delta - last_video_playout_delta_;
+ if (abs_curvature < base::TimeDelta())
+ abs_curvature = -abs_curvature;
+ EXPECT_GE(max_video_playout_curvature_.InMicroseconds(),
+ abs_curvature.InMicroseconds());
+ }
+ last_video_playout_delta_ = delta;
}
+ last_video_playout_time_ = playout_time;
+
+ video_ticks_.push_back(std::make_pair(
+ testing_clock_receiver_->NowTicks(),
+ playout_time));
+ frame_receiver_->GetRawVideoFrame(
+ base::Bind(&End2EndTest::BasicPlayerGotVideoFrame,
+ base::Unretained(this)));
+ }
+
+ void BasicPlayerGotAudioFrame(scoped_ptr<AudioBus> audio_bus,
+ const base::TimeTicks& playout_time,
+ bool is_continuous) {
+ VLOG_IF(1, !last_audio_playout_time_.is_null())
+ << "Audio frame playout time delta (compared to last frame) is "
+ << (playout_time - last_audio_playout_time_).InMicroseconds()
+ << " usec.";
+ last_audio_playout_time_ = playout_time;
+
+ audio_ticks_.push_back(std::make_pair(
+ testing_clock_receiver_->NowTicks(),
+ playout_time));
+ frame_receiver_->GetRawAudioFrame(
+ base::Bind(&End2EndTest::BasicPlayerGotAudioFrame,
+ base::Unretained(this)));
+ }
+
+ void StartBasicPlayer() {
+ frame_receiver_->GetRawVideoFrame(
+ base::Bind(&End2EndTest::BasicPlayerGotVideoFrame,
+ base::Unretained(this)));
+ frame_receiver_->GetRawAudioFrame(
+ base::Bind(&End2EndTest::BasicPlayerGotAudioFrame,
+ base::Unretained(this)));
}
void LogRawEvents(const std::vector<PacketEvent>& packet_events) {
@@ -638,6 +723,7 @@ class End2EndTest : public ::testing::Test {
++it) {
cast_environment_sender_->Logging()->InsertPacketEvent(it->timestamp,
it->type,
+ it->media_type,
it->rtp_timestamp,
it->frame_id,
it->packet_id,
@@ -646,15 +732,31 @@ class End2EndTest : public ::testing::Test {
}
}
- AudioReceiverConfig audio_receiver_config_;
- VideoReceiverConfig video_receiver_config_;
+ FrameReceiverConfig audio_receiver_config_;
+ FrameReceiverConfig video_receiver_config_;
AudioSenderConfig audio_sender_config_;
VideoSenderConfig video_sender_config_;
base::TimeTicks start_time_;
- base::SimpleTestTickClock* testing_clock_sender_;
- base::SimpleTestTickClock* testing_clock_receiver_;
+
+ // These run in "test time"
+ base::SimpleTestTickClock testing_clock_;
scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+
+ // These run on the sender timeline.
+ test::SkewedTickClock* testing_clock_sender_;
+ scoped_refptr<test::SkewedSingleThreadTaskRunner> task_runner_sender_;
+
+ // These run on the receiver timeline.
+ test::SkewedTickClock* testing_clock_receiver_;
+ scoped_refptr<test::SkewedSingleThreadTaskRunner> task_runner_receiver_;
+ base::TimeDelta min_video_playout_delta_;
+ base::TimeDelta max_video_playout_delta_;
+ base::TimeDelta max_video_playout_curvature_;
+ base::TimeTicks last_video_playout_time_;
+ base::TimeDelta last_video_playout_delta_;
+ base::TimeTicks last_audio_playout_time_;
+
scoped_refptr<CastEnvironment> cast_environment_sender_;
scoped_refptr<CastEnvironment> cast_environment_receiver_;
@@ -676,12 +778,14 @@ class End2EndTest : public ::testing::Test {
SimpleEventSubscriber event_subscriber_sender_;
std::vector<FrameEvent> frame_events_;
std::vector<PacketEvent> packet_events_;
+ std::vector<std::pair<base::TimeTicks, base::TimeTicks> > audio_ticks_;
+ std::vector<std::pair<base::TimeTicks, base::TimeTicks> > video_ticks_;
// |transport_sender_| has a RepeatingTimer which needs a MessageLoop.
base::MessageLoop message_loop_;
};
TEST_F(End2EndTest, LoopNoLossPcm16) {
- Configure(transport::kVp8, transport::kPcm16, 32000, false, 1);
+ Configure(transport::kVp8, transport::kPcm16, 32000, 1);
// Reduce video resolution to allow processing multiple frames within a
// reasonable time frame.
video_sender_config_.width = kVideoQcifWidth;
@@ -703,7 +807,8 @@ TEST_F(End2EndTest, LoopNoLossPcm16) {
video_start,
video_sender_config_.width,
video_sender_config_.height,
- testing_clock_sender_->NowTicks(),
+ testing_clock_sender_->NowTicks() +
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs),
true);
SendVideoFrame(video_start, testing_clock_sender_->NowTicks());
@@ -734,7 +839,7 @@ TEST_F(End2EndTest, LoopNoLossPcm16) {
// This tests our external decoder interface for Audio.
// Audio test without packet loss using raw PCM 16 audio "codec";
TEST_F(End2EndTest, LoopNoLossPcm16ExternalDecoder) {
- Configure(transport::kVp8, transport::kPcm16, 32000, true, 1);
+ Configure(transport::kVp8, transport::kPcm16, 32000, 1);
Create();
const int kNumIterations = 10;
@@ -752,8 +857,7 @@ TEST_F(End2EndTest, LoopNoLossPcm16ExternalDecoder) {
// This tests our Opus audio codec without video.
TEST_F(End2EndTest, LoopNoLossOpus) {
- Configure(transport::kVp8, transport::kOpus, kDefaultAudioSamplingRate,
- false, 1);
+ Configure(transport::kVp8, transport::kOpus, kDefaultAudioSamplingRate, 1);
Create();
const int kNumIterations = 300;
@@ -779,8 +883,7 @@ TEST_F(End2EndTest, LoopNoLossOpus) {
// in audio_receiver.cc for likely cause(s) of this bug.
// http://crbug.com/356942
TEST_F(End2EndTest, DISABLED_StartSenderBeforeReceiver) {
- Configure(transport::kVp8, transport::kPcm16, kDefaultAudioSamplingRate,
- false, 1);
+ Configure(transport::kVp8, transport::kPcm16, kDefaultAudioSamplingRate, 1);
Create();
int video_start = kVideoStart;
@@ -808,7 +911,8 @@ TEST_F(End2EndTest, DISABLED_StartSenderBeforeReceiver) {
video_start,
video_sender_config_.width,
video_sender_config_.height,
- initial_send_time + expected_delay,
+ initial_send_time + expected_delay +
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs),
true);
SendVideoFrame(video_start, testing_clock_sender_->NowTicks());
@@ -837,7 +941,8 @@ TEST_F(End2EndTest, DISABLED_StartSenderBeforeReceiver) {
video_start,
video_sender_config_.width,
video_sender_config_.height,
- testing_clock_sender_->NowTicks(),
+ testing_clock_sender_->NowTicks() +
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs),
true);
SendVideoFrame(video_start, testing_clock_sender_->NowTicks());
@@ -866,25 +971,24 @@ TEST_F(End2EndTest, DISABLED_StartSenderBeforeReceiver) {
// This tests a network glitch lasting for 10 video frames.
// Flaky. See crbug.com/351596.
TEST_F(End2EndTest, DISABLED_GlitchWith3Buffers) {
- Configure(transport::kVp8, transport::kOpus, kDefaultAudioSamplingRate,
- false, 3);
+ Configure(transport::kVp8, transport::kOpus, kDefaultAudioSamplingRate, 3);
video_sender_config_.rtp_config.max_delay_ms = 67;
video_receiver_config_.rtp_max_delay_ms = 67;
Create();
int video_start = kVideoStart;
- base::TimeTicks send_time;
+ base::TimeTicks capture_time;
// Frames will rendered on completion until the render time stabilizes, i.e.
// we got enough data.
const int frames_before_glitch = 20;
for (int i = 0; i < frames_before_glitch; ++i) {
- send_time = testing_clock_sender_->NowTicks();
- SendVideoFrame(video_start, send_time);
+ capture_time = testing_clock_sender_->NowTicks();
+ SendVideoFrame(video_start, capture_time);
test_receiver_video_callback_->AddExpectedResult(
video_start,
video_sender_config_.width,
video_sender_config_.height,
- send_time,
+ capture_time + base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs),
true);
frame_receiver_->GetRawVideoFrame(
base::Bind(&TestReceiverVideoCallback::CheckVideoFrame,
@@ -896,27 +1000,28 @@ TEST_F(End2EndTest, DISABLED_GlitchWith3Buffers) {
// Introduce a glitch lasting for 10 frames.
sender_to_receiver_.SetSendPackets(false);
for (int i = 0; i < 10; ++i) {
- send_time = testing_clock_sender_->NowTicks();
+ capture_time = testing_clock_sender_->NowTicks();
// First 3 will be sent and lost.
- SendVideoFrame(video_start, send_time);
+ SendVideoFrame(video_start, capture_time);
RunTasks(kFrameTimerMs);
video_start++;
}
sender_to_receiver_.SetSendPackets(true);
RunTasks(100);
- send_time = testing_clock_sender_->NowTicks();
+ capture_time = testing_clock_sender_->NowTicks();
// Frame 1 should be acked by now and we should have an opening to send 4.
- SendVideoFrame(video_start, send_time);
+ SendVideoFrame(video_start, capture_time);
RunTasks(kFrameTimerMs);
// Frames 1-3 are old frames by now, and therefore should be decoded, but
// not rendered. The next frame we expect to render is frame #4.
- test_receiver_video_callback_->AddExpectedResult(video_start,
- video_sender_config_.width,
- video_sender_config_.height,
- send_time,
- true);
+ test_receiver_video_callback_->AddExpectedResult(
+ video_start,
+ video_sender_config_.width,
+ video_sender_config_.height,
+ capture_time + base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs),
+ true);
frame_receiver_->GetRawVideoFrame(
base::Bind(&TestReceiverVideoCallback::CheckVideoFrame,
@@ -929,27 +1034,27 @@ TEST_F(End2EndTest, DISABLED_GlitchWith3Buffers) {
// Disabled due to flakiness and crashiness. http://crbug.com/360951
TEST_F(End2EndTest, DISABLED_DropEveryOtherFrame3Buffers) {
- Configure(transport::kVp8, transport::kOpus, kDefaultAudioSamplingRate, false,
- 3);
+ Configure(transport::kVp8, transport::kOpus, kDefaultAudioSamplingRate, 3);
video_sender_config_.rtp_config.max_delay_ms = 67;
video_receiver_config_.rtp_max_delay_ms = 67;
Create();
sender_to_receiver_.DropAllPacketsBelongingToOddFrames();
int video_start = kVideoStart;
- base::TimeTicks send_time;
+ base::TimeTicks capture_time;
int i = 0;
for (; i < 20; ++i) {
- send_time = testing_clock_sender_->NowTicks();
- SendVideoFrame(video_start, send_time);
+ capture_time = testing_clock_sender_->NowTicks();
+ SendVideoFrame(video_start, capture_time);
if (i % 2 == 0) {
test_receiver_video_callback_->AddExpectedResult(
video_start,
video_sender_config_.width,
video_sender_config_.height,
- send_time,
+ capture_time +
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs),
i == 0);
// GetRawVideoFrame will not return the frame until we are close in
@@ -967,7 +1072,7 @@ TEST_F(End2EndTest, DISABLED_DropEveryOtherFrame3Buffers) {
}
TEST_F(End2EndTest, CryptoVideo) {
- Configure(transport::kVp8, transport::kPcm16, 32000, false, 1);
+ Configure(transport::kVp8, transport::kPcm16, 32000, 1);
video_sender_config_.rtp_config.aes_iv_mask =
ConvertFromBase16String("1234567890abcdeffedcba0987654321");
@@ -983,14 +1088,14 @@ TEST_F(End2EndTest, CryptoVideo) {
int frames_counter = 0;
for (; frames_counter < 3; ++frames_counter) {
- const base::TimeTicks send_time = testing_clock_sender_->NowTicks();
- SendVideoFrame(frames_counter, send_time);
+ const base::TimeTicks capture_time = testing_clock_sender_->NowTicks();
+ SendVideoFrame(frames_counter, capture_time);
test_receiver_video_callback_->AddExpectedResult(
frames_counter,
video_sender_config_.width,
video_sender_config_.height,
- send_time,
+ capture_time + base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs),
true);
RunTasks(kFrameTimerMs);
@@ -1005,7 +1110,7 @@ TEST_F(End2EndTest, CryptoVideo) {
}
TEST_F(End2EndTest, CryptoAudio) {
- Configure(transport::kVp8, transport::kPcm16, 32000, false, 1);
+ Configure(transport::kVp8, transport::kPcm16, 32000, 1);
audio_sender_config_.rtp_config.aes_iv_mask =
ConvertFromBase16String("abcdeffedcba12345678900987654321");
@@ -1034,21 +1139,21 @@ TEST_F(End2EndTest, CryptoAudio) {
// Video test without packet loss - tests the logging aspects of the end2end,
// but is basically equivalent to LoopNoLossPcm16.
TEST_F(End2EndTest, VideoLogging) {
- Configure(transport::kVp8, transport::kPcm16, 32000, false, 1);
+ Configure(transport::kVp8, transport::kPcm16, 32000, 1);
Create();
int video_start = kVideoStart;
const int num_frames = 5;
for (int i = 0; i < num_frames; ++i) {
- base::TimeTicks send_time = testing_clock_sender_->NowTicks();
+ base::TimeTicks capture_time = testing_clock_sender_->NowTicks();
test_receiver_video_callback_->AddExpectedResult(
video_start,
video_sender_config_.width,
video_sender_config_.height,
- send_time,
+ capture_time + base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs),
true);
- SendVideoFrame(video_start, send_time);
+ SendVideoFrame(video_start, capture_time);
RunTasks(kFrameTimerMs);
frame_receiver_->GetRawVideoFrame(
@@ -1085,40 +1190,42 @@ TEST_F(End2EndTest, VideoLogging) {
map_it != event_counter_for_frame.end();
++map_it) {
int total_event_count_for_frame = 0;
- for (int i = 0; i < kNumOfLoggingEvents; ++i) {
+ for (int i = 0; i <= kNumOfLoggingEvents; ++i) {
total_event_count_for_frame += map_it->second.counter[i];
}
int expected_event_count_for_frame = 0;
- EXPECT_EQ(1, map_it->second.counter[kVideoFrameCaptureBegin]);
+ EXPECT_EQ(1, map_it->second.counter[FRAME_CAPTURE_BEGIN]);
expected_event_count_for_frame +=
- map_it->second.counter[kVideoFrameCaptureBegin];
+ map_it->second.counter[FRAME_CAPTURE_BEGIN];
- EXPECT_EQ(1, map_it->second.counter[kVideoFrameEncoded]);
+ EXPECT_EQ(1, map_it->second.counter[FRAME_CAPTURE_END]);
expected_event_count_for_frame +=
- map_it->second.counter[kVideoFrameEncoded];
+ map_it->second.counter[FRAME_CAPTURE_END];
- EXPECT_EQ(1, map_it->second.counter[kVideoFrameCaptureEnd]);
+ EXPECT_EQ(1, map_it->second.counter[FRAME_ENCODED]);
expected_event_count_for_frame +=
- map_it->second.counter[kVideoFrameCaptureEnd];
+ map_it->second.counter[FRAME_ENCODED];
- EXPECT_EQ(1, map_it->second.counter[kVideoRenderDelay]);
- expected_event_count_for_frame += map_it->second.counter[kVideoRenderDelay];
-
- EXPECT_EQ(1, map_it->second.counter[kVideoFrameDecoded]);
+ EXPECT_EQ(1, map_it->second.counter[FRAME_DECODED]);
expected_event_count_for_frame +=
- map_it->second.counter[kVideoFrameDecoded];
+ map_it->second.counter[FRAME_DECODED];
+
+ EXPECT_EQ(1, map_it->second.counter[FRAME_PLAYOUT]);
+ expected_event_count_for_frame += map_it->second.counter[FRAME_PLAYOUT];
- // There is no guarantee that kVideoAckSent is loggeed exactly once per
+
+ // There is no guarantee that FRAME_ACK_SENT is logged exactly once per
// frame.
- EXPECT_GT(map_it->second.counter[kVideoAckSent], 0);
- expected_event_count_for_frame += map_it->second.counter[kVideoAckSent];
+ EXPECT_GT(map_it->second.counter[FRAME_ACK_SENT], 0);
+ expected_event_count_for_frame += map_it->second.counter[FRAME_ACK_SENT];
- // There is no guarantee that kVideoAckReceived is loggeed exactly once per
+ // There is no guarantee that FRAME_ACK_RECEIVED is logged exactly once per
// frame.
- EXPECT_GT(map_it->second.counter[kVideoAckReceived], 0);
- expected_event_count_for_frame += map_it->second.counter[kVideoAckReceived];
+ EXPECT_GT(map_it->second.counter[FRAME_ACK_RECEIVED], 0);
+ expected_event_count_for_frame +=
+ map_it->second.counter[FRAME_ACK_RECEIVED];
// Verify that there were no other events logged with respect to this
// frame.
@@ -1138,14 +1245,14 @@ TEST_F(End2EndTest, VideoLogging) {
map_it != event_count_for_packet.end();
++map_it) {
int total_event_count_for_packet = 0;
- for (int i = 0; i < kNumOfLoggingEvents; ++i) {
+ for (int i = 0; i <= kNumOfLoggingEvents; ++i) {
total_event_count_for_packet += map_it->second.counter[i];
}
int expected_event_count_for_packet = 0;
- EXPECT_GT(map_it->second.counter[kVideoPacketReceived], 0);
+ EXPECT_GT(map_it->second.counter[PACKET_RECEIVED], 0);
expected_event_count_for_packet +=
- map_it->second.counter[kVideoPacketReceived];
+ map_it->second.counter[PACKET_RECEIVED];
// Verify that there were no other events logged with respect to this
// packet. (i.e. Total event count = expected event count)
@@ -1156,7 +1263,7 @@ TEST_F(End2EndTest, VideoLogging) {
// Audio test without packet loss - tests the logging aspects of the end2end,
// but is basically equivalent to LoopNoLossPcm16.
TEST_F(End2EndTest, AudioLogging) {
- Configure(transport::kVp8, transport::kPcm16, 32000, false, 1);
+ Configure(transport::kVp8, transport::kPcm16, 32000, 1);
Create();
int audio_diff = kFrameTimerMs;
@@ -1197,7 +1304,7 @@ TEST_F(End2EndTest, AudioLogging) {
event_counter_for_frame.begin();
it != event_counter_for_frame.end();
++it) {
- encoded_count += it->second.counter[kAudioFrameEncoded];
+ encoded_count += it->second.counter[FRAME_ENCODED];
}
EXPECT_EQ(num_audio_frames_requested, encoded_count);
@@ -1207,25 +1314,25 @@ TEST_F(End2EndTest, AudioLogging) {
event_counter_for_frame.begin();
map_it != event_counter_for_frame.end(); ++map_it) {
int total_event_count_for_frame = 0;
- for (int j = 0; j < kNumOfLoggingEvents; ++j)
+ for (int j = 0; j <= kNumOfLoggingEvents; ++j)
total_event_count_for_frame += map_it->second.counter[j];
int expected_event_count_for_frame = 0;
- EXPECT_EQ(1, map_it->second.counter[kAudioFrameEncoded]);
+ EXPECT_EQ(1, map_it->second.counter[FRAME_ENCODED]);
expected_event_count_for_frame +=
- map_it->second.counter[kAudioFrameEncoded];
+ map_it->second.counter[FRAME_ENCODED];
- EXPECT_EQ(1, map_it->second.counter[kAudioPlayoutDelay]);
+ EXPECT_EQ(1, map_it->second.counter[FRAME_PLAYOUT]);
expected_event_count_for_frame +=
- map_it->second.counter[kAudioPlayoutDelay];
+ map_it->second.counter[FRAME_PLAYOUT];
- EXPECT_EQ(1, map_it->second.counter[kAudioFrameDecoded]);
+ EXPECT_EQ(1, map_it->second.counter[FRAME_DECODED]);
expected_event_count_for_frame +=
- map_it->second.counter[kAudioFrameDecoded];
+ map_it->second.counter[FRAME_DECODED];
- EXPECT_GT(map_it->second.counter[kAudioAckSent], 0);
- expected_event_count_for_frame += map_it->second.counter[kAudioAckSent];
+ EXPECT_GT(map_it->second.counter[FRAME_ACK_SENT], 0);
+ expected_event_count_for_frame += map_it->second.counter[FRAME_ACK_SENT];
// Verify that there were no other events logged with respect to this frame.
// (i.e. Total event count = expected event count)
@@ -1234,19 +1341,93 @@ TEST_F(End2EndTest, AudioLogging) {
}
TEST_F(End2EndTest, BasicFakeSoftwareVideo) {
- Configure(transport::kFakeSoftwareVideo, transport::kPcm16, 32000, false, 1);
+ Configure(transport::kFakeSoftwareVideo, transport::kPcm16, 32000, 1);
Create();
+ StartBasicPlayer();
+ SetReceiverSkew(1.0, base::TimeDelta::FromMilliseconds(1));
+
+ // Expect very smooth playout when there is no clock skew.
+ SetExpectedVideoPlayoutSmoothness(
+ base::TimeDelta::FromMilliseconds(kFrameTimerMs) * 99 / 100,
+ base::TimeDelta::FromMilliseconds(kFrameTimerMs) * 101 / 100,
+ base::TimeDelta::FromMilliseconds(kFrameTimerMs) / 100);
int frames_counter = 0;
- int received_counter = 0;
for (; frames_counter < 1000; ++frames_counter) {
SendFakeVideoFrame(testing_clock_sender_->NowTicks());
- frame_receiver_->GetRawVideoFrame(
- base::Bind(&CountVideoFrame, &received_counter));
RunTasks(kFrameTimerMs);
}
RunTasks(2 * kFrameTimerMs + 1); // Empty the pipeline.
- EXPECT_EQ(1000, received_counter);
+ EXPECT_EQ(1000ul, video_ticks_.size());
+}
+
+TEST_F(End2EndTest, ReceiverClockFast) {
+ Configure(transport::kFakeSoftwareVideo, transport::kPcm16, 32000, 1);
+ Create();
+ StartBasicPlayer();
+ SetReceiverSkew(2.0, base::TimeDelta::FromMicroseconds(1234567));
+
+ int frames_counter = 0;
+ for (; frames_counter < 10000; ++frames_counter) {
+ SendFakeVideoFrame(testing_clock_sender_->NowTicks());
+ RunTasks(kFrameTimerMs);
+ }
+ RunTasks(2 * kFrameTimerMs + 1); // Empty the pipeline.
+ EXPECT_EQ(10000ul, video_ticks_.size());
+}
+
+TEST_F(End2EndTest, ReceiverClockSlow) {
+ Configure(transport::kFakeSoftwareVideo, transport::kPcm16, 32000, 1);
+ Create();
+ StartBasicPlayer();
+ SetReceiverSkew(0.5, base::TimeDelta::FromMicroseconds(-765432));
+
+ int frames_counter = 0;
+ for (; frames_counter < 10000; ++frames_counter) {
+ SendFakeVideoFrame(testing_clock_sender_->NowTicks());
+ RunTasks(kFrameTimerMs);
+ }
+ RunTasks(2 * kFrameTimerMs + 1); // Empty the pipeline.
+ EXPECT_EQ(10000ul, video_ticks_.size());
+}
+
+TEST_F(End2EndTest, SmoothPlayoutWithFivePercentClockRateSkew) {
+ Configure(transport::kFakeSoftwareVideo, transport::kPcm16, 32000, 1);
+ Create();
+ StartBasicPlayer();
+ SetReceiverSkew(1.05, base::TimeDelta::FromMilliseconds(-42));
+
+ // Expect smooth playout when there is 5% skew.
+ SetExpectedVideoPlayoutSmoothness(
+ base::TimeDelta::FromMilliseconds(kFrameTimerMs) * 90 / 100,
+ base::TimeDelta::FromMilliseconds(kFrameTimerMs) * 110 / 100,
+ base::TimeDelta::FromMilliseconds(kFrameTimerMs) / 10);
+
+ int frames_counter = 0;
+ for (; frames_counter < 10000; ++frames_counter) {
+ SendFakeVideoFrame(testing_clock_sender_->NowTicks());
+ RunTasks(kFrameTimerMs);
+ }
+ RunTasks(2 * kFrameTimerMs + 1); // Empty the pipeline.
+ EXPECT_EQ(10000ul, video_ticks_.size());
+}
+
+TEST_F(End2EndTest, EvilNetwork) {
+ Configure(transport::kFakeSoftwareVideo, transport::kPcm16, 32000, 1);
+ receiver_to_sender_.SetPacketPipe(test::EvilNetwork().Pass());
+ sender_to_receiver_.SetPacketPipe(test::EvilNetwork().Pass());
+ Create();
+ StartBasicPlayer();
+
+ int frames_counter = 0;
+ for (; frames_counter < 10000; ++frames_counter) {
+ SendFakeVideoFrame(testing_clock_sender_->NowTicks());
+ RunTasks(kFrameTimerMs);
+ }
+ base::TimeTicks test_end = testing_clock_receiver_->NowTicks();
+ RunTasks(100 * kFrameTimerMs + 1); // Empty the pipeline.
+ EXPECT_GT(video_ticks_.size(), 100ul);
+ EXPECT_LT((video_ticks_.back().second - test_end).InMilliseconds(), 1000);
}
// TODO(pwestin): Add repeatable packet loss test.
diff --git a/media/cast/test/fake_single_thread_task_runner.cc b/media/cast/test/fake_single_thread_task_runner.cc
index d2950fd9fb..b60a1b12ea 100644
--- a/media/cast/test/fake_single_thread_task_runner.cc
+++ b/media/cast/test/fake_single_thread_task_runner.cc
@@ -14,7 +14,8 @@ namespace test {
FakeSingleThreadTaskRunner::FakeSingleThreadTaskRunner(
base::SimpleTestTickClock* clock)
- : clock_(clock) {}
+ : clock_(clock),
+ fail_on_next_task_(false) {}
FakeSingleThreadTaskRunner::~FakeSingleThreadTaskRunner() {}
@@ -22,6 +23,9 @@ bool FakeSingleThreadTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
const base::Closure& task,
base::TimeDelta delay) {
+ if (fail_on_next_task_) {
+ LOG(FATAL) << "Infinite task-add loop detected.";
+ }
EXPECT_GE(delay, base::TimeDelta());
PostedTask posed_task(from_here,
task,
@@ -38,7 +42,7 @@ bool FakeSingleThreadTaskRunner::RunsTasksOnCurrentThread() const {
}
void FakeSingleThreadTaskRunner::RunTasks() {
- while(true) {
+ while (true) {
// Run all tasks equal or older than current time.
std::multimap<base::TimeTicks, PostedTask>::iterator it = tasks_.begin();
if (it == tasks_.end())
@@ -53,6 +57,38 @@ void FakeSingleThreadTaskRunner::RunTasks() {
}
}
+void FakeSingleThreadTaskRunner::Sleep(base::TimeDelta t) {
+ base::TimeTicks run_until = clock_->NowTicks() + t;
+ while (1) {
+ // If we run more than 100000 iterations, we've probably
+ // hit some sort of case where a new task is posted every
+ // time that we invoke a task, and we can't make progress
+ // anymore. If that happens, set fail_on_next_task_ to true
+ // and throw an error when the next task is posted.
+ for (int i = 0; i < 100000; i++) {
+ // Run all tasks equal or older than current time.
+ std::multimap<base::TimeTicks, PostedTask>::iterator it = tasks_.begin();
+ if (it == tasks_.end()) {
+ clock_->Advance(run_until - clock_->NowTicks());
+ return;
+ }
+
+ PostedTask task = it->second;
+ if (run_until < task.GetTimeToRun()) {
+ clock_->Advance(run_until - clock_->NowTicks());
+ return;
+ }
+
+ clock_->Advance(task.GetTimeToRun() - clock_->NowTicks());
+ tasks_.erase(it);
+ task.task.Run();
+ }
+ // Instead of failing immediately, we fail when the next task is
+ // added so that the backtrace will include the task that was added.
+ fail_on_next_task_ = true;
+ }
+}
+
bool FakeSingleThreadTaskRunner::PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
const base::Closure& task,
diff --git a/media/cast/test/fake_single_thread_task_runner.h b/media/cast/test/fake_single_thread_task_runner.h
index 710138597f..779a897cfb 100644
--- a/media/cast/test/fake_single_thread_task_runner.h
+++ b/media/cast/test/fake_single_thread_task_runner.h
@@ -24,6 +24,9 @@ class FakeSingleThreadTaskRunner : public base::SingleThreadTaskRunner {
void RunTasks();
+ // Note: Advances |clock_|.
+ void Sleep(base::TimeDelta t);
+
// base::SingleThreadTaskRunner implementation.
virtual bool PostDelayedTask(const tracked_objects::Location& from_here,
const base::Closure& task,
@@ -43,6 +46,7 @@ class FakeSingleThreadTaskRunner : public base::SingleThreadTaskRunner {
private:
base::SimpleTestTickClock* const clock_;
std::multimap<base::TimeTicks, PostedTask> tasks_;
+ bool fail_on_next_task_;
DISALLOW_COPY_AND_ASSIGN(FakeSingleThreadTaskRunner);
};
diff --git a/media/cast/test/receiver.cc b/media/cast/test/receiver.cc
index f80f5bee28..9861d3834e 100644
--- a/media/cast/test/receiver.cc
+++ b/media/cast/test/receiver.cc
@@ -86,7 +86,7 @@ std::string GetIpAddress(const std::string display_text) {
return ip_address;
}
-void GetSsrcs(AudioReceiverConfig* audio_config) {
+void GetAudioSsrcs(FrameReceiverConfig* audio_config) {
test::InputBuilder input_tx(
"Choose audio sender SSRC.", DEFAULT_AUDIO_FEEDBACK_SSRC, 1, INT_MAX);
audio_config->feedback_ssrc = input_tx.GetIntInput();
@@ -96,7 +96,7 @@ void GetSsrcs(AudioReceiverConfig* audio_config) {
audio_config->incoming_ssrc = input_rx.GetIntInput();
}
-void GetSsrcs(VideoReceiverConfig* video_config) {
+void GetVideoSsrcs(FrameReceiverConfig* video_config) {
test::InputBuilder input_tx(
"Choose video sender SSRC.", DEFAULT_VIDEO_FEEDBACK_SSRC, 1, INT_MAX);
video_config->feedback_ssrc = input_tx.GetIntInput();
@@ -119,7 +119,7 @@ void GetWindowSize(int* width, int* height) {
}
#endif // OS_LINUX
-void GetPayloadtype(AudioReceiverConfig* audio_config) {
+void GetAudioPayloadtype(FrameReceiverConfig* audio_config) {
test::InputBuilder input("Choose audio receiver payload type.",
DEFAULT_AUDIO_PAYLOAD_TYPE,
96,
@@ -127,15 +127,15 @@ void GetPayloadtype(AudioReceiverConfig* audio_config) {
audio_config->rtp_payload_type = input.GetIntInput();
}
-AudioReceiverConfig GetAudioReceiverConfig() {
- AudioReceiverConfig audio_config = GetDefaultAudioReceiverConfig();
- GetSsrcs(&audio_config);
- GetPayloadtype(&audio_config);
+FrameReceiverConfig GetAudioReceiverConfig() {
+ FrameReceiverConfig audio_config = GetDefaultAudioReceiverConfig();
+ GetAudioSsrcs(&audio_config);
+ GetAudioPayloadtype(&audio_config);
audio_config.rtp_max_delay_ms = 300;
return audio_config;
}
-void GetPayloadtype(VideoReceiverConfig* video_config) {
+void GetVideoPayloadtype(FrameReceiverConfig* video_config) {
test::InputBuilder input("Choose video receiver payload type.",
DEFAULT_VIDEO_PAYLOAD_TYPE,
96,
@@ -143,15 +143,15 @@ void GetPayloadtype(VideoReceiverConfig* video_config) {
video_config->rtp_payload_type = input.GetIntInput();
}
-VideoReceiverConfig GetVideoReceiverConfig() {
- VideoReceiverConfig video_config = GetDefaultVideoReceiverConfig();
- GetSsrcs(&video_config);
- GetPayloadtype(&video_config);
+FrameReceiverConfig GetVideoReceiverConfig() {
+ FrameReceiverConfig video_config = GetDefaultVideoReceiverConfig();
+ GetVideoSsrcs(&video_config);
+ GetVideoPayloadtype(&video_config);
video_config.rtp_max_delay_ms = 300;
return video_config;
}
-AudioParameters ToAudioParameters(const AudioReceiverConfig& config) {
+AudioParameters ToAudioParameters(const FrameReceiverConfig& config) {
const int samples_in_10ms = config.frequency / 100;
return AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
GuessChannelLayout(config.channels),
@@ -182,8 +182,8 @@ class NaivePlayer : public InProcessReceiver,
NaivePlayer(const scoped_refptr<CastEnvironment>& cast_environment,
const net::IPEndPoint& local_end_point,
const net::IPEndPoint& remote_end_point,
- const AudioReceiverConfig& audio_config,
- const VideoReceiverConfig& video_config,
+ const FrameReceiverConfig& audio_config,
+ const FrameReceiverConfig& video_config,
int window_width,
int window_height)
: InProcessReceiver(cast_environment,
@@ -377,12 +377,6 @@ class NaivePlayer : public InProcessReceiver,
return dest->frames();
}
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE {
- return OnMoreData(dest, buffers_state);
- }
-
virtual void OnError(AudioOutputStream* stream) OVERRIDE {
LOG(ERROR) << "AudioOutputStream reports an error. "
<< "Playback is unlikely to continue.";
@@ -430,14 +424,21 @@ class NaivePlayer : public InProcessReceiver,
DCHECK(cast_env()->CurrentlyOn(CastEnvironment::MAIN));
if (is_being_skipped) {
- VLOG(1) << "VideoFrame[" << num_video_frames_processed_ << "]: Skipped.";
+ VLOG(1) << "VideoFrame[" << num_video_frames_processed_
+ << " (dt=" << (video_playout_queue_.front().first -
+ last_popped_video_playout_time_).InMicroseconds()
+ << " usec)]: Skipped.";
} else {
- VLOG(1) << "VideoFrame[" << num_video_frames_processed_ << "]: Playing "
+ VLOG(1) << "VideoFrame[" << num_video_frames_processed_
+ << " (dt=" << (video_playout_queue_.front().first -
+ last_popped_video_playout_time_).InMicroseconds()
+ << " usec)]: Playing "
<< (cast_env()->Clock()->NowTicks() -
video_playout_queue_.front().first).InMicroseconds()
<< " usec later than intended.";
}
+ last_popped_video_playout_time_ = video_playout_queue_.front().first;
const scoped_refptr<VideoFrame> ret = video_playout_queue_.front().second;
video_playout_queue_.pop_front();
++num_video_frames_processed_;
@@ -448,14 +449,21 @@ class NaivePlayer : public InProcessReceiver,
audio_lock_.AssertAcquired();
if (was_skipped) {
- VLOG(1) << "AudioFrame[" << num_audio_frames_processed_ << "]: Skipped";
+ VLOG(1) << "AudioFrame[" << num_audio_frames_processed_
+ << " (dt=" << (audio_playout_queue_.front().first -
+ last_popped_audio_playout_time_).InMicroseconds()
+ << " usec)]: Skipped.";
} else {
- VLOG(1) << "AudioFrame[" << num_audio_frames_processed_ << "]: Playing "
+ VLOG(1) << "AudioFrame[" << num_audio_frames_processed_
+ << " (dt=" << (audio_playout_queue_.front().first -
+ last_popped_audio_playout_time_).InMicroseconds()
+ << " usec)]: Playing "
<< (cast_env()->Clock()->NowTicks() -
audio_playout_queue_.front().first).InMicroseconds()
<< " usec later than intended.";
}
+ last_popped_audio_playout_time_ = audio_playout_queue_.front().first;
scoped_ptr<AudioBus> ret(audio_playout_queue_.front().second);
audio_playout_queue_.pop_front();
++num_audio_frames_processed_;
@@ -507,6 +515,7 @@ class NaivePlayer : public InProcessReceiver,
typedef std::pair<base::TimeTicks, scoped_refptr<VideoFrame> >
VideoQueueEntry;
std::deque<VideoQueueEntry> video_playout_queue_;
+ base::TimeTicks last_popped_video_playout_time_;
int64 num_video_frames_processed_;
base::OneShotTimer<NaivePlayer> video_playout_timer_;
@@ -515,6 +524,7 @@ class NaivePlayer : public InProcessReceiver,
base::Lock audio_lock_;
typedef std::pair<base::TimeTicks, AudioBus*> AudioQueueEntry;
std::deque<AudioQueueEntry> audio_playout_queue_;
+ base::TimeTicks last_popped_audio_playout_time_;
int64 num_audio_frames_processed_;
// These must only be used on the audio thread calling OnMoreData().
@@ -543,9 +553,9 @@ int main(int argc, char** argv) {
media::AudioManager::Create(&fake_audio_log_factory_));
CHECK(media::AudioManager::Get());
- media::cast::AudioReceiverConfig audio_config =
+ media::cast::FrameReceiverConfig audio_config =
media::cast::GetAudioReceiverConfig();
- media::cast::VideoReceiverConfig video_config =
+ media::cast::FrameReceiverConfig video_config =
media::cast::GetVideoReceiverConfig();
// Determine local and remote endpoints.
diff --git a/media/cast/test/sender.cc b/media/cast/test/sender.cc
index 50074b0a26..e457e2c9fe 100644
--- a/media/cast/test/sender.cc
+++ b/media/cast/test/sender.cc
@@ -140,7 +140,7 @@ VideoSenderConfig GetVideoSenderConfig() {
return video_config;
}
-void AVFreeFrame(AVFrame* frame) { avcodec_free_frame(&frame); }
+void AVFreeFrame(AVFrame* frame) { av_frame_free(&frame); }
class SendProcess {
public:
@@ -533,11 +533,11 @@ class SendProcess {
// Audio.
AVFrame* avframe = av_frame_alloc();
- // Shallow copy of the packet.
+ // Make a shallow copy of packet so we can slide packet.data as frames are
+ // decoded from the packet; otherwise av_free_packet() will corrupt memory.
AVPacket packet_temp = *packet.get();
do {
- avcodec_get_frame_defaults(avframe);
int frame_decoded = 0;
int result = avcodec_decode_audio4(
av_audio_context(), avframe, &frame_decoded, &packet_temp);
@@ -577,8 +577,9 @@ class SendProcess {
// Note: Not all files have correct values for pkt_pts.
base::TimeDelta::FromMilliseconds(avframe->pkt_pts));
audio_algo_.EnqueueBuffer(buffer);
+ av_frame_unref(avframe);
} while (packet_temp.size > 0);
- avcodec_free_frame(&avframe);
+ av_frame_free(&avframe);
const int frames_needed_to_scale =
playback_rate_ * av_audio_context()->sample_rate /
@@ -618,15 +619,16 @@ class SendProcess {
// Video.
int got_picture;
AVFrame* avframe = av_frame_alloc();
- avcodec_get_frame_defaults(avframe);
// Tell the decoder to reorder for us.
avframe->reordered_opaque =
av_video_context()->reordered_opaque = packet->pts;
CHECK(avcodec_decode_video2(
av_video_context(), avframe, &got_picture, packet.get()) >= 0)
<< "Video decode error.";
- if (!got_picture)
+ if (!got_picture) {
+ av_frame_free(&avframe);
return;
+ }
gfx::Size size(av_video_context()->width, av_video_context()->height);
if (!video_first_pts_set_ ||
avframe->reordered_opaque < video_first_pts_) {
@@ -745,7 +747,7 @@ class SendProcess {
namespace {
void UpdateCastTransportStatus(
media::cast::transport::CastTransportStatus status) {
- VLOG(21) << "Transport status: " << status;
+ VLOG(1) << "Transport status: " << status;
}
void LogRawEvents(
@@ -758,6 +760,7 @@ void LogRawEvents(
++it) {
cast_environment->Logging()->InsertPacketEvent(it->timestamp,
it->type,
+ it->media_type,
it->rtp_timestamp,
it->frame_id,
it->packet_id,
diff --git a/media/cast/test/skewed_single_thread_task_runner.cc b/media/cast/test/skewed_single_thread_task_runner.cc
new file mode 100644
index 0000000000..fda7dfbfca
--- /dev/null
+++ b/media/cast/test/skewed_single_thread_task_runner.cc
@@ -0,0 +1,53 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/test/skewed_single_thread_task_runner.h"
+
+#include "base/logging.h"
+#include "base/time/tick_clock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+namespace test {
+
+SkewedSingleThreadTaskRunner::SkewedSingleThreadTaskRunner(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner) :
+ skew_(1.0),
+ task_runner_(task_runner) {
+}
+
+SkewedSingleThreadTaskRunner::~SkewedSingleThreadTaskRunner() {}
+
+void SkewedSingleThreadTaskRunner::SetSkew(double skew) {
+ skew_ = skew;
+}
+
+bool SkewedSingleThreadTaskRunner::PostDelayedTask(
+ const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) {
+ return task_runner_->PostDelayedTask(
+ from_here,
+ task,
+ base::TimeDelta::FromMicroseconds(delay.InMicroseconds() * skew_));
+}
+
+bool SkewedSingleThreadTaskRunner::RunsTasksOnCurrentThread() const {
+ return task_runner_->RunsTasksOnCurrentThread();
+}
+
+bool SkewedSingleThreadTaskRunner::PostNonNestableDelayedTask(
+ const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) {
+ return task_runner_->PostNonNestableDelayedTask(
+ from_here,
+ task,
+ base::TimeDelta::FromMicroseconds(delay.InMicroseconds() * skew_));
+}
+
+} // namespace test
+} // namespace cast
+} // namespace media
diff --git a/media/cast/test/skewed_single_thread_task_runner.h b/media/cast/test/skewed_single_thread_task_runner.h
new file mode 100644
index 0000000000..5ad2f8d8bb
--- /dev/null
+++ b/media/cast/test/skewed_single_thread_task_runner.h
@@ -0,0 +1,58 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TEST_SKEWED_TASK_RUNNER_H_
+#define MEDIA_CAST_TEST_SKEWED_TASK_RUNNER_H_
+
+#include <map>
+
+#include "base/basictypes.h"
+#include "base/single_thread_task_runner.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/test/test_pending_task.h"
+
+namespace media {
+namespace cast {
+namespace test {
+
+// This class wraps a SingleThreadTaskRunner, and allows you to scale
+// the delay for any posted task by a factor. The factor is changed by
+// calling SetSkew(). A skew of 2.0 means that all delayed tasks will
+// have to wait twice as long.
+class SkewedSingleThreadTaskRunner : public base::SingleThreadTaskRunner {
+ public:
+ explicit SkewedSingleThreadTaskRunner(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
+
+ // Set the delay multiplier to |skew|.
+ void SetSkew(double skew);
+
+ // base::SingleThreadTaskRunner implementation.
+ virtual bool PostDelayedTask(const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) OVERRIDE;
+
+ virtual bool RunsTasksOnCurrentThread() const OVERRIDE;
+
+  // Currently unused; delegates to the wrapped runner with a skewed delay.
+ virtual bool PostNonNestableDelayedTask(
+ const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) OVERRIDE;
+
+ protected:
+ virtual ~SkewedSingleThreadTaskRunner();
+
+ private:
+ double skew_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+ DISALLOW_COPY_AND_ASSIGN(SkewedSingleThreadTaskRunner);
+};
+
+} // namespace test
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TEST_SKEWED_TASK_RUNNER_H_
diff --git a/media/cast/test/skewed_tick_clock.cc b/media/cast/test/skewed_tick_clock.cc
new file mode 100644
index 0000000000..272c61e6c9
--- /dev/null
+++ b/media/cast/test/skewed_tick_clock.cc
@@ -0,0 +1,39 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/test/skewed_tick_clock.h"
+
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+namespace test {
+
+SkewedTickClock::SkewedTickClock(base::TickClock* clock)
+ : clock_(clock),
+ skew_(1.0),
+ last_skew_set_time_(clock_->NowTicks()),
+ skew_clock_at_last_set_(last_skew_set_time_) {
+}
+
+base::TimeTicks SkewedTickClock::SkewTicks(base::TimeTicks now) {
+ return base::TimeDelta::FromMicroseconds(
+ (now - last_skew_set_time_).InMicroseconds() * skew_) +
+ skew_clock_at_last_set_;
+}
+
+void SkewedTickClock::SetSkew(double skew, base::TimeDelta offset) {
+ base::TimeTicks now = clock_->NowTicks();
+ skew_clock_at_last_set_ = SkewTicks(now) + offset;
+ skew_ = skew;
+ last_skew_set_time_ = now;
+}
+
+base::TimeTicks SkewedTickClock::NowTicks() {
+ return SkewTicks(clock_->NowTicks());
+}
+
+} // namespace test
+} // namespace cast
+} // namespace media
diff --git a/media/cast/test/skewed_tick_clock.h b/media/cast/test/skewed_tick_clock.h
new file mode 100644
index 0000000000..dcb538448c
--- /dev/null
+++ b/media/cast/test/skewed_tick_clock.h
@@ -0,0 +1,44 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CAST_MEDIA_TEST_SKEWED_TICK_CLOCK_H
+#define CAST_MEDIA_TEST_SKEWED_TICK_CLOCK_H
+
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+namespace test {
+
+// Wraps a base::TickClock, and lets you change the speed and offset
+// of time compared to the wrapped clock. See SetSkew for usage.
+class SkewedTickClock : public base::TickClock {
+ public:
+ // Does not take ownership of |clock_|.
+ explicit SkewedTickClock(base::TickClock* clock_);
+ // |skew| > 1.0 means clock runs faster.
+ // |offset| > 0 means clock returns times from the future.
+ // Note, |offset| is cumulative.
+ // Also note that changing the skew will never make the clock
+ // jump forwards or backwards, only changing the offset will
+ // do that.
+ void SetSkew(double skew, base::TimeDelta offset);
+ virtual base::TimeTicks NowTicks() OVERRIDE;
+
+ private:
+ base::TimeTicks SkewTicks(base::TimeTicks now);
+ base::TickClock* clock_; // Not owned.
+ double skew_;
+ base::TimeTicks last_skew_set_time_;
+ base::TimeTicks skew_clock_at_last_set_;
+
+ DISALLOW_COPY_AND_ASSIGN(SkewedTickClock);
+};
+
+} // namespace test
+} // namespace cast
+} // namespace media
+
+#endif // CAST_MEDIA_TEST_SKEWED_TICK_CLOCK_H
diff --git a/media/cast/test/utility/default_config.cc b/media/cast/test/utility/default_config.cc
index 1631bfe86b..b5de5ed690 100644
--- a/media/cast/test/utility/default_config.cc
+++ b/media/cast/test/utility/default_config.cc
@@ -25,27 +25,31 @@ void CreateVideoEncodeMemory(
namespace media {
namespace cast {
-AudioReceiverConfig GetDefaultAudioReceiverConfig() {
- AudioReceiverConfig config;
+FrameReceiverConfig GetDefaultAudioReceiverConfig() {
+ FrameReceiverConfig config;
config.feedback_ssrc = 2;
config.incoming_ssrc = 1;
- config.rtp_payload_type = 127;
config.rtcp_c_name = "audio_receiver@a.b.c.d";
- config.use_external_decoder = false;
+ config.rtp_max_delay_ms = kDefaultRtpMaxDelayMs;
+ config.rtp_payload_type = 127;
config.frequency = 48000;
config.channels = 2;
- config.codec = media::cast::transport::kOpus;
+ config.max_frame_rate = 100; // 10ms of signal per frame
+ config.codec.audio = media::cast::transport::kOpus;
return config;
}
-VideoReceiverConfig GetDefaultVideoReceiverConfig() {
- VideoReceiverConfig config;
+FrameReceiverConfig GetDefaultVideoReceiverConfig() {
+ FrameReceiverConfig config;
config.feedback_ssrc = 12;
config.incoming_ssrc = 11;
- config.rtp_payload_type = 96;
config.rtcp_c_name = "video_receiver@a.b.c.d";
- config.use_external_decoder = false;
- config.codec = media::cast::transport::kVp8;
+ config.rtp_max_delay_ms = kDefaultRtpMaxDelayMs;
+ config.rtp_payload_type = 96;
+ config.frequency = kVideoFrequency;
+ config.channels = 1;
+ config.max_frame_rate = kDefaultMaxFrameRate;
+ config.codec.video = media::cast::transport::kVp8;
return config;
}
diff --git a/media/cast/test/utility/default_config.h b/media/cast/test/utility/default_config.h
index 59a24404ef..eaa3c96415 100644
--- a/media/cast/test/utility/default_config.h
+++ b/media/cast/test/utility/default_config.h
@@ -10,15 +10,15 @@
namespace media {
namespace cast {
-// Returns an AudioReceiverConfig initialized to "good-to-go" values. This
+// Returns a FrameReceiverConfig initialized to "good-to-go" values. This
// specifies 48 kHz, 2-channel Opus-coded audio, with standard ssrc's, payload
// type, and a dummy name.
-AudioReceiverConfig GetDefaultAudioReceiverConfig();
+FrameReceiverConfig GetDefaultAudioReceiverConfig();
-// Returns a VideoReceiverConfig initialized to "good-to-go" values. This
+// Returns a FrameReceiverConfig initialized to "good-to-go" values. This
// specifies VP8-coded video, with standard ssrc's, payload type, and a dummy
// name.
-VideoReceiverConfig GetDefaultVideoReceiverConfig();
+FrameReceiverConfig GetDefaultVideoReceiverConfig();
// Returns a callback that does nothing.
CreateVideoEncodeAcceleratorCallback
diff --git a/media/cast/test/utility/in_process_receiver.cc b/media/cast/test/utility/in_process_receiver.cc
index ada4da4774..ba190cec1f 100644
--- a/media/cast/test/utility/in_process_receiver.cc
+++ b/media/cast/test/utility/in_process_receiver.cc
@@ -24,8 +24,8 @@ InProcessReceiver::InProcessReceiver(
const scoped_refptr<CastEnvironment>& cast_environment,
const net::IPEndPoint& local_end_point,
const net::IPEndPoint& remote_end_point,
- const AudioReceiverConfig& audio_config,
- const VideoReceiverConfig& video_config)
+ const FrameReceiverConfig& audio_config,
+ const FrameReceiverConfig& video_config)
: cast_environment_(cast_environment),
local_end_point_(local_end_point),
remote_end_point_(remote_end_point),
diff --git a/media/cast/test/utility/in_process_receiver.h b/media/cast/test/utility/in_process_receiver.h
index 06a798eb4c..cf25da9cee 100644
--- a/media/cast/test/utility/in_process_receiver.h
+++ b/media/cast/test/utility/in_process_receiver.h
@@ -46,15 +46,15 @@ class InProcessReceiver {
InProcessReceiver(const scoped_refptr<CastEnvironment>& cast_environment,
const net::IPEndPoint& local_end_point,
const net::IPEndPoint& remote_end_point,
- const AudioReceiverConfig& audio_config,
- const VideoReceiverConfig& video_config);
+ const FrameReceiverConfig& audio_config,
+ const FrameReceiverConfig& video_config);
virtual ~InProcessReceiver();
// Convenience accessors.
scoped_refptr<CastEnvironment> cast_env() const { return cast_environment_; }
- const AudioReceiverConfig& audio_config() const { return audio_config_; }
- const VideoReceiverConfig& video_config() const { return video_config_; }
+ const FrameReceiverConfig& audio_config() const { return audio_config_; }
+ const FrameReceiverConfig& video_config() const { return video_config_; }
// Begin delivering any received audio/video frames to the OnXXXFrame()
// methods.
@@ -105,8 +105,8 @@ class InProcessReceiver {
const scoped_refptr<CastEnvironment> cast_environment_;
const net::IPEndPoint local_end_point_;
const net::IPEndPoint remote_end_point_;
- const AudioReceiverConfig audio_config_;
- const VideoReceiverConfig video_config_;
+ const FrameReceiverConfig audio_config_;
+ const FrameReceiverConfig video_config_;
scoped_ptr<transport::UdpTransport> transport_;
scoped_ptr<CastReceiver> cast_receiver_;
diff --git a/media/cast/test/utility/net_utility.cc b/media/cast/test/utility/net_utility.cc
new file mode 100644
index 0000000000..d0bc2ad328
--- /dev/null
+++ b/media/cast/test/utility/net_utility.cc
@@ -0,0 +1,36 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/test/utility/net_utility.h"
+
+#include "base/basictypes.h"
+#include "net/base/net_errors.h"
+#include "net/udp/udp_socket.h"
+
+namespace media {
+namespace cast {
+namespace test {
+
+// TODO(hubbe): Move to /net/.
+net::IPEndPoint GetFreeLocalPort() {
+ net::IPAddressNumber localhost;
+ localhost.push_back(127);
+ localhost.push_back(0);
+ localhost.push_back(0);
+ localhost.push_back(1);
+ scoped_ptr<net::UDPSocket> receive_socket(
+ new net::UDPSocket(net::DatagramSocket::DEFAULT_BIND,
+ net::RandIntCallback(),
+ NULL,
+ net::NetLog::Source()));
+ receive_socket->AllowAddressReuse();
+ CHECK_EQ(net::OK, receive_socket->Bind(net::IPEndPoint(localhost, 0)));
+ net::IPEndPoint endpoint;
+ CHECK_EQ(net::OK, receive_socket->GetLocalAddress(&endpoint));
+ return endpoint;
+}
+
+} // namespace test
+} // namespace cast
+} // namespace media
diff --git a/media/cast/test/utility/net_utility.h b/media/cast/test/utility/net_utility.h
new file mode 100644
index 0000000000..c1199be7a8
--- /dev/null
+++ b/media/cast/test/utility/net_utility.h
@@ -0,0 +1,18 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/base/ip_endpoint.h"
+
+namespace media {
+namespace cast {
+namespace test {
+
+// Determine an unused UDP port for the in-process receiver to listen on.
+// Method: Bind a UDP socket on port 0, and then check which port the
+// operating system assigned to it.
+net::IPEndPoint GetFreeLocalPort();
+
+} // namespace test
+} // namespace cast
+} // namespace media
diff --git a/media/cast/test/utility/udp_proxy.cc b/media/cast/test/utility/udp_proxy.cc
index 113281528d..05c3b93891 100644
--- a/media/cast/test/utility/udp_proxy.cc
+++ b/media/cast/test/utility/udp_proxy.cc
@@ -9,6 +9,7 @@
#include "base/rand_util.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
+#include "base/time/default_tick_clock.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/udp/udp_socket.h"
@@ -22,10 +23,12 @@ const size_t kMaxPacketSize = 65536;
PacketPipe::PacketPipe() {}
PacketPipe::~PacketPipe() {}
void PacketPipe::InitOnIOThread(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner) {
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ base::TickClock* clock) {
task_runner_ = task_runner;
+ clock_ = clock;
if (pipe_) {
- pipe_->InitOnIOThread(task_runner);
+ pipe_->InitOnIOThread(task_runner, clock);
}
}
void PacketPipe::AppendToPipe(scoped_ptr<PacketPipe> pipe) {
@@ -183,8 +186,9 @@ class RandomSortedDelay : public PacketPipe {
}
}
virtual void InitOnIOThread(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner) OVERRIDE {
- PacketPipe::InitOnIOThread(task_runner);
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ base::TickClock* clock) OVERRIDE {
+ PacketPipe::InitOnIOThread(task_runner, clock);
// As we start the stream, assume that we are in a random
// place between two extra delays, thus multiplier = 1.0;
ScheduleExtraDelay(1.0);
@@ -202,7 +206,7 @@ class RandomSortedDelay : public PacketPipe {
}
void CauseExtraDelay() {
- block_until_ = base::TimeTicks::Now() +
+ block_until_ = clock_->NowTicks() +
base::TimeDelta::FromMicroseconds(
static_cast<int64>(extra_delay_ * 1E6));
// An extra delay just happened, wait up to seconds_between_extra_delay_*2
@@ -264,8 +268,9 @@ class NetworkGlitchPipe : public PacketPipe {
weak_factory_(this) {}
virtual void InitOnIOThread(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner) OVERRIDE {
- PacketPipe::InitOnIOThread(task_runner);
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ base::TickClock* clock) OVERRIDE {
+ PacketPipe::InitOnIOThread(task_runner, clock);
Flip();
}
@@ -300,62 +305,20 @@ scoped_ptr<PacketPipe> NewNetworkGlitchPipe(double average_work_time,
.Pass();
}
+class UDPProxyImpl;
+
class PacketSender : public PacketPipe {
public:
- PacketSender(net::UDPSocket* udp_socket,
- const net::IPEndPoint* destination) :
- blocked_(false),
- udp_socket_(udp_socket),
- destination_(destination),
- weak_factory_(this) {
- }
- virtual void Send(scoped_ptr<transport::Packet> packet) OVERRIDE {
- if (blocked_) {
- LOG(ERROR) << "Cannot write packet right now: blocked";
- return;
- }
-
- VLOG(1) << "Sending packet, len = " << packet->size();
- // We ignore all problems, callbacks and errors.
- // If it didn't work we just drop the packet at and call it a day.
- scoped_refptr<net::IOBuffer> buf =
- new net::WrappedIOBuffer(reinterpret_cast<char*>(&packet->front()));
- size_t buf_size = packet->size();
- int result;
- if (destination_->address().empty()) {
- VLOG(1) << "Destination has not been set yet.";
- result = net::ERR_INVALID_ARGUMENT;
- } else {
- VLOG(1) << "Destination:" << destination_->ToString();
- result = udp_socket_->SendTo(buf,
- static_cast<int>(buf_size),
- *destination_,
- base::Bind(&PacketSender::AllowWrite,
- weak_factory_.GetWeakPtr(),
- buf,
- base::Passed(&packet)));
- }
- if (result == net::ERR_IO_PENDING) {
- blocked_ = true;
- } else if (result < 0) {
- LOG(ERROR) << "Failed to write packet.";
- }
- }
+ PacketSender(UDPProxyImpl* udp_proxy, const net::IPEndPoint* destination)
+ : udp_proxy_(udp_proxy), destination_(destination) {}
+ virtual void Send(scoped_ptr<transport::Packet> packet) OVERRIDE;
virtual void AppendToPipe(scoped_ptr<PacketPipe> pipe) OVERRIDE {
NOTREACHED();
}
private:
- void AllowWrite(scoped_refptr<net::IOBuffer> buf,
- scoped_ptr<transport::Packet> packet,
- int unused_len) {
- DCHECK(blocked_);
- blocked_ = false;
- }
- bool blocked_;
- net::UDPSocket* udp_socket_;
+ UDPProxyImpl* udp_proxy_;
const net::IPEndPoint* destination_; // not owned
- base::WeakPtrFactory<PacketSender> weak_factory_;
};
namespace {
@@ -371,34 +334,34 @@ void BuildPipe(scoped_ptr<PacketPipe>* pipe, PacketPipe* next) {
scoped_ptr<PacketPipe> WifiNetwork() {
// This represents the buffer on the sender.
scoped_ptr<PacketPipe> pipe;
- BuildPipe(&pipe, new Buffer(256 << 10, 5000000));
+ BuildPipe(&pipe, new Buffer(256 << 10, 20));
BuildPipe(&pipe, new RandomDrop(0.005));
// This represents the buffer on the router.
BuildPipe(&pipe, new ConstantDelay(1E-3));
BuildPipe(&pipe, new RandomSortedDelay(1E-3, 20E-3, 3));
- BuildPipe(&pipe, new Buffer(256 << 10, 5000000));
+ BuildPipe(&pipe, new Buffer(256 << 10, 20));
BuildPipe(&pipe, new ConstantDelay(1E-3));
BuildPipe(&pipe, new RandomSortedDelay(1E-3, 20E-3, 3));
BuildPipe(&pipe, new RandomDrop(0.005));
// This represents the buffer on the receiving device.
- BuildPipe(&pipe, new Buffer(256 << 10, 5000000));
+ BuildPipe(&pipe, new Buffer(256 << 10, 20));
return pipe.Pass();
}
scoped_ptr<PacketPipe> BadNetwork() {
scoped_ptr<PacketPipe> pipe;
// This represents the buffer on the sender.
- BuildPipe(&pipe, new Buffer(64 << 10, 5000000)); // 64 kb buf, 5mbit/s
+ BuildPipe(&pipe, new Buffer(64 << 10, 5)); // 64 kb buf, 5mbit/s
BuildPipe(&pipe, new RandomDrop(0.05)); // 5% packet drop
BuildPipe(&pipe, new RandomSortedDelay(2E-3, 20E-3, 1));
// This represents the buffer on the router.
- BuildPipe(&pipe, new Buffer(64 << 10, 2000000)); // 64 kb buf, 2mbit/s
+  BuildPipe(&pipe, new Buffer(64 << 10, 5)); // 64 kb buf, 5mbit/s
BuildPipe(&pipe, new ConstantDelay(1E-3));
// Random 40ms every other second
// BuildPipe(&pipe, new NetworkGlitchPipe(2, 40E-1));
BuildPipe(&pipe, new RandomUnsortedDelay(5E-3));
// This represents the buffer on the receiving device.
- BuildPipe(&pipe, new Buffer(64 << 10, 4000000)); // 64 kb buf, 4mbit/s
+ BuildPipe(&pipe, new Buffer(64 << 10, 5)); // 64 kb buf, 5mbit/s
return pipe.Pass();
}
@@ -406,17 +369,17 @@ scoped_ptr<PacketPipe> BadNetwork() {
scoped_ptr<PacketPipe> EvilNetwork() {
// This represents the buffer on the sender.
scoped_ptr<PacketPipe> pipe;
- BuildPipe(&pipe, new Buffer(4 << 10, 2000000));
+  BuildPipe(&pipe, new Buffer(4 << 10, 5));  // 4 kb buf, 5mbit/s
// This represents the buffer on the router.
BuildPipe(&pipe, new RandomDrop(0.1)); // 10% packet drop
BuildPipe(&pipe, new RandomSortedDelay(20E-3, 60E-3, 1));
- BuildPipe(&pipe, new Buffer(4 << 10, 1000000)); // 4 kb buf, 1mbit/s
+ BuildPipe(&pipe, new Buffer(4 << 10, 2)); // 4 kb buf, 2mbit/s
BuildPipe(&pipe, new RandomDrop(0.1)); // 10% packet drop
BuildPipe(&pipe, new ConstantDelay(1E-3));
BuildPipe(&pipe, new NetworkGlitchPipe(2.0, 0.3));
BuildPipe(&pipe, new RandomUnsortedDelay(20E-3));
// This represents the buffer on the receiving device.
- BuildPipe(&pipe, new Buffer(4 << 10, 2000000)); // 4 kb buf, 2mbit/s
+ BuildPipe(&pipe, new Buffer(4 << 10, 2)); // 4 kb buf, 2mbit/s
return pipe.Pass();
}
@@ -426,12 +389,15 @@ class UDPProxyImpl : public UDPProxy {
const net::IPEndPoint& destination,
scoped_ptr<PacketPipe> to_dest_pipe,
scoped_ptr<PacketPipe> from_dest_pipe,
- net::NetLog* net_log) :
- local_port_(local_port),
- destination_(destination),
- proxy_thread_("media::cast::test::UdpProxy Thread"),
- to_dest_pipe_(to_dest_pipe.Pass()),
- from_dest_pipe_(from_dest_pipe.Pass()) {
+ net::NetLog* net_log)
+ : local_port_(local_port),
+ destination_(destination),
+ destination_is_mutable_(destination.address().empty()),
+ proxy_thread_("media::cast::test::UdpProxy Thread"),
+ to_dest_pipe_(to_dest_pipe.Pass()),
+ from_dest_pipe_(from_dest_pipe.Pass()),
+ blocked_(false),
+ weak_factory_(this) {
proxy_thread_.StartWithOptions(
base::Thread::Options(base::MessageLoop::TYPE_IO, 0));
base::WaitableEvent start_event(false, false);
@@ -455,6 +421,40 @@ class UDPProxyImpl : public UDPProxy {
proxy_thread_.Stop();
}
+ void Send(scoped_ptr<transport::Packet> packet,
+ const net::IPEndPoint& destination) {
+ if (blocked_) {
+ LOG(ERROR) << "Cannot write packet right now: blocked";
+ return;
+ }
+
+ VLOG(1) << "Sending packet, len = " << packet->size();
+ // We ignore all problems, callbacks and errors.
+  // If it didn't work we just drop the packet and call it a day.
+ scoped_refptr<net::IOBuffer> buf =
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&packet->front()));
+ size_t buf_size = packet->size();
+ int result;
+ if (destination.address().empty()) {
+ VLOG(1) << "Destination has not been set yet.";
+ result = net::ERR_INVALID_ARGUMENT;
+ } else {
+ VLOG(1) << "Destination:" << destination.ToString();
+ result = socket_->SendTo(buf,
+ static_cast<int>(buf_size),
+ destination,
+ base::Bind(&UDPProxyImpl::AllowWrite,
+ weak_factory_.GetWeakPtr(),
+ buf,
+ base::Passed(&packet)));
+ }
+ if (result == net::ERR_IO_PENDING) {
+ blocked_ = true;
+ } else if (result < 0) {
+ LOG(ERROR) << "Failed to write packet.";
+ }
+ }
+
private:
void Start(base::WaitableEvent* start_event,
net::NetLog* net_log) {
@@ -462,14 +462,16 @@ class UDPProxyImpl : public UDPProxy {
net::RandIntCallback(),
net_log,
net::NetLog::Source()));
- BuildPipe(&to_dest_pipe_, new PacketSender(socket_.get(), &destination_));
- BuildPipe(&from_dest_pipe_,
- new PacketSender(socket_.get(), &return_address_));
- to_dest_pipe_->InitOnIOThread(base::MessageLoopProxy::current());
- from_dest_pipe_->InitOnIOThread(base::MessageLoopProxy::current());
+ BuildPipe(&to_dest_pipe_, new PacketSender(this, &destination_));
+ BuildPipe(&from_dest_pipe_, new PacketSender(this, &return_address_));
+ to_dest_pipe_->InitOnIOThread(base::MessageLoopProxy::current(),
+ &tick_clock_);
+ from_dest_pipe_->InitOnIOThread(base::MessageLoopProxy::current(),
+ &tick_clock_);
VLOG(0) << "From:" << local_port_.ToString();
- VLOG(0) << "To:" << destination_.ToString();
+ if (!destination_is_mutable_)
+ VLOG(0) << "To:" << destination_.ToString();
CHECK_GE(socket_->Bind(local_port_), 0);
@@ -492,9 +494,16 @@ class UDPProxyImpl : public UDPProxy {
return;
}
packet_->resize(len);
+ if (destination_is_mutable_ && set_destination_next_ &&
+ !(recv_address_ == return_address_) &&
+ !(recv_address_ == destination_)) {
+ destination_ = recv_address_;
+ }
if (recv_address_ == destination_) {
+ set_destination_next_ = false;
from_dest_pipe_->Send(packet_.Pass());
} else {
+ set_destination_next_ = true;
VLOG(1) << "Return address = " << recv_address_.ToString();
return_address_ = recv_address_;
to_dest_pipe_->Send(packet_.Pass());
@@ -524,18 +533,42 @@ class UDPProxyImpl : public UDPProxy {
}
}
+ void AllowWrite(scoped_refptr<net::IOBuffer> buf,
+ scoped_ptr<transport::Packet> packet,
+ int unused_len) {
+ DCHECK(blocked_);
+ blocked_ = false;
+ }
+ // Input
net::IPEndPoint local_port_;
+
net::IPEndPoint destination_;
- net::IPEndPoint recv_address_;
+ bool destination_is_mutable_;
+
net::IPEndPoint return_address_;
+ bool set_destination_next_;
+
+ base::DefaultTickClock tick_clock_;
base::Thread proxy_thread_;
scoped_ptr<net::UDPSocket> socket_;
scoped_ptr<PacketPipe> to_dest_pipe_;
scoped_ptr<PacketPipe> from_dest_pipe_;
+
+ // For receiving.
+ net::IPEndPoint recv_address_;
scoped_ptr<transport::Packet> packet_;
+
+ // For sending.
+ bool blocked_;
+
+ base::WeakPtrFactory<UDPProxyImpl> weak_factory_;
};
+void PacketSender::Send(scoped_ptr<transport::Packet> packet) {
+ udp_proxy_->Send(packet.Pass(), *destination_);
+}
+
scoped_ptr<UDPProxy> UDPProxy::Create(
const net::IPEndPoint& local_port,
const net::IPEndPoint& destination,
diff --git a/media/cast/test/utility/udp_proxy.h b/media/cast/test/utility/udp_proxy.h
index 9dce0cfa43..b102573a94 100644
--- a/media/cast/test/utility/udp_proxy.h
+++ b/media/cast/test/utility/udp_proxy.h
@@ -18,6 +18,10 @@ namespace net {
class NetLog;
};
+namespace base {
+class TickClock;
+};
+
namespace media {
namespace cast {
namespace test {
@@ -29,12 +33,14 @@ class PacketPipe {
virtual void Send(scoped_ptr<transport::Packet> packet) = 0;
// Allows injection of fake test runner for testing.
virtual void InitOnIOThread(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ base::TickClock* clock);
virtual void AppendToPipe(scoped_ptr<PacketPipe> pipe);
protected:
scoped_ptr<PacketPipe> pipe_;
// Allows injection of fake task runner for testing.
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ base::TickClock* clock_;
};
// A UDPProxy will set up a UDP socket and bind to |local_port|.
@@ -90,16 +96,16 @@ scoped_ptr<PacketPipe> NewNetworkGlitchPipe(double average_work_time,
double average_outage_time);
// This method builds a stack of PacketPipes to emulate a reasonably
-// good wifi network. ~5mbit, 1% packet loss, ~3ms latency.
+// good wifi network. ~20mbit, 1% packet loss, ~3ms latency.
scoped_ptr<PacketPipe> WifiNetwork();
// This method builds a stack of PacketPipes to emulate a
-// bad wifi network. ~2mbit, 5% packet loss, ~7ms latency
+// bad wifi network. ~5mbit, 5% packet loss, ~7ms latency
// 40ms dropouts every ~2 seconds. Can reorder packets.
scoped_ptr<PacketPipe> BadNetwork();
// This method builds a stack of PacketPipes to emulate a crappy wifi network.
-// ~1mbit, 20% packet loss, ~40ms latency and packets can get reordered.
+// ~2mbit, 20% packet loss, ~40ms latency and packets can get reordered.
// 300ms drouputs every ~2 seconds.
scoped_ptr<PacketPipe> EvilNetwork();
diff --git a/media/cast/test/utility/udp_proxy_main.cc b/media/cast/test/utility/udp_proxy_main.cc
index 0202d8cb14..800f09dc70 100644
--- a/media/cast/test/utility/udp_proxy_main.cc
+++ b/media/cast/test/utility/udp_proxy_main.cc
@@ -116,9 +116,11 @@ void CheckByteCounters() {
}
int main(int argc, char** argv) {
- if (argc < 5) {
+ if (argc != 5 && argc != 3) {
fprintf(stderr,
"Usage: udp_proxy <localport> <remotehost> <remoteport> <type>\n"
+ "or:\n"
+ " udp_proxy <localport> <type>\n"
"Where type is one of: perfect, wifi, bad, evil\n");
exit(1);
}
@@ -127,18 +129,26 @@ int main(int argc, char** argv) {
CommandLine::Init(argc, argv);
InitLogging(logging::LoggingSettings());
- int local_port = atoi(argv[1]);
- int remote_port = atoi(argv[3]);
net::IPAddressNumber remote_ip_number;
net::IPAddressNumber local_ip_number;
-
- CHECK(net::ParseIPLiteralToNumber(argv[2], &remote_ip_number));
+ std::string network_type;
+ int local_port = atoi(argv[1]);
+ int remote_port = 0;
CHECK(net::ParseIPLiteralToNumber("0.0.0.0", &local_ip_number));
+
+ if (argc == 5) {
+ // V2 proxy
+ CHECK(net::ParseIPLiteralToNumber(argv[2], &remote_ip_number));
+ remote_port = atoi(argv[3]);
+ network_type = argv[4];
+ } else {
+ // V1 proxy
+ network_type = argv[2];
+ }
net::IPEndPoint remote_endpoint(remote_ip_number, remote_port);
net::IPEndPoint local_endpoint(local_ip_number, local_port);
-
scoped_ptr<media::cast::test::PacketPipe> in_pipe, out_pipe;
- std::string network_type = argv[4];
+
if (network_type == "perfect") {
// No action needed.
} else if (network_type == "wifi") {
diff --git a/media/cast/transport/cast_transport_config.cc b/media/cast/transport/cast_transport_config.cc
index b6dba52150..2c40a19d0a 100644
--- a/media/cast/transport/cast_transport_config.cc
+++ b/media/cast/transport/cast_transport_config.cc
@@ -33,21 +33,13 @@ CastTransportVideoConfig::CastTransportVideoConfig() : codec(kVp8) {}
CastTransportVideoConfig::~CastTransportVideoConfig() {}
-EncodedVideoFrame::EncodedVideoFrame()
- : codec(kVp8),
- key_frame(false),
+EncodedFrame::EncodedFrame()
+ : dependency(UNKNOWN_DEPENDENCY),
frame_id(0),
- last_referenced_frame_id(0),
+ referenced_frame_id(0),
rtp_timestamp(0) {}
-EncodedVideoFrame::~EncodedVideoFrame() {}
-EncodedAudioFrame::EncodedAudioFrame()
- : codec(kOpus), frame_id(0), rtp_timestamp(0) {}
-EncodedAudioFrame::~EncodedAudioFrame() {}
-
-RtcpSenderFrameLogMessage::RtcpSenderFrameLogMessage()
- : frame_status(kRtcpSenderFrameStatusUnknown), rtp_timestamp(0) {}
-RtcpSenderFrameLogMessage::~RtcpSenderFrameLogMessage() {}
+EncodedFrame::~EncodedFrame() {}
RtcpSenderInfo::RtcpSenderInfo()
: ntp_seconds(0),
diff --git a/media/cast/transport/cast_transport_config.h b/media/cast/transport/cast_transport_config.h
index d72d1f8e32..683587080b 100644
--- a/media/cast/transport/cast_transport_config.h
+++ b/media/cast/transport/cast_transport_config.h
@@ -12,6 +12,7 @@
#include "base/callback.h"
#include "base/memory/linked_ptr.h"
#include "base/memory/ref_counted.h"
+#include "base/stl_util.h"
#include "media/cast/transport/cast_transport_defines.h"
#include "net/base/ip_endpoint.h"
@@ -24,14 +25,19 @@ enum RtcpMode {
kRtcpReducedSize, // Reduced-size RTCP mode is described by RFC 5506.
};
-enum VideoCodec { kFakeSoftwareVideo, kVp8, kH264, kVideoCodecLast = kH264 };
+enum VideoCodec {
+ kUnknownVideoCodec,
+ kFakeSoftwareVideo,
+ kVp8,
+ kH264,
+ kVideoCodecLast = kH264
+};
enum AudioCodec {
- kFakeSoftwareAudio,
+ kUnknownAudioCodec,
kOpus,
kPcm16,
- kExternalAudio,
- kAudioCodecLast = kExternalAudio
+ kAudioCodecLast = kPcm16
};
struct RtpConfig {
@@ -69,27 +75,68 @@ struct CastTransportVideoConfig {
VideoCodec codec;
};
-struct EncodedVideoFrame {
- EncodedVideoFrame();
- ~EncodedVideoFrame();
+// A combination of metadata and data for one encoded frame. This can contain
+// audio data or video data or other.
+struct EncodedFrame {
+ enum Dependency {
+ // "null" value, used to indicate whether |dependency| has been set.
+ UNKNOWN_DEPENDENCY,
- VideoCodec codec;
- bool key_frame;
+ // Not decodable without the reference frame indicated by
+ // |referenced_frame_id|.
+ DEPENDENT,
+
+ // Independently decodable.
+ INDEPENDENT,
+
+ // Independently decodable, and no future frames will depend on any frames
+ // before this one.
+ KEY,
+
+ DEPENDENCY_LAST = KEY
+ };
+
+ EncodedFrame();
+ ~EncodedFrame();
+
+ // Convenience accessors to data as an array of uint8 elements.
+ const uint8* bytes() const {
+ return reinterpret_cast<uint8*>(string_as_array(
+ const_cast<std::string*>(&data)));
+ }
+ uint8* mutable_bytes() {
+ return reinterpret_cast<uint8*>(string_as_array(&data));
+ }
+
+ // This frame's dependency relationship with respect to other frames.
+ Dependency dependency;
+
+ // The label associated with this frame. Implies an ordering relative to
+ // other frames in the same stream.
uint32 frame_id;
- uint32 last_referenced_frame_id;
- uint32 rtp_timestamp;
- std::string data;
-};
-struct EncodedAudioFrame {
- EncodedAudioFrame();
- ~EncodedAudioFrame();
+ // The label associated with the frame upon which this frame depends. If
+ // this frame does not require any other frame in order to become decodable
+ // (e.g., key frames), |referenced_frame_id| must equal |frame_id|.
+ uint32 referenced_frame_id;
- AudioCodec codec;
- uint32 frame_id; // Needed to release the frame.
+ // The stream timestamp, on the timeline of the signal data. For example, RTP
+ // timestamps for audio are usually defined as the total number of audio
+ // samples encoded in all prior frames. A playback system uses this value to
+ // detect gaps in the stream, and otherwise stretch the signal to match
+ // playout targets.
uint32 rtp_timestamp;
- // Support for max sampling rate of 48KHz, 2 channels, 100 ms duration.
- static const int kMaxNumberOfSamples = 48 * 2 * 100;
+
+ // The common reference clock timestamp for this frame. This value originates
+ // from a sender and is used to provide lip synchronization between streams in
+ // a receiver. Thus, in the sender context, this is set to the time at which
+ // the frame was captured/recorded. In the receiver context, this is set to
+ // the target playout time. Over a sequence of frames, this time value is
+ // expected to drift with respect to the elapsed time implied by the RTP
+ // timestamps; and it may not necessarily increment with precise regularity.
+ base::TimeTicks reference_time;
+
+ // The encoded signal data.
std::string data;
};
@@ -110,25 +157,6 @@ class PacketSender {
virtual ~PacketSender() {}
};
-// Log messages form sender to receiver.
-// TODO(mikhal): Refactor to Chromium style (MACRO_STYLE).
-enum RtcpSenderFrameStatus {
- kRtcpSenderFrameStatusUnknown = 0,
- kRtcpSenderFrameStatusDroppedByEncoder = 1,
- kRtcpSenderFrameStatusDroppedByFlowControl = 2,
- kRtcpSenderFrameStatusSentToNetwork = 3,
- kRtcpSenderFrameStatusLast = kRtcpSenderFrameStatusSentToNetwork
-};
-
-struct RtcpSenderFrameLogMessage {
- RtcpSenderFrameLogMessage();
- ~RtcpSenderFrameLogMessage();
- RtcpSenderFrameStatus frame_status;
- uint32 rtp_timestamp;
-};
-
-typedef std::vector<RtcpSenderFrameLogMessage> RtcpSenderLogMessage;
-
struct RtcpSenderInfo {
RtcpSenderInfo();
~RtcpSenderInfo();
@@ -169,6 +197,9 @@ struct SendRtcpFromRtpSenderData {
uint32 packet_type_flags;
uint32 sending_ssrc;
std::string c_name;
+ uint32 ntp_seconds;
+ uint32 ntp_fraction;
+ uint32 rtp_timestamp;
};
inline bool operator==(RtcpSenderInfo lhs, RtcpSenderInfo rhs) {
diff --git a/media/cast/transport/cast_transport_sender.h b/media/cast/transport/cast_transport_sender.h
index 605cd0472a..a69b7418f9 100644
--- a/media/cast/transport/cast_transport_sender.h
+++ b/media/cast/transport/cast_transport_sender.h
@@ -45,10 +45,6 @@ namespace transport {
typedef base::Callback<void(CastTransportStatus status)>
CastTransportStatusCallback;
-typedef base::Callback<void(const RtcpSenderInfo& sender_info,
- base::TimeTicks time_sent,
- uint32 rtp_timestamp)> CastTransportRtpStatistics;
-
typedef base::Callback<void(const std::vector<PacketEvent>&)>
BulkRawEventsCallback;
@@ -81,17 +77,17 @@ class CastTransportSender : public base::NonThreadSafe {
// The following two functions handle the encoded media frames (audio and
// video) to be processed.
// Frames will be encrypted, packetized and transmitted to the network.
- virtual void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time) = 0;
-
- virtual void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time) = 0;
+ virtual void InsertCodedAudioFrame(const EncodedFrame& audio_frame) = 0;
+ virtual void InsertCodedVideoFrame(const EncodedFrame& video_frame) = 0;
// Builds an RTCP packet and sends it to the network.
+ // |ntp_seconds|, |ntp_fraction| and |rtp_timestamp| are used in the
+ // RTCP Sender Report.
virtual void SendRtcpFromRtpSender(uint32 packet_type_flags,
- const RtcpSenderInfo& sender_info,
+ uint32 ntp_seconds,
+ uint32 ntp_fraction,
+ uint32 rtp_timestamp,
const RtcpDlrrReportBlock& dlrr,
- const RtcpSenderLogMessage& sender_log,
uint32 sending_ssrc,
const std::string& c_name) = 0;
@@ -99,15 +95,6 @@ class CastTransportSender : public base::NonThreadSafe {
virtual void ResendPackets(
bool is_audio,
const MissingFramesAndPacketsMap& missing_packets) = 0;
-
- // RTP statistics will be returned on a regular interval on the designated
- // callback.
- // Must be called after initialization of the corresponding A/V pipeline.
- virtual void SubscribeAudioRtpStatsCallback(
- const CastTransportRtpStatistics& callback) = 0;
-
- virtual void SubscribeVideoRtpStatsCallback(
- const CastTransportRtpStatistics& callback) = 0;
};
} // namespace transport
diff --git a/media/cast/transport/cast_transport_sender_impl.cc b/media/cast/transport/cast_transport_sender_impl.cc
index d8ab4a42b7..8f07832d1c 100644
--- a/media/cast/transport/cast_transport_sender_impl.cc
+++ b/media/cast/transport/cast_transport_sender_impl.cc
@@ -100,28 +100,41 @@ void CastTransportSenderImpl::SetPacketReceiver(
}
void CastTransportSenderImpl::InsertCodedAudioFrame(
- const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time) {
+ const EncodedFrame& audio_frame) {
DCHECK(audio_sender_) << "Audio sender uninitialized";
- audio_sender_->InsertCodedAudioFrame(audio_frame, recorded_time);
+ audio_sender_->SendFrame(audio_frame);
}
void CastTransportSenderImpl::InsertCodedVideoFrame(
- const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time) {
+ const EncodedFrame& video_frame) {
DCHECK(video_sender_) << "Video sender uninitialized";
- video_sender_->InsertCodedVideoFrame(video_frame, capture_time);
+ video_sender_->SendFrame(video_frame);
}
void CastTransportSenderImpl::SendRtcpFromRtpSender(
uint32 packet_type_flags,
- const RtcpSenderInfo& sender_info,
+ uint32 ntp_seconds,
+ uint32 ntp_fraction,
+ uint32 rtp_timestamp,
const RtcpDlrrReportBlock& dlrr,
- const RtcpSenderLogMessage& sender_log,
uint32 sending_ssrc,
const std::string& c_name) {
+ RtcpSenderInfo sender_info;
+ sender_info.ntp_seconds = ntp_seconds;
+ sender_info.ntp_fraction = ntp_fraction;
+ sender_info.rtp_timestamp = rtp_timestamp;
+ if (audio_sender_ && audio_sender_->ssrc() == sending_ssrc) {
+ sender_info.send_packet_count = audio_sender_->send_packet_count();
+ sender_info.send_octet_count = audio_sender_->send_octet_count();
+ } else if (video_sender_ && video_sender_->ssrc() == sending_ssrc) {
+ sender_info.send_packet_count = video_sender_->send_packet_count();
+ sender_info.send_octet_count = video_sender_->send_octet_count();
+ } else {
+ LOG(ERROR) << "Sending RTCP with an invalid SSRC.";
+ return;
+ }
rtcp_builder_.SendRtcpFromRtpSender(
- packet_type_flags, sender_info, dlrr, sender_log, sending_ssrc, c_name);
+ packet_type_flags, sender_info, dlrr, sending_ssrc, c_name);
}
void CastTransportSenderImpl::ResendPackets(
@@ -136,18 +149,6 @@ void CastTransportSenderImpl::ResendPackets(
}
}
-void CastTransportSenderImpl::SubscribeAudioRtpStatsCallback(
- const CastTransportRtpStatistics& callback) {
- DCHECK(audio_sender_) << "Audio sender uninitialized";
- audio_sender_->SubscribeAudioRtpStatsCallback(callback);
-}
-
-void CastTransportSenderImpl::SubscribeVideoRtpStatsCallback(
- const CastTransportRtpStatistics& callback) {
- DCHECK(video_sender_) << "Video sender uninitialized";
- video_sender_->SubscribeVideoRtpStatsCallback(callback);
-}
-
void CastTransportSenderImpl::SendRawEvents() {
DCHECK(event_subscriber_.get());
DCHECK(!raw_events_callback_.is_null());
diff --git a/media/cast/transport/cast_transport_sender_impl.h b/media/cast/transport/cast_transport_sender_impl.h
index dd812174cf..91b03c52be 100644
--- a/media/cast/transport/cast_transport_sender_impl.h
+++ b/media/cast/transport/cast_transport_sender_impl.h
@@ -54,18 +54,14 @@ class CastTransportSenderImpl : public CastTransportSender {
virtual void SetPacketReceiver(const PacketReceiverCallback& packet_receiver)
OVERRIDE;
- virtual void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time)
- OVERRIDE;
-
- virtual void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time)
- OVERRIDE;
+ virtual void InsertCodedAudioFrame(const EncodedFrame& audio_frame) OVERRIDE;
+ virtual void InsertCodedVideoFrame(const EncodedFrame& video_frame) OVERRIDE;
virtual void SendRtcpFromRtpSender(uint32 packet_type_flags,
- const RtcpSenderInfo& sender_info,
+ uint32 ntp_seconds,
+ uint32 ntp_fraction,
+ uint32 rtp_timestamp,
const RtcpDlrrReportBlock& dlrr,
- const RtcpSenderLogMessage& sender_log,
uint32 sending_ssrc,
const std::string& c_name) OVERRIDE;
@@ -73,12 +69,6 @@ class CastTransportSenderImpl : public CastTransportSender {
const MissingFramesAndPacketsMap& missing_packets)
OVERRIDE;
- virtual void SubscribeAudioRtpStatsCallback(
- const CastTransportRtpStatistics& callback) OVERRIDE;
-
- virtual void SubscribeVideoRtpStatsCallback(
- const CastTransportRtpStatistics& callback) OVERRIDE;
-
private:
// If |raw_events_callback_| is non-null, calls it with events collected
// by |event_subscriber_| since last call.
diff --git a/media/cast/transport/pacing/mock_paced_packet_sender.h b/media/cast/transport/pacing/mock_paced_packet_sender.h
index 8fb3f9f1c6..9f6d204f7f 100644
--- a/media/cast/transport/pacing/mock_paced_packet_sender.h
+++ b/media/cast/transport/pacing/mock_paced_packet_sender.h
@@ -20,6 +20,7 @@ class MockPacedPacketSender : public PacedPacketSender {
MOCK_METHOD1(SendPackets, bool(const SendPacketVector& packets));
MOCK_METHOD1(ResendPackets, bool(const SendPacketVector& packets));
MOCK_METHOD2(SendRtcpPacket, bool(unsigned int ssrc, PacketRef packet));
+ MOCK_METHOD1(CancelSendingPacket, void(const PacketKey& packet_key));
};
} // namespace transport
diff --git a/media/cast/transport/pacing/paced_sender.cc b/media/cast/transport/pacing/paced_sender.cc
index d4b502119b..10b4224075 100644
--- a/media/cast/transport/pacing/paced_sender.cc
+++ b/media/cast/transport/pacing/paced_sender.cc
@@ -21,21 +21,9 @@ static const size_t kPacingMaxBurstsPerFrame = 3;
static const size_t kTargetBurstSize = 10;
static const size_t kMaxBurstSize = 20;
-using media::cast::CastLoggingEvent;
-
-CastLoggingEvent GetLoggingEvent(bool is_audio, bool retransmit) {
- if (retransmit) {
- return is_audio ? media::cast::kAudioPacketRetransmitted
- : media::cast::kVideoPacketRetransmitted;
- } else {
- return is_audio ? media::cast::kAudioPacketSentToNetwork
- : media::cast::kVideoPacketSentToNetwork;
- }
-}
-
} // namespace
-
+// static
PacketKey PacedPacketSender::MakePacketKey(const base::TimeTicks& ticks,
uint32 ssrc,
uint16 packet_id) {
@@ -116,6 +104,10 @@ bool PacedSender::SendRtcpPacket(uint32 ssrc, PacketRef packet) {
return true;
}
+void PacedSender::CancelSendingPacket(const PacketKey& packet_key) {
+ packet_list_.erase(packet_key);
+}
+
PacketRef PacedSender::GetNextPacket(PacketType* packet_type) {
std::map<PacketKey, std::pair<PacketType, PacketRef> >::iterator i;
i = packet_list_.begin();
@@ -232,9 +224,11 @@ void PacedSender::LogPacketEvent(const Packet& packet, bool retransmit) {
return;
}
- CastLoggingEvent event = GetLoggingEvent(is_audio, retransmit);
-
- logging_->InsertSinglePacketEvent(clock_->NowTicks(), event, packet);
+ CastLoggingEvent event = retransmit ?
+ PACKET_RETRANSMITTED : PACKET_SENT_TO_NETWORK;
+ EventMediaType media_type = is_audio ? AUDIO_EVENT : VIDEO_EVENT;
+ logging_->InsertSinglePacketEvent(clock_->NowTicks(), event, media_type,
+ packet);
}
} // namespace transport
diff --git a/media/cast/transport/pacing/paced_sender.h b/media/cast/transport/pacing/paced_sender.h
index fccd0b5625..2373fb5966 100644
--- a/media/cast/transport/pacing/paced_sender.h
+++ b/media/cast/transport/pacing/paced_sender.h
@@ -43,6 +43,7 @@ class PacedPacketSender {
virtual bool SendPackets(const SendPacketVector& packets) = 0;
virtual bool ResendPackets(const SendPacketVector& packets) = 0;
virtual bool SendRtcpPacket(uint32 ssrc, PacketRef packet) = 0;
+ virtual void CancelSendingPacket(const PacketKey& packet_key) = 0;
virtual ~PacedPacketSender() {}
@@ -73,6 +74,7 @@ class PacedSender : public PacedPacketSender,
virtual bool SendPackets(const SendPacketVector& packets) OVERRIDE;
virtual bool ResendPackets(const SendPacketVector& packets) OVERRIDE;
virtual bool SendRtcpPacket(uint32 ssrc, PacketRef packet) OVERRIDE;
+ virtual void CancelSendingPacket(const PacketKey& packet_key) OVERRIDE;
private:
// Actually sends the packets to the transport.
diff --git a/media/cast/transport/pacing/paced_sender_unittest.cc b/media/cast/transport/pacing/paced_sender_unittest.cc
index 8c078897d8..ef9d89b5f0 100644
--- a/media/cast/transport/pacing/paced_sender_unittest.cc
+++ b/media/cast/transport/pacing/paced_sender_unittest.cc
@@ -174,7 +174,7 @@ TEST_F(PacedSenderTest, BasicPace) {
for (std::vector<PacketEvent>::iterator it = packet_events.begin();
it != packet_events.end();
++it) {
- if (it->type == kVideoPacketSentToNetwork)
+ if (it->type == PACKET_SENT_TO_NETWORK)
sent_to_network_event_count++;
else
FAIL() << "Got unexpected event type " << CastLoggingToString(it->type);
@@ -254,14 +254,17 @@ TEST_F(PacedSenderTest, PaceWithNack) {
for (std::vector<PacketEvent>::iterator it = packet_events.begin();
it != packet_events.end();
++it) {
- if (it->type == kVideoPacketSentToNetwork)
- video_network_event_count++;
- else if (it->type == kVideoPacketRetransmitted)
- video_retransmitted_event_count++;
- else if (it->type == kAudioPacketSentToNetwork)
- audio_network_event_count++;
- else
+ if (it->type == PACKET_SENT_TO_NETWORK) {
+ if (it->media_type == VIDEO_EVENT)
+ video_network_event_count++;
+ else
+ audio_network_event_count++;
+ } else if (it->type == PACKET_RETRANSMITTED) {
+ if (it->media_type == VIDEO_EVENT)
+ video_retransmitted_event_count++;
+ } else {
FAIL() << "Got unexpected event type " << CastLoggingToString(it->type);
+ }
}
EXPECT_EQ(expected_audio_network_event_count, audio_network_event_count);
EXPECT_EQ(expected_video_network_event_count, video_network_event_count);
diff --git a/media/cast/transport/rtcp/rtcp_builder.cc b/media/cast/transport/rtcp/rtcp_builder.cc
index 38cf0ee05a..b8875fc96b 100644
--- a/media/cast/transport/rtcp/rtcp_builder.cc
+++ b/media/cast/transport/rtcp/rtcp_builder.cc
@@ -13,19 +13,10 @@
#include "media/cast/transport/cast_transport_defines.h"
#include "media/cast/transport/pacing/paced_sender.h"
-static const size_t kRtcpCastLogHeaderSize = 12;
-static const size_t kRtcpSenderFrameLogSize = 4;
-
namespace media {
namespace cast {
namespace transport {
-namespace {
-// RFC 3550 page 44, including end null.
-static const uint32 kCast = ('C' << 24) + ('A' << 16) + ('S' << 8) + 'T';
-static const uint8 kSenderLogSubtype = 1;
-};
-
RtcpBuilder::RtcpBuilder(PacedSender* const outgoing_transport)
: transport_(outgoing_transport),
ssrc_(0) {
@@ -37,7 +28,6 @@ void RtcpBuilder::SendRtcpFromRtpSender(
uint32 packet_type_flags,
const RtcpSenderInfo& sender_info,
const RtcpDlrrReportBlock& dlrr,
- const RtcpSenderLogMessage& sender_log,
uint32 sending_ssrc,
const std::string& c_name) {
if (packet_type_flags & kRtcpRr ||
@@ -64,9 +54,6 @@ void RtcpBuilder::SendRtcpFromRtpSender(
if (packet_type_flags & kRtcpDlrr) {
if (!BuildDlrrRb(dlrr, &packet->data)) return;
}
- if (packet_type_flags & kRtcpSenderLog) {
- if (!BuildSenderLog(sender_log, &packet->data)) return;
- }
if (packet->data.empty())
return; // Sanity - don't send empty packets.
@@ -205,48 +192,6 @@ bool RtcpBuilder::BuildDlrrRb(const RtcpDlrrReportBlock& dlrr,
return true;
}
-bool RtcpBuilder::BuildSenderLog(const RtcpSenderLogMessage& sender_log_message,
- Packet* packet) const {
- DCHECK(packet);
- size_t start_size = packet->size();
- size_t remaining_space = kMaxIpPacketSize - start_size;
- if (remaining_space < kRtcpCastLogHeaderSize + kRtcpSenderFrameLogSize) {
- DLOG(FATAL) << "Not enough buffer space";
- return false;
- }
-
- size_t space_for_x_messages =
- (remaining_space - kRtcpCastLogHeaderSize) / kRtcpSenderFrameLogSize;
- size_t number_of_messages = std::min(space_for_x_messages,
- sender_log_message.size());
-
- size_t log_size = kRtcpCastLogHeaderSize +
- number_of_messages * kRtcpSenderFrameLogSize;
- packet->resize(start_size + log_size);
-
- base::BigEndianWriter big_endian_writer(
- reinterpret_cast<char*>(&((*packet)[start_size])), log_size);
- big_endian_writer.WriteU8(0x80 + kSenderLogSubtype);
- big_endian_writer.WriteU8(kPacketTypeApplicationDefined);
- big_endian_writer.WriteU16(static_cast<uint16>(2 + number_of_messages));
- big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
- big_endian_writer.WriteU32(kCast);
-
- std::vector<RtcpSenderFrameLogMessage>::const_iterator it =
- sender_log_message.begin();
- for (; number_of_messages > 0; --number_of_messages) {
- DCHECK(!sender_log_message.empty());
- const RtcpSenderFrameLogMessage& message = *it;
- big_endian_writer.WriteU8(static_cast<uint8>(message.frame_status));
- // We send the 24 east significant bits of the RTP timestamp.
- big_endian_writer.WriteU8(static_cast<uint8>(message.rtp_timestamp >> 16));
- big_endian_writer.WriteU8(static_cast<uint8>(message.rtp_timestamp >> 8));
- big_endian_writer.WriteU8(static_cast<uint8>(message.rtp_timestamp));
- ++it;
- }
- return true;
-}
-
} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/transport/rtcp/rtcp_builder.h b/media/cast/transport/rtcp/rtcp_builder.h
index dac5f9b442..f095ae9ee5 100644
--- a/media/cast/transport/rtcp/rtcp_builder.h
+++ b/media/cast/transport/rtcp/rtcp_builder.h
@@ -25,7 +25,6 @@ class RtcpBuilder {
void SendRtcpFromRtpSender(uint32 packet_type_flags,
const RtcpSenderInfo& sender_info,
const RtcpDlrrReportBlock& dlrr,
- const RtcpSenderLogMessage& sender_log,
uint32 ssrc,
const std::string& c_name);
@@ -35,8 +34,6 @@ class RtcpBuilder {
bool BuildBye(Packet* packet) const;
bool BuildDlrrRb(const RtcpDlrrReportBlock& dlrr,
Packet* packet) const;
- bool BuildSenderLog(const RtcpSenderLogMessage& sender_log_message,
- Packet* packet) const;
PacedSender* const transport_; // Not owned by this class.
uint32 ssrc_;
diff --git a/media/cast/transport/rtcp/rtcp_builder_unittest.cc b/media/cast/transport/rtcp/rtcp_builder_unittest.cc
index 1de8755be4..0322612f27 100644
--- a/media/cast/transport/rtcp/rtcp_builder_unittest.cc
+++ b/media/cast/transport/rtcp/rtcp_builder_unittest.cc
@@ -128,7 +128,7 @@ TEST_F(RtcpBuilderTest, RtcpSenderReportWithDlrr) {
EXPECT_EQ(1, test_transport_.packet_count());
}
-TEST_F(RtcpBuilderTest, RtcpSenderReportWithDlrrAndLog) {
+TEST_F(RtcpBuilderTest, RtcpSenderReportWithDlrr) {
RtcpSenderInfo sender_info;
sender_info.ntp_seconds = kNtpHigh;
sender_info.ntp_fraction = kNtpLow;
@@ -142,8 +142,6 @@ TEST_F(RtcpBuilderTest, RtcpSenderReportWithDlrrAndLog) {
p.AddSdesCname(kSendingSsrc, kCName);
p.AddXrHeader(kSendingSsrc);
p.AddXrDlrrBlock(kSendingSsrc);
- p.AddSenderLog(kSendingSsrc);
- p.AddSenderFrameLog(kRtcpSenderFrameStatusSentToNetwork, kRtpTimestamp);
test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
@@ -151,65 +149,15 @@ TEST_F(RtcpBuilderTest, RtcpSenderReportWithDlrrAndLog) {
dlrr_rb.last_rr = kLastRr;
dlrr_rb.delay_since_last_rr = kDelayLastRr;
- RtcpSenderFrameLogMessage sender_frame_log;
- sender_frame_log.frame_status = kRtcpSenderFrameStatusSentToNetwork;
- sender_frame_log.rtp_timestamp = kRtpTimestamp;
-
- RtcpSenderLogMessage sender_log;
- sender_log.push_back(sender_frame_log);
-
rtcp_builder_->SendRtcpFromRtpSender(
RtcpBuilder::kRtcpSr | RtcpBuilder::kRtcpDlrr |
RtcpBuilder::kRtcpSenderLog,
&sender_info,
&dlrr_rb,
- &sender_log,
- kSendingSsrc,
- kCName);
-
- EXPECT_EQ(1, test_transport_.packet_count());
- EXPECT_TRUE(sender_log.empty());
-}
-
-TEST_F(RtcpBuilderTest, RtcpSenderReporWithTooManyLogFrames) {
- RtcpSenderInfo sender_info;
- sender_info.ntp_seconds = kNtpHigh;
- sender_info.ntp_fraction = kNtpLow;
- sender_info.rtp_timestamp = kRtpTimestamp;
- sender_info.send_packet_count = kSendPacketCount;
- sender_info.send_octet_count = kSendOctetCount;
-
- // Sender report + c_name + sender log.
- TestRtcpPacketBuilder p;
- p.AddSr(kSendingSsrc, 0);
- p.AddSdesCname(kSendingSsrc, kCName);
- p.AddSenderLog(kSendingSsrc);
-
- for (int i = 0; i < 359; ++i) {
- p.AddSenderFrameLog(kRtcpSenderFrameStatusSentToNetwork,
- kRtpTimestamp + i * 90);
- }
- test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
-
-
- RtcpSenderLogMessage sender_log;
- for (int j = 0; j < 400; ++j) {
- RtcpSenderFrameLogMessage sender_frame_log;
- sender_frame_log.frame_status = kRtcpSenderFrameStatusSentToNetwork;
- sender_frame_log.rtp_timestamp = kRtpTimestamp + j * 90;
- sender_log.push_back(sender_frame_log);
- }
-
- rtcp_builder_->SendRtcpFromRtpSender(
- RtcpBuilder::kRtcpSr | RtcpBuilder::kRtcpSenderLog,
- &sender_info,
- NULL,
- &sender_log,
kSendingSsrc,
kCName);
EXPECT_EQ(1, test_transport_.packet_count());
- EXPECT_EQ(41u, sender_log.size());
}
} // namespace cast
diff --git a/media/cast/transport/rtp_sender/mock_rtp_sender.h b/media/cast/transport/rtp_sender/mock_rtp_sender.h
deleted file mode 100644
index 6bc54d54bd..0000000000
--- a/media/cast/transport/rtp_sender/mock_rtp_sender.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_TRANSPORT_RTP_SENDER_MOCK_RTP_SENDER_H_
-#define MEDIA_CAST_TRANSPORT_RTP_SENDER_MOCK_RTP_SENDER_H_
-
-#include <vector>
-
-#include "media/cast/transport/rtp_sender/rtp_sender.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-namespace transport {
-
-class MockRtpSender : public RtpSender {
- public:
- MOCK_METHOD2(IncomingEncodedVideoFrame,
- bool(const EncodedVideoFrame& frame, int64 capture_time));
-
- MOCK_METHOD2(IncomingEncodedAudioFrame,
- bool(const EncodedAudioFrame& frame, int64 recorded_time));
-
- MOCK_METHOD3(ResendPacket,
- bool(bool is_audio, uint32 frame_id, uint16 packet_id));
-
- MOCK_METHOD0(RtpStatistics, void());
-};
-
-} // namespace transport
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_TRANSPORT_RTP_SENDER_MOCK_RTP_SENDER_H_
diff --git a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc b/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc
index 3a72faf085..50ec42b7d2 100644
--- a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc
+++ b/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc
@@ -38,79 +38,32 @@ RtpPacketizer::RtpPacketizer(PacedSender* const transport,
sequence_number_(config_.sequence_number),
rtp_timestamp_(0),
packet_id_(0),
- send_packets_count_(0),
+ send_packet_count_(0),
send_octet_count_(0) {
DCHECK(transport) << "Invalid argument";
}
RtpPacketizer::~RtpPacketizer() {}
-void RtpPacketizer::IncomingEncodedVideoFrame(
- const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time) {
- DCHECK(!config_.audio) << "Invalid state";
- if (config_.audio)
- return;
-
- Cast(video_frame->key_frame,
- video_frame->frame_id,
- video_frame->last_referenced_frame_id,
- video_frame->rtp_timestamp,
- video_frame->data,
- capture_time);
-}
-
-void RtpPacketizer::IncomingEncodedAudioFrame(
- const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time) {
- DCHECK(config_.audio) << "Invalid state";
- if (!config_.audio)
- return;
-
- Cast(true,
- audio_frame->frame_id,
- 0,
- audio_frame->rtp_timestamp,
- audio_frame->data,
- recorded_time);
-}
-
uint16 RtpPacketizer::NextSequenceNumber() {
++sequence_number_;
return sequence_number_ - 1;
}
-bool RtpPacketizer::LastSentTimestamp(base::TimeTicks* time_sent,
- uint32* rtp_timestamp) const {
- if (time_last_sent_rtp_timestamp_.is_null())
- return false;
-
- *time_sent = time_last_sent_rtp_timestamp_;
- *rtp_timestamp = rtp_timestamp_;
- return true;
-}
-
-// TODO(mikhal): Switch to pass data with a const_ref.
-void RtpPacketizer::Cast(bool is_key,
- uint32 frame_id,
- uint32 reference_frame_id,
- uint32 timestamp,
- const std::string& data,
- const base::TimeTicks& capture_time) {
- time_last_sent_rtp_timestamp_ = capture_time;
+void RtpPacketizer::SendFrameAsPackets(const EncodedFrame& frame) {
uint16 rtp_header_length = kCommonRtpHeaderLength + kCastRtpHeaderLength;
uint16 max_length = config_.max_payload_length - rtp_header_length - 1;
- rtp_timestamp_ = timestamp;
+ rtp_timestamp_ = frame.rtp_timestamp;
// Split the payload evenly (round number up).
- size_t num_packets = (data.size() + max_length) / max_length;
- size_t payload_length = (data.size() + num_packets) / num_packets;
+ size_t num_packets = (frame.data.size() + max_length) / max_length;
+ size_t payload_length = (frame.data.size() + num_packets) / num_packets;
DCHECK_LE(payload_length, max_length) << "Invalid argument";
SendPacketVector packets;
- size_t remaining_size = data.size();
- std::string::const_iterator data_iter = data.begin();
+ size_t remaining_size = frame.data.size();
+ std::string::const_iterator data_iter = frame.data.begin();
while (remaining_size > 0) {
PacketRef packet(new base::RefCountedData<Packet>);
@@ -118,36 +71,40 @@ void RtpPacketizer::Cast(bool is_key,
payload_length = remaining_size;
}
remaining_size -= payload_length;
- BuildCommonRTPheader(&packet->data, remaining_size == 0, timestamp);
+ BuildCommonRTPheader(
+ &packet->data, remaining_size == 0, frame.rtp_timestamp);
// Build Cast header.
- packet->data.push_back((is_key ? kCastKeyFrameBitMask : 0) |
- kCastReferenceFrameIdBitMask);
- packet->data.push_back(frame_id);
+ // TODO(miu): Should we always set the ref frame bit and the ref_frame_id?
+ DCHECK_NE(frame.dependency, EncodedFrame::UNKNOWN_DEPENDENCY);
+ packet->data.push_back(
+ ((frame.dependency == EncodedFrame::KEY) ? kCastKeyFrameBitMask : 0) |
+ kCastReferenceFrameIdBitMask);
+ packet->data.push_back(static_cast<uint8>(frame.frame_id));
size_t start_size = packet->data.size();
packet->data.resize(start_size + 4);
base::BigEndianWriter big_endian_writer(
reinterpret_cast<char*>(&(packet->data[start_size])), 4);
big_endian_writer.WriteU16(packet_id_);
big_endian_writer.WriteU16(static_cast<uint16>(num_packets - 1));
- packet->data.push_back(static_cast<uint8>(reference_frame_id));
+ packet->data.push_back(static_cast<uint8>(frame.referenced_frame_id));
// Copy payload data.
packet->data.insert(packet->data.end(),
data_iter,
data_iter + payload_length);
- PacketKey key = PacedPacketSender::MakePacketKey(capture_time,
+ PacketKey key = PacedPacketSender::MakePacketKey(frame.reference_time,
config_.ssrc,
packet_id_);
// Store packet.
- packet_storage_->StorePacket(frame_id, packet_id_, key, packet);
+ packet_storage_->StorePacket(frame.frame_id, packet_id_, key, packet);
++packet_id_;
data_iter += payload_length;
// Update stats.
- ++send_packets_count_;
+ ++send_packet_count_;
send_octet_count_ += payload_length;
packets.push_back(make_pair(key, packet));
}
diff --git a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h b/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h
index 2b90638430..ebdbf01018 100644
--- a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h
+++ b/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h
@@ -55,45 +55,27 @@ class RtpPacketizer {
RtpPacketizerConfig rtp_packetizer_config);
~RtpPacketizer();
- // The video_frame objects ownership is handled by the main cast thread.
- void IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time);
-
- // The audio_frame objects ownership is handled by the main cast thread.
- void IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time);
-
- bool LastSentTimestamp(base::TimeTicks* time_sent,
- uint32* rtp_timestamp) const;
+ void SendFrameAsPackets(const EncodedFrame& frame);
// Return the next sequence number, and increment by one. Enables unique
// incremental sequence numbers for every packet (including retransmissions).
uint16 NextSequenceNumber();
- int send_packets_count() { return send_packets_count_; }
-
- size_t send_octet_count() { return send_octet_count_; }
+ size_t send_packet_count() const { return send_packet_count_; }
+ size_t send_octet_count() const { return send_octet_count_; }
private:
- void Cast(bool is_key,
- uint32 frame_id,
- uint32 reference_frame_id,
- uint32 timestamp,
- const std::string& data,
- const base::TimeTicks& capture_time);
-
void BuildCommonRTPheader(Packet* packet, bool marker_bit, uint32 time_stamp);
RtpPacketizerConfig config_;
PacedSender* const transport_; // Not owned by this class.
PacketStorage* packet_storage_;
- base::TimeTicks time_last_sent_rtp_timestamp_;
uint16 sequence_number_;
uint32 rtp_timestamp_;
uint16 packet_id_;
- int send_packets_count_;
+ size_t send_packet_count_;
size_t send_octet_count_;
};
diff --git a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc b/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
index 5f0edc8c74..64def4ce7f 100644
--- a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
+++ b/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
@@ -73,9 +73,9 @@ class TestRtpPacketTransport : public PacketSender {
return true;
}
- int number_of_packets_received() const { return packets_sent_; }
+ size_t number_of_packets_received() const { return packets_sent_; }
- void set_expected_number_of_packets(int expected_number_of_packets) {
+ void set_expected_number_of_packets(size_t expected_number_of_packets) {
expected_number_of_packets_ = expected_number_of_packets;
}
@@ -85,9 +85,9 @@ class TestRtpPacketTransport : public PacketSender {
RtpPacketizerConfig config_;
uint32 sequence_number_;
- int packets_sent_;
- int number_of_packets_;
- int expected_number_of_packets_;
+ size_t packets_sent_;
+ size_t number_of_packets_;
+ size_t expected_number_of_packets_;
// Assuming packets arrive in sequence.
int expected_packet_id_;
uint32 expected_frame_id_;
@@ -112,9 +112,9 @@ class RtpPacketizerTest : public ::testing::Test {
pacer_->RegisterVideoSsrc(config_.ssrc);
rtp_packetizer_.reset(new RtpPacketizer(
pacer_.get(), &packet_storage_, config_));
- video_frame_.key_frame = false;
+ video_frame_.dependency = EncodedFrame::DEPENDENT;
video_frame_.frame_id = 0;
- video_frame_.last_referenced_frame_id = kStartFrameId;
+ video_frame_.referenced_frame_id = kStartFrameId;
video_frame_.data.assign(kFrameSize, 123);
video_frame_.rtp_timestamp =
GetVideoRtpTimestamp(testing_clock_.NowTicks());
@@ -130,7 +130,7 @@ class RtpPacketizerTest : public ::testing::Test {
base::SimpleTestTickClock testing_clock_;
scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
- EncodedVideoFrame video_frame_;
+ EncodedFrame video_frame_;
PacketStorage packet_storage_;
RtpPacketizerConfig config_;
scoped_ptr<TestRtpPacketTransport> transport_;
@@ -142,30 +142,30 @@ class RtpPacketizerTest : public ::testing::Test {
};
TEST_F(RtpPacketizerTest, SendStandardPackets) {
- int expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
+ size_t expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
transport_->set_expected_number_of_packets(expected_num_of_packets);
transport_->set_rtp_timestamp(video_frame_.rtp_timestamp);
- base::TimeTicks time;
- time += base::TimeDelta::FromMilliseconds(kTimestampMs);
- rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_, time);
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(kTimestampMs));
+ video_frame_.reference_time = testing_clock_.NowTicks();
+ rtp_packetizer_->SendFrameAsPackets(video_frame_);
RunTasks(33 + 1);
EXPECT_EQ(expected_num_of_packets, transport_->number_of_packets_received());
}
TEST_F(RtpPacketizerTest, Stats) {
- EXPECT_FALSE(rtp_packetizer_->send_packets_count());
+ EXPECT_FALSE(rtp_packetizer_->send_packet_count());
EXPECT_FALSE(rtp_packetizer_->send_octet_count());
// Insert packets at varying lengths.
- int expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
+ size_t expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
transport_->set_expected_number_of_packets(expected_num_of_packets);
transport_->set_rtp_timestamp(video_frame_.rtp_timestamp);
testing_clock_.Advance(base::TimeDelta::FromMilliseconds(kTimestampMs));
- rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_,
- testing_clock_.NowTicks());
+ video_frame_.reference_time = testing_clock_.NowTicks();
+ rtp_packetizer_->SendFrameAsPackets(video_frame_);
RunTasks(33 + 1);
- EXPECT_EQ(expected_num_of_packets, rtp_packetizer_->send_packets_count());
+ EXPECT_EQ(expected_num_of_packets, rtp_packetizer_->send_packet_count());
EXPECT_EQ(kFrameSize, rtp_packetizer_->send_octet_count());
EXPECT_EQ(expected_num_of_packets, transport_->number_of_packets_received());
}
diff --git a/media/cast/transport/rtp_sender/rtp_sender.cc b/media/cast/transport/rtp_sender/rtp_sender.cc
index 6c479bb46b..91b6298bac 100644
--- a/media/cast/transport/rtp_sender/rtp_sender.cc
+++ b/media/cast/transport/rtp_sender/rtp_sender.cc
@@ -13,18 +13,12 @@ namespace media {
namespace cast {
namespace transport {
-// Schedule the RTP statistics callback every 33mS. As this interval affects the
-// time offset of the render and playout times, we want it in the same ball park
-// as the frame rate.
-static const int kStatsCallbackIntervalMs = 33;
-
RtpSender::RtpSender(
base::TickClock* clock,
const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner,
PacedSender* const transport)
: clock_(clock),
transport_(transport),
- stats_callback_(),
transport_task_runner_(transport_task_runner),
weak_factory_(this) {
// Randomly set sequence number start value.
@@ -61,17 +55,9 @@ bool RtpSender::InitializeVideo(const CastTransportVideoConfig& config) {
return true;
}
-void RtpSender::IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time) {
- DCHECK(packetizer_);
- packetizer_->IncomingEncodedVideoFrame(video_frame, capture_time);
-}
-
-void RtpSender::IncomingEncodedAudioFrame(
- const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time) {
+void RtpSender::SendFrame(const EncodedFrame& frame) {
DCHECK(packetizer_);
- packetizer_->IncomingEncodedAudioFrame(audio_frame, recorded_time);
+ packetizer_->SendFrameAsPackets(frame);
}
void RtpSender::ResendPackets(
@@ -84,51 +70,32 @@ void RtpSender::ResendPackets(
++it) {
SendPacketVector packets_to_resend;
uint8 frame_id = it->first;
- const PacketIdSet& packets_set = it->second;
+ // Set of packets that the receiver wants us to re-send.
+ // If empty, we need to re-send all packets for this frame.
+ const PacketIdSet& missing_packet_set = it->second;
bool success = false;
- if (packets_set.empty()) {
- VLOG(3) << "Missing all packets in frame " << static_cast<int>(frame_id);
+ for (uint16 packet_id = 0; ; packet_id++) {
+ // Get packet from storage.
+ success = storage_->GetPacket(frame_id, packet_id, &packets_to_resend);
- uint16 packet_id = 0;
- do {
- // Get packet from storage.
- success = storage_->GetPacket(frame_id, packet_id, &packets_to_resend);
-
- // Check that we got at least one packet.
- DCHECK(packet_id != 0 || success)
- << "Failed to resend frame " << static_cast<int>(frame_id);
-
- // Resend packet to the network.
- if (success) {
- VLOG(3) << "Resend " << static_cast<int>(frame_id) << ":"
- << packet_id;
- // Set a unique incremental sequence number for every packet.
- PacketRef packet = packets_to_resend.back().second;
- UpdateSequenceNumber(&packet->data);
- // Set the size as correspond to each frame.
- ++packet_id;
- }
- } while (success);
- } else {
- // Iterate over all of the packets in the frame.
- for (PacketIdSet::const_iterator set_it = packets_set.begin();
- set_it != packets_set.end();
- ++set_it) {
- uint16 packet_id = *set_it;
- success = storage_->GetPacket(frame_id, packet_id, &packets_to_resend);
+ // Check that we got at least one packet.
+ DCHECK(packet_id != 0 || success)
+ << "Failed to resend frame " << static_cast<int>(frame_id);
- // Check that we got at least one packet.
- DCHECK(set_it != packets_set.begin() || success)
- << "Failed to resend frame " << frame_id;
+ if (!success) break;
+ if (!missing_packet_set.empty() &&
+ missing_packet_set.find(packet_id) == missing_packet_set.end()) {
+ transport_->CancelSendingPacket(packets_to_resend.back().first);
+ packets_to_resend.pop_back();
+ } else {
// Resend packet to the network.
- if (success) {
- VLOG(3) << "Resend " << static_cast<int>(frame_id) << ":"
- << packet_id;
- PacketRef packet = packets_to_resend.back().second;
- UpdateSequenceNumber(&packet->data);
- }
+ VLOG(3) << "Resend " << static_cast<int>(frame_id) << ":"
+ << packet_id;
+ // Set a unique incremental sequence number for every packet.
+ PacketRef packet = packets_to_resend.back().second;
+ UpdateSequenceNumber(&packet->data);
}
}
transport_->ResendPackets(packets_to_resend);
@@ -142,30 +109,6 @@ void RtpSender::UpdateSequenceNumber(Packet* packet) {
(*packet)[index + 1] = (static_cast<uint8>(new_sequence_number >> 8));
}
-void RtpSender::SubscribeRtpStatsCallback(
- const CastTransportRtpStatistics& callback) {
- stats_callback_ = callback;
- ScheduleNextStatsReport();
-}
-
-void RtpSender::ScheduleNextStatsReport() {
- transport_task_runner_->PostDelayedTask(
- FROM_HERE,
- base::Bind(&RtpSender::RtpStatistics, weak_factory_.GetWeakPtr()),
- base::TimeDelta::FromMilliseconds(kStatsCallbackIntervalMs));
-}
-
-void RtpSender::RtpStatistics() {
- RtcpSenderInfo sender_info;
- base::TimeTicks time_sent;
- uint32 rtp_timestamp = 0;
- packetizer_->LastSentTimestamp(&time_sent, &rtp_timestamp);
- sender_info.send_packet_count = packetizer_->send_packets_count();
- sender_info.send_octet_count = packetizer_->send_octet_count();
- stats_callback_.Run(sender_info, time_sent, rtp_timestamp);
- ScheduleNextStatsReport();
-}
-
} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/transport/rtp_sender/rtp_sender.h b/media/cast/transport/rtp_sender/rtp_sender.h
index 3fe018222f..e1fbfe23dd 100644
--- a/media/cast/transport/rtp_sender/rtp_sender.h
+++ b/media/cast/transport/rtp_sender/rtp_sender.h
@@ -48,24 +48,19 @@ class RtpSender {
// video frames. Returns false if configuration is invalid.
bool InitializeVideo(const CastTransportVideoConfig& config);
- // The video_frame objects ownership is handled by the main cast thread.
- void IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time);
-
- // The audio_frame objects ownership is handled by the main cast thread.
- void IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time);
+ void SendFrame(const EncodedFrame& frame);
void ResendPackets(const MissingFramesAndPacketsMap& missing_packets);
- // Set the callback on which RTP statistics data will be returned. Calling
- // this function would start a timer that would schedule the callback in
- // a constant interval.
- void SubscribeRtpStatsCallback(const CastTransportRtpStatistics& callback);
+ size_t send_packet_count() const {
+ return packetizer_ ? packetizer_->send_packet_count() : 0;
+ }
+ size_t send_octet_count() const {
+ return packetizer_ ? packetizer_->send_octet_count() : 0;
+ }
+ uint32 ssrc() const { return config_.ssrc; }
private:
- void ScheduleNextStatsReport();
- void RtpStatistics();
void UpdateSequenceNumber(Packet* packet);
base::TickClock* clock_; // Not owned by this class.
@@ -73,7 +68,6 @@ class RtpSender {
scoped_ptr<RtpPacketizer> packetizer_;
scoped_ptr<PacketStorage> storage_;
PacedSender* const transport_;
- CastTransportRtpStatistics stats_callback_;
scoped_refptr<base::SingleThreadTaskRunner> transport_task_runner_;
// NOTE: Weak pointers must be invalidated before all other member variables.
diff --git a/media/cast/transport/transport/udp_transport_unittest.cc b/media/cast/transport/transport/udp_transport_unittest.cc
index 70e49bf6a6..26879492f0 100644
--- a/media/cast/transport/transport/udp_transport_unittest.cc
+++ b/media/cast/transport/transport/udp_transport_unittest.cc
@@ -12,6 +12,7 @@
#include "base/callback.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "media/cast/test/utility/net_utility.h"
#include "media/cast/transport/cast_transport_config.h"
#include "net/base/net_util.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -56,19 +57,19 @@ static void UpdateCastTransportStatus(transport::CastTransportStatus status) {
TEST(UdpTransport, SendAndReceive) {
base::MessageLoopForIO message_loop;
- net::IPAddressNumber local_addr_number;
+ net::IPEndPoint free_local_port1 = test::GetFreeLocalPort();
+ net::IPEndPoint free_local_port2 = test::GetFreeLocalPort();
net::IPAddressNumber empty_addr_number;
- net::ParseIPLiteralToNumber("127.0.0.1", &local_addr_number);
net::ParseIPLiteralToNumber("0.0.0.0", &empty_addr_number);
UdpTransport send_transport(NULL,
message_loop.message_loop_proxy(),
- net::IPEndPoint(local_addr_number, 2344),
- net::IPEndPoint(local_addr_number, 2345),
+ free_local_port1,
+ free_local_port2,
base::Bind(&UpdateCastTransportStatus));
UdpTransport recv_transport(NULL,
message_loop.message_loop_proxy(),
- net::IPEndPoint(local_addr_number, 2345),
+ free_local_port2,
net::IPEndPoint(empty_addr_number, 0),
base::Bind(&UpdateCastTransportStatus));
diff --git a/media/cast/transport/transport_audio_sender.cc b/media/cast/transport/transport_audio_sender.cc
index 3635137979..5d06c8b075 100644
--- a/media/cast/transport/transport_audio_sender.cc
+++ b/media/cast/transport/transport_audio_sender.cc
@@ -27,26 +27,24 @@ TransportAudioSender::TransportAudioSender(
TransportAudioSender::~TransportAudioSender() {}
-void TransportAudioSender::InsertCodedAudioFrame(
- const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time) {
+void TransportAudioSender::SendFrame(const EncodedFrame& audio_frame) {
if (!initialized_) {
return;
}
if (encryptor_.initialized()) {
- EncodedAudioFrame encrypted_frame;
- if (!EncryptAudioFrame(*audio_frame, &encrypted_frame)) {
+ EncodedFrame encrypted_frame;
+ if (!EncryptAudioFrame(audio_frame, &encrypted_frame)) {
+ NOTREACHED();
return;
}
- rtp_sender_.IncomingEncodedAudioFrame(&encrypted_frame, recorded_time);
+ rtp_sender_.SendFrame(encrypted_frame);
} else {
- rtp_sender_.IncomingEncodedAudioFrame(audio_frame, recorded_time);
+ rtp_sender_.SendFrame(audio_frame);
}
}
bool TransportAudioSender::EncryptAudioFrame(
- const EncodedAudioFrame& audio_frame,
- EncodedAudioFrame* encrypted_frame) {
+ const EncodedFrame& audio_frame, EncodedFrame* encrypted_frame) {
if (!initialized_) {
return false;
}
@@ -54,9 +52,11 @@ bool TransportAudioSender::EncryptAudioFrame(
audio_frame.frame_id, audio_frame.data, &encrypted_frame->data))
return false;
- encrypted_frame->codec = audio_frame.codec;
+ encrypted_frame->dependency = audio_frame.dependency;
encrypted_frame->frame_id = audio_frame.frame_id;
+ encrypted_frame->referenced_frame_id = audio_frame.referenced_frame_id;
encrypted_frame->rtp_timestamp = audio_frame.rtp_timestamp;
+ encrypted_frame->reference_time = audio_frame.reference_time;
return true;
}
@@ -68,11 +68,6 @@ void TransportAudioSender::ResendPackets(
rtp_sender_.ResendPackets(missing_frames_and_packets);
}
-void TransportAudioSender::SubscribeAudioRtpStatsCallback(
- const CastTransportRtpStatistics& callback) {
- rtp_sender_.SubscribeRtpStatsCallback(callback);
-}
-
} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/transport/transport_audio_sender.h b/media/cast/transport/transport_audio_sender.h
index 33108e01b3..84780d0c63 100644
--- a/media/cast/transport/transport_audio_sender.h
+++ b/media/cast/transport/transport_audio_sender.h
@@ -32,26 +32,24 @@ class TransportAudioSender : public base::NonThreadSafe {
// Handles the encoded audio frames to be processed.
// Frames will be encrypted, packetized and transmitted to the network.
- void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time);
+ void SendFrame(const EncodedFrame& audio_frame);
// Retransmision request.
void ResendPackets(
const MissingFramesAndPacketsMap& missing_frames_and_packets);
+ size_t send_packet_count() const { return rtp_sender_.send_packet_count(); }
+ size_t send_octet_count() const { return rtp_sender_.send_octet_count(); }
+ uint32 ssrc() const { return rtp_sender_.ssrc(); }
bool initialized() const { return initialized_; }
- // Subscribe callback to get RTP Audio stats.
- void SubscribeAudioRtpStatsCallback(
- const CastTransportRtpStatistics& callback);
-
private:
friend class LocalRtcpAudioSenderFeedback;
// Caller must allocate the destination |encrypted_frame|. The data member
// will be resized to hold the encrypted size.
- bool EncryptAudioFrame(const EncodedAudioFrame& audio_frame,
- EncodedAudioFrame* encrypted_frame);
+ bool EncryptAudioFrame(const EncodedFrame& audio_frame,
+ EncodedFrame* encrypted_frame);
RtpSender rtp_sender_;
TransportEncryptionHandler encryptor_;
diff --git a/media/cast/transport/transport_video_sender.cc b/media/cast/transport/transport_video_sender.cc
index ef120bcb05..1add29b8cb 100644
--- a/media/cast/transport/transport_video_sender.cc
+++ b/media/cast/transport/transport_video_sender.cc
@@ -27,31 +27,24 @@ TransportVideoSender::TransportVideoSender(
TransportVideoSender::~TransportVideoSender() {}
-void TransportVideoSender::InsertCodedVideoFrame(
- const EncodedVideoFrame* coded_frame,
- const base::TimeTicks& capture_time) {
+void TransportVideoSender::SendFrame(const EncodedFrame& video_frame) {
if (!initialized_) {
return;
}
if (encryptor_.initialized()) {
- EncodedVideoFrame encrypted_video_frame;
-
- if (!EncryptVideoFrame(*coded_frame, &encrypted_video_frame))
+ EncodedFrame encrypted_frame;
+ if (!EncryptVideoFrame(video_frame, &encrypted_frame)) {
+ NOTREACHED();
return;
-
- rtp_sender_.IncomingEncodedVideoFrame(&encrypted_video_frame, capture_time);
+ }
+ rtp_sender_.SendFrame(encrypted_frame);
} else {
- rtp_sender_.IncomingEncodedVideoFrame(coded_frame, capture_time);
- }
- if (coded_frame->key_frame) {
- VLOG(1) << "Send encoded key frame; frame_id:"
- << static_cast<int>(coded_frame->frame_id);
+ rtp_sender_.SendFrame(video_frame);
}
}
bool TransportVideoSender::EncryptVideoFrame(
- const EncodedVideoFrame& video_frame,
- EncodedVideoFrame* encrypted_frame) {
+ const EncodedFrame& video_frame, EncodedFrame* encrypted_frame) {
if (!initialized_) {
return false;
}
@@ -59,11 +52,11 @@ bool TransportVideoSender::EncryptVideoFrame(
video_frame.frame_id, video_frame.data, &(encrypted_frame->data)))
return false;
- encrypted_frame->codec = video_frame.codec;
- encrypted_frame->key_frame = video_frame.key_frame;
+ encrypted_frame->dependency = video_frame.dependency;
encrypted_frame->frame_id = video_frame.frame_id;
- encrypted_frame->last_referenced_frame_id =
- video_frame.last_referenced_frame_id;
+ encrypted_frame->referenced_frame_id = video_frame.referenced_frame_id;
+ encrypted_frame->rtp_timestamp = video_frame.rtp_timestamp;
+ encrypted_frame->reference_time = video_frame.reference_time;
return true;
}
@@ -75,11 +68,6 @@ void TransportVideoSender::ResendPackets(
rtp_sender_.ResendPackets(missing_frames_and_packets);
}
-void TransportVideoSender::SubscribeVideoRtpStatsCallback(
- const CastTransportRtpStatistics& callback) {
- rtp_sender_.SubscribeRtpStatsCallback(callback);
-}
-
} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/transport/transport_video_sender.h b/media/cast/transport/transport_video_sender.h
index 9bb2267216..3025cec19b 100644
--- a/media/cast/transport/transport_video_sender.h
+++ b/media/cast/transport/transport_video_sender.h
@@ -37,24 +37,22 @@ class TransportVideoSender : public base::NonThreadSafe {
// Handles the encoded video frames to be processed.
// Frames will be encrypted, packetized and transmitted to the network.
- void InsertCodedVideoFrame(const EncodedVideoFrame* coded_frame,
- const base::TimeTicks& capture_time);
+ void SendFrame(const EncodedFrame& video_frame);
// Retransmision request.
void ResendPackets(
const MissingFramesAndPacketsMap& missing_frames_and_packets);
+ size_t send_packet_count() const { return rtp_sender_.send_packet_count(); }
+ size_t send_octet_count() const { return rtp_sender_.send_octet_count(); }
+ uint32 ssrc() const { return rtp_sender_.ssrc(); }
bool initialized() const { return initialized_; }
- // Subscribe callback to get RTP Audio stats.
- void SubscribeVideoRtpStatsCallback(
- const CastTransportRtpStatistics& callback);
-
private:
// Caller must allocate the destination |encrypted_video_frame| the data
// member will be resized to hold the encrypted size.
- bool EncryptVideoFrame(const EncodedVideoFrame& encoded_frame,
- EncodedVideoFrame* encrypted_video_frame);
+ bool EncryptVideoFrame(const EncodedFrame& encoded_frame,
+ EncodedFrame* encrypted_video_frame);
const base::TimeDelta rtp_max_delay_;
TransportEncryptionHandler encryptor_;
diff --git a/media/cast/video_receiver/codecs/vp8/vp8_decoder.h b/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
deleted file mode 100644
index 62ee8ad9a7..0000000000
--- a/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_VIDEO_RECEVIER_CODECS_VP8_VP8_DECODER_H_
-#define MEDIA_CAST_VIDEO_RECEVIER_CODECS_VP8_VP8_DECODER_H_
-
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "media/cast/cast_config.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/cast_receiver.h"
-#include "media/cast/video_receiver/software_video_decoder.h"
-#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
-
-typedef struct vpx_codec_ctx vpx_dec_ctx_t;
-
-// TODO(mikhal): Look into reusing VpxVideoDecoder.
-namespace media {
-namespace cast {
-
-class Vp8Decoder : public SoftwareVideoDecoder {
- public:
- explicit Vp8Decoder(scoped_refptr<CastEnvironment> cast_environment);
- virtual ~Vp8Decoder();
-
- // SoftwareVideoDecoder implementations.
- virtual bool Decode(const transport::EncodedVideoFrame* encoded_frame,
- const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_cb)
- OVERRIDE;
-
- private:
- // Initialize the decoder.
- void InitDecoder();
- scoped_ptr<vpx_dec_ctx_t> decoder_;
- scoped_refptr<CastEnvironment> cast_environment_;
-
- DISALLOW_COPY_AND_ASSIGN(Vp8Decoder);
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_VIDEO_RECEVIER_CODECS_VP8_VP8_DECODER_H_
diff --git a/media/cast/video_receiver/video_decoder.cc b/media/cast/video_receiver/video_decoder.cc
index 9f167490e9..c2d256268f 100644
--- a/media/cast/video_receiver/video_decoder.cc
+++ b/media/cast/video_receiver/video_decoder.cc
@@ -9,7 +9,6 @@
#include "base/json/json_reader.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/stl_util.h"
#include "base/values.h"
#include "media/base/video_util.h"
#include "media/cast/cast_defines.h"
@@ -41,18 +40,10 @@ class VideoDecoder::ImplBase
return cast_initialization_status_;
}
- void DecodeFrame(scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
+ void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback) {
DCHECK_EQ(cast_initialization_status_, STATUS_VIDEO_INITIALIZED);
- if (encoded_frame->codec != codec_) {
- NOTREACHED();
- cast_environment_->PostTask(
- CastEnvironment::MAIN,
- FROM_HERE,
- base::Bind(callback, scoped_refptr<VideoFrame>(NULL), false));
- }
-
COMPILE_ASSERT(sizeof(encoded_frame->frame_id) == sizeof(last_frame_id_),
size_of_frame_id_types_do_not_match);
bool is_continuous = true;
@@ -68,7 +59,7 @@ class VideoDecoder::ImplBase
last_frame_id_ = encoded_frame->frame_id;
const scoped_refptr<VideoFrame> decoded_frame = Decode(
- reinterpret_cast<uint8*>(string_as_array(&encoded_frame->data)),
+ encoded_frame->mutable_bytes(),
static_cast<int>(encoded_frame->data.size()));
cast_environment_->PostTask(
CastEnvironment::MAIN,
@@ -218,9 +209,9 @@ class VideoDecoder::FakeImpl : public VideoDecoder::ImplBase {
VideoDecoder::VideoDecoder(
const scoped_refptr<CastEnvironment>& cast_environment,
- const VideoReceiverConfig& video_config)
+ const FrameReceiverConfig& video_config)
: cast_environment_(cast_environment) {
- switch (video_config.codec) {
+ switch (video_config.codec.video) {
#ifndef OFFICIAL_BUILD
case transport::kFakeSoftwareVideo:
impl_ = new FakeImpl(cast_environment);
@@ -248,7 +239,7 @@ CastInitializationStatus VideoDecoder::InitializationResult() const {
}
void VideoDecoder::DecodeFrame(
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
+ scoped_ptr<transport::EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback) {
DCHECK(encoded_frame.get());
DCHECK(!callback.is_null());
diff --git a/media/cast/video_receiver/video_decoder.h b/media/cast/video_receiver/video_decoder.h
index 7f0db54aa8..ea40173004 100644
--- a/media/cast/video_receiver/video_decoder.h
+++ b/media/cast/video_receiver/video_decoder.h
@@ -28,11 +28,11 @@ class VideoDecoder {
bool is_continuous)> DecodeFrameCallback;
VideoDecoder(const scoped_refptr<CastEnvironment>& cast_environment,
- const VideoReceiverConfig& video_config);
+ const FrameReceiverConfig& video_config);
virtual ~VideoDecoder();
// Returns STATUS_VIDEO_INITIALIZED if the decoder was successfully
- // constructed from the given VideoReceiverConfig. If this method returns any
+ // constructed from the given FrameReceiverConfig. If this method returns any
// other value, calls to DecodeFrame() will not succeed.
CastInitializationStatus InitializationResult() const;
@@ -43,7 +43,7 @@ class VideoDecoder {
// monotonically-increasing by 1 for each successive call to this method.
// When it is not, the decoder will assume one or more frames have been
// dropped (e.g., due to packet loss), and will perform recovery actions.
- void DecodeFrame(scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
+ void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback);
private:
diff --git a/media/cast/video_receiver/video_decoder_unittest.cc b/media/cast/video_receiver/video_decoder_unittest.cc
index aa6b7ac058..2ca28b070c 100644
--- a/media/cast/video_receiver/video_decoder_unittest.cc
+++ b/media/cast/video_receiver/video_decoder_unittest.cc
@@ -10,6 +10,7 @@
#include "base/synchronization/lock.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
+#include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/standalone_cast_environment.h"
#include "media/cast/test/utility/video_utility.h"
#include "media/cast/video_receiver/video_decoder.h"
@@ -47,9 +48,8 @@ class VideoDecoderTest
protected:
virtual void SetUp() OVERRIDE {
- VideoReceiverConfig decoder_config;
- decoder_config.use_external_decoder = false;
- decoder_config.codec = GetParam();
+ FrameReceiverConfig decoder_config = GetDefaultVideoReceiverConfig();
+ decoder_config.codec.video = GetParam();
video_decoder_.reset(new VideoDecoder(cast_environment_, decoder_config));
CHECK_EQ(STATUS_VIDEO_INITIALIZED, video_decoder_->InitializationResult());
@@ -61,10 +61,10 @@ class VideoDecoderTest
total_video_frames_decoded_ = 0;
}
- // Called from the unit test thread to create another EncodedVideoFrame and
- // push it into the decoding pipeline.
+ // Called from the unit test thread to create another EncodedFrame and push it
+ // into the decoding pipeline.
void FeedMoreVideo(int num_dropped_frames) {
- // Prepare a simulated EncodedVideoFrame to feed into the VideoDecoder.
+ // Prepare a simulated EncodedFrame to feed into the VideoDecoder.
const gfx::Size frame_size(kWidth, kHeight);
const scoped_refptr<VideoFrame> video_frame =
@@ -77,11 +77,10 @@ class VideoDecoderTest
PopulateVideoFrame(video_frame, 0);
// Encode |frame| into |encoded_frame->data|.
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame(
- new transport::EncodedVideoFrame());
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
CHECK_EQ(transport::kVp8, GetParam()); // Only support VP8 test currently.
vp8_encoder_.Encode(video_frame, encoded_frame.get());
- encoded_frame->codec = GetParam();
encoded_frame->frame_id = last_frame_id_ + 1 + num_dropped_frames;
last_frame_id_ = encoded_frame->frame_id;
diff --git a/media/cast/video_receiver/video_receiver.cc b/media/cast/video_receiver/video_receiver.cc
index 677475fb27..d9000de88e 100644
--- a/media/cast/video_receiver/video_receiver.cc
+++ b/media/cast/video_receiver/video_receiver.cc
@@ -17,29 +17,27 @@
namespace {
const int kMinSchedulingDelayMs = 1;
-const int kMinTimeBetweenOffsetUpdatesMs = 1000;
-const int kTimeOffsetMaxCounter = 10;
} // namespace
namespace media {
namespace cast {
VideoReceiver::VideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
- const VideoReceiverConfig& video_config,
+ const FrameReceiverConfig& video_config,
transport::PacedPacketSender* const packet_sender)
: RtpReceiver(cast_environment->Clock(), NULL, &video_config),
cast_environment_(cast_environment),
- event_subscriber_(kReceiverRtcpEventHistorySize,
- ReceiverRtcpEventSubscriber::kVideoEventSubscriber),
- codec_(video_config.codec),
- target_delay_delta_(
+ event_subscriber_(kReceiverRtcpEventHistorySize, VIDEO_EVENT),
+ codec_(video_config.codec.video),
+ target_playout_delay_(
base::TimeDelta::FromMilliseconds(video_config.rtp_max_delay_ms)),
expected_frame_duration_(
base::TimeDelta::FromSeconds(1) / video_config.max_frame_rate),
+ reports_are_scheduled_(false),
framer_(cast_environment->Clock(),
this,
video_config.incoming_ssrc,
- video_config.decoder_faster_than_max_frame_rate,
+ true,
video_config.rtp_max_delay_ms * video_config.max_frame_rate /
1000),
rtcp_(cast_environment_,
@@ -51,19 +49,16 @@ VideoReceiver::VideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
video_config.feedback_ssrc,
video_config.incoming_ssrc,
- video_config.rtcp_c_name),
- time_offset_counter_(0),
- time_incoming_packet_updated_(false),
- incoming_rtp_timestamp_(0),
+ video_config.rtcp_c_name,
+ false),
is_waiting_for_consecutive_frame_(false),
+ lip_sync_drift_(ClockDriftSmoother::GetDefaultTimeConstant()),
weak_factory_(this) {
DCHECK_GT(video_config.rtp_max_delay_ms, 0);
DCHECK_GT(video_config.max_frame_rate, 0);
- if (!video_config.use_external_decoder) {
- video_decoder_.reset(new VideoDecoder(cast_environment, video_config));
- }
+ video_decoder_.reset(new VideoDecoder(cast_environment, video_config));
decryptor_.Initialize(video_config.aes_key, video_config.aes_iv_mask);
- rtcp_.SetTargetDelay(target_delay_delta_);
+ rtcp_.SetTargetDelay(target_playout_delay_);
cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_);
memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_));
}
@@ -73,12 +68,6 @@ VideoReceiver::~VideoReceiver() {
cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_);
}
-void VideoReceiver::InitializeTimers() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- ScheduleNextRtcpReport();
- ScheduleNextCastMessage();
-}
-
void VideoReceiver::GetRawVideoFrame(
const VideoFrameDecodedCallback& callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
@@ -94,15 +83,16 @@ void VideoReceiver::GetRawVideoFrame(
void VideoReceiver::DecodeEncodedVideoFrame(
const VideoFrameDecodedCallback& callback,
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& playout_time) {
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (!encoded_frame) {
- callback.Run(make_scoped_refptr<VideoFrame>(NULL), playout_time, false);
+ callback.Run(
+ make_scoped_refptr<VideoFrame>(NULL), base::TimeTicks(), false);
return;
}
const uint32 frame_id = encoded_frame->frame_id;
const uint32 rtp_timestamp = encoded_frame->rtp_timestamp;
+ const base::TimeTicks playout_time = encoded_frame->reference_time;
video_decoder_->DecodeFrame(encoded_frame.Pass(),
base::Bind(&VideoReceiver::EmitRawVideoFrame,
cast_environment_,
@@ -125,9 +115,9 @@ void VideoReceiver::EmitRawVideoFrame(
if (video_frame) {
const base::TimeTicks now = cast_environment->Clock()->NowTicks();
cast_environment->Logging()->InsertFrameEvent(
- now, kVideoFrameDecoded, rtp_timestamp, frame_id);
+ now, FRAME_DECODED, VIDEO_EVENT, rtp_timestamp, frame_id);
cast_environment->Logging()->InsertFrameEventWithDelay(
- now, kVideoRenderDelay, rtp_timestamp, frame_id,
+ now, FRAME_PLAYOUT, VIDEO_EVENT, rtp_timestamp, frame_id,
playout_time - now);
// Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
TRACE_EVENT_INSTANT1(
@@ -138,8 +128,7 @@ void VideoReceiver::EmitRawVideoFrame(
callback.Run(video_frame, playout_time, is_continuous);
}
-void VideoReceiver::GetEncodedVideoFrame(
- const VideoFrameEncodedCallback& callback) {
+void VideoReceiver::GetEncodedVideoFrame(const FrameEncodedCallback& callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
frame_request_queue_.push_back(callback);
EmitAvailableEncodedFrames();
@@ -152,22 +141,33 @@ void VideoReceiver::EmitAvailableEncodedFrames() {
// Attempt to peek at the next completed frame from the |framer_|.
// TODO(miu): We should only be peeking at the metadata, and not copying the
// payload yet! Or, at least, peek using a StringPiece instead of a copy.
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame(
- new transport::EncodedVideoFrame());
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
bool is_consecutively_next_frame = false;
- if (!framer_.GetEncodedVideoFrame(encoded_frame.get(),
- &is_consecutively_next_frame)) {
+ bool have_multiple_complete_frames = false;
+
+ if (!framer_.GetEncodedFrame(encoded_frame.get(),
+ &is_consecutively_next_frame,
+ &have_multiple_complete_frames)) {
VLOG(1) << "Wait for more video packets to produce a completed frame.";
return; // OnReceivedPayloadData() will invoke this method in the future.
}
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ const base::TimeTicks playout_time =
+ GetPlayoutTime(encoded_frame->rtp_timestamp);
+
+ // If we have multiple decodable frames, and the current frame is
+ // too old, then skip it and decode the next frame instead.
+ if (have_multiple_complete_frames && now > playout_time) {
+ framer_.ReleaseFrame(encoded_frame->frame_id);
+ continue;
+ }
+
// If |framer_| has a frame ready that is out of sequence, examine the
// playout time to determine whether it's acceptable to continue, thereby
// skipping one or more frames. Skip if the missing frame wouldn't complete
// playing before the start of playback of the available frame.
- const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- const base::TimeTicks playout_time =
- GetPlayoutTime(now, encoded_frame->rtp_timestamp);
if (!is_consecutively_next_frame) {
// TODO(miu): Also account for expected decode time here?
const base::TimeTicks earliest_possible_end_time_of_missing_frame =
@@ -201,8 +201,8 @@ void VideoReceiver::EmitAvailableEncodedFrames() {
encoded_frame->data.swap(decrypted_video_data);
}
- // At this point, we have a decrypted EncodedVideoFrame ready to be emitted.
- encoded_frame->codec = codec_;
+ // At this point, we have a decrypted EncodedFrame ready to be emitted.
+ encoded_frame->reference_time = playout_time;
framer_.ReleaseFrame(encoded_frame->frame_id);
// Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
TRACE_EVENT_INSTANT2(
@@ -214,8 +214,7 @@ void VideoReceiver::EmitAvailableEncodedFrames() {
cast_environment_->PostTask(CastEnvironment::MAIN,
FROM_HERE,
base::Bind(frame_request_queue_.front(),
- base::Passed(&encoded_frame),
- playout_time));
+ base::Passed(&encoded_frame)));
frame_request_queue_.pop_front();
}
}
@@ -227,72 +226,13 @@ void VideoReceiver::EmitAvailableEncodedFramesAfterWaiting() {
EmitAvailableEncodedFrames();
}
-base::TimeTicks VideoReceiver::GetPlayoutTime(base::TimeTicks now,
- uint32 rtp_timestamp) {
- // TODO(miu): This and AudioReceiver::GetPlayoutTime() need to be reconciled!
-
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- // Senders time in ms when this frame was captured.
- // Note: the senders clock and our local clock might not be synced.
- base::TimeTicks rtp_timestamp_in_ticks;
-
- // Compute the time offset_in_ticks based on the incoming_rtp_timestamp_.
- if (time_offset_counter_ == 0) {
- // Check for received RTCP to sync the stream play it out asap.
- if (rtcp_.RtpTimestampInSenderTime(kVideoFrequency,
- incoming_rtp_timestamp_,
- &rtp_timestamp_in_ticks)) {
- ++time_offset_counter_;
- }
- } else if (time_incoming_packet_updated_) {
- if (rtcp_.RtpTimestampInSenderTime(kVideoFrequency,
- incoming_rtp_timestamp_,
- &rtp_timestamp_in_ticks)) {
- // Time to update the time_offset.
- base::TimeDelta time_offset =
- time_incoming_packet_ - rtp_timestamp_in_ticks;
- // Taking the minimum of the first kTimeOffsetMaxCounter values. We are
- // assuming that we are looking for the minimum offset, which will occur
- // when network conditions are the best. This should occur at least once
- // within the first kTimeOffsetMaxCounter samples. Any drift should be
- // very slow, and negligible for this use case.
- if (time_offset_counter_ == 1)
- time_offset_ = time_offset;
- else if (time_offset_counter_ < kTimeOffsetMaxCounter) {
- time_offset_ = std::min(time_offset_, time_offset);
- }
- if (time_offset_counter_ < kTimeOffsetMaxCounter)
- ++time_offset_counter_;
- }
- }
- // Reset |time_incoming_packet_updated_| to enable a future measurement.
- time_incoming_packet_updated_ = false;
- // Compute the actual rtp_timestamp_in_ticks based on the current timestamp.
- if (!rtcp_.RtpTimestampInSenderTime(
- kVideoFrequency, rtp_timestamp, &rtp_timestamp_in_ticks)) {
- // This can fail if we have not received any RTCP packets in a long time.
- // BUG: These calculations are a placeholder, and to be revisited in a
- // soon-upcoming change. http://crbug.com/356942
- const int frequency_khz = kVideoFrequency / 1000;
- const base::TimeDelta delta_based_on_rtp_timestamps =
- base::TimeDelta::FromMilliseconds(
- static_cast<int32>(rtp_timestamp - incoming_rtp_timestamp_) /
- frequency_khz);
- return time_incoming_packet_ + delta_based_on_rtp_timestamps;
- }
-
- base::TimeTicks render_time =
- rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_;
- // TODO(miu): This is broken since this "getter" method may be called on
- // frames received out-of-order, which means the playout times for earlier
- // frames will be computed incorrectly.
-#if 0
- if (last_render_time_ > render_time)
- render_time = last_render_time_;
- last_render_time_ = render_time;
-#endif
-
- return render_time;
+base::TimeTicks VideoReceiver::GetPlayoutTime(uint32 rtp_timestamp) const {
+ return lip_sync_reference_time_ +
+ lip_sync_drift_.Current() +
+ RtpDeltaToTimeDelta(
+ static_cast<int32>(rtp_timestamp - lip_sync_rtp_timestamp_),
+ kVideoFrequency) +
+ target_playout_delay_;
}
void VideoReceiver::IncomingPacket(scoped_ptr<Packet> packet) {
@@ -302,6 +242,11 @@ void VideoReceiver::IncomingPacket(scoped_ptr<Packet> packet) {
} else {
ReceivedPacket(&packet->front(), packet->size());
}
+ if (!reports_are_scheduled_) {
+ ScheduleNextRtcpReport();
+ ScheduleNextCastMessage();
+ reports_are_scheduled_ = true;
+ }
}
void VideoReceiver::OnReceivedPayloadData(const uint8* payload_data,
@@ -309,27 +254,14 @@ void VideoReceiver::OnReceivedPayloadData(const uint8* payload_data,
const RtpCastHeader& rtp_header) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- if (time_incoming_packet_.is_null() ||
- now - time_incoming_packet_ >
- base::TimeDelta::FromMilliseconds(kMinTimeBetweenOffsetUpdatesMs)) {
- if (time_incoming_packet_.is_null())
- InitializeTimers();
- incoming_rtp_timestamp_ = rtp_header.rtp_timestamp;
- // The following incoming packet info is used for syncing sender and
- // receiver clock. Use only the first packet of every frame to obtain a
- // minimal value.
- if (rtp_header.packet_id == 0) {
- time_incoming_packet_ = now;
- time_incoming_packet_updated_ = true;
- }
- }
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
frame_id_to_rtp_timestamp_[rtp_header.frame_id & 0xff] =
rtp_header.rtp_timestamp;
cast_environment_->Logging()->InsertPacketEvent(
now,
- kVideoPacketReceived,
+ PACKET_RECEIVED,
+ VIDEO_EVENT,
rtp_header.rtp_timestamp,
rtp_header.frame_id,
rtp_header.packet_id,
@@ -339,20 +271,44 @@ void VideoReceiver::OnReceivedPayloadData(const uint8* payload_data,
bool duplicate = false;
const bool complete =
framer_.InsertPacket(payload_data, payload_size, rtp_header, &duplicate);
- if (duplicate) {
- cast_environment_->Logging()->InsertPacketEvent(
- now,
- kDuplicateVideoPacketReceived,
- rtp_header.rtp_timestamp,
- rtp_header.frame_id,
- rtp_header.packet_id,
- rtp_header.max_packet_id,
- payload_size);
- // Duplicate packets are ignored.
+
+ // Duplicate packets are ignored.
+ if (duplicate)
return;
+
+ // Update lip-sync values upon receiving the first packet of each frame, or if
+ // they have never been set yet.
+ if (rtp_header.packet_id == 0 || lip_sync_reference_time_.is_null()) {
+ RtpTimestamp fresh_sync_rtp;
+ base::TimeTicks fresh_sync_reference;
+ if (!rtcp_.GetLatestLipSyncTimes(&fresh_sync_rtp, &fresh_sync_reference)) {
+ // HACK: The sender should have provided Sender Reports before the first
+ // frame was sent. However, the spec does not currently require this.
+ // Therefore, when the data is missing, the local clock is used to
+ // generate reference timestamps.
+ VLOG(2) << "Lip sync info missing. Falling-back to local clock.";
+ fresh_sync_rtp = rtp_header.rtp_timestamp;
+ fresh_sync_reference = now;
+ }
+ // |lip_sync_reference_time_| is always incremented according to the time
+ // delta computed from the difference in RTP timestamps. Then,
+ // |lip_sync_drift_| accounts for clock drift and also smoothes-out any
+ // sudden/discontinuous shifts in the series of reference time values.
+ if (lip_sync_reference_time_.is_null()) {
+ lip_sync_reference_time_ = fresh_sync_reference;
+ } else {
+ lip_sync_reference_time_ += RtpDeltaToTimeDelta(
+ static_cast<int32>(fresh_sync_rtp - lip_sync_rtp_timestamp_),
+ kVideoFrequency);
+ }
+ lip_sync_rtp_timestamp_ = fresh_sync_rtp;
+ lip_sync_drift_.Update(
+ now, fresh_sync_reference - lip_sync_reference_time_);
}
+
+ // Video frame not complete; wait for more packets.
if (!complete)
- return; // Video frame not complete; wait for more packets.
+ return;
EmitAvailableEncodedFrames();
}
@@ -366,7 +322,8 @@ void VideoReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
RtpTimestamp rtp_timestamp =
frame_id_to_rtp_timestamp_[cast_message.ack_frame_id_ & 0xff];
cast_environment_->Logging()->InsertFrameEvent(
- now, kVideoAckSent, rtp_timestamp, cast_message.ack_frame_id_);
+ now, FRAME_ACK_SENT, VIDEO_EVENT,
+ rtp_timestamp, cast_message.ack_frame_id_);
ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
event_subscriber_.GetRtcpEventsAndReset(&rtcp_events);
diff --git a/media/cast/video_receiver/video_receiver.h b/media/cast/video_receiver/video_receiver.h
index 4852794b90..14a4bfa5c9 100644
--- a/media/cast/video_receiver/video_receiver.h
+++ b/media/cast/video_receiver/video_receiver.h
@@ -13,6 +13,7 @@
#include "base/threading/non_thread_safe.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
+#include "media/cast/base/clock_drift_smoother.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/cast_receiver.h"
@@ -41,10 +42,6 @@ class VideoDecoder;
// each step of the pipeline (i.e., encode frame, then transmit/retransmit from
// the sender, then receive and re-order packets on the receiver, then decode
// frame) can vary in duration and is typically very hard to predict.
-// Heuristics will determine when the targeted playout delay is insufficient in
-// the current environment; and the receiver can then increase the playout
-// delay, notifying the sender, to account for the extra variance.
-// TODO(miu): Make the last sentence true. http://crbug.com/360111
//
// Two types of frames can be requested: 1) A frame of decoded video data; or 2)
// a frame of still-encoded video data, to be passed into an external video
@@ -61,7 +58,7 @@ class VideoReceiver : public RtpReceiver,
public base::SupportsWeakPtr<VideoReceiver> {
public:
VideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
- const VideoReceiverConfig& video_config,
+ const FrameReceiverConfig& video_config,
transport::PacedPacketSender* const packet_sender);
virtual ~VideoReceiver();
@@ -76,13 +73,13 @@ class VideoReceiver : public RtpReceiver,
//
// The given |callback| is guaranteed to be run at some point in the future,
// even if to respond with NULL at shutdown time.
- void GetEncodedVideoFrame(const VideoFrameEncodedCallback& callback);
+ void GetEncodedVideoFrame(const FrameEncodedCallback& callback);
// Deliver another packet, possibly a duplicate, and possibly out-of-order.
void IncomingPacket(scoped_ptr<Packet> packet);
protected:
- friend class VideoReceiverTest; // Invoked OnReceivedPayloadData().
+ friend class VideoReceiverTest; // Invokes OnReceivedPayloadData().
virtual void OnReceivedPayloadData(const uint8* payload_data,
size_t payload_size,
@@ -102,17 +99,16 @@ class VideoReceiver : public RtpReceiver,
// EmitAvailableEncodedFrames().
void EmitAvailableEncodedFramesAfterWaiting();
- // Feeds an EncodedVideoFrame into |video_decoder_|. GetRawVideoFrame() uses
- // this as a callback for GetEncodedVideoFrame().
+ // Feeds an EncodedFrame into |video_decoder_|. GetRawVideoFrame() uses this
+ // as a callback for GetEncodedVideoFrame().
void DecodeEncodedVideoFrame(
const VideoFrameDecodedCallback& callback,
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& playout_time);
+ scoped_ptr<transport::EncodedFrame> encoded_frame);
- // Return the playout time based on the current time and rtp timestamp.
- base::TimeTicks GetPlayoutTime(base::TimeTicks now, uint32 rtp_timestamp);
-
- void InitializeTimers();
+ // Computes the playout time for a frame with the given |rtp_timestamp|.
+ // Because lip-sync info is refreshed regularly, calling this method with the
+ // same argument may return different results.
+ base::TimeTicks GetPlayoutTime(uint32 rtp_timestamp) const;
// Schedule timing for the next cast message.
void ScheduleNextCastMessage();
@@ -146,30 +142,58 @@ class VideoReceiver : public RtpReceiver,
// Processes raw audio events to be sent over to the cast sender via RTCP.
ReceiverRtcpEventSubscriber event_subscriber_;
+ // Configured video codec.
const transport::VideoCodec codec_;
- const base::TimeDelta target_delay_delta_;
+
+ // The total amount of time between a frame's capture/recording on the sender
+ // and its playback on the receiver (i.e., shown to a user). This is fixed as
+ // a value large enough to give the system sufficient time to encode,
+ // transmit/retransmit, receive, decode, and render; given its run-time
+ // environment (sender/receiver hardware performance, network conditions,
+ // etc.).
+ const base::TimeDelta target_playout_delay_;
+
+ // Hack: This is used in logic that determines whether to skip frames.
const base::TimeDelta expected_frame_duration_;
+
+ // Set to false initially, then set to true after scheduling the periodic
+ // sending of reports back to the sender. Reports are first scheduled just
+ // after receiving a first packet (since the first packet identifies the
+ // sender for the remainder of the session).
+ bool reports_are_scheduled_;
+
+ // Assembles packets into frames, providing this receiver with complete,
+ // decodable EncodedFrames.
Framer framer_;
+
+ // Decodes frames into media::VideoFrame images for playback.
scoped_ptr<VideoDecoder> video_decoder_;
+
+ // Manages sending/receiving of RTCP packets, including sender/receiver
+ // reports.
Rtcp rtcp_;
- base::TimeDelta time_offset_; // Sender-receiver offset estimation.
- int time_offset_counter_;
- bool time_incoming_packet_updated_;
- base::TimeTicks time_incoming_packet_;
- uint32 incoming_rtp_timestamp_;
+
+ // Decrypts encrypted frames.
transport::TransportEncryptionHandler decryptor_;
// Outstanding callbacks to run to deliver on client requests for frames.
- std::list<VideoFrameEncodedCallback> frame_request_queue_;
+ std::list<FrameEncodedCallback> frame_request_queue_;
// True while there's an outstanding task to re-invoke
// EmitAvailableEncodedFrames().
bool is_waiting_for_consecutive_frame_;
- // This mapping allows us to log kVideoAckSent as a frame event. In addition
+ // This mapping allows us to log FRAME_ACK_SENT as a frame event. In addition
// it allows the event to be transmitted via RTCP.
RtpTimestamp frame_id_to_rtp_timestamp_[256];
+ // Lip-sync values used to compute the playout time of each frame from its RTP
+ // timestamp. These are updated each time the first packet of a frame is
+ // received.
+ RtpTimestamp lip_sync_rtp_timestamp_;
+ base::TimeTicks lip_sync_reference_time_;
+ ClockDriftSmoother lip_sync_drift_;
+
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<VideoReceiver> weak_factory_;
diff --git a/media/cast/video_receiver/video_receiver_unittest.cc b/media/cast/video_receiver/video_receiver_unittest.cc
index 9a1fde7b1a..d158d7e325 100644
--- a/media/cast/video_receiver/video_receiver_unittest.cc
+++ b/media/cast/video_receiver/video_receiver_unittest.cc
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <deque>
+#include <utility>
+
#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
@@ -9,50 +12,52 @@
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
#include "media/cast/logging/simple_event_subscriber.h"
+#include "media/cast/rtcp/test_rtcp_packet_builder.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/test/utility/default_config.h"
#include "media/cast/transport/pacing/mock_paced_packet_sender.h"
#include "media/cast/video_receiver/video_receiver.h"
#include "testing/gmock/include/gmock/gmock.h"
+using ::testing::_;
+
namespace media {
namespace cast {
-using ::testing::_;
-
namespace {
const int kPacketSize = 1500;
-const int64 kStartMillisecond = INT64_C(12345678900000);
const uint32 kFirstFrameId = 1234;
+const int kPlayoutDelayMillis = 100;
class FakeVideoClient {
public:
FakeVideoClient() : num_called_(0) {}
virtual ~FakeVideoClient() {}
- void SetNextExpectedResult(uint32 expected_frame_id,
- const base::TimeTicks& expected_playout_time) {
- expected_frame_id_ = expected_frame_id;
- expected_playout_time_ = expected_playout_time;
+ void AddExpectedResult(uint32 expected_frame_id,
+ const base::TimeTicks& expected_playout_time) {
+ expected_results_.push_back(
+ std::make_pair(expected_frame_id, expected_playout_time));
}
void DeliverEncodedVideoFrame(
- scoped_ptr<transport::EncodedVideoFrame> video_frame,
- const base::TimeTicks& playout_time) {
+ scoped_ptr<transport::EncodedFrame> video_frame) {
+ SCOPED_TRACE(::testing::Message() << "num_called_ is " << num_called_);
ASSERT_FALSE(!video_frame)
<< "If at shutdown: There were unsatisfied requests enqueued.";
- EXPECT_EQ(expected_frame_id_, video_frame->frame_id);
- EXPECT_EQ(transport::kVp8, video_frame->codec);
- EXPECT_EQ(expected_playout_time_, playout_time);
+ ASSERT_FALSE(expected_results_.empty());
+ EXPECT_EQ(expected_results_.front().first, video_frame->frame_id);
+ EXPECT_EQ(expected_results_.front().second, video_frame->reference_time);
+ expected_results_.pop_front();
++num_called_;
}
int number_times_called() const { return num_called_; }
private:
+ std::deque<std::pair<uint32, base::TimeTicks> > expected_results_;
int num_called_;
- uint32 expected_frame_id_;
- base::TimeTicks expected_playout_time_;
DISALLOW_COPY_AND_ASSIGN(FakeVideoClient);
};
@@ -61,16 +66,15 @@ class FakeVideoClient {
class VideoReceiverTest : public ::testing::Test {
protected:
VideoReceiverTest() {
- // Configure to use vp8 software implementation.
- config_.rtp_max_delay_ms = 100;
- config_.use_external_decoder = false;
+ config_ = GetDefaultVideoReceiverConfig();
+ config_.rtp_max_delay_ms = kPlayoutDelayMillis;
// Note: Frame rate must divide 1000 without remainder so the test code
// doesn't have to account for rounding errors.
config_.max_frame_rate = 25;
- config_.codec = transport::kVp8;
+ config_.codec.video = transport::kVp8; // Frame skipping not allowed.
testing_clock_ = new base::SimpleTestTickClock();
- testing_clock_->Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
+ start_time_ = testing_clock_->NowTicks();
task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
cast_environment_ =
@@ -101,10 +105,26 @@ class VideoReceiverTest : public ::testing::Test {
payload_.data(), payload_.size(), rtp_header_);
}
- VideoReceiverConfig config_;
+ void FeedLipSyncInfoIntoReceiver() {
+ const base::TimeTicks now = testing_clock_->NowTicks();
+ const int64 rtp_timestamp = (now - start_time_) *
+ kVideoFrequency / base::TimeDelta::FromSeconds(1);
+ CHECK_LE(0, rtp_timestamp);
+ uint32 ntp_seconds;
+ uint32 ntp_fraction;
+ ConvertTimeTicksToNtp(now, &ntp_seconds, &ntp_fraction);
+ TestRtcpPacketBuilder rtcp_packet;
+ rtcp_packet.AddSrWithNtp(config_.incoming_ssrc,
+ ntp_seconds, ntp_fraction,
+ static_cast<uint32>(rtp_timestamp));
+ receiver_->IncomingPacket(rtcp_packet.GetPacket().Pass());
+ }
+
+ FrameReceiverConfig config_;
std::vector<uint8> payload_;
RtpCastHeader rtp_header_;
base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
+ base::TimeTicks start_time_;
transport::MockPacedPacketSender mock_transport_;
scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
scoped_refptr<CastEnvironment> cast_environment_;
@@ -117,13 +137,16 @@ class VideoReceiverTest : public ::testing::Test {
DISALLOW_COPY_AND_ASSIGN(VideoReceiverTest);
};
-TEST_F(VideoReceiverTest, GetOnePacketEncodedFrame) {
+TEST_F(VideoReceiverTest, ReceivesOneFrame) {
SimpleEventSubscriber event_subscriber;
cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber);
EXPECT_CALL(mock_transport_, SendRtcpPacket(_, _))
.WillRepeatedly(testing::Return(true));
+ FeedLipSyncInfoIntoReceiver();
+ task_runner_->RunTasks();
+
// Enqueue a request for a video frame.
receiver_->GetEncodedVideoFrame(
base::Bind(&FakeVideoClient::DeliverEncodedVideoFrame,
@@ -134,8 +157,10 @@ TEST_F(VideoReceiverTest, GetOnePacketEncodedFrame) {
EXPECT_EQ(0, fake_video_client_.number_times_called());
// Deliver one video frame to the receiver and expect to get one frame back.
- fake_video_client_.SetNextExpectedResult(kFirstFrameId,
- testing_clock_->NowTicks());
+ const base::TimeDelta target_playout_delay =
+ base::TimeDelta::FromMilliseconds(kPlayoutDelayMillis);
+ fake_video_client_.AddExpectedResult(
+ kFirstFrameId, testing_clock_->NowTicks() + target_playout_delay);
FeedOneFrameIntoReceiver();
task_runner_->RunTasks();
EXPECT_EQ(1, fake_video_client_.number_times_called());
@@ -144,19 +169,29 @@ TEST_F(VideoReceiverTest, GetOnePacketEncodedFrame) {
event_subscriber.GetFrameEventsAndReset(&frame_events);
ASSERT_TRUE(!frame_events.empty());
- EXPECT_EQ(kVideoAckSent, frame_events.begin()->type);
+ EXPECT_EQ(FRAME_ACK_SENT, frame_events.begin()->type);
EXPECT_EQ(rtp_header_.frame_id, frame_events.begin()->frame_id);
EXPECT_EQ(rtp_header_.rtp_timestamp, frame_events.begin()->rtp_timestamp);
cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber);
}
-TEST_F(VideoReceiverTest, MultiplePendingGetCalls) {
+TEST_F(VideoReceiverTest, ReceivesFramesRefusingToSkipAny) {
EXPECT_CALL(mock_transport_, SendRtcpPacket(_, _))
.WillRepeatedly(testing::Return(true));
- // Enqueue a request for an video frame.
- const VideoFrameEncodedCallback frame_encoded_callback =
+ const uint32 rtp_advance_per_frame =
+ config_.frequency / config_.max_frame_rate;
+ const base::TimeDelta time_advance_per_frame =
+ base::TimeDelta::FromSeconds(1) / config_.max_frame_rate;
+
+ // Feed and process lip sync in receiver.
+ FeedLipSyncInfoIntoReceiver();
+ task_runner_->RunTasks();
+ const base::TimeTicks first_frame_capture_time = testing_clock_->NowTicks();
+
+ // Enqueue a request for a video frame.
+ const FrameEncodedCallback frame_encoded_callback =
base::Bind(&FakeVideoClient::DeliverEncodedVideoFrame,
base::Unretained(&fake_video_client_));
receiver_->GetEncodedVideoFrame(frame_encoded_callback);
@@ -164,17 +199,16 @@ TEST_F(VideoReceiverTest, MultiplePendingGetCalls) {
EXPECT_EQ(0, fake_video_client_.number_times_called());
// Receive one video frame and expect to see the first request satisfied.
- fake_video_client_.SetNextExpectedResult(kFirstFrameId,
- testing_clock_->NowTicks());
- const base::TimeTicks time_at_first_frame_feed = testing_clock_->NowTicks();
+ const base::TimeDelta target_playout_delay =
+ base::TimeDelta::FromMilliseconds(kPlayoutDelayMillis);
+ fake_video_client_.AddExpectedResult(
+ kFirstFrameId, first_frame_capture_time + target_playout_delay);
+ rtp_header_.rtp_timestamp = 0;
FeedOneFrameIntoReceiver();
task_runner_->RunTasks();
EXPECT_EQ(1, fake_video_client_.number_times_called());
- testing_clock_->Advance(
- base::TimeDelta::FromSeconds(1) / config_.max_frame_rate);
-
- // Enqueue a second request for an video frame, but it should not be
+ // Enqueue a second request for a video frame, but it should not be
// fulfilled yet.
receiver_->GetEncodedVideoFrame(frame_encoded_callback);
task_runner_->RunTasks();
@@ -183,47 +217,47 @@ TEST_F(VideoReceiverTest, MultiplePendingGetCalls) {
// Receive one video frame out-of-order: Make sure that we are not continuous
// and that the RTP timestamp represents a time in the future.
rtp_header_.is_key_frame = false;
- rtp_header_.frame_id = kFirstFrameId + 2;
- rtp_header_.reference_frame_id = 0;
- rtp_header_.rtp_timestamp +=
- config_.rtp_max_delay_ms * kVideoFrequency / 1000;
- fake_video_client_.SetNextExpectedResult(
- kFirstFrameId + 2,
- time_at_first_frame_feed +
- base::TimeDelta::FromMilliseconds(config_.rtp_max_delay_ms));
+ rtp_header_.frame_id = kFirstFrameId + 2; // "Frame 3"
+ rtp_header_.reference_frame_id = kFirstFrameId + 1; // "Frame 2"
+ rtp_header_.rtp_timestamp += 2 * rtp_advance_per_frame;
FeedOneFrameIntoReceiver();
// Frame 2 should not come out at this point in time.
task_runner_->RunTasks();
EXPECT_EQ(1, fake_video_client_.number_times_called());
- // Enqueue a third request for an video frame.
+ // Enqueue a third request for a video frame.
receiver_->GetEncodedVideoFrame(frame_encoded_callback);
task_runner_->RunTasks();
EXPECT_EQ(1, fake_video_client_.number_times_called());
- // After |rtp_max_delay_ms| has elapsed, Frame 2 is emitted (to satisfy the
- // second request) because a decision was made to skip over the no-show Frame
- // 1.
- testing_clock_->Advance(
- base::TimeDelta::FromMilliseconds(config_.rtp_max_delay_ms));
+ // Now, advance time forward such that Frame 2 is now too late for playback.
+ // Regardless, the receiver must NOT emit Frame 3 yet because it is not
+ // allowed to skip frames for VP8.
+ testing_clock_->Advance(2 * time_advance_per_frame + target_playout_delay);
task_runner_->RunTasks();
- EXPECT_EQ(2, fake_video_client_.number_times_called());
-
- // Receive Frame 3 and expect it to fulfill the third request immediately.
- rtp_header_.frame_id = kFirstFrameId + 3;
- rtp_header_.reference_frame_id = rtp_header_.frame_id - 1;
- rtp_header_.rtp_timestamp += kVideoFrequency / config_.max_frame_rate;
- fake_video_client_.SetNextExpectedResult(kFirstFrameId + 3,
- testing_clock_->NowTicks());
+ EXPECT_EQ(1, fake_video_client_.number_times_called());
+
+ // Now receive Frame 2 and expect both the second and third requests to be
+ // fulfilled immediately.
+ fake_video_client_.AddExpectedResult(
+ kFirstFrameId + 1, // "Frame 2"
+ first_frame_capture_time + 1 * time_advance_per_frame +
+ target_playout_delay);
+ fake_video_client_.AddExpectedResult(
+ kFirstFrameId + 2, // "Frame 3"
+ first_frame_capture_time + 2 * time_advance_per_frame +
+ target_playout_delay);
+ --rtp_header_.frame_id; // "Frame 2"
+ --rtp_header_.reference_frame_id; // "Frame 1"
+ rtp_header_.rtp_timestamp -= rtp_advance_per_frame;
FeedOneFrameIntoReceiver();
task_runner_->RunTasks();
EXPECT_EQ(3, fake_video_client_.number_times_called());
- // Move forward another |rtp_max_delay_ms| and run any pending tasks (there
- // should be none). Expect no additional frames where emitted.
- testing_clock_->Advance(
- base::TimeDelta::FromMilliseconds(config_.rtp_max_delay_ms));
+ // Move forward to the playout time of an unreceived Frame 5. Expect no
+ // additional frames were emitted.
+ testing_clock_->Advance(3 * time_advance_per_frame);
task_runner_->RunTasks();
EXPECT_EQ(3, fake_video_client_.number_times_called());
}
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.cc b/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
index 38c7dfccaf..4905d3475e 100644
--- a/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
+++ b/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
@@ -135,7 +135,7 @@ void Vp8Encoder::InitEncode(int number_of_encode_threads) {
}
bool Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- transport::EncodedVideoFrame* encoded_image) {
+ transport::EncodedFrame* encoded_image) {
DCHECK(thread_checker_.CalledOnValidThread());
// Image in vpx_image_t format.
// Input image is const. VP8's raw image is not defined as const.
@@ -199,35 +199,34 @@ bool Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
// Get encoded frame.
const vpx_codec_cx_pkt_t* pkt = NULL;
vpx_codec_iter_t iter = NULL;
- size_t total_size = 0;
+ bool is_key_frame = false;
while ((pkt = vpx_codec_get_cx_data(encoder_.get(), &iter)) != NULL) {
- if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
- total_size += pkt->data.frame.sz;
- encoded_image->data.reserve(total_size);
- encoded_image->data.insert(
- encoded_image->data.end(),
- static_cast<const uint8*>(pkt->data.frame.buf),
- static_cast<const uint8*>(pkt->data.frame.buf) + pkt->data.frame.sz);
- if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
- encoded_image->key_frame = true;
- } else {
- encoded_image->key_frame = false;
- }
- }
+ if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
+ continue;
+ encoded_image->data.assign(
+ static_cast<const uint8*>(pkt->data.frame.buf),
+ static_cast<const uint8*>(pkt->data.frame.buf) + pkt->data.frame.sz);
+ is_key_frame = !!(pkt->data.frame.flags & VPX_FRAME_IS_KEY);
+ break; // Done, since all data is provided in one CX_FRAME_PKT packet.
}
// Don't update frame_id for zero size frames.
- if (total_size == 0)
+ if (encoded_image->data.empty())
return true;
// Populate the encoded frame.
- encoded_image->codec = transport::kVp8;
- encoded_image->last_referenced_frame_id = latest_frame_id_to_reference;
encoded_image->frame_id = ++last_encoded_frame_id_;
+ if (is_key_frame) {
+ encoded_image->dependency = transport::EncodedFrame::KEY;
+ encoded_image->referenced_frame_id = encoded_image->frame_id;
+ } else {
+ encoded_image->dependency = transport::EncodedFrame::DEPENDENT;
+ encoded_image->referenced_frame_id = latest_frame_id_to_reference;
+ }
- VLOG(1) << "VP8 encoded frame:" << static_cast<int>(encoded_image->frame_id)
- << " sized:" << total_size;
+ DVLOG(1) << "VP8 encoded frame_id " << encoded_image->frame_id
+ << ", sized:" << encoded_image->data.size();
- if (encoded_image->key_frame) {
+ if (is_key_frame) {
key_frame_requested_ = false;
for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.h b/media/cast/video_sender/codecs/vp8/vp8_encoder.h
index aff6215c87..82ef2c27e0 100644
--- a/media/cast/video_sender/codecs/vp8/vp8_encoder.h
+++ b/media/cast/video_sender/codecs/vp8/vp8_encoder.h
@@ -37,7 +37,7 @@ class Vp8Encoder : public SoftwareVideoEncoder {
// Encode a raw image (as a part of a video stream).
virtual bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- transport::EncodedVideoFrame* encoded_image) OVERRIDE;
+ transport::EncodedFrame* encoded_image) OVERRIDE;
// Update the encoder with a new target bit rate.
virtual void UpdateRates(uint32 new_bitrate) OVERRIDE;
diff --git a/media/cast/video_sender/external_video_encoder.cc b/media/cast/video_sender/external_video_encoder.cc
index 9280e534a4..1b31850c92 100644
--- a/media/cast/video_sender/external_video_encoder.cc
+++ b/media/cast/video_sender/external_video_encoder.cc
@@ -31,7 +31,8 @@ void LogFrameEncodedEvent(
media::cast::RtpTimestamp rtp_timestamp,
uint32 frame_id) {
cast_environment->Logging()->InsertFrameEvent(
- event_time, media::cast::kVideoFrameEncoded, rtp_timestamp, frame_id);
+ event_time, media::cast::FRAME_ENCODED, media::cast::VIDEO_EVENT,
+ rtp_timestamp, frame_id);
}
// Proxy this call to ExternalVideoEncoder on the cast main thread.
@@ -107,6 +108,9 @@ class LocalVideoEncodeAcceleratorClient
case transport::kFakeSoftwareVideo:
NOTREACHED() << "Fake software video encoder cannot be external";
break;
+ case transport::kUnknownVideoCodec:
+ NOTREACHED() << "Video codec not specified";
+ break;
}
codec_ = video_config.codec;
max_frame_rate_ = video_config.max_frame_rate;
@@ -130,9 +134,7 @@ class LocalVideoEncodeAcceleratorClient
DCHECK(encoder_task_runner_);
DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
- if (video_encode_accelerator_) {
- video_encode_accelerator_.release()->Destroy();
- }
+ video_encode_accelerator_.reset();
}
void SetBitRate(uint32 bit_rate) {
@@ -164,9 +166,7 @@ class LocalVideoEncodeAcceleratorClient
DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
VLOG(1) << "ExternalVideoEncoder NotifyError: " << error;
- if (video_encode_accelerator_) {
- video_encode_accelerator_.release()->Destroy();
- }
+ video_encode_accelerator_.reset();
cast_environment_->PostTask(
CastEnvironment::MAIN,
FROM_HERE,
@@ -221,21 +221,19 @@ class LocalVideoEncodeAcceleratorClient
stream_header_.append(static_cast<const char*>(output_buffer->memory()),
payload_size);
} else if (!encoded_frame_data_storage_.empty()) {
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame(
- new transport::EncodedVideoFrame());
-
- encoded_frame->codec = codec_;
- encoded_frame->key_frame = key_frame;
- encoded_frame->last_referenced_frame_id = last_encoded_frame_id_;
- last_encoded_frame_id_++;
- encoded_frame->frame_id = last_encoded_frame_id_;
- encoded_frame->rtp_timestamp = GetVideoRtpTimestamp(
- encoded_frame_data_storage_.front().capture_time);
- if (key_frame) {
- // Self referenced.
- encoded_frame->last_referenced_frame_id = encoded_frame->frame_id;
- }
-
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
+ encoded_frame->dependency = key_frame ? transport::EncodedFrame::KEY :
+ transport::EncodedFrame::DEPENDENT;
+ encoded_frame->frame_id = ++last_encoded_frame_id_;
+ if (key_frame)
+ encoded_frame->referenced_frame_id = encoded_frame->frame_id;
+ else
+ encoded_frame->referenced_frame_id = encoded_frame->frame_id - 1;
+ encoded_frame->reference_time =
+ encoded_frame_data_storage_.front().capture_time;
+ encoded_frame->rtp_timestamp =
+ GetVideoRtpTimestamp(encoded_frame->reference_time);
if (!stream_header_.empty()) {
encoded_frame->data = stream_header_;
stream_header_.clear();
@@ -256,8 +254,7 @@ class LocalVideoEncodeAcceleratorClient
CastEnvironment::MAIN,
FROM_HERE,
base::Bind(encoded_frame_data_storage_.front().frame_encoded_callback,
- base::Passed(&encoded_frame),
- encoded_frame_data_storage_.front().capture_time));
+ base::Passed(&encoded_frame)));
encoded_frame_data_storage_.pop_front();
} else {
diff --git a/media/cast/video_sender/external_video_encoder_unittest.cc b/media/cast/video_sender/external_video_encoder_unittest.cc
index 1f2e4dd057..20c97562d1 100644
--- a/media/cast/video_sender/external_video_encoder_unittest.cc
+++ b/media/cast/video_sender/external_video_encoder_unittest.cc
@@ -45,24 +45,26 @@ class TestVideoEncoderCallback
public:
TestVideoEncoderCallback() {}
- void SetExpectedResult(bool expected_key_frame,
- uint8 expected_frame_id,
- uint8 expected_last_referenced_frame_id,
+ void SetExpectedResult(uint32 expected_frame_id,
+ uint32 expected_last_referenced_frame_id,
const base::TimeTicks& expected_capture_time) {
- expected_key_frame_ = expected_key_frame;
expected_frame_id_ = expected_frame_id;
expected_last_referenced_frame_id_ = expected_last_referenced_frame_id;
expected_capture_time_ = expected_capture_time;
}
void DeliverEncodedVideoFrame(
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& capture_time) {
- EXPECT_EQ(expected_key_frame_, encoded_frame->key_frame);
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
+ if (expected_frame_id_ == expected_last_referenced_frame_id_) {
+ EXPECT_EQ(transport::EncodedFrame::KEY, encoded_frame->dependency);
+ } else {
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT,
+ encoded_frame->dependency);
+ }
EXPECT_EQ(expected_frame_id_, encoded_frame->frame_id);
EXPECT_EQ(expected_last_referenced_frame_id_,
- encoded_frame->last_referenced_frame_id);
- EXPECT_EQ(expected_capture_time_, capture_time);
+ encoded_frame->referenced_frame_id);
+ EXPECT_EQ(expected_capture_time_, encoded_frame->reference_time);
}
protected:
@@ -72,8 +74,8 @@ class TestVideoEncoderCallback
friend class base::RefCountedThreadSafe<TestVideoEncoderCallback>;
bool expected_key_frame_;
- uint8 expected_frame_id_;
- uint8 expected_last_referenced_frame_id_;
+ uint32 expected_frame_id_;
+ uint32 expected_last_referenced_frame_id_;
base::TimeTicks expected_capture_time_;
DISALLOW_COPY_AND_ASSIGN(TestVideoEncoderCallback);
@@ -145,15 +147,14 @@ TEST_F(ExternalVideoEncoderTest, EncodePattern30fpsRunningOutOfAck) {
base::TimeTicks capture_time;
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
for (int i = 0; i < 6; ++i) {
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(
- false, i + 1, i, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(i + 1, i, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -172,7 +173,7 @@ TEST_F(ExternalVideoEncoderTest, SkipNextFrame) {
base::TimeTicks capture_time;
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -188,8 +189,7 @@ TEST_F(ExternalVideoEncoderTest, SkipNextFrame) {
video_encoder_->SkipNextFrame(false);
for (int i = 0; i < 2; ++i) {
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(
- false, i + 1, i, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(i + 1, i, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -212,7 +212,7 @@ TEST_F(ExternalVideoEncoderTest, StreamHeader) {
// Verify the first returned bitstream buffer is still a key frame.
base::TimeTicks capture_time;
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
diff --git a/media/cast/video_sender/fake_software_video_encoder.cc b/media/cast/video_sender/fake_software_video_encoder.cc
index 0df0d6e642..ee8fe0fd00 100644
--- a/media/cast/video_sender/fake_software_video_encoder.cc
+++ b/media/cast/video_sender/fake_software_video_encoder.cc
@@ -13,10 +13,13 @@
namespace media {
namespace cast {
-FakeSoftwareVideoEncoder::FakeSoftwareVideoEncoder()
- : next_frame_is_key_(true),
+FakeSoftwareVideoEncoder::FakeSoftwareVideoEncoder(
+ const VideoSenderConfig& video_config)
+ : video_config_(video_config),
+ next_frame_is_key_(true),
frame_id_(0),
- frame_id_to_reference_(0) {
+ frame_id_to_reference_(0),
+ frame_size_(0) {
}
FakeSoftwareVideoEncoder::~FakeSoftwareVideoEncoder() {}
@@ -25,24 +28,30 @@ void FakeSoftwareVideoEncoder::Initialize() {}
bool FakeSoftwareVideoEncoder::Encode(
const scoped_refptr<media::VideoFrame>& video_frame,
- transport::EncodedVideoFrame* encoded_image) {
- encoded_image->codec = transport::kFakeSoftwareVideo;
- encoded_image->key_frame = next_frame_is_key_;
- next_frame_is_key_ = false;
+ transport::EncodedFrame* encoded_image) {
encoded_image->frame_id = frame_id_++;
- encoded_image->last_referenced_frame_id = frame_id_to_reference_;
+ if (next_frame_is_key_) {
+ encoded_image->dependency = transport::EncodedFrame::KEY;
+ encoded_image->referenced_frame_id = encoded_image->frame_id;
+ next_frame_is_key_ = false;
+ } else {
+ encoded_image->dependency = transport::EncodedFrame::DEPENDENT;
+ encoded_image->referenced_frame_id = encoded_image->frame_id - 1;
+ }
base::DictionaryValue values;
- values.Set("key", base::Value::CreateBooleanValue(encoded_image->key_frame));
- values.Set("id", base::Value::CreateIntegerValue(encoded_image->frame_id));
- values.Set("ref", base::Value::CreateIntegerValue(
- encoded_image->last_referenced_frame_id));
+ values.SetBoolean("key",
+ encoded_image->dependency == transport::EncodedFrame::KEY);
+ values.SetInteger("ref", encoded_image->referenced_frame_id);
+ values.SetInteger("id", encoded_image->frame_id);
+ values.SetInteger("size", frame_size_);
+ values.SetString("data", std::string(frame_size_, ' '));
base::JSONWriter::Write(&values, &encoded_image->data);
return true;
}
void FakeSoftwareVideoEncoder::UpdateRates(uint32 new_bitrate) {
- // TODO(hclam): Implement bitrate control.
+ frame_size_ = new_bitrate / video_config_.max_frame_rate / 8;
}
void FakeSoftwareVideoEncoder::GenerateKeyFrame() {
diff --git a/media/cast/video_sender/fake_software_video_encoder.h b/media/cast/video_sender/fake_software_video_encoder.h
index bcc5ed06d8..0eb88ddfe1 100644
--- a/media/cast/video_sender/fake_software_video_encoder.h
+++ b/media/cast/video_sender/fake_software_video_encoder.h
@@ -5,6 +5,7 @@
#ifndef MEDIA_CAST_VIDEO_SENDER_FAKE_SOFTWARE_VIDEO_ENCODER_H_
#define MEDIA_CAST_VIDEO_SENDER_FAKE_SOFTWARE_VIDEO_ENCODER_H_
+#include "media/cast/cast_config.h"
#include "media/cast/video_sender/software_video_encoder.h"
namespace media {
@@ -12,21 +13,23 @@ namespace cast {
class FakeSoftwareVideoEncoder : public SoftwareVideoEncoder {
public:
- FakeSoftwareVideoEncoder();
+ FakeSoftwareVideoEncoder(const VideoSenderConfig& video_config);
virtual ~FakeSoftwareVideoEncoder();
// SoftwareVideoEncoder implementations.
virtual void Initialize() OVERRIDE;
virtual bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- transport::EncodedVideoFrame* encoded_image) OVERRIDE;
+ transport::EncodedFrame* encoded_image) OVERRIDE;
virtual void UpdateRates(uint32 new_bitrate) OVERRIDE;
virtual void GenerateKeyFrame() OVERRIDE;
virtual void LatestFrameIdToReference(uint32 frame_id) OVERRIDE;
private:
+ VideoSenderConfig video_config_;
bool next_frame_is_key_;
uint32 frame_id_;
uint32 frame_id_to_reference_;
+ int frame_size_;
};
} // namespace cast
diff --git a/media/cast/video_sender/software_video_encoder.h b/media/cast/video_sender/software_video_encoder.h
index 3d63f20b3c..f1bf6f6331 100644
--- a/media/cast/video_sender/software_video_encoder.h
+++ b/media/cast/video_sender/software_video_encoder.h
@@ -15,7 +15,7 @@ class VideoFrame;
namespace media {
namespace cast {
namespace transport {
-struct EncodedVideoFrame;
+struct EncodedFrame;
} // namespace transport
class SoftwareVideoEncoder {
@@ -28,7 +28,7 @@ class SoftwareVideoEncoder {
// Encode a raw image (as a part of a video stream).
virtual bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- transport::EncodedVideoFrame* encoded_image) = 0;
+ transport::EncodedFrame* encoded_image) = 0;
// Update the encoder with a new target bit rate.
virtual void UpdateRates(uint32 new_bitrate) = 0;
diff --git a/media/cast/video_sender/video_encoder.h b/media/cast/video_sender/video_encoder.h
index 09a5b293df..48d63ab948 100644
--- a/media/cast/video_sender/video_encoder.h
+++ b/media/cast/video_sender/video_encoder.h
@@ -20,8 +20,8 @@ namespace cast {
// All these functions are called from the main cast thread.
class VideoEncoder {
public:
- typedef base::Callback<void(scoped_ptr<transport::EncodedVideoFrame>,
- const base::TimeTicks&)> FrameEncodedCallback;
+ typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+ FrameEncodedCallback;
virtual ~VideoEncoder() {}
diff --git a/media/cast/video_sender/video_encoder_impl.cc b/media/cast/video_sender/video_encoder_impl.cc
index 2eceda30af..039813b129 100644
--- a/media/cast/video_sender/video_encoder_impl.cc
+++ b/media/cast/video_sender/video_encoder_impl.cc
@@ -43,25 +43,24 @@ void EncodeVideoFrameOnEncoderThread(
dynamic_config.latest_frame_id_to_reference);
encoder->UpdateRates(dynamic_config.bit_rate);
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame(
- new transport::EncodedVideoFrame());
- bool retval = encoder->Encode(video_frame, encoded_frame.get());
-
- encoded_frame->rtp_timestamp = transport::GetVideoRtpTimestamp(capture_time);
-
- if (!retval) {
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
+ if (!encoder->Encode(video_frame, encoded_frame.get())) {
VLOG(1) << "Encoding failed";
return;
}
- if (encoded_frame->data.size() <= 0) {
+ if (encoded_frame->data.empty()) {
VLOG(1) << "Encoding resulted in an empty frame";
return;
}
+ encoded_frame->rtp_timestamp = transport::GetVideoRtpTimestamp(capture_time);
+ encoded_frame->reference_time = capture_time;
+
environment->PostTask(
CastEnvironment::MAIN,
FROM_HERE,
base::Bind(
- frame_encoded_callback, base::Passed(&encoded_frame), capture_time));
+ frame_encoded_callback, base::Passed(&encoded_frame)));
}
} // namespace
@@ -82,7 +81,7 @@ VideoEncoderImpl::VideoEncoderImpl(
encoder_.get()));
#ifndef OFFICIAL_BUILD
} else if (video_config.codec == transport::kFakeSoftwareVideo) {
- encoder_.reset(new FakeSoftwareVideoEncoder());
+ encoder_.reset(new FakeSoftwareVideoEncoder(video_config));
#endif
} else {
DCHECK(false) << "Invalid config"; // Codec not supported.
diff --git a/media/cast/video_sender/video_encoder_impl.h b/media/cast/video_sender/video_encoder_impl.h
index 4bc0a835d1..47265c1f0b 100644
--- a/media/cast/video_sender/video_encoder_impl.h
+++ b/media/cast/video_sender/video_encoder_impl.h
@@ -26,8 +26,8 @@ class VideoEncoderImpl : public VideoEncoder {
int bit_rate;
};
- typedef base::Callback<void(scoped_ptr<transport::EncodedVideoFrame>,
- const base::TimeTicks&)> FrameEncodedCallback;
+ typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+ FrameEncodedCallback;
VideoEncoderImpl(scoped_refptr<CastEnvironment> cast_environment,
const VideoSenderConfig& video_config,
diff --git a/media/cast/video_sender/video_encoder_impl_unittest.cc b/media/cast/video_sender/video_encoder_impl_unittest.cc
index 05772398f4..b1a5cb8cef 100644
--- a/media/cast/video_sender/video_encoder_impl_unittest.cc
+++ b/media/cast/video_sender/video_encoder_impl_unittest.cc
@@ -26,24 +26,26 @@ class TestVideoEncoderCallback
public:
TestVideoEncoderCallback() {}
- void SetExpectedResult(bool expected_key_frame,
- uint8 expected_frame_id,
- uint8 expected_last_referenced_frame_id,
+ void SetExpectedResult(uint32 expected_frame_id,
+ uint32 expected_last_referenced_frame_id,
const base::TimeTicks& expected_capture_time) {
- expected_key_frame_ = expected_key_frame;
expected_frame_id_ = expected_frame_id;
expected_last_referenced_frame_id_ = expected_last_referenced_frame_id;
expected_capture_time_ = expected_capture_time;
}
void DeliverEncodedVideoFrame(
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& capture_time) {
- EXPECT_EQ(expected_key_frame_, encoded_frame->key_frame);
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
+ if (expected_frame_id_ == expected_last_referenced_frame_id_) {
+ EXPECT_EQ(transport::EncodedFrame::KEY, encoded_frame->dependency);
+ } else {
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT,
+ encoded_frame->dependency);
+ }
EXPECT_EQ(expected_frame_id_, encoded_frame->frame_id);
EXPECT_EQ(expected_last_referenced_frame_id_,
- encoded_frame->last_referenced_frame_id);
- EXPECT_EQ(expected_capture_time_, capture_time);
+ encoded_frame->referenced_frame_id);
+ EXPECT_EQ(expected_capture_time_, encoded_frame->reference_time);
}
protected:
@@ -52,9 +54,8 @@ class TestVideoEncoderCallback
private:
friend class base::RefCountedThreadSafe<TestVideoEncoderCallback>;
- bool expected_key_frame_;
- uint8 expected_frame_id_;
- uint8 expected_last_referenced_frame_id_;
+ uint32 expected_frame_id_;
+ uint32 expected_last_referenced_frame_id_;
base::TimeTicks expected_capture_time_;
DISALLOW_COPY_AND_ASSIGN(TestVideoEncoderCallback);
@@ -128,21 +129,21 @@ TEST_F(VideoEncoderImplTest, EncodePattern30fpsRunningOutOfAck) {
base::TimeTicks capture_time;
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
capture_time += base::TimeDelta::FromMilliseconds(33);
video_encoder_->LatestFrameIdToReference(0);
- test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(1, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
capture_time += base::TimeDelta::FromMilliseconds(33);
video_encoder_->LatestFrameIdToReference(1);
- test_video_encoder_callback_->SetExpectedResult(false, 2, 1, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(2, 1, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -151,7 +152,7 @@ TEST_F(VideoEncoderImplTest, EncodePattern30fpsRunningOutOfAck) {
for (int i = 3; i < 6; ++i) {
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(i, 2, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -170,21 +171,21 @@ TEST_F(VideoEncoderImplTest, DISABLED_EncodePattern60fpsRunningOutOfAck) {
test_video_encoder_callback_.get());
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_->LatestFrameIdToReference(0);
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(1, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_->LatestFrameIdToReference(1);
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(2, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -193,7 +194,7 @@ TEST_F(VideoEncoderImplTest, DISABLED_EncodePattern60fpsRunningOutOfAck) {
for (int i = 3; i < 9; ++i) {
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(i, 2, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -212,35 +213,35 @@ TEST_F(VideoEncoderImplTest,
test_video_encoder_callback_.get());
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_->LatestFrameIdToReference(0);
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(1, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_->LatestFrameIdToReference(1);
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(2, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_->LatestFrameIdToReference(2);
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 3, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(3, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_->LatestFrameIdToReference(3);
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 4, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(4, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -248,7 +249,7 @@ TEST_F(VideoEncoderImplTest,
video_encoder_->LatestFrameIdToReference(4);
for (int i = 5; i < 17; ++i) {
- test_video_encoder_callback_->SetExpectedResult(false, i, 4, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(i, 4, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
diff --git a/media/cast/video_sender/video_sender.cc b/media/cast/video_sender/video_sender.cc
index 07e34d5c53..57b8ea7299 100644
--- a/media/cast/video_sender/video_sender.cc
+++ b/media/cast/video_sender/video_sender.cc
@@ -13,7 +13,6 @@
#include "base/message_loop/message_loop.h"
#include "media/cast/cast_defines.h"
#include "media/cast/rtcp/rtcp_defines.h"
-#include "media/cast/rtcp/sender_rtcp_event_subscriber.h"
#include "media/cast/transport/cast_transport_config.h"
#include "media/cast/video_sender/external_video_encoder.h"
#include "media/cast/video_sender/video_encoder_impl.h"
@@ -21,28 +20,8 @@
namespace media {
namespace cast {
-const int64 kMinSchedulingDelayMs = 1;
-
-// This is the maxmimum number of sender frame log messages that can fit in a
-// single RTCP packet.
-const int64 kMaxEventSubscriberEntries =
- (kMaxIpPacketSize - kRtcpCastLogHeaderSize) / kRtcpSenderFrameLogSize;
-
-class LocalRtcpVideoSenderFeedback : public RtcpSenderFeedback {
- public:
- explicit LocalRtcpVideoSenderFeedback(VideoSender* video_sender)
- : video_sender_(video_sender) {}
-
- virtual void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback)
- OVERRIDE {
- video_sender_->OnReceivedCastFeedback(cast_feedback);
- }
-
- private:
- VideoSender* video_sender_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(LocalRtcpVideoSenderFeedback);
-};
+const int kNumAggressiveReportsSentAtStart = 100;
+const int kMinSchedulingDelayMs = 1;
VideoSender::VideoSender(
scoped_refptr<CastEnvironment> cast_environment,
@@ -56,9 +35,8 @@ VideoSender::VideoSender(
max_frame_rate_(video_config.max_frame_rate),
cast_environment_(cast_environment),
transport_sender_(transport_sender),
- event_subscriber_(kMaxEventSubscriberEntries),
- rtp_stats_(kVideoFrequency),
- rtcp_feedback_(new LocalRtcpVideoSenderFeedback(this)),
+ rtp_timestamp_helper_(kVideoFrequency),
+ num_aggressive_rtcp_reports_sent_(0),
last_acked_frame_id_(-1),
last_sent_frame_id_(-1),
frames_in_encoder_(0),
@@ -98,7 +76,7 @@ VideoSender::VideoSender(
rtcp_.reset(
new Rtcp(cast_environment_,
- rtcp_feedback_.get(),
+ this,
transport_sender_,
NULL, // paced sender.
NULL,
@@ -106,7 +84,8 @@ VideoSender::VideoSender(
base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
video_config.rtp_config.ssrc,
video_config.incoming_feedback_ssrc,
- video_config.rtcp_c_name));
+ video_config.rtcp_c_name,
+ false));
rtcp_->SetCastReceiverEventHistorySize(kReceiverRtcpEventHistorySize);
// TODO(pwestin): pass cast_initialization_cb to |video_encoder_|
@@ -115,16 +94,11 @@ VideoSender::VideoSender(
CastEnvironment::MAIN,
FROM_HERE,
base::Bind(cast_initialization_cb, STATUS_VIDEO_INITIALIZED));
- cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_);
memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_));
-
- transport_sender_->SubscribeVideoRtpStatsCallback(
- base::Bind(&VideoSender::StoreStatistics, weak_factory_.GetWeakPtr()));
}
VideoSender::~VideoSender() {
- cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_);
}
void VideoSender::InitializeTimers() {
@@ -144,10 +118,11 @@ void VideoSender::InsertRawVideoFrame(
RtpTimestamp rtp_timestamp = GetVideoRtpTimestamp(capture_time);
cast_environment_->Logging()->InsertFrameEvent(
- capture_time, kVideoFrameCaptureBegin, rtp_timestamp, kFrameIdUnknown);
+ capture_time, FRAME_CAPTURE_BEGIN, VIDEO_EVENT,
+ rtp_timestamp, kFrameIdUnknown);
cast_environment_->Logging()->InsertFrameEvent(
cast_environment_->Clock()->NowTicks(),
- kVideoFrameCaptureEnd,
+ FRAME_CAPTURE_END, VIDEO_EVENT,
rtp_timestamp,
kFrameIdUnknown);
@@ -156,47 +131,66 @@ void VideoSender::InsertRawVideoFrame(
"cast_perf_test", "InsertRawVideoFrame",
TRACE_EVENT_SCOPE_THREAD,
"timestamp", capture_time.ToInternalValue(),
- "rtp_timestamp", GetVideoRtpTimestamp(capture_time));
+ "rtp_timestamp", rtp_timestamp);
if (video_encoder_->EncodeVideoFrame(
video_frame,
capture_time,
base::Bind(&VideoSender::SendEncodedVideoFrameMainThread,
- weak_factory_.GetWeakPtr()))) {
+ weak_factory_.GetWeakPtr(),
+ current_requested_bitrate_))) {
frames_in_encoder_++;
UpdateFramesInFlight();
}
}
void VideoSender::SendEncodedVideoFrameMainThread(
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& capture_time) {
+ int requested_bitrate_before_encode,
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
last_send_time_ = cast_environment_->Clock()->NowTicks();
- if (encoded_frame->key_frame) {
- VLOG(1) << "Send encoded key frame; frame_id:"
- << static_cast<int>(encoded_frame->frame_id);
- }
+ VLOG_IF(1, encoded_frame->dependency == transport::EncodedFrame::KEY)
+ << "Send encoded key frame; frame_id: " << encoded_frame->frame_id;
DCHECK_GT(frames_in_encoder_, 0);
frames_in_encoder_--;
uint32 frame_id = encoded_frame->frame_id;
cast_environment_->Logging()->InsertEncodedFrameEvent(
- last_send_time_, kVideoFrameEncoded, encoded_frame->rtp_timestamp,
+ last_send_time_, FRAME_ENCODED, VIDEO_EVENT, encoded_frame->rtp_timestamp,
frame_id, static_cast<int>(encoded_frame->data.size()),
- encoded_frame->key_frame, current_requested_bitrate_);
+ encoded_frame->dependency == transport::EncodedFrame::KEY,
+ requested_bitrate_before_encode);
// Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
TRACE_EVENT_INSTANT1(
"cast_perf_test", "VideoFrameEncoded",
TRACE_EVENT_SCOPE_THREAD,
- "rtp_timestamp", GetVideoRtpTimestamp(capture_time));
+ "rtp_timestamp", encoded_frame->rtp_timestamp);
// Only use lowest 8 bits as key.
frame_id_to_rtp_timestamp_[frame_id & 0xff] = encoded_frame->rtp_timestamp;
last_sent_frame_id_ = static_cast<int>(encoded_frame->frame_id);
- transport_sender_->InsertCodedVideoFrame(encoded_frame.get(), capture_time);
+ DCHECK(!encoded_frame->reference_time.is_null());
+ rtp_timestamp_helper_.StoreLatestTime(encoded_frame->reference_time,
+ encoded_frame->rtp_timestamp);
+
+ // At the start of the session, it's important to send reports before each
+ // frame so that the receiver can properly compute playout times. The reason
+ // more than one report is sent is because transmission is not guaranteed,
+ // only best effort, so send enough that one should almost certainly get
+ // through.
+ if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
+ // SendRtcpReport() will schedule future reports to be made if this is the
+ // last "aggressive report."
+ ++num_aggressive_rtcp_reports_sent_;
+ const bool is_last_aggressive_report =
+ (num_aggressive_rtcp_reports_sent_ == kNumAggressiveReportsSentAtStart);
+ VLOG_IF(1, is_last_aggressive_report) << "Sending last aggressive report.";
+ SendRtcpReport(is_last_aggressive_report);
+ }
+
+ transport_sender_->InsertCodedVideoFrame(*encoded_frame);
UpdateFramesInFlight();
InitializeTimers();
}
@@ -217,64 +211,25 @@ void VideoSender::ScheduleNextRtcpReport() {
cast_environment_->PostDelayedTask(
CastEnvironment::MAIN,
FROM_HERE,
- base::Bind(&VideoSender::SendRtcpReport, weak_factory_.GetWeakPtr()),
+ base::Bind(&VideoSender::SendRtcpReport,
+ weak_factory_.GetWeakPtr(),
+ true),
time_to_next);
}
-void VideoSender::StoreStatistics(
- const transport::RtcpSenderInfo& sender_info,
- base::TimeTicks time_sent,
- uint32 rtp_timestamp) {
- rtp_stats_.Store(sender_info, time_sent, rtp_timestamp);
-}
-
-void VideoSender::SendRtcpReport() {
+void VideoSender::SendRtcpReport(bool schedule_future_reports) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-
- transport::RtcpSenderLogMessage sender_log_message;
- RtcpEventMap rtcp_events;
- event_subscriber_.GetRtcpEventsAndReset(&rtcp_events);
-
- for (RtcpEventMap::iterator it = rtcp_events.begin(); it != rtcp_events.end();
- ++it) {
- CastLoggingEvent event_type = it->second.type;
- if (event_type == kVideoFrameCaptureBegin ||
- event_type == kVideoFrameSentToEncoder ||
- event_type == kVideoFrameEncoded) {
- transport::RtcpSenderFrameLogMessage frame_message;
- frame_message.rtp_timestamp = it->first;
- switch (event_type) {
- case kVideoFrameCaptureBegin:
- frame_message.frame_status =
- transport::kRtcpSenderFrameStatusDroppedByFlowControl;
- break;
- case kVideoFrameSentToEncoder:
- frame_message.frame_status =
- transport::kRtcpSenderFrameStatusDroppedByEncoder;
- break;
- case kVideoFrameEncoded:
- frame_message.frame_status =
- transport::kRtcpSenderFrameStatusSentToNetwork;
- break;
- default:
- NOTREACHED();
- break;
- }
- sender_log_message.push_back(frame_message);
- } else {
- // This shouldn't happen because RtcpEventMap isn't supposed to contain
- // other event types.
- NOTREACHED() << "Got unknown event type in RtcpEventMap: " << event_type;
- }
- }
-
- rtp_stats_.UpdateInfo(cast_environment_->Clock()->NowTicks());
-
- rtcp_->SendRtcpFromRtpSender(sender_log_message, rtp_stats_.sender_info());
- if (!sender_log_message.empty()) {
- VLOG(1) << "Failed to send all log messages";
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ uint32 now_as_rtp_timestamp = 0;
+ if (rtp_timestamp_helper_.GetCurrentTimeAsRtpTimestamp(
+ now, &now_as_rtp_timestamp)) {
+ rtcp_->SendRtcpFromRtpSender(now, now_as_rtp_timestamp);
+ } else {
+ // |rtp_timestamp_helper_| should have stored a mapping by this point.
+ NOTREACHED();
}
- ScheduleNextRtcpReport();
+ if (schedule_future_reports)
+ ScheduleNextRtcpReport();
}
void VideoSender::ScheduleNextResendCheck() {
@@ -365,6 +320,16 @@ void VideoSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
if (rtcp_->Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt)) {
// Don't use a RTT lower than our average.
rtt = std::max(rtt, avg_rtt);
+
+ // Having the RTT values implies the receiver sent back a receiver report
+ // based on it having received a report from here. Therefore, ensure this
+ // sender stops aggressively sending reports.
+ if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
+ VLOG(1) << "No longer a need to send reports aggressively (sent "
+ << num_aggressive_rtcp_reports_sent_ << ").";
+ num_aggressive_rtcp_reports_sent_ = kNumAggressiveReportsSentAtStart;
+ ScheduleNextRtcpReport();
+ }
} else {
// We have no measured value use default.
rtt = base::TimeDelta::FromMilliseconds(kStartRttMs);
@@ -381,8 +346,7 @@ void VideoSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
cast_feedback.ack_frame_id_) {
uint32 new_bitrate = 0;
if (congestion_control_.OnAck(rtt, &new_bitrate)) {
- video_encoder_->SetBitRate(new_bitrate);
- current_requested_bitrate_ = new_bitrate;
+ UpdateBitrate(new_bitrate);
}
}
// We only count duplicate ACKs when we have sent newer frames.
@@ -408,8 +372,7 @@ void VideoSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
false, cast_feedback.missing_frames_and_packets_);
uint32 new_bitrate = 0;
if (congestion_control_.OnNack(rtt, &new_bitrate)) {
- video_encoder_->SetBitRate(new_bitrate);
- current_requested_bitrate_ = new_bitrate;
+ UpdateBitrate(new_bitrate);
}
}
ReceivedAck(cast_feedback.ack_frame_id_);
@@ -422,18 +385,13 @@ void VideoSender::ReceivedAck(uint32 acked_frame_id) {
// be acked. Ignore.
return;
}
- // Start sending RTCP packets only after receiving the first ACK, i.e. only
- // after establishing that the receiver is active.
- if (last_acked_frame_id_ == -1) {
- ScheduleNextRtcpReport();
- }
last_acked_frame_id_ = static_cast<int>(acked_frame_id);
base::TimeTicks now = cast_environment_->Clock()->NowTicks();
RtpTimestamp rtp_timestamp =
frame_id_to_rtp_timestamp_[acked_frame_id & 0xff];
cast_environment_->Logging()->InsertFrameEvent(
- now, kVideoAckReceived, rtp_timestamp, acked_frame_id);
+ now, FRAME_ACK_RECEIVED, VIDEO_EVENT, rtp_timestamp, acked_frame_id);
VLOG(2) << "ReceivedAck:" << static_cast<int>(acked_frame_id);
active_session_ = true;
@@ -445,13 +403,12 @@ void VideoSender::UpdateFramesInFlight() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (last_sent_frame_id_ != -1) {
DCHECK_LE(0, last_sent_frame_id_);
- uint32 frames_in_flight = 0;
+ int frames_in_flight = 0;
if (last_acked_frame_id_ != -1) {
DCHECK_LE(0, last_acked_frame_id_);
- frames_in_flight = static_cast<uint32>(last_sent_frame_id_) -
- static_cast<uint32>(last_acked_frame_id_);
+ frames_in_flight = last_sent_frame_id_ - last_acked_frame_id_;
} else {
- frames_in_flight = static_cast<uint32>(last_sent_frame_id_) + 1;
+ frames_in_flight = last_sent_frame_id_ + 1;
}
frames_in_flight += frames_in_encoder_;
VLOG(2) << frames_in_flight
@@ -476,5 +433,12 @@ void VideoSender::ResendFrame(uint32 resend_frame_id) {
transport_sender_->ResendPackets(false, missing_frames_and_packets);
}
+void VideoSender::UpdateBitrate(int new_bitrate) {
+ // Make sure we don't set the bitrate too insanely low.
+ DCHECK_GT(new_bitrate, 1000);
+ video_encoder_->SetBitRate(new_bitrate);
+ current_requested_bitrate_ = new_bitrate;
+}
+
} // namespace cast
} // namespace media
diff --git a/media/cast/video_sender/video_sender.h b/media/cast/video_sender/video_sender.h
index 89ff938ae6..59792f7693 100644
--- a/media/cast/video_sender/video_sender.h
+++ b/media/cast/video_sender/video_sender.h
@@ -17,13 +17,12 @@
#include "media/cast/congestion_control/congestion_control.h"
#include "media/cast/logging/logging_defines.h"
#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtcp/sender_rtcp_event_subscriber.h"
+#include "media/cast/rtp_timestamp_helper.h"
namespace media {
class VideoFrame;
namespace cast {
-class LocalRtcpVideoSenderFeedback;
class LocalVideoEncoderCallback;
class VideoEncoder;
@@ -37,7 +36,8 @@ class CastTransportSender;
// RTCP packets.
// Additionally it posts a bunch of delayed tasks to the main thread for various
// timeouts.
-class VideoSender : public base::NonThreadSafe,
+class VideoSender : public RtcpSenderFeedback,
+ public base::NonThreadSafe,
public base::SupportsWeakPtr<VideoSender> {
public:
VideoSender(scoped_refptr<CastEnvironment> cast_environment,
@@ -59,14 +59,10 @@ class VideoSender : public base::NonThreadSafe,
// Only called from the main cast thread.
void IncomingRtcpPacket(scoped_ptr<Packet> packet);
- // Store rtp stats computed at the Cast transport sender.
- void StoreStatistics(const transport::RtcpSenderInfo& sender_info,
- base::TimeTicks time_sent,
- uint32 rtp_timestamp);
-
protected:
// Protected for testability.
- void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback);
+ virtual void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback)
+ OVERRIDE;
private:
friend class LocalRtcpVideoSenderFeedback;
@@ -74,7 +70,7 @@ class VideoSender : public base::NonThreadSafe,
// Schedule when we should send the next RTPC report,
// via a PostDelayedTask to the main cast thread.
void ScheduleNextRtcpReport();
- void SendRtcpReport();
+ void SendRtcpReport(bool schedule_future_reports);
// Schedule when we should check that we have received an acknowledgment, or a
// loss report from our remote peer. If we have not heard back from our remote
@@ -90,31 +86,28 @@ class VideoSender : public base::NonThreadSafe,
void ScheduleNextSkippedFramesCheck();
void SkippedFramesCheck();
- void SendEncodedVideoFrame(const transport::EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time);
void ResendFrame(uint32 resend_frame_id);
void ReceivedAck(uint32 acked_frame_id);
void UpdateFramesInFlight();
void SendEncodedVideoFrameMainThread(
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& capture_time);
+ int requested_bitrate_before_encode,
+ scoped_ptr<transport::EncodedFrame> encoded_frame);
void InitializeTimers();
+ void UpdateBitrate(int32 new_bitrate);
+
base::TimeDelta rtp_max_delay_;
const int max_frame_rate_;
scoped_refptr<CastEnvironment> cast_environment_;
transport::CastTransportSender* const transport_sender_;
- // Subscribes to raw events.
- // Processes raw audio events to be sent over to the cast receiver via RTCP.
- SenderRtcpEventSubscriber event_subscriber_;
- RtpSenderStatistics rtp_stats_;
- scoped_ptr<LocalRtcpVideoSenderFeedback> rtcp_feedback_;
+ RtpTimestampHelper rtp_timestamp_helper_;
scoped_ptr<VideoEncoder> video_encoder_;
scoped_ptr<Rtcp> rtcp_;
+ int num_aggressive_rtcp_reports_sent_;
uint8 max_unacked_frames_;
int last_acked_frame_id_;
int last_sent_frame_id_;
@@ -124,6 +117,9 @@ class VideoSender : public base::NonThreadSafe,
base::TimeTicks last_checked_skip_count_time_;
int last_skip_count_;
int current_requested_bitrate_;
+ // When we get close to the max number of un-acked frames, we set lower
+ // the bitrate drastically to ensure that we catch up. Without this we
+ // risk getting stuck in a catch-up state forever.
CongestionControl congestion_control_;
// This is a "good enough" mapping for finding the RTP timestamp associated
diff --git a/media/cast/video_sender/video_sender_unittest.cc b/media/cast/video_sender/video_sender_unittest.cc
index d8b074047d..a7987a2a0c 100644
--- a/media/cast/video_sender/video_sender_unittest.cc
+++ b/media/cast/video_sender/video_sender_unittest.cc
@@ -27,7 +27,6 @@ namespace media {
namespace cast {
namespace {
-static const int64 kStartMillisecond = INT64_C(12345678900000);
static const uint8 kPixelValue = 123;
static const int kWidth = 320;
static const int kHeight = 240;
@@ -62,6 +61,12 @@ class TestPacketSender : public transport::PacketSender {
if (Rtcp::IsRtcpPacket(&packet->data[0], packet->data.size())) {
++number_of_rtcp_packets_;
} else {
+ // Check that at least one RTCP packet was sent before the first RTP
+ // packet. This confirms that the receiver will have the necessary lip
+ // sync info before it has to calculate the playout time of the first
+ // frame.
+ if (number_of_rtp_packets_ == 0)
+ EXPECT_LE(1, number_of_rtcp_packets_);
++number_of_rtp_packets_;
}
return true;
@@ -101,8 +106,7 @@ class VideoSenderTest : public ::testing::Test {
protected:
VideoSenderTest() {
testing_clock_ = new base::SimpleTestTickClock();
- testing_clock_->Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
cast_environment_ =
new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
@@ -129,7 +133,7 @@ class VideoSenderTest : public ::testing::Test {
}
static void UpdateCastTransportStatus(transport::CastTransportStatus status) {
- EXPECT_EQ(status, transport::TRANSPORT_VIDEO_INITIALIZED);
+ EXPECT_EQ(transport::TRANSPORT_VIDEO_INITIALIZED, status);
}
void InitEncoder(bool external) {
@@ -193,7 +197,7 @@ class VideoSenderTest : public ::testing::Test {
}
void InitializationResult(CastInitializationStatus result) {
- EXPECT_EQ(result, STATUS_VIDEO_INITIALIZED);
+ EXPECT_EQ(STATUS_VIDEO_INITIALIZED, result);
}
base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
@@ -210,13 +214,12 @@ TEST_F(VideoSenderTest, BuiltInEncoder) {
InitEncoder(false);
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
+ const base::TimeTicks capture_time = testing_clock_->NowTicks();
video_sender_->InsertRawVideoFrame(video_frame, capture_time);
task_runner_->RunTasks();
- EXPECT_GE(
- transport_.number_of_rtp_packets() + transport_.number_of_rtcp_packets(),
- 1);
+ EXPECT_LE(1, transport_.number_of_rtp_packets());
+ EXPECT_LE(1, transport_.number_of_rtcp_packets());
}
TEST_F(VideoSenderTest, ExternalEncoder) {
@@ -225,7 +228,7 @@ TEST_F(VideoSenderTest, ExternalEncoder) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
+ const base::TimeTicks capture_time = testing_clock_->NowTicks();
video_sender_->InsertRawVideoFrame(video_frame, capture_time);
task_runner_->RunTasks();
@@ -240,7 +243,7 @@ TEST_F(VideoSenderTest, RtcpTimer) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
+ const base::TimeTicks capture_time = testing_clock_->NowTicks();
video_sender_->InsertRawVideoFrame(video_frame, capture_time);
// Make sure that we send at least one RTCP packet.
@@ -248,16 +251,15 @@ TEST_F(VideoSenderTest, RtcpTimer) {
base::TimeDelta::FromMilliseconds(1 + kDefaultRtcpIntervalMs * 3 / 2);
RunTasks(max_rtcp_timeout.InMilliseconds());
- EXPECT_GE(transport_.number_of_rtp_packets(), 1);
- // Don't send RTCP prior to receiving an ACK.
- EXPECT_GE(transport_.number_of_rtcp_packets(), 0);
+ EXPECT_LE(1, transport_.number_of_rtp_packets());
+ EXPECT_LE(1, transport_.number_of_rtcp_packets());
// Build Cast msg and expect RTCP packet.
RtcpCastMessage cast_feedback(1);
cast_feedback.media_ssrc_ = 2;
cast_feedback.ack_frame_id_ = 0;
video_sender_->OnReceivedCastFeedback(cast_feedback);
RunTasks(max_rtcp_timeout.InMilliseconds());
- EXPECT_GE(transport_.number_of_rtcp_packets(), 1);
+ EXPECT_LE(1, transport_.number_of_rtcp_packets());
}
TEST_F(VideoSenderTest, ResendTimer) {
@@ -265,7 +267,7 @@ TEST_F(VideoSenderTest, ResendTimer) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
+ const base::TimeTicks capture_time = testing_clock_->NowTicks();
video_sender_->InsertRawVideoFrame(video_frame, capture_time);
// ACK the key frame.
@@ -283,9 +285,9 @@ TEST_F(VideoSenderTest, ResendTimer) {
// Make sure that we do a re-send.
RunTasks(max_resend_timeout.InMilliseconds());
// Should have sent at least 3 packets.
- EXPECT_GE(
- transport_.number_of_rtp_packets() + transport_.number_of_rtcp_packets(),
- 3);
+ EXPECT_LE(
+ 3,
+ transport_.number_of_rtp_packets() + transport_.number_of_rtcp_packets());
}
TEST_F(VideoSenderTest, LogAckReceivedEvent) {
@@ -297,7 +299,7 @@ TEST_F(VideoSenderTest, LogAckReceivedEvent) {
for (int i = 0; i < num_frames; i++) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
+ const base::TimeTicks capture_time = testing_clock_->NowTicks();
video_sender_->InsertRawVideoFrame(video_frame, capture_time);
RunTasks(33);
}
@@ -313,7 +315,8 @@ TEST_F(VideoSenderTest, LogAckReceivedEvent) {
event_subscriber.GetFrameEventsAndReset(&frame_events);
ASSERT_TRUE(!frame_events.empty());
- EXPECT_EQ(kVideoAckReceived, frame_events.rbegin()->type);
+ EXPECT_EQ(FRAME_ACK_RECEIVED, frame_events.rbegin()->type);
+ EXPECT_EQ(VIDEO_EVENT, frame_events.rbegin()->media_type);
EXPECT_EQ(num_frames - 1u, frame_events.rbegin()->frame_id);
cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber);
@@ -325,15 +328,13 @@ TEST_F(VideoSenderTest, StopSendingIntheAbsenceOfAck) {
// than 4 frames in flight.
// Store size in packets of frame 0, as it should be resent sue to timeout.
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
- video_sender_->InsertRawVideoFrame(video_frame, capture_time);
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
RunTasks(33);
const int size_of_frame0 = transport_.number_of_rtp_packets();
for (int i = 1; i < 4; ++i) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
- video_sender_->InsertRawVideoFrame(video_frame, capture_time);
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
RunTasks(33);
}
@@ -342,8 +343,7 @@ TEST_F(VideoSenderTest, StopSendingIntheAbsenceOfAck) {
// received any acks.
for (int i = 0; i < 3; ++i) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
- video_sender_->InsertRawVideoFrame(video_frame, capture_time);
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
RunTasks(33);
}
@@ -355,16 +355,16 @@ TEST_F(VideoSenderTest, StopSendingIntheAbsenceOfAck) {
cast_feedback.media_ssrc_ = 2;
cast_feedback.ack_frame_id_ = 0;
video_sender_->OnReceivedCastFeedback(cast_feedback);
- EXPECT_GE(
- transport_.number_of_rtp_packets() + transport_.number_of_rtcp_packets(),
- 4);
+ EXPECT_LE(
+ 4,
+ transport_.number_of_rtp_packets() + transport_.number_of_rtcp_packets());
// Empty the pipeline.
RunTasks(100);
// Should have sent at least 7 packets.
- EXPECT_GE(
- transport_.number_of_rtp_packets() + transport_.number_of_rtcp_packets(),
- 7);
+ EXPECT_LE(
+ 7,
+ transport_.number_of_rtp_packets() + transport_.number_of_rtcp_packets());
}
} // namespace cast