author    Torne (Richard Coles) <torne@google.com>  2013-11-28 11:55:43 +0000
committer Torne (Richard Coles) <torne@google.com>  2013-11-28 11:55:43 +0000
commit    f2477e01787aa58f445919b809d89e252beef54f (patch)
tree      2db962b4af39f0db3a5f83b314373d0530c484b8 /media/cast
parent    7daea1dd5ff7e419322de831b642d81af3247912 (diff)
download  chromium_org-f2477e01787aa58f445919b809d89e252beef54f.tar.gz
Merge from Chromium at DEPS revision 237746
This commit was generated by merge_to_master.py. Change-Id: I8997af4cddfeb09a7c26f7e8e672c712cab461ea
Diffstat (limited to 'media/cast')
-rw-r--r-- media/cast/DEPS | 5
-rw-r--r-- media/cast/OWNERS | 2
-rw-r--r-- media/cast/audio_receiver/audio_decoder.cc | 13
-rw-r--r-- media/cast/audio_receiver/audio_decoder.h | 14
-rw-r--r-- media/cast/audio_receiver/audio_decoder_unittest.cc | 19
-rw-r--r-- media/cast/audio_receiver/audio_receiver.cc | 113
-rw-r--r-- media/cast/audio_receiver/audio_receiver.gypi | 5
-rw-r--r-- media/cast/audio_receiver/audio_receiver.h | 15
-rw-r--r-- media/cast/audio_receiver/audio_receiver_unittest.cc | 8
-rw-r--r-- media/cast/audio_sender/audio_encoder.cc | 367
-rw-r--r-- media/cast/audio_sender/audio_encoder.h | 57
-rw-r--r-- media/cast/audio_sender/audio_encoder_unittest.cc | 214
-rw-r--r-- media/cast/audio_sender/audio_sender.cc | 133
-rw-r--r-- media/cast/audio_sender/audio_sender.gypi | 6
-rw-r--r-- media/cast/audio_sender/audio_sender.h | 34
-rw-r--r-- media/cast/audio_sender/audio_sender_unittest.cc | 66
-rw-r--r-- media/cast/cast.gyp | 71
-rw-r--r-- media/cast/cast_config.h | 34
-rw-r--r-- media/cast/cast_defines.h | 71
-rw-r--r-- media/cast/cast_environment.cc | 18
-rw-r--r-- media/cast/cast_environment.h | 13
-rw-r--r-- media/cast/cast_receiver.gyp | 4
-rw-r--r-- media/cast/cast_receiver_impl.cc | 47
-rw-r--r-- media/cast/cast_sender.gyp | 1
-rw-r--r-- media/cast/cast_sender.h | 30
-rw-r--r-- media/cast/cast_sender_impl.cc | 31
-rw-r--r-- media/cast/cast_sender_impl.h | 4
-rw-r--r-- media/cast/congestion_control/congestion_control.gypi | 1
-rw-r--r-- media/cast/congestion_control/congestion_control_unittest.cc | 70
-rw-r--r-- media/cast/framer/cast_message_builder.cc | 9
-rw-r--r-- media/cast/framer/cast_message_builder.h | 6
-rw-r--r-- media/cast/framer/cast_message_builder_unittest.cc | 38
-rw-r--r-- media/cast/framer/frame_buffer.cc | 2
-rw-r--r-- media/cast/framer/frame_buffer.h | 8
-rw-r--r-- media/cast/framer/frame_id_map.cc | 36
-rw-r--r-- media/cast/framer/frame_id_map.h | 34
-rw-r--r-- media/cast/framer/framer.cc | 6
-rw-r--r-- media/cast/framer/framer.h | 4
-rw-r--r-- media/cast/framer/framer_unittest.cc | 64
-rw-r--r-- media/cast/logging/logging.cc | 113
-rw-r--r-- media/cast/logging/logging.gyp | 30
-rw-r--r-- media/cast/logging/logging.h | 92
-rw-r--r-- media/cast/logging/logging_defines.cc | 30
-rw-r--r-- media/cast/logging/logging_defines.h | 32
-rw-r--r-- media/cast/logging/logging_impl.cc | 90
-rw-r--r-- media/cast/logging/logging_impl.h | 28
-rw-r--r-- media/cast/logging/logging_internal.cc | 10
-rw-r--r-- media/cast/logging/logging_internal.h | 16
-rw-r--r-- media/cast/logging/logging_raw.cc | 19
-rw-r--r-- media/cast/logging/logging_raw.h | 14
-rw-r--r-- media/cast/logging/logging_stats.cc | 13
-rw-r--r-- media/cast/logging/logging_stats.h | 18
-rw-r--r-- media/cast/logging/logging_unittest.cc | 10
-rw-r--r-- media/cast/pacing/paced_sender.cc | 13
-rw-r--r-- media/cast/pacing/paced_sender.gyp | 1
-rw-r--r-- media/cast/pacing/paced_sender_unittest.cc | 3
-rw-r--r-- media/cast/rtcp/mock_rtcp_receiver_feedback.h | 5
-rw-r--r-- media/cast/rtcp/mock_rtcp_sender_feedback.h | 12
-rw-r--r-- media/cast/rtcp/rtcp.cc | 240
-rw-r--r-- media/cast/rtcp/rtcp.h | 59
-rw-r--r-- media/cast/rtcp/rtcp_defines.h | 40
-rw-r--r-- media/cast/rtcp/rtcp_receiver.cc | 231
-rw-r--r-- media/cast/rtcp/rtcp_receiver.h | 18
-rw-r--r-- media/cast/rtcp/rtcp_receiver_unittest.cc | 272
-rw-r--r-- media/cast/rtcp/rtcp_sender.cc | 128
-rw-r--r-- media/cast/rtcp/rtcp_sender.h | 29
-rw-r--r-- media/cast/rtcp/rtcp_sender_unittest.cc | 125
-rw-r--r-- media/cast/rtcp/rtcp_unittest.cc | 263
-rw-r--r-- media/cast/rtcp/rtcp_utility.cc | 149
-rw-r--r-- media/cast/rtcp/rtcp_utility.h | 44
-rw-r--r-- media/cast/rtcp/test_rtcp_packet_builder.cc | 45
-rw-r--r-- media/cast/rtcp/test_rtcp_packet_builder.h | 21
-rw-r--r-- media/cast/rtp_common/rtp_defines.h | 4
-rw-r--r-- media/cast/rtp_receiver/rtp_parser/rtp_parser.cc | 11
-rw-r--r-- media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp | 1
-rw-r--r-- media/cast/rtp_receiver/rtp_parser/rtp_parser.h | 2
-rw-r--r-- media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.cc | 4
-rw-r--r-- media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h | 8
-rw-r--r-- media/cast/rtp_receiver/rtp_receiver.gyp | 1
-rw-r--r-- media/cast/rtp_sender/mock_rtp_sender.h | 2
-rw-r--r-- media/cast/rtp_sender/packet_storage/packet_storage.cc | 40
-rw-r--r-- media/cast/rtp_sender/packet_storage/packet_storage.h | 8
-rw-r--r-- media/cast/rtp_sender/packet_storage/packet_storage_unittest.cc | 12
-rw-r--r-- media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc | 24
-rw-r--r-- media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h | 6
-rw-r--r-- media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc | 8
-rw-r--r-- media/cast/rtp_sender/rtp_packetizer/test/rtp_header_parser.cc | 7
-rw-r--r-- media/cast/rtp_sender/rtp_packetizer/test/rtp_header_parser.h | 3
-rw-r--r-- media/cast/rtp_sender/rtp_sender.cc | 15
-rw-r--r-- media/cast/rtp_sender/rtp_sender.gyp | 1
-rw-r--r-- media/cast/rtp_sender/rtp_sender.h | 5
-rw-r--r-- media/cast/test/audio_utility.cc | 73
-rw-r--r-- media/cast/test/audio_utility.h | 62
-rw-r--r-- media/cast/test/crypto_utility.cc | 25
-rw-r--r-- media/cast/test/crypto_utility.h | 17
-rw-r--r-- media/cast/test/encode_decode_test.cc | 110
-rw-r--r-- media/cast/test/end2end_unittest.cc | 543
-rw-r--r-- media/cast/test/linux_output_window.cc | 134
-rw-r--r-- media/cast/test/linux_output_window.h | 49
-rw-r--r-- media/cast/test/receiver.cc | 263
-rw-r--r-- media/cast/test/sender.cc | 346
-rw-r--r-- media/cast/test/transport/transport.cc | 218
-rw-r--r-- media/cast/test/transport/transport.gyp | 22
-rw-r--r-- media/cast/test/transport/transport.h | 57
-rw-r--r-- media/cast/test/utility/input_helper.cc | 71
-rw-r--r-- media/cast/test/utility/input_helper.h | 49
-rw-r--r-- media/cast/test/utility/utility.gyp | 28
-rw-r--r-- media/cast/test/video_utility.cc | 113
-rw-r--r-- media/cast/test/video_utility.h | 10
-rw-r--r-- media/cast/video_receiver/codecs/vp8/vp8_decoder.cc | 53
-rw-r--r-- media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp | 1
-rw-r--r-- media/cast/video_receiver/codecs/vp8/vp8_decoder.h | 16
-rw-r--r-- media/cast/video_receiver/video_decoder.cc | 15
-rw-r--r-- media/cast/video_receiver/video_decoder.h | 9
-rw-r--r-- media/cast/video_receiver/video_decoder_unittest.cc | 52
-rw-r--r-- media/cast/video_receiver/video_receiver.cc | 199
-rw-r--r-- media/cast/video_receiver/video_receiver.gypi | 1
-rw-r--r-- media/cast/video_receiver/video_receiver.h | 26
-rw-r--r-- media/cast/video_receiver/video_receiver_unittest.cc | 26
-rw-r--r-- media/cast/video_sender/codecs/vp8/vp8_encoder.cc | 24
-rw-r--r-- media/cast/video_sender/codecs/vp8/vp8_encoder.gypi | 1
-rw-r--r-- media/cast/video_sender/codecs/vp8/vp8_encoder.h | 14
-rw-r--r-- media/cast/video_sender/mock_video_encoder_controller.h | 2
-rw-r--r-- media/cast/video_sender/video_encoder.cc | 30
-rw-r--r-- media/cast/video_sender/video_encoder.h | 21
-rw-r--r-- media/cast/video_sender/video_encoder_unittest.cc | 94
-rw-r--r-- media/cast/video_sender/video_sender.cc | 176
-rw-r--r-- media/cast/video_sender/video_sender.gypi | 3
-rw-r--r-- media/cast/video_sender/video_sender.h | 31
-rw-r--r-- media/cast/video_sender/video_sender_unittest.cc | 87
130 files changed, 5163 insertions, 1956 deletions
diff --git a/media/cast/DEPS b/media/cast/DEPS
index 8e10c67d31..b2a2d05d93 100644
--- a/media/cast/DEPS
+++ b/media/cast/DEPS
@@ -1,4 +1,7 @@
include_rules = [
- "+net",
+ "+crypto",
+ "+net",
"+third_party/webrtc",
+ "+third_party/libyuv",
+ "+ui/gfx",
]
diff --git a/media/cast/OWNERS b/media/cast/OWNERS
index 22e814b0a7..49f41be49c 100644
--- a/media/cast/OWNERS
+++ b/media/cast/OWNERS
@@ -1,2 +1,4 @@
hclam@chromium.org
hubbe@chromium.org
+mikhal@chromium.org
+pwestin@google.com
diff --git a/media/cast/audio_receiver/audio_decoder.cc b/media/cast/audio_receiver/audio_decoder.cc
index b140788fe0..118a020b30 100644
--- a/media/cast/audio_receiver/audio_decoder.cc
+++ b/media/cast/audio_receiver/audio_decoder.cc
@@ -52,7 +52,15 @@ bool AudioDecoder::GetRawAudioFrame(int number_of_10ms_blocks,
int desired_frequency,
PcmAudioFrame* audio_frame,
uint32* rtp_timestamp) {
- if (!have_received_packets_) return false;
+ // We don't care about the race case where a packet arrives at the same time
+ // as this function is called. The data will be there the next time this
+ // function is called.
+ lock_.Acquire();
+ // Get a local copy under lock.
+ bool have_received_packets = have_received_packets_;
+ lock_.Release();
+
+ if (!have_received_packets) return false;
audio_frame->samples.clear();
@@ -92,7 +100,10 @@ void AudioDecoder::IncomingParsedRtpPacket(const uint8* payload_data,
DCHECK_LE(payload_size, kIpPacketSize);
audio_decoder_->IncomingPacket(payload_data, static_cast<int32>(payload_size),
rtp_header.webrtc);
+
+ lock_.Acquire();
have_received_packets_ = true;
+ lock_.Release();
}
} // namespace cast
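Note on the hunk above: the change guards have_received_packets_ with a base::Lock using explicit Acquire()/Release() pairs. As an illustrative aside (not part of this change), the same header, "base/synchronization/lock.h", also provides base::AutoLock, which scopes the critical section automatically. A minimal sketch, with |lock| and |flag| standing in for the decoder's members:

  #include "base/synchronization/lock.h"

  // Illustrative only: scoped-lock equivalent of the Acquire()/Release()
  // pair added above. The lock is released automatically on return.
  bool ReadFlagUnderLock(base::Lock* lock, const bool* flag) {
    base::AutoLock auto_lock(*lock);
    return *flag;
  }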
diff --git a/media/cast/audio_receiver/audio_decoder.h b/media/cast/audio_receiver/audio_decoder.h
index d7c6a792c1..78bb623a01 100644
--- a/media/cast/audio_receiver/audio_decoder.h
+++ b/media/cast/audio_receiver/audio_decoder.h
@@ -6,7 +6,7 @@
#define MEDIA_CAST_AUDIO_RECEIVER_AUDIO_DECODER_H_
#include "base/callback.h"
-#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
#include "media/cast/cast_config.h"
#include "media/cast/rtp_common/rtp_defines.h"
@@ -18,9 +18,10 @@ namespace media {
namespace cast {
// Thread safe class.
-class AudioDecoder : public base::RefCountedThreadSafe<AudioDecoder> {
+class AudioDecoder {
public:
explicit AudioDecoder(const AudioReceiverConfig& audio_config);
+ virtual ~AudioDecoder();
// Extract a raw audio frame from the decoder.
// Set the number of desired 10ms blocks and frequency.
@@ -37,13 +38,12 @@ class AudioDecoder : public base::RefCountedThreadSafe<AudioDecoder> {
size_t payload_size,
const RtpCastHeader& rtp_header);
- protected:
- virtual ~AudioDecoder();
-
private:
- friend class base::RefCountedThreadSafe<AudioDecoder>;
-
+ // The webrtc AudioCodingModule is threadsafe.
scoped_ptr<webrtc::AudioCodingModule> audio_decoder_;
+ // TODO(pwestin): Refactor to avoid this. Call IncomingParsedRtpPacket from
+ // the audio decoder thread so this class does not have to be thread safe.
+ base::Lock lock_;
bool have_received_packets_;
DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
diff --git a/media/cast/audio_receiver/audio_decoder_unittest.cc b/media/cast/audio_receiver/audio_decoder_unittest.cc
index cdfea6bdbf..3437626943 100644
--- a/media/cast/audio_receiver/audio_decoder_unittest.cc
+++ b/media/cast/audio_receiver/audio_decoder_unittest.cc
@@ -15,10 +15,10 @@ class AudioDecoderTest : public ::testing::Test {
virtual ~AudioDecoderTest() {}
void Configure(const AudioReceiverConfig& audio_config) {
- audio_decoder_ = new AudioDecoder(audio_config);
+ audio_decoder_.reset(new AudioDecoder(audio_config));
}
- scoped_refptr<AudioDecoder> audio_decoder_;
+ scoped_ptr<AudioDecoder> audio_decoder_;
};
TEST_F(AudioDecoderTest, Pcm16MonoNoResampleOnePacket) {
@@ -41,6 +41,15 @@ TEST_F(AudioDecoderTest, Pcm16MonoNoResampleOnePacket) {
rtp_header.webrtc.type.Audio.isCNG = false;
std::vector<int16> payload(640, 0x1234);
+ int number_of_10ms_blocks = 4;
+ int desired_frequency = 16000;
+ PcmAudioFrame audio_frame;
+ uint32 rtp_timestamp;
+
+ EXPECT_FALSE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
+ desired_frequency,
+ &audio_frame,
+ &rtp_timestamp));
uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
size_t payload_size = payload.size() * sizeof(int16);
@@ -48,16 +57,10 @@ TEST_F(AudioDecoderTest, Pcm16MonoNoResampleOnePacket) {
audio_decoder_->IncomingParsedRtpPacket(payload_data,
payload_size, rtp_header);
- int number_of_10ms_blocks = 4;
- int desired_frequency = 16000;
- PcmAudioFrame audio_frame;
- uint32 rtp_timestamp;
-
EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
desired_frequency,
&audio_frame,
&rtp_timestamp));
-
EXPECT_EQ(1, audio_frame.channels);
EXPECT_EQ(16000, audio_frame.frequency);
EXPECT_EQ(640ul, audio_frame.samples.size());
diff --git a/media/cast/audio_receiver/audio_receiver.cc b/media/cast/audio_receiver/audio_receiver.cc
index c3dc3937b9..01e0026eb0 100644
--- a/media/cast/audio_receiver/audio_receiver.cc
+++ b/media/cast/audio_receiver/audio_receiver.cc
@@ -7,6 +7,8 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "crypto/encryptor.h"
+#include "crypto/symmetric_key.h"
#include "media/cast/audio_receiver/audio_decoder.h"
#include "media/cast/framer/framer.h"
#include "media/cast/rtcp/rtcp.h"
@@ -19,7 +21,6 @@ static const int64 kMinSchedulingDelayMs = 1;
namespace media {
namespace cast {
-
// Local implementation of RtpData (defined in rtp_rtcp_defines.h).
// Used to pass payload data into the audio receiver.
class LocalRtpAudioData : public RtpData {
@@ -80,7 +81,6 @@ AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
PacedPacketSender* const packet_sender)
: cast_environment_(cast_environment),
codec_(audio_config.codec),
- incoming_ssrc_(audio_config.incoming_ssrc),
frequency_(audio_config.frequency),
audio_buffer_(),
audio_decoder_(),
@@ -97,8 +97,20 @@ AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
true,
0));
} else {
- audio_decoder_ = new AudioDecoder(audio_config);
+ audio_decoder_.reset(new AudioDecoder(audio_config));
+ }
+ if (audio_config.aes_iv_mask.size() == kAesKeySize &&
+ audio_config.aes_key.size() == kAesKeySize) {
+ iv_mask_ = audio_config.aes_iv_mask;
+ crypto::SymmetricKey* key = crypto::SymmetricKey::Import(
+ crypto::SymmetricKey::AES, audio_config.aes_key);
+ decryptor_.reset(new crypto::Encryptor());
+ decryptor_->Init(key, crypto::Encryptor::CTR, std::string());
+ } else if (audio_config.aes_iv_mask.size() != 0 ||
+ audio_config.aes_key.size() != 0) {
+ DCHECK(false) << "Invalid crypto configuration";
}
+
rtp_receiver_.reset(new RtpReceiver(cast_environment->Clock(),
&audio_config,
NULL,
@@ -107,36 +119,61 @@ AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
new LocalRtpReceiverStatistics(rtp_receiver_.get()));
base::TimeDelta rtcp_interval_delta =
base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval);
- rtcp_.reset(new Rtcp(cast_environment->Clock(),
+ rtcp_.reset(new Rtcp(cast_environment,
NULL,
packet_sender,
NULL,
rtp_audio_receiver_statistics_.get(),
audio_config.rtcp_mode,
rtcp_interval_delta,
- false,
audio_config.feedback_ssrc,
+ audio_config.incoming_ssrc,
audio_config.rtcp_c_name));
- rtcp_->SetRemoteSSRC(audio_config.incoming_ssrc);
- ScheduleNextRtcpReport();
- ScheduleNextCastMessage();
}
AudioReceiver::~AudioReceiver() {}
+void AudioReceiver::InitializeTimers() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ ScheduleNextRtcpReport();
+ ScheduleNextCastMessage();
+}
+
void AudioReceiver::IncomingParsedRtpPacket(const uint8* payload_data,
size_t payload_size,
const RtpCastHeader& rtp_header) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ cast_environment_->Logging()->InsertPacketEvent(kPacketReceived,
+ rtp_header.webrtc.header.timestamp, rtp_header.frame_id,
+ rtp_header.packet_id, rtp_header.max_packet_id, payload_size);
+
// TODO(pwestin): update this as video to refresh over time.
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (time_first_incoming_packet_.is_null()) {
+ InitializeTimers();
first_incoming_rtp_timestamp_ = rtp_header.webrtc.header.timestamp;
time_first_incoming_packet_ = cast_environment_->Clock()->NowTicks();
}
if (audio_decoder_) {
DCHECK(!audio_buffer_) << "Invalid internal state";
- audio_decoder_->IncomingParsedRtpPacket(payload_data, payload_size,
- rtp_header);
+ std::string plaintext(reinterpret_cast<const char*>(payload_data),
+ payload_size);
+ if (decryptor_) {
+ plaintext.clear();
+ if (!decryptor_->SetCounter(GetAesNonce(rtp_header.frame_id, iv_mask_))) {
+ NOTREACHED() << "Failed to set counter";
+ return;
+ }
+ if (!decryptor_->Decrypt(base::StringPiece(reinterpret_cast<const char*>(
+ payload_data), payload_size), &plaintext)) {
+ VLOG(0) << "Decryption error";
+ return;
+ }
+ }
+ audio_decoder_->IncomingParsedRtpPacket(
+ reinterpret_cast<const uint8*>(plaintext.data()), plaintext.size(),
+ rtp_header);
return;
}
DCHECK(audio_buffer_) << "Invalid internal state";
@@ -155,11 +192,13 @@ void AudioReceiver::IncomingParsedRtpPacket(const uint8* payload_data,
void AudioReceiver::GetRawAudioFrame(int number_of_10ms_blocks,
int desired_frequency, const AudioFrameDecodedCallback& callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(audio_decoder_) << "Invalid function call in this configuration";
+ // TODO(pwestin): we can skip this function by posting direct to the decoder.
cast_environment_->PostTask(CastEnvironment::AUDIO_DECODER, FROM_HERE,
base::Bind(&AudioReceiver::DecodeAudioFrameThread,
- weak_factory_.GetWeakPtr(),
+ base::Unretained(this),
number_of_10ms_blocks,
desired_frequency,
callback));
@@ -178,12 +217,17 @@ void AudioReceiver::DecodeAudioFrameThread(
desired_frequency,
audio_frame.get(),
&rtp_timestamp)) {
+ // TODO(pwestin): This looks wrong; we would lose the pending call to
+ // the application provided callback.
return;
}
base::TimeTicks now = cast_environment_->Clock()->NowTicks();
base::TimeTicks playout_time;
playout_time = GetPlayoutTime(now, rtp_timestamp);
+ cast_environment_->Logging()->InsertFrameEventWithDelay(kAudioPlayoutDelay,
+ rtp_timestamp, kFrameIdUnknown, playout_time - now);
+
// Frame is ready - Send back to the main thread.
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(callback,
@@ -191,6 +235,7 @@ void AudioReceiver::DecodeAudioFrameThread(
}
void AudioReceiver::PlayoutTimeout() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(audio_buffer_) << "Invalid function call in this configuration";
if (queued_encoded_callbacks_.empty()) {
// Already released by incoming packet.
@@ -209,6 +254,12 @@ void AudioReceiver::PlayoutTimeout() {
VLOG(1) << "Failed to retrieved a complete frame at this point in time";
return;
}
+
+ if (decryptor_ && !DecryptAudioFrame(&encoded_frame)) {
+ // Logging already done.
+ return;
+ }
+
if (PostEncodedAudioFrame(queued_encoded_callbacks_.front(), rtp_timestamp,
next_frame, &encoded_frame)) {
// Call succeed remove callback from list.
@@ -218,6 +269,7 @@ void AudioReceiver::PlayoutTimeout() {
void AudioReceiver::GetEncodedAudioFrame(
const AudioFrameEncodedCallback& callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(audio_buffer_) << "Invalid function call in this configuration";
uint32 rtp_timestamp = 0;
@@ -231,6 +283,11 @@ void AudioReceiver::GetEncodedAudioFrame(
queued_encoded_callbacks_.push_back(callback);
return;
}
+ if (decryptor_ && !DecryptAudioFrame(&encoded_frame)) {
+ // Logging already done.
+ queued_encoded_callbacks_.push_back(callback);
+ return;
+ }
if (!PostEncodedAudioFrame(callback, rtp_timestamp, next_frame,
&encoded_frame)) {
// We have an audio frame; however we are missing packets and we have time
@@ -244,7 +301,9 @@ bool AudioReceiver::PostEncodedAudioFrame(
uint32 rtp_timestamp,
bool next_frame,
scoped_ptr<EncodedAudioFrame>* encoded_frame) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(audio_buffer_) << "Invalid function call in this configuration";
+
base::TimeTicks now = cast_environment_->Clock()->NowTicks();
base::TimeTicks playout_time = GetPlayoutTime(now, rtp_timestamp);
base::TimeDelta time_until_playout = playout_time - now;
@@ -270,6 +329,7 @@ bool AudioReceiver::PostEncodedAudioFrame(
void AudioReceiver::IncomingPacket(const uint8* packet, size_t length,
const base::Closure callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
bool rtcp_packet = Rtcp::IsRtcpPacket(packet, length);
if (!rtcp_packet) {
rtp_receiver_->ReceivedPacket(packet, length);
@@ -280,11 +340,13 @@ void AudioReceiver::IncomingPacket(const uint8* packet, size_t length,
}
void AudioReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
- rtcp_->SendRtcpCast(cast_message);
+ // TODO(pwestin): add logging.
+ rtcp_->SendRtcpFromRtpReceiver(&cast_message, NULL);
}
base::TimeTicks AudioReceiver::GetPlayoutTime(base::TimeTicks now,
uint32 rtp_timestamp) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
// Senders time in ms when this frame was recorded.
// Note: the senders clock and our local clock might not be synced.
base::TimeTicks rtp_timestamp_in_ticks;
@@ -314,7 +376,28 @@ base::TimeTicks AudioReceiver::GetPlayoutTime(base::TimeTicks now,
now;
}
+bool AudioReceiver::DecryptAudioFrame(
+ scoped_ptr<EncodedAudioFrame>* audio_frame) {
+ DCHECK(decryptor_) << "Invalid state";
+
+ if (!decryptor_->SetCounter(GetAesNonce((*audio_frame)->frame_id,
+ iv_mask_))) {
+ NOTREACHED() << "Failed to set counter";
+ return false;
+ }
+ std::string decrypted_audio_data;
+ if (!decryptor_->Decrypt((*audio_frame)->data, &decrypted_audio_data)) {
+ VLOG(0) << "Decryption error";
+ // Give up on this frame, release it from jitter buffer.
+ audio_buffer_->ReleaseFrame((*audio_frame)->frame_id);
+ return false;
+ }
+ (*audio_frame)->data.swap(decrypted_audio_data);
+ return true;
+}
+
void AudioReceiver::ScheduleNextRtcpReport() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeDelta time_to_send = rtcp_->TimeToSendNextRtcpReport() -
cast_environment_->Clock()->NowTicks();
@@ -327,13 +410,16 @@ void AudioReceiver::ScheduleNextRtcpReport() {
}
void AudioReceiver::SendNextRtcpReport() {
- rtcp_->SendRtcpReport(incoming_ssrc_);
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ // TODO(pwestin): add logging.
+ rtcp_->SendRtcpFromRtpReceiver(NULL, NULL);
ScheduleNextRtcpReport();
}
// Cast messages should be sent within a maximum interval. Schedule a call
// if not triggered elsewhere, e.g. by the cast message_builder.
void AudioReceiver::ScheduleNextCastMessage() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (audio_buffer_) {
base::TimeTicks send_time;
audio_buffer_->TimeToSendNextCastMessage(&send_time);
@@ -349,6 +435,7 @@ void AudioReceiver::ScheduleNextCastMessage() {
}
void AudioReceiver::SendNextCastMessage() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(audio_buffer_) << "Invalid function call in this configuration";
audio_buffer_->SendCastMessage(); // Will only send a message if it is time.
ScheduleNextCastMessage();
diff --git a/media/cast/audio_receiver/audio_receiver.gypi b/media/cast/audio_receiver/audio_receiver.gypi
index 0cdcfcc7af..a851612f72 100644
--- a/media/cast/audio_receiver/audio_receiver.gypi
+++ b/media/cast/audio_receiver/audio_receiver.gypi
@@ -8,8 +8,8 @@
'target_name': 'cast_audio_receiver',
'type': 'static_library',
'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
'<(DEPTH)/third_party/webrtc/',
],
'sources': [
@@ -19,6 +19,7 @@
'audio_receiver.cc',
], # source
'dependencies': [
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
'<(DEPTH)/media/cast/rtcp/rtcp.gyp:cast_rtcp',
'<(DEPTH)/media/cast/rtp_receiver/rtp_receiver.gyp:*',
'<(DEPTH)/third_party/webrtc/webrtc.gyp:webrtc',
diff --git a/media/cast/audio_receiver/audio_receiver.h b/media/cast/audio_receiver/audio_receiver.h
index 2044e16cfc..d9ab207c5b 100644
--- a/media/cast/audio_receiver/audio_receiver.h
+++ b/media/cast/audio_receiver/audio_receiver.h
@@ -19,6 +19,10 @@
#include "media/cast/rtcp/rtcp.h" // RtcpCastMessage
#include "media/cast/rtp_common/rtp_defines.h" // RtpCastHeader
+namespace crypto {
+ class Encryptor;
+}
+
namespace media {
namespace cast {
@@ -81,6 +85,12 @@ class AudioReceiver : public base::NonThreadSafe,
// Return the playout time based on the current time and rtp timestamp.
base::TimeTicks GetPlayoutTime(base::TimeTicks now, uint32 rtp_timestamp);
+ void InitializeTimers();
+
+ // Decrypts the data within the |audio_frame| and replaces the data with the
+ // decrypted string.
+ bool DecryptAudioFrame(scoped_ptr<EncodedAudioFrame>* audio_frame);
+
// Schedule the next RTCP report.
void ScheduleNextRtcpReport();
@@ -97,11 +107,10 @@ class AudioReceiver : public base::NonThreadSafe,
base::WeakPtrFactory<AudioReceiver> weak_factory_;
const AudioCodec codec_;
- const uint32 incoming_ssrc_;
const int frequency_;
base::TimeDelta target_delay_delta_;
scoped_ptr<Framer> audio_buffer_;
- scoped_refptr<AudioDecoder> audio_decoder_;
+ scoped_ptr<AudioDecoder> audio_decoder_;
scoped_ptr<LocalRtpAudioData> incoming_payload_callback_;
scoped_ptr<LocalRtpAudioFeedback> incoming_payload_feedback_;
scoped_ptr<RtpReceiver> rtp_receiver_;
@@ -110,6 +119,8 @@ class AudioReceiver : public base::NonThreadSafe,
base::TimeDelta time_offset_;
base::TimeTicks time_first_incoming_packet_;
uint32 first_incoming_rtp_timestamp_;
+ scoped_ptr<crypto::Encryptor> decryptor_;
+ std::string iv_mask_;
std::list<AudioFrameEncodedCallback> queued_encoded_callbacks_;
};
diff --git a/media/cast/audio_receiver/audio_receiver_unittest.cc b/media/cast/audio_receiver/audio_receiver_unittest.cc
index cbd90c1215..87b4fc7339 100644
--- a/media/cast/audio_receiver/audio_receiver_unittest.cc
+++ b/media/cast/audio_receiver/audio_receiver_unittest.cc
@@ -19,6 +19,7 @@ namespace cast {
static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+namespace {
class TestAudioEncoderCallback :
public base::RefCountedThreadSafe<TestAudioEncoderCallback> {
public:
@@ -39,7 +40,7 @@ class TestAudioEncoderCallback :
num_called_++;
}
- int number_times_called() { return num_called_;}
+ int number_times_called() const { return num_called_;}
protected:
virtual ~TestAudioEncoderCallback() {}
@@ -51,6 +52,7 @@ class TestAudioEncoderCallback :
uint8 expected_frame_id_;
base::TimeTicks expected_playout_time_;
};
+} // namespace
class PeerAudioReceiver : public AudioReceiver {
public:
@@ -71,11 +73,13 @@ class AudioReceiverTest : public ::testing::Test {
audio_config_.channels = 1;
audio_config_.codec = kPcm16;
audio_config_.use_external_decoder = false;
+ audio_config_.feedback_ssrc = 1234;
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
task_runner_ = new test::FakeTaskRunner(&testing_clock_);
cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_);
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig());
test_audio_encoder_callback_ = new TestAudioEncoderCallback();
}
diff --git a/media/cast/audio_sender/audio_encoder.cc b/media/cast/audio_sender/audio_encoder.cc
index 3cfca0dfc8..a82d1de39a 100644
--- a/media/cast/audio_sender/audio_encoder.cc
+++ b/media/cast/audio_sender/audio_encoder.cc
@@ -4,171 +4,288 @@
#include "media/cast/audio_sender/audio_encoder.h"
+#include <algorithm>
+
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "base/sys_byteorder.h"
+#include "base/time/time.h"
+#include "media/base/audio_bus.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
-#include "third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
-#include "third_party/webrtc/modules/interface/module_common_types.h"
+#include "third_party/opus/src/include/opus.h"
namespace media {
namespace cast {
-// 48KHz, 2 channels and 100 ms.
-static const int kMaxNumberOfSamples = 48 * 2 * 100;
+void LogAudioEncodedEvent(CastEnvironment* const cast_environment,
+ const base::TimeTicks& recorded_time) {
+ // TODO(mikhal): Resolve timestamp calculation for audio.
+ cast_environment->Logging()->InsertFrameEvent(kAudioFrameEncoded,
+ GetVideoRtpTimestamp(recorded_time), kFrameIdUnknown);
+}
-// This class is only called from the cast audio encoder thread.
-class WebrtEncodedDataCallback : public webrtc::AudioPacketizationCallback {
+// Base class that handles the common problem of feeding one or more AudioBus'
+// data into a 10 ms buffer and then, once the buffer is full, encoding the
+// signal and emitting an EncodedAudioFrame via the FrameEncodedCallback.
+//
+// Subclasses complete the implementation by handling the actual encoding
+// details.
+class AudioEncoder::ImplBase {
public:
- WebrtEncodedDataCallback(scoped_refptr<CastEnvironment> cast_environment,
- AudioCodec codec,
- int frequency)
- : codec_(codec),
- frequency_(frequency),
- cast_environment_(cast_environment),
- last_timestamp_(0) {}
-
- virtual int32 SendData(
- webrtc::FrameType /*frame_type*/,
- uint8 /*payload_type*/,
- uint32 timestamp,
- const uint8* payload_data,
- uint16 payload_size,
- const webrtc::RTPFragmentationHeader* /*fragmentation*/) OVERRIDE {
- scoped_ptr<EncodedAudioFrame> audio_frame(new EncodedAudioFrame());
- audio_frame->codec = codec_;
- audio_frame->samples = timestamp - last_timestamp_;
- DCHECK(audio_frame->samples <= kMaxNumberOfSamples);
- last_timestamp_ = timestamp;
- audio_frame->data.insert(audio_frame->data.begin(),
- payload_data,
- payload_data + payload_size);
+ ImplBase(CastEnvironment* cast_environment,
+ AudioCodec codec, int num_channels, int sampling_rate,
+ const FrameEncodedCallback& callback)
+ : cast_environment_(cast_environment),
+ codec_(codec), num_channels_(num_channels),
+ samples_per_10ms_(sampling_rate / 100),
+ callback_(callback),
+ buffer_fill_end_(0),
+ frame_id_(0) {
+ CHECK_GT(num_channels_, 0);
+ CHECK_GT(samples_per_10ms_, 0);
+ CHECK_EQ(sampling_rate % 100, 0);
+ CHECK_LE(samples_per_10ms_ * num_channels_,
+ EncodedAudioFrame::kMaxNumberOfSamples);
+ }
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(*frame_encoded_callback_, base::Passed(&audio_frame),
- recorded_time_));
- return 0;
+ virtual ~ImplBase() {}
+
+ void EncodeAudio(const AudioBus* audio_bus,
+ const base::TimeTicks& recorded_time,
+ const base::Closure& done_callback) {
+ int src_pos = 0;
+ while (src_pos < audio_bus->frames()) {
+ const int num_samples_to_xfer =
+ std::min(samples_per_10ms_ - buffer_fill_end_,
+ audio_bus->frames() - src_pos);
+ DCHECK_EQ(audio_bus->channels(), num_channels_);
+ TransferSamplesIntoBuffer(
+ audio_bus, src_pos, buffer_fill_end_, num_samples_to_xfer);
+ src_pos += num_samples_to_xfer;
+ buffer_fill_end_ += num_samples_to_xfer;
+
+ if (src_pos == audio_bus->frames()) {
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ done_callback);
+ // Note: |audio_bus| is now invalid.
+ }
+
+ if (buffer_fill_end_ == samples_per_10ms_) {
+ scoped_ptr<EncodedAudioFrame> audio_frame(new EncodedAudioFrame());
+ audio_frame->codec = codec_;
+ audio_frame->frame_id = frame_id_++;
+ audio_frame->samples = samples_per_10ms_;
+ if (EncodeFromFilledBuffer(&audio_frame->data)) {
+ // Compute an offset to determine the recorded time for the first
+ // audio sample in the buffer.
+ const base::TimeDelta buffer_time_offset =
+ (buffer_fill_end_ - src_pos) *
+ base::TimeDelta::FromMilliseconds(10) / samples_per_10ms_;
+ // TODO(miu): Consider batching EncodedAudioFrames so we only post
+ // at most one task for each call to this method.
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(callback_, base::Passed(&audio_frame),
+ recorded_time - buffer_time_offset));
+ }
+ buffer_fill_end_ = 0;
+ }
+ }
}
- void SetEncodedCallbackInfo(
- const base::TimeTicks& recorded_time,
- const AudioEncoder::FrameEncodedCallback* frame_encoded_callback) {
- recorded_time_ = recorded_time;
- frame_encoded_callback_ = frame_encoded_callback;
+ protected:
+ virtual void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
+ int source_offset,
+ int buffer_fill_offset,
+ int num_samples) = 0;
+ virtual bool EncodeFromFilledBuffer(std::string* out) = 0;
+
+ CastEnvironment* const cast_environment_;
+ const AudioCodec codec_;
+ const int num_channels_;
+ const int samples_per_10ms_;
+ const FrameEncodedCallback callback_;
+
+ private:
+ // In the case where a call to EncodeAudio() cannot completely fill the
+ // buffer, this points to the position at which to populate data in a later
+ // call.
+ int buffer_fill_end_;
+
+ // A counter used to label EncodedAudioFrames.
+ uint32 frame_id_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ImplBase);
+};
+
+class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
+ public:
+ OpusImpl(CastEnvironment* cast_environment,
+ int num_channels, int sampling_rate, int bitrate,
+ const FrameEncodedCallback& callback)
+ : ImplBase(cast_environment, kOpus, num_channels, sampling_rate,
+ callback),
+ encoder_memory_(new uint8[opus_encoder_get_size(num_channels)]),
+ opus_encoder_(reinterpret_cast<OpusEncoder*>(encoder_memory_.get())),
+ buffer_(new float[num_channels * samples_per_10ms_]) {
+ CHECK_EQ(opus_encoder_init(opus_encoder_, sampling_rate, num_channels,
+ OPUS_APPLICATION_AUDIO),
+ OPUS_OK);
+ if (bitrate <= 0) {
+ // Note: As of 2013-10-31, the encoder in "auto bitrate" mode would use a
+ // variable bitrate up to 102kbps for 2-channel, 48 kHz audio and a 10 ms
+ // frame size. The opus library authors may, of course, adjust this in
+ // later versions.
+ bitrate = OPUS_AUTO;
+ }
+ CHECK_EQ(opus_encoder_ctl(opus_encoder_, OPUS_SET_BITRATE(bitrate)),
+ OPUS_OK);
}
+ virtual ~OpusImpl() {}
+
private:
- const AudioCodec codec_;
- const int frequency_;
- scoped_refptr<CastEnvironment> cast_environment_;
- uint32 last_timestamp_;
- base::TimeTicks recorded_time_;
- const AudioEncoder::FrameEncodedCallback* frame_encoded_callback_;
+ virtual void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
+ int source_offset,
+ int buffer_fill_offset,
+ int num_samples) OVERRIDE {
+ // Opus requires channel-interleaved samples in a single array.
+ for (int ch = 0; ch < audio_bus->channels(); ++ch) {
+ const float* src = audio_bus->channel(ch) + source_offset;
+ const float* const src_end = src + num_samples;
+ float* dest = buffer_.get() + buffer_fill_offset * num_channels_ + ch;
+ for (; src < src_end; ++src, dest += num_channels_)
+ *dest = *src;
+ }
+ }
+
+ virtual bool EncodeFromFilledBuffer(std::string* out) OVERRIDE {
+ out->resize(kOpusMaxPayloadSize);
+ const opus_int32 result = opus_encode_float(
+ opus_encoder_, buffer_.get(), samples_per_10ms_,
+ reinterpret_cast<uint8*>(&out->at(0)), kOpusMaxPayloadSize);
+ if (result > 1) {
+ out->resize(result);
+ return true;
+ } else if (result < 0) {
+ LOG(ERROR) << "Error code from opus_encode_float(): " << result;
+ return false;
+ } else {
+ // Do nothing: The documentation says that a return value of zero or
+ // one byte means the packet does not need to be transmitted.
+ return false;
+ }
+ }
+
+ const scoped_ptr<uint8[]> encoder_memory_;
+ OpusEncoder* const opus_encoder_;
+ const scoped_ptr<float[]> buffer_;
+
+ // This is the recommended value, according to documentation in
+ // third_party/opus/src/include/opus.h, so that the Opus encoder does not
+ // degrade the audio due to memory constraints.
+ //
+ // Note: Whereas other RTP implementations do not, the cast library is
+ // perfectly capable of transporting larger than MTU-sized audio frames.
+ static const int kOpusMaxPayloadSize = 4000;
+
+ DISALLOW_COPY_AND_ASSIGN(OpusImpl);
};
-AudioEncoder::AudioEncoder(scoped_refptr<CastEnvironment> cast_environment,
- const AudioSenderConfig& audio_config)
- : cast_environment_(cast_environment),
- audio_encoder_(webrtc::AudioCodingModule::Create(0)),
- webrtc_encoder_callback_(
- new WebrtEncodedDataCallback(cast_environment, audio_config.codec,
- audio_config.frequency)),
- timestamp_(0) { // Must start at 0; used above.
- if (audio_encoder_->InitializeSender() != 0) {
- DCHECK(false) << "Invalid webrtc return value";
+class AudioEncoder::Pcm16Impl : public AudioEncoder::ImplBase {
+ public:
+ Pcm16Impl(CastEnvironment* cast_environment,
+ int num_channels, int sampling_rate,
+ const FrameEncodedCallback& callback)
+ : ImplBase(cast_environment, kPcm16, num_channels, sampling_rate,
+ callback),
+ buffer_(new int16[num_channels * samples_per_10ms_]) {}
+
+ virtual ~Pcm16Impl() {}
+
+ private:
+ virtual void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
+ int source_offset,
+ int buffer_fill_offset,
+ int num_samples) OVERRIDE {
+ audio_bus->ToInterleavedPartial(
+ source_offset, num_samples, sizeof(int16),
+ buffer_.get() + buffer_fill_offset * num_channels_);
}
- if (audio_encoder_->RegisterTransportCallback(
- webrtc_encoder_callback_.get()) != 0) {
- DCHECK(false) << "Invalid webrtc return value";
+
+ virtual bool EncodeFromFilledBuffer(std::string* out) OVERRIDE {
+ // Output 16-bit PCM integers in big-endian byte order.
+ out->resize(num_channels_ * samples_per_10ms_ * sizeof(int16));
+ const int16* src = buffer_.get();
+ const int16* const src_end = src + num_channels_ * samples_per_10ms_;
+ uint16* dest = reinterpret_cast<uint16*>(&out->at(0));
+ for (; src < src_end; ++src, ++dest)
+ *dest = base::HostToNet16(*src);
+ return true;
}
- webrtc::CodecInst send_codec;
- send_codec.pltype = audio_config.rtp_payload_type;
- send_codec.plfreq = audio_config.frequency;
- send_codec.channels = audio_config.channels;
+
+ private:
+ const scoped_ptr<int16[]> buffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(Pcm16Impl);
+};
+
+AudioEncoder::AudioEncoder(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const AudioSenderConfig& audio_config,
+ const FrameEncodedCallback& frame_encoded_callback)
+ : cast_environment_(cast_environment) {
+ // Note: It doesn't matter which thread constructs AudioEncoder, just so long
+ // as all calls to InsertAudio() are by the same thread.
+ insert_thread_checker_.DetachFromThread();
switch (audio_config.codec) {
case kOpus:
- strncpy(send_codec.plname, "opus", sizeof(send_codec.plname));
- send_codec.pacsize = audio_config.frequency / 50; // 20 ms
- send_codec.rate = audio_config.bitrate; // 64000
+ impl_.reset(new OpusImpl(
+ cast_environment, audio_config.channels, audio_config.frequency,
+ audio_config.bitrate, frame_encoded_callback));
break;
case kPcm16:
- strncpy(send_codec.plname, "L16", sizeof(send_codec.plname));
- send_codec.pacsize = audio_config.frequency / 100; // 10 ms
- // TODO(pwestin) bug in webrtc; it should take audio_config.channels into
- // account.
- send_codec.rate = 8 * 2 * audio_config.frequency;
+ impl_.reset(new Pcm16Impl(
+ cast_environment, audio_config.channels, audio_config.frequency,
+ frame_encoded_callback));
break;
default:
- DCHECK(false) << "Codec must be specified for audio encoder";
- return;
- }
- if (audio_encoder_->RegisterSendCodec(send_codec) != 0) {
- DCHECK(false) << "Invalid webrtc return value; failed to register codec";
+ NOTREACHED() << "Unsupported or unspecified codec for audio encoder";
+ break;
}
}
AudioEncoder::~AudioEncoder() {}
-// Called from main cast thread.
-void AudioEncoder::InsertRawAudioFrame(
- const PcmAudioFrame* audio_frame,
+void AudioEncoder::InsertAudio(
+ const AudioBus* audio_bus,
const base::TimeTicks& recorded_time,
- const FrameEncodedCallback& frame_encoded_callback,
- const base::Closure release_callback) {
+ const base::Closure& done_callback) {
+ DCHECK(insert_thread_checker_.CalledOnValidThread());
+ if (!impl_) {
+ NOTREACHED();
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ done_callback);
+ return;
+ }
cast_environment_->PostTask(CastEnvironment::AUDIO_ENCODER, FROM_HERE,
- base::Bind(&AudioEncoder::EncodeAudioFrameThread, this, audio_frame,
- recorded_time, frame_encoded_callback, release_callback));
+ base::Bind(&AudioEncoder::EncodeAudio, this, audio_bus, recorded_time,
+ done_callback));
}
-// Called from cast audio encoder thread.
-void AudioEncoder::EncodeAudioFrameThread(
- const PcmAudioFrame* audio_frame,
+void AudioEncoder::EncodeAudio(
+ const AudioBus* audio_bus,
const base::TimeTicks& recorded_time,
- const FrameEncodedCallback& frame_encoded_callback,
- const base::Closure release_callback) {
+ const base::Closure& done_callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::AUDIO_ENCODER));
- size_t samples_per_10ms = audio_frame->frequency / 100;
- size_t number_of_10ms_blocks = audio_frame->samples.size() /
- (samples_per_10ms * audio_frame->channels);
- DCHECK(webrtc::AudioFrame::kMaxDataSizeSamples > samples_per_10ms)
- << "webrtc sanity check failed";
-
- for (size_t i = 0; i < number_of_10ms_blocks; ++i) {
- webrtc::AudioFrame webrtc_audio_frame;
- webrtc_audio_frame.timestamp_ = timestamp_;
-
- // Due to the webrtc::AudioFrame declaration we need to copy our data into
- // the webrtc structure.
- memcpy(&webrtc_audio_frame.data_[0],
- &audio_frame->samples[i * samples_per_10ms * audio_frame->channels],
- samples_per_10ms * audio_frame->channels * sizeof(int16));
-
- // The webrtc API is int and we have a size_t; the cast should never be an
- // issue since the normal values are in the 480 range.
- DCHECK_GE(1000u, samples_per_10ms);
- webrtc_audio_frame.samples_per_channel_ =
- static_cast<int>(samples_per_10ms);
- webrtc_audio_frame.sample_rate_hz_ = audio_frame->frequency;
- webrtc_audio_frame.num_channels_ = audio_frame->channels;
-
- // webrtc::AudioCodingModule is thread safe.
- if (audio_encoder_->Add10MsData(webrtc_audio_frame) != 0) {
- DCHECK(false) << "Invalid webrtc return value";
- }
- timestamp_ += static_cast<uint32>(samples_per_10ms);
- }
- // We are done with the audio frame release it.
+ impl_->EncodeAudio(audio_bus, recorded_time, done_callback);
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- release_callback);
-
- // Note: Not all insert of 10 ms will generate a callback with encoded data.
- webrtc_encoder_callback_->SetEncodedCallbackInfo(recorded_time,
- &frame_encoded_callback);
- for (size_t i = 0; i < number_of_10ms_blocks; ++i) {
- audio_encoder_->Process();
- }
+ base::Bind(LogAudioEncodedEvent, cast_environment_, recorded_time));
}
} // namespace cast
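Note on the new ImplBase buffering logic above (illustrative numbers only, assuming 48 kHz audio as in the unit tests): samples_per_10ms_ = 48000 / 100 = 480. A 13 ms AudioBus carries 624 frames, so the first InsertAudio() call transfers 480 samples, emits one EncodedAudioFrame, and leaves 144 samples buffered; buffer_fill_end_ then starts at 144 on the next call. When a later bus completes that partially filled buffer, the frame's recorded time is shifted back by buffer_time_offset = (buffer_fill_end_ - src_pos) * 10 ms / samples_per_10ms_, i.e. 144 * 10 ms / 480 = 3 ms in this example, so the timestamp still corresponds to when the frame's first sample was captured rather than to the most recent call.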
diff --git a/media/cast/audio_sender/audio_encoder.h b/media/cast/audio_sender/audio_encoder.h
index 8a4acc4f91..4a22d1983b 100644
--- a/media/cast/audio_sender/audio_encoder.h
+++ b/media/cast/audio_sender/audio_encoder.h
@@ -7,37 +7,37 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread_checker.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/rtp_sender/rtp_sender.h"
-namespace webrtc {
-class AudioCodingModule;
+namespace base {
+class TimeTicks;
}
namespace media {
-namespace cast {
+class AudioBus;
+}
-class WebrtEncodedDataCallback;
+namespace media {
+namespace cast {
-// Thread safe class.
-// It should be called from the main cast thread; however that is not required.
class AudioEncoder : public base::RefCountedThreadSafe<AudioEncoder> {
public:
typedef base::Callback<void(scoped_ptr<EncodedAudioFrame>,
const base::TimeTicks&)> FrameEncodedCallback;
- AudioEncoder(scoped_refptr<CastEnvironment> cast_environment,
- const AudioSenderConfig& audio_config);
+ AudioEncoder(const scoped_refptr<CastEnvironment>& cast_environment,
+ const AudioSenderConfig& audio_config,
+ const FrameEncodedCallback& frame_encoded_callback);
- // The audio_frame must be valid until the closure callback is called.
- // The closure callback is called from the main cast thread as soon as
- // the encoder is done with the frame; it does not mean that the encoded frame
- // has been sent out.
- void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const FrameEncodedCallback& frame_encoded_callback,
- const base::Closure callback);
+ // The |audio_bus| must be valid until the |done_callback| is called.
+ // The callback is called from the main cast thread as soon as the encoder is
+ // done with |audio_bus|; it does not mean that the encoded data has been
+ // sent out.
+ void InsertAudio(const AudioBus* audio_bus,
+ const base::TimeTicks& recorded_time,
+ const base::Closure& done_callback);
protected:
virtual ~AudioEncoder();
@@ -45,16 +45,21 @@ class AudioEncoder : public base::RefCountedThreadSafe<AudioEncoder> {
private:
friend class base::RefCountedThreadSafe<AudioEncoder>;
- void EncodeAudioFrameThread(
- const PcmAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const FrameEncodedCallback& frame_encoded_callback,
- const base::Closure release_callback);
+ class ImplBase;
+ class OpusImpl;
+ class Pcm16Impl;
+
+ // Invokes |impl_|'s encode method on the AUDIO_ENCODER thread while holding
+ // a ref-count on AudioEncoder.
+ void EncodeAudio(const AudioBus* audio_bus,
+ const base::TimeTicks& recorded_time,
+ const base::Closure& done_callback);
+
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_ptr<ImplBase> impl_;
- scoped_refptr<CastEnvironment> cast_environment_;
- scoped_ptr<webrtc::AudioCodingModule> audio_encoder_;
- scoped_ptr<WebrtEncodedDataCallback> webrtc_encoder_callback_;
- uint32 timestamp_;
+ // Used to ensure only one thread invokes InsertAudio().
+ base::ThreadChecker insert_thread_checker_;
DISALLOW_COPY_AND_ASSIGN(AudioEncoder);
};
diff --git a/media/cast/audio_sender/audio_encoder_unittest.cc b/media/cast/audio_sender/audio_encoder_unittest.cc
index b33424a606..d721f71ef2 100644
--- a/media/cast/audio_sender/audio_encoder_unittest.cc
+++ b/media/cast/audio_sender/audio_encoder_unittest.cc
@@ -2,11 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <sstream>
+#include <string>
+
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/memory/scoped_ptr.h"
+#include "media/base/audio_bus.h"
+#include "media/base/media.h"
#include "media/cast/audio_sender/audio_encoder.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
+#include "media/cast/test/audio_utility.h"
#include "media/cast/test/fake_task_runner.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -15,17 +22,74 @@ namespace cast {
static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
-static void RelaseFrame(const PcmAudioFrame* frame) {
- delete frame;
-}
+namespace {
-static void FrameEncoded(scoped_ptr<EncodedAudioFrame> encoded_frame,
- const base::TimeTicks& recorded_time) {
-}
+class TestEncodedAudioFrameReceiver {
+ public:
+ explicit TestEncodedAudioFrameReceiver(AudioCodec codec) :
+ codec_(codec), frames_received_(0) {}
+ virtual ~TestEncodedAudioFrameReceiver() {}
+
+ int frames_received() const {
+ return frames_received_;
+ }
+
+ void SetRecordedTimeLowerBound(const base::TimeTicks& t) {
+ lower_bound_ = t;
+ }
+
+ void SetRecordedTimeUpperBound(const base::TimeTicks& t) {
+ upper_bound_ = t;
+ }
-class AudioEncoderTest : public ::testing::Test {
- protected:
+ void FrameEncoded(scoped_ptr<EncodedAudioFrame> encoded_frame,
+ const base::TimeTicks& recorded_time) {
+ EXPECT_EQ(codec_, encoded_frame->codec);
+ EXPECT_EQ(static_cast<uint8>(frames_received_ & 0xff),
+ encoded_frame->frame_id);
+ EXPECT_LT(0, encoded_frame->samples);
+ EXPECT_TRUE(!encoded_frame->data.empty());
+
+ EXPECT_LE(lower_bound_, recorded_time);
+ lower_bound_ = recorded_time;
+ EXPECT_GT(upper_bound_, recorded_time);
+
+ ++frames_received_;
+ }
+
+ private:
+ const AudioCodec codec_;
+ int frames_received_;
+ base::TimeTicks lower_bound_;
+ base::TimeTicks upper_bound_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestEncodedAudioFrameReceiver);
+};
+
+struct TestScenario {
+ const int64* durations_in_ms;
+ size_t num_durations;
+
+ TestScenario(const int64* d, size_t n)
+ : durations_in_ms(d), num_durations(n) {}
+
+ std::string ToString() const {
+ std::ostringstream out;
+ for (size_t i = 0; i < num_durations; ++i) {
+ if (i > 0)
+ out << ", ";
+ out << durations_in_ms[i];
+ }
+ return out.str();
+ }
+};
+
+} // namespace
+
+class AudioEncoderTest : public ::testing::TestWithParam<TestScenario> {
+ public:
AudioEncoderTest() {
+ InitializeMediaLibraryForTesting();
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
}
@@ -33,38 +97,138 @@ class AudioEncoderTest : public ::testing::Test {
virtual void SetUp() {
task_runner_ = new test::FakeTaskRunner(&testing_clock_);
cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_);
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig());
+ }
+
+ virtual ~AudioEncoderTest() {}
+
+ void RunTestForCodec(AudioCodec codec) {
+ const TestScenario& scenario = GetParam();
+ SCOPED_TRACE(::testing::Message()
+ << "Durations: " << scenario.ToString());
+
+ CreateObjectsForCodec(codec);
+
+ receiver_->SetRecordedTimeLowerBound(testing_clock_.NowTicks());
+ for (size_t i = 0; i < scenario.num_durations; ++i) {
+ const base::TimeDelta duration =
+ base::TimeDelta::FromMilliseconds(scenario.durations_in_ms[i]);
+ receiver_->SetRecordedTimeUpperBound(
+ testing_clock_.NowTicks() + duration);
+
+ const scoped_ptr<AudioBus> bus(
+ audio_bus_factory_->NextAudioBus(duration));
+
+ const int last_count = release_callback_count_;
+ audio_encoder_->InsertAudio(
+ bus.get(), testing_clock_.NowTicks(),
+ base::Bind(&AudioEncoderTest::IncrementReleaseCallbackCounter,
+ base::Unretained(this)));
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, release_callback_count_ - last_count)
+ << "Release callback was not invoked once.";
+
+ testing_clock_.Advance(duration);
+ }
+
+ DVLOG(1) << "Received " << receiver_->frames_received()
+ << " frames for this test run: " << scenario.ToString();
+ }
+
+ private:
+ void CreateObjectsForCodec(AudioCodec codec) {
AudioSenderConfig audio_config;
- audio_config.codec = kOpus;
+ audio_config.codec = codec;
audio_config.use_external_encoder = false;
- audio_config.frequency = 48000;
+ audio_config.frequency = kDefaultAudioSamplingRate;
audio_config.channels = 2;
- audio_config.bitrate = 64000;
+ audio_config.bitrate = kDefaultAudioEncoderBitrate;
audio_config.rtp_payload_type = 127;
- audio_encoder_ = new AudioEncoder(cast_environment_, audio_config);
+ audio_bus_factory_.reset(new TestAudioBusFactory(
+ audio_config.channels, audio_config.frequency,
+ TestAudioBusFactory::kMiddleANoteFreq, 0.5f));
+
+ receiver_.reset(new TestEncodedAudioFrameReceiver(codec));
+
+ audio_encoder_ = new AudioEncoder(
+ cast_environment_, audio_config,
+ base::Bind(&TestEncodedAudioFrameReceiver::FrameEncoded,
+ base::Unretained(receiver_.get())));
+ release_callback_count_ = 0;
}
- virtual ~AudioEncoderTest() {}
+ void IncrementReleaseCallbackCounter() {
+ ++release_callback_count_;
+ }
base::SimpleTestTickClock testing_clock_;
scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_ptr<TestAudioBusFactory> audio_bus_factory_;
+ scoped_ptr<TestEncodedAudioFrameReceiver> receiver_;
scoped_refptr<AudioEncoder> audio_encoder_;
scoped_refptr<CastEnvironment> cast_environment_;
+ int release_callback_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioEncoderTest);
};
-TEST_F(AudioEncoderTest, Encode20ms) {
- PcmAudioFrame* audio_frame = new PcmAudioFrame();
- audio_frame->channels = 2;
- audio_frame->frequency = 48000;
- audio_frame->samples.insert(audio_frame->samples.begin(), 480 * 2 * 2, 123);
-
- base::TimeTicks recorded_time;
- audio_encoder_->InsertRawAudioFrame(audio_frame, recorded_time,
- base::Bind(&FrameEncoded),
- base::Bind(&RelaseFrame, audio_frame));
- task_runner_->RunTasks();
+TEST_P(AudioEncoderTest, EncodeOpus) {
+ RunTestForCodec(kOpus);
}
+TEST_P(AudioEncoderTest, EncodePcm16) {
+ RunTestForCodec(kPcm16);
+}
+
+static const int64 kOneCall_3Millis[] = { 3 };
+static const int64 kOneCall_10Millis[] = { 10 };
+static const int64 kOneCall_13Millis[] = { 13 };
+static const int64 kOneCall_20Millis[] = { 20 };
+
+static const int64 kTwoCalls_3Millis[] = { 3, 3 };
+static const int64 kTwoCalls_10Millis[] = { 10, 10 };
+static const int64 kTwoCalls_Mixed1[] = { 3, 10 };
+static const int64 kTwoCalls_Mixed2[] = { 10, 3 };
+static const int64 kTwoCalls_Mixed3[] = { 3, 17 };
+static const int64 kTwoCalls_Mixed4[] = { 17, 3 };
+
+static const int64 kManyCalls_3Millis[] =
+ { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 };
+static const int64 kManyCalls_10Millis[] =
+ { 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 };
+static const int64 kManyCalls_Mixed1[] =
+ { 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10 };
+static const int64 kManyCalls_Mixed2[] =
+ { 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3 };
+static const int64 kManyCalls_Mixed3[] =
+ { 3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3, 2, 3, 8, 4, 6, 2, 6, 4 };
+static const int64 kManyCalls_Mixed4[] =
+ { 31, 4, 15, 9, 26, 53, 5, 8, 9, 7, 9, 32, 38, 4, 62, 64, 3 };
+static const int64 kManyCalls_Mixed5[] =
+ { 3, 14, 15, 9, 26, 53, 58, 9, 7, 9, 3, 23, 8, 4, 6, 2, 6, 43 };
+
+INSTANTIATE_TEST_CASE_P(
+ AudioEncoderTestScenarios, AudioEncoderTest,
+ ::testing::Values(
+ TestScenario(kOneCall_3Millis, arraysize(kOneCall_3Millis)),
+ TestScenario(kOneCall_10Millis, arraysize(kOneCall_10Millis)),
+ TestScenario(kOneCall_13Millis, arraysize(kOneCall_13Millis)),
+ TestScenario(kOneCall_20Millis, arraysize(kOneCall_20Millis)),
+ TestScenario(kTwoCalls_3Millis, arraysize(kTwoCalls_3Millis)),
+ TestScenario(kTwoCalls_10Millis, arraysize(kTwoCalls_10Millis)),
+ TestScenario(kTwoCalls_Mixed1, arraysize(kTwoCalls_Mixed1)),
+ TestScenario(kTwoCalls_Mixed2, arraysize(kTwoCalls_Mixed2)),
+ TestScenario(kTwoCalls_Mixed3, arraysize(kTwoCalls_Mixed3)),
+ TestScenario(kTwoCalls_Mixed4, arraysize(kTwoCalls_Mixed4)),
+ TestScenario(kManyCalls_3Millis, arraysize(kManyCalls_3Millis)),
+ TestScenario(kManyCalls_10Millis, arraysize(kManyCalls_10Millis)),
+ TestScenario(kManyCalls_Mixed1, arraysize(kManyCalls_Mixed1)),
+ TestScenario(kManyCalls_Mixed2, arraysize(kManyCalls_Mixed2)),
+ TestScenario(kManyCalls_Mixed3, arraysize(kManyCalls_Mixed3)),
+ TestScenario(kManyCalls_Mixed4, arraysize(kManyCalls_Mixed4)),
+ TestScenario(kManyCalls_Mixed5, arraysize(kManyCalls_Mixed5))));
+
} // namespace cast
} // namespace media
diff --git a/media/cast/audio_sender/audio_sender.cc b/media/cast/audio_sender/audio_sender.cc
index 560ebd990c..00f4313e1e 100644
--- a/media/cast/audio_sender/audio_sender.cc
+++ b/media/cast/audio_sender/audio_sender.cc
@@ -7,7 +7,10 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "crypto/encryptor.h"
+#include "crypto/symmetric_key.h"
#include "media/cast/audio_sender/audio_encoder.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/rtcp/rtcp.h"
#include "media/cast/rtp_sender/rtp_sender.h"
@@ -22,28 +25,6 @@ class LocalRtcpAudioSenderFeedback : public RtcpSenderFeedback {
: audio_sender_(audio_sender) {
}
- virtual void OnReceivedReportBlock(
- const RtcpReportBlock& report_block) OVERRIDE {
- }
-
- virtual void OnReceivedIntraFrameRequest() OVERRIDE {
- DCHECK(false) << "Invalid callback";
- }
-
- virtual void OnReceivedRpsi(uint8 payload_type,
- uint64 picture_id) OVERRIDE {
- DCHECK(false) << "Invalid callback";
- }
-
- virtual void OnReceivedRemb(uint32 bitrate) OVERRIDE {
- DCHECK(false) << "Invalid callback";
- }
-
- virtual void OnReceivedNackRequest(
- const std::list<uint16>& nack_sequence_numbers) OVERRIDE {
- DCHECK(false) << "Invalid callback";
- }
-
virtual void OnReceivedCastFeedback(
const RtcpCastMessage& cast_feedback) OVERRIDE {
if (!cast_feedback.missing_frames_and_packets_.empty()) {
@@ -75,72 +56,137 @@ class LocalRtpSenderStatistics : public RtpSenderStatistics {
AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
const AudioSenderConfig& audio_config,
PacedPacketSender* const paced_packet_sender)
- : incoming_feedback_ssrc_(audio_config.incoming_feedback_ssrc),
- cast_environment_(cast_environment),
- rtp_sender_(cast_environment->Clock(), &audio_config, NULL,
+ : cast_environment_(cast_environment),
+ rtp_sender_(cast_environment, &audio_config, NULL,
paced_packet_sender),
rtcp_feedback_(new LocalRtcpAudioSenderFeedback(this)),
rtp_audio_sender_statistics_(
new LocalRtpSenderStatistics(&rtp_sender_)),
- rtcp_(cast_environment->Clock(),
+ rtcp_(cast_environment,
rtcp_feedback_.get(),
paced_packet_sender,
rtp_audio_sender_statistics_.get(),
NULL,
audio_config.rtcp_mode,
base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
- true,
audio_config.sender_ssrc,
+ audio_config.incoming_feedback_ssrc,
audio_config.rtcp_c_name),
+ initialized_(false),
weak_factory_(this) {
- rtcp_.SetRemoteSSRC(audio_config.incoming_feedback_ssrc);
-
+ if (audio_config.aes_iv_mask.size() == kAesKeySize &&
+ audio_config.aes_key.size() == kAesKeySize) {
+ iv_mask_ = audio_config.aes_iv_mask;
+ crypto::SymmetricKey* key = crypto::SymmetricKey::Import(
+ crypto::SymmetricKey::AES, audio_config.aes_key);
+ encryptor_.reset(new crypto::Encryptor());
+ encryptor_->Init(key, crypto::Encryptor::CTR, std::string());
+ } else if (audio_config.aes_iv_mask.size() != 0 ||
+ audio_config.aes_key.size() != 0) {
+ DCHECK(false) << "Invalid crypto configuration";
+ }
if (!audio_config.use_external_encoder) {
- audio_encoder_ = new AudioEncoder(cast_environment, audio_config);
+ audio_encoder_ = new AudioEncoder(
+ cast_environment, audio_config,
+ base::Bind(&AudioSender::SendEncodedAudioFrame,
+ weak_factory_.GetWeakPtr()));
}
- ScheduleNextRtcpReport();
}
AudioSender::~AudioSender() {}
-void AudioSender::InsertRawAudioFrame(
- const PcmAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const base::Closure callback) {
- DCHECK(audio_encoder_.get()) << "Invalid internal state";
+void AudioSender::InitializeTimers() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (!initialized_) {
+ initialized_ = true;
+ ScheduleNextRtcpReport();
+ }
+}
- audio_encoder_->InsertRawAudioFrame(audio_frame, recorded_time,
- base::Bind(&AudioSender::SendEncodedAudioFrame,
- weak_factory_.GetWeakPtr()),
- callback);
+void AudioSender::InsertAudio(const AudioBus* audio_bus,
+ const base::TimeTicks& recorded_time,
+ const base::Closure& done_callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(audio_encoder_.get()) << "Invalid internal state";
+ // TODO(mikhal): Resolve calculation of the audio rtp_timestamp for logging.
+ // This is a tmp solution to allow the code to build.
+ cast_environment_->Logging()->InsertFrameEvent(kAudioFrameReceived,
+ GetVideoRtpTimestamp(recorded_time), kFrameIdUnknown);
+ audio_encoder_->InsertAudio(audio_bus, recorded_time, done_callback);
}
void AudioSender::InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
const base::TimeTicks& recorded_time,
const base::Closure callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(audio_encoder_.get() == NULL) << "Invalid internal state";
- rtp_sender_.IncomingEncodedAudioFrame(audio_frame, recorded_time);
+
+ cast_environment_->Logging()->InsertFrameEvent(kAudioFrameReceived,
+ GetVideoRtpTimestamp(recorded_time), kFrameIdUnknown);
+
+ if (encryptor_) {
+ EncodedAudioFrame encrypted_frame;
+ if (!EncryptAudioFrame(*audio_frame, &encrypted_frame)) {
+ // Logging already done.
+ return;
+ }
+ rtp_sender_.IncomingEncodedAudioFrame(&encrypted_frame, recorded_time);
+ } else {
+ rtp_sender_.IncomingEncodedAudioFrame(audio_frame, recorded_time);
+ }
callback.Run();
}
void AudioSender::SendEncodedAudioFrame(
scoped_ptr<EncodedAudioFrame> audio_frame,
const base::TimeTicks& recorded_time) {
- rtp_sender_.IncomingEncodedAudioFrame(audio_frame.get(), recorded_time);
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ InitializeTimers();
+ if (encryptor_) {
+ EncodedAudioFrame encrypted_frame;
+ if (!EncryptAudioFrame(*audio_frame.get(), &encrypted_frame)) {
+ // Logging already done.
+ return;
+ }
+ rtp_sender_.IncomingEncodedAudioFrame(&encrypted_frame, recorded_time);
+ } else {
+ rtp_sender_.IncomingEncodedAudioFrame(audio_frame.get(), recorded_time);
+ }
+}
+
+bool AudioSender::EncryptAudioFrame(const EncodedAudioFrame& audio_frame,
+ EncodedAudioFrame* encrypted_frame) {
+ DCHECK(encryptor_) << "Invalid state";
+
+ if (!encryptor_->SetCounter(GetAesNonce(audio_frame.frame_id, iv_mask_))) {
+ NOTREACHED() << "Failed to set counter";
+ return false;
+ }
+ if (!encryptor_->Encrypt(audio_frame.data, &encrypted_frame->data)) {
+ NOTREACHED() << "Encrypt error";
+ return false;
+ }
+ encrypted_frame->codec = audio_frame.codec;
+ encrypted_frame->frame_id = audio_frame.frame_id;
+ encrypted_frame->samples = audio_frame.samples;
+ return true;
}
void AudioSender::ResendPackets(
const MissingFramesAndPacketsMap& missing_frames_and_packets) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
rtp_sender_.ResendPackets(missing_frames_and_packets);
}
void AudioSender::IncomingRtcpPacket(const uint8* packet, size_t length,
const base::Closure callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
rtcp_.IncomingRtcpPacket(packet, length);
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
}
void AudioSender::ScheduleNextRtcpReport() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeDelta time_to_next =
rtcp_.TimeToSendNextRtcpReport() - cast_environment_->Clock()->NowTicks();
@@ -153,7 +199,8 @@ void AudioSender::ScheduleNextRtcpReport() {
}
void AudioSender::SendRtcpReport() {
- rtcp_.SendRtcpReport(incoming_feedback_ssrc_);
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ rtcp_.SendRtcpFromRtpSender(NULL); // TODO(pwestin): add logging.
ScheduleNextRtcpReport();
}
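
The new sender-side crypto path above boils down to: derive a per-frame CTR counter by XOR-ing the big-endian frame_id into the configured IV mask, then run the frame payload through crypto::Encryptor. A minimal standalone sketch of that flow (names mirror the patch; the key/IV values and the helper name EncryptOneFrame are placeholders, not anything the patch defines):

#include "crypto/encryptor.h"
#include "crypto/symmetric_key.h"

// Assumes GetAesNonce() and kAesKeySize from media/cast/cast_defines.h.
bool EncryptOneFrame(uint32 frame_id,
                     const std::string& aes_key,      // 16 bytes (kAesKeySize)
                     const std::string& aes_iv_mask,  // 16 bytes (kAesKeySize)
                     const std::string& plaintext,
                     std::string* ciphertext) {
  scoped_ptr<crypto::SymmetricKey> key(
      crypto::SymmetricKey::Import(crypto::SymmetricKey::AES, aes_key));
  if (!key)
    return false;
  crypto::Encryptor encryptor;
  if (!encryptor.Init(key.get(), crypto::Encryptor::CTR, std::string()))
    return false;
  // Per-frame counter: big-endian frame_id XOR'ed into the IV mask.
  if (!encryptor.SetCounter(GetAesNonce(frame_id, aes_iv_mask)))
    return false;
  return encryptor.Encrypt(plaintext, ciphertext);
}
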
diff --git a/media/cast/audio_sender/audio_sender.gypi b/media/cast/audio_sender/audio_sender.gypi
index 3e2a56345b..32a316ac68 100644
--- a/media/cast/audio_sender/audio_sender.gypi
+++ b/media/cast/audio_sender/audio_sender.gypi
@@ -10,7 +10,6 @@
'include_dirs': [
'<(DEPTH)/',
'<(DEPTH)/third_party/',
- '<(DEPTH)/third_party/webrtc',
],
'sources': [
'audio_encoder.h',
@@ -19,9 +18,12 @@
'audio_sender.cc',
], # source
'dependencies': [
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
+ '<(DEPTH)/media/media.gyp:media',
+ '<(DEPTH)/media/media.gyp:shared_memory_support',
'<(DEPTH)/media/cast/rtcp/rtcp.gyp:cast_rtcp',
'<(DEPTH)/media/cast/rtp_sender/rtp_sender.gyp:*',
- '<(DEPTH)/third_party/webrtc/webrtc.gyp:webrtc',
+ '<(DEPTH)/third_party/opus/opus.gyp:opus',
],
},
],
diff --git a/media/cast/audio_sender/audio_sender.h b/media/cast/audio_sender/audio_sender.h
index ca1fffb37f..e4a078b4d3 100644
--- a/media/cast/audio_sender/audio_sender.h
+++ b/media/cast/audio_sender/audio_sender.h
@@ -17,6 +17,14 @@
#include "media/cast/rtcp/rtcp.h"
#include "media/cast/rtp_sender/rtp_sender.h"
+namespace crypto {
+ class Encryptor;
+}
+
+namespace media {
+class AudioBus;
+}
+
namespace media {
namespace cast {
@@ -36,13 +44,13 @@ class AudioSender : public base::NonThreadSafe,
virtual ~AudioSender();
- // The audio_frame must be valid until the closure callback is called.
- // The closure callback is called from the main cast thread as soon as
- // the encoder is done with the frame; it does not mean that the encoded frame
- // has been sent out.
- void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const base::Closure callback);
+ // The |audio_bus| must be valid until the |done_callback| is called.
+ // The callback is called from the main cast thread as soon as the encoder is
+ // done with |audio_bus|; it does not mean that the encoded data has been
+ // sent out.
+ void InsertAudio(const AudioBus* audio_bus,
+ const base::TimeTicks& recorded_time,
+ const base::Closure& done_callback);
// The audio_frame must be valid until the closure callback is called.
// The closure callback is called from the main cast thread as soon as
@@ -66,18 +74,27 @@ class AudioSender : public base::NonThreadSafe,
void ResendPackets(
const MissingFramesAndPacketsMap& missing_frames_and_packets);
+ // Caller must allocate the destination |encrypted_frame|. The data member
+ // will be resized to hold the encrypted size.
+ bool EncryptAudioFrame(const EncodedAudioFrame& audio_frame,
+ EncodedAudioFrame* encrypted_frame);
+
void ScheduleNextRtcpReport();
void SendRtcpReport();
+ void InitializeTimers();
+
base::WeakPtrFactory<AudioSender> weak_factory_;
- const uint32 incoming_feedback_ssrc_;
scoped_refptr<CastEnvironment> cast_environment_;
scoped_refptr<AudioEncoder> audio_encoder_;
RtpSender rtp_sender_;
scoped_ptr<LocalRtpSenderStatistics> rtp_audio_sender_statistics_;
scoped_ptr<LocalRtcpAudioSenderFeedback> rtcp_feedback_;
Rtcp rtcp_;
+ bool initialized_;
+ scoped_ptr<crypto::Encryptor> encryptor_;
+ std::string iv_mask_;
DISALLOW_COPY_AND_ASSIGN(AudioSender);
};
@@ -86,4 +103,3 @@ class AudioSender : public base::NonThreadSafe,
} // namespace media
#endif // MEDIA_CAST_AUDIO_SENDER_H_
-
diff --git a/media/cast/audio_sender/audio_sender_unittest.cc b/media/cast/audio_sender/audio_sender_unittest.cc
index 9ab6bb549b..5b632bf025 100644
--- a/media/cast/audio_sender/audio_sender_unittest.cc
+++ b/media/cast/audio_sender/audio_sender_unittest.cc
@@ -3,12 +3,15 @@
// found in the LICENSE file.
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/memory/scoped_ptr.h"
#include "base/test/simple_test_tick_clock.h"
+#include "media/base/media.h"
#include "media/cast/audio_sender/audio_sender.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/pacing/mock_paced_packet_sender.h"
+#include "media/cast/test/audio_utility.h"
#include "media/cast/test/fake_task_runner.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -18,14 +21,12 @@ namespace cast {
static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
using testing::_;
-
-static void RelaseFrame(const PcmAudioFrame* frame) {
- delete frame;
-}
+using testing::AtLeast;
class AudioSenderTest : public ::testing::Test {
protected:
AudioSenderTest() {
+ InitializeMediaLibraryForTesting();
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
}
@@ -33,17 +34,17 @@ class AudioSenderTest : public ::testing::Test {
virtual void SetUp() {
task_runner_ = new test::FakeTaskRunner(&testing_clock_);
cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_);
- AudioSenderConfig audio_config;
- audio_config.codec = kOpus;
- audio_config.use_external_encoder = false;
- audio_config.frequency = 48000;
- audio_config.channels = 2;
- audio_config.bitrate = 64000;
- audio_config.rtp_payload_type = 127;
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig());
+ audio_config_.codec = kOpus;
+ audio_config_.use_external_encoder = false;
+ audio_config_.frequency = kDefaultAudioSamplingRate;
+ audio_config_.channels = 2;
+ audio_config_.bitrate = kDefaultAudioEncoderBitrate;
+ audio_config_.rtp_payload_type = 127;
audio_sender_.reset(
- new AudioSender(cast_environment_, audio_config, &mock_transport_));
+ new AudioSender(cast_environment_, audio_config_, &mock_transport_));
}
virtual ~AudioSenderTest() {}
@@ -53,26 +54,43 @@ class AudioSenderTest : public ::testing::Test {
scoped_refptr<test::FakeTaskRunner> task_runner_;
scoped_ptr<AudioSender> audio_sender_;
scoped_refptr<CastEnvironment> cast_environment_;
+ AudioSenderConfig audio_config_;
};
TEST_F(AudioSenderTest, Encode20ms) {
- EXPECT_CALL(mock_transport_, SendPackets(_)).Times(1);
-
- PcmAudioFrame* audio_frame = new PcmAudioFrame();
- audio_frame->channels = 2;
- audio_frame->frequency = 48000;
- audio_frame->samples.insert(audio_frame->samples.begin(), 480 * 2 * 2, 123);
-
- base::TimeTicks recorded_time;
- audio_sender_->InsertRawAudioFrame(audio_frame, recorded_time,
- base::Bind(&RelaseFrame, audio_frame));
-
+ EXPECT_CALL(mock_transport_, SendPackets(_)).Times(AtLeast(1));
+
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(20);
+ scoped_ptr<AudioBus> bus(TestAudioBusFactory(
+ audio_config_.channels, audio_config_.frequency,
+ TestAudioBusFactory::kMiddleANoteFreq, 0.5f).NextAudioBus(kDuration));
+
+ base::TimeTicks recorded_time = base::TimeTicks::Now();
+ audio_sender_->InsertAudio(
+ bus.get(), recorded_time,
+ base::Bind(base::IgnoreResult(&scoped_ptr<AudioBus>::release),
+ base::Unretained(&bus)));
task_runner_->RunTasks();
+
+ EXPECT_TRUE(!bus) << "AudioBus wasn't released after use.";
}
TEST_F(AudioSenderTest, RtcpTimer) {
+ EXPECT_CALL(mock_transport_, SendPackets(_)).Times(AtLeast(1));
EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).Times(1);
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(20);
+ scoped_ptr<AudioBus> bus(TestAudioBusFactory(
+ audio_config_.channels, audio_config_.frequency,
+ TestAudioBusFactory::kMiddleANoteFreq, 0.5f).NextAudioBus(kDuration));
+
+ base::TimeTicks recorded_time = base::TimeTicks::Now();
+ audio_sender_->InsertAudio(
+ bus.get(), recorded_time,
+ base::Bind(base::IgnoreResult(&scoped_ptr<AudioBus>::release),
+ base::Unretained(&bus)));
+ task_runner_->RunTasks();
+
// Make sure that we send at least one RTCP packet.
base::TimeDelta max_rtcp_timeout =
base::TimeDelta::FromMilliseconds(1 + kDefaultRtcpIntervalMs * 3 / 2);
diff --git a/media/cast/cast.gyp b/media/cast/cast.gyp
index 511ac29f76..1b27ee926e 100644
--- a/media/cast/cast.gyp
+++ b/media/cast/cast.gyp
@@ -14,11 +14,22 @@
'include_dirs': [
'<(DEPTH)/',
],
+ 'dependencies': [
+ '<(DEPTH)/base/base.gyp:base',
+ ],
'sources': [
'cast_config.cc',
'cast_config.h',
'cast_environment.cc',
'cast_environment.h',
+ 'logging/logging_defines.cc',
+ 'logging/logging_defines.h',
+ 'logging/logging_impl.cc',
+ 'logging/logging_impl.h',
+ 'logging/logging_raw.cc',
+ 'logging/logging_raw.h',
+ 'logging/logging_stats.cc',
+ 'logging/logging_stats.h',
], # source
},
], # targets,
@@ -32,8 +43,10 @@
'cast_config',
'cast_receiver.gyp:cast_receiver',
'cast_sender.gyp:cast_sender',
- 'logging/logging.gyp:cast_logging',
+ 'test/utility/utility.gyp:cast_test_utility',
'<(DEPTH)/base/base.gyp:run_all_unittests',
+ '<(DEPTH)/base/base.gyp:test_support_base',
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
'<(DEPTH)/net/net.gyp:net',
'<(DEPTH)/testing/gmock.gyp:gmock',
'<(DEPTH)/testing/gtest.gyp:gtest',
@@ -71,10 +84,10 @@
'rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc',
'rtp_sender/rtp_packetizer/test/rtp_header_parser.cc',
'rtp_sender/rtp_packetizer/test/rtp_header_parser.h',
+ 'test/crypto_utility.cc',
+ 'test/crypto_utility.h',
'test/encode_decode_test.cc',
'test/end2end_unittest.cc',
- 'test/fake_task_runner.cc',
- 'test/video_utility.cc',
'video_receiver/video_decoder_unittest.cc',
'video_receiver/video_receiver_unittest.cc',
'video_sender/mock_video_encoder_controller.cc',
@@ -87,6 +100,58 @@
'video_sender/video_sender_unittest.cc',
], # source
},
+ {
+ 'target_name': 'cast_sender_app',
+ 'type': 'executable',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ '<(DEPTH)/ui/gfx/gfx.gyp:gfx',
+ '<(DEPTH)/net/net.gyp:net_test_support',
+ 'cast_config',
+ '<(DEPTH)/media/cast/cast_sender.gyp:*',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/third_party/opus/opus.gyp:opus',
+ '<(DEPTH)/media/cast/test/transport/transport.gyp:cast_transport',
+ '<(DEPTH)/media/cast/test/utility/utility.gyp:cast_test_utility',
+ ],
+ 'sources': [
+ '<(DEPTH)/media/cast/test/sender.cc',
+ ],
+ },
+ {
+ 'target_name': 'cast_receiver_app',
+ 'type': 'executable',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ '<(DEPTH)/ui/gfx/gfx.gyp:gfx',
+ '<(DEPTH)/net/net.gyp:net_test_support',
+ 'cast_config',
+ '<(DEPTH)/media/cast/cast_receiver.gyp:*',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/media/cast/test/transport/transport.gyp:cast_transport',
+ '<(DEPTH)/media/cast/test/utility/utility.gyp:cast_test_utility',
+ '<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
+ ],
+ 'sources': [
+ '<(DEPTH)/media/cast/test/receiver.cc',
+ ],
+ 'conditions': [
+ ['OS == "linux"', {
+ 'sources': [
+ '<(DEPTH)/media/cast/test/linux_output_window.cc',
+ '<(DEPTH)/media/cast/test/linux_output_window.h',
+ ],
+ 'libraries': [
+ '-lXext',
+ '-lX11',
+ ],
+ }],
+ ],
+ },
], # targets
}], # include_tests
],
diff --git a/media/cast/cast_config.h b/media/cast/cast_config.h
index 71073f43e5..66d4c1df0d 100644
--- a/media/cast/cast_config.h
+++ b/media/cast/cast_config.h
@@ -51,8 +51,11 @@ struct AudioSenderConfig {
bool use_external_encoder;
int frequency;
int channels;
- int bitrate;
+ int bitrate; // Set to <= 0 for "auto variable bitrate" (libopus knows best).
AudioCodec codec;
+
+ std::string aes_key; // Binary string of size kAesKeySize.
+ std::string aes_iv_mask; // Binary string of size kAesKeySize.
};
struct VideoSenderConfig {
@@ -83,6 +86,9 @@ struct VideoSenderConfig {
int max_number_of_video_buffers_used; // Max value depend on codec.
VideoCodec codec;
int number_of_cores;
+
+ std::string aes_key; // Binary string of size kAesKeySize.
+ std::string aes_iv_mask; // Binary string of size kAesKeySize.
};
struct AudioReceiverConfig {
@@ -103,6 +109,9 @@ struct AudioReceiverConfig {
int frequency;
int channels;
AudioCodec codec;
+
+ std::string aes_key; // Binary string of size kAesKeySize.
+ std::string aes_iv_mask; // Binary string of size kAesKeySize.
};
struct VideoReceiverConfig {
@@ -126,14 +135,21 @@ struct VideoReceiverConfig {
// from catching up after a glitch.
bool decoder_faster_than_max_frame_rate;
VideoCodec codec;
+
+ std::string aes_key; // Binary string of size kAesKeySize.
+ std::string aes_iv_mask; // Binary string of size kAesKeySize.
};
+// DEPRECATED: Do not use in new code. Please migrate existing code to use
+// media::VideoFrame.
struct I420VideoPlane {
int stride;
int length;
uint8* data;
};
+// DEPRECATED: Do not use in new code. Please migrate existing code to use
+// media::VideoFrame.
struct I420VideoFrame {
int width;
int height;
@@ -148,11 +164,13 @@ struct EncodedVideoFrame {
VideoCodec codec;
bool key_frame;
- uint8 frame_id;
- uint8 last_referenced_frame_id;
- std::vector<uint8> data;
+ uint32 frame_id;
+ uint32 last_referenced_frame_id;
+ std::string data;
};
+// DEPRECATED: Do not use in new code. Please migrate existing code to use
+// media::AudioBus.
struct PcmAudioFrame {
PcmAudioFrame();
~PcmAudioFrame();
@@ -167,10 +185,12 @@ struct EncodedAudioFrame {
~EncodedAudioFrame();
AudioCodec codec;
- uint8 frame_id; // Needed to release the frame. Not used send side.
+ uint32 frame_id; // Needed to release the frame.
int samples; // Needed send side to advance the RTP timestamp.
// Not used receive side.
- std::vector<uint8> data;
+ // Support for max sampling rate of 48KHz, 2 channels, 100 ms duration.
+ static const int kMaxNumberOfSamples = 48 * 2 * 100;
+ std::string data;
};
typedef std::vector<uint8> Packet;
@@ -216,7 +236,7 @@ class VideoEncoderController {
virtual void GenerateKeyFrame() = 0;
// Inform the encoder to only reference frames older or equal to frame_id;
- virtual void LatestFrameIdToReference(uint8 frame_id) = 0;
+ virtual void LatestFrameIdToReference(uint32 frame_id) = 0;
// Query the codec about how many frames it has skipped due to slow ACK.
virtual int NumberOfSkippedFrames() const = 0;
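
The new aes_key / aes_iv_mask fields are plain binary strings; both endpoints must be configured with the same 16-byte (kAesKeySize) values, and leaving both empty keeps encryption off. A hedged configuration sketch (all values below are placeholders):

AudioSenderConfig audio_config;
audio_config.codec = kOpus;
audio_config.frequency = 48000;
audio_config.channels = 2;
audio_config.bitrate = 0;  // <= 0 lets the Opus encoder pick a variable bitrate.
// Placeholder 16-byte key material; a real deployment exchanges these securely.
audio_config.aes_key = std::string(kAesKeySize, '\x42');
audio_config.aes_iv_mask = std::string(kAesKeySize, '\x24');
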
diff --git a/media/cast/cast_defines.h b/media/cast/cast_defines.h
index f44e5fb826..5eff0d74ac 100644
--- a/media/cast/cast_defines.h
+++ b/media/cast/cast_defines.h
@@ -18,7 +18,7 @@ namespace cast {
const int64 kDontShowTimeoutMs = 33;
const float kDefaultCongestionControlBackOff = 0.875f;
-const uint8 kStartFrameId = 255;
+const uint32 kStartFrameId = GG_UINT32_C(0xffffffff);
const uint32 kVideoFrequency = 90000;
const int64 kSkippedFramesCheckPeriodkMs = 10000;
@@ -30,6 +30,8 @@ const int64 kCastMessageUpdateIntervalMs = 33;
const int64 kNackRepeatIntervalMs = 30;
enum DefaultSettings {
+ kDefaultAudioEncoderBitrate = 0, // This means "auto," and may mean VBR.
+ kDefaultAudioSamplingRate = 48000,
kDefaultMaxQp = 56,
kDefaultMinQp = 4,
kDefaultMaxFrameRate = 30,
@@ -46,6 +48,9 @@ const size_t kMinLengthOfRtcp = 8;
// Basic RTP header + cast header.
const size_t kMinLengthOfRtp = 12 + 6;
+const size_t kAesBlockSize = 16;
+const size_t kAesKeySize = 16;
+
// Each uint16 represents one packet id within a cast frame.
typedef std::set<uint16> PacketIdSet;
// Each uint8 represents one cast frame.
@@ -63,12 +68,12 @@ static const int64 kUnixEpochInNtpSeconds = GG_INT64_C(2208988800);
// fractional NTP seconds.
static const double kMagicFractionalUnit = 4.294967296E3;
-inline bool IsNewerFrameId(uint8 frame_id, uint8 prev_frame_id) {
+inline bool IsNewerFrameId(uint32 frame_id, uint32 prev_frame_id) {
return (frame_id != prev_frame_id) &&
- static_cast<uint8>(frame_id - prev_frame_id) < 0x80;
+ static_cast<uint32>(frame_id - prev_frame_id) < 0x80000000;
}
-inline bool IsOlderFrameId(uint8 frame_id, uint8 prev_frame_id) {
+inline bool IsOlderFrameId(uint32 frame_id, uint32 prev_frame_id) {
return (frame_id == prev_frame_id) || IsNewerFrameId(prev_frame_id, frame_id);
}
@@ -130,6 +135,64 @@ inline base::TimeTicks ConvertNtpToTimeTicks(uint32 ntp_seconds,
return base::TimeTicks::UnixEpoch() + elapsed_since_unix_epoch;
}
+class FrameIdWrapHelper {
+ public:
+ FrameIdWrapHelper()
+ : first_(true),
+ can_we_wrap_(false),
+ frame_id_wrap_count_(0) {}
+
+ uint32 MapTo32bitsFrameId(const uint8 over_the_wire_frame_id) {
+ if (first_) {
+ first_ = false;
+ if (over_the_wire_frame_id == 0xff) {
+ // Special case for startup.
+ return kStartFrameId;
+ }
+ }
+    if (can_we_wrap_) {
+      if (over_the_wire_frame_id < 0x0f) {
+        // The 8-bit id just wrapped past 0xff; bump the upper bits and
+        // disable wrap detection until the ids approach the top of the
+        // uint8 range again.
+        ++frame_id_wrap_count_;
+        can_we_wrap_ = false;
+      }
+    } else {
+      if (over_the_wire_frame_id > 0xf0) {
+        // Close to the top of the uint8 range; enable wrap detection.
+        can_we_wrap_ = true;
+      }
+    }
+ return (frame_id_wrap_count_ << 8) + over_the_wire_frame_id;
+ }
+
+ private:
+ bool first_;
+ bool can_we_wrap_;
+ uint32 frame_id_wrap_count_;
+};
+
+inline std::string GetAesNonce(uint32 frame_id, const std::string& iv_mask) {
+ std::string aes_nonce(kAesBlockSize, 0);
+
+ // Serializing frame_id in big-endian order (aes_nonce[8] is the most
+ // significant byte of frame_id).
+ aes_nonce[11] = frame_id & 0xff;
+ aes_nonce[10] = (frame_id >> 8) & 0xff;
+ aes_nonce[9] = (frame_id >> 16) & 0xff;
+ aes_nonce[8] = (frame_id >> 24) & 0xff;
+
+ for (size_t i = 0; i < kAesBlockSize; ++i) {
+ aes_nonce[i] ^= iv_mask[i];
+ }
+ return aes_nonce;
+}
+
+inline uint32 GetVideoRtpTimestamp(const base::TimeTicks& time_ticks) {
+ base::TimeTicks zero_time;
+ base::TimeDelta recorded_delta = time_ticks - zero_time;
+ // Timestamp is in 90 KHz for video.
+ return static_cast<uint32>(recorded_delta.InMilliseconds() * 90);
+}
+
} // namespace cast
} // namespace media
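
Taken together, the new helpers expand the 8-bit over-the-wire frame ids into the 32-bit space and derive 90 kHz RTP timestamps. A small usage sketch (assuming the wrap count is bumped on each detected wrap, as in the helper above):

FrameIdWrapHelper wrap_helper;
uint32 id_a = wrap_helper.MapTo32bitsFrameId(0xfe);  // 254: arms wrap detection.
uint32 id_b = wrap_helper.MapTo32bitsFrameId(0x03);  // 259: (1 << 8) + 3 after the wrap.
// Video RTP timestamp for "now", in 90 kHz units.
uint32 rtp_ts = GetVideoRtpTimestamp(base::TimeTicks::Now());
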
diff --git a/media/cast/cast_environment.cc b/media/cast/cast_environment.cc
index dddec1649f..be636bb253 100644
--- a/media/cast/cast_environment.cc
+++ b/media/cast/cast_environment.cc
@@ -17,13 +17,15 @@ CastEnvironment::CastEnvironment(
scoped_refptr<TaskRunner> audio_encode_thread_proxy,
scoped_refptr<TaskRunner> audio_decode_thread_proxy,
scoped_refptr<TaskRunner> video_encode_thread_proxy,
- scoped_refptr<TaskRunner> video_decode_thread_proxy)
+ scoped_refptr<TaskRunner> video_decode_thread_proxy,
+ const CastLoggingConfig& config)
: clock_(clock),
main_thread_proxy_(main_thread_proxy),
audio_encode_thread_proxy_(audio_encode_thread_proxy),
audio_decode_thread_proxy_(audio_decode_thread_proxy),
video_encode_thread_proxy_(video_encode_thread_proxy),
- video_decode_thread_proxy_(video_decode_thread_proxy) {
+ video_decode_thread_proxy_(video_decode_thread_proxy),
+ logging_(new LoggingImpl(clock, main_thread_proxy, config)) {
DCHECK(main_thread_proxy) << "Main thread required";
}
@@ -62,7 +64,7 @@ scoped_refptr<TaskRunner> CastEnvironment::GetMessageTaskRunnerForThread(
case CastEnvironment::VIDEO_DECODER:
return video_decode_thread_proxy_;
default:
- NOTREACHED() << "Invalid Thread ID.";
+ NOTREACHED() << "Invalid Thread identifier";
return NULL;
}
}
@@ -80,14 +82,20 @@ bool CastEnvironment::CurrentlyOn(ThreadId identifier) {
case CastEnvironment::VIDEO_DECODER:
return video_decode_thread_proxy_->RunsTasksOnCurrentThread();
default:
- NOTREACHED() << "Wrong thread identifier";
+ NOTREACHED() << "Invalid thread identifier";
return false;
}
}
-base::TickClock* CastEnvironment::Clock() {
+base::TickClock* CastEnvironment::Clock() const {
return clock_;
}
+LoggingImpl* CastEnvironment::Logging() {
+ DCHECK(CurrentlyOn(CastEnvironment::MAIN)) <<
+ "Must be called from main thread";
+ return logging_.get();
+}
+
} // namespace cast
} // namespace media
diff --git a/media/cast/cast_environment.h b/media/cast/cast_environment.h
index 1129091735..8a135733c0 100644
--- a/media/cast/cast_environment.h
+++ b/media/cast/cast_environment.h
@@ -7,9 +7,12 @@
#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
#include "base/task_runner.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/logging_impl.h"
namespace media {
namespace cast {
@@ -38,7 +41,8 @@ class CastEnvironment : public base::RefCountedThreadSafe<CastEnvironment> {
scoped_refptr<base::TaskRunner> audio_encode_thread_proxy,
scoped_refptr<base::TaskRunner> audio_decode_thread_proxy,
scoped_refptr<base::TaskRunner> video_encode_thread_proxy,
- scoped_refptr<base::TaskRunner> video_decode_thread_proxy);
+ scoped_refptr<base::TaskRunner> video_decode_thread_proxy,
+ const CastLoggingConfig& config);
// These are the same methods in message_loop.h, but are guaranteed to either
// get posted to the MessageLoop if it's still alive, or be deleted otherwise.
@@ -56,7 +60,10 @@ class CastEnvironment : public base::RefCountedThreadSafe<CastEnvironment> {
bool CurrentlyOn(ThreadId identifier);
- base::TickClock* Clock();
+ base::TickClock* Clock() const;
+
+ // Logging is not thread safe. Should always be called from the main thread.
+ LoggingImpl* Logging();
protected:
virtual ~CastEnvironment();
@@ -74,6 +81,8 @@ class CastEnvironment : public base::RefCountedThreadSafe<CastEnvironment> {
scoped_refptr<base::TaskRunner> video_encode_thread_proxy_;
scoped_refptr<base::TaskRunner> video_decode_thread_proxy_;
+ scoped_ptr<LoggingImpl> logging_;
+
DISALLOW_COPY_AND_ASSIGN(CastEnvironment);
};
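
Construction now takes the logging config as the final argument, mirroring the unit tests above, and Logging() is only valid on the main cast thread. A minimal sketch, assuming a tick clock and a single task runner standing in for every thread proxy:

scoped_refptr<CastEnvironment> cast_environment(new CastEnvironment(
    &clock, task_runner,            // clock, main
    task_runner, task_runner,       // audio encode / decode
    task_runner, task_runner,       // video encode / decode
    GetDefaultCastLoggingConfig()));
// Main thread only.
cast_environment->Logging()->InsertFrameEvent(
    kAudioFrameReceived, GetVideoRtpTimestamp(recorded_time), kFrameIdUnknown);
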
diff --git a/media/cast/cast_receiver.gyp b/media/cast/cast_receiver.gyp
index 30f40df0c1..7227c16b02 100644
--- a/media/cast/cast_receiver.gyp
+++ b/media/cast/cast_receiver.gyp
@@ -22,11 +22,11 @@
'cast_receiver_impl.h',
], # source
'dependencies': [
- 'rtp_receiver/rtp_receiver.gyp:*',
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
'cast_audio_receiver',
'cast_video_receiver',
- 'framer/framer.gyp:cast_framer',
'pacing/paced_sender.gyp:cast_paced_sender',
+ 'rtp_receiver/rtp_receiver.gyp:cast_rtp_receiver',
],
},
],
diff --git a/media/cast/cast_receiver_impl.cc b/media/cast/cast_receiver_impl.cc
index 2a88cd290b..e2c004fe96 100644
--- a/media/cast/cast_receiver_impl.cc
+++ b/media/cast/cast_receiver_impl.cc
@@ -18,8 +18,8 @@ namespace cast {
class LocalFrameReceiver : public FrameReceiver {
public:
LocalFrameReceiver(scoped_refptr<CastEnvironment> cast_environment,
- base::WeakPtr<AudioReceiver> audio_receiver,
- base::WeakPtr<VideoReceiver> video_receiver)
+ AudioReceiver* audio_receiver,
+ VideoReceiver* video_receiver)
: cast_environment_(cast_environment),
audio_receiver_(audio_receiver),
video_receiver_(video_receiver) {}
@@ -27,15 +27,15 @@ class LocalFrameReceiver : public FrameReceiver {
virtual void GetRawVideoFrame(
const VideoFrameDecodedCallback& callback) OVERRIDE {
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoReceiver::GetRawVideoFrame, video_receiver_,
- callback));
+ base::Bind(&VideoReceiver::GetRawVideoFrame,
+ video_receiver_->AsWeakPtr(), callback));
}
virtual void GetEncodedVideoFrame(
const VideoFrameEncodedCallback& callback) OVERRIDE {
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoReceiver::GetEncodedVideoFrame, video_receiver_,
- callback));
+ base::Bind(&VideoReceiver::GetEncodedVideoFrame,
+ video_receiver_->AsWeakPtr(), callback));
}
virtual void GetRawAudioFrame(
@@ -43,14 +43,15 @@ class LocalFrameReceiver : public FrameReceiver {
int desired_frequency,
const AudioFrameDecodedCallback& callback) OVERRIDE {
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, base::Bind(
- &AudioReceiver::GetRawAudioFrame, audio_receiver_,
+ &AudioReceiver::GetRawAudioFrame, audio_receiver_->AsWeakPtr(),
number_of_10ms_blocks, desired_frequency, callback));
}
virtual void GetCodedAudioFrame(
const AudioFrameEncodedCallback& callback) OVERRIDE {
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, base::Bind(
- &AudioReceiver::GetEncodedAudioFrame, audio_receiver_, callback));
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&AudioReceiver::GetEncodedAudioFrame,
+ audio_receiver_->AsWeakPtr(), callback));
}
protected:
@@ -60,16 +61,16 @@ class LocalFrameReceiver : public FrameReceiver {
friend class base::RefCountedThreadSafe<LocalFrameReceiver>;
scoped_refptr<CastEnvironment> cast_environment_;
- base::WeakPtr<AudioReceiver> audio_receiver_;
- base::WeakPtr<VideoReceiver> video_receiver_;
+ AudioReceiver* audio_receiver_;
+ VideoReceiver* video_receiver_;
};
// The video and audio receivers should only be called from the main thread.
class LocalPacketReceiver : public PacketReceiver {
public:
LocalPacketReceiver(scoped_refptr<CastEnvironment> cast_environment,
- base::WeakPtr<AudioReceiver> audio_receiver,
- base::WeakPtr<VideoReceiver> video_receiver,
+ AudioReceiver* audio_receiver,
+ VideoReceiver* video_receiver,
uint32 ssrc_of_audio_sender,
uint32 ssrc_of_video_sender)
: cast_environment_(cast_environment),
@@ -103,12 +104,12 @@ class LocalPacketReceiver : public PacketReceiver {
}
if (ssrc_of_sender == ssrc_of_audio_sender_) {
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioReceiver::IncomingPacket, audio_receiver_,
- packet, length, callback));
+ base::Bind(&AudioReceiver::IncomingPacket,
+ audio_receiver_->AsWeakPtr(), packet, length, callback));
} else if (ssrc_of_sender == ssrc_of_video_sender_) {
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoReceiver::IncomingPacket, video_receiver_,
- packet, length, callback));
+ base::Bind(&VideoReceiver::IncomingPacket,
+ video_receiver_->AsWeakPtr(), packet, length, callback));
} else {
// No action; just log and call the callback informing that we are done
// with the packet.
@@ -126,8 +127,8 @@ class LocalPacketReceiver : public PacketReceiver {
friend class base::RefCountedThreadSafe<LocalPacketReceiver>;
scoped_refptr<CastEnvironment> cast_environment_;
- base::WeakPtr<AudioReceiver> audio_receiver_;
- base::WeakPtr<VideoReceiver> video_receiver_;
+ AudioReceiver* audio_receiver_;
+ VideoReceiver* video_receiver_;
const uint32 ssrc_of_audio_sender_;
const uint32 ssrc_of_video_sender_;
};
@@ -152,11 +153,11 @@ CastReceiverImpl::CastReceiverImpl(
audio_receiver_(cast_environment, audio_config, &pacer_),
video_receiver_(cast_environment, video_config, &pacer_),
frame_receiver_(new LocalFrameReceiver(cast_environment,
- audio_receiver_.AsWeakPtr(),
- video_receiver_.AsWeakPtr())),
+ &audio_receiver_,
+ &video_receiver_)),
packet_receiver_(new LocalPacketReceiver(cast_environment,
- audio_receiver_.AsWeakPtr(),
- video_receiver_.AsWeakPtr(),
+ &audio_receiver_,
+ &video_receiver_,
audio_config.incoming_ssrc,
video_config.incoming_ssrc)) {}
diff --git a/media/cast/cast_sender.gyp b/media/cast/cast_sender.gyp
index 66761b7f8f..40b9fa51ea 100644
--- a/media/cast/cast_sender.gyp
+++ b/media/cast/cast_sender.gyp
@@ -23,6 +23,7 @@
'cast_sender_impl.h',
], # source
'dependencies': [
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
'audio_sender',
'congestion_control',
'pacing/paced_sender.gyp:cast_paced_sender',
diff --git a/media/cast/cast_sender.h b/media/cast/cast_sender.h
index 9dfe4ad72a..ff9e75651c 100644
--- a/media/cast/cast_sender.h
+++ b/media/cast/cast_sender.h
@@ -20,6 +20,11 @@
#include "media/cast/cast_environment.h"
namespace media {
+class AudioBus;
+class VideoFrame;
+}
+
+namespace media {
namespace cast {
// This Class is thread safe.
@@ -29,9 +34,10 @@ class FrameInput : public base::RefCountedThreadSafe<FrameInput> {
// The callback is called from the main cast thread as soon as
// the encoder is done with the frame; it does not mean that the encoded frame
// has been sent out.
- virtual void InsertRawVideoFrame(const I420VideoFrame* video_frame,
- const base::TimeTicks& capture_time,
- const base::Closure callback) = 0;
+ virtual void InsertRawVideoFrame(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure& callback) = 0;
// The video_frame must be valid until the callback is called.
// The callback is called from the main cast thread as soon as
@@ -41,13 +47,13 @@ class FrameInput : public base::RefCountedThreadSafe<FrameInput> {
const base::TimeTicks& capture_time,
const base::Closure callback) = 0;
- // The audio_frame must be valid until the callback is called.
- // The callback is called from the main cast thread as soon as
- // the encoder is done with the frame; it does not mean that the encoded frame
- // has been sent out.
- virtual void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const base::Closure callback) = 0;
+ // The |audio_bus| must be valid until the |done_callback| is called.
+ // The callback is called from the main cast thread as soon as the encoder is
+ // done with |audio_bus|; it does not mean that the encoded data has been
+ // sent out.
+ virtual void InsertAudio(const AudioBus* audio_bus,
+ const base::TimeTicks& recorded_time,
+ const base::Closure& done_callback) = 0;
// The audio_frame must be valid until the callback is called.
// The callback is called from the main cast thread as soon as
@@ -57,10 +63,6 @@ class FrameInput : public base::RefCountedThreadSafe<FrameInput> {
const base::TimeTicks& recorded_time,
const base::Closure callback) = 0;
- static void DeleteAudioFrame(const PcmAudioFrame* frame);
-
- static void DeleteVideoFrame(const I420VideoFrame* video_frame);
-
protected:
virtual ~FrameInput() {}
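
FrameInput is refcounted and thread safe, so capture code can hand audio straight to it from its own thread; the only contract is that |audio_bus| stays alive until |done_callback| runs on the main cast thread. A caller-side sketch (the capture plumbing is assumed, not part of the patch):

void OnAudioCaptured(const scoped_refptr<FrameInput>& frame_input,
                     const AudioBus* audio_bus,
                     const base::TimeTicks& recorded_time,
                     const base::Closure& release_audio_bus) {
  // Safe from any thread; |release_audio_bus| is invoked on the main cast
  // thread once the encoder no longer needs |audio_bus|.
  frame_input->InsertAudio(audio_bus, recorded_time, release_audio_bus);
}
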
diff --git a/media/cast/cast_sender_impl.cc b/media/cast/cast_sender_impl.cc
index cbf7a4d11d..b6e5eb6639 100644
--- a/media/cast/cast_sender_impl.cc
+++ b/media/cast/cast_sender_impl.cc
@@ -7,23 +7,11 @@
#include "base/callback.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "media/base/video_frame.h"
namespace media {
namespace cast {
-// static
-void FrameInput::DeleteAudioFrame(const PcmAudioFrame* frame) {
- delete frame;
-}
-
-// static
-void FrameInput::DeleteVideoFrame(const I420VideoFrame* video_frame) {
- delete [] video_frame->y_plane.data;
- delete [] video_frame->u_plane.data;
- delete [] video_frame->v_plane.data;
- delete video_frame;
-}
-
// The LocalFrameInput class posts all incoming frames; audio and video to the
// main cast thread for processing.
// This make the cast sender interface thread safe.
@@ -36,9 +24,10 @@ class LocalFrameInput : public FrameInput {
audio_sender_(audio_sender),
video_sender_(video_sender) {}
- virtual void InsertRawVideoFrame(const I420VideoFrame* video_frame,
- const base::TimeTicks& capture_time,
- const base::Closure callback) OVERRIDE {
+ virtual void InsertRawVideoFrame(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure& callback) OVERRIDE {
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&VideoSender::InsertRawVideoFrame, video_sender_,
video_frame, capture_time, callback));
@@ -52,12 +41,12 @@ class LocalFrameInput : public FrameInput {
video_frame, capture_time, callback));
}
- virtual void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const base::Closure callback) OVERRIDE {
+ virtual void InsertAudio(const AudioBus* audio_bus,
+ const base::TimeTicks& recorded_time,
+ const base::Closure& done_callback) OVERRIDE {
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioSender::InsertRawAudioFrame, audio_sender_,
- audio_frame, recorded_time, callback));
+ base::Bind(&AudioSender::InsertAudio, audio_sender_,
+ audio_bus, recorded_time, done_callback));
}
virtual void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
diff --git a/media/cast/cast_sender_impl.h b/media/cast/cast_sender_impl.h
index 361322ea35..f160f7495e 100644
--- a/media/cast/cast_sender_impl.h
+++ b/media/cast/cast_sender_impl.h
@@ -14,6 +14,10 @@
#include "media/cast/video_sender/video_sender.h"
namespace media {
+ class VideoFrame;
+}
+
+namespace media {
namespace cast {
class AudioSender;
diff --git a/media/cast/congestion_control/congestion_control.gypi b/media/cast/congestion_control/congestion_control.gypi
index 9f1accf3f2..20a57ca2a3 100644
--- a/media/cast/congestion_control/congestion_control.gypi
+++ b/media/cast/congestion_control/congestion_control.gypi
@@ -16,7 +16,6 @@
], # source
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/base/base.gyp:test_support_base',
],
},
],
diff --git a/media/cast/congestion_control/congestion_control_unittest.cc b/media/cast/congestion_control/congestion_control_unittest.cc
index 60c38b45ba..108d2b340b 100644
--- a/media/cast/congestion_control/congestion_control_unittest.cc
+++ b/media/cast/congestion_control/congestion_control_unittest.cc
@@ -29,14 +29,34 @@ class CongestionControlTest : public ::testing::Test {
base::TimeDelta::FromMilliseconds(kStartMillisecond));
}
+ // Returns the last bitrate of the run.
+ uint32 RunWithOneLossEventPerSecond(int fps, int rtt_ms,
+ int runtime_in_seconds) {
+ const base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(rtt_ms);
+ const base::TimeDelta ack_rate =
+ base::TimeDelta::FromMilliseconds(GG_INT64_C(1000) / fps);
+ uint32 new_bitrate = 0;
+ EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
+
+ for (int seconds = 0; seconds < runtime_in_seconds; ++seconds) {
+ for (int i = 1; i < fps; ++i) {
+ testing_clock_.Advance(ack_rate);
+ congestion_control_.OnAck(rtt, &new_bitrate);
+ }
+ EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
+ }
+ return new_bitrate;
+ }
+
base::SimpleTestTickClock testing_clock_;
CongestionControl congestion_control_;
};
TEST_F(CongestionControlTest, Max) {
uint32 new_bitrate = 0;
- base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
- base::TimeDelta ack_rate = base::TimeDelta::FromMilliseconds(kAckRateMs);
+ const base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
+ const base::TimeDelta ack_rate =
+ base::TimeDelta::FromMilliseconds(kAckRateMs);
EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
uint32 expected_increase_bitrate = 0;
@@ -55,8 +75,9 @@ TEST_F(CongestionControlTest, Max) {
TEST_F(CongestionControlTest, Min) {
uint32 new_bitrate = 0;
- base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
- base::TimeDelta ack_rate = base::TimeDelta::FromMilliseconds(kAckRateMs);
+ const base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
+ const base::TimeDelta ack_rate =
+ base::TimeDelta::FromMilliseconds(kAckRateMs);
EXPECT_FALSE(congestion_control_.OnNack(rtt, &new_bitrate));
uint32 expected_decrease_bitrate = kStartBitrate;
@@ -64,19 +85,20 @@ TEST_F(CongestionControlTest, Min) {
// Expected number is 10. 2000 * 0.875^10 <= 500.
for (int i = 0; i < 10; ++i) {
testing_clock_.Advance(ack_rate);
- EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
- expected_decrease_bitrate = static_cast<uint32>(
- expected_decrease_bitrate * kDefaultCongestionControlBackOff);
- EXPECT_EQ(expected_decrease_bitrate, new_bitrate);
- }
- testing_clock_.Advance(ack_rate);
- EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
- EXPECT_EQ(kMinBitrateConfigured, new_bitrate);
+ EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
+ expected_decrease_bitrate = static_cast<uint32>(
+ expected_decrease_bitrate * kDefaultCongestionControlBackOff);
+ EXPECT_EQ(expected_decrease_bitrate, new_bitrate);
+ }
+ testing_clock_.Advance(ack_rate);
+ EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
+ EXPECT_EQ(kMinBitrateConfigured, new_bitrate);
}
TEST_F(CongestionControlTest, Timing) {
- base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
- base::TimeDelta ack_rate = base::TimeDelta::FromMilliseconds(kAckRateMs);
+ const base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
+ const base::TimeDelta ack_rate =
+ base::TimeDelta::FromMilliseconds(kAckRateMs);
uint32 new_bitrate = 0;
uint32 expected_bitrate = kStartBitrate;
@@ -134,5 +156,25 @@ TEST_F(CongestionControlTest, Timing) {
EXPECT_EQ(expected_bitrate, new_bitrate);
}
+TEST_F(CongestionControlTest, Convergence24fps) {
+ EXPECT_GE(RunWithOneLossEventPerSecond(24, kRttMs, 100),
+ GG_UINT32_C(3000000));
+}
+
+TEST_F(CongestionControlTest, Convergence24fpsLongRtt) {
+ EXPECT_GE(RunWithOneLossEventPerSecond(24, 100, 100),
+ GG_UINT32_C(500000));
+}
+
+TEST_F(CongestionControlTest, Convergence60fps) {
+ EXPECT_GE(RunWithOneLossEventPerSecond(60, kRttMs, 100),
+ GG_UINT32_C(3500000));
+}
+
+TEST_F(CongestionControlTest, Convergence60fpsLongRtt) {
+ EXPECT_GE(RunWithOneLossEventPerSecond(60, 100, 100),
+ GG_UINT32_C(500000));
+}
+
} // namespace cast
} // namespace media
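
The new convergence tests lean on the same multiplicative back-off the Min test walks through: every NACK scales the current bitrate by kDefaultCongestionControlBackOff (0.875) until the configured floor clamps it. A rough model of that decay (kStartBitrate and kMinBitrateConfigured are test constants assumed from the fixture):

uint32 bitrate = kStartBitrate;
for (int nacks = 0; nacks < 10; ++nacks) {
  bitrate = static_cast<uint32>(bitrate * kDefaultCongestionControlBackOff);
}
// Once the decayed value would fall under the floor, OnNack() reports the
// configured minimum instead.
if (bitrate < kMinBitrateConfigured)
  bitrate = kMinBitrateConfigured;
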
diff --git a/media/cast/framer/cast_message_builder.cc b/media/cast/framer/cast_message_builder.cc
index f65e355eb2..7d89f74431 100644
--- a/media/cast/framer/cast_message_builder.cc
+++ b/media/cast/framer/cast_message_builder.cc
@@ -32,7 +32,7 @@ CastMessageBuilder::CastMessageBuilder(
CastMessageBuilder::~CastMessageBuilder() {}
-void CastMessageBuilder::CompleteFrameReceived(uint8 frame_id,
+void CastMessageBuilder::CompleteFrameReceived(uint32 frame_id,
bool is_key_frame) {
if (last_update_time_.is_null()) {
// Our first update.
@@ -83,7 +83,7 @@ bool CastMessageBuilder::UpdateAckMessage() {
// time; and it's not needed since we can skip frames to catch up.
}
} else {
- uint8 frame_id = frame_id_map_->LastContinuousFrame();
+ uint32 frame_id = frame_id_map_->LastContinuousFrame();
// Is it a new frame?
if (last_acked_frame_id_ == frame_id) return false;
@@ -153,9 +153,8 @@ void CastMessageBuilder::BuildPacketList() {
// Are we missing packets?
if (frame_id_map_->Empty()) return;
- uint8 newest_frame_id = frame_id_map_->NewestFrameId();
- uint8 next_expected_frame_id =
- static_cast<uint8>(cast_msg_.ack_frame_id_ + 1);
+ uint32 newest_frame_id = frame_id_map_->NewestFrameId();
+ uint32 next_expected_frame_id = cast_msg_.ack_frame_id_ + 1;
// Iterate over all frames.
for (; !IsNewerFrameId(next_expected_frame_id, newest_frame_id);
diff --git a/media/cast/framer/cast_message_builder.h b/media/cast/framer/cast_message_builder.h
index 9bbbf6372e..92fd742ecd 100644
--- a/media/cast/framer/cast_message_builder.h
+++ b/media/cast/framer/cast_message_builder.h
@@ -18,7 +18,7 @@ namespace cast {
class RtpPayloadFeedback;
-typedef std::map<uint8, base::TimeTicks> TimeLastNackMap;
+typedef std::map<uint32, base::TimeTicks> TimeLastNackMap;
class CastMessageBuilder {
public:
@@ -30,7 +30,7 @@ class CastMessageBuilder {
int max_unacked_frames);
~CastMessageBuilder();
- void CompleteFrameReceived(uint8 frame_id, bool is_key_frame);
+ void CompleteFrameReceived(uint32 frame_id, bool is_key_frame);
bool TimeToSendNextCastMessage(base::TimeTicks* time_to_send);
void UpdateCastMessage();
void Reset();
@@ -57,7 +57,7 @@ class CastMessageBuilder {
bool slowing_down_ack_;
bool acked_last_frame_;
- uint8 last_acked_frame_id_;
+ uint32 last_acked_frame_id_;
DISALLOW_COPY_AND_ASSIGN(CastMessageBuilder);
};
diff --git a/media/cast/framer/cast_message_builder_unittest.cc b/media/cast/framer/cast_message_builder_unittest.cc
index 5bb38f7a27..db5c2b2b38 100644
--- a/media/cast/framer/cast_message_builder_unittest.cc
+++ b/media/cast/framer/cast_message_builder_unittest.cc
@@ -17,7 +17,8 @@ static const uint32 kShortTimeIncrementMs = 10;
static const uint32 kLongTimeIncrementMs = 40;
static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
-typedef std::map<uint8, size_t> MissingPacketsMap;
+namespace {
+typedef std::map<uint32, size_t> MissingPacketsMap;
class NackFeedbackVerification : public RtpPayloadFeedback {
public:
@@ -52,7 +53,7 @@ class NackFeedbackVerification : public RtpPayloadFeedback {
triggered_ = true;
}
- size_t num_missing_packets(uint8 frame_id) {
+ size_t num_missing_packets(uint32 frame_id) {
MissingPacketsMap::iterator it;
it = missing_packets_.find(frame_id);
if (it == missing_packets_.end()) return 0;
@@ -67,13 +68,14 @@ class NackFeedbackVerification : public RtpPayloadFeedback {
return ret_val;
}
- uint8 last_frame_acked() { return last_frame_acked_; }
+ uint32 last_frame_acked() { return last_frame_acked_; }
private:
bool triggered_;
MissingPacketsMap missing_packets_; // Missing packets per frame.
- uint8 last_frame_acked_;
+ uint32 last_frame_acked_;
};
+} // namespace
class CastMessageBuilderTest : public ::testing::Test {
protected:
@@ -92,7 +94,7 @@ class CastMessageBuilderTest : public ::testing::Test {
virtual ~CastMessageBuilderTest() {}
- void SetFrameId(uint8 frame_id) {
+ void SetFrameId(uint32 frame_id) {
rtp_header_.frame_id = frame_id;
}
@@ -108,7 +110,7 @@ class CastMessageBuilderTest : public ::testing::Test {
rtp_header_.is_key_frame = is_key;
}
- void SetReferenceFrameId(uint8 reference_frame_id) {
+ void SetReferenceFrameId(uint32 reference_frame_id) {
rtp_header_.is_reference = true;
rtp_header_.reference_frame_id = reference_frame_id;
}
@@ -156,7 +158,7 @@ TEST_F(CastMessageBuilderTest, StartWithAKeyFrame) {
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
cast_msg_builder_->UpdateCastMessage();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(5, feedback_.last_frame_acked());
+ EXPECT_EQ(5u, feedback_.last_frame_acked());
}
TEST_F(CastMessageBuilderTest, OneFrameNackList) {
@@ -203,7 +205,7 @@ TEST_F(CastMessageBuilderTest, FastForwardAck) {
SetMaxPacketId(0);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(255, feedback_.last_frame_acked());
+ EXPECT_EQ(kStartFrameId, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
SetFrameId(0);
@@ -212,7 +214,7 @@ TEST_F(CastMessageBuilderTest, FastForwardAck) {
SetKeyFrame(true);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(2, feedback_.last_frame_acked());
+ EXPECT_EQ(2u, feedback_.last_frame_acked());
}
TEST_F(CastMessageBuilderTest, RemoveOldFrames) {
@@ -235,7 +237,7 @@ TEST_F(CastMessageBuilderTest, RemoveOldFrames) {
SetMaxPacketId(5);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(255, feedback_.last_frame_acked());
+ EXPECT_EQ(kStartFrameId, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
SetFrameId(5);
@@ -248,7 +250,7 @@ TEST_F(CastMessageBuilderTest, RemoveOldFrames) {
frame_id_map_.RemoveOldFrames(5); // Simulate 5 being pulled for rendering.
cast_msg_builder_->UpdateCastMessage();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(5, feedback_.last_frame_acked());
+ EXPECT_EQ(5u, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
SetFrameId(1);
@@ -260,7 +262,7 @@ TEST_F(CastMessageBuilderTest, RemoveOldFrames) {
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(5, feedback_.last_frame_acked());
+ EXPECT_EQ(5u, feedback_.last_frame_acked());
}
TEST_F(CastMessageBuilderTest, WrapFastForward) {
@@ -278,16 +280,16 @@ TEST_F(CastMessageBuilderTest, WrapFastForward) {
SetKeyFrame(false);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(253, feedback_.last_frame_acked());
+ EXPECT_EQ(253u, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameId(0);
+ SetFrameId(256);
SetPacketId(0);
SetMaxPacketId(0);
SetKeyFrame(false);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(253, feedback_.last_frame_acked());
+ EXPECT_EQ(253u, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
SetFrameId(254);
@@ -296,7 +298,7 @@ TEST_F(CastMessageBuilderTest, WrapFastForward) {
SetKeyFrame(true);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(0, feedback_.last_frame_acked());
+ EXPECT_EQ(256u, feedback_.last_frame_acked());
}
TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacket) {
@@ -470,7 +472,7 @@ TEST_F(CastMessageBuilderTest, SlowDownAck) {
SetKeyFrame(true);
InsertPacket();
- int frame_id;
+ uint32 frame_id;
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
SetKeyFrame(false);
@@ -483,7 +485,7 @@ TEST_F(CastMessageBuilderTest, SlowDownAck) {
base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
}
// We should now have entered the slowdown ACK state.
- uint8_t expected_frame_id = 1;
+ uint32 expected_frame_id = 1;
for (; frame_id < 10; ++frame_id) {
if (frame_id % 2) ++expected_frame_id;
EXPECT_TRUE(feedback_.triggered());
diff --git a/media/cast/framer/frame_buffer.cc b/media/cast/framer/frame_buffer.cc
index 6fcdcf4c7a..fc38c7290a 100644
--- a/media/cast/framer/frame_buffer.cc
+++ b/media/cast/framer/frame_buffer.cc
@@ -29,7 +29,7 @@ void FrameBuffer::InsertPacket(const uint8* payload_data,
if (rtp_header.is_reference) {
last_referenced_frame_id_ = rtp_header.reference_frame_id;
} else {
- last_referenced_frame_id_ = static_cast<uint8>(rtp_header.frame_id - 1);
+ last_referenced_frame_id_ = rtp_header.frame_id - 1;
}
rtp_timestamp_ = rtp_header.webrtc.header.timestamp;
diff --git a/media/cast/framer/frame_buffer.h b/media/cast/framer/frame_buffer.h
index 591ff2f281..5ca9a514a8 100644
--- a/media/cast/framer/frame_buffer.h
+++ b/media/cast/framer/frame_buffer.h
@@ -32,16 +32,16 @@ class FrameBuffer {
uint32* rtp_timestamp) const;
bool is_key_frame() const { return is_key_frame_; }
- uint8 frame_id() const { return frame_id_; }
- uint8 last_referenced_frame_id() const { return last_referenced_frame_id_; }
+
+ uint32 last_referenced_frame_id() const { return last_referenced_frame_id_; }
private:
- uint8 frame_id_;
+ uint32 frame_id_;
uint16 max_packet_id_;
uint16 num_packets_received_;
bool is_key_frame_;
size_t total_data_size_;
- uint8 last_referenced_frame_id_;
+ uint32 last_referenced_frame_id_;
uint32 rtp_timestamp_;
PacketMap packets_;
diff --git a/media/cast/framer/frame_id_map.cc b/media/cast/framer/frame_id_map.cc
index 88560ed310..0434b19f26 100644
--- a/media/cast/framer/frame_id_map.cc
+++ b/media/cast/framer/frame_id_map.cc
@@ -10,8 +10,8 @@
namespace media {
namespace cast {
-FrameInfo::FrameInfo(uint8 frame_id,
- uint8 referenced_frame_id,
+FrameInfo::FrameInfo(uint32 frame_id,
+ uint32 referenced_frame_id,
uint16 max_packet_id,
bool key_frame)
: is_key_frame_(key_frame),
@@ -63,20 +63,20 @@ FrameIdMap::FrameIdMap()
FrameIdMap::~FrameIdMap() {}
bool FrameIdMap::InsertPacket(const RtpCastHeader& rtp_header, bool* complete) {
- uint8 frame_id = rtp_header.frame_id;
- uint8 reference_frame_id;
+ uint32 frame_id = rtp_header.frame_id;
+ uint32 reference_frame_id;
if (rtp_header.is_reference) {
reference_frame_id = rtp_header.reference_frame_id;
} else {
- reference_frame_id = static_cast<uint8>(frame_id - 1);
+ reference_frame_id = static_cast<uint32>(frame_id - 1);
}
if (rtp_header.is_key_frame && waiting_for_key_) {
- last_released_frame_ = static_cast<uint8>(frame_id - 1);
+ last_released_frame_ = static_cast<uint32>(frame_id - 1);
waiting_for_key_ = false;
}
- VLOG(1) << "InsertPacket frame:" << static_cast<int>(frame_id)
+ VLOG(1) << "InsertPacket frame:" << frame_id
<< " packet:" << static_cast<int>(rtp_header.packet_id)
<< " max packet:" << static_cast<int>(rtp_header.max_packet_id);
@@ -108,7 +108,7 @@ bool FrameIdMap::InsertPacket(const RtpCastHeader& rtp_header, bool* complete) {
return true;
}
-void FrameIdMap::RemoveOldFrames(uint8 frame_id) {
+void FrameIdMap::RemoveOldFrames(uint32 frame_id) {
FrameMap::iterator it = frame_map_.begin();
while (it != frame_map_.end()) {
@@ -129,11 +129,11 @@ void FrameIdMap::Clear() {
newest_frame_id_ = kStartFrameId;
}
-uint8 FrameIdMap::NewestFrameId() const {
+uint32 FrameIdMap::NewestFrameId() const {
return newest_frame_id_;
}
-bool FrameIdMap::NextContinuousFrame(uint8* frame_id) const {
+bool FrameIdMap::NextContinuousFrame(uint32* frame_id) const {
FrameMap::const_iterator it;
for (it = frame_map_.begin(); it != frame_map_.end(); ++it) {
@@ -145,9 +145,9 @@ bool FrameIdMap::NextContinuousFrame(uint8* frame_id) const {
return false;
}
-uint8 FrameIdMap::LastContinuousFrame() const {
- uint8 last_continuous_frame_id = last_released_frame_;
- uint8 next_expected_frame = last_released_frame_;
+uint32 FrameIdMap::LastContinuousFrame() const {
+ uint32 last_continuous_frame_id = last_released_frame_;
+ uint32 next_expected_frame = last_released_frame_;
FrameMap::const_iterator it;
@@ -163,7 +163,7 @@ uint8 FrameIdMap::LastContinuousFrame() const {
return last_continuous_frame_id;
}
-bool FrameIdMap::NextAudioFrameAllowingMissingFrames(uint8* frame_id) const {
+bool FrameIdMap::NextAudioFrameAllowingMissingFrames(uint32* frame_id) const {
// First check if we have continuous frames.
if (NextContinuousFrame(frame_id)) return true;
@@ -191,7 +191,7 @@ bool FrameIdMap::NextAudioFrameAllowingMissingFrames(uint8* frame_id) const {
return true;
}
-bool FrameIdMap::NextVideoFrameAllowingSkippingFrames(uint8* frame_id) const {
+bool FrameIdMap::NextVideoFrameAllowingSkippingFrames(uint32* frame_id) const {
// Find the oldest decodable frame.
FrameMap::const_iterator it_best_match = frame_map_.end();
FrameMap::const_iterator it;
@@ -221,11 +221,11 @@ int FrameIdMap::NumberOfCompleteFrames() const {
return count;
}
-bool FrameIdMap::FrameExists(uint8 frame_id) const {
+bool FrameIdMap::FrameExists(uint32 frame_id) const {
return frame_map_.end() != frame_map_.find(frame_id);
}
-void FrameIdMap::GetMissingPackets(uint8 frame_id,
+void FrameIdMap::GetMissingPackets(uint32 frame_id,
bool last_frame,
PacketIdSet* missing_packets) const {
FrameMap::const_iterator it = frame_map_.find(frame_id);
@@ -237,7 +237,7 @@ void FrameIdMap::GetMissingPackets(uint8 frame_id,
bool FrameIdMap::ContinuousFrame(FrameInfo* frame) const {
DCHECK(frame);
if (waiting_for_key_ && !frame->is_key_frame()) return false;
- return static_cast<uint8>(last_released_frame_ + 1) == frame->frame_id();
+ return static_cast<uint32>(last_released_frame_ + 1) == frame->frame_id();
}
bool FrameIdMap::DecodableVideoFrame(FrameInfo* frame) const {
diff --git a/media/cast/framer/frame_id_map.h b/media/cast/framer/frame_id_map.h
index cecaaebcd1..1d4fb097ea 100644
--- a/media/cast/framer/frame_id_map.h
+++ b/media/cast/framer/frame_id_map.h
@@ -19,8 +19,8 @@ namespace cast {
class FrameInfo {
public:
- FrameInfo(uint8 frame_id,
- uint8 referenced_frame_id,
+ FrameInfo(uint32 frame_id,
+ uint32 referenced_frame_id,
uint16 max_packet_id,
bool key_frame);
~FrameInfo();
@@ -32,13 +32,13 @@ class FrameInfo {
PacketIdSet* missing_packets) const;
bool is_key_frame() const { return is_key_frame_; }
- uint8 frame_id() const { return frame_id_; }
- uint8 referenced_frame_id() const { return referenced_frame_id_; }
+ uint32 frame_id() const { return frame_id_; }
+ uint32 referenced_frame_id() const { return referenced_frame_id_; }
private:
const bool is_key_frame_;
- const uint8 frame_id_;
- const uint8 referenced_frame_id_;
+ const uint32 frame_id_;
+ const uint32 referenced_frame_id_;
uint16 max_received_packet_id_;
PacketIdSet missing_packets_;
@@ -46,7 +46,7 @@ class FrameInfo {
DISALLOW_COPY_AND_ASSIGN(FrameInfo);
};
-typedef std::map<uint8, linked_ptr<FrameInfo> > FrameMap;
+typedef std::map<uint32, linked_ptr<FrameInfo> > FrameMap;
class FrameIdMap {
public:
@@ -57,21 +57,21 @@ class FrameIdMap {
bool InsertPacket(const RtpCastHeader& rtp_header, bool* complete);
bool Empty() const;
- bool FrameExists(uint8 frame_id) const;
- uint8 NewestFrameId() const;
+ bool FrameExists(uint32 frame_id) const;
+ uint32 NewestFrameId() const;
- void RemoveOldFrames(uint8 frame_id);
+ void RemoveOldFrames(uint32 frame_id);
void Clear();
// Identifies the next frame to be released (rendered).
- bool NextContinuousFrame(uint8* frame_id) const;
- uint8 LastContinuousFrame() const;
+ bool NextContinuousFrame(uint32* frame_id) const;
+ uint32 LastContinuousFrame() const;
- bool NextAudioFrameAllowingMissingFrames(uint8* frame_id) const;
- bool NextVideoFrameAllowingSkippingFrames(uint8* frame_id) const;
+ bool NextAudioFrameAllowingMissingFrames(uint32* frame_id) const;
+ bool NextVideoFrameAllowingSkippingFrames(uint32* frame_id) const;
int NumberOfCompleteFrames() const;
- void GetMissingPackets(uint8 frame_id,
+ void GetMissingPackets(uint32 frame_id,
bool last_frame,
PacketIdSet* missing_packets) const;
@@ -81,8 +81,8 @@ class FrameIdMap {
FrameMap frame_map_;
bool waiting_for_key_;
- uint8 last_released_frame_;
- uint8 newest_frame_id_;
+ uint32 last_released_frame_;
+ uint32 newest_frame_id_;
DISALLOW_COPY_AND_ASSIGN(FrameIdMap);
};
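The header above widens every frame id from uint8 to uint32, so the continuity test in ContinuousFrame() becomes a plain increment comparison instead of a modulo-256 wrap. A minimal standalone sketch of that check, using plain uint32_t rather than the Chromium types in this patch (names here are illustrative only):

#include <cstdint>
#include <iostream>

// Simplified stand-in for FrameIdMap's continuity test: with 32-bit ids the
// frame after 255 is simply 256, so no wrap handling is needed until the
// full 32-bit range is exhausted.
bool IsContinuous(uint32_t last_released_frame, uint32_t frame_id) {
  return last_released_frame + 1 == frame_id;
}

int main() {
  std::cout << IsContinuous(255u, 256u) << "\n";  // 1: 256 follows 255.
  std::cout << IsContinuous(255u, 0u) << "\n";    // 0: 0 no longer follows 255.
  return 0;
}

This is why the framer unit tests below change their "wrap" expectations from frame id 0 back to 256.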
diff --git a/media/cast/framer/framer.cc b/media/cast/framer/framer.cc
index fb16e66021..b06e60fd03 100644
--- a/media/cast/framer/framer.cc
+++ b/media/cast/framer/framer.cc
@@ -56,7 +56,7 @@ bool Framer::InsertPacket(const uint8* payload_data,
bool Framer::GetEncodedAudioFrame(EncodedAudioFrame* audio_frame,
uint32* rtp_timestamp,
bool* next_frame) {
- uint8 frame_id;
+ uint32 frame_id;
// Find frame id.
if (frame_id_map_.NextContinuousFrame(&frame_id)) {
// We have our next frame.
@@ -79,7 +79,7 @@ bool Framer::GetEncodedAudioFrame(EncodedAudioFrame* audio_frame,
bool Framer::GetEncodedVideoFrame(EncodedVideoFrame* video_frame,
uint32* rtp_timestamp,
bool* next_frame) {
- uint8 frame_id;
+ uint32 frame_id;
// Find frame id.
if (frame_id_map_.NextContinuousFrame(&frame_id)) {
// We have our next frame.
@@ -107,7 +107,7 @@ void Framer::Reset() {
cast_msg_builder_->Reset();
}
-void Framer::ReleaseFrame(uint8 frame_id) {
+void Framer::ReleaseFrame(uint32 frame_id) {
frame_id_map_.RemoveOldFrames(frame_id);
frames_.erase(frame_id);
diff --git a/media/cast/framer/framer.h b/media/cast/framer/framer.h
index e208ab117f..0b2e004025 100644
--- a/media/cast/framer/framer.h
+++ b/media/cast/framer/framer.h
@@ -21,7 +21,7 @@
namespace media {
namespace cast {
-typedef std::map<uint8, linked_ptr<FrameBuffer> > FrameList;
+typedef std::map<uint32, linked_ptr<FrameBuffer> > FrameList;
class Framer {
public:
@@ -50,7 +50,7 @@ class Framer {
uint32* rtp_timestamp,
bool* next_frame);
- void ReleaseFrame(uint8 frame_id);
+ void ReleaseFrame(uint32 frame_id);
// Reset framer state to original state and flush all pending buffers.
void Reset();
diff --git a/media/cast/framer/framer_unittest.cc b/media/cast/framer/framer_unittest.cc
index bfeb7de1a8..33897e25a5 100644
--- a/media/cast/framer/framer_unittest.cc
+++ b/media/cast/framer/framer_unittest.cc
@@ -43,7 +43,7 @@ class FramerTest : public ::testing::Test {
TEST_F(FramerTest, EmptyState) {
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
@@ -51,7 +51,7 @@ TEST_F(FramerTest, EmptyState) {
TEST_F(FramerTest, AlwaysStartWithKey) {
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
// Insert non key first frame.
@@ -64,14 +64,14 @@ TEST_F(FramerTest, AlwaysStartWithKey) {
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(1, frame.frame_id);
+ EXPECT_EQ(1u, frame.frame_id);
EXPECT_TRUE(frame.key_frame);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, CompleteFrame) {
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
// start with a complete key frame.
@@ -80,7 +80,7 @@ TEST_F(FramerTest, CompleteFrame) {
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(0, frame.frame_id);
+ EXPECT_EQ(0u, frame.frame_id);
EXPECT_TRUE(frame.key_frame);
framer_.ReleaseFrame(frame.frame_id);
@@ -102,7 +102,7 @@ TEST_F(FramerTest, CompleteFrame) {
TEST_F(FramerTest, ContinuousSequence) {
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
// start with a complete key frame.
@@ -111,7 +111,7 @@ TEST_F(FramerTest, ContinuousSequence) {
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(0, frame.frame_id);
+ EXPECT_EQ(0u, frame.frame_id);
EXPECT_TRUE(frame.key_frame);
framer_.ReleaseFrame(frame.frame_id);
@@ -126,33 +126,33 @@ TEST_F(FramerTest, ContinuousSequence) {
TEST_F(FramerTest, Wrap) {
// Insert key frame, frame_id = 255 (will jump to that)
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
// Start with a complete key frame.
rtp_header_.is_key_frame = true;
- rtp_header_.frame_id = 255;
+ rtp_header_.frame_id = 255u;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(255, frame.frame_id);
+ EXPECT_EQ(255u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Insert wrapped delta frame - should be continuous.
rtp_header_.is_key_frame = false;
- rtp_header_.frame_id = 0;
+ rtp_header_.frame_id = 256;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(0, frame.frame_id);
+ EXPECT_EQ(256u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, Reset) {
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
// Start with a complete key frame.
@@ -165,13 +165,13 @@ TEST_F(FramerTest, Reset) {
TEST_F(FramerTest, RequireKeyAfterReset) {
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
framer_.Reset();
// Insert a complete non-key frame after the reset.
rtp_header_.is_key_frame = false;
- rtp_header_.frame_id = 0;
+ rtp_header_.frame_id = 0u;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
@@ -185,7 +185,7 @@ TEST_F(FramerTest, RequireKeyAfterReset) {
TEST_F(FramerTest, BasicNonLastReferenceId) {
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
rtp_header_.is_key_frame = true;
rtp_header_.frame_id = 0;
@@ -198,7 +198,7 @@ TEST_F(FramerTest, BasicNonLastReferenceId) {
rtp_header_.is_key_frame = false;
rtp_header_.is_reference = true;
rtp_header_.reference_frame_id = 0;
- rtp_header_.frame_id = 5;
+ rtp_header_.frame_id = 5u;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
@@ -209,7 +209,7 @@ TEST_F(FramerTest, BasicNonLastReferenceId) {
TEST_F(FramerTest, InOrderReferenceFrameSelection) {
// Create pattern: 0, 1, 4, 5.
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
rtp_header_.is_key_frame = true;
@@ -230,17 +230,17 @@ TEST_F(FramerTest, InOrderReferenceFrameSelection) {
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
- EXPECT_EQ(0, frame.frame_id);
+ EXPECT_EQ(0u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(1, frame.frame_id);
+ EXPECT_EQ(1u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_FALSE(next_frame);
- EXPECT_EQ(4, frame.frame_id);
+ EXPECT_EQ(4u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Insert remaining packet of frame #2 - should not be continuous.
rtp_header_.frame_id = 2;
@@ -256,13 +256,13 @@ TEST_F(FramerTest, InOrderReferenceFrameSelection) {
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(5, frame.frame_id);
+ EXPECT_EQ(5u, frame.frame_id);
}
TEST_F(FramerTest, AudioWrap) {
// All audio frames are marked as key frames.
EncodedAudioFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
rtp_header_.is_key_frame = true;
rtp_header_.frame_id = 254;
@@ -271,33 +271,33 @@ TEST_F(FramerTest, AudioWrap) {
EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(254, frame.frame_id);
+ EXPECT_EQ(254u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
rtp_header_.frame_id = 255;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
// Insert wrapped frame - should be continuous.
- rtp_header_.frame_id = 0;
+ rtp_header_.frame_id = 256;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(255, frame.frame_id);
+ EXPECT_EQ(255u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(0, frame.frame_id);
+ EXPECT_EQ(256u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, AudioWrapWithMissingFrame) {
// All audio frames are marked as key frames.
EncodedAudioFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
// Insert and get first packet.
@@ -307,25 +307,25 @@ TEST_F(FramerTest, AudioWrapWithMissingFrame) {
EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(253, frame.frame_id);
+ EXPECT_EQ(253u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Insert third and fourth packets.
rtp_header_.frame_id = 255;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- rtp_header_.frame_id = 0;
+ rtp_header_.frame_id = 256;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
// Get third and fourth packets.
EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_FALSE(next_frame);
- EXPECT_EQ(255, frame.frame_id);
+ EXPECT_EQ(255u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(0, frame.frame_id);
+ EXPECT_EQ(256u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
diff --git a/media/cast/logging/logging.cc b/media/cast/logging/logging.cc
deleted file mode 100644
index ce68aa41bc..0000000000
--- a/media/cast/logging/logging.cc
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/logging/logging.h"
-
-#include "base/debug/trace_event.h"
-#include "base/logging.h"
-#include "base/metrics/histogram.h"
-
-namespace media {
-namespace cast {
-
-Logging::Logging(base::TickClock* clock)
- : clock_(clock),
- frame_map_(),
- packet_map_(),
- generic_map_(),
- weak_factory_(this) {}
-
-Logging::~Logging() {}
-
-void Logging::InsertFrameEvent(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint8 frame_id) {
- // Is this a new event?
- FrameLogMap::iterator it = frame_map_.find(event);
- if (it == frame_map_.end()) {
- // Create new entry.
- FrameLogData data(clock_);
- data.Insert(rtp_timestamp, frame_id);
- frame_map_.insert(std::make_pair(event, &data));
- } else {
- // Insert to existing entry.
- it->second->Insert(rtp_timestamp, frame_id);
- }
-}
-
-void Logging::InsertFrameEventWithSize(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint8 frame_id,
- int size) {
- // Is this a new event?
- FrameLogMap::iterator it = frame_map_.find(event);
- if (it == frame_map_.end()) {
- // Create new entry.
- FrameLogData data(clock_);
- data.InsertWithSize(rtp_timestamp, frame_id, size);
- frame_map_.insert(std::make_pair(event, &data));
- } else {
- // Insert to existing entry.
- it->second->InsertWithSize(rtp_timestamp, frame_id, size);
- }
-}
-
-void Logging::InsertFrameEventWithDelay(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint8 frame_id,
- base::TimeDelta delay) {
- // Is this a new event?
- FrameLogMap::iterator it = frame_map_.find(event);
- if (it == frame_map_.end()) {
- // Create new entry.
- FrameLogData data(clock_);
- data.InsertWithDelay(rtp_timestamp, frame_id, delay);
- frame_map_.insert(std::make_pair(event, &data));
- } else {
- // Insert to existing entry.
- it->second->InsertWithDelay(rtp_timestamp, frame_id, delay);
- }
-}
-
-void Logging::InsertPacketEvent(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint8 frame_id,
- uint16 packet_id,
- uint16 max_packet_id,
- int size) {
- // Is this a new event?
- PacketLogMap::iterator it = packet_map_.find(event);
- if (it == packet_map_.end()) {
- // Create new entry.
- PacketLogData data(clock_);
- data.Insert(rtp_timestamp, frame_id, packet_id, max_packet_id, size);
- packet_map_.insert(std::make_pair(event, &data));
- } else {
- // Insert to existing entry.
- it->second->Insert(rtp_timestamp, frame_id, packet_id, max_packet_id, size);
- }
-}
-
-void Logging::InsertGenericEvent(CastLoggingEvent event, int value) {
- // Is this a new event?
- GenericLogMap::iterator it = generic_map_.find(event);
- if (it == generic_map_.end()) {
- // Create new entry.
- GenericLogData data(clock_);
- data.Insert(value);
- generic_map_.insert(std::make_pair(event, &data));
- } else {
- // Insert to existing entry.
- it->second->Insert(value);
- }
-}
-
-void Logging::Reset() {
- frame_map_.clear();
- packet_map_.clear();
- generic_map_.clear();
-}
-} // namespace cast
-} // namespace media
-
diff --git a/media/cast/logging/logging.gyp b/media/cast/logging/logging.gyp
deleted file mode 100644
index 9140e3a6eb..0000000000
--- a/media/cast/logging/logging.gyp
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-{
- 'targets': [
- {
- 'target_name': 'cast_logging',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
- 'sources': [
- 'logging_defines.cc',
- 'logging_defines.h',
- 'logging_impl.cc',
- 'logging_impl.h',
- 'logging_raw.cc',
- 'logging_raw.h',
- 'logging_stats.cc',
- 'logging_stats.h',
- ],
- 'dependencies': [
- '<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/base/base.gyp:test_support_base',
- ],
- },
- ], # targets
-}
diff --git a/media/cast/logging/logging.h b/media/cast/logging/logging.h
deleted file mode 100644
index 426fe3ab3d..0000000000
--- a/media/cast/logging/logging.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_LOGGING_LOGGING_H_
-#define MEDIA_CAST_LOGGING_LOGGING_H_
-
-// Generic class that handles event logging for the cast library.
-// Logging has three possible forms:
-// 1. [default] Raw data accessible by the application.
-// 2. [optional] UMA stats.
-// 3. [optional] Tracing.
-
-#include <map>
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/memory/linked_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "base/time/tick_clock.h"
-#include "base/time/time.h"
-#include "media/cast/logging/logging_defines.h"
-#include "media/cast/logging/logging_internal.h"
-
-namespace media {
-namespace cast {
-
-// Store all log types in a map based on the event.
-typedef std::map<CastLoggingEvent, linked_ptr<FrameLogData> > FrameLogMap;
-typedef std::map<CastLoggingEvent, linked_ptr<PacketLogData> > PacketLogMap;
-typedef std::map<CastLoggingEvent, linked_ptr<GenericLogData> > GenericLogMap;
-
-
-// This class is not thread safe, and should only be called from the main
-// thread.
-class Logging : public base::NonThreadSafe,
- public base::SupportsWeakPtr<Logging> {
- public:
- // When tracing is enabled - all events will be added to the trace.
- Logging(base::TickClock* clock);
- ~Logging();
- // Inform of new event: three types of events: frame, packets and generic.
- // Frame events can be inserted with different parameters.
- void InsertFrameEvent(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint8 frame_id);
- // Size - Inserting the size implies that this is an encoded frame.
- void InsertFrameEventWithSize(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint8 frame_id,
- int frame_size);
- // Render/playout delay
- void InsertFrameEventWithDelay(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint8 frame_id,
- base::TimeDelta delay);
-
- // Insert a packet event.
- void InsertPacketEvent(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint8 frame_id,
- uint16 packet_id,
- uint16 max_packet_id,
- int size);
-
- void InsertGenericEvent(CastLoggingEvent event, int value);
-
- // Get log data.
- void GetRawFrameData(FrameLogMap frame_data);
- void GetRawPacketData(PacketLogMap packet_data);
- void GetRawGenericData(GenericLogMap generic_data);
-
- // Reset all log data (not flags).
- void Reset();
-
- private:
- base::WeakPtrFactory<Logging> weak_factory_;
- base::TickClock* const clock_; // Not owned by this class.
- FrameLogMap frame_map_;
- PacketLogMap packet_map_;
- GenericLogMap generic_map_;
-
- DISALLOW_COPY_AND_ASSIGN(Logging);
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_LOGGING_LOGGING_H_
-
diff --git a/media/cast/logging/logging_defines.cc b/media/cast/logging/logging_defines.cc
index 3f502f9eaa..85abe7c5d4 100644
--- a/media/cast/logging/logging_defines.cc
+++ b/media/cast/logging/logging_defines.cc
@@ -9,20 +9,40 @@
namespace media {
namespace cast {
+CastLoggingConfig::CastLoggingConfig()
+ : enable_data_collection(false),
+ enable_uma_stats(false),
+ enable_tracing(false) {}
+
+CastLoggingConfig::~CastLoggingConfig() {}
+
+CastLoggingConfig GetDefaultCastLoggingConfig() {
+ CastLoggingConfig config;
+ return config;
+}
+
std::string CastLoggingToString(CastLoggingEvent event) {
switch (event) {
- case(kRtt):
- return "Rtt";
+ case(kUnknown):
+ // Can happen if the sender and receiver of RTCP log messages are not
+ // aligned.
+ return "Unknown";
+ case(kRttMs):
+ return "RttMs";
case(kPacketLoss):
return "PacketLoss";
- case(kJitter):
- return "Jitter";
+ case(kJitterMs):
+ return "JitterMs";
case(kAckReceived):
return "AckReceived";
+ case(kRembBitrate):
+ return "RembBitrate";
case(kAckSent):
return "AckSent";
case(kLastEvent):
return "LastEvent";
+ case(kAudioFrameReceived):
+ return "AudioFrameReceived";
case(kAudioFrameCaptured):
return "AudioFrameCaptured";
case(kAudioFrameEncoded):
@@ -33,6 +53,8 @@ std::string CastLoggingToString(CastLoggingEvent event) {
return "AudioFrameDecoded";
case(kVideoFrameCaptured):
return "VideoFrameCaptured";
+ case(kVideoFrameReceived):
+ return "VideoFrameReceived";
case(kVideoFrameSentToEncoder):
return "VideoFrameSentToEncoder";
case(kVideoFrameEncoded):
diff --git a/media/cast/logging/logging_defines.h b/media/cast/logging/logging_defines.h
index 835c022589..5a7bca1500 100644
--- a/media/cast/logging/logging_defines.h
+++ b/media/cast/logging/logging_defines.h
@@ -15,15 +15,32 @@
namespace media {
namespace cast {
+static const uint32 kFrameIdUnknown = 0xFFFF;
+
+struct CastLoggingConfig {
+ CastLoggingConfig();
+ ~CastLoggingConfig();
+
+ bool enable_data_collection;
+ bool enable_uma_stats;
+ bool enable_tracing;
+};
+
+// Returns the default config: data collection, UMA stats and tracing all
+// disabled.
+CastLoggingConfig GetDefaultCastLoggingConfig();
+
enum CastLoggingEvent {
// Generic events.
- kRtt,
+ kUnknown,
+ kRttMs,
kPacketLoss,
- kJitter,
+ kJitterMs,
kAckReceived,
+ kRembBitrate,
kAckSent,
kLastEvent,
// Audio sender.
+ kAudioFrameReceived,
kAudioFrameCaptured,
kAudioFrameEncoded,
// Audio receiver.
@@ -31,6 +48,7 @@ enum CastLoggingEvent {
kAudioFrameDecoded,
// Video sender.
kVideoFrameCaptured,
+ kVideoFrameReceived,
kVideoFrameSentToEncoder,
kVideoFrameEncoded,
// Video receiver.
@@ -42,6 +60,8 @@ enum CastLoggingEvent {
kPacketRetransmited,
// Receive-side packet events.
kPacketReceived,
+
+ kNumOfLoggingEvents,
};
std::string CastLoggingToString(CastLoggingEvent event);
@@ -50,8 +70,8 @@ struct FrameEvent {
FrameEvent();
~FrameEvent();
- uint8 frame_id;
- int size; // Encoded size only.
+ uint32 frame_id;
+ size_t size; // Encoded size only.
std::vector<base::TimeTicks> timestamp;
std::vector<CastLoggingEvent> type;
base::TimeDelta delay_delta; // Render/playout delay.
@@ -62,7 +82,7 @@ struct BasePacketInfo {
BasePacketInfo();
~BasePacketInfo();
- int size;
+ size_t size;
std::vector<base::TimeTicks> timestamp;
std::vector<CastLoggingEvent> type;
};
@@ -72,7 +92,7 @@ typedef std::map<uint16, BasePacketInfo> BasePacketMap;
struct PacketEvent {
PacketEvent();
~PacketEvent();
- uint8 frame_id;
+ uint32 frame_id;
int max_packet_id;
BasePacketMap packet_map;
};
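CastLoggingConfig replaces the three separate bool constructor arguments used previously; GetDefaultCastLoggingConfig() returns a config with every flag left at its constructor value, i.e. all false. A hedged standalone sketch of how a caller might start from the defaults and opt in to one feature (the struct here mirrors the fields declared above; the surrounding code is illustrative only):

#include <iostream>

// Stand-in mirroring the CastLoggingConfig fields declared in this header.
struct CastLoggingConfig {
  bool enable_data_collection = false;
  bool enable_uma_stats = false;
  bool enable_tracing = false;
};

CastLoggingConfig GetDefaultCastLoggingConfig() { return CastLoggingConfig(); }

int main() {
  // Start from the defaults and enable tracing only.
  CastLoggingConfig config = GetDefaultCastLoggingConfig();
  config.enable_tracing = true;
  std::cout << config.enable_data_collection << config.enable_uma_stats
            << config.enable_tracing << "\n";  // Prints 001.
  return 0;
}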
diff --git a/media/cast/logging/logging_impl.cc b/media/cast/logging/logging_impl.cc
index 7f91df2737..16117b031f 100644
--- a/media/cast/logging/logging_impl.cc
+++ b/media/cast/logging/logging_impl.cc
@@ -5,17 +5,16 @@
#include "base/debug/trace_event.h"
#include "base/metrics/histogram.h"
#include "media/cast/logging/logging_impl.h"
+#include "net/base/big_endian.h"
namespace media {
namespace cast {
LoggingImpl::LoggingImpl(base::TickClock* clock,
- bool enable_data_collection,
- bool enable_uma_stats,
- bool enable_tracing)
- : enable_data_collection_(enable_data_collection),
- enable_uma_stats_(enable_uma_stats),
- enable_tracing_(enable_tracing),
+ scoped_refptr<base::TaskRunner> main_thread_proxy,
+ const CastLoggingConfig& config)
+ : main_thread_proxy_(main_thread_proxy),
+ config_(config),
raw_(clock),
stats_(clock) {}
@@ -23,12 +22,13 @@ LoggingImpl::~LoggingImpl() {}
void LoggingImpl::InsertFrameEvent(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id) {
- if (enable_data_collection_) {
+ uint32 frame_id) {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ if (config_.enable_data_collection) {
raw_.InsertFrameEvent(event, rtp_timestamp, frame_id);
stats_.InsertFrameEvent(event, rtp_timestamp, frame_id);
}
- if (enable_tracing_) {
+ if (config_.enable_tracing) {
std::string event_string = CastLoggingToString(event);
TRACE_EVENT_INSTANT2(event_string.c_str(), "FE",
TRACE_EVENT_SCOPE_THREAD, "rtp_timestamp", rtp_timestamp, "frame_id",
@@ -38,16 +38,17 @@ void LoggingImpl::InsertFrameEvent(CastLoggingEvent event,
void LoggingImpl::InsertFrameEventWithSize(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
int frame_size) {
- if (enable_data_collection_) {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ if (config_.enable_data_collection) {
raw_.InsertFrameEventWithSize(event, rtp_timestamp, frame_id, frame_size);
stats_.InsertFrameEventWithSize(event, rtp_timestamp, frame_id, frame_size);
}
- if (enable_uma_stats_) {
+ if (config_.enable_uma_stats) {
UMA_HISTOGRAM_COUNTS(CastLoggingToString(event), frame_size);
}
- if (enable_tracing_) {
+ if (config_.enable_tracing) {
std::string event_string = CastLoggingToString(event);
TRACE_EVENT_INSTANT2(event_string.c_str(), "FES",
TRACE_EVENT_SCOPE_THREAD, "rtp_timestamp", rtp_timestamp, "frame_size",
@@ -58,16 +59,17 @@ void LoggingImpl::InsertFrameEventWithSize(CastLoggingEvent event,
void LoggingImpl::InsertFrameEventWithDelay(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
base::TimeDelta delay) {
- if (enable_data_collection_) {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ if (config_.enable_data_collection) {
raw_.InsertFrameEventWithDelay(event, rtp_timestamp, frame_id, delay);
stats_.InsertFrameEventWithDelay(event, rtp_timestamp, frame_id, delay);
}
- if (enable_uma_stats_) {
+ if (config_.enable_uma_stats) {
UMA_HISTOGRAM_TIMES(CastLoggingToString(event), delay);
}
- if (enable_tracing_) {
+ if (config_.enable_tracing) {
std::string event_string = CastLoggingToString(event);
TRACE_EVENT_INSTANT2(event_string.c_str(), "FED",
TRACE_EVENT_SCOPE_THREAD, "rtp_timestamp", rtp_timestamp, "delay",
@@ -75,19 +77,41 @@ void LoggingImpl::InsertFrameEventWithDelay(CastLoggingEvent event,
}
}
+void LoggingImpl::InsertPacketListEvent(CastLoggingEvent event,
+ const PacketList& packets) {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ for (unsigned int i = 0; i < packets.size(); ++i) {
+ const Packet& packet = packets[i];
+ // Parse basic properties.
+ uint32 rtp_timestamp;
+ uint16 packet_id, max_packet_id;
+ const uint8* packet_data = &packet[0];
+ net::BigEndianReader big_endian_reader(packet_data + 4, 4);
+ big_endian_reader.ReadU32(&rtp_timestamp);
+ net::BigEndianReader cast_big_endian_reader(packet_data + 12 + 2, 4);
+ cast_big_endian_reader.ReadU16(&packet_id);
+ cast_big_endian_reader.ReadU16(&max_packet_id);
+ // rtp_timestamp is enough - no need for frame_id as well.
+ InsertPacketEvent(event, rtp_timestamp, kFrameIdUnknown, packet_id,
+ max_packet_id, packet.size());
+ }
+}
+
void LoggingImpl::InsertPacketEvent(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
uint16 packet_id,
uint16 max_packet_id,
- int size) {
- if (enable_data_collection_) {
+ size_t size) {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ if (config_.enable_data_collection) {
raw_.InsertPacketEvent(event, rtp_timestamp, frame_id, packet_id,
max_packet_id, size);
stats_.InsertPacketEvent(event, rtp_timestamp, frame_id, packet_id,
max_packet_id, size);
}
- if (enable_tracing_) {
+ if (config_.enable_tracing) {
std::string event_string = CastLoggingToString(event);
TRACE_EVENT_INSTANT2(event_string.c_str(), "PE",
TRACE_EVENT_SCOPE_THREAD, "rtp_timestamp", rtp_timestamp,
@@ -96,14 +120,15 @@ void LoggingImpl::InsertPacketEvent(CastLoggingEvent event,
}
void LoggingImpl::InsertGenericEvent(CastLoggingEvent event, int value) {
- if (enable_data_collection_) {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ if (config_.enable_data_collection) {
raw_.InsertGenericEvent(event, value);
stats_.InsertGenericEvent(event, value);
}
- if (enable_uma_stats_) {
+ if (config_.enable_uma_stats) {
UMA_HISTOGRAM_COUNTS(CastLoggingToString(event), value);
}
- if (enable_tracing_) {
+ if (config_.enable_tracing) {
std::string event_string = CastLoggingToString(event);
TRACE_EVENT_INSTANT1(event_string.c_str(), "GE",
TRACE_EVENT_SCOPE_THREAD, "value", value);
@@ -113,21 +138,25 @@ void LoggingImpl::InsertGenericEvent(CastLoggingEvent event, int value) {
// should just get the entire class, would be much easier.
FrameRawMap LoggingImpl::GetFrameRawData() {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
return raw_.GetFrameData();
}
PacketRawMap LoggingImpl::GetPacketRawData() {
- return raw_.GetPacketData();
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ return raw_.GetPacketData();
}
GenericRawMap LoggingImpl::GetGenericRawData() {
- return raw_.GetGenericData();
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ return raw_.GetGenericData();
}
const FrameStatsMap* LoggingImpl::GetFrameStatsData() {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
// Get stats data.
const FrameStatsMap* stats = stats_.GetFrameStatsData();
- if (enable_uma_stats_) {
+ if (config_.enable_uma_stats) {
FrameStatsMap::const_iterator it;
for (it = stats->begin(); it != stats->end(); ++it) {
// Check for an active event.
@@ -159,9 +188,10 @@ const FrameStatsMap* LoggingImpl::GetFrameStatsData() {
}
const PacketStatsMap* LoggingImpl::GetPacketStatsData() {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
// Get stats data.
const PacketStatsMap* stats = stats_.GetPacketStatsData();
- if (enable_uma_stats_) {
+ if (config_.enable_uma_stats) {
PacketStatsMap::const_iterator it;
for (it = stats->begin(); it != stats->end(); ++it) {
if (it->second > 0) {
@@ -174,9 +204,10 @@ const PacketStatsMap* LoggingImpl::GetPacketStatsData() {
}
const GenericStatsMap* LoggingImpl::GetGenericStatsData() {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
// Get stats data.
const GenericStatsMap* stats = stats_.GetGenericStatsData();
- if (enable_uma_stats_) {
+ if (config_.enable_uma_stats) {
GenericStatsMap::const_iterator it;
for (it = stats->begin(); it != stats->end(); ++it) {
if (it->second > 0) {
@@ -188,6 +219,7 @@ const GenericStatsMap* LoggingImpl::GetGenericStatsData() {
}
void LoggingImpl::Reset() {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
raw_.Reset();
stats_.Reset();
}
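The new InsertPacketListEvent above reads the RTP timestamp from bytes 4-7 of each packet and the Cast packet_id / max_packet_id from bytes 14-17, all big-endian. A minimal standalone parser using the same offsets, assuming that packet layout and using hand-rolled reads in place of net::BigEndianReader (sketch only):

#include <cstdint>
#include <cstdio>

struct ParsedCastPacket {
  uint32_t rtp_timestamp;
  uint16_t packet_id;
  uint16_t max_packet_id;
};

static uint32_t ReadBigEndian32(const uint8_t* data) {
  return (static_cast<uint32_t>(data[0]) << 24) |
         (static_cast<uint32_t>(data[1]) << 16) |
         (static_cast<uint32_t>(data[2]) << 8) |
         static_cast<uint32_t>(data[3]);
}

static uint16_t ReadBigEndian16(const uint8_t* data) {
  return static_cast<uint16_t>((data[0] << 8) | data[1]);
}

// Mirrors the offsets used by InsertPacketListEvent: RTP timestamp at byte 4,
// Cast packet_id at byte 14, max_packet_id at byte 16.
bool ParseCastPacket(const uint8_t* packet, size_t length,
                     ParsedCastPacket* out) {
  if (length < 18) return false;  // Too short to hold the fields we read.
  out->rtp_timestamp = ReadBigEndian32(packet + 4);
  out->packet_id = ReadBigEndian16(packet + 14);
  out->max_packet_id = ReadBigEndian16(packet + 16);
  return true;
}

int main() {
  uint8_t packet[18] = {};
  packet[7] = 0x2A;                       // rtp_timestamp = 42
  packet[15] = 0x05;                      // packet_id = 5
  packet[17] = 0x09;                      // max_packet_id = 9
  ParsedCastPacket parsed;
  if (ParseCastPacket(packet, sizeof(packet), &parsed)) {
    std::printf("%u %u %u\n", static_cast<unsigned>(parsed.rtp_timestamp),
                static_cast<unsigned>(parsed.packet_id),
                static_cast<unsigned>(parsed.max_packet_id));
  }
  return 0;
}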
diff --git a/media/cast/logging/logging_impl.h b/media/cast/logging/logging_impl.h
index 568bff5360..6c2d863eef 100644
--- a/media/cast/logging/logging_impl.h
+++ b/media/cast/logging/logging_impl.h
@@ -10,6 +10,9 @@
// 2. UMA stats.
// 3. Tracing of raw events.
+#include "base/memory/ref_counted.h"
+#include "base/task_runner.h"
+#include "media/cast/cast_config.h"
#include "media/cast/logging/logging_defines.h"
#include "media/cast/logging/logging_raw.h"
#include "media/cast/logging/logging_stats.h"
@@ -17,32 +20,34 @@
namespace media {
namespace cast {
-class LoggingImpl {
+// Should only be called from the main thread.
+class LoggingImpl : public base::NonThreadSafe {
public:
LoggingImpl(base::TickClock* clock,
- bool enable_data_collection,
- bool enable_uma_stats,
- bool enable_tracing);
+ scoped_refptr<base::TaskRunner> main_thread_proxy,
+ const CastLoggingConfig& config);
~LoggingImpl();
void InsertFrameEvent(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id);
+ uint32 frame_id);
void InsertFrameEventWithSize(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
int frame_size);
void InsertFrameEventWithDelay(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
base::TimeDelta delay);
+ void InsertPacketListEvent(CastLoggingEvent event, const PacketList& packets);
+
void InsertPacketEvent(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
uint16 packet_id,
uint16 max_packet_id,
- int size);
+ size_t size);
void InsertGenericEvent(CastLoggingEvent event, int value);
// Get raw data.
@@ -57,11 +62,10 @@ class LoggingImpl {
void Reset();
private:
+ scoped_refptr<base::TaskRunner> main_thread_proxy_;
+ const CastLoggingConfig config_;
LoggingRaw raw_;
LoggingStats stats_;
- bool enable_data_collection_;
- bool enable_uma_stats_;
- bool enable_tracing_;
DISALLOW_COPY_AND_ASSIGN(LoggingImpl);
};
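LoggingImpl now captures the main-thread task runner and DCHECKs RunsTasksOnCurrentThread() at every entry point instead of relying on documentation alone. A standalone sketch of the same single-thread affinity guard, using std::thread::id in place of base::TaskRunner (names are illustrative, not the Chromium API):

#include <cassert>
#include <thread>

// Remember the owning thread at construction and assert on it in every
// public method, much like the DCHECKs added to LoggingImpl.
class SingleThreadGuard {
 public:
  SingleThreadGuard() : owner_(std::this_thread::get_id()) {}

  void AssertOnOwningThread() const {
    assert(std::this_thread::get_id() == owner_);
  }

 private:
  std::thread::id owner_;
};

class LoggingLikeObject {
 public:
  void InsertGenericEvent(int value) {
    guard_.AssertOnOwningThread();  // Trips in debug builds if misused.
    last_value_ = value;
  }

 private:
  SingleThreadGuard guard_;
  int last_value_ = 0;
};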
diff --git a/media/cast/logging/logging_internal.cc b/media/cast/logging/logging_internal.cc
index aec0c96d57..ce2249ee4e 100644
--- a/media/cast/logging/logging_internal.cc
+++ b/media/cast/logging/logging_internal.cc
@@ -13,27 +13,27 @@ FrameLogData::FrameLogData(base::TickClock* clock)
FrameLogData::~FrameLogData() {}
-void FrameLogData::Insert(uint32 rtp_timestamp, uint8 frame_id) {
+void FrameLogData::Insert(uint32 rtp_timestamp, uint32 frame_id) {
FrameEvent info;
InsertBase(rtp_timestamp, frame_id, info);
}
void FrameLogData::InsertWithSize(
- uint32 rtp_timestamp, uint8 frame_id, int size) {
+ uint32 rtp_timestamp, uint32 frame_id, int size) {
FrameEvent info;
info.size = size;
InsertBase(rtp_timestamp, frame_id, info);
}
void FrameLogData::InsertWithDelay(
- uint32 rtp_timestamp, uint8 frame_id, base::TimeDelta delay) {
+ uint32 rtp_timestamp, uint32 frame_id, base::TimeDelta delay) {
FrameEvent info;
info.delay_delta = delay;
InsertBase(rtp_timestamp, frame_id, info);
}
void FrameLogData::InsertBase(
- uint32 rtp_timestamp, uint8 frame_id, FrameEvent info) {
+ uint32 rtp_timestamp, uint32 frame_id, FrameEvent info) {
info.timestamp = clock_->NowTicks();
info.frame_id = frame_id;
frame_map_.insert(std::make_pair(rtp_timestamp, info));
@@ -46,7 +46,7 @@ PacketLogData::PacketLogData(base::TickClock* clock)
PacketLogData::~PacketLogData() {}
void PacketLogData::Insert(uint32 rtp_timestamp,
- uint8 frame_id, uint16 packet_id, uint16 max_packet_id, int size) {
+ uint32 frame_id, uint16 packet_id, uint16 max_packet_id, int size) {
PacketEvent info;
info.size = size;
info.max_packet_id = max_packet_id;
diff --git a/media/cast/logging/logging_internal.h b/media/cast/logging/logging_internal.h
index 0f787600af..6f028b925f 100644
--- a/media/cast/logging/logging_internal.h
+++ b/media/cast/logging/logging_internal.h
@@ -18,16 +18,16 @@ namespace cast {
// TODO(mikhal): Consider storing only the delta time and not absolute time.
struct FrameEvent {
- uint8 frame_id;
+ uint32 frame_id;
int size;
base::TimeTicks timestamp;
base::TimeDelta delay_delta; // render/playout delay.
};
struct PacketEvent {
- uint8 frame_id;
+ uint32 frame_id;
int max_packet_id;
- int size;
+ size_t size;
base::TimeTicks timestamp;
};
@@ -40,16 +40,16 @@ class FrameLogData {
public:
explicit FrameLogData(base::TickClock* clock);
~FrameLogData();
- void Insert(uint32 rtp_timestamp, uint8 frame_id);
+ void Insert(uint32 rtp_timestamp, uint32 frame_id);
// Include size for encoded images (compute bitrate),
- void InsertWithSize(uint32 rtp_timestamp, uint8 frame_id, int size);
+ void InsertWithSize(uint32 rtp_timestamp, uint32 frame_id, int size);
// Include playout/render delay info.
void InsertWithDelay(
- uint32 rtp_timestamp, uint8 frame_id, base::TimeDelta delay);
+ uint32 rtp_timestamp, uint32 frame_id, base::TimeDelta delay);
void Reset();
private:
- void InsertBase(uint32 rtp_timestamp, uint8 frame_id, FrameEvent info);
+ void InsertBase(uint32 rtp_timestamp, uint32 frame_id, FrameEvent info);
base::TickClock* const clock_; // Not owned by this class.
FrameMap frame_map_;
@@ -62,7 +62,7 @@ class PacketLogData {
public:
explicit PacketLogData(base::TickClock* clock);
~PacketLogData();
- void Insert(uint32 rtp_timestamp, uint8 frame_id, uint16 packet_id,
+ void Insert(uint32 rtp_timestamp, uint32 frame_id, uint16 packet_id,
uint16 max_packet_id, int size);
void Reset();
diff --git a/media/cast/logging/logging_raw.cc b/media/cast/logging/logging_raw.cc
index fa865a4324..93a65f0283 100644
--- a/media/cast/logging/logging_raw.cc
+++ b/media/cast/logging/logging_raw.cc
@@ -4,7 +4,6 @@
#include "media/cast/logging/logging_raw.h"
-#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/time/time.h"
@@ -23,13 +22,13 @@ LoggingRaw::~LoggingRaw() {}
void LoggingRaw::InsertFrameEvent(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id) {
+ uint32 frame_id) {
InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
}
void LoggingRaw::InsertFrameEventWithSize(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
int size) {
InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
// Now insert size.
@@ -40,7 +39,7 @@ void LoggingRaw::InsertFrameEventWithSize(CastLoggingEvent event,
void LoggingRaw::InsertFrameEventWithDelay(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
base::TimeDelta delay) {
InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
// Now insert delay.
@@ -49,10 +48,10 @@ void LoggingRaw::InsertFrameEventWithDelay(CastLoggingEvent event,
it->second.delay_delta = delay;
}
void LoggingRaw::InsertBaseFrameEvent(CastLoggingEvent event,
- uint8 frame_id,
+ uint32 frame_id,
uint32 rtp_timestamp) {
// Is this a new event?
- FrameRawMap::iterator it = frame_map_.find(event);
+ FrameRawMap::iterator it = frame_map_.find(rtp_timestamp);
if (it == frame_map_.end()) {
// Create a new map entry.
FrameEvent info;
@@ -64,15 +63,19 @@ void LoggingRaw::InsertBaseFrameEvent(CastLoggingEvent event,
// Insert to an existing entry.
it->second.timestamp.push_back(clock_->NowTicks());
it->second.type.push_back(event);
+ // Do we have a valid frame_id?
+ // Not all events have a valid frame id.
+ if (it->second.frame_id == kFrameIdUnknown && frame_id != kFrameIdUnknown)
+ it->second.frame_id = frame_id;
}
}
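With the raw frame map now keyed by rtp_timestamp rather than by event, every event that refers to the same frame accumulates in a single entry, and the frame id is filled in the first time a caller supplies something other than kFrameIdUnknown. A standalone sketch of that merge behavior, using std::map in place of FrameRawMap (sketch-local names and sentinel):

#include <cstdint>
#include <map>
#include <vector>

const uint32_t kUnknownFrameId = 0xFFFFFFFF;  // Sketch-local sentinel.

struct FrameEntry {
  uint32_t frame_id = kUnknownFrameId;
  std::vector<int> events;  // Event codes, in arrival order.
};

// Keyed by RTP timestamp, so capture/encode/send events for one frame merge
// into a single entry.
void InsertFrameEvent(std::map<uint32_t, FrameEntry>* frames, int event,
                      uint32_t rtp_timestamp, uint32_t frame_id) {
  FrameEntry& entry = (*frames)[rtp_timestamp];
  entry.events.push_back(event);
  // Not every event carries a frame id; keep the first valid one we see.
  if (entry.frame_id == kUnknownFrameId && frame_id != kUnknownFrameId)
    entry.frame_id = frame_id;
}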
void LoggingRaw::InsertPacketEvent(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
uint16 packet_id,
uint16 max_packet_id,
- int size) {
+ size_t size) {
// Is this packet belonging to a new frame?
PacketRawMap::iterator it = packet_map_.find(rtp_timestamp);
if (it == packet_map_.end()) {
diff --git a/media/cast/logging/logging_raw.h b/media/cast/logging/logging_raw.h
index 91de12aaaa..4ac8d0fb7a 100644
--- a/media/cast/logging/logging_raw.h
+++ b/media/cast/logging/logging_raw.h
@@ -31,27 +31,27 @@ class LoggingRaw : public base::NonThreadSafe,
// Frame events can be inserted with different parameters.
void InsertFrameEvent(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id);
+ uint32 frame_id);
// Size - Inserting the size implies that this is an encoded frame.
void InsertFrameEventWithSize(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
int frame_size);
// Render/playout delay
void InsertFrameEventWithDelay(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
base::TimeDelta delay);
// Insert a packet event.
void InsertPacketEvent(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
uint16 packet_id,
uint16 max_packet_id,
- int size);
+ size_t size);
void InsertGenericEvent(CastLoggingEvent event, int value);
@@ -66,14 +66,14 @@ class LoggingRaw : public base::NonThreadSafe,
private:
void InsertBaseFrameEvent(CastLoggingEvent event,
- uint8 frame_id,
+ uint32 frame_id,
uint32 rtp_timestamp);
- base::WeakPtrFactory<LoggingRaw> weak_factory_;
base::TickClock* const clock_; // Not owned by this class.
FrameRawMap frame_map_;
PacketRawMap packet_map_;
GenericRawMap generic_map_;
+ base::WeakPtrFactory<LoggingRaw> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(LoggingRaw);
};
diff --git a/media/cast/logging/logging_stats.cc b/media/cast/logging/logging_stats.cc
index 9d79f69b6f..84fdbf7a61 100644
--- a/media/cast/logging/logging_stats.cc
+++ b/media/cast/logging/logging_stats.cc
@@ -16,6 +16,7 @@ LoggingStats::LoggingStats(base::TickClock* clock)
start_time_(),
clock_(clock) {
memset(counts_, 0, sizeof(counts_));
+ memset(start_time_, 0, sizeof(start_time_));
}
LoggingStats::~LoggingStats() {}
@@ -29,13 +30,13 @@ void LoggingStats::Reset() {
void LoggingStats::InsertFrameEvent(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id) {
+ uint32 frame_id) {
InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
}
void LoggingStats::InsertFrameEventWithSize(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
int frame_size) {
InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
// Update size.
@@ -46,7 +47,7 @@ void LoggingStats::InsertFrameEventWithSize(CastLoggingEvent event,
void LoggingStats::InsertFrameEventWithDelay(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
base::TimeDelta delay) {
InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
// Update size.
@@ -63,7 +64,7 @@ void LoggingStats::InsertFrameEventWithDelay(CastLoggingEvent event,
}
void LoggingStats::InsertBaseFrameEvent(CastLoggingEvent event,
- uint8 frame_id,
+ uint32 frame_id,
uint32 rtp_timestamp) {
// Does this belong to an existing event?
FrameStatsMap::iterator it = frame_stats_.find(event);
@@ -79,10 +80,10 @@ void LoggingStats::InsertBaseFrameEvent(CastLoggingEvent event,
void LoggingStats::InsertPacketEvent(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
uint16 packet_id,
uint16 max_packet_id,
- int size) {
+ size_t size) {
// Does this packet belong to an existing event?
PacketStatsMap::iterator it = packet_stats_.find(event);
if (it == packet_stats_.end()) {
diff --git a/media/cast/logging/logging_stats.h b/media/cast/logging/logging_stats.h
index 65dccbd7b4..f08649cc77 100644
--- a/media/cast/logging/logging_stats.h
+++ b/media/cast/logging/logging_stats.h
@@ -13,8 +13,6 @@
namespace media {
namespace cast {
-const int kNumberOfEvents = 19;
-
class LoggingStats {
public:
explicit LoggingStats(base::TickClock* clock);
@@ -25,24 +23,24 @@ class LoggingStats {
void InsertFrameEvent(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id);
+ uint32 frame_id);
void InsertFrameEventWithSize(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
int frame_size);
void InsertFrameEventWithDelay(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
base::TimeDelta delay);
void InsertPacketEvent(CastLoggingEvent event,
uint32 rtp_timestamp,
- uint8 frame_id,
+ uint32 frame_id,
uint16 packet_id,
uint16 max_packet_id,
- int size);
+ size_t size);
void InsertGenericEvent(CastLoggingEvent event, int value);
@@ -56,15 +54,15 @@ class LoggingStats {
private:
void InsertBaseFrameEvent(CastLoggingEvent event,
- uint8 frame_id,
+ uint32 frame_id,
uint32 rtp_timestamp);
FrameStatsMap frame_stats_;
PacketStatsMap packet_stats_;
GenericStatsMap generic_stats_;
// Every event has an individual start time
- base::TimeTicks start_time_[kNumberOfEvents];
+ base::TimeTicks start_time_[kNumOfLoggingEvents];
// Keep track of event counts.
- int counts_[kNumberOfEvents];
+ int counts_[kNumOfLoggingEvents];
base::TickClock* const clock_; // Not owned by this class.
DISALLOW_COPY_AND_ASSIGN(LoggingStats);
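The per-event arrays above are now sized by the kNumOfLoggingEvents sentinel added at the end of CastLoggingEvent, so they track the enum automatically instead of depending on the hand-counted kNumberOfEvents constant that was removed. A minimal sketch of that idiom (sketch-local enum, not the real event list):

// Trailing sentinel gives the number of real enum values automatically.
enum SketchLoggingEvent {
  kEventA,
  kEventB,
  kEventC,
  kNumSketchEvents,  // Must stay last.
};

struct SketchStats {
  int counts[kNumSketchEvents] = {};  // Grows with the enum automatically.
};

void Increment(SketchStats* stats, SketchLoggingEvent event) {
  if (event < kNumSketchEvents)
    ++stats->counts[event];
}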
diff --git a/media/cast/logging/logging_unittest.cc b/media/cast/logging/logging_unittest.cc
index ce55e65716..5ce760ec4c 100644
--- a/media/cast/logging/logging_unittest.cc
+++ b/media/cast/logging/logging_unittest.cc
@@ -40,7 +40,7 @@ TEST_F(TestLogging, BasicFrameLogging) {
base::TimeTicks start_time = testing_clock_.NowTicks();
base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
uint32 rtp_timestamp = 0;
- uint8 frame_id = 0;
+ uint32 frame_id = 0;
do {
logging_.InsertFrameEvent(kAudioFrameCaptured, rtp_timestamp, frame_id);
testing_clock_.Advance(
@@ -73,7 +73,7 @@ TEST_F(TestLogging, FrameLoggingWithSize) {
base::TimeTicks start_time = testing_clock_.NowTicks();
base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
uint32 rtp_timestamp = 0;
- uint8 frame_id = 0;
+ uint32 frame_id = 0;
do {
int size = kBaseFrameSizeBytes +
base::RandInt(-kRandomSizeInterval, kRandomSizeInterval);
@@ -110,7 +110,7 @@ TEST_F(TestLogging, FrameLoggingWithDelay) {
base::TimeTicks start_time = testing_clock_.NowTicks();
base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
uint32 rtp_timestamp = 0;
- uint8 frame_id = 0;
+ uint32 frame_id = 0;
do {
int delay = kPlayoutDelayMs +
base::RandInt(-kRandomSizeInterval, kRandomSizeInterval);
@@ -145,7 +145,7 @@ TEST_F(TestLogging, MultipleEventFrameLogging) {
base::TimeTicks start_time = testing_clock_.NowTicks();
base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
uint32 rtp_timestamp = 0;
- uint8 frame_id = 0;
+ uint32 frame_id = 0;
do {
logging_.InsertFrameEvent(kAudioFrameCaptured, rtp_timestamp, frame_id);
if (frame_id % 2) {
@@ -178,7 +178,7 @@ TEST_F(TestLogging, PacketLogging) {
base::TimeTicks start_time = testing_clock_.NowTicks();
base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
uint32 rtp_timestamp = 0;
- uint8 frame_id = 0;
+ uint32 frame_id = 0;
do {
for (int i = 0; i < kNumPacketsPerFrame; ++i) {
int size = kBaseSize + base::RandInt(-kSizeInterval, kSizeInterval);
diff --git a/media/cast/pacing/paced_sender.cc b/media/cast/pacing/paced_sender.cc
index 4abda9bd0b..af13d3ad71 100644
--- a/media/cast/pacing/paced_sender.cc
+++ b/media/cast/pacing/paced_sender.cc
@@ -28,15 +28,22 @@ PacedSender::PacedSender(scoped_refptr<CastEnvironment> cast_environment,
PacedSender::~PacedSender() {}
bool PacedSender::SendPackets(const PacketList& packets) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ cast_environment_->Logging()->InsertPacketListEvent(kPacketSentToPacer,
+ packets);
return SendPacketsToTransport(packets, &packet_list_);
}
bool PacedSender::ResendPackets(const PacketList& packets) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ cast_environment_->Logging()->InsertPacketListEvent(kPacketRetransmited,
+ packets);
return SendPacketsToTransport(packets, &resend_packet_list_);
}
bool PacedSender::SendPacketsToTransport(const PacketList& packets,
PacketList* packets_not_sent) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
UpdateBurstSize(packets.size());
if (!packets_not_sent->empty()) {
@@ -61,10 +68,13 @@ bool PacedSender::SendPacketsToTransport(const PacketList& packets,
packets_sent_in_burst_ += packets_to_send.size();
if (packets_to_send.empty()) return true;
+ cast_environment_->Logging()->InsertPacketListEvent(kPacketSentToNetwork,
+ packets);
return transport_->SendPackets(packets_to_send);
}
bool PacedSender::SendRtcpPacket(const Packet& packet) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
// We pass the RTCP packets straight through.
return transport_->SendPacket(packet);
}
@@ -82,12 +92,14 @@ void PacedSender::ScheduleNextSend() {
}
void PacedSender::SendNextPacketBurst() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
SendStoredPackets();
time_last_process_ = cast_environment_->Clock()->NowTicks();
ScheduleNextSend();
}
void PacedSender::SendStoredPackets() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (packet_list_.empty() && resend_packet_list_.empty()) return;
size_t packets_to_send = burst_size_;
@@ -123,6 +135,7 @@ void PacedSender::SendStoredPackets() {
}
void PacedSender::UpdateBurstSize(size_t packets_to_send) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
packets_to_send = std::max(packets_to_send,
resend_packet_list_.size() + packet_list_.size());
diff --git a/media/cast/pacing/paced_sender.gyp b/media/cast/pacing/paced_sender.gyp
index 2cc77507a7..1947dd4ec4 100644
--- a/media/cast/pacing/paced_sender.gyp
+++ b/media/cast/pacing/paced_sender.gyp
@@ -16,7 +16,6 @@
],
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/base/base.gyp:test_support_base',
],
},
], # targets
diff --git a/media/cast/pacing/paced_sender_unittest.cc b/media/cast/pacing/paced_sender_unittest.cc
index 8ae4ea8643..e1fbc2e707 100644
--- a/media/cast/pacing/paced_sender_unittest.cc
+++ b/media/cast/pacing/paced_sender_unittest.cc
@@ -59,7 +59,8 @@ class PacedSenderTest : public ::testing::Test {
virtual void SetUp() {
task_runner_ = new test::FakeTaskRunner(&testing_clock_);
cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_);
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig());
paced_sender_.reset(new PacedSender(cast_environment_, &mock_transport_));
}
diff --git a/media/cast/rtcp/mock_rtcp_receiver_feedback.h b/media/cast/rtcp/mock_rtcp_receiver_feedback.h
index acde0560bb..0316d9819f 100644
--- a/media/cast/rtcp/mock_rtcp_receiver_feedback.h
+++ b/media/cast/rtcp/mock_rtcp_receiver_feedback.h
@@ -25,6 +25,11 @@ class MockRtcpReceiverFeedback : public RtcpReceiverFeedback {
void(const RtcpReceiverReferenceTimeReport& remote_time_report));
MOCK_METHOD0(OnReceivedSendReportRequest, void());
+
+ MOCK_METHOD1(OnReceivedReceiverLog,
+ void(const RtcpReceiverLogMessage& receiver_log));
+ MOCK_METHOD1(OnReceivedSenderLog,
+ void(const RtcpSenderLogMessage& sender_log));
};
class MockRtcpRttFeedback : public RtcpRttFeedback {
diff --git a/media/cast/rtcp/mock_rtcp_sender_feedback.h b/media/cast/rtcp/mock_rtcp_sender_feedback.h
index 6d47a74421..40547e6283 100644
--- a/media/cast/rtcp/mock_rtcp_sender_feedback.h
+++ b/media/cast/rtcp/mock_rtcp_sender_feedback.h
@@ -18,18 +18,6 @@ class MockRtcpSenderFeedback : public RtcpSenderFeedback {
MockRtcpSenderFeedback();
virtual ~MockRtcpSenderFeedback();
- MOCK_METHOD1(OnReceivedReportBlock,
- void(const RtcpReportBlock& report_block));
-
- MOCK_METHOD0(OnReceivedIntraFrameRequest, void());
-
- MOCK_METHOD2(OnReceivedRpsi, void(uint8 payload_type, uint64 picture_id));
-
- MOCK_METHOD1(OnReceivedRemb, void(uint32 bitrate));
-
- MOCK_METHOD1(OnReceivedNackRequest,
- void(const std::list<uint16>& nack_sequence_numbers));
-
MOCK_METHOD1(OnReceivedCastFeedback,
void(const RtcpCastMessage& cast_feedback));
};
diff --git a/media/cast/rtcp/rtcp.cc b/media/cast/rtcp/rtcp.cc
index a1720ffe1a..d5ee6e3c54 100644
--- a/media/cast/rtcp/rtcp.cc
+++ b/media/cast/rtcp/rtcp.cc
@@ -4,10 +4,10 @@
#include "media/cast/rtcp/rtcp.h"
-#include "base/debug/trace_event.h"
#include "base/rand_util.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/rtcp/rtcp_defines.h"
#include "media/cast/rtcp/rtcp_receiver.h"
#include "media/cast/rtcp/rtcp_sender.h"
@@ -17,7 +17,7 @@
namespace media {
namespace cast {
-static const int kMaxRttMs = 1000000; // 1000 seconds.
+static const int kMaxRttMs = 10000; // 10 seconds.
// Time limit for received RTCP messages when we stop using it for lip-sync.
static const int64 kMaxDiffSinceReceivedRtcpMs = 100000; // 100 seconds.
@@ -52,6 +52,10 @@ RtcpNackMessage::~RtcpNackMessage() {}
RtcpRembMessage::RtcpRembMessage() {}
RtcpRembMessage::~RtcpRembMessage() {}
+RtcpReceiverFrameLogMessage::RtcpReceiverFrameLogMessage(uint32 timestamp)
+ : rtp_timestamp_(timestamp) {}
+
+RtcpReceiverFrameLogMessage::~RtcpReceiverFrameLogMessage() {}
class LocalRtcpReceiverFeedback : public RtcpReceiverFeedback {
public:
@@ -80,41 +84,57 @@ class LocalRtcpReceiverFeedback : public RtcpReceiverFeedback {
rtcp_->OnReceivedSendReportRequest();
}
+ virtual void OnReceivedReceiverLog(
+ const RtcpReceiverLogMessage& receiver_log) OVERRIDE {
+ // TODO(pwestin): Implement.
+ // Add received log messages into our log system.
+ NOTIMPLEMENTED();
+ }
+
+ virtual void OnReceivedSenderLog(
+ const RtcpSenderLogMessage& sender_log) OVERRIDE {
+ // TODO(pwestin): Implement.
+ // Add received log messages into our log system.
+ NOTIMPLEMENTED();
+ }
+
private:
Rtcp* rtcp_;
};
-Rtcp::Rtcp(base::TickClock* clock,
+Rtcp::Rtcp(scoped_refptr<CastEnvironment> cast_environment,
RtcpSenderFeedback* sender_feedback,
PacedPacketSender* paced_packet_sender,
RtpSenderStatistics* rtp_sender_statistics,
RtpReceiverStatistics* rtp_receiver_statistics,
RtcpMode rtcp_mode,
const base::TimeDelta& rtcp_interval,
- bool sending_media,
uint32 local_ssrc,
+ uint32 remote_ssrc,
const std::string& c_name)
: rtcp_interval_(rtcp_interval),
rtcp_mode_(rtcp_mode),
- sending_media_(sending_media),
local_ssrc_(local_ssrc),
+ remote_ssrc_(remote_ssrc),
rtp_sender_statistics_(rtp_sender_statistics),
rtp_receiver_statistics_(rtp_receiver_statistics),
receiver_feedback_(new LocalRtcpReceiverFeedback(this)),
rtt_feedback_(new LocalRtcpRttFeedback(this)),
- rtcp_sender_(new RtcpSender(paced_packet_sender, local_ssrc, c_name)),
- last_report_sent_(0),
+ rtcp_sender_(new RtcpSender(cast_environment, paced_packet_sender,
+ local_ssrc, c_name)),
last_report_received_(0),
last_received_rtp_timestamp_(0),
last_received_ntp_seconds_(0),
last_received_ntp_fraction_(0),
min_rtt_(base::TimeDelta::FromMilliseconds(kMaxRttMs)),
number_of_rtt_in_avg_(0),
- clock_(clock) {
- rtcp_receiver_.reset(new RtcpReceiver(sender_feedback,
+ cast_environment_(cast_environment) {
+ rtcp_receiver_.reset(new RtcpReceiver(cast_environment,
+ sender_feedback,
receiver_feedback_.get(),
rtt_feedback_.get(),
local_ssrc));
+ rtcp_receiver_->SetRemoteSSRC(remote_ssrc);
}
Rtcp::~Rtcp() {}
@@ -148,10 +168,6 @@ base::TimeTicks Rtcp::TimeToSendNextRtcpReport() {
return next_time_to_send_rtcp_;
}
-void Rtcp::SetRemoteSSRC(uint32 ssrc) {
- rtcp_receiver_->SetRemoteSSRC(ssrc);
-}
-
void Rtcp::IncomingRtcpPacket(const uint8* rtcp_buffer, size_t length) {
RtcpParser rtcp_parser(rtcp_buffer, length);
if (!rtcp_parser.IsValid()) {
@@ -162,99 +178,38 @@ void Rtcp::IncomingRtcpPacket(const uint8* rtcp_buffer, size_t length) {
rtcp_receiver_->IncomingRtcpPacket(&rtcp_parser);
}
-void Rtcp::SendRtcpCast(const RtcpCastMessage& cast_message) {
+void Rtcp::SendRtcpFromRtpReceiver(const RtcpCastMessage* cast_message,
+ const RtcpReceiverLogMessage* receiver_log) {
uint32 packet_type_flags = 0;
- base::TimeTicks now = clock_->NowTicks();
- if (rtcp_mode_ == kRtcpCompound || now >= next_time_to_send_rtcp_) {
- if (sending_media_) {
- packet_type_flags = RtcpSender::kRtcpSr;
- } else {
- packet_type_flags = RtcpSender::kRtcpRr;
- }
- }
- packet_type_flags |= RtcpSender::kRtcpCast;
-
- SendRtcp(now, packet_type_flags, 0, &cast_message);
-}
-
-void Rtcp::SendRtcpPli(uint32 pli_remote_ssrc) {
- uint32 packet_type_flags = 0;
- base::TimeTicks now = clock_->NowTicks();
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ RtcpReportBlock report_block;
+ RtcpReceiverReferenceTimeReport rrtr;
- if (rtcp_mode_ == kRtcpCompound || now >= next_time_to_send_rtcp_) {
- if (sending_media_) {
- packet_type_flags = RtcpSender::kRtcpSr;
- } else {
- packet_type_flags = RtcpSender::kRtcpRr;
- }
- }
- packet_type_flags |= RtcpSender::kRtcpPli;
- SendRtcp(now, packet_type_flags, pli_remote_ssrc, NULL);
-}
-
-void Rtcp::SendRtcpReport(uint32 media_ssrc) {
- uint32 packet_type_flags;
- base::TimeTicks now = clock_->NowTicks();
- if (sending_media_) {
- packet_type_flags = RtcpSender::kRtcpSr;
- } else {
- packet_type_flags = RtcpSender::kRtcpRr;
+ if (cast_message) {
+ packet_type_flags |= RtcpSender::kRtcpCast;
+ cast_environment_->Logging()->InsertGenericEvent(kAckSent,
+ cast_message->ack_frame_id_);
}
- SendRtcp(now, packet_type_flags, media_ssrc, NULL);
-}
-
-void Rtcp::SendRtcp(const base::TimeTicks& now,
- uint32 packet_type_flags,
- uint32 media_ssrc,
- const RtcpCastMessage* cast_message) {
- if (packet_type_flags & RtcpSender::kRtcpSr ||
- packet_type_flags & RtcpSender::kRtcpRr) {
- UpdateNextTimeToSendRtcp();
+ if (receiver_log) {
+ packet_type_flags |= RtcpSender::kRtcpReceiverLog;
}
- if (packet_type_flags & RtcpSender::kRtcpSr) {
- RtcpSenderInfo sender_info;
-
- if (rtp_sender_statistics_) {
- rtp_sender_statistics_->GetStatistics(now, &sender_info);
- } else {
- memset(&sender_info, 0, sizeof(sender_info));
- }
- time_last_report_sent_ = now;
- last_report_sent_ = (sender_info.ntp_seconds << 16) +
- (sender_info.ntp_fraction >> 16);
-
- RtcpDlrrReportBlock dlrr;
- if (!time_last_report_received_.is_null()) {
- packet_type_flags |= RtcpSender::kRtcpDlrr;
- dlrr.last_rr = last_report_received_;
- uint32 delay_seconds = 0;
- uint32 delay_fraction = 0;
- base::TimeDelta delta = now - time_last_report_received_;
- ConvertTimeToFractions(delta.InMicroseconds(),
- &delay_seconds,
- &delay_fraction);
+ if (rtcp_mode_ == kRtcpCompound || now >= next_time_to_send_rtcp_) {
+ packet_type_flags |= RtcpSender::kRtcpRr;
- dlrr.delay_since_last_rr =
- ConvertToNtpDiff(delay_seconds, delay_fraction);
- }
- rtcp_sender_->SendRtcp(packet_type_flags,
- &sender_info,
- NULL,
- media_ssrc,
- &dlrr,
- NULL,
- NULL);
- } else {
- RtcpReportBlock report_block;
report_block.remote_ssrc = 0; // Not needed to set send side.
- report_block.media_ssrc = media_ssrc; // SSRC of the RTP packet sender.
+ report_block.media_ssrc = remote_ssrc_; // SSRC of the RTP packet sender.
if (rtp_receiver_statistics_) {
rtp_receiver_statistics_->GetStatistics(
&report_block.fraction_lost,
&report_block.cumulative_lost,
&report_block.extended_high_sequence_number,
&report_block.jitter);
+ cast_environment_->Logging()->InsertGenericEvent(kJitterMs,
+ report_block.jitter);
+ cast_environment_->Logging()->InsertGenericEvent(kPacketLoss,
+ report_block.fraction_lost);
+
}
report_block.last_sr = last_report_received_;
@@ -272,26 +227,58 @@ void Rtcp::SendRtcp(const base::TimeTicks& now,
}
packet_type_flags |= RtcpSender::kRtcpRrtr;
- RtcpReceiverReferenceTimeReport rrtr;
ConvertTimeTicksToNtp(now, &rrtr.ntp_seconds, &rrtr.ntp_fraction);
+ SaveLastSentNtpTime(now, rrtr.ntp_seconds, rrtr.ntp_fraction);
+ UpdateNextTimeToSendRtcp();
+ }
+ rtcp_sender_->SendRtcpFromRtpReceiver(packet_type_flags,
+ &report_block,
+ &rrtr,
+ cast_message,
+ receiver_log);
+}
+
+void Rtcp::SendRtcpFromRtpSender(
+ const RtcpSenderLogMessage* sender_log_message) {
+ uint32 packet_type_flags = RtcpSender::kRtcpSr;
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- time_last_report_sent_ = now;
- last_report_sent_ = ConvertToNtpDiff(rrtr.ntp_seconds, rrtr.ntp_fraction);
+ RtcpSenderInfo sender_info;
+ RtcpDlrrReportBlock dlrr;
- rtcp_sender_->SendRtcp(packet_type_flags,
- NULL,
- &report_block,
- media_ssrc,
- NULL,
- &rrtr,
- cast_message);
+ if (sender_log_message) packet_type_flags |= RtcpSender::kRtcpSenderLog;
+
+ if (rtp_sender_statistics_) {
+ rtp_sender_statistics_->GetStatistics(now, &sender_info);
+ } else {
+ memset(&sender_info, 0, sizeof(sender_info));
}
+ SaveLastSentNtpTime(now, sender_info.ntp_seconds, sender_info.ntp_fraction);
+
+ if (!time_last_report_received_.is_null()) {
+ packet_type_flags |= RtcpSender::kRtcpDlrr;
+ dlrr.last_rr = last_report_received_;
+ uint32 delay_seconds = 0;
+ uint32 delay_fraction = 0;
+ base::TimeDelta delta = now - time_last_report_received_;
+ ConvertTimeToFractions(delta.InMicroseconds(),
+ &delay_seconds,
+ &delay_fraction);
+
+ dlrr.delay_since_last_rr = ConvertToNtpDiff(delay_seconds, delay_fraction);
+ }
+
+ rtcp_sender_->SendRtcpFromRtpSender(packet_type_flags,
+ &sender_info,
+ &dlrr,
+ sender_log_message);
+ UpdateNextTimeToSendRtcp();
}
void Rtcp::OnReceivedNtp(uint32 ntp_seconds, uint32 ntp_fraction) {
last_report_received_ = (ntp_seconds << 16) + (ntp_fraction >> 16);
- base::TimeTicks now = clock_->NowTicks();
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
time_last_report_received_ = now;
}
@@ -304,7 +291,7 @@ void Rtcp::OnReceivedLipSyncInfo(uint32 rtp_timestamp,
}
void Rtcp::OnReceivedSendReportRequest() {
- base::TimeTicks now = clock_->NowTicks();
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
// Trigger a new RTCP report at next timer.
next_time_to_send_rtcp_ = now;
@@ -344,13 +331,43 @@ bool Rtcp::RtpTimestampInSenderTime(int frequency, uint32 rtp_timestamp,
void Rtcp::OnReceivedDelaySinceLastReport(uint32 receivers_ssrc,
uint32 last_report,
uint32 delay_since_last_report) {
- if (last_report_sent_ != last_report) return; // Feedback on another report.
- if (time_last_report_sent_.is_null()) return;
+ RtcpSendTimeMap::iterator it = last_reports_sent_map_.find(last_report);
+ if (it == last_reports_sent_map_.end()) {
+ return; // Feedback on another report.
+ }
- base::TimeDelta sender_delay = clock_->NowTicks() - time_last_report_sent_;
+ base::TimeDelta sender_delay = cast_environment_->Clock()->NowTicks()
+ - it->second;
UpdateRtt(sender_delay, ConvertFromNtpDiff(delay_since_last_report));
}
+void Rtcp::SaveLastSentNtpTime(const base::TimeTicks& now,
+ uint32 last_ntp_seconds,
+ uint32 last_ntp_fraction) {
+  // Make sure |now| is not earlier than the time of the last element in
+  // |last_reports_sent_queue_|.
+ if (!last_reports_sent_queue_.empty()) {
+ DCHECK(now >= last_reports_sent_queue_.back().second);
+ }
+
+ uint32 last_report = ConvertToNtpDiff(last_ntp_seconds, last_ntp_fraction);
+ last_reports_sent_map_[last_report] = now;
+ last_reports_sent_queue_.push(std::make_pair(last_report, now));
+
+ base::TimeTicks timeout = now - base::TimeDelta::FromMilliseconds(kMaxRttMs);
+
+  // Remove entries older than |timeout|.
+ while (!last_reports_sent_queue_.empty()) {
+ RtcpSendTimePair oldest_report = last_reports_sent_queue_.front();
+ if (oldest_report.second < timeout) {
+ last_reports_sent_map_.erase(oldest_report.first);
+ last_reports_sent_queue_.pop();
+ } else {
+ break;
+ }
+ }
+}
+
void Rtcp::UpdateRtt(const base::TimeDelta& sender_delay,
const base::TimeDelta& receiver_delay) {
base::TimeDelta rtt = sender_delay - receiver_delay;
@@ -367,7 +384,6 @@ void Rtcp::UpdateRtt(const base::TimeDelta& sender_delay,
avg_rtt_ms_ = rtt.InMilliseconds();
}
number_of_rtt_in_avg_++;
- TRACE_COUNTER_ID1("cast_rtcp", "RTT", local_ssrc_, rtt.InMilliseconds());
}
bool Rtcp::Rtt(base::TimeDelta* rtt,
@@ -379,7 +395,9 @@ bool Rtcp::Rtt(base::TimeDelta* rtt,
DCHECK(min_rtt) << "Invalid argument";
DCHECK(max_rtt) << "Invalid argument";
- if (number_of_rtt_in_avg_ == 0) return false;
+ if (number_of_rtt_in_avg_ == 0) return false;
+  // Log the current RTT estimate; |rtt| is only an output parameter here.
+  cast_environment_->Logging()->InsertGenericEvent(kRttMs,
+      rtt_.InMilliseconds());
*rtt = rtt_;
*avg_rtt = base::TimeDelta::FromMilliseconds(avg_rtt_ms_);
@@ -410,7 +428,7 @@ void Rtcp::UpdateNextTimeToSendRtcp() {
base::TimeDelta time_to_next = (rtcp_interval_ / 2) +
(rtcp_interval_ * random / 1000);
- base::TimeTicks now = clock_->NowTicks();
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
next_time_to_send_rtcp_ = now + time_to_next;
}
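The round-trip-time plumbing in rtcp.cc follows the usual RTCP pattern: SaveLastSentNtpTime() remembers when each report left, keyed by its compact NTP timestamp, and OnReceivedDelaySinceLastReport() subtracts the peer's reported processing delay from the locally measured round trip. A minimal sketch of that arithmetic, assuming the standard 16.16 fixed-point compact NTP format behind ConvertFromNtpDiff(); illustrative only, not part of the patch:

#include <cstdint>

// Illustrative only. |report_sent_us| is the send time looked up in
// last_reports_sent_map_; |delay_since_last_report| is the peer's delay in
// 16.16 fixed-point seconds.
int64_t RttMicroseconds(int64_t now_us,
                        int64_t report_sent_us,
                        uint32_t delay_since_last_report) {
  const int64_t sender_delay_us = now_us - report_sent_us;
  const int64_t receiver_delay_us =
      (static_cast<int64_t>(delay_since_last_report) * 1000000) >> 16;
  return sender_delay_us - receiver_delay_us;  // Same subtraction as UpdateRtt().
}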
diff --git a/media/cast/rtcp/rtcp.h b/media/cast/rtcp/rtcp.h
index 7a043c2545..44bf270679 100644
--- a/media/cast/rtcp/rtcp.h
+++ b/media/cast/rtcp/rtcp.h
@@ -7,6 +7,7 @@
#include <list>
#include <map>
+#include <queue>
#include <set>
#include <string>
@@ -16,6 +17,7 @@
#include "base/time/time.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/rtcp/rtcp_defines.h"
namespace media {
@@ -27,22 +29,26 @@ class PacedPacketSender;
class RtcpReceiver;
class RtcpSender;
+typedef std::pair<uint32, base::TimeTicks> RtcpSendTimePair;
+typedef std::map<uint32, base::TimeTicks> RtcpSendTimeMap;
+typedef std::queue<RtcpSendTimePair> RtcpSendTimeQueue;
+
class RtcpSenderFeedback {
public:
- virtual void OnReceivedReportBlock(const RtcpReportBlock& report_block) = 0;
-
- virtual void OnReceivedIntraFrameRequest() = 0;
-
- virtual void OnReceivedRpsi(uint8 payload_type, uint64 picture_id) = 0;
+ virtual void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) = 0;
- virtual void OnReceivedRemb(uint32 bitrate) = 0;
+ virtual ~RtcpSenderFeedback() {}
+};
- virtual void OnReceivedNackRequest(
- const std::list<uint16>& nack_sequence_numbers) = 0;
+class RtcpReceivedLog {
+ public:
+ virtual void OnReceivedReceiverLog(
+ const RtcpReceiverLogMessage& receiver_log) = 0;
- virtual void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) = 0;
+ virtual void OnReceivedSenderLog(
+ const RtcpSenderLogMessage& sender_log) = 0;
- virtual ~RtcpSenderFeedback() {}
+ virtual ~RtcpReceivedLog() {}
};
class RtpSenderStatistics {
@@ -65,15 +71,15 @@ class RtpReceiverStatistics {
class Rtcp {
public:
- Rtcp(base::TickClock* clock,
+ Rtcp(scoped_refptr<CastEnvironment> cast_environment,
RtcpSenderFeedback* sender_feedback,
PacedPacketSender* paced_packet_sender,
RtpSenderStatistics* rtp_sender_statistics,
RtpReceiverStatistics* rtp_receiver_statistics,
RtcpMode rtcp_mode,
const base::TimeDelta& rtcp_interval,
- bool sending_media,
uint32 local_ssrc,
+ uint32 remote_ssrc,
const std::string& c_name);
virtual ~Rtcp();
@@ -83,10 +89,18 @@ class Rtcp {
static uint32 GetSsrcOfSender(const uint8* rtcp_buffer, size_t length);
base::TimeTicks TimeToSendNextRtcpReport();
- void SendRtcpReport(uint32 media_ssrc);
- void SendRtcpPli(uint32 media_ssrc);
- void SendRtcpCast(const RtcpCastMessage& cast_message);
- void SetRemoteSSRC(uint32 ssrc);
+  // |sender_log_message| is optional; if it is NULL no log messages are
+  // attached and a plain RTCP sender report is sent.
+ void SendRtcpFromRtpSender(const RtcpSenderLogMessage* sender_log_message);
+
+  // |cast_message| and |receiver_log| are optional; if |cast_message| is
+  // provided the RTCP receiver report will append a Cast message containing
+  // ACKs and NACKs; if |receiver_log| is provided the RTCP receiver report
+  // will append the log messages. If neither is provided a plain RTCP
+  // receiver report is sent.
+ void SendRtcpFromRtpReceiver(const RtcpCastMessage* cast_message,
+ const RtcpReceiverLogMessage* receiver_log);
void IncomingRtcpPacket(const uint8* rtcp_buffer, size_t length);
bool Rtt(base::TimeDelta* rtt, base::TimeDelta* avg_rtt,
@@ -125,11 +139,14 @@ class Rtcp {
void UpdateNextTimeToSendRtcp();
- base::TickClock* const clock_; // Not owned by this class.
+ void SaveLastSentNtpTime(const base::TimeTicks& now, uint32 last_ntp_seconds,
+ uint32 last_ntp_fraction);
+
+ scoped_refptr<CastEnvironment> cast_environment_;
const base::TimeDelta rtcp_interval_;
const RtcpMode rtcp_mode_;
- const bool sending_media_;
const uint32 local_ssrc_;
+ const uint32 remote_ssrc_;
// Not owned by this class.
RtpSenderStatistics* const rtp_sender_statistics_;
@@ -141,10 +158,8 @@ class Rtcp {
scoped_ptr<RtcpReceiver> rtcp_receiver_;
base::TimeTicks next_time_to_send_rtcp_;
-
- base::TimeTicks time_last_report_sent_;
- uint32 last_report_sent_;
-
+ RtcpSendTimeMap last_reports_sent_map_;
+ RtcpSendTimeQueue last_reports_sent_queue_;
base::TimeTicks time_last_report_received_;
uint32 last_report_received_;
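The single SendRtcp() entry point is replaced by one method per role. A hedged usage sketch; the |rtcp| instance and |remote_ssrc| are placeholders assumed to be set up the way rtcp_unittest.cc does it:

// Illustrative only; |rtcp| and |remote_ssrc| are placeholders.
// Sender side: emit a sender report, optionally with a sender frame log.
RtcpSenderLogMessage sender_log;  // Filled with RtcpSenderFrameLogMessage items.
rtcp.SendRtcpFromRtpSender(sender_log.empty() ? NULL : &sender_log);

// Receiver side: emit a receiver report, optionally with Cast ACK/NACK
// feedback and the receiver event log; pass NULL for whatever is unavailable.
RtcpCastMessage cast_message(remote_ssrc);
RtcpReceiverLogMessage receiver_log;
rtcp.SendRtcpFromRtpReceiver(&cast_message, &receiver_log);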
diff --git a/media/cast/rtcp/rtcp_defines.h b/media/cast/rtcp/rtcp_defines.h
index d2e7c90d43..0277bd1fea 100644
--- a/media/cast/rtcp/rtcp_defines.h
+++ b/media/cast/rtcp/rtcp_defines.h
@@ -11,20 +11,58 @@
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
+#include "media/cast/logging/logging_defines.h"
namespace media {
namespace cast {
+// Handles the per-frame ACK and NACK messages.
class RtcpCastMessage {
public:
explicit RtcpCastMessage(uint32 media_ssrc);
~RtcpCastMessage();
uint32 media_ssrc_;
- uint8 ack_frame_id_;
+ uint32 ack_frame_id_;
MissingFramesAndPacketsMap missing_frames_and_packets_;
};
+// Log messages from sender to receiver.
+enum RtcpSenderFrameStatus {
+ kRtcpSenderFrameStatusUnknown = 0,
+ kRtcpSenderFrameStatusDroppedByEncoder = 1,
+ kRtcpSenderFrameStatusDroppedByFlowControl = 2,
+ kRtcpSenderFrameStatusSentToNetwork = 3,
+};
+
+struct RtcpSenderFrameLogMessage {
+ RtcpSenderFrameStatus frame_status;
+ uint32 rtp_timestamp;
+};
+
+typedef std::list<RtcpSenderFrameLogMessage> RtcpSenderLogMessage;
+
+// Log messages from receiver to sender.
+struct RtcpReceiverEventLogMessage {
+ CastLoggingEvent type;
+ base::TimeTicks event_timestamp;
+ base::TimeDelta delay_delta;
+ uint16 packet_id;
+};
+
+typedef std::list<RtcpReceiverEventLogMessage> RtcpReceiverEventLogMessages;
+
+class RtcpReceiverFrameLogMessage {
+ public:
+ explicit RtcpReceiverFrameLogMessage(uint32 rtp_timestamp);
+ ~RtcpReceiverFrameLogMessage();
+
+ uint32 rtp_timestamp_;
+ RtcpReceiverEventLogMessages event_log_messages_;
+};
+
+typedef std::list<RtcpReceiverFrameLogMessage> RtcpReceiverLogMessage;
+
struct RtcpSenderInfo {
// First three members are used for lipsync.
// First two members are used for rtt.
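A receiver log is a list of per-frame entries, each carrying a list of events. A small sketch of assembling one frame's log, mirroring the usage in rtcp_receiver_unittest.cc; |rtp_timestamp| and |clock| are assumed to come from the caller:

// Illustrative only; not part of the patch.
RtcpReceiverFrameLogMessage frame_log(rtp_timestamp);

RtcpReceiverEventLogMessage ack_event;
ack_event.type = kAckSent;
ack_event.event_timestamp = clock->NowTicks();
ack_event.delay_delta = base::TimeDelta::FromMilliseconds(10);
frame_log.event_log_messages_.push_back(ack_event);

RtcpReceiverLogMessage receiver_log;
receiver_log.push_back(frame_log);
// |receiver_log| can now be passed to Rtcp::SendRtcpFromRtpReceiver().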
diff --git a/media/cast/rtcp/rtcp_receiver.cc b/media/cast/rtcp/rtcp_receiver.cc
index f5dc11f4a9..152ebc00d7 100644
--- a/media/cast/rtcp/rtcp_receiver.cc
+++ b/media/cast/rtcp/rtcp_receiver.cc
@@ -4,23 +4,70 @@
#include "media/cast/rtcp/rtcp_receiver.h"
-#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "media/cast/rtcp/rtcp_utility.h"
+namespace {
+
+media::cast::CastLoggingEvent TranslateToLogEventFromWireFormat(uint8 event) {
+ switch (event) {
+ case 1:
+ return media::cast::kAckSent;
+ case 2:
+ return media::cast::kAudioPlayoutDelay;
+ case 3:
+ return media::cast::kAudioFrameDecoded;
+ case 4:
+ return media::cast::kVideoFrameDecoded;
+ case 5:
+ return media::cast::kVideoRenderDelay;
+ case 6:
+ return media::cast::kPacketReceived;
+ default:
+      // If the sender adds new log events we will end up here until the
+      // receiver is updated to understand them.
+ VLOG(1) << "Unexpected log message received: " << static_cast<int>(event);
+ NOTREACHED();
+ return media::cast::kUnknown;
+ }
+}
+
+media::cast::RtcpSenderFrameStatus TranslateToFrameStatusFromWireFormat(
+ uint8 status) {
+ switch (status) {
+ case 0:
+ return media::cast::kRtcpSenderFrameStatusUnknown;
+ case 1:
+ return media::cast::kRtcpSenderFrameStatusDroppedByEncoder;
+ case 2:
+ return media::cast::kRtcpSenderFrameStatusDroppedByFlowControl;
+ case 3:
+ return media::cast::kRtcpSenderFrameStatusSentToNetwork;
+ default:
+      // If the sender adds new frame statuses we will end up here until the
+      // receiver is updated to understand them.
+      VLOG(1) << "Unexpected status received: " << static_cast<int>(status);
+      NOTREACHED();
+ return media::cast::kRtcpSenderFrameStatusUnknown;
+ }
+}
+
+} // namespace
+
namespace media {
namespace cast {
-RtcpReceiver::RtcpReceiver(RtcpSenderFeedback* sender_feedback,
+RtcpReceiver::RtcpReceiver(scoped_refptr<CastEnvironment> cast_environment,
+ RtcpSenderFeedback* sender_feedback,
RtcpReceiverFeedback* receiver_feedback,
RtcpRttFeedback* rtt_feedback,
uint32 local_ssrc)
- : ssrc_(local_ssrc),
+ : ssrc_(local_ssrc),
remote_ssrc_(0),
sender_feedback_(sender_feedback),
receiver_feedback_(receiver_feedback),
- rtt_feedback_(rtt_feedback) {
-}
+ rtt_feedback_(rtt_feedback),
+ cast_environment_(cast_environment) {}
RtcpReceiver::~RtcpReceiver() {}
@@ -67,15 +114,18 @@ void RtcpReceiver::IncomingRtcpPacket(RtcpParser* rtcp_parser) {
case kRtcpPayloadSpecificAppCode:
HandlePayloadSpecificApp(rtcp_parser);
break;
+ case kRtcpApplicationSpecificCastReceiverLogCode:
+ HandleApplicationSpecificCastReceiverLog(rtcp_parser);
+ break;
+ case kRtcpApplicationSpecificCastSenderLogCode:
+ HandleApplicationSpecificCastSenderLog(rtcp_parser);
+ break;
case kRtcpPayloadSpecificRembCode:
case kRtcpPayloadSpecificRembItemCode:
- // Ignore this until we want to support interop with webrtc.
- rtcp_parser->Iterate();
- break;
case kRtcpPayloadSpecificCastCode:
case kRtcpPayloadSpecificCastNackItemCode:
- rtcp_parser->Iterate();
- break;
+ case kRtcpApplicationSpecificCastReceiverLogFrameCode:
+ case kRtcpApplicationSpecificCastReceiverLogEventCode:
case kRtcpNotValidCode:
case kRtcpReportBlockItemCode:
case kRtcpSdesChunkCode:
@@ -85,7 +135,7 @@ void RtcpReceiver::IncomingRtcpPacket(RtcpParser* rtcp_parser) {
case kRtcpXrDlrrCode:
case kRtcpXrUnknownItemCode:
rtcp_parser->Iterate();
- DCHECK(false) << "Invalid state";
+ NOTREACHED() << "Invalid state";
break;
}
field_type = rtcp_parser->FieldType();
@@ -101,8 +151,7 @@ void RtcpReceiver::HandleSenderReport(RtcpParser* rtcp_parser) {
// Synchronization source identifier for the originator of this SR packet.
uint32 remote_ssrc = rtcp_field.sender_report.sender_ssrc;
- TRACE_EVENT_INSTANT1("cast_rtcp", "SR", TRACE_EVENT_SCOPE_THREAD,
- "remote_ssrc", remote_ssrc);
+ VLOG(1) << "Cast RTCP received SR from SSRC " << remote_ssrc;
if (remote_ssrc_ == remote_ssrc) {
RtcpSenderInfo remote_sender_info;
@@ -135,8 +184,7 @@ void RtcpReceiver::HandleReceiverReport(RtcpParser* rtcp_parser) {
uint32 remote_ssrc = rtcp_field.receiver_report.sender_ssrc;
- TRACE_EVENT_INSTANT1("cast_rtcp", "RR", TRACE_EVENT_SCOPE_THREAD,
- "remote_ssrc", remote_ssrc);
+ VLOG(1) << "Cast RTCP received RR from SSRC " << remote_ssrc;
rtcp_field_type = rtcp_parser->Iterate();
while (rtcp_field_type == kRtcpReportBlockItemCode) {
@@ -163,16 +211,11 @@ void RtcpReceiver::HandleReportBlock(const RtcpField* rtcp_field,
// This block is not for us ignore it.
return;
}
- TRACE_EVENT_INSTANT2("cast_rtcp", "RB", TRACE_EVENT_SCOPE_THREAD,
- "remote_ssrc", remote_ssrc,
- "ssrc", ssrc_);
-
- TRACE_COUNTER_ID1("cast_rtcp", "RtcpReceiver::FractionLost",
- rb.ssrc, rb.fraction_lost);
- TRACE_COUNTER_ID1("cast_rtcp", "RtcpReceiver::CumulativeNumberOfPacketsLost",
- rb.ssrc, rb.cumulative_number_of_packets_lost);
- TRACE_COUNTER_ID1("cast_rtcp", "RtcpReceiver::Jitter",
- rb.ssrc, rb.jitter);
+ VLOG(1) << "Cast RTCP received RB from SSRC " << remote_ssrc;
+ cast_environment_->Logging()->InsertGenericEvent(kPacketLoss,
+ rb.fraction_lost);
+ cast_environment_->Logging()->InsertGenericEvent(kJitterMs,
+ rb.jitter);
RtcpReportBlock report_block;
report_block.remote_ssrc = remote_ssrc;
@@ -185,9 +228,6 @@ void RtcpReceiver::HandleReportBlock(const RtcpField* rtcp_field,
report_block.last_sr = rb.last_sender_report;
report_block.delay_since_last_sr = rb.delay_last_sender_report;
- if (sender_feedback_) {
- sender_feedback_->OnReceivedReportBlock(report_block);
- }
if (rtt_feedback_) {
rtt_feedback_->OnReceivedDelaySinceLastReport(rb.ssrc,
rb.last_sender_report,
@@ -205,8 +245,7 @@ void RtcpReceiver::HandleSDES(RtcpParser* rtcp_parser) {
void RtcpReceiver::HandleSDESChunk(RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
- TRACE_EVENT_INSTANT1("cast_rtcp", "SDES", TRACE_EVENT_SCOPE_THREAD,
- "cname", TRACE_STR_COPY(rtcp_field.c_name.name));
+ VLOG(1) << "Cast RTCP received SDES with cname " << rtcp_field.c_name.name;
}
void RtcpReceiver::HandleXr(RtcpParser* rtcp_parser) {
@@ -263,8 +302,11 @@ void RtcpReceiver::HandleDlrr(RtcpParser* rtcp_parser) {
void RtcpReceiver::HandleNACK(RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
if (ssrc_ != rtcp_field.nack.media_ssrc) {
- // Not to us.
- rtcp_parser->Iterate();
+ RtcpFieldTypes field_type;
+ // Message not to us. Iterate until we have passed this message.
+ do {
+ field_type = rtcp_parser->Iterate();
+ } while (field_type == kRtcpGenericRtpFeedbackNackItemCode);
return;
}
std::list<uint16> nackSequenceNumbers;
@@ -274,9 +316,6 @@ void RtcpReceiver::HandleNACK(RtcpParser* rtcp_parser) {
HandleNACKItem(&rtcp_field, &nackSequenceNumbers);
field_type = rtcp_parser->Iterate();
}
- if (sender_feedback_) {
- sender_feedback_->OnReceivedNackRequest(nackSequenceNumbers);
- }
}
void RtcpReceiver::HandleNACKItem(const RtcpField* rtcp_field,
@@ -298,8 +337,7 @@ void RtcpReceiver::HandleBYE(RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
uint32 remote_ssrc = rtcp_field.bye.sender_ssrc;
if (remote_ssrc_ == remote_ssrc) {
- TRACE_EVENT_INSTANT1("cast_rtcp", "BYE", TRACE_EVENT_SCOPE_THREAD,
- "remote_ssrc", remote_ssrc);
+ VLOG(1) << "Cast RTCP received BYE from SSRC " << remote_ssrc;
}
rtcp_parser->Iterate();
}
@@ -308,9 +346,7 @@ void RtcpReceiver::HandlePLI(RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
if (ssrc_ == rtcp_field.pli.media_ssrc) {
// Received a signal that we need to send a new key frame.
- if (sender_feedback_) {
- sender_feedback_->OnReceivedIntraFrameRequest();
- }
+ VLOG(1) << "Cast RTCP received PLI on our SSRC " << ssrc_;
}
rtcp_parser->Iterate();
}
@@ -340,18 +376,22 @@ void RtcpReceiver::HandleRpsi(RtcpParser* rtcp_parser) {
rpsi_picture_id <<= 7; // Prepare next.
}
rpsi_picture_id += (rtcp_field.rpsi.native_bit_string[bytes - 1] & 0x7f);
- if (sender_feedback_) {
- sender_feedback_->OnReceivedRpsi(rtcp_field.rpsi.payload_type,
- rpsi_picture_id);
- }
+
+ VLOG(1) << "Cast RTCP received RPSI with picture_id " << rpsi_picture_id;
}
void RtcpReceiver::HandlePayloadSpecificApp(RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
uint32 remote_ssrc = rtcp_field.application_specific.sender_ssrc;
if (remote_ssrc_ != remote_ssrc) {
- // Message not to us.
- rtcp_parser->Iterate();
+ // Message not to us. Iterate until we have passed this message.
+ RtcpFieldTypes field_type;
+ do {
+ field_type = rtcp_parser->Iterate();
+ } while (field_type == kRtcpPayloadSpecificRembCode ||
+ field_type == kRtcpPayloadSpecificRembItemCode ||
+ field_type == kRtcpPayloadSpecificCastCode ||
+ field_type == kRtcpPayloadSpecificCastNackItemCode);
return;
}
@@ -381,19 +421,101 @@ void RtcpReceiver::HandlePayloadSpecificRembItem(RtcpParser* rtcp_parser) {
for (int i = 0; i < rtcp_field.remb_item.number_of_ssrcs; ++i) {
if (rtcp_field.remb_item.ssrcs[i] == ssrc_) {
// Found matching ssrc.
- if (sender_feedback_) {
- sender_feedback_->OnReceivedRemb(rtcp_field.remb_item.bitrate);
- }
+ VLOG(1) << "Cast RTCP received REMB with received_bitrate "
+ << rtcp_field.remb_item.bitrate;
return;
}
}
}
-void RtcpReceiver::HandlePayloadSpecificCastItem(RtcpParser* rtcp_parser) {
+void RtcpReceiver::HandleApplicationSpecificCastReceiverLog(
+ RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
+ uint32 remote_ssrc = rtcp_field.cast_receiver_log.sender_ssrc;
+ if (remote_ssrc_ != remote_ssrc) {
+ // Message not to us. Iterate until we have passed this message.
+ RtcpFieldTypes field_type;
+ do {
+ field_type = rtcp_parser->Iterate();
+ } while (field_type == kRtcpApplicationSpecificCastReceiverLogFrameCode ||
+ field_type == kRtcpApplicationSpecificCastReceiverLogEventCode);
+ return;
+ }
+ RtcpReceiverLogMessage receiver_log;
+ RtcpFieldTypes field_type = rtcp_parser->Iterate();
+ while (field_type == kRtcpApplicationSpecificCastReceiverLogFrameCode) {
+ RtcpReceiverFrameLogMessage frame_log(
+ rtcp_field.cast_receiver_log.rtp_timestamp);
+
+ field_type = rtcp_parser->Iterate();
+ while (field_type == kRtcpApplicationSpecificCastReceiverLogEventCode) {
+ HandleApplicationSpecificCastReceiverEventLog(rtcp_parser,
+ &frame_log.event_log_messages_);
+ field_type = rtcp_parser->Iterate();
+ }
+ receiver_log.push_back(frame_log);
+ }
+
+ if (receiver_feedback_ && !receiver_log.empty()) {
+ receiver_feedback_->OnReceivedReceiverLog(receiver_log);
+ }
+}
+
+void RtcpReceiver::HandleApplicationSpecificCastReceiverEventLog(
+ RtcpParser* rtcp_parser,
+ RtcpReceiverEventLogMessages* event_log_messages) {
+ const RtcpField& rtcp_field = rtcp_parser->Field();
+
+ RtcpReceiverEventLogMessage event_log;
+ event_log.type = TranslateToLogEventFromWireFormat(
+ rtcp_field.cast_receiver_log.event);
+ event_log.event_timestamp = base::TimeTicks() +
+ base::TimeDelta::FromMilliseconds(
+ rtcp_field.cast_receiver_log.event_timestamp_base +
+ rtcp_field.cast_receiver_log.event_timestamp_delta);
+ event_log.delay_delta = base::TimeDelta::FromMilliseconds(
+ rtcp_field.cast_receiver_log.delay_delta_or_packet_id);
+ event_log.packet_id =
+ rtcp_field.cast_receiver_log.delay_delta_or_packet_id;
+ event_log_messages->push_back(event_log);
+}
+
+void RtcpReceiver::HandleApplicationSpecificCastSenderLog(
+ RtcpParser* rtcp_parser) {
+ const RtcpField& rtcp_field = rtcp_parser->Field();
+ uint32 remote_ssrc = rtcp_field.cast_sender_log.sender_ssrc;
+
+ if (remote_ssrc_ != remote_ssrc) {
+ RtcpFieldTypes field_type;
+ // Message not to us. Iterate until we have passed this message.
+ do {
+ field_type = rtcp_parser->Iterate();
+ } while (field_type == kRtcpApplicationSpecificCastSenderLogCode);
+ return;
+ }
+ RtcpSenderLogMessage sender_log;
+
+ RtcpFieldTypes field_type = rtcp_parser->Iterate();
+ while (field_type == kRtcpApplicationSpecificCastSenderLogCode) {
+ const RtcpField& rtcp_field = rtcp_parser->Field();
+ RtcpSenderFrameLogMessage frame_log;
+ frame_log.frame_status =
+ TranslateToFrameStatusFromWireFormat(rtcp_field.cast_sender_log.status);
+ frame_log.rtp_timestamp = rtcp_field.cast_sender_log.rtp_timestamp;
+ sender_log.push_back(frame_log);
+ field_type = rtcp_parser->Iterate();
+ }
+ if (receiver_feedback_) {
+ receiver_feedback_->OnReceivedSenderLog(sender_log);
+ }
+}
+
+void RtcpReceiver::HandlePayloadSpecificCastItem(RtcpParser* rtcp_parser) {
+ const RtcpField& rtcp_field = rtcp_parser->Field();
RtcpCastMessage cast_message(remote_ssrc_);
- cast_message.ack_frame_id_ = rtcp_field.cast_item.last_frame_id;
+ cast_message.ack_frame_id_ = ack_frame_id_wrap_helper_.MapTo32bitsFrameId(
+ rtcp_field.cast_item.last_frame_id);
RtcpFieldTypes packet_type = rtcp_parser->Iterate();
while (packet_type == kRtcpPayloadSpecificCastNackItemCode) {
@@ -454,10 +576,9 @@ void RtcpReceiver::HandleFIR(RtcpParser* rtcp_parser) {
void RtcpReceiver::HandleFIRItem(const RtcpField* rtcp_field) {
// Is it our sender that is requested to generate a new keyframe.
- if (ssrc_ != rtcp_field->fir_item.ssrc) return;
- if (sender_feedback_) {
- sender_feedback_->OnReceivedIntraFrameRequest();
- }
+ if (ssrc_ != rtcp_field->fir_item.ssrc) return;
+
+ VLOG(1) << "Cast RTCP received FIR on our SSRC " << ssrc_;
}
} // namespace cast
diff --git a/media/cast/rtcp/rtcp_receiver.h b/media/cast/rtcp/rtcp_receiver.h
index 585f861174..337f6d0c22 100644
--- a/media/cast/rtcp/rtcp_receiver.h
+++ b/media/cast/rtcp/rtcp_receiver.h
@@ -22,6 +22,12 @@ class RtcpReceiverFeedback {
virtual void OnReceivedSendReportRequest() = 0;
+ virtual void OnReceivedReceiverLog(
+ const RtcpReceiverLogMessage& receiver_log) = 0;
+
+ virtual void OnReceivedSenderLog(
+ const RtcpSenderLogMessage& sender_log) = 0;
+
virtual ~RtcpReceiverFeedback() {}
};
@@ -37,7 +43,8 @@ class RtcpRttFeedback {
class RtcpReceiver {
public:
- explicit RtcpReceiver(RtcpSenderFeedback* sender_feedback,
+ explicit RtcpReceiver(scoped_refptr<CastEnvironment> cast_environment,
+ RtcpSenderFeedback* sender_feedback,
RtcpReceiverFeedback* receiver_feedback,
RtcpRttFeedback* rtt_feedback,
uint32 local_ssrc);
@@ -89,6 +96,12 @@ class RtcpReceiver {
const RtcpField* rtcp_field,
MissingFramesAndPacketsMap* missing_frames_and_packets);
+ void HandleApplicationSpecificCastReceiverLog(RtcpParser* rtcp_parser);
+ void HandleApplicationSpecificCastSenderLog(RtcpParser* rtcp_parser);
+ void HandleApplicationSpecificCastReceiverEventLog(
+ RtcpParser* rtcp_parser,
+ RtcpReceiverEventLogMessages* event_log_messages);
+
const uint32 ssrc_;
uint32 remote_ssrc_;
@@ -96,6 +109,9 @@ class RtcpReceiver {
RtcpSenderFeedback* const sender_feedback_;
RtcpReceiverFeedback* const receiver_feedback_;
RtcpRttFeedback* const rtt_feedback_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+
+ FrameIdWrapHelper ack_frame_id_wrap_helper_;
DISALLOW_COPY_AND_ASSIGN(RtcpReceiver);
};
diff --git a/media/cast/rtcp/rtcp_receiver_unittest.cc b/media/cast/rtcp/rtcp_receiver_unittest.cc
index 7062df0d2a..b5c5d2d388 100644
--- a/media/cast/rtcp/rtcp_receiver_unittest.cc
+++ b/media/cast/rtcp/rtcp_receiver_unittest.cc
@@ -3,11 +3,14 @@
// found in the LICENSE file.
#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/rtcp/mock_rtcp_receiver_feedback.h"
#include "media/cast/rtcp/mock_rtcp_sender_feedback.h"
#include "media/cast/rtcp/rtcp_receiver.h"
#include "media/cast/rtcp/rtcp_utility.h"
#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "media/cast/test/fake_task_runner.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -20,17 +23,10 @@ static const uint32 kSourceSsrc = 0x40506;
static const uint32 kUnknownSsrc = 0xDEAD;
static const std::string kCName("test@10.1.1.1");
+namespace {
class SenderFeedbackCastVerification : public RtcpSenderFeedback {
public:
SenderFeedbackCastVerification() : called_(false) {}
- virtual void OnReceivedReportBlock(
- const RtcpReportBlock& report_block) OVERRIDE {};
- virtual void OnReceivedIntraFrameRequest() OVERRIDE {};
- virtual void OnReceivedRpsi(uint8 payload_type,
- uint64 picture_id) OVERRIDE {};
- virtual void OnReceivedRemb(uint32 bitrate) OVERRIDE {};
- virtual void OnReceivedNackRequest(
- const std::list<uint16>& nack_sequence_numbers) OVERRIDE {};
virtual void OnReceivedCastFeedback(
const RtcpCastMessage& cast_feedback) OVERRIDE {
@@ -58,17 +54,109 @@ class SenderFeedbackCastVerification : public RtcpSenderFeedback {
called_ = true;
}
- bool called() { return called_; }
+ bool called() const { return called_; }
private:
bool called_;
};
+class RtcpReceiverCastLogVerification : public RtcpReceiverFeedback {
+ public:
+ RtcpReceiverCastLogVerification()
+ : called_on_received_sender_log_(false),
+ called_on_received_receiver_log_(false) {}
+
+ virtual void OnReceivedSenderReport(
+ const RtcpSenderInfo& remote_sender_info) OVERRIDE {};
+
+ virtual void OnReceiverReferenceTimeReport(
+ const RtcpReceiverReferenceTimeReport& remote_time_report) OVERRIDE {};
+
+ virtual void OnReceivedSendReportRequest() OVERRIDE {};
+
+ virtual void OnReceivedReceiverLog(
+ const RtcpReceiverLogMessage& receiver_log) OVERRIDE {
+ EXPECT_EQ(expected_receiver_log_.size(), receiver_log.size());
+ RtcpReceiverLogMessage::const_iterator expected_it =
+ expected_receiver_log_.begin();
+ RtcpReceiverLogMessage::const_iterator incoming_it = receiver_log.begin();
+ for (; incoming_it != receiver_log.end(); ++incoming_it) {
+ EXPECT_EQ(expected_it->rtp_timestamp_, incoming_it->rtp_timestamp_);
+ EXPECT_EQ(expected_it->event_log_messages_.size(),
+ incoming_it->event_log_messages_.size());
+
+ RtcpReceiverEventLogMessages::const_iterator event_incoming_it =
+ incoming_it->event_log_messages_.begin();
+ RtcpReceiverEventLogMessages::const_iterator event_expected_it =
+ expected_it->event_log_messages_.begin();
+ for (; event_incoming_it != incoming_it->event_log_messages_.end();
+ ++event_incoming_it, ++event_expected_it) {
+ EXPECT_EQ(event_expected_it->type, event_incoming_it->type);
+ EXPECT_EQ(event_expected_it->event_timestamp,
+ event_incoming_it->event_timestamp);
+ if (event_expected_it->type == kPacketReceived) {
+ EXPECT_EQ(event_expected_it->packet_id, event_incoming_it->packet_id);
+ } else {
+ EXPECT_EQ(event_expected_it->delay_delta,
+ event_incoming_it->delay_delta);
+ }
+ }
+ expected_receiver_log_.pop_front();
+ expected_it = expected_receiver_log_.begin();
+ }
+ called_on_received_receiver_log_ = true;
+ }
+
+ virtual void OnReceivedSenderLog(
+ const RtcpSenderLogMessage& sender_log) OVERRIDE {
+ EXPECT_EQ(expected_sender_log_.size(), sender_log.size());
+
+ RtcpSenderLogMessage::const_iterator expected_it =
+ expected_sender_log_.begin();
+ RtcpSenderLogMessage::const_iterator incoming_it = sender_log.begin();
+ for (; expected_it != expected_sender_log_.end();
+ ++expected_it, ++incoming_it) {
+ EXPECT_EQ(expected_it->frame_status, incoming_it->frame_status);
+ EXPECT_EQ(0xffffff & expected_it->rtp_timestamp,
+ incoming_it->rtp_timestamp);
+ }
+ called_on_received_sender_log_ = true;
+ }
+
+ bool OnReceivedSenderLogCalled() {
+ return called_on_received_sender_log_;
+ }
+
+ bool OnReceivedReceiverLogCalled() {
+ return called_on_received_receiver_log_ && expected_receiver_log_.empty();
+ }
+
+ void SetExpectedReceiverLog(const RtcpReceiverLogMessage& receiver_log) {
+ expected_receiver_log_ = receiver_log;
+ }
+
+ void SetExpectedSenderLog(const RtcpSenderLogMessage& sender_log) {
+ expected_sender_log_ = sender_log;
+ }
+
+ private:
+ RtcpReceiverLogMessage expected_receiver_log_;
+ RtcpSenderLogMessage expected_sender_log_;
+ bool called_on_received_sender_log_;
+ bool called_on_received_receiver_log_;
+};
+
+} // namespace
class RtcpReceiverTest : public ::testing::Test {
protected:
RtcpReceiverTest()
- : rtcp_receiver_(new RtcpReceiver(&mock_sender_feedback_,
+ : task_runner_(new test::FakeTaskRunner(&testing_clock_)),
+ cast_environment_(new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig())),
+ rtcp_receiver_(new RtcpReceiver(cast_environment_,
+ &mock_sender_feedback_,
&mock_receiver_feedback_,
&mock_rtt_feedback_,
kSourceSsrc)) {
@@ -82,14 +170,7 @@ class RtcpReceiverTest : public ::testing::Test {
OnReceiverReferenceTimeReport(_)).Times(0);
EXPECT_CALL(mock_receiver_feedback_,
OnReceivedSendReportRequest()).Times(0);
-
- EXPECT_CALL(mock_sender_feedback_, OnReceivedReportBlock(_)).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedIntraFrameRequest()).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedRpsi(_, _)).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedRemb(_)).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedNackRequest(_)).Times(0);
EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(0);
-
EXPECT_CALL(mock_rtt_feedback_,
OnReceivedDelaySinceLastReport(_, _, _)).Times(0);
@@ -104,7 +185,7 @@ class RtcpReceiverTest : public ::testing::Test {
expected_report_block_.fraction_lost = kLoss >> 24;
expected_report_block_.cumulative_lost = kLoss & 0xffffff;
expected_report_block_.extended_high_sequence_number = kExtendedMax;
- expected_report_block_.jitter = kJitter;
+ expected_report_block_.jitter = kTestJitter;
expected_report_block_.last_sr = kLastSr;
expected_report_block_.delay_since_last_sr = kDelayLastSr;
expected_receiver_reference_report_.remote_ssrc = kSenderSsrc;
@@ -118,6 +199,9 @@ class RtcpReceiverTest : public ::testing::Test {
rtcp_receiver_->IncomingRtcpPacket(&rtcp_parser);
}
+ base::SimpleTestTickClock testing_clock_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_refptr<CastEnvironment> cast_environment_;
MockRtcpReceiverFeedback mock_receiver_feedback_;
MockRtcpRttFeedback mock_rtt_feedback_;
MockRtcpSenderFeedback mock_sender_feedback_;
@@ -157,9 +241,6 @@ TEST_F(RtcpReceiverTest, InjectReceiveReportPacket) {
// local ssrc.
InjectRtcpPacket(p1.Packet(), p1.Length());
- EXPECT_CALL(mock_sender_feedback_,
- OnReceivedReportBlock(expected_report_block_)).Times(1);
-
EXPECT_CALL(mock_rtt_feedback_,
OnReceivedDelaySinceLastReport(kSourceSsrc,
kLastSr,
@@ -195,8 +276,6 @@ TEST_F(RtcpReceiverTest, InjectSenderReportWithReportBlockPacket) {
InjectRtcpPacket(p1.Packet(), p1.Length());
EXPECT_CALL(mock_receiver_feedback_, OnReceivedSenderReport(_)).Times(0);
- EXPECT_CALL(mock_sender_feedback_,
- OnReceivedReportBlock(expected_report_block_)).Times(1);
EXPECT_CALL(mock_rtt_feedback_,
OnReceivedDelaySinceLastReport(kSourceSsrc,
kLastSr,
@@ -216,8 +295,6 @@ TEST_F(RtcpReceiverTest, InjectSenderReportWithReportBlockPacket) {
EXPECT_CALL(mock_receiver_feedback_,
OnReceivedSenderReport(expected_sender_info_)).Times(1);
- EXPECT_CALL(mock_sender_feedback_,
- OnReceivedReportBlock(expected_report_block_)).Times(1);
EXPECT_CALL(mock_rtt_feedback_,
OnReceivedDelaySinceLastReport(kSourceSsrc,
kLastSr,
@@ -270,8 +347,6 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithRrtr) {
// local ssrc.
InjectRtcpPacket(p1.Packet(), p1.Length());
- EXPECT_CALL(mock_sender_feedback_,
- OnReceivedReportBlock(expected_report_block_)).Times(1);
EXPECT_CALL(mock_rtt_feedback_,
OnReceivedDelaySinceLastReport(kSourceSsrc,
kLastSr,
@@ -302,13 +377,10 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithIntraFrameRequest) {
// local ssrc.
InjectRtcpPacket(p1.Packet(), p1.Length());
- EXPECT_CALL(mock_sender_feedback_,
- OnReceivedReportBlock(expected_report_block_)).Times(1);
EXPECT_CALL(mock_rtt_feedback_,
OnReceivedDelaySinceLastReport(kSourceSsrc,
kLastSr,
kDelayLastSr)).Times(1);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedIntraFrameRequest()).Times(1);
TestRtcpPacketBuilder p2;
p2.AddRr(kSenderSsrc, 1);
@@ -329,8 +401,6 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastFeedback) {
// local ssrc.
InjectRtcpPacket(p1.Packet(), p1.Length());
- EXPECT_CALL(mock_sender_feedback_,
- OnReceivedReportBlock(expected_report_block_)).Times(1);
EXPECT_CALL(mock_rtt_feedback_,
OnReceivedDelaySinceLastReport(kSourceSsrc,
kLastSr,
@@ -351,7 +421,8 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastFeedback) {
TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastVerification) {
SenderFeedbackCastVerification sender_feedback_cast_verification;
- RtcpReceiver rtcp_receiver(&sender_feedback_cast_verification,
+ RtcpReceiver rtcp_receiver(cast_environment_,
+ &sender_feedback_cast_verification,
&mock_receiver_feedback_,
&mock_rtt_feedback_,
kSourceSsrc);
@@ -376,5 +447,140 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastVerification) {
EXPECT_TRUE(sender_feedback_cast_verification.called());
}
+TEST_F(RtcpReceiverTest, InjectSenderReportWithCastSenderLogVerification) {
+ RtcpReceiverCastLogVerification cast_log_verification;
+ RtcpReceiver rtcp_receiver(cast_environment_,
+ &mock_sender_feedback_,
+ &cast_log_verification,
+ &mock_rtt_feedback_,
+ kSourceSsrc);
+ rtcp_receiver.SetRemoteSSRC(kSenderSsrc);
+
+ RtcpSenderLogMessage sender_log;
+ for (int j = 0; j < 359; ++j) {
+ RtcpSenderFrameLogMessage sender_frame_log;
+ sender_frame_log.frame_status = kRtcpSenderFrameStatusSentToNetwork;
+ sender_frame_log.rtp_timestamp = kRtpTimestamp + j * 90;
+ sender_log.push_back(sender_frame_log);
+ }
+ cast_log_verification.SetExpectedSenderLog(sender_log);
+
+ TestRtcpPacketBuilder p;
+ p.AddSr(kSenderSsrc, 0);
+ p.AddSdesCname(kSenderSsrc, kCName);
+ p.AddSenderLog(kSenderSsrc);
+
+ for (int i = 0; i < 359; ++i) {
+ p.AddSenderFrameLog(kRtcpSenderFrameStatusSentToNetwork,
+ kRtpTimestamp + i * 90);
+ }
+ RtcpParser rtcp_parser(p.Packet(), p.Length());
+ rtcp_receiver.IncomingRtcpPacket(&rtcp_parser);
+
+ EXPECT_TRUE(cast_log_verification.OnReceivedSenderLogCalled());
+}
+
+TEST_F(RtcpReceiverTest, InjectReceiverReportWithReceiverLogVerificationBase) {
+ static const uint32 kTimeBaseMs = 12345678;
+ static const uint32 kTimeDelayMs = 10;
+ static const uint32 kDelayDeltaMs = 123;
+ base::SimpleTestTickClock testing_clock;
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeBaseMs));
+
+ RtcpReceiverCastLogVerification cast_log_verification;
+ RtcpReceiver rtcp_receiver(cast_environment_,
+ &mock_sender_feedback_,
+ &cast_log_verification,
+ &mock_rtt_feedback_,
+ kSourceSsrc);
+ rtcp_receiver.SetRemoteSSRC(kSenderSsrc);
+
+ RtcpReceiverLogMessage receiver_log;
+ RtcpReceiverFrameLogMessage frame_log(kRtpTimestamp);
+ RtcpReceiverEventLogMessage event_log;
+
+ event_log.type = kAckSent;
+ event_log.event_timestamp = testing_clock.NowTicks();
+ event_log.delay_delta = base::TimeDelta::FromMilliseconds(kDelayDeltaMs);
+ frame_log.event_log_messages_.push_back(event_log);
+
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
+ event_log.type = kPacketReceived;
+ event_log.event_timestamp = testing_clock.NowTicks();
+ event_log.packet_id = kLostPacketId1;
+ frame_log.event_log_messages_.push_back(event_log);
+ receiver_log.push_back(frame_log);
+
+ cast_log_verification.SetExpectedReceiverLog(receiver_log);
+
+ TestRtcpPacketBuilder p;
+ p.AddRr(kSenderSsrc, 1);
+ p.AddRb(kSourceSsrc);
+ p.AddReceiverLog(kSenderSsrc);
+ p.AddReceiverFrameLog(kRtpTimestamp, 2, kTimeBaseMs);
+ p.AddReceiverEventLog(kDelayDeltaMs, 1, 0);
+ p.AddReceiverEventLog(kLostPacketId1, 6, kTimeDelayMs);
+
+ EXPECT_CALL(mock_rtt_feedback_,
+ OnReceivedDelaySinceLastReport(kSourceSsrc, kLastSr, kDelayLastSr)).
+ Times(1);
+
+ RtcpParser rtcp_parser(p.Packet(), p.Length());
+ rtcp_receiver.IncomingRtcpPacket(&rtcp_parser);
+
+ EXPECT_TRUE(cast_log_verification.OnReceivedReceiverLogCalled());
+}
+
+TEST_F(RtcpReceiverTest, InjectReceiverReportWithReceiverLogVerificationMulti) {
+ static const uint32 kTimeBaseMs = 12345678;
+ static const uint32 kTimeDelayMs = 10;
+ static const uint32 kDelayDeltaMs = 123;
+ base::SimpleTestTickClock testing_clock;
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeBaseMs));
+
+ RtcpReceiverCastLogVerification cast_log_verification;
+ RtcpReceiver rtcp_receiver(cast_environment_,
+ &mock_sender_feedback_,
+ &cast_log_verification,
+ &mock_rtt_feedback_,
+ kSourceSsrc);
+ rtcp_receiver.SetRemoteSSRC(kSenderSsrc);
+
+ RtcpReceiverLogMessage receiver_log;
+
+ for (int j = 0; j < 100; ++j) {
+ RtcpReceiverFrameLogMessage frame_log(kRtpTimestamp);
+ RtcpReceiverEventLogMessage event_log;
+ event_log.type = kAckSent;
+ event_log.event_timestamp = testing_clock.NowTicks();
+ event_log.delay_delta = base::TimeDelta::FromMilliseconds(kDelayDeltaMs);
+ frame_log.event_log_messages_.push_back(event_log);
+ receiver_log.push_back(frame_log);
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
+ }
+
+ cast_log_verification.SetExpectedReceiverLog(receiver_log);
+
+ TestRtcpPacketBuilder p;
+ p.AddRr(kSenderSsrc, 1);
+ p.AddRb(kSourceSsrc);
+ p.AddReceiverLog(kSenderSsrc);
+ for (int i = 0; i < 100; ++i) {
+ p.AddReceiverFrameLog(kRtpTimestamp, 1, kTimeBaseMs + i * kTimeDelayMs);
+ p.AddReceiverEventLog(kDelayDeltaMs, 1, 0);
+ }
+
+ EXPECT_CALL(mock_rtt_feedback_,
+ OnReceivedDelaySinceLastReport(kSourceSsrc, kLastSr, kDelayLastSr)).
+ Times(1);
+
+ RtcpParser rtcp_parser(p.Packet(), p.Length());
+ rtcp_receiver.IncomingRtcpPacket(&rtcp_parser);
+
+ EXPECT_TRUE(cast_log_verification.OnReceivedReceiverLogCalled());
+}
+
} // namespace cast
} // namespace media
diff --git a/media/cast/rtcp/rtcp_sender.cc b/media/cast/rtcp/rtcp_sender.cc
index cd1c50c579..76e81e06b6 100644
--- a/media/cast/rtcp/rtcp_sender.cc
+++ b/media/cast/rtcp/rtcp_sender.cc
@@ -7,8 +7,8 @@
#include <algorithm>
#include <vector>
-#include "base/debug/trace_event.h"
#include "base/logging.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/pacing/paced_sender.h"
#include "media/cast/rtcp/rtcp_utility.h"
#include "net/base/big_endian.h"
@@ -16,60 +16,87 @@
namespace media {
namespace cast {
-static const size_t kRtcpMaxNackFields = 253;
-static const size_t kRtcpMaxCastLossFields = 100;
-
-RtcpSender::RtcpSender(PacedPacketSender* outgoing_transport,
+RtcpSender::RtcpSender(scoped_refptr<CastEnvironment> cast_environment,
+ PacedPacketSender* outgoing_transport,
uint32 sending_ssrc,
const std::string& c_name)
: ssrc_(sending_ssrc),
c_name_(c_name),
- transport_(outgoing_transport) {
+ transport_(outgoing_transport),
+ cast_environment_(cast_environment) {
DCHECK_LT(c_name_.length(), kRtcpCnameSize) << "Invalid config";
}
RtcpSender::~RtcpSender() {}
-void RtcpSender::SendRtcp(uint32 packet_type_flags,
- const RtcpSenderInfo* sender_info,
- const RtcpReportBlock* report_block,
- uint32 pli_remote_ssrc,
- const RtcpDlrrReportBlock* dlrr,
- const RtcpReceiverReferenceTimeReport* rrtr,
- const RtcpCastMessage* cast_message) {
+void RtcpSender::SendRtcpFromRtpSender(uint32 packet_type_flags,
+ const RtcpSenderInfo* sender_info,
+ const RtcpDlrrReportBlock* dlrr,
+ const RtcpSenderLogMessage* sender_log) {
+ if (packet_type_flags & kRtcpRr ||
+ packet_type_flags & kRtcpPli ||
+ packet_type_flags & kRtcpRrtr ||
+ packet_type_flags & kRtcpCast ||
+ packet_type_flags & kRtcpReceiverLog ||
+ packet_type_flags & kRtcpRpsi ||
+ packet_type_flags & kRtcpRemb ||
+ packet_type_flags & kRtcpNack) {
+ NOTREACHED() << "Invalid argument";
+ }
+
std::vector<uint8> packet;
packet.reserve(kIpPacketSize);
if (packet_type_flags & kRtcpSr) {
DCHECK(sender_info) << "Invalid argument";
- BuildSR(*sender_info, report_block, &packet);
+ BuildSR(*sender_info, NULL, &packet);
BuildSdec(&packet);
- } else if (packet_type_flags & kRtcpRr) {
- BuildRR(report_block, &packet);
- if (!c_name_.empty()) {
- BuildSdec(&packet);
- }
- }
- if (packet_type_flags & kRtcpPli) {
- BuildPli(pli_remote_ssrc, &packet);
}
if (packet_type_flags & kRtcpBye) {
BuildBye(&packet);
}
- if (packet_type_flags & kRtcpRpsi) {
- // Implement this for webrtc interop.
- NOTIMPLEMENTED();
+ if (packet_type_flags & kRtcpDlrr) {
+ DCHECK(dlrr) << "Invalid argument";
+ BuildDlrrRb(dlrr, &packet);
}
- if (packet_type_flags & kRtcpRemb) {
- // Implement this for webrtc interop.
- NOTIMPLEMENTED();
+ if (packet_type_flags & kRtcpSenderLog) {
+ DCHECK(sender_log) << "Invalid argument";
+ BuildSenderLog(sender_log, &packet);
+ }
+ if (packet.empty())
+    return;  // Sanity check: don't send empty packets.
+
+ transport_->SendRtcpPacket(packet);
+}
+
+void RtcpSender::SendRtcpFromRtpReceiver(
+ uint32 packet_type_flags,
+ const RtcpReportBlock* report_block,
+ const RtcpReceiverReferenceTimeReport* rrtr,
+ const RtcpCastMessage* cast_message,
+ const RtcpReceiverLogMessage* receiver_log) {
+ if (packet_type_flags & kRtcpSr ||
+ packet_type_flags & kRtcpDlrr ||
+ packet_type_flags & kRtcpSenderLog) {
+ NOTREACHED() << "Invalid argument";
}
- if (packet_type_flags & kRtcpNack) {
- // Implement this for webrtc interop.
+ if (packet_type_flags & kRtcpPli ||
+ packet_type_flags & kRtcpRpsi ||
+ packet_type_flags & kRtcpRemb ||
+ packet_type_flags & kRtcpNack) {
+ // Implement these for webrtc interop.
NOTIMPLEMENTED();
}
- if (packet_type_flags & kRtcpDlrr) {
- DCHECK(dlrr) << "Invalid argument";
- BuildDlrrRb(dlrr, &packet);
+ std::vector<uint8> packet;
+ packet.reserve(kIpPacketSize);
+
+ if (packet_type_flags & kRtcpRr) {
+ BuildRR(report_block, &packet);
+ if (!c_name_.empty()) {
+ BuildSdec(&packet);
+ }
+ }
+ if (packet_type_flags & kRtcpBye) {
+ BuildBye(&packet);
}
if (packet_type_flags & kRtcpRrtr) {
DCHECK(rrtr) << "Invalid argument";
@@ -79,7 +106,10 @@ void RtcpSender::SendRtcp(uint32 packet_type_flags,
DCHECK(cast_message) << "Invalid argument";
BuildCast(cast_message, &packet);
}
-
+ if (packet_type_flags & kRtcpReceiverLog) {
+ DCHECK(receiver_log) << "Invalid argument";
+ BuildReceiverLog(receiver_log, &packet);
+ }
  if (packet.empty()) return;  // Sanity check: don't send empty packets.
transport_->SendRtcpPacket(packet);
@@ -218,9 +248,6 @@ void RtcpSender::BuildPli(uint32 remote_ssrc,
big_endian_writer.WriteU16(2); // Used fixed length of 2.
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
big_endian_writer.WriteU32(remote_ssrc); // Add the remote SSRC.
- TRACE_EVENT_INSTANT2("cast_rtcp", "RtcpSender::PLI", TRACE_EVENT_SCOPE_THREAD,
- "remote_ssrc", remote_ssrc,
- "ssrc", ssrc_);
}
/*
@@ -324,8 +351,8 @@ void RtcpSender::BuildRemb(const RtcpRembMessage* remb,
for (; it != remb->remb_ssrcs.end(); ++it) {
big_endian_writer.WriteU32(*it);
}
- TRACE_COUNTER_ID1("cast_rtcp", "RtcpSender::RembBitrate", ssrc_,
- remb->remb_bitrate);
+ cast_environment_->Logging()->InsertGenericEvent(kRembBitrate,
+ remb->remb_bitrate);
}
void RtcpSender::BuildNack(const RtcpNackMessage* nack,
@@ -381,8 +408,6 @@ void RtcpSender::BuildNack(const RtcpNackMessage* nack,
}
DCHECK_GE(kRtcpMaxNackFields, number_of_nack_fields);
(*packet)[nack_size_pos] = static_cast<uint8>(2 + number_of_nack_fields);
- TRACE_COUNTER_ID1("cast_rtcp", "RtcpSender::NACK", ssrc_,
- nack->nack_list.size());
}
void RtcpSender::BuildBye(std::vector<uint8>* packet) const {
@@ -478,7 +503,7 @@ void RtcpSender::BuildCast(const RtcpCastMessage* cast,
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
big_endian_writer.WriteU32(cast->media_ssrc_); // Remote SSRC.
big_endian_writer.WriteU32(kCast);
- big_endian_writer.WriteU8(cast->ack_frame_id_);
+ big_endian_writer.WriteU8(static_cast<uint8>(cast->ack_frame_id_));
size_t cast_loss_field_pos = start_size + 17; // Save loss field position.
big_endian_writer.WriteU8(0); // Overwritten with number_of_loss_fields.
big_endian_writer.WriteU8(0); // Reserved.
@@ -499,7 +524,7 @@ void RtcpSender::BuildCast(const RtcpCastMessage* cast,
start_size = packet->size();
packet->resize(start_size + 4);
net::BigEndianWriter big_endian_nack_writer(&((*packet)[start_size]), 4);
- big_endian_nack_writer.WriteU8(frame_it->first);
+ big_endian_nack_writer.WriteU8(static_cast<uint8>(frame_it->first));
big_endian_nack_writer.WriteU16(kRtcpCastAllPacketsLost);
big_endian_nack_writer.WriteU8(0);
++number_of_loss_fields;
@@ -514,7 +539,7 @@ void RtcpSender::BuildCast(const RtcpCastMessage* cast,
&((*packet)[start_size]), 4);
// Write frame and packet id to buffer before calculating bitmask.
- big_endian_nack_writer.WriteU8(frame_it->first);
+ big_endian_nack_writer.WriteU8(static_cast<uint8>(frame_it->first));
big_endian_nack_writer.WriteU16(packet_id);
uint8 bitmask = 0;
@@ -536,10 +561,19 @@ void RtcpSender::BuildCast(const RtcpCastMessage* cast,
DCHECK_LE(number_of_loss_fields, kRtcpMaxCastLossFields);
(*packet)[cast_size_pos] = static_cast<uint8>(4 + number_of_loss_fields);
(*packet)[cast_loss_field_pos] = static_cast<uint8>(number_of_loss_fields);
+}
+
+void RtcpSender::BuildSenderLog(const RtcpSenderLogMessage* sender_log_message,
+ std::vector<uint8>* packet) const {
+ // TODO(pwestin): Implement.
+ NOTIMPLEMENTED();
+}
- // Frames with missing packets.
- TRACE_COUNTER_ID1("cast_rtcp", "RtcpSender::CastNACK", ssrc_,
- cast->missing_frames_and_packets_.size());
+void RtcpSender::BuildReceiverLog(
+ const RtcpReceiverLogMessage* receiver_log_message,
+ std::vector<uint8>* packet) const {
+ // TODO(pwestin): Implement.
+ NOTIMPLEMENTED();
}
} // namespace cast
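BuildCast() above packs the NACK information as 4-byte items: an 8-bit frame id, a 16-bit base packet id (or kRtcpCastAllPacketsLost for a whole frame) and an 8-bit bitmask covering the packet ids that follow the base. The grouping loop is mostly elided from this hunk; a plausible standalone sketch of it, with bit i marking base + 1 + i:

#include <cstdint>
#include <set>
#include <utility>
#include <vector>

// Illustrative only; not the exact loop in BuildCast().
std::vector<std::pair<uint16_t, uint8_t> > PackMissingPackets(
    const std::set<uint16_t>& missing_packets) {
  std::vector<std::pair<uint16_t, uint8_t> > items;
  std::set<uint16_t>::const_iterator it = missing_packets.begin();
  while (it != missing_packets.end()) {
    const uint16_t base = *it++;
    uint8_t bitmask = 0;
    // Fold packet ids within base + 1 .. base + 8 into the bitmask.
    while (it != missing_packets.end() && *it - base <= 8) {
      bitmask |= static_cast<uint8_t>(1 << (*it - base - 1));
      ++it;
    }
    items.push_back(std::make_pair(base, bitmask));
  }
  return items;
}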
diff --git a/media/cast/rtcp/rtcp_sender.h b/media/cast/rtcp/rtcp_sender.h
index 7dbbc0f95b..24915ddc83 100644
--- a/media/cast/rtcp/rtcp_sender.h
+++ b/media/cast/rtcp/rtcp_sender.h
@@ -18,19 +18,23 @@ namespace cast {
class RtcpSender {
public:
- RtcpSender(PacedPacketSender* const paced_packet_sender,
+ RtcpSender(scoped_refptr<CastEnvironment> cast_environment,
+ PacedPacketSender* const paced_packet_sender,
uint32 sending_ssrc,
const std::string& c_name);
virtual ~RtcpSender();
- void SendRtcp(uint32 packet_type_flags,
- const RtcpSenderInfo* sender_info,
- const RtcpReportBlock* report_block,
- uint32 pli_remote_ssrc,
- const RtcpDlrrReportBlock* dlrr,
- const RtcpReceiverReferenceTimeReport* rrtr,
- const RtcpCastMessage* cast_message);
+ void SendRtcpFromRtpSender(uint32 packet_type_flags,
+ const RtcpSenderInfo* sender_info,
+ const RtcpDlrrReportBlock* dlrr,
+ const RtcpSenderLogMessage* sender_log);
+
+ void SendRtcpFromRtpReceiver(uint32 packet_type_flags,
+ const RtcpReportBlock* report_block,
+ const RtcpReceiverReferenceTimeReport* rrtr,
+ const RtcpCastMessage* cast_message,
+ const RtcpReceiverLogMessage* receiver_log);
enum RtcpPacketType {
kRtcpSr = 0x0002,
@@ -45,6 +49,8 @@ class RtcpSender {
kRtcpRpsi = 0x8000,
kRtcpRemb = 0x10000,
kRtcpCast = 0x20000,
+ kRtcpSenderLog = 0x40000,
+ kRtcpReceiverLog = 0x80000,
};
private:
@@ -83,6 +89,12 @@ class RtcpSender {
void BuildCast(const RtcpCastMessage* cast_message,
std::vector<uint8>* packet) const;
+ void BuildSenderLog(const RtcpSenderLogMessage* sender_log_message,
+ std::vector<uint8>* packet) const;
+
+ void BuildReceiverLog(const RtcpReceiverLogMessage* receiver_log_message,
+ std::vector<uint8>* packet) const;
+
inline void BitrateToRembExponentBitrate(uint32 bitrate,
uint8* exponent,
uint32* mantissa) const {
@@ -102,6 +114,7 @@ class RtcpSender {
// Not owned by this class.
PacedPacketSender* transport_;
+ scoped_refptr<CastEnvironment> cast_environment_;
DISALLOW_COPY_AND_ASSIGN(RtcpSender);
};
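The packet type flags are now split between sender-only and receiver-only report types, and mixing them trips the NOTREACHED() guards in rtcp_sender.cc. Illustrative flag compositions for the two paths:

// Illustrative only.
// Sender report path.
uint32 sender_flags = RtcpSender::kRtcpSr |
                      RtcpSender::kRtcpDlrr |
                      RtcpSender::kRtcpSenderLog;
// Receiver report path.
uint32 receiver_flags = RtcpSender::kRtcpRr |
                        RtcpSender::kRtcpCast |
                        RtcpSender::kRtcpReceiverLog;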
diff --git a/media/cast/rtcp/rtcp_sender_unittest.cc b/media/cast/rtcp/rtcp_sender_unittest.cc
index c8c2f175db..7521c12ac9 100644
--- a/media/cast/rtcp/rtcp_sender_unittest.cc
+++ b/media/cast/rtcp/rtcp_sender_unittest.cc
@@ -3,18 +3,23 @@
// found in the LICENSE file.
#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/pacing/paced_sender.h"
#include "media/cast/rtcp/rtcp_sender.h"
#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "media/cast/test/fake_task_runner.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
namespace cast {
+namespace {
static const uint32 kSendingSsrc = 0x12345678;
static const uint32 kMediaSsrc = 0x87654321;
static const std::string kCName("test@10.1.1.1");
+} // namespace
class TestRtcpTransport : public PacedPacketSender {
public:
@@ -43,7 +48,7 @@ class TestRtcpTransport : public PacedPacketSender {
memcpy(expected_packet_, rtcp_buffer, length);
}
- int packet_count() { return packet_count_; }
+ int packet_count() const { return packet_count_; }
private:
uint8 expected_packet_[kIpPacketSize];
@@ -54,12 +59,20 @@ class TestRtcpTransport : public PacedPacketSender {
class RtcpSenderTest : public ::testing::Test {
protected:
RtcpSenderTest()
- : rtcp_sender_(new RtcpSender(&test_transport_,
+ : task_runner_(new test::FakeTaskRunner(&testing_clock_)),
+ cast_environment_(new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig())),
+ rtcp_sender_(new RtcpSender(cast_environment_,
+ &test_transport_,
kSendingSsrc,
kCName)) {
}
+ base::SimpleTestTickClock testing_clock_;
TestRtcpTransport test_transport_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_refptr<CastEnvironment> cast_environment_;
scoped_ptr<RtcpSender> rtcp_sender_;
};
@@ -77,13 +90,10 @@ TEST_F(RtcpSenderTest, RtcpSenderReport) {
p.AddSdesCname(kSendingSsrc, kCName);
test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
- rtcp_sender_->SendRtcp(RtcpSender::kRtcpSr,
- &sender_info,
- NULL,
- 0,
- NULL,
- NULL,
- NULL);
+ rtcp_sender_->SendRtcpFromRtpSender(RtcpSender::kRtcpSr,
+ &sender_info,
+ NULL,
+ NULL);
EXPECT_EQ(1, test_transport_.packet_count());
}
@@ -95,13 +105,8 @@ TEST_F(RtcpSenderTest, RtcpReceiverReport) {
p1.AddSdesCname(kSendingSsrc, kCName);
test_transport_.SetExpectedRtcpPacket(p1.Packet(), p1.Length());
- rtcp_sender_->SendRtcp(RtcpSender::kRtcpRr,
- NULL,
- NULL,
- 0,
- NULL,
- NULL,
- NULL);
+ rtcp_sender_->SendRtcpFromRtpReceiver(RtcpSender::kRtcpRr,
+ NULL, NULL, NULL, NULL);
EXPECT_EQ(1, test_transport_.packet_count());
@@ -118,19 +123,13 @@ TEST_F(RtcpSenderTest, RtcpReceiverReport) {
report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
report_block.fraction_lost = kLoss >> 24;
report_block.cumulative_lost = kLoss; // 24 bits valid.
- report_block.extended_high_sequence_number =
- kExtendedMax;
- report_block.jitter = kJitter;
+ report_block.extended_high_sequence_number = kExtendedMax;
+ report_block.jitter = kTestJitter;
report_block.last_sr = kLastSr;
report_block.delay_since_last_sr = kDelayLastSr;
- rtcp_sender_->SendRtcp(RtcpSender::kRtcpRr,
- NULL,
- &report_block,
- 0,
- NULL,
- NULL,
- NULL);
+ rtcp_sender_->SendRtcpFromRtpReceiver(RtcpSender::kRtcpRr, &report_block,
+ NULL, NULL, NULL);
EXPECT_EQ(2, test_transport_.packet_count());
}
@@ -155,13 +154,11 @@ TEST_F(RtcpSenderTest, RtcpSenderReportWithDlrr) {
dlrr_rb.last_rr = kLastRr;
dlrr_rb.delay_since_last_rr = kDelayLastRr;
- rtcp_sender_->SendRtcp(RtcpSender::kRtcpSr | RtcpSender::kRtcpDlrr,
- &sender_info,
- NULL,
- 0,
- &dlrr_rb,
- NULL,
- NULL);
+ rtcp_sender_->SendRtcpFromRtpSender(
+ RtcpSender::kRtcpSr | RtcpSender::kRtcpDlrr,
+ &sender_info,
+ &dlrr_rb,
+ NULL);
EXPECT_EQ(1, test_transport_.packet_count());
}
@@ -184,7 +181,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtr) {
report_block.cumulative_lost = kLoss; // 24 bits valid.
report_block.extended_high_sequence_number =
kExtendedMax;
- report_block.jitter = kJitter;
+ report_block.jitter = kTestJitter;
report_block.last_sr = kLastSr;
report_block.delay_since_last_sr = kDelayLastSr;
@@ -192,13 +189,12 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtr) {
rrtr.ntp_seconds = kNtpHigh;
rrtr.ntp_fraction = kNtpLow;
- rtcp_sender_->SendRtcp(RtcpSender::kRtcpRr | RtcpSender::kRtcpRrtr,
- NULL,
- &report_block,
- 0,
- NULL,
- &rrtr,
- NULL);
+ rtcp_sender_->SendRtcpFromRtpReceiver(
+ RtcpSender::kRtcpRr | RtcpSender::kRtcpRrtr,
+ &report_block,
+ &rrtr,
+ NULL,
+ NULL);
EXPECT_EQ(1, test_transport_.packet_count());
}
@@ -219,7 +215,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithCast) {
report_block.fraction_lost = kLoss >> 24;
report_block.cumulative_lost = kLoss; // 24 bits valid.
report_block.extended_high_sequence_number = kExtendedMax;
- report_block.jitter = kJitter;
+ report_block.jitter = kTestJitter;
report_block.last_sr = kLastSr;
report_block.delay_since_last_sr = kDelayLastSr;
@@ -235,45 +231,12 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithCast) {
cast_message.missing_frames_and_packets_[kFrameIdWithLostPackets] =
missing_packets;
- rtcp_sender_->SendRtcp(RtcpSender::kRtcpRr | RtcpSender::kRtcpCast,
- NULL,
- &report_block,
- 0,
- NULL,
- NULL,
- &cast_message);
-
- EXPECT_EQ(1, test_transport_.packet_count());
-}
-
-TEST_F(RtcpSenderTest, RtcpReceiverReportWithIntraFrameRequest) {
- // Receiver report with report block + c_name.
- TestRtcpPacketBuilder p;
- p.AddRr(kSendingSsrc, 1);
- p.AddRb(kMediaSsrc);
- p.AddSdesCname(kSendingSsrc, kCName);
- p.AddPli(kSendingSsrc, kMediaSsrc);
- test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
-
- RtcpReportBlock report_block;
- // Initialize remote_ssrc to a "clearly illegal" value.
- report_block.remote_ssrc = 0xDEAD;
- report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
- report_block.fraction_lost = kLoss >> 24;
- report_block.cumulative_lost = kLoss; // 24 bits valid.
- report_block.extended_high_sequence_number =
- kExtendedMax;
- report_block.jitter = kJitter;
- report_block.last_sr = kLastSr;
- report_block.delay_since_last_sr = kDelayLastSr;
-
- rtcp_sender_->SendRtcp(RtcpSender::kRtcpRr | RtcpSender::kRtcpPli,
- NULL,
- &report_block,
- kMediaSsrc,
- NULL,
- NULL,
- NULL);
+ rtcp_sender_->SendRtcpFromRtpReceiver(
+ RtcpSender::kRtcpRr | RtcpSender::kRtcpCast,
+ &report_block,
+ NULL,
+ &cast_message,
+ NULL);
EXPECT_EQ(1, test_transport_.packet_count());
}
diff --git a/media/cast/rtcp/rtcp_unittest.cc b/media/cast/rtcp/rtcp_unittest.cc
index ccbdcac863..8287d99927 100644
--- a/media/cast/rtcp/rtcp_unittest.cc
+++ b/media/cast/rtcp/rtcp_unittest.cc
@@ -4,11 +4,13 @@
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/pacing/paced_sender.h"
#include "media/cast/rtcp/mock_rtcp_receiver_feedback.h"
#include "media/cast/rtcp/mock_rtcp_sender_feedback.h"
#include "media/cast/rtcp/rtcp.h"
#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "media/cast/test/fake_task_runner.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -18,7 +20,6 @@ using testing::_;
static const uint32 kSenderSsrc = 0x10203;
static const uint32 kReceiverSsrc = 0x40506;
-static const uint32 kUnknownSsrc = 0xDEAD;
static const std::string kCName("test@10.1.1.1");
static const uint32 kRtcpIntervalMs = 500;
static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
@@ -27,14 +28,19 @@ static const int64 kAddedShortDelay= 100;
class LocalRtcpTransport : public PacedPacketSender {
public:
- explicit LocalRtcpTransport(base::SimpleTestTickClock* testing_clock)
- : short_delay_(false),
+ explicit LocalRtcpTransport(scoped_refptr<CastEnvironment> cast_environment,
+ base::SimpleTestTickClock* testing_clock)
+ : drop_packets_(false),
+ short_delay_(false),
testing_clock_(testing_clock) {}
void SetRtcpReceiver(Rtcp* rtcp) { rtcp_ = rtcp; }
void SetShortDelay() { short_delay_ = true; }
+ void SetDropPackets(bool drop_packets) { drop_packets_ = drop_packets; }
+
virtual bool SendRtcpPacket(const std::vector<uint8>& packet) OVERRIDE {
if (short_delay_) {
testing_clock_->Advance(
@@ -42,6 +48,8 @@ class LocalRtcpTransport : public PacedPacketSender {
} else {
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(kAddedDelay));
}
+ if (drop_packets_) return true;
+
rtcp_->IncomingRtcpPacket(&(packet[0]), packet.size());
return true;
}
@@ -55,32 +63,34 @@ class LocalRtcpTransport : public PacedPacketSender {
}
private:
+ bool drop_packets_;
bool short_delay_;
Rtcp* rtcp_;
base::SimpleTestTickClock* testing_clock_;
+ scoped_refptr<CastEnvironment> cast_environment_;
};
class RtcpPeer : public Rtcp {
public:
- RtcpPeer(base::TickClock* clock,
+ RtcpPeer(scoped_refptr<CastEnvironment> cast_environment,
RtcpSenderFeedback* sender_feedback,
PacedPacketSender* const paced_packet_sender,
RtpSenderStatistics* rtp_sender_statistics,
RtpReceiverStatistics* rtp_receiver_statistics,
RtcpMode rtcp_mode,
const base::TimeDelta& rtcp_interval,
- bool sending_media,
uint32 local_ssrc,
+ uint32 remote_ssrc,
const std::string& c_name)
- : Rtcp(clock,
+ : Rtcp(cast_environment,
sender_feedback,
paced_packet_sender,
rtp_sender_statistics,
rtp_receiver_statistics,
rtcp_mode,
rtcp_interval,
- sending_media,
local_ssrc,
+ remote_ssrc,
c_name) {
}
@@ -91,7 +101,11 @@ class RtcpPeer : public Rtcp {
class RtcpTest : public ::testing::Test {
protected:
RtcpTest()
- : transport_(&testing_clock_) {
+ : task_runner_(new test::FakeTaskRunner(&testing_clock_)),
+ cast_environment_(new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig())),
+ transport_(cast_environment_, &testing_clock_) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
}
@@ -99,15 +113,12 @@ class RtcpTest : public ::testing::Test {
virtual ~RtcpTest() {}
virtual void SetUp() {
- EXPECT_CALL(mock_sender_feedback_, OnReceivedReportBlock(_)).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedIntraFrameRequest()).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedRpsi(_, _)).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedRemb(_)).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedNackRequest(_)).Times(0);
EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(0);
}
base::SimpleTestTickClock testing_clock_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_refptr<CastEnvironment> cast_environment_;
LocalRtcpTransport transport_;
MockRtcpSenderFeedback mock_sender_feedback_;
};
@@ -115,15 +126,15 @@ class RtcpTest : public ::testing::Test {
TEST_F(RtcpTest, TimeToSend) {
base::TimeTicks start_time;
start_time += base::TimeDelta::FromMilliseconds(kStartMillisecond);
- Rtcp rtcp(&testing_clock_,
+ Rtcp rtcp(cast_environment_,
&mock_sender_feedback_,
&transport_,
NULL,
NULL,
kRtcpCompound,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- true, // Media sender.
kSenderSsrc,
+ kReceiverSsrc,
kCName);
transport_.SetRtcpReceiver(&rtcp);
EXPECT_LE(start_time, rtcp.TimeToSendNextRtcpReport());
@@ -136,73 +147,50 @@ TEST_F(RtcpTest, TimeToSend) {
}
TEST_F(RtcpTest, BasicSenderReport) {
- Rtcp rtcp(&testing_clock_,
+ Rtcp rtcp(cast_environment_,
&mock_sender_feedback_,
&transport_,
NULL,
NULL,
kRtcpCompound,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- true, // Media sender.
kSenderSsrc,
+ kReceiverSsrc,
kCName);
transport_.SetRtcpReceiver(&rtcp);
- rtcp.SendRtcpReport(kUnknownSsrc);
+ rtcp.SendRtcpFromRtpSender(NULL);
}
TEST_F(RtcpTest, BasicReceiverReport) {
- EXPECT_CALL(mock_sender_feedback_, OnReceivedReportBlock(_)).Times(1);
- Rtcp rtcp(&testing_clock_,
+ Rtcp rtcp(cast_environment_,
&mock_sender_feedback_,
&transport_,
NULL,
NULL,
kRtcpCompound,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- false, // Media receiver.
- kSenderSsrc,
- kCName);
- transport_.SetRtcpReceiver(&rtcp);
- rtcp.SetRemoteSSRC(kSenderSsrc);
- rtcp.SendRtcpReport(kSenderSsrc);
-}
-
-TEST_F(RtcpTest, BasicPli) {
- EXPECT_CALL(mock_sender_feedback_, OnReceivedReportBlock(_)).Times(1);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedIntraFrameRequest()).Times(1);
-
- // Media receiver.
- Rtcp rtcp(&testing_clock_,
- &mock_sender_feedback_,
- &transport_,
- NULL,
- NULL,
- kRtcpReducedSize,
- base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- false,
kSenderSsrc,
+ kReceiverSsrc,
kCName);
transport_.SetRtcpReceiver(&rtcp);
- rtcp.SetRemoteSSRC(kSenderSsrc);
- rtcp.SendRtcpPli(kSenderSsrc);
+ rtcp.SendRtcpFromRtpReceiver(NULL, NULL);
}
TEST_F(RtcpTest, BasicCast) {
EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(1);
// Media receiver.
- Rtcp rtcp(&testing_clock_,
+ Rtcp rtcp(cast_environment_,
&mock_sender_feedback_,
&transport_,
NULL,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- false,
+ kSenderSsrc,
kSenderSsrc,
kCName);
transport_.SetRtcpReceiver(&rtcp);
- rtcp.SetRemoteSSRC(kSenderSsrc);
RtcpCastMessage cast_message(kSenderSsrc);
cast_message.ack_frame_id_ = kAckFrameId;
PacketIdSet missing_packets;
@@ -212,45 +200,94 @@ TEST_F(RtcpTest, BasicCast) {
missing_packets.insert(kLostPacketId1);
missing_packets.insert(kLostPacketId2);
missing_packets.insert(kLostPacketId3);
- cast_message.missing_frames_and_packets_[
- kFrameIdWithLostPackets] = missing_packets;
- rtcp.SendRtcpCast(cast_message);
+ cast_message.missing_frames_and_packets_[kFrameIdWithLostPackets] =
+ missing_packets;
+ rtcp.SendRtcpFromRtpReceiver(&cast_message, NULL);
}
-TEST_F(RtcpTest, Rtt) {
+TEST_F(RtcpTest, RttReducedSizeRtcp) {
// Media receiver.
- LocalRtcpTransport receiver_transport(&testing_clock_);
- Rtcp rtcp_receiver(&testing_clock_,
+ LocalRtcpTransport receiver_transport(cast_environment_, &testing_clock_);
+ Rtcp rtcp_receiver(cast_environment_,
&mock_sender_feedback_,
&receiver_transport,
NULL,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- false,
kReceiverSsrc,
+ kSenderSsrc,
kCName);
// Media sender.
- LocalRtcpTransport sender_transport(&testing_clock_);
- Rtcp rtcp_sender(&testing_clock_,
+ LocalRtcpTransport sender_transport(cast_environment_, &testing_clock_);
+ Rtcp rtcp_sender(cast_environment_,
&mock_sender_feedback_,
&sender_transport,
NULL,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- true,
kSenderSsrc,
+ kReceiverSsrc,
kCName);
receiver_transport.SetRtcpReceiver(&rtcp_sender);
sender_transport.SetRtcpReceiver(&rtcp_receiver);
- rtcp_sender.SetRemoteSSRC(kReceiverSsrc);
- rtcp_receiver.SetRemoteSSRC(kSenderSsrc);
+ base::TimeDelta rtt;
+ base::TimeDelta avg_rtt;
+ base::TimeDelta min_rtt;
+ base::TimeDelta max_rtt;
+ EXPECT_FALSE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+
+ rtcp_sender.SendRtcpFromRtpSender(NULL);
+ rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
+ EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+ rtcp_sender.SendRtcpFromRtpSender(NULL);
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+
+ EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+}
- EXPECT_CALL(mock_sender_feedback_, OnReceivedReportBlock(_)).Times(2);
+TEST_F(RtcpTest, Rtt) {
+ // Media receiver.
+ LocalRtcpTransport receiver_transport(cast_environment_, &testing_clock_);
+ Rtcp rtcp_receiver(cast_environment_,
+ &mock_sender_feedback_,
+ &receiver_transport,
+ NULL,
+ NULL,
+ kRtcpCompound,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ kReceiverSsrc,
+ kSenderSsrc,
+ kCName);
+
+ // Media sender.
+ LocalRtcpTransport sender_transport(cast_environment_, &testing_clock_);
+ Rtcp rtcp_sender(cast_environment_,
+ &mock_sender_feedback_,
+ &sender_transport,
+ NULL,
+ NULL,
+ kRtcpCompound,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ kSenderSsrc,
+ kReceiverSsrc,
+ kCName);
+
+ receiver_transport.SetRtcpReceiver(&rtcp_sender);
+ sender_transport.SetRtcpReceiver(&rtcp_receiver);
base::TimeDelta rtt;
base::TimeDelta avg_rtt;
@@ -259,17 +296,17 @@ TEST_F(RtcpTest, Rtt) {
EXPECT_FALSE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- rtcp_sender.SendRtcpReport(kSenderSsrc);
- rtcp_receiver.SendRtcpReport(kSenderSsrc);
+ rtcp_sender.SendRtcpFromRtpSender(NULL);
+ rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
- rtcp_sender.SendRtcpReport(kSenderSsrc);
- EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ rtcp_sender.SendRtcpFromRtpSender(NULL);
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
@@ -277,24 +314,90 @@ TEST_F(RtcpTest, Rtt) {
receiver_transport.SetShortDelay();
sender_transport.SetShortDelay();
- rtcp_receiver.SendRtcpReport(kSenderSsrc);
+ rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
-
EXPECT_NEAR(kAddedDelay + kAddedShortDelay, rtt.InMilliseconds(), 1);
EXPECT_NEAR((kAddedShortDelay + 3 * kAddedDelay) / 2,
avg_rtt.InMilliseconds(),
1);
EXPECT_NEAR(kAddedDelay + kAddedShortDelay, min_rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
- rtcp_sender.SendRtcpReport(kSenderSsrc);
- EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ rtcp_sender.SendRtcpFromRtpSender(NULL);
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_NEAR(2 * kAddedShortDelay, rtt.InMilliseconds(), 1);
EXPECT_NEAR((2 * kAddedShortDelay + 2 * kAddedDelay) / 2,
avg_rtt.InMilliseconds(),
1);
EXPECT_NEAR(2 * kAddedShortDelay, min_rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+
+ rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
+ EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kAddedShortDelay, rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedShortDelay, min_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+
+ rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
+ EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kAddedShortDelay, rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedShortDelay, min_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+}
+
+TEST_F(RtcpTest, RttWithPacketLoss) {
+ // Media receiver.
+ LocalRtcpTransport receiver_transport(cast_environment_, &testing_clock_);
+ Rtcp rtcp_receiver(cast_environment_,
+ &mock_sender_feedback_,
+ &receiver_transport,
+ NULL,
+ NULL,
+ kRtcpReducedSize,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ kSenderSsrc,
+ kReceiverSsrc,
+ kCName);
+
+ // Media sender.
+ LocalRtcpTransport sender_transport(cast_environment_, &testing_clock_);
+ Rtcp rtcp_sender(cast_environment_,
+ &mock_sender_feedback_,
+ &sender_transport,
+ NULL,
+ NULL,
+ kRtcpReducedSize,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ kReceiverSsrc,
+ kSenderSsrc,
+ kCName);
+
+ receiver_transport.SetRtcpReceiver(&rtcp_sender);
+ sender_transport.SetRtcpReceiver(&rtcp_receiver);
+
+ rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
+ rtcp_sender.SendRtcpFromRtpSender(NULL);
+
+ base::TimeDelta rtt;
+ base::TimeDelta avg_rtt;
+ base::TimeDelta min_rtt;
+ base::TimeDelta max_rtt;
+ EXPECT_FALSE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+
+ receiver_transport.SetShortDelay();
+ sender_transport.SetShortDelay();
+ receiver_transport.SetDropPackets(true);
+
+ rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
+ rtcp_sender.SendRtcpFromRtpSender(NULL);
+
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(kAddedDelay + kAddedShortDelay, rtt.InMilliseconds(), 1);
}
TEST_F(RtcpTest, NtpAndTime) {
@@ -313,7 +416,7 @@ TEST_F(RtcpTest, NtpAndTime) {
base::TimeTicks out_1 = ConvertNtpToTimeTicks(ntp_seconds_1, ntp_fractions_1);
EXPECT_EQ(input_time, out_1); // Verify inverse.
- base::TimeDelta time_delta = base::TimeDelta::FromMilliseconds(1100);
+ base::TimeDelta time_delta = base::TimeDelta::FromMilliseconds(1000);
input_time += time_delta;
uint32 ntp_seconds_2 = 0;
@@ -326,19 +429,33 @@ TEST_F(RtcpTest, NtpAndTime) {
// Verify delta.
EXPECT_EQ((out_2 - out_1), time_delta);
EXPECT_EQ((ntp_seconds_2 - ntp_seconds_1), GG_UINT32_C(1));
- EXPECT_NEAR((ntp_fractions_2 - ntp_fractions_1), 0xffffffff / 10, 1);
+ EXPECT_NEAR(ntp_fractions_2, ntp_fractions_1, 1);
+
+ time_delta = base::TimeDelta::FromMilliseconds(500);
+ input_time += time_delta;
+
+ uint32 ntp_seconds_3 = 0;
+ uint32 ntp_fractions_3 = 0;
+
+ ConvertTimeTicksToNtp(input_time, &ntp_seconds_3, &ntp_fractions_3);
+ base::TimeTicks out_3 = ConvertNtpToTimeTicks(ntp_seconds_3, ntp_fractions_3);
+ EXPECT_EQ(input_time, out_3); // Verify inverse.
+
+ // Verify delta.
+ EXPECT_EQ((out_3 - out_2), time_delta);
+ EXPECT_NEAR((ntp_fractions_3 - ntp_fractions_2), 0xffffffff / 2, 1);
}
TEST_F(RtcpTest, WrapAround) {
- RtcpPeer rtcp_peer(&testing_clock_,
+ RtcpPeer rtcp_peer(cast_environment_,
&mock_sender_feedback_,
NULL,
NULL,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- false,
kReceiverSsrc,
+ kSenderSsrc,
kCName);
uint32 new_timestamp = 0;
uint32 old_timestamp = 0;
@@ -358,15 +475,15 @@ TEST_F(RtcpTest, WrapAround) {
}
TEST_F(RtcpTest, RtpTimestampInSenderTime) {
- RtcpPeer rtcp_peer(&testing_clock_,
+ RtcpPeer rtcp_peer(cast_environment_,
&mock_sender_feedback_,
NULL,
NULL,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- false,
kReceiverSsrc,
+ kSenderSsrc,
kCName);
int frequency = 32000;
uint32 rtp_timestamp = 64000;
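A standalone sketch, not part of this commit, of the NTP arithmetic behind the NtpAndTime expectations above: the fractional part of an NTP timestamp is a 32-bit binary fraction of a second, so the 500 ms step added to the test moves it by roughly 0xffffffff / 2.

#include <cstdint>
#include <cstdio>

int main() {
  const double kFractionsPerSecond = 4294967296.0;  // 2^32
  const double kDeltaSeconds = 0.5;                 // the 500 ms step in the test
  uint32_t fraction_delta =
      static_cast<uint32_t>(kDeltaSeconds * kFractionsPerSecond);
  std::printf("%u\n", fraction_delta);  // 2147483648, about 0xffffffff / 2
  return 0;
}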
diff --git a/media/cast/rtcp/rtcp_utility.cc b/media/cast/rtcp/rtcp_utility.cc
index 4f9d2ec769..daeaa8aace 100644
--- a/media/cast/rtcp/rtcp_utility.cc
+++ b/media/cast/rtcp/rtcp_utility.cc
@@ -56,6 +56,15 @@ RtcpFieldTypes RtcpParser::Iterate() {
case kStateBye:
IterateByeItem();
break;
+ case kStateApplicationSpecificCastReceiverFrameLog:
+ IterateCastReceiverLogFrame();
+ break;
+ case kStateApplicationSpecificCastReceiverEventLog:
+ IterateCastReceiverLogEvent();
+ break;
+ case kStateApplicationSpecificCastSenderLog:
+ IterateCastSenderLog();
+ break;
case kStateExtendedReportBlock:
IterateExtendedReportItem();
break;
@@ -123,6 +132,12 @@ void RtcpParser::IterateTopLevel() {
break;
}
return;
+ case kPacketTypeApplicationDefined:
+ if (!ParseApplicationDefined(header.IC)) {
+ // Nothing supported was found; continue to the next block.
+ break;
+ }
+ return;
case kPacketTypeGenericRtpFeedback: // Fall through!
case kPacketTypePayloadSpecific:
if (!ParseFeedBackCommon(header)) {
@@ -203,6 +218,21 @@ void RtcpParser::IteratePayloadSpecificCastNackItem() {
if (!success) Iterate();
}
+void RtcpParser::IterateCastReceiverLogFrame() {
+ bool success = ParseCastReceiverLogFrameItem();
+ if (!success) Iterate();
+}
+
+void RtcpParser::IterateCastReceiverLogEvent() {
+ bool success = ParseCastReceiverLogEventItem();
+ if (!success) Iterate();
+}
+
+void RtcpParser::IterateCastSenderLog() {
+ bool success = ParseCastSenderLogItem();
+ if (!success) Iterate();
+}
+
void RtcpParser::Validate() {
if (rtcp_data_ == NULL) return; // NOT VALID
@@ -466,6 +496,124 @@ bool RtcpParser::ParseByeItem() {
return true;
}
+bool RtcpParser::ParseApplicationDefined(uint8 subtype) {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 16 ||
+ !(subtype == kSenderLogSubtype || subtype == kReceiverLogSubtype)) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+
+ uint32 sender_ssrc;
+ uint32 name;
+
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.Skip(4); // Skip header.
+ big_endian_reader.ReadU32(&sender_ssrc);
+ big_endian_reader.ReadU32(&name);
+
+ if (name != kCast) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ rtcp_data_ += 12;
+ switch (subtype) {
+ case kSenderLogSubtype:
+ state_ = kStateApplicationSpecificCastSenderLog;
+ field_type_ = kRtcpApplicationSpecificCastSenderLogCode;
+ field_.cast_sender_log.sender_ssrc = sender_ssrc;
+ break;
+ case kReceiverLogSubtype:
+ state_ = kStateApplicationSpecificCastReceiverFrameLog;
+ field_type_ = kRtcpApplicationSpecificCastReceiverLogCode;
+ field_.cast_receiver_log.sender_ssrc = sender_ssrc;
+ break;
+ default:
+ NOTREACHED();
+ }
+ return true;
+}
+
+bool RtcpParser::ParseCastReceiverLogFrameItem() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 12) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ uint32 rtp_timestamp;
+ uint32 data;
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU32(&rtp_timestamp);
+ big_endian_reader.ReadU32(&data);
+
+ rtcp_data_ += 8;
+
+ field_.cast_receiver_log.rtp_timestamp = rtp_timestamp;
+ // We have 24 LSB of the event timestamp base on the wire.
+ field_.cast_receiver_log.event_timestamp_base = data & 0xffffff;
+
+ number_of_blocks_ = 1 + static_cast<uint8>(data >> 24);
+ state_ = kStateApplicationSpecificCastReceiverEventLog;
+ field_type_ = kRtcpApplicationSpecificCastReceiverLogFrameCode;
+ return true;
+}
+
+bool RtcpParser::ParseCastReceiverLogEventItem() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 4) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ if (number_of_blocks_ == 0) {
+ // Continue parsing the next receiver frame event.
+ state_ = kStateApplicationSpecificCastReceiverFrameLog;
+ return false;
+ }
+ number_of_blocks_--;
+
+ uint16 delay_delta_or_packet_id;
+ uint16 event_type_and_timestamp_delta;
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU16(&delay_delta_or_packet_id);
+ big_endian_reader.ReadU16(&event_type_and_timestamp_delta);
+
+ rtcp_data_ += 4;
+
+ field_.cast_receiver_log.event =
+ static_cast<uint8>(event_type_and_timestamp_delta >> 12);
+ field_.cast_receiver_log.delay_delta_or_packet_id = delay_delta_or_packet_id;
+ field_.cast_receiver_log.event_timestamp_delta =
+ event_type_and_timestamp_delta & 0xfff;
+
+ field_type_ = kRtcpApplicationSpecificCastReceiverLogEventCode;
+ return true;
+}
+
+bool RtcpParser::ParseCastSenderLogItem() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+
+ if (length < 4) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ uint32 data;
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU32(&data);
+
+ rtcp_data_ += 4;
+
+ field_.cast_sender_log.status = static_cast<uint8>(data >> 24);
+ // We have 24 LSB of the RTP timestamp on the wire.
+ field_.cast_sender_log.rtp_timestamp = data & 0xffffff;
+ field_type_ = kRtcpApplicationSpecificCastSenderLogCode;
+ return true;
+}
+
bool RtcpParser::ParseFeedBackCommon(const RtcpCommonHeader& header) {
DCHECK((header.PT == kPacketTypeGenericRtpFeedback) ||
(header.PT == kPacketTypePayloadSpecific)) << "Invalid state";
@@ -686,7 +834,6 @@ bool RtcpParser::ParsePayloadSpecificRembItem() {
bool RtcpParser::ParsePayloadSpecificCastItem() {
ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
-
if (length < 4) {
state_ = kStateTopLevel;
EndCurrentBlock();
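A standalone sketch, not part of this commit, of the bit layout ParseCastReceiverLogEventItem expects for each event item: a 4-bit event type in the high nibble of a 16-bit word and a 12-bit event timestamp delta in the low bits. The values below are made up for illustration.

#include <cassert>
#include <cstdint>

int main() {
  uint8_t event_type = 0x9;          // example value only
  uint16_t timestamp_delta = 0x345;  // example value only, fits in 12 bits
  uint16_t packed =
      static_cast<uint16_t>((event_type << 12) | (timestamp_delta & 0x0fff));
  // Unpacked exactly the way the parser does it above.
  assert(static_cast<uint8_t>(packed >> 12) == event_type);
  assert((packed & 0x0fff) == timestamp_delta);
  return 0;
}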
diff --git a/media/cast/rtcp/rtcp_utility.h b/media/cast/rtcp/rtcp_utility.h
index ab7312ffb6..5cf55d9106 100644
--- a/media/cast/rtcp/rtcp_utility.h
+++ b/media/cast/rtcp/rtcp_utility.h
@@ -21,6 +21,13 @@ static const int kRtcpMaxNumberOfRembFeedbackSsrcs = 255;
static const uint32 kRemb = ('R' << 24) + ('E' << 16) + ('M' << 8) + 'B';
static const uint32 kCast = ('C' << 24) + ('A' << 16) + ('S' << 8) + 'T';
+static const uint8 kSenderLogSubtype = 1;
+static const uint8 kReceiverLogSubtype = 2;
+
+static const size_t kRtcpMaxReceiverLogMessages = 256;
+static const size_t kRtcpMaxNackFields = 253;
+static const size_t kRtcpMaxCastLossFields = 100;
+
struct RtcpFieldReceiverReport {
// RFC 3550.
uint32 sender_ssrc;
@@ -139,6 +146,21 @@ struct RtcpFieldPayloadSpecificCastNackItem {
uint8 bitmask;
};
+struct RtcpFieldApplicationSpecificCastReceiverLogItem {
+ uint32 sender_ssrc;
+ uint32 rtp_timestamp;
+ uint32 event_timestamp_base;
+ uint8 event;
+ uint16 delay_delta_or_packet_id;
+ uint16 event_timestamp_delta;
+};
+
+struct RtcpFieldApplicationSpecificCastSenderLogItem {
+ uint32 sender_ssrc;
+ uint8 status;
+ uint32 rtp_timestamp;
+};
+
union RtcpField {
RtcpFieldReceiverReport receiver_report;
RtcpFieldSenderReport sender_report;
@@ -161,6 +183,9 @@ union RtcpField {
RtcpFieldPayloadSpecificRembItem remb_item;
RtcpFieldPayloadSpecificCastItem cast_item;
RtcpFieldPayloadSpecificCastNackItem cast_nack_item;
+
+ RtcpFieldApplicationSpecificCastReceiverLogItem cast_receiver_log;
+ RtcpFieldApplicationSpecificCastSenderLogItem cast_sender_log;
};
enum RtcpFieldTypes {
@@ -189,10 +214,15 @@ enum RtcpFieldTypes {
kRtcpPayloadSpecificRpsiCode,
kRtcpPayloadSpecificAppCode,
+ // Application specific.
kRtcpPayloadSpecificRembCode,
kRtcpPayloadSpecificRembItemCode,
kRtcpPayloadSpecificCastCode,
kRtcpPayloadSpecificCastNackItemCode,
+ kRtcpApplicationSpecificCastReceiverLogCode,
+ kRtcpApplicationSpecificCastReceiverLogFrameCode,
+ kRtcpApplicationSpecificCastReceiverLogEventCode,
+ kRtcpApplicationSpecificCastSenderLogCode,
// RFC 5104.
kRtcpPayloadSpecificFirCode,
@@ -215,11 +245,11 @@ enum RtcpPacketTypes {
kPacketTypeInterArrivalJitterReport = 195,
kPacketTypeSenderReport = 200,
kPacketTypeReceiverReport = 201,
- kPacketTypeSdes= 202,
+ kPacketTypeSdes = 202,
kPacketTypeBye = 203,
kPacketTypeApplicationDefined = 204,
kPacketTypeGenericRtpFeedback = 205,
- kPacketTypePayloadSpecific = 206,
+ kPacketTypePayloadSpecific = 206,
kPacketTypeXr = 207,
kPacketTypeHigh = 210, // Port Mapping.
};
@@ -243,6 +273,9 @@ class RtcpParser {
kStateReportBlock, // Sender/Receiver report report blocks.
kStateSdes,
kStateBye,
+ kStateApplicationSpecificCastReceiverFrameLog,
+ kStateApplicationSpecificCastReceiverEventLog,
+ kStateApplicationSpecificCastSenderLog,
kStateExtendedReportBlock,
kStateExtendedReportDelaySinceLastReceiverReport,
kStateGenericRtpFeedbackNack,
@@ -262,6 +295,9 @@ class RtcpParser {
void IterateReportBlockItem();
void IterateSdesItem();
void IterateByeItem();
+ void IterateCastReceiverLogFrame();
+ void IterateCastReceiverLogEvent();
+ void IterateCastSenderLog();
void IterateExtendedReportItem();
void IterateExtendedReportDelaySinceLastReceiverReportItem();
void IterateNackItem();
@@ -284,6 +320,10 @@ class RtcpParser {
bool ParseSdesTypes();
bool ParseBye();
bool ParseByeItem();
+ bool ParseApplicationDefined(uint8 subtype);
+ bool ParseCastReceiverLogFrameItem();
+ bool ParseCastReceiverLogEventItem();
+ bool ParseCastSenderLogItem();
bool ParseExtendedReport();
bool ParseExtendedReportItem();
diff --git a/media/cast/rtcp/test_rtcp_packet_builder.cc b/media/cast/rtcp/test_rtcp_packet_builder.cc
index 3a6a774920..f4117f53de 100644
--- a/media/cast/rtcp/test_rtcp_packet_builder.cc
+++ b/media/cast/rtcp/test_rtcp_packet_builder.cc
@@ -48,7 +48,7 @@ void TestRtcpPacketBuilder::AddRb(uint32 rtp_ssrc) {
big_endian_writer_.WriteU32(rtp_ssrc);
big_endian_writer_.WriteU32(kLoss);
big_endian_writer_.WriteU32(kExtendedMax);
- big_endian_writer_.WriteU32(kJitter);
+ big_endian_writer_.WriteU32(kTestJitter);
big_endian_writer_.WriteU32(kLastSr);
big_endian_writer_.WriteU32(kDelayLastSr);
}
@@ -185,7 +185,7 @@ void TestRtcpPacketBuilder::AddRemb(uint32 sender_ssrc, uint32 media_ssrc) {
big_endian_writer_.WriteU8(1); // Number of SSRCs.
big_endian_writer_.WriteU8(1); // BR Exp.
// BR Mantissa.
- big_endian_writer_.WriteU16(static_cast<uint16>(kRembBitrate / 2));
+ big_endian_writer_.WriteU16(static_cast<uint16>(kTestRembBitrate / 2));
big_endian_writer_.WriteU32(media_ssrc);
}
@@ -211,6 +211,47 @@ void TestRtcpPacketBuilder::AddCast(uint32 sender_ssrc, uint32 media_ssrc) {
big_endian_writer_.WriteU8(0); // Lost packet id mask.
}
+void TestRtcpPacketBuilder::AddSenderLog(uint32 sender_ssrc) {
+ AddRtcpHeader(204, 1);
+ big_endian_writer_.WriteU32(sender_ssrc);
+ big_endian_writer_.WriteU8('C');
+ big_endian_writer_.WriteU8('A');
+ big_endian_writer_.WriteU8('S');
+ big_endian_writer_.WriteU8('T');
+}
+
+void TestRtcpPacketBuilder::AddSenderFrameLog(uint8 event_id,
+ uint32 rtp_timestamp) {
+ big_endian_writer_.WriteU32(
+ (static_cast<uint32>(event_id) << 24) + (rtp_timestamp & 0xffffff));
+}
+
+void TestRtcpPacketBuilder::AddReceiverLog(uint32 sender_ssrc) {
+ AddRtcpHeader(204, 2);
+ big_endian_writer_.WriteU32(sender_ssrc);
+ big_endian_writer_.WriteU8('C');
+ big_endian_writer_.WriteU8('A');
+ big_endian_writer_.WriteU8('S');
+ big_endian_writer_.WriteU8('T');
+}
+
+void TestRtcpPacketBuilder::AddReceiverFrameLog(uint32 rtp_timestamp,
+ int num_events, uint32 event_timesamp_base) {
+ big_endian_writer_.WriteU32(rtp_timestamp);
+ big_endian_writer_.WriteU8(static_cast<uint8>(num_events - 1));
+ big_endian_writer_.WriteU8(static_cast<uint8>(event_timesamp_base >> 16));
+ big_endian_writer_.WriteU8(static_cast<uint8>(event_timesamp_base >> 8));
+ big_endian_writer_.WriteU8(static_cast<uint8>(event_timesamp_base));
+}
+
+void TestRtcpPacketBuilder::AddReceiverEventLog(uint16 event_data,
+ uint8 event_id, uint16 event_timesamp_delta) {
+ big_endian_writer_.WriteU16(event_data);
+ uint16 type_and_delta = static_cast<uint16>(event_id) << 12;
+ type_and_delta += event_timesamp_delta & 0x0fff;
+ big_endian_writer_.WriteU16(type_and_delta);
+}
+
const uint8* TestRtcpPacketBuilder::Packet() {
PatchLengthField();
return buffer_;
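A standalone sketch, not part of this commit, showing that AddReceiverFrameLog and ParseCastReceiverLogFrameItem agree on the frame-log header word: the high byte carries (num_events - 1) and the low 24 bits carry the event timestamp base. The values are illustrative.

#include <cassert>
#include <cstdint>

int main() {
  int num_events = 3;                          // example value only
  uint32_t event_timestamp_base = 0x00abcdef;  // example value only
  uint32_t packed = (static_cast<uint32_t>(num_events - 1) << 24) |
                    (event_timestamp_base & 0xffffff);
  assert(1 + static_cast<uint8_t>(packed >> 24) == num_events);
  assert((packed & 0xffffff) == event_timestamp_base);
  return 0;
}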
diff --git a/media/cast/rtcp/test_rtcp_packet_builder.h b/media/cast/rtcp/test_rtcp_packet_builder.h
index 8c5479ff48..9b63a37fa4 100644
--- a/media/cast/rtcp/test_rtcp_packet_builder.h
+++ b/media/cast/rtcp/test_rtcp_packet_builder.h
@@ -15,17 +15,18 @@ namespace cast {
// These values are arbitrary only for the purpose of testing.
+namespace {
// Sender report.
static const int kNtpHigh = 0x01020304;
static const int kNtpLow = 0x05060708;
-static const int kRtpTimestamp = 0x10203;
+static const int kRtpTimestamp = 0x10203040;
static const int kSendPacketCount = 987;
static const int kSendOctetCount = 87654;
// Report block.
static const int kLoss = 0x01000123;
static const int kExtendedMax = 0x15678;
-static const int kJitter = 0x10203;
+static const int kTestJitter = 0x10203;
static const int kLastSr = 0x34561234;
static const int kDelayLastSr = 1000;
@@ -34,7 +35,7 @@ static const int kLastRr = 0x34561234;
static const int kDelayLastRr = 1000;
// REMB.
-static const int kRembBitrate = 52428;
+static const int kTestRembBitrate = 52428;
// RPSI.
static const int kPayloadtype = 126;
@@ -44,12 +45,13 @@ static const uint64 kPictureId = 0x1234567890;
static const int kMissingPacket = 34567;
// CAST.
-static const int kAckFrameId = 17;
-static const int kLostFrameId = 18;
-static const int kFrameIdWithLostPackets = 19;
+static const uint32 kAckFrameId = 17;
+static const uint32 kLostFrameId = 18;
+static const uint32 kFrameIdWithLostPackets = 19;
static const int kLostPacketId1 = 3;
static const int kLostPacketId2 = 5;
static const int kLostPacketId3 = 12;
+} // namespace
class TestRtcpPacketBuilder {
public:
@@ -75,6 +77,13 @@ class TestRtcpPacketBuilder {
void AddRpsi(uint32 sender_ssrc, uint32 media_ssrc);
void AddRemb(uint32 sender_ssrc, uint32 media_ssrc);
void AddCast(uint32 sender_ssrc, uint32 media_ssrc);
+ void AddSenderLog(uint32 sender_ssrc);
+ void AddSenderFrameLog(uint8 event_id, uint32 rtp_timestamp);
+ void AddReceiverLog(uint32 sender_ssrc);
+ void AddReceiverFrameLog(uint32 rtp_timestamp, int num_events,
+ uint32 event_timesamp_base);
+ void AddReceiverEventLog(uint16 event_data, uint8 event_id,
+ uint16 event_timesamp_delta);
const uint8* Packet();
int Length() { return kIpPacketSize - big_endian_writer_.remaining(); }
diff --git a/media/cast/rtp_common/rtp_defines.h b/media/cast/rtp_common/rtp_defines.h
index ca5ca94677..b4bc1a204e 100644
--- a/media/cast/rtp_common/rtp_defines.h
+++ b/media/cast/rtp_common/rtp_defines.h
@@ -26,12 +26,12 @@ struct RtpCastHeader {
}
webrtc::WebRtcRTPHeader webrtc;
bool is_key_frame;
- uint8 frame_id;
+ uint32 frame_id;
uint16 packet_id;
uint16 max_packet_id;
bool is_reference; // Set to true if the previous frame is not available,
// and the reference frame id is available.
- uint8 reference_frame_id;
+ uint32 reference_frame_id;
};
class RtpPayloadFeedback {
diff --git a/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc b/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
index 79cc55ebb0..6ef20fe64e 100644
--- a/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
+++ b/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
@@ -20,11 +20,9 @@ static const uint8 kCastReferenceFrameIdBitMask = 0x40;
RtpParser::RtpParser(RtpData* incoming_payload_callback,
const RtpParserConfig parser_config)
: data_callback_(incoming_payload_callback),
- parser_config_(parser_config) {
-}
+ parser_config_(parser_config) {}
-RtpParser::~RtpParser() {
-}
+RtpParser::~RtpParser() {}
bool RtpParser::ParsePacket(const uint8* packet, size_t length,
RtpCastHeader* rtp_header) {
@@ -85,14 +83,15 @@ bool RtpParser::ParseCast(const uint8* packet,
size_t data_length = length;
rtp_header->is_key_frame = (data_ptr[0] & kCastKeyFrameBitMask);
rtp_header->is_reference = (data_ptr[0] & kCastReferenceFrameIdBitMask);
- rtp_header->frame_id = data_ptr[1];
+ rtp_header->frame_id = frame_id_wrap_helper_.MapTo32bitsFrameId(data_ptr[1]);
net::BigEndianReader big_endian_reader(data_ptr + 2, 4);
big_endian_reader.ReadU16(&rtp_header->packet_id);
big_endian_reader.ReadU16(&rtp_header->max_packet_id);
if (rtp_header->is_reference) {
- rtp_header->reference_frame_id = data_ptr[6];
+ rtp_header->reference_frame_id =
+ reference_frame_id_wrap_helper_.MapTo32bitsFrameId(data_ptr[6]);
data_ptr += kRtpCastHeaderLength;
data_length -= kRtpCastHeaderLength;
} else {
diff --git a/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp b/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp
index ade15eebff..258b0bff53 100644
--- a/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp
+++ b/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp
@@ -17,7 +17,6 @@
], # source
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/base/base.gyp:test_support_base',
],
},
],
diff --git a/media/cast/rtp_receiver/rtp_parser/rtp_parser.h b/media/cast/rtp_receiver/rtp_parser/rtp_parser.h
index d8d1e34e86..92ac30c11e 100644
--- a/media/cast/rtp_receiver/rtp_parser/rtp_parser.h
+++ b/media/cast/rtp_receiver/rtp_parser/rtp_parser.h
@@ -45,6 +45,8 @@ class RtpParser {
RtpData* data_callback_;
RtpParserConfig parser_config_;
+ FrameIdWrapHelper frame_id_wrap_helper_;
+ FrameIdWrapHelper reference_frame_id_wrap_helper_;
};
} // namespace cast
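The RTP wire format still carries only 8 bits of frame id, so the parser now widens it with a FrameIdWrapHelper. The toy class below is not the Chromium implementation; it only illustrates the idea of counting wrap-arounds to recover a 32-bit id on the receive side.

#include <cstdint>

class ToyFrameIdWrapper {
 public:
  uint32_t Map(uint8_t on_wire_id) {
    // A large backwards jump means the 8-bit counter wrapped past 0xff.
    if (started_ && on_wire_id < last_id_ &&
        static_cast<uint8_t>(last_id_ - on_wire_id) > 0x80) {
      ++wrap_count_;
    }
    started_ = true;
    last_id_ = on_wire_id;
    return (wrap_count_ << 8) | on_wire_id;
  }

 private:
  bool started_ = false;
  uint8_t last_id_ = 0;
  uint32_t wrap_count_ = 0;
};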
diff --git a/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.cc b/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.cc
index d667deecf0..e8d213d26d 100644
--- a/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.cc
+++ b/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.cc
@@ -30,7 +30,7 @@ void RtpPacketBuilder::SetKeyFrame(bool is_key) {
is_key_ = is_key;
}
-void RtpPacketBuilder::SetFrameId(uint8 frame_id) {
+void RtpPacketBuilder::SetFrameId(uint32 frame_id) {
frame_id_ = frame_id;
}
@@ -42,7 +42,7 @@ void RtpPacketBuilder::SetMaxPacketId(uint16 max_packet_id) {
max_packet_id_ = max_packet_id;
}
-void RtpPacketBuilder::SetReferenceFrameId(uint8 reference_frame_id,
+void RtpPacketBuilder::SetReferenceFrameId(uint32 reference_frame_id,
bool is_set) {
is_reference_set_ = is_set;
if (is_set)
diff --git a/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h b/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h
index 70f520e14d..b6b1a41261 100644
--- a/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h
+++ b/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h
@@ -17,10 +17,10 @@ class RtpPacketBuilder {
public:
RtpPacketBuilder();
void SetKeyFrame(bool is_key);
- void SetFrameId(uint8 frame_id);
+ void SetFrameId(uint32 frame_id);
void SetPacketId(uint16 packet_id);
void SetMaxPacketId(uint16 max_packet_id);
- void SetReferenceFrameId(uint8 reference_frame_id, bool is_set);
+ void SetReferenceFrameId(uint32 reference_frame_id, bool is_set);
void SetTimestamp(uint32 timestamp);
void SetSequenceNumber(uint16 sequence_number);
void SetMarkerBit(bool marker);
@@ -30,10 +30,10 @@ class RtpPacketBuilder {
private:
bool is_key_;
- uint8 frame_id_;
+ uint32 frame_id_;
uint16 packet_id_;
uint16 max_packet_id_;
- uint8 reference_frame_id_;
+ uint32 reference_frame_id_;
bool is_reference_set_;
uint32 timestamp_;
uint16 sequence_number_;
diff --git a/media/cast/rtp_receiver/rtp_receiver.gyp b/media/cast/rtp_receiver/rtp_receiver.gyp
index 7bc25c5493..b612964c07 100644
--- a/media/cast/rtp_receiver/rtp_receiver.gyp
+++ b/media/cast/rtp_receiver/rtp_receiver.gyp
@@ -19,7 +19,6 @@
], # source
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/base/base.gyp:test_support_base',
'rtp_parser/rtp_parser.gyp:*',
],
},
diff --git a/media/cast/rtp_sender/mock_rtp_sender.h b/media/cast/rtp_sender/mock_rtp_sender.h
index 334bc885db..4e74aac7ac 100644
--- a/media/cast/rtp_sender/mock_rtp_sender.h
+++ b/media/cast/rtp_sender/mock_rtp_sender.h
@@ -22,7 +22,7 @@ class MockRtpSender : public RtpSender {
bool(const EncodedAudioFrame& frame, int64 recorded_time));
MOCK_METHOD3(ResendPacket,
- bool(bool is_audio, uint8 frame_id, uint16 packet_id));
+ bool(bool is_audio, uint32 frame_id, uint16 packet_id));
MOCK_METHOD0(RtpStatistics, void());
};
diff --git a/media/cast/rtp_sender/packet_storage/packet_storage.cc b/media/cast/rtp_sender/packet_storage/packet_storage.cc
index d20dc9d169..96e2d275d6 100644
--- a/media/cast/rtp_sender/packet_storage/packet_storage.cc
+++ b/media/cast/rtp_sender/packet_storage/packet_storage.cc
@@ -96,12 +96,13 @@ void PacketStorage::CleanupOldPackets(base::TimeTicks now) {
}
}
-void PacketStorage::StorePacket(uint8 frame_id, uint16 packet_id,
+void PacketStorage::StorePacket(uint32 frame_id, uint16 packet_id,
const Packet* packet) {
base::TimeTicks now = clock_->NowTicks();
CleanupOldPackets(now);
- uint32 index = (static_cast<uint32>(frame_id) << 16) + packet_id;
+ // Internally we only use the 8 LSB of the frame id.
+ uint32 index = ((0xff & frame_id) << 16) + packet_id;
PacketMapIterator it = stored_packets_.find(index);
if (it != stored_packets_.end()) {
// We have already saved this.
@@ -122,15 +123,50 @@ void PacketStorage::StorePacket(uint8 frame_id, uint16 packet_id,
time_to_packet_map_.insert(std::make_pair(now, index));
}
+PacketList PacketStorage::GetPackets(
+ const MissingFramesAndPacketsMap& missing_frames_and_packets) {
+ PacketList packets_to_resend;
+
+ // Iterate over all frames in the list.
+ for (MissingFramesAndPacketsMap::const_iterator it =
+ missing_frames_and_packets.begin();
+ it != missing_frames_and_packets.end(); ++it) {
+ uint8 frame_id = it->first;
+ const PacketIdSet& packets_set = it->second;
+ bool success = false;
+
+ if (packets_set.empty()) {
+ VLOG(1) << "Missing all packets in frame " << static_cast<int>(frame_id);
+
+ uint16 packet_id = 0;
+ do {
+ // Get packet from storage.
+ success = GetPacket(frame_id, packet_id, &packets_to_resend);
+ ++packet_id;
+ } while (success);
+ } else {
+ // Iterate over all of the packets in the frame.
+ for (PacketIdSet::const_iterator set_it = packets_set.begin();
+ set_it != packets_set.end(); ++set_it) {
+ GetPacket(frame_id, *set_it, &packets_to_resend);
+ }
+ }
+ }
+ return packets_to_resend;
+}
+
bool PacketStorage::GetPacket(uint8 frame_id,
uint16 packet_id,
PacketList* packets) {
+ // Internally we only use the 8 LSB of the frame id.
uint32 index = (static_cast<uint32>(frame_id) << 16) + packet_id;
PacketMapIterator it = stored_packets_.find(index);
if (it == stored_packets_.end()) {
return false;
}
it->second->GetCopy(packets);
+ VLOG(1) << "Resend " << static_cast<int>(frame_id)
+ << ":" << packet_id;
return true;
}
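A standalone sketch, not part of this commit, of the storage key StorePacket now builds: only the 8 least significant bits of the 32-bit frame id are kept, shifted above the 16-bit packet id, so frame ids 256 apart share a slot and the age-based cleanup is what keeps that safe.

#include <cassert>
#include <cstdint>

uint32_t ToyStorageIndex(uint32_t frame_id, uint16_t packet_id) {
  return ((frame_id & 0xff) << 16) + packet_id;
}

int main() {
  // Frame ids that differ by a multiple of 256 collide by design; packets
  // older than rtp_history_ms are expected to have been cleaned up already.
  assert(ToyStorageIndex(0x12, 7) == ToyStorageIndex(0x12 + 0x100, 7));
  assert(ToyStorageIndex(0x12, 7) != ToyStorageIndex(0x12, 8));
  return 0;
}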
diff --git a/media/cast/rtp_sender/packet_storage/packet_storage.h b/media/cast/rtp_sender/packet_storage/packet_storage.h
index 791fed6624..bcb0079dd0 100644
--- a/media/cast/rtp_sender/packet_storage/packet_storage.h
+++ b/media/cast/rtp_sender/packet_storage/packet_storage.h
@@ -30,9 +30,13 @@ class PacketStorage {
PacketStorage(base::TickClock* clock, int max_time_stored_ms);
virtual ~PacketStorage();
- void StorePacket(uint8 frame_id, uint16 packet_id, const Packet* packet);
+ void StorePacket(uint32 frame_id, uint16 packet_id, const Packet* packet);
- // Copies packet into the buffer pointed to by rtp_buffer.
+ // Copies all missing packets into the packet list.
+ PacketList GetPackets(
+ const MissingFramesAndPacketsMap& missing_frames_and_packets);
+
+ // Copies packet into the packet list.
bool GetPacket(uint8 frame_id, uint16 packet_id, PacketList* packets);
private:
diff --git a/media/cast/rtp_sender/packet_storage/packet_storage_unittest.cc b/media/cast/rtp_sender/packet_storage/packet_storage_unittest.cc
index 04092c2452..86ce06c66f 100644
--- a/media/cast/rtp_sender/packet_storage/packet_storage_unittest.cc
+++ b/media/cast/rtp_sender/packet_storage/packet_storage_unittest.cc
@@ -33,7 +33,7 @@ class PacketStorageTest : public ::testing::Test {
TEST_F(PacketStorageTest, TimeOut) {
Packet test_123(100, 123); // 100 insertions of the value 123.
PacketList packets;
- for (uint8 frame_id = 0; frame_id < 30; ++frame_id) {
+ for (uint32 frame_id = 0; frame_id < 30; ++frame_id) {
for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
packet_storage_.StorePacket(frame_id, packet_id, &test_123);
}
@@ -41,14 +41,14 @@ TEST_F(PacketStorageTest, TimeOut) {
}
// All packets belonging to the first 14 frames are expected to have expired.
- for (uint8 frame_id = 0; frame_id < 14; ++frame_id) {
+ for (uint32 frame_id = 0; frame_id < 14; ++frame_id) {
for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
Packet packet;
EXPECT_FALSE(packet_storage_.GetPacket(frame_id, packet_id, &packets));
}
}
// All packets belonging to the next 15 frames are expected to be valid.
- for (uint8 frame_id = 14; frame_id < 30; ++frame_id) {
+ for (uint32 frame_id = 14; frame_id < 30; ++frame_id) {
for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
EXPECT_TRUE(packet_storage_.GetPacket(frame_id, packet_id, &packets));
EXPECT_TRUE(packets.front() == test_123);
@@ -60,7 +60,7 @@ TEST_F(PacketStorageTest, MaxNumberOfPackets) {
Packet test_123(100, 123); // 100 insertions of the value 123.
PacketList packets;
- uint8 frame_id = 0;
+ uint32 frame_id = 0;
for (uint16 packet_id = 0; packet_id <= PacketStorage::kMaxStoredPackets;
++packet_id) {
packet_storage_.StorePacket(frame_id, packet_id, &test_123);
@@ -81,7 +81,7 @@ TEST_F(PacketStorageTest, PacketContent) {
Packet test_234(200, 234); // 200 insertions of the value 234.
PacketList packets;
- for (uint8 frame_id = 0; frame_id < 10; ++frame_id) {
+ for (uint32 frame_id = 0; frame_id < 10; ++frame_id) {
for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
// Every other packet.
if (packet_id % 2 == 0) {
@@ -92,7 +92,7 @@ TEST_F(PacketStorageTest, PacketContent) {
}
testing_clock_.Advance(kDeltaBetweenFrames);
}
- for (uint8 frame_id = 0; frame_id < 10; ++frame_id) {
+ for (uint32 frame_id = 0; frame_id < 10; ++frame_id) {
for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
EXPECT_TRUE(packet_storage_.GetPacket(frame_id, packet_id, &packets));
// Every other packet.
diff --git a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc
index 3fdbefec92..6c5141d604 100644
--- a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc
+++ b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc
@@ -25,7 +25,6 @@ RtpPacketizer::RtpPacketizer(PacedPacketSender* transport,
packet_storage_(packet_storage),
sequence_number_(config_.sequence_number),
rtp_timestamp_(config_.rtp_timestamp),
- frame_id_(0),
packet_id_(0),
send_packets_count_(0),
send_octet_count_(0) {
@@ -40,14 +39,12 @@ void RtpPacketizer::IncomingEncodedVideoFrame(
DCHECK(!config_.audio) << "Invalid state";
if (config_.audio) return;
- base::TimeTicks zero_time;
- base::TimeDelta capture_delta = capture_time - zero_time;
-
// Timestamp is in 90 KHz for video.
- rtp_timestamp_ = static_cast<uint32>(capture_delta.InMilliseconds() * 90);
+ rtp_timestamp_ = GetVideoRtpTimestamp(capture_time);
time_last_sent_rtp_timestamp_ = capture_time;
Cast(video_frame->key_frame,
+ video_frame->frame_id,
video_frame->last_referenced_frame_id,
rtp_timestamp_,
video_frame->data);
@@ -61,7 +58,7 @@ void RtpPacketizer::IncomingEncodedAudioFrame(
rtp_timestamp_ += audio_frame->samples; // Timestamp is in samples for audio.
time_last_sent_rtp_timestamp_ = recorded_time;
- Cast(true, 0, rtp_timestamp_, audio_frame->data);
+ Cast(true, audio_frame->frame_id, 0, rtp_timestamp_, audio_frame->data);
}
uint16 RtpPacketizer::NextSequenceNumber() {
@@ -78,10 +75,12 @@ bool RtpPacketizer::LastSentTimestamp(base::TimeTicks* time_sent,
return true;
}
+// TODO(mikhal): Switch to pass data with a const_ref.
void RtpPacketizer::Cast(bool is_key,
- uint8 reference_frame_id,
+ uint32 frame_id,
+ uint32 reference_frame_id,
uint32 timestamp,
- Packet data) {
+ const std::string& data) {
uint16 rtp_header_length = kCommonRtpHeaderLength + kCastRtpHeaderLength;
uint16 max_length = config_.max_payload_length - rtp_header_length - 1;
@@ -93,7 +92,7 @@ void RtpPacketizer::Cast(bool is_key,
PacketList packets;
size_t remaining_size = data.size();
- Packet::iterator data_iter = data.begin();
+ std::string::const_iterator data_iter = data.begin();
while (remaining_size > 0) {
Packet packet;
@@ -106,19 +105,19 @@ void RtpPacketizer::Cast(bool is_key,
// Build Cast header.
packet.push_back(
(is_key ? kCastKeyFrameBitMask : 0) | kCastReferenceFrameIdBitMask);
- packet.push_back(frame_id_);
+ packet.push_back(frame_id);
size_t start_size = packet.size();
packet.resize(start_size + 4);
net::BigEndianWriter big_endian_writer(&(packet[start_size]), 4);
big_endian_writer.WriteU16(packet_id_);
big_endian_writer.WriteU16(static_cast<uint16>(num_packets - 1));
- packet.push_back(reference_frame_id);
+ packet.push_back(static_cast<uint8>(reference_frame_id));
// Copy payload data.
packet.insert(packet.end(), data_iter, data_iter + payload_length);
// Store packet.
- packet_storage_->StorePacket(frame_id_, packet_id_, &packet);
+ packet_storage_->StorePacket(frame_id, packet_id_, &packet);
++packet_id_;
data_iter += payload_length;
@@ -134,7 +133,6 @@ void RtpPacketizer::Cast(bool is_key,
// Prepare for next frame.
packet_id_ = 0;
- frame_id_ = static_cast<uint8>(frame_id_ + 1);
}
void RtpPacketizer::BuildCommonRTPheader(
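A standalone sketch, not part of this commit, of the per-packet Cast header that Cast() now writes: a flags byte, the low 8 bits of the 32-bit frame id, big-endian packet id and max packet id, and the low 8 bits of the reference frame id. The 0x80 key-frame mask is assumed here; 0x40 is the reference bit shown in the parser.

#include <cstdint>
#include <vector>

std::vector<uint8_t> BuildToyCastHeader(bool is_key, uint32_t frame_id,
                                        uint16_t packet_id,
                                        uint16_t max_packet_id,
                                        uint32_t reference_frame_id) {
  std::vector<uint8_t> header;
  header.push_back((is_key ? 0x80 : 0) | 0x40);  // key-frame | reference bit
  header.push_back(static_cast<uint8_t>(frame_id));         // 8 LSB on the wire
  header.push_back(static_cast<uint8_t>(packet_id >> 8));   // big endian
  header.push_back(static_cast<uint8_t>(packet_id));
  header.push_back(static_cast<uint8_t>(max_packet_id >> 8));
  header.push_back(static_cast<uint8_t>(max_packet_id));
  header.push_back(static_cast<uint8_t>(reference_frame_id));
  return header;
}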
diff --git a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h
index 783c87c440..6dc66690cf 100644
--- a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h
+++ b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h
@@ -49,8 +49,9 @@ class RtpPacketizer {
size_t send_octet_count() { return send_octet_count_; }
private:
- void Cast(bool is_key, uint8 reference_frame_id,
- uint32 timestamp, std::vector<uint8> data);
+ void Cast(bool is_key, uint32 frame_id, uint32 reference_frame_id,
+ uint32 timestamp, const std::string& data);
+
void BuildCommonRTPheader(std::vector<uint8>* packet, bool marker_bit,
uint32 time_stamp);
@@ -61,7 +62,6 @@ class RtpPacketizer {
base::TimeTicks time_last_sent_rtp_timestamp_;
uint16 sequence_number_;
uint32 rtp_timestamp_;
- uint8 frame_id_;
uint16 packet_id_;
int send_packets_count_;
diff --git a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
index c4cb42e809..16959e069e 100644
--- a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
+++ b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
@@ -55,8 +55,7 @@ class TestRtpPacketTransport : public PacedPacketSender {
EXPECT_EQ(expected_packet_id_, rtp_header.packet_id);
EXPECT_EQ(expected_number_of_packets_ - 1, rtp_header.max_packet_id);
EXPECT_TRUE(rtp_header.is_reference);
- EXPECT_EQ(static_cast<uint8>(expected_frame_id_ - 1),
- rtp_header.reference_frame_id);
+ EXPECT_EQ(expected_frame_id_ - 1u, rtp_header.reference_frame_id);
}
virtual bool SendPackets(const PacketList& packets) OVERRIDE {
@@ -94,7 +93,7 @@ class TestRtpPacketTransport : public PacedPacketSender {
int expected_number_of_packets_;
// Assuming packets arrive in sequence.
int expected_packet_id_;
- int expected_frame_id_;
+ uint32 expected_frame_id_;
};
class RtpPacketizerTest : public ::testing::Test {
@@ -115,7 +114,8 @@ class RtpPacketizerTest : public ::testing::Test {
virtual void SetUp() {
video_frame_.key_frame = false;
- video_frame_.last_referenced_frame_id = 255;
+ video_frame_.frame_id = 0;
+ video_frame_.last_referenced_frame_id = kStartFrameId;
video_frame_.data.assign(kFrameSize, 123);
}
diff --git a/media/cast/rtp_sender/rtp_packetizer/test/rtp_header_parser.cc b/media/cast/rtp_sender/rtp_packetizer/test/rtp_header_parser.cc
index 49aac94602..5c1c9fe6d0 100644
--- a/media/cast/rtp_sender/rtp_packetizer/test/rtp_header_parser.cc
+++ b/media/cast/rtp_sender/rtp_packetizer/test/rtp_header_parser.cc
@@ -39,9 +39,7 @@ bool RtpHeaderParser::ParseCommon(RtpCastHeader* parsed_packet) const {
const uint8 num_csrcs = rtp_data_begin_[0] & 0x0f;
const bool marker = ((rtp_data_begin_[1] & 0x80) == 0) ? false : true;
-
const uint8 payload_type = rtp_data_begin_[1] & 0x7f;
-
const uint16 sequence_number = (rtp_data_begin_[2] << 8) +
rtp_data_begin_[3];
@@ -72,14 +70,15 @@ bool RtpHeaderParser::ParseCast(RtpCastHeader* parsed_packet) const {
const uint8* data = rtp_data_begin_ + kRtpCommonHeaderLength;
parsed_packet->is_key_frame = (data[0] & kCastKeyFrameBitMask);
parsed_packet->is_reference = (data[0] & kCastReferenceFrameIdBitMask);
- parsed_packet->frame_id = data[1];
+ parsed_packet->frame_id = frame_id_wrap_helper_.MapTo32bitsFrameId(data[1]);
net::BigEndianReader big_endian_reader(data + 2, 8);
big_endian_reader.ReadU16(&parsed_packet->packet_id);
big_endian_reader.ReadU16(&parsed_packet->max_packet_id);
if (parsed_packet->is_reference) {
- parsed_packet->reference_frame_id = data[6];
+ parsed_packet->reference_frame_id =
+ reference_frame_id_wrap_helper_.MapTo32bitsFrameId(data[6]);
}
return true;
}
diff --git a/media/cast/rtp_sender/rtp_packetizer/test/rtp_header_parser.h b/media/cast/rtp_sender/rtp_packetizer/test/rtp_header_parser.h
index d28f5a8dec..e4b1465fcf 100644
--- a/media/cast/rtp_sender/rtp_packetizer/test/rtp_header_parser.h
+++ b/media/cast/rtp_sender/rtp_packetizer/test/rtp_header_parser.h
@@ -23,6 +23,9 @@ class RtpHeaderParser {
const uint8* const rtp_data_begin_;
size_t length_;
+ mutable FrameIdWrapHelper frame_id_wrap_helper_;
+ mutable FrameIdWrapHelper reference_frame_id_wrap_helper_;
+
DISALLOW_COPY_AND_ASSIGN(RtpHeaderParser);
};
diff --git a/media/cast/rtp_sender/rtp_sender.cc b/media/cast/rtp_sender/rtp_sender.cc
index d06a503dc8..d222a56f84 100644
--- a/media/cast/rtp_sender/rtp_sender.cc
+++ b/media/cast/rtp_sender/rtp_sender.cc
@@ -9,28 +9,31 @@
#include "media/cast/cast_defines.h"
#include "media/cast/pacing/paced_sender.h"
#include "media/cast/rtcp/rtcp_defines.h"
+#include "net/base/big_endian.h"
namespace media {
namespace cast {
-RtpSender::RtpSender(base::TickClock* clock,
+RtpSender::RtpSender(scoped_refptr<CastEnvironment> cast_environment,
const AudioSenderConfig* audio_config,
const VideoSenderConfig* video_config,
PacedPacketSender* transport)
- : config_(),
- transport_(transport),
- clock_(clock) {
+ : cast_environment_(cast_environment),
+ config_(),
+ transport_(transport) {
// Store generic cast config and create packetizer config.
DCHECK(audio_config || video_config) << "Invalid argument";
if (audio_config) {
- storage_.reset(new PacketStorage(clock, audio_config->rtp_history_ms));
+ storage_.reset(new PacketStorage(cast_environment->Clock(),
+ audio_config->rtp_history_ms));
config_.audio = true;
config_.ssrc = audio_config->sender_ssrc;
config_.payload_type = audio_config->rtp_payload_type;
config_.frequency = audio_config->frequency;
config_.audio_codec = audio_config->codec;
} else {
- storage_.reset(new PacketStorage(clock, video_config->rtp_history_ms));
+ storage_.reset(new PacketStorage(cast_environment->Clock(),
+ video_config->rtp_history_ms));
config_.audio = false;
config_.ssrc = video_config->sender_ssrc;
config_.payload_type = video_config->rtp_payload_type;
diff --git a/media/cast/rtp_sender/rtp_sender.gyp b/media/cast/rtp_sender/rtp_sender.gyp
index ff85956daf..f689b99b14 100644
--- a/media/cast/rtp_sender/rtp_sender.gyp
+++ b/media/cast/rtp_sender/rtp_sender.gyp
@@ -18,7 +18,6 @@
], # source
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/base/base.gyp:test_support_base',
'packet_storage/packet_storage.gyp:*',
'rtp_packetizer/rtp_packetizer.gyp:*',
],
diff --git a/media/cast/rtp_sender/rtp_sender.h b/media/cast/rtp_sender/rtp_sender.h
index 45fd987f7b..f56d7efc9f 100644
--- a/media/cast/rtp_sender/rtp_sender.h
+++ b/media/cast/rtp_sender/rtp_sender.h
@@ -14,6 +14,7 @@
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/rtp_sender/packet_storage/packet_storage.h"
#include "media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h"
#include "media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_config.h"
@@ -30,7 +31,7 @@ struct RtcpSenderInfo;
// acknowledged by the remote peer or timed out.
class RtpSender {
public:
- RtpSender(base::TickClock* clock,
+ RtpSender(scoped_refptr<CastEnvironment> cast_environment,
const AudioSenderConfig* audio_config,
const VideoSenderConfig* video_config,
PacedPacketSender* transport);
@@ -52,7 +53,7 @@ class RtpSender {
private:
void UpdateSequenceNumber(std::vector<uint8>* packet);
- base::TickClock* const clock_; // Not owned by this class.
+ scoped_refptr<CastEnvironment> cast_environment_;
RtpPacketizerConfig config_;
scoped_ptr<RtpPacketizer> packetizer_;
scoped_ptr<PacketStorage> storage_;
diff --git a/media/cast/test/audio_utility.cc b/media/cast/test/audio_utility.cc
new file mode 100644
index 0000000000..46f0af5b28
--- /dev/null
+++ b/media/cast/test/audio_utility.cc
@@ -0,0 +1,73 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/test/audio_utility.h"
+
+#include "base/time/time.h"
+#include "media/base/audio_bus.h"
+#include "media/cast/cast_config.h"
+
+namespace media {
+namespace cast {
+
+TestAudioBusFactory::TestAudioBusFactory(int num_channels,
+ int sample_rate,
+ float sine_wave_frequency,
+ float volume)
+ : num_channels_(num_channels),
+ sample_rate_(sample_rate),
+ volume_(volume),
+ source_(num_channels, sine_wave_frequency, sample_rate) {
+ CHECK_LT(0, num_channels);
+ CHECK_LT(0, sample_rate);
+ CHECK_LE(0.0f, volume_);
+ CHECK_LE(volume_, 1.0f);
+}
+
+TestAudioBusFactory::~TestAudioBusFactory() {}
+
+scoped_ptr<AudioBus> TestAudioBusFactory::NextAudioBus(
+ const base::TimeDelta& duration) {
+ const int num_samples = static_cast<int>(
+ (sample_rate_ * duration) / base::TimeDelta::FromSeconds(1));
+ scoped_ptr<AudioBus> bus(AudioBus::Create(num_channels_, num_samples));
+ source_.OnMoreData(bus.get(), AudioBuffersState());
+ bus->Scale(volume_);
+ return bus.Pass();
+}
+
+scoped_ptr<PcmAudioFrame> ToPcmAudioFrame(const AudioBus& audio_bus,
+ int sample_rate) {
+ scoped_ptr<PcmAudioFrame> audio_frame(new PcmAudioFrame());
+ audio_frame->channels = audio_bus.channels();
+ audio_frame->frequency = sample_rate;
+ audio_frame->samples.resize(audio_bus.channels() * audio_bus.frames());
+ audio_bus.ToInterleaved(
+ audio_bus.frames(), sizeof(audio_frame->samples.front()),
+ &audio_frame->samples.front());
+ return audio_frame.Pass();
+}
+
+int CountZeroCrossings(const std::vector<int16>& samples) {
+ // The sample values must pass beyond |kAmplitudeThreshold| on the opposite
+ // side of zero before a crossing will be counted.
+ const int kAmplitudeThreshold = 1000; // Approx. 3% of max amplitude.
+
+ int count = 0;
+ std::vector<int16>::const_iterator i = samples.begin();
+ int16 last = 0;
+ for (; i != samples.end() && abs(last) < kAmplitudeThreshold; ++i)
+ last = *i;
+ for (; i != samples.end(); ++i) {
+ if (abs(*i) >= kAmplitudeThreshold &&
+ (last < 0) != (*i < 0)) {
+ ++count;
+ last = *i;
+ }
+ }
+ return count;
+}
+
+} // namespace cast
+} // namespace media
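A standalone sketch, not part of this commit, of how CountZeroCrossings can be used: a clean sine wave crosses zero about 2 * frequency * duration times, so a 440 Hz tone over one second should yield roughly 880 crossings. int16_t stands in for Chromium's int16 here.

#include <cmath>
#include <cstdint>
#include <vector>

std::vector<int16_t> MakeToyTone(int sample_rate, double frequency,
                                 double seconds) {
  std::vector<int16_t> samples(static_cast<size_t>(sample_rate * seconds));
  for (size_t i = 0; i < samples.size(); ++i) {
    samples[i] = static_cast<int16_t>(
        30000.0 * std::sin(2.0 * 3.14159265358979 * frequency * i / sample_rate));
  }
  return samples;
}
// CountZeroCrossings(MakeToyTone(48000, 440.0, 1.0)) should be close to 880.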
diff --git a/media/cast/test/audio_utility.h b/media/cast/test/audio_utility.h
new file mode 100644
index 0000000000..eea476badc
--- /dev/null
+++ b/media/cast/test/audio_utility.h
@@ -0,0 +1,62 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TEST_AUDIO_UTILITY_H_
+#define MEDIA_CAST_TEST_AUDIO_UTILITY_H_
+
+#include "media/audio/simple_sources.h"
+
+namespace base {
+class TimeDelta;
+}
+
+namespace media {
+class AudioBus;
+}
+
+namespace media {
+namespace cast {
+
+struct PcmAudioFrame;
+
+// Produces AudioBuses of varying duration where each successive output contains
+// the continuation of a single sine wave.
+class TestAudioBusFactory {
+ public:
+ TestAudioBusFactory(int num_channels, int sample_rate,
+ float sine_wave_frequency, float volume);
+ ~TestAudioBusFactory();
+
+ // Creates a new AudioBus of the given |duration|, filled with the next batch
+ // of sine wave samples.
+ scoped_ptr<AudioBus> NextAudioBus(const base::TimeDelta& duration);
+
+ // A reasonable test tone.
+ static const int kMiddleANoteFreq = 440;
+
+ private:
+ const int num_channels_;
+ const int sample_rate_;
+ const float volume_;
+ SineWaveAudioSource source_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestAudioBusFactory);
+};
+
+// Convenience function to convert an |audio_bus| to its equivalent
+// PcmAudioFrame.
+// TODO(miu): Remove this once all code has migrated to use AudioBus. See
+// comment in media/cast/cast_config.h.
+scoped_ptr<PcmAudioFrame> ToPcmAudioFrame(const AudioBus& audio_bus,
+ int sample_rate);
+
+// Assuming |samples| contains a single-frequency sine wave (and maybe some
+// low-amplitude noise), count the number of times the sine wave crosses
+// zero.
+int CountZeroCrossings(const std::vector<int16>& samples);
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TEST_AUDIO_UTILITY_H_
diff --git a/media/cast/test/crypto_utility.cc b/media/cast/test/crypto_utility.cc
new file mode 100644
index 0000000000..4e88a27485
--- /dev/null
+++ b/media/cast/test/crypto_utility.cc
@@ -0,0 +1,25 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_number_conversions.h"
+#include "media/cast/test/crypto_utility.h"
+
+namespace media {
+namespace cast {
+
+std::string ConvertFromBase16String(const std::string base_16) {
+ std::string compressed;
+ DCHECK_EQ(base_16.size() % 2, 0u) << "Must be a multiple of 2";
+ compressed.reserve(base_16.size() / 2);
+
+ std::vector<uint8> v;
+ if (!base::HexStringToBytes(base_16, &v)) {
+ NOTREACHED();
+ }
+ compressed.assign(reinterpret_cast<const char*>(&v[0]), v.size());
+ return compressed;
+}
+
+} // namespace cast
+} // namespace media
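A short usage sketch, not part of this commit, for the helper above: it turns a hex-encoded test key into the raw byte string the crypto code expects.

#include <cassert>
#include <string>
#include "media/cast/test/crypto_utility.h"  // for ConvertFromBase16String()

int main() {
  std::string key = media::cast::ConvertFromBase16String(
      "000102030405060708090a0b0c0d0e0f");
  assert(key.size() == 16u);
  assert(key[3] == '\x03');
  return 0;
}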
diff --git a/media/cast/test/crypto_utility.h b/media/cast/test/crypto_utility.h
new file mode 100644
index 0000000000..21f8c7f914
--- /dev/null
+++ b/media/cast/test/crypto_utility.h
@@ -0,0 +1,17 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Utility functions for crypto testing.
+
+#include "media/cast/cast_config.h"
+
+namespace media {
+namespace cast {
+
+// Convert to a binary string from a base 16 string.
+std::string ConvertFromBase16String(const std::string base_16);
+
+} // namespace cast
+} // namespace media
+
diff --git a/media/cast/test/encode_decode_test.cc b/media/cast/test/encode_decode_test.cc
index bc2c0a1725..c63357e92d 100644
--- a/media/cast/test/encode_decode_test.cc
+++ b/media/cast/test/encode_decode_test.cc
@@ -8,7 +8,11 @@
#include <gtest/gtest.h>
+#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
+#include "media/base/video_frame.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/test/fake_task_runner.h"
#include "media/cast/test/video_utility.h"
#include "media/cast/video_receiver/codecs/vp8/vp8_decoder.h"
#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
@@ -16,18 +20,65 @@
namespace media {
namespace cast {
+static const int64 kStartMillisecond = GG_INT64_C(1245);
+static const int kWidth = 1280;
+static const int kHeight = 720;
+static const int kStartbitrate = 4000000;
+static const int kMaxQp = 54;
+static const int kMinQp = 4;
+static const int kMaxFrameRate = 30;
+
namespace {
-const int kWidth = 1280;
-const int kHeight = 720;
-const int kStartbitrate = 4000000;
-const int kMaxQp = 54;
-const int kMinQp = 4;
-const int kMaxFrameRate = 30;
+class EncodeDecodeTestFrameCallback :
+ public base::RefCountedThreadSafe<EncodeDecodeTestFrameCallback> {
+ public:
+ EncodeDecodeTestFrameCallback()
+ : num_called_(0) {
+ gfx::Size size(kWidth, kHeight);
+ original_frame_ = media::VideoFrame::CreateFrame(
+ VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
+ }
+
+ void SetFrameStartValue(int start_value) {
+ PopulateVideoFrame(original_frame_.get(), start_value);
+ }
+
+ void DecodeComplete(scoped_ptr<I420VideoFrame> decoded_frame,
+ const base::TimeTicks& render_time) {
+ ++num_called_;
+ // Compare resolution.
+ EXPECT_EQ(original_frame_->coded_size().width(), decoded_frame->width);
+ EXPECT_EQ(original_frame_->coded_size().height(), decoded_frame->height);
+ // Compare data.
+ EXPECT_GT(I420PSNR(*(original_frame_.get()), *(decoded_frame.get())), 40.0);
+ }
+
+ int num_called() const {
+ return num_called_;
+ }
+
+ protected:
+ virtual ~EncodeDecodeTestFrameCallback() {}
+
+ private:
+ friend class base::RefCountedThreadSafe<EncodeDecodeTestFrameCallback>;
+
+ int num_called_;
+ scoped_refptr<media::VideoFrame> original_frame_;
+};
} // namespace
class EncodeDecodeTest : public ::testing::Test {
protected:
- EncodeDecodeTest() {
+ EncodeDecodeTest()
+ : task_runner_(new test::FakeTaskRunner(&testing_clock_)),
+ // CastEnvironment will only be used by the vp8 decoder; Enable only the
+ // video decoder and main threads.
+ cast_environment_(new CastEnvironment(&testing_clock_, task_runner_,
+ NULL, NULL, NULL, task_runner_, GetDefaultCastLoggingConfig())),
+ test_callback_(new EncodeDecodeTestFrameCallback()) {
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
encoder_config_.max_number_of_video_buffers_used = 1;
encoder_config_.number_of_cores = 1;
encoder_config_.width = kWidth;
@@ -39,49 +90,40 @@ class EncodeDecodeTest : public ::testing::Test {
int max_unacked_frames = 1;
encoder_.reset(new Vp8Encoder(encoder_config_, max_unacked_frames));
// Initialize to use one core.
- decoder_.reset(new Vp8Decoder(1));
+ decoder_.reset(new Vp8Decoder(1, cast_environment_));
}
- virtual void SetUp() {
+ virtual ~EncodeDecodeTest() {}
+
+ virtual void SetUp() OVERRIDE {
// Create test frame.
int start_value = 10; // Random value to start from.
- video_frame_.reset(new I420VideoFrame());
- video_frame_->width = encoder_config_.width;
- video_frame_->height = encoder_config_.height;
- PopulateVideoFrame(video_frame_.get(), start_value);
- }
-
- virtual void TearDown() {
- delete [] video_frame_->y_plane.data;
- delete [] video_frame_->u_plane.data;
- delete [] video_frame_->v_plane.data;
- }
-
- void Compare(const I420VideoFrame& original_image,
- const I420VideoFrame& decoded_image) {
- // Compare resolution.
- EXPECT_EQ(original_image.width, decoded_image.width);
- EXPECT_EQ(original_image.height, decoded_image.height);
- // Compare data.
- EXPECT_GT(I420PSNR(original_image, decoded_image), 40.0);
+ gfx::Size size(encoder_config_.width, encoder_config_.height);
+ video_frame_ = media::VideoFrame::CreateFrame(VideoFrame::I420,
+ size, gfx::Rect(size), size, base::TimeDelta());
+ PopulateVideoFrame(video_frame_, start_value);
+ test_callback_->SetFrameStartValue(start_value);
}
VideoSenderConfig encoder_config_;
scoped_ptr<Vp8Encoder> encoder_;
scoped_ptr<Vp8Decoder> decoder_;
- scoped_ptr<I420VideoFrame> video_frame_;
+ scoped_refptr<media::VideoFrame> video_frame_;
+ base::SimpleTestTickClock testing_clock_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_refptr<EncodeDecodeTestFrameCallback> test_callback_;
};
TEST_F(EncodeDecodeTest, BasicEncodeDecode) {
EncodedVideoFrame encoded_frame;
- I420VideoFrame decoded_frame;
// Encode frame.
- encoder_->Encode(*(video_frame_.get()), &encoded_frame);
+ encoder_->Encode(video_frame_, &encoded_frame);
EXPECT_GT(encoded_frame.data.size(), GG_UINT64_C(0));
// Decode frame.
- decoder_->Decode(encoded_frame, &decoded_frame);
- // Validate data.
- Compare(*(video_frame_.get()), decoded_frame);
+ decoder_->Decode(&encoded_frame, base::TimeTicks(), base::Bind(
+ &EncodeDecodeTestFrameCallback::DecodeComplete, test_callback_));
+ task_runner_->RunTasks();
}
} // namespace cast
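The round-trip check above accepts the decoded frame when I420PSNR() exceeds 40 dB, i.e. the mean squared error against the source frame is very small. For reference, a minimal sketch of the underlying formula (this is not the I420PSNR() helper used by the tests, just a plain per-byte version for illustration):

#include <cmath>
#include <cstdint>
#include <vector>

double Psnr8Bit(const std::vector<uint8_t>& reference,
                const std::vector<uint8_t>& test) {
  double mse = 0.0;
  const size_t n =
      reference.size() < test.size() ? reference.size() : test.size();
  for (size_t i = 0; i < n; ++i) {
    const double diff =
        static_cast<double>(reference[i]) - static_cast<double>(test[i]);
    mse += diff * diff;
  }
  mse /= (n == 0) ? 1 : n;
  if (mse == 0.0)
    return 96.0;  // Arbitrary cap for identical inputs.
  // PSNR = 10 * log10(MAX^2 / MSE), with MAX = 255 for 8-bit samples.
  return 10.0 * std::log10((255.0 * 255.0) / mse);
}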
diff --git a/media/cast/test/end2end_unittest.cc b/media/cast/test/end2end_unittest.cc
index b465f2096a..0a2e0976a7 100644
--- a/media/cast/test/end2end_unittest.cc
+++ b/media/cast/test/end2end_unittest.cc
@@ -14,12 +14,16 @@
#include <list>
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/test/simple_test_tick_clock.h"
#include "base/time/tick_clock.h"
+#include "media/base/video_frame.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/cast_receiver.h"
#include "media/cast/cast_sender.h"
+#include "media/cast/test/audio_utility.h"
+#include "media/cast/test/crypto_utility.h"
#include "media/cast/test/fake_task_runner.h"
#include "media/cast/test/video_utility.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -29,8 +33,8 @@ namespace cast {
static const int64 kStartMillisecond = GG_INT64_C(1245);
static const int kAudioChannels = 2;
-static const int kAudioSamplingFrequency = 48000;
-static const int kSoundFrequency = 1234; // Frequency of sinusoid wave.
+static const double kSoundFrequency = 314.15926535897; // Freq of sine wave.
+static const float kSoundVolume = 0.5f;
static const int kVideoWidth = 1280;
static const int kVideoHeight = 720;
static const int kCommonRtpHeaderLength = 12;
@@ -87,7 +91,7 @@ class LoopBackTransport : public PacketSender {
for (size_t i = 0; i < packets.size(); ++i) {
const Packet& packet = packets[i];
if (drop_packets_belonging_to_odd_frames_) {
- uint8 frame_id = packet[13];
+ uint32 frame_id = packet[13];
if (frame_id % 2 == 1) continue;
}
uint8* packet_copy = new uint8[packet.size()];
@@ -133,18 +137,13 @@ class TestReceiverAudioCallback :
};
TestReceiverAudioCallback()
- : num_called_(0),
- avg_snr_(0) {}
+ : num_called_(0) {}
- void SetExpectedResult(int expected_sampling_frequency,
- int expected_min_snr,
- int expected_avg_snr) {
+ void SetExpectedSamplingFrequency(int expected_sampling_frequency) {
expected_sampling_frequency_ = expected_sampling_frequency;
- expected_min_snr_ = expected_min_snr;
- expected_avg_snr_ = expected_avg_snr;
}
- void AddExpectedResult(PcmAudioFrame* audio_frame,
+ void AddExpectedResult(scoped_ptr<PcmAudioFrame> audio_frame,
int expected_num_10ms_blocks,
const base::TimeTicks& record_time) {
ExpectedAudioFrame expected_audio_frame;
@@ -168,78 +167,21 @@ class TestReceiverAudioCallback :
expected_audio_frame.num_10ms_blocks * kAudioChannels *
expected_sampling_frequency_ / 100);
- EXPECT_GE(expected_audio_frame.record_time +
+ const base::TimeTicks upper_bound = expected_audio_frame.record_time +
base::TimeDelta::FromMilliseconds(kDefaultRtpMaxDelayMs +
- kTimerErrorMs), playout_time);
- EXPECT_LT(expected_audio_frame.record_time, playout_time);
+ kTimerErrorMs);
+ EXPECT_GE(upper_bound, playout_time)
+ << "playout_time - upper_bound == "
+ << (playout_time - upper_bound).InMicroseconds() << " usec";
+ EXPECT_LT(expected_audio_frame.record_time, playout_time)
+ << "playout_time - expected == "
+ << (playout_time - expected_audio_frame.record_time).InMicroseconds()
+ << " usec";
EXPECT_EQ(audio_frame->samples.size(),
expected_audio_frame.audio_frame.samples.size());
}
- size_t CalculateMaxResamplingDelay(size_t src_sample_rate_hz,
- size_t dst_sample_rate_hz,
- size_t number_of_channels) {
- // The sinc resampler has a known delay, which we compute here. Multiplying
- // by two gives us a crude maximum for any resampling, as the old resampler
- // typically (but not always) has lower delay. Since we sample up and down
- // we need to double our delay.
- static const size_t kInputKernelDelaySamples = 16;
- if (src_sample_rate_hz == dst_sample_rate_hz) return 0;
-
- return (dst_sample_rate_hz * kInputKernelDelaySamples *
- number_of_channels * 4) / src_sample_rate_hz;
- }
-
- // Computes the SNR based on the error between |reference_audio_frame| and
- // |output_audio_frame| given a sample offset of |delay|.
- double ComputeSNR(const PcmAudioFrame& reference_audio_frame,
- const std::vector<int16>& output_audio_samples,
- size_t delay) {
- // Check all out allowed delays.
- double square_error = 0;
- double variance = 0;
- for (size_t i = 0; i < reference_audio_frame.samples.size() - delay; ++i) {
- size_t error = reference_audio_frame.samples[i] -
- output_audio_samples[i + delay];
-
- square_error += error * error;
- variance += reference_audio_frame.samples[i] *
- reference_audio_frame.samples[i];
- }
- // 16-bit audio has a dynamic range of 96 dB.
- double snr = 96.0; // Assigning 96 dB to the zero-error case.
- if (square_error > 0) {
- snr = 10 * log10(variance / square_error);
- }
- return snr;
- }
-
- // Computes the best SNR based on the error between |ref_frame| and
- // |test_frame|. It allows for up to a |max_delay| in samples between the
- // signals to compensate for the re-sampling delay.
- double ComputeBestSNR(const PcmAudioFrame& reference_audio_frame,
- const std::vector<int16>& output_audio_samples,
- size_t max_delay) {
- double best_snr = 0;
-
- // Check all out allowed delays.
- for (size_t delay = 0; delay <= max_delay;
- delay += reference_audio_frame.channels) {
- double snr = ComputeSNR(reference_audio_frame, output_audio_samples,
- delay);
- if (snr > best_snr) {
- best_snr = snr;
- }
- }
- if (avg_snr_ == 0) {
- avg_snr_ = best_snr;
- } else {
- avg_snr_ = (avg_snr_ * 7 + best_snr) / 8;
- }
- return best_snr;
- }
-
void CheckPcmAudioFrame(scoped_ptr<PcmAudioFrame> audio_frame,
const base::TimeTicks& playout_time) {
++num_called_;
@@ -249,11 +191,9 @@ class TestReceiverAudioCallback :
expected_frame_.pop_front();
if (audio_frame->samples.size() == 0) return; // No more checks needed.
- size_t max_delay = CalculateMaxResamplingDelay(48000, 32000,
- expected_audio_frame.audio_frame.channels);
- EXPECT_GE(ComputeBestSNR(expected_audio_frame.audio_frame,
- audio_frame->samples, max_delay),
- expected_min_snr_);
+ EXPECT_NEAR(CountZeroCrossings(expected_audio_frame.audio_frame.samples),
+ CountZeroCrossings(audio_frame->samples),
+ 1);
}
void CheckCodedPcmAudioFrame(scoped_ptr<EncodedAudioFrame> audio_frame,
@@ -276,25 +216,23 @@ class TestReceiverAudioCallback :
EXPECT_LT(expected_audio_frame.record_time, playout_time);
if (audio_frame->data.size() == 0) return; // No more checks needed.
- size_t max_delay = CalculateMaxResamplingDelay(48000, 32000,
- expected_audio_frame.audio_frame.channels);
-
// We need to convert our "coded" audio frame to our raw format.
std::vector<int16> output_audio_samples;
size_t number_of_samples = audio_frame->data.size() / 2;
for (size_t i = 0; i < number_of_samples; ++i) {
- uint16 sample = (audio_frame->data[1 + i * sizeof(uint16)]) +
- (static_cast<uint16>(audio_frame->data[i * sizeof(uint16)]) << 8);
+ uint16 sample =
+ static_cast<uint8>(audio_frame->data[1 + i * sizeof(uint16)]) +
+ (static_cast<uint16>(audio_frame->data[i * sizeof(uint16)]) << 8);
output_audio_samples.push_back(static_cast<int16>(sample));
}
- EXPECT_GE(ComputeBestSNR(expected_audio_frame.audio_frame,
- output_audio_samples, max_delay),
- expected_min_snr_);
+
+ EXPECT_NEAR(CountZeroCrossings(expected_audio_frame.audio_frame.samples),
+ CountZeroCrossings(output_audio_samples),
+ 1);
}
- int number_times_called() {
- EXPECT_GE(avg_snr_, expected_avg_snr_);
+ int number_times_called() const {
return num_called_;
}
@@ -306,9 +244,6 @@ class TestReceiverAudioCallback :
int num_called_;
int expected_sampling_frequency_;
- int expected_min_snr_;
- int expected_avg_snr_;
- double avg_snr_;
std::list<ExpectedAudioFrame> expected_frame_;
};
@@ -348,22 +283,24 @@ class TestReceiverVideoCallback :
base::TimeDelta time_since_capture =
render_time - expected_video_frame.capture_time;
+ const base::TimeDelta upper_bound = base::TimeDelta::FromMilliseconds(
+ kDefaultRtpMaxDelayMs + kTimerErrorMs);
- EXPECT_LE(time_since_capture, base::TimeDelta::FromMilliseconds(
- kDefaultRtpMaxDelayMs + kTimerErrorMs));
+ EXPECT_GE(upper_bound, time_since_capture)
+ << "time_since_capture - upper_bound == "
+ << (time_since_capture - upper_bound).InMicroseconds() << " usec";
EXPECT_LE(expected_video_frame.capture_time, render_time);
EXPECT_EQ(expected_video_frame.width, video_frame->width);
EXPECT_EQ(expected_video_frame.height, video_frame->height);
- I420VideoFrame* expected_I420_frame = new I420VideoFrame();
- expected_I420_frame->width = expected_video_frame.width;
- expected_I420_frame->height = expected_video_frame.height;
+ gfx::Size size(expected_video_frame.width, expected_video_frame.height);
+ scoped_refptr<media::VideoFrame> expected_I420_frame =
+ media::VideoFrame::CreateFrame(
+ VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
PopulateVideoFrame(expected_I420_frame, expected_video_frame.start_value);
- double psnr = I420PSNR(*expected_I420_frame, *(video_frame.get()));
+ double psnr = I420PSNR(*(expected_I420_frame.get()), *(video_frame.get()));
EXPECT_GE(psnr, kVideoAcceptedPSNR);
-
- FrameInput::DeleteVideoFrame(expected_I420_frame);
}
int number_times_called() { return num_called_;}
@@ -378,6 +315,11 @@ class TestReceiverVideoCallback :
std::list<ExpectedVideoFrame> expected_frame_;
};
+CastLoggingConfig EnableCastLoggingConfig() {
+ CastLoggingConfig config;
+ config.enable_data_collection = true;
+ return config;
+}
// The actual test class, generate synthetic data for both audio and video and
// send those through the sender and receiver and analyzes the result.
class End2EndTest : public ::testing::Test {
@@ -385,18 +327,20 @@ class End2EndTest : public ::testing::Test {
End2EndTest()
: task_runner_(new test::FakeTaskRunner(&testing_clock_)),
cast_environment_(new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_)),
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ EnableCastLoggingConfig())),
+ start_time_(),
sender_to_receiver_(cast_environment_),
receiver_to_sender_(cast_environment_),
test_receiver_audio_callback_(new TestReceiverAudioCallback()),
- test_receiver_video_callback_(new TestReceiverVideoCallback()),
- audio_angle_(0) {
+ test_receiver_video_callback_(new TestReceiverVideoCallback()) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
}
void SetupConfig(AudioCodec audio_codec,
int audio_sampling_frequency,
+ // TODO(miu): 3rd arg is meaningless?!?
bool external_audio_decoder,
int max_number_of_video_buffers_used) {
audio_sender_config_.sender_ssrc = 1;
@@ -405,7 +349,7 @@ class End2EndTest : public ::testing::Test {
audio_sender_config_.use_external_encoder = false;
audio_sender_config_.frequency = audio_sampling_frequency;
audio_sender_config_.channels = kAudioChannels;
- audio_sender_config_.bitrate = 64000;
+ audio_sender_config_.bitrate = kDefaultAudioEncoderBitrate;
audio_sender_config_.codec = audio_codec;
audio_receiver_config_.feedback_ssrc =
@@ -419,6 +363,9 @@ class End2EndTest : public ::testing::Test {
audio_receiver_config_.channels = kAudioChannels;
audio_receiver_config_.codec = audio_sender_config_.codec;
+ test_receiver_audio_callback_->SetExpectedSamplingFrequency(
+ audio_receiver_config_.frequency);
+
video_sender_config_.sender_ssrc = 3;
video_sender_config_.incoming_feedback_ssrc = 4;
video_sender_config_.rtp_payload_type = 97;
@@ -462,44 +409,28 @@ class End2EndTest : public ::testing::Test {
frame_input_ = cast_sender_->frame_input();
frame_receiver_ = cast_receiver_->frame_receiver();
+
+ audio_bus_factory_.reset(new TestAudioBusFactory(
+ audio_sender_config_.channels, audio_sender_config_.frequency,
+ kSoundFrequency, kSoundVolume));
}
virtual ~End2EndTest() {}
void SendVideoFrame(int start_value, const base::TimeTicks& capture_time) {
- I420VideoFrame* video_frame = new I420VideoFrame();
- video_frame->width = video_sender_config_.width;
- video_frame->height = video_sender_config_.height;
+ if (start_time_.is_null())
+ start_time_ = testing_clock_.NowTicks();
+ start_time_ = testing_clock_.NowTicks();
+ base::TimeDelta time_diff = testing_clock_.NowTicks() - start_time_;
+ gfx::Size size(kVideoWidth, kVideoHeight);
+ EXPECT_TRUE(VideoFrame::IsValidConfig(VideoFrame::I420,
+ size, gfx::Rect(size), size));
+ scoped_refptr<media::VideoFrame> video_frame =
+ media::VideoFrame::CreateFrame(
+ VideoFrame::I420, size, gfx::Rect(size), size, time_diff);
PopulateVideoFrame(video_frame, start_value);
frame_input_->InsertRawVideoFrame(video_frame, capture_time,
- base::Bind(FrameInput::DeleteVideoFrame, video_frame));
- }
-
- PcmAudioFrame* CreateAudioFrame(int num_10ms_blocks, int sound_frequency,
- int sampling_frequency) {
- int number_of_samples = kAudioChannels * num_10ms_blocks *
- sampling_frequency / 100;
- int amplitude = 1000;
-
- PcmAudioFrame* audio_frame = new PcmAudioFrame();
- audio_frame->channels = kAudioChannels;
- audio_frame->frequency = sampling_frequency;
- audio_frame->samples.reserve(number_of_samples);
-
- // Create the sinusoid.
- double increment = (2 * 3.1415926535897932384626433) /
- (static_cast<double>(sampling_frequency) / sound_frequency);
- int sample = 0;
- while (sample < number_of_samples) {
- int16 value = static_cast<int16>(amplitude * sin(audio_angle_));
-
- for (int i = 0; i < kAudioChannels; ++i) {
- audio_frame->samples.insert(audio_frame->samples.end(), value);
- ++sample;
- }
- audio_angle_ += increment;
- }
- return audio_frame;
+ base::Bind(base::DoNothing));
}
void RunTasks(int during_ms) {
@@ -518,6 +449,7 @@ class End2EndTest : public ::testing::Test {
base::SimpleTestTickClock testing_clock_;
scoped_refptr<test::FakeTaskRunner> task_runner_;
scoped_refptr<CastEnvironment> cast_environment_;
+ base::TimeTicks start_time_;
LoopBackTransport sender_to_receiver_;
LoopBackTransport receiver_to_sender_;
@@ -530,18 +462,13 @@ class End2EndTest : public ::testing::Test {
scoped_refptr<TestReceiverAudioCallback> test_receiver_audio_callback_;
scoped_refptr<TestReceiverVideoCallback> test_receiver_video_callback_;
- double audio_angle_;
+ scoped_ptr<TestAudioBusFactory> audio_bus_factory_;
};
// Audio and video test without packet loss using raw PCM 16 audio "codec";
-// note: even though the audio is not coded it is still re-sampled between
-// 48 and 32 KHz.
TEST_F(End2EndTest, LoopNoLossPcm16) {
- // Note running codec in different sampling frequency.
SetupConfig(kPcm16, 32000, false, 1);
Create();
- test_receiver_audio_callback_->SetExpectedResult(kAudioSamplingFrequency, 20,
- 25);
int video_start = 1;
int audio_diff = kFrameTimerMs;
@@ -556,19 +483,21 @@ TEST_F(End2EndTest, LoopNoLossPcm16) {
test_receiver_video_callback_->AddExpectedResult(video_start,
video_sender_config_.width, video_sender_config_.height, send_time);
- PcmAudioFrame* audio_frame = CreateAudioFrame(num_10ms_blocks,
- kSoundFrequency, kAudioSamplingFrequency);
+ scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
+ base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
if (i != 0) {
// Due to the re-sampler and NetEq in the webrtc AudioCodingModule the
// first samples will be 0 and then slowly ramp up to its real amplitude;
// ignore the first frame.
- test_receiver_audio_callback_->AddExpectedResult(audio_frame,
+ test_receiver_audio_callback_->AddExpectedResult(
+ ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
num_10ms_blocks, send_time);
}
- frame_input_->InsertRawAudioFrame(audio_frame, send_time,
- base::Bind(FrameInput::DeleteAudioFrame, audio_frame));
+ AudioBus* const audio_bus_ptr = audio_bus.get();
+ frame_input_->InsertAudio(audio_bus_ptr, send_time,
+ base::Bind(base::DoNothing));
SendVideoFrame(video_start, send_time);
@@ -577,12 +506,12 @@ TEST_F(End2EndTest, LoopNoLossPcm16) {
if (i == 0) {
frame_receiver_->GetRawAudioFrame(num_10ms_blocks,
- kAudioSamplingFrequency,
+ audio_sender_config_.frequency,
base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame,
test_receiver_audio_callback_));
} else {
frame_receiver_->GetRawAudioFrame(num_10ms_blocks,
- kAudioSamplingFrequency,
+ audio_sender_config_.frequency,
base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame,
test_receiver_audio_callback_));
}
@@ -604,24 +533,21 @@ TEST_F(End2EndTest, LoopNoLossPcm16) {
// This tests our external decoder interface for Audio.
// Audio test without packet loss using raw PCM 16 audio "codec";
TEST_F(End2EndTest, LoopNoLossPcm16ExternalDecoder) {
- // Note: Create an input in the same sampling frequency as the codec to avoid
- // re-sampling.
- const int audio_sampling_frequency = 32000;
- SetupConfig(kPcm16, audio_sampling_frequency, true, 1);
+ SetupConfig(kPcm16, 32000, true, 1);
Create();
- test_receiver_audio_callback_->SetExpectedResult(audio_sampling_frequency, 96,
- 96);
int i = 0;
for (; i < 100; ++i) {
base::TimeTicks send_time = testing_clock_.NowTicks();
- PcmAudioFrame* audio_frame = CreateAudioFrame(1, kSoundFrequency,
- audio_sampling_frequency);
- test_receiver_audio_callback_->AddExpectedResult(audio_frame, 1,
- send_time);
+ scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
+ base::TimeDelta::FromMilliseconds(10)));
+ test_receiver_audio_callback_->AddExpectedResult(
+ ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
+ 1, send_time);
- frame_input_->InsertRawAudioFrame(audio_frame, send_time,
- base::Bind(FrameInput::DeleteAudioFrame, audio_frame));
+ AudioBus* const audio_bus_ptr = audio_bus.get();
+ frame_input_->InsertAudio(audio_bus_ptr, send_time,
+ base::Bind(base::DoNothing));
RunTasks(10);
frame_receiver_->GetCodedAudioFrame(
@@ -634,37 +560,37 @@ TEST_F(End2EndTest, LoopNoLossPcm16ExternalDecoder) {
// This tests our Opus audio codec without video.
TEST_F(End2EndTest, LoopNoLossOpus) {
- SetupConfig(kOpus, kAudioSamplingFrequency, false, 1);
+ SetupConfig(kOpus, kDefaultAudioSamplingRate, false, 1);
Create();
- test_receiver_audio_callback_->SetExpectedResult(
- kAudioSamplingFrequency, 18, 20);
int i = 0;
for (; i < 100; ++i) {
int num_10ms_blocks = 3;
base::TimeTicks send_time = testing_clock_.NowTicks();
- PcmAudioFrame* audio_frame = CreateAudioFrame(num_10ms_blocks,
- kSoundFrequency, kAudioSamplingFrequency);
+ scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
+ base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
if (i != 0) {
- test_receiver_audio_callback_->AddExpectedResult(audio_frame,
+ test_receiver_audio_callback_->AddExpectedResult(
+ ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
num_10ms_blocks, send_time);
}
- frame_input_->InsertRawAudioFrame(audio_frame, send_time,
- base::Bind(FrameInput::DeleteAudioFrame, audio_frame));
+ AudioBus* const audio_bus_ptr = audio_bus.get();
+ frame_input_->InsertAudio(audio_bus_ptr, send_time,
+ base::Bind(base::DoNothing));
RunTasks(30);
if (i == 0) {
frame_receiver_->GetRawAudioFrame(num_10ms_blocks,
- kAudioSamplingFrequency,
+ audio_sender_config_.frequency,
base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame,
test_receiver_audio_callback_));
} else {
frame_receiver_->GetRawAudioFrame(num_10ms_blocks,
- kAudioSamplingFrequency,
+ audio_sender_config_.frequency,
base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame,
test_receiver_audio_callback_));
}
@@ -678,10 +604,8 @@ TEST_F(End2EndTest, LoopNoLossOpus) {
// TODO(miu): Test disabled because of non-determinism.
// http://crbug.com/314233
TEST_F(End2EndTest, DISABLED_StartSenderBeforeReceiver) {
- SetupConfig(kOpus, kAudioSamplingFrequency, false, 1);
+ SetupConfig(kOpus, kDefaultAudioSamplingRate, false, 1);
Create();
- test_receiver_audio_callback_->SetExpectedResult(
- kAudioSamplingFrequency, 18, 20);
int video_start = 1;
int audio_diff = kFrameTimerMs;
@@ -693,11 +617,12 @@ TEST_F(End2EndTest, DISABLED_StartSenderBeforeReceiver) {
audio_diff -= num_10ms_blocks * 10;
base::TimeTicks send_time = testing_clock_.NowTicks();
- PcmAudioFrame* audio_frame = CreateAudioFrame(num_10ms_blocks,
- kSoundFrequency, kAudioSamplingFrequency);
+ scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
+ base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
- frame_input_->InsertRawAudioFrame(audio_frame, send_time,
- base::Bind(FrameInput::DeleteAudioFrame, audio_frame));
+ AudioBus* const audio_bus_ptr = audio_bus.get();
+ frame_input_->InsertAudio(audio_bus_ptr, send_time,
+ base::Bind(base::DoNothing));
SendVideoFrame(video_start, send_time);
RunTasks(kFrameTimerMs);
@@ -714,16 +639,19 @@ TEST_F(End2EndTest, DISABLED_StartSenderBeforeReceiver) {
audio_diff -= num_10ms_blocks * 10;
base::TimeTicks send_time = testing_clock_.NowTicks();
- PcmAudioFrame* audio_frame = CreateAudioFrame(num_10ms_blocks,
- kSoundFrequency, kAudioSamplingFrequency);
-
- frame_input_->InsertRawAudioFrame(audio_frame, send_time,
- base::Bind(FrameInput::DeleteAudioFrame, audio_frame));
+ scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
+ base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
if (j >= number_of_audio_frames_to_ignore) {
- test_receiver_audio_callback_->AddExpectedResult(audio_frame,
+ test_receiver_audio_callback_->AddExpectedResult(
+ ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
num_10ms_blocks, send_time);
}
+
+ AudioBus* const audio_bus_ptr = audio_bus.get();
+ frame_input_->InsertAudio(audio_bus_ptr, send_time,
+ base::Bind(base::DoNothing));
+
test_receiver_video_callback_->AddExpectedResult(video_start,
video_sender_config_.width, video_sender_config_.height, send_time);
@@ -733,12 +661,12 @@ TEST_F(End2EndTest, DISABLED_StartSenderBeforeReceiver) {
if (j < number_of_audio_frames_to_ignore) {
frame_receiver_->GetRawAudioFrame(num_10ms_blocks,
- kAudioSamplingFrequency,
+ audio_sender_config_.frequency,
base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame,
test_receiver_audio_callback_));
} else {
frame_receiver_->GetRawAudioFrame(num_10ms_blocks,
- kAudioSamplingFrequency,
+ audio_sender_config_.frequency,
base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame,
test_receiver_audio_callback_));
}
@@ -755,7 +683,7 @@ TEST_F(End2EndTest, DISABLED_StartSenderBeforeReceiver) {
// This tests a network glitch lasting for 10 video frames.
TEST_F(End2EndTest, GlitchWith3Buffers) {
- SetupConfig(kOpus, kAudioSamplingFrequency, false, 3);
+ SetupConfig(kOpus, kDefaultAudioSamplingRate, false, 3);
video_sender_config_.rtp_max_delay_ms = 67;
video_receiver_config_.rtp_max_delay_ms = 67;
Create();
@@ -804,7 +732,7 @@ TEST_F(End2EndTest, GlitchWith3Buffers) {
}
TEST_F(End2EndTest, DropEveryOtherFrame3Buffers) {
- SetupConfig(kOpus, kAudioSamplingFrequency, false, 3);
+ SetupConfig(kOpus, kDefaultAudioSamplingRate, false, 3);
video_sender_config_.rtp_max_delay_ms = 67;
video_receiver_config_.rtp_max_delay_ms = 67;
Create();
@@ -839,7 +767,7 @@ TEST_F(End2EndTest, DropEveryOtherFrame3Buffers) {
}
TEST_F(End2EndTest, ResetReferenceFrameId) {
- SetupConfig(kOpus, kAudioSamplingFrequency, false, 3);
+ SetupConfig(kOpus, kDefaultAudioSamplingRate, false, 3);
video_sender_config_.rtp_max_delay_ms = 67;
video_receiver_config_.rtp_max_delay_ms = 67;
Create();
@@ -865,6 +793,239 @@ TEST_F(End2EndTest, ResetReferenceFrameId) {
test_receiver_video_callback_->number_times_called());
}
+TEST_F(End2EndTest, CryptoVideo) {
+ SetupConfig(kPcm16, 32000, false, 1);
+
+ video_sender_config_.aes_iv_mask =
+ ConvertFromBase16String("1234567890abcdeffedcba0987654321");
+ video_sender_config_.aes_key =
+ ConvertFromBase16String("deadbeefcafeb0b0b0b0cafedeadbeef");
+
+ video_receiver_config_.aes_iv_mask = video_sender_config_.aes_iv_mask;
+ video_receiver_config_.aes_key = video_sender_config_.aes_key;
+
+ Create();
+
+ int frames_counter = 0;
+ for (; frames_counter < 20; ++frames_counter) {
+ const base::TimeTicks send_time = testing_clock_.NowTicks();
+
+ SendVideoFrame(frames_counter, send_time);
+
+ test_receiver_video_callback_->AddExpectedResult(frames_counter,
+ video_sender_config_.width, video_sender_config_.height, send_time);
+
+ // GetRawVideoFrame will not return the frame until we are close to the
+ // time in which we should render the frame.
+ frame_receiver_->GetRawVideoFrame(
+ base::Bind(&TestReceiverVideoCallback::CheckVideoFrame,
+ test_receiver_video_callback_));
+ RunTasks(kFrameTimerMs);
+ }
+ RunTasks(2 * kFrameTimerMs + 1); // Empty the pipeline.
+ EXPECT_EQ(frames_counter,
+ test_receiver_video_callback_->number_times_called());
+}
+
+TEST_F(End2EndTest, CryptoAudio) {
+ SetupConfig(kPcm16, 32000, false, 1);
+
+ audio_sender_config_.aes_iv_mask =
+ ConvertFromBase16String("abcdeffedcba12345678900987654321");
+ audio_sender_config_.aes_key =
+ ConvertFromBase16String("deadbeefcafecafedeadbeefb0b0b0b0");
+
+ audio_receiver_config_.aes_iv_mask = audio_sender_config_.aes_iv_mask;
+ audio_receiver_config_.aes_key = audio_sender_config_.aes_key;
+
+ Create();
+
+ int frames_counter = 0;
+ for (; frames_counter < 20; ++frames_counter) {
+ int num_10ms_blocks = 2;
+
+ const base::TimeTicks send_time = testing_clock_.NowTicks();
+
+ scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
+ base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
+
+ if (frames_counter != 0) {
+ // Due to the re-sampler and NetEq in the webrtc AudioCodingModule the
+ // first samples will be 0 and then slowly ramp up to its real amplitude;
+ // ignore the first frame.
+ test_receiver_audio_callback_->AddExpectedResult(
+ ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
+ num_10ms_blocks, send_time);
+ }
+ AudioBus* const audio_bus_ptr = audio_bus.get();
+ frame_input_->InsertAudio(audio_bus_ptr, send_time,
+ base::Bind(base::DoNothing));
+
+ RunTasks(num_10ms_blocks * 10);
+
+ if (frames_counter == 0) {
+ frame_receiver_->GetRawAudioFrame(num_10ms_blocks,
+ 32000,
+ base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame,
+ test_receiver_audio_callback_));
+ } else {
+ frame_receiver_->GetRawAudioFrame(num_10ms_blocks,
+ 32000,
+ base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame,
+ test_receiver_audio_callback_));
+ }
+ }
+ RunTasks(2 * kFrameTimerMs + 1); // Empty the pipeline.
+ EXPECT_EQ(frames_counter - 1,
+ test_receiver_audio_callback_->number_times_called());
+}
+
+// Video test without packet loss; This test is targeted at testing the logging
+// aspects of the end2end, but is basically equivalent to LoopNoLossPcm16.
+TEST_F(End2EndTest, VideoLogging) {
+ SetupConfig(kPcm16, 32000, false, 1);
+ Create();
+
+ int video_start = 1;
+ int i = 0;
+ for (; i < 1; ++i) {
+ base::TimeTicks send_time = testing_clock_.NowTicks();
+ test_receiver_video_callback_->AddExpectedResult(video_start,
+ video_sender_config_.width, video_sender_config_.height, send_time);
+
+ SendVideoFrame(video_start, send_time);
+ RunTasks(kFrameTimerMs);
+
+ frame_receiver_->GetRawVideoFrame(
+ base::Bind(&TestReceiverVideoCallback::CheckVideoFrame,
+ test_receiver_video_callback_));
+
+ video_start++;
+ }
+
+ // Basic tests.
+ RunTasks(2 * kFrameTimerMs + 1); // Empty the receiver pipeline.
+ EXPECT_EQ(i, test_receiver_video_callback_->number_times_called());
+ // Logging tests.
+ LoggingImpl* logging = cast_environment_->Logging();
+
+ // Frame logging.
+
+ // Verify that all frames and all required events were logged.
+ FrameRawMap frame_raw_log = logging->GetFrameRawData();
+ // Every frame should have only one entry.
+ EXPECT_EQ(static_cast<unsigned int>(i), frame_raw_log.size());
+ FrameRawMap::const_iterator frame_it = frame_raw_log.begin();
+ // Choose a video frame, and verify that all events were logged.
+ std::vector<CastLoggingEvent> event_log = frame_it->second.type;
+ EXPECT_TRUE((std::find(event_log.begin(), event_log.end(),
+ kVideoFrameReceived)) != event_log.end());
+ EXPECT_TRUE((std::find(event_log.begin(), event_log.end(),
+ kVideoFrameSentToEncoder)) != event_log.end());
+ EXPECT_TRUE((std::find(event_log.begin(), event_log.end(),
+ kVideoFrameEncoded)) != event_log.end());
+ EXPECT_TRUE((std::find(event_log.begin(), event_log.end(),
+ kVideoRenderDelay)) != event_log.end());
+ // TODO(mikhal): Plumb this one through.
+ EXPECT_TRUE((std::find(event_log.begin(), event_log.end(),
+ kVideoFrameDecoded)) == event_log.end());
+ // Verify that there were no other events logged with respect to this frame.
+ EXPECT_EQ(4u, event_log.size());
+
+ // Packet logging.
+ // Verify that all packet related events were logged.
+ PacketRawMap packet_raw_log = logging->GetPacketRawData();
+ // Every rtp_timestamp should have only one entry.
+ EXPECT_EQ(static_cast<unsigned int>(i), packet_raw_log.size());
+ PacketRawMap::const_iterator packet_it = packet_raw_log.begin();
+ // Choose a packet, and verify that all events were logged.
+ event_log = (++(packet_it->second.packet_map.begin()))->second.type;
+ EXPECT_TRUE((std::find(event_log.begin(), event_log.end(),
+ kPacketSentToPacer)) != event_log.end());
+ EXPECT_TRUE((std::find(event_log.begin(), event_log.end(),
+ kPacketSentToNetwork)) != event_log.end());
+ EXPECT_TRUE((std::find(event_log.begin(), event_log.end(),
+ kPacketReceived)) != event_log.end());
+ // Verify that there were no other events logged with respect to this frame.
+ EXPECT_EQ(3u, event_log.size());
+}
+
+// Audio test without packet loss; This test is targeted at testing the logging
+// aspects of the end2end, but is basically equivalent to LoopNoLossPcm16.
+TEST_F(End2EndTest, AudioLogging) {
+ SetupConfig(kPcm16, 32000, false, 1);
+ Create();
+
+ int audio_diff = kFrameTimerMs;
+ int i = 0;
+
+ for (; i < 10; ++i) {
+ int num_10ms_blocks = audio_diff / 10;
+ audio_diff -= num_10ms_blocks * 10;
+ base::TimeTicks send_time = testing_clock_.NowTicks();
+
+ scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
+ base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
+
+ if (i != 0) {
+ // Due to the re-sampler and NetEq in the webrtc AudioCodingModule the
+ // first samples will be 0 and then slowly ramp up to its real amplitude;
+ // ignore the first frame.
+ test_receiver_audio_callback_->AddExpectedResult(
+ ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
+ num_10ms_blocks, send_time);
+ }
+
+ AudioBus* const audio_bus_ptr = audio_bus.get();
+ frame_input_->InsertAudio(audio_bus_ptr, send_time,
+ base::Bind(base::DoNothing));
+
+ RunTasks(kFrameTimerMs);
+ audio_diff += kFrameTimerMs;
+
+ if (i == 0) {
+ frame_receiver_->GetRawAudioFrame(num_10ms_blocks,
+ audio_sender_config_.frequency,
+ base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame,
+ test_receiver_audio_callback_));
+ } else {
+ frame_receiver_->GetRawAudioFrame(num_10ms_blocks,
+ audio_sender_config_.frequency,
+ base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame,
+ test_receiver_audio_callback_));
+ }
+ }
+
+ // Basic tests.
+ RunTasks(2 * kFrameTimerMs + 1); // Empty the receiver pipeline.
+  EXPECT_EQ(i - 1, test_receiver_audio_callback_->number_times_called());
+ // Logging tests.
+ LoggingImpl* logging = cast_environment_->Logging();
+ // Verify that all frames and all required events were logged.
+ FrameRawMap frame_raw_log = logging->GetFrameRawData();
+ // TODO(mikhal): Results are wrong. Need to resolve passing/calculation of
+ // rtp_timestamp for audio for this to work.
+ // Should have logged both audio and video. Every frame should have only one
+ // entry.
+ //EXPECT_EQ(static_cast<unsigned int>(i - 1), frame_raw_log.size());
+ FrameRawMap::const_iterator frame_it = frame_raw_log.begin();
+ // Choose a video frame, and verify that all events were logged.
+ std::vector<CastLoggingEvent> event_log = frame_it->second.type;
+ EXPECT_TRUE((std::find(event_log.begin(), event_log.end(),
+ kAudioFrameReceived)) != event_log.end());
+ EXPECT_TRUE((std::find(event_log.begin(), event_log.end(),
+ kAudioFrameEncoded)) != event_log.end());
+ // EXPECT_TRUE((std::find(event_log.begin(), event_log.end(),
+ // kAudioPlayoutDelay)) != event_log.end());
+ // TODO(mikhal): Plumb this one through.
+ EXPECT_TRUE((std::find(event_log.begin(), event_log.end(),
+ kAudioFrameDecoded)) == event_log.end());
+ // Verify that there were no other events logged with respect to this frame.
+ EXPECT_EQ(2u, event_log.size());
+}
+
+
// TODO(pwestin): Add repeatable packet loss test.
// TODO(pwestin): Add test for misaligned send get calls.
// TODO(pwestin): Add more tests that does not resample.
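One subtle fix in the hunk above: when CheckCodedPcmAudioFrame() rebuilds int16 samples from the big-endian byte stream, the low byte must be cast to uint8 first, otherwise a char value of 0x80 or above sign-extends and corrupts the reconstructed sample. A standalone sketch of the corrected decode (not part of the patch):

#include <cstdint>
#include <string>

int16_t DecodeBigEndianSample(const std::string& data, size_t index) {
  // High byte first, then low byte; the uint8_t cast prevents sign extension
  // of the (possibly signed) char before the addition.
  const uint16_t sample =
      static_cast<uint8_t>(data[1 + index * sizeof(uint16_t)]) +
      (static_cast<uint16_t>(data[index * sizeof(uint16_t)]) << 8);
  return static_cast<int16_t>(sample);
}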
diff --git a/media/cast/test/linux_output_window.cc b/media/cast/test/linux_output_window.cc
new file mode 100644
index 0000000000..f7405e03ce
--- /dev/null
+++ b/media/cast/test/linux_output_window.cc
@@ -0,0 +1,134 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/test/linux_output_window.h"
+
+#include "base/logging.h"
+#include "third_party/libyuv/include/libyuv/convert.h"
+
+namespace media {
+namespace cast {
+namespace test {
+
+LinuxOutputWindow::LinuxOutputWindow(int x_pos,
+ int y_pos,
+ int width,
+ int height,
+ const std::string& name) {
+ CreateWindow( x_pos, y_pos, width, height, name);
+}
+
+LinuxOutputWindow::~LinuxOutputWindow() {
+ if (display_ && window_) {
+ XUnmapWindow(display_, window_);
+ XDestroyWindow(display_, window_);
+ XSync(display_, false);
+ if (gc_)
+ XFreeGC(display_, gc_);
+ XCloseDisplay(display_);
+ }
+}
+
+void LinuxOutputWindow::CreateWindow(int x_pos,
+ int y_pos,
+ int width,
+ int height,
+ const std::string& name) {
+ display_ = XOpenDisplay(NULL);
+ if (display_ == NULL) {
+ // There's no point to continue if this happens: nothing will work anyway.
+ VLOG(1) << "Failed to connect to X server: X environment likely broken";
+ NOTREACHED();
+ }
+
+ int screen = DefaultScreen(display_);
+
+ // Try to establish a 24-bit TrueColor display.
+ // (our environment must allow this).
+ XVisualInfo visual_info;
+ if (XMatchVisualInfo(display_, screen, 24, TrueColor, &visual_info) == 0) {
+ VLOG(1) << "Failed to establish 24-bit TrueColor in X environment.";
+ NOTREACHED();
+ }
+
+ // Create suitable window attributes.
+ XSetWindowAttributes window_attributes;
+ window_attributes.colormap = XCreateColormap(
+ display_, DefaultRootWindow(display_), visual_info.visual, AllocNone);
+ window_attributes.event_mask = StructureNotifyMask | ExposureMask;
+ window_attributes.background_pixel = 0;
+ window_attributes.border_pixel = 0;
+
+ unsigned long attribute_mask = CWBackPixel | CWBorderPixel | CWColormap |
+ CWEventMask;
+
+ window_ = XCreateWindow(display_, DefaultRootWindow(display_), x_pos,
+ y_pos, width, height, 0, visual_info.depth,
+ InputOutput, visual_info.visual,
+ attribute_mask, &window_attributes);
+
+ // Set window name.
+ XStoreName(display_, window_, name.c_str());
+ XSetIconName(display_, window_, name.c_str());
+
+ // Make x report events for mask.
+ XSelectInput(display_, window_, StructureNotifyMask);
+
+ // Map the window to the display.
+ XMapWindow(display_, window_);
+
+ // Wait for map event.
+ XEvent event;
+ do {
+ XNextEvent(display_, &event);
+ } while (event.type != MapNotify || event.xmap.event != window_);
+
+ gc_ = XCreateGC(display_, window_, 0, 0);
+
+ // create shared memory image
+ image_ = XShmCreateImage(display_, CopyFromParent, 24, ZPixmap, NULL,
+ &shminfo_, width, height);
+ shminfo_.shmid = shmget(IPC_PRIVATE,
+ (image_->bytes_per_line * image_->height),
+ IPC_CREAT | 0777);
+ shminfo_.shmaddr = image_->data = (char*) shmat(shminfo_.shmid, 0, 0);
+ if (image_->data == reinterpret_cast<char*>(-1)) {
+ VLOG(1) << "XShmCreateImage failed";
+ NOTREACHED();
+ }
+ render_buffer_ = reinterpret_cast<uint8_t*>(image_->data);
+ shminfo_.readOnly = false;
+
+ // Attach image to display.
+ if (!XShmAttach(display_, &shminfo_)) {
+ VLOG(1) << "XShmAttach failed";
+ NOTREACHED();
+ }
+ XSync(display_, false);
+}
+
+void LinuxOutputWindow::RenderFrame(const I420VideoFrame& video_frame) {
+ libyuv::I420ToARGB(video_frame.y_plane.data,
+ video_frame.y_plane.stride,
+ video_frame.u_plane.data,
+ video_frame.u_plane.stride,
+ video_frame.v_plane.data,
+ video_frame.v_plane.stride,
+ render_buffer_,
+ video_frame.width * 4, // Stride.
+ video_frame.width,
+ video_frame.height);
+
+ // Place image in window.
+ XShmPutImage(display_, window_, gc_, image_, 0, 0, 0, 0,
+ video_frame.width,
+ video_frame.height, true);
+
+ // Very important for the image to update properly!
+ XSync(display_, false);
+}
+
+} // namespace test
+} // namespace cast
+} // namespace media
\ No newline at end of file
diff --git a/media/cast/test/linux_output_window.h b/media/cast/test/linux_output_window.h
new file mode 100644
index 0000000000..ffaba3a29d
--- /dev/null
+++ b/media/cast/test/linux_output_window.h
@@ -0,0 +1,49 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TEST_LINUX_OUTPUT_WINDOW_H_
+#define MEDIA_CAST_TEST_LINUX_OUTPUT_WINDOW_H_
+
+#include <X11/Xlib.h>
+#include <X11/Xutil.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include <X11/extensions/XShm.h>
+
+#include "media/cast/cast_config.h"
+
+namespace media {
+namespace cast {
+namespace test {
+
+class LinuxOutputWindow {
+ public:
+ LinuxOutputWindow(int x_pos,
+ int y_pos,
+ int width,
+ int height,
+ const std::string& name);
+ virtual ~LinuxOutputWindow();
+
+ void RenderFrame(const I420VideoFrame& video_frame);
+
+ private:
+ void CreateWindow(int x_pos,
+ int y_pos,
+ int width,
+ int height,
+ const std::string& name);
+ uint8* render_buffer_;
+ Display* display_;
+ Window window_;
+ XShmSegmentInfo shminfo_;
+ GC gc_;
+ XImage* image_;
+};
+
+} // namespace test
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TEST_LINUX_OUTPUT_WINDOW_H_
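A hedged usage sketch for the window class above (not part of the patch; the position, size and title mirror what the receiver tool below uses). Each RenderFrame() call converts the I420 frame to ARGB and blits it through X shared memory:

#include "media/cast/cast_config.h"
#include "media/cast/test/linux_output_window.h"

void ShowFrame(const media::cast::I420VideoFrame& frame) {
  // One window for the lifetime of the process; 1280x720 at the top-left.
  static media::cast::test::LinuxOutputWindow window(
      0, 0, 1280, 720, "Cast_receiver");
  window.RenderFrame(frame);
}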
diff --git a/media/cast/test/receiver.cc b/media/cast/test/receiver.cc
new file mode 100644
index 0000000000..54cff9a1c3
--- /dev/null
+++ b/media/cast/test/receiver.cc
@@ -0,0 +1,263 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+#include <climits>
+#include <cstdarg>
+#include <cstdio>
+#include <string>
+
+#include "base/at_exit.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/threading/thread.h"
+#include "base/time/default_tick_clock.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/cast_receiver.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/test/transport/transport.h"
+#include "media/cast/test/utility/input_helper.h"
+
+#if defined(OS_LINUX)
+#include "media/cast/test/linux_output_window.h"
+#endif // OS_LINUX
+
+namespace media {
+namespace cast {
+
+#define DEFAULT_SEND_PORT "2346"
+#define DEFAULT_RECEIVE_PORT "2344"
+#define DEFAULT_SEND_IP "127.0.0.1"
+#define DEFAULT_RESTART "0"
+#define DEFAULT_AUDIO_FEEDBACK_SSRC "2"
+#define DEFAULT_AUDIO_INCOMING_SSRC "1"
+#define DEFAULT_AUDIO_PAYLOAD_TYPE "127"
+#define DEFAULT_VIDEO_FEEDBACK_SSRC "12"
+#define DEFAULT_VIDEO_INCOMING_SSRC "11"
+#define DEFAULT_VIDEO_PAYLOAD_TYPE "96"
+#define DEFAULT_VIDEO_CODEC_WIDTH "640"
+#define DEFAULT_VIDEO_CODEC_HEIGHT "480"
+#define DEFAULT_VIDEO_CODEC_BITRATE "2000"
+
+static const int kAudioSamplingFrequency = 48000;
+#if defined(OS_LINUX)
+const int kVideoWindowWidth = 1280;
+const int kVideoWindowHeight = 720;
+#endif // OS_LINUX
+static const int kFrameTimerMs = 33;
+
+
+void GetPorts(int* tx_port, int* rx_port) {
+ test::InputBuilder tx_input("Enter send port.",
+ DEFAULT_SEND_PORT, 1, INT_MAX);
+ *tx_port = tx_input.GetIntInput();
+
+ test::InputBuilder rx_input("Enter receive port.",
+ DEFAULT_RECEIVE_PORT, 1, INT_MAX);
+ *rx_port = rx_input.GetIntInput();
+}
+
+std::string GetIpAddress(const std::string display_text) {
+ test::InputBuilder input(display_text, DEFAULT_SEND_IP,
+ INT_MIN, INT_MAX);
+ std::string ip_address = input.GetStringInput();
+ // Ensure correct form:
+ while (std::count(ip_address.begin(), ip_address.end(), '.') != 3) {
+ ip_address = input.GetStringInput();
+ }
+ return ip_address;
+}
+
+void GetSsrcs(AudioReceiverConfig* audio_config) {
+ test::InputBuilder input_tx("Choose audio sender SSRC.",
+ DEFAULT_AUDIO_FEEDBACK_SSRC, 1, INT_MAX);
+ audio_config->feedback_ssrc = input_tx.GetIntInput();
+
+ test::InputBuilder input_rx("Choose audio receiver SSRC.",
+ DEFAULT_AUDIO_INCOMING_SSRC, 1, INT_MAX);
+ audio_config->incoming_ssrc = input_tx.GetIntInput();
+}
+
+void GetSsrcs(VideoReceiverConfig* video_config) {
+ test::InputBuilder input_tx("Choose video sender SSRC.",
+ DEFAULT_VIDEO_FEEDBACK_SSRC, 1, INT_MAX);
+ video_config->feedback_ssrc = input_tx.GetIntInput();
+
+ test::InputBuilder input_rx("Choose video receiver SSRC.",
+ DEFAULT_VIDEO_INCOMING_SSRC, 1, INT_MAX);
+ video_config->incoming_ssrc = input_rx.GetIntInput();
+}
+
+void GetPayloadtype(AudioReceiverConfig* audio_config) {
+ test::InputBuilder input("Choose audio receiver payload type.",
+ DEFAULT_AUDIO_PAYLOAD_TYPE, 96, 127);
+ audio_config->rtp_payload_type = input.GetIntInput();
+}
+
+AudioReceiverConfig GetAudioReceiverConfig() {
+ AudioReceiverConfig audio_config;
+
+ GetSsrcs(&audio_config);
+ GetPayloadtype(&audio_config);
+
+ audio_config.rtcp_c_name = "audio_receiver@a.b.c.d";
+
+ VLOG(1) << "Using OPUS 48Khz stereo";
+ audio_config.use_external_decoder = false;
+ audio_config.frequency = 48000;
+ audio_config.channels = 2;
+ audio_config.codec = kOpus;
+ return audio_config;
+}
+
+void GetPayloadtype(VideoReceiverConfig* video_config) {
+ test::InputBuilder input("Choose video receiver payload type.",
+ DEFAULT_VIDEO_PAYLOAD_TYPE, 96, 127);
+ video_config->rtp_payload_type = input.GetIntInput();
+}
+
+VideoReceiverConfig GetVideoReceiverConfig() {
+ VideoReceiverConfig video_config;
+
+ GetSsrcs(&video_config);
+ GetPayloadtype(&video_config);
+
+ video_config.rtcp_c_name = "video_receiver@a.b.c.d";
+
+ video_config.use_external_decoder = false;
+
+ VLOG(1) << "Using VP8";
+ video_config.codec = kVp8;
+ return video_config;
+}
+
+
+class ReceiveProcess : public base::RefCountedThreadSafe<ReceiveProcess> {
+ public:
+ explicit ReceiveProcess(scoped_refptr<FrameReceiver> frame_receiver)
+ : frame_receiver_(frame_receiver),
+#if defined(OS_LINUX)
+ render_(0, 0, kVideoWindowWidth, kVideoWindowHeight, "Cast_receiver"),
+#endif // OS_LINUX
+ last_playout_time_(),
+ last_render_time_() {}
+
+ void Start() {
+ GetAudioFrame(base::TimeDelta::FromMilliseconds(kFrameTimerMs));
+ GetVideoFrame();
+ }
+
+ protected:
+ virtual ~ReceiveProcess() {}
+
+ private:
+ friend class base::RefCountedThreadSafe<ReceiveProcess>;
+
+ void DisplayFrame(scoped_ptr<I420VideoFrame> frame,
+ const base::TimeTicks& render_time) {
+#ifdef OS_LINUX
+ render_.RenderFrame(*frame);
+#endif // OS_LINUX
+ // Print out the delta between frames.
+ if (!last_render_time_.is_null()){
+ base::TimeDelta time_diff = render_time - last_render_time_;
+ VLOG(0) << " RenderDelay[mS] = " << time_diff.InMilliseconds();
+ }
+ last_render_time_ = render_time;
+ GetVideoFrame();
+ }
+
+ void ReceiveAudioFrame(scoped_ptr<PcmAudioFrame> audio_frame,
+ const base::TimeTicks& playout_time) {
+ // For audio just print the playout delta between audio frames.
+ // Default diff time is kFrameTimerMs.
+ base::TimeDelta time_diff =
+ base::TimeDelta::FromMilliseconds(kFrameTimerMs);
+ if (!last_playout_time_.is_null()){
+ time_diff = playout_time - last_playout_time_;
+ VLOG(0) << " PlayoutDelay[mS] = " << time_diff.InMilliseconds();
+ }
+ last_playout_time_ = playout_time;
+ GetAudioFrame(time_diff);
+ }
+
+ void GetAudioFrame(base::TimeDelta playout_diff) {
+ int num_10ms_blocks = playout_diff.InMilliseconds() / 10;
+ frame_receiver_->GetRawAudioFrame(num_10ms_blocks, kAudioSamplingFrequency,
+ base::Bind(&ReceiveProcess::ReceiveAudioFrame, this));
+ }
+
+ void GetVideoFrame() {
+ frame_receiver_->GetRawVideoFrame(
+ base::Bind(&ReceiveProcess::DisplayFrame, this));
+ }
+
+ scoped_refptr<FrameReceiver> frame_receiver_;
+#ifdef OS_LINUX
+ test::LinuxOutputWindow render_;
+#endif // OS_LINUX
+ base::TimeTicks last_playout_time_;
+ base::TimeTicks last_render_time_;
+};
+
+} // namespace cast
+} // namespace media
+
+int main(int argc, char** argv) {
+ base::AtExitManager at_exit;
+ base::MessageLoopForIO main_message_loop;
+ VLOG(1) << "Cast Receiver";
+ base::Thread main_thread("Cast main send thread");
+ base::Thread audio_thread("Cast audio decoder thread");
+ base::Thread video_thread("Cast video decoder thread");
+ main_thread.Start();
+ audio_thread.Start();
+ video_thread.Start();
+
+ base::DefaultTickClock clock;
+ // Enable receiver side threads, and disable logging.
+ scoped_refptr<media::cast::CastEnvironment> cast_environment(new
+ media::cast::CastEnvironment(&clock,
+ main_thread.message_loop_proxy(),
+ NULL,
+ audio_thread.message_loop_proxy(),
+ NULL,
+ video_thread.message_loop_proxy(),
+ media::cast::GetDefaultCastLoggingConfig()));
+
+ media::cast::AudioReceiverConfig audio_config =
+ media::cast::GetAudioReceiverConfig();
+ media::cast::VideoReceiverConfig video_config =
+ media::cast::GetVideoReceiverConfig();
+
+ scoped_ptr<media::cast::test::Transport> transport(
+ new media::cast::test::Transport(main_message_loop.message_loop_proxy()));
+ scoped_ptr<media::cast::CastReceiver> cast_receiver(
+ media::cast::CastReceiver::CreateCastReceiver(
+ cast_environment,
+ audio_config,
+ video_config,
+ transport->packet_sender()));
+
+ media::cast::PacketReceiver* packet_receiver =
+ cast_receiver->packet_receiver();
+
+ int send_to_port, receive_port;
+ media::cast::GetPorts(&send_to_port, &receive_port);
+ std::string ip_address = media::cast::GetIpAddress("Enter destination IP.");
+ std::string local_ip_address = media::cast::GetIpAddress("Enter local IP.");
+ transport->SetLocalReceiver(packet_receiver, ip_address, local_ip_address,
+ receive_port);
+ transport->SetSendDestination(ip_address, send_to_port);
+
+ scoped_refptr<media::cast::ReceiveProcess> receive_process(
+ new media::cast::ReceiveProcess(cast_receiver->frame_receiver()));
+ receive_process->Start();
+ main_message_loop.Run();
+ transport->StopReceiving();
+ return 0;
+}
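The five task-runner arguments passed to CastEnvironment above are position dependent. Judging from the encode_decode_test comment ("Enable only the video decoder and main threads"), they appear to map, in order, to the main, audio-encode, audio-decode, video-encode and video-decode threads, which is why a pure receiver passes NULL for the two encoder slots. An annotated restatement as a sketch (not part of the patch; the positional meaning is inferred from the comments in this change, and the header locations are assumed):

#include "base/threading/thread.h"
#include "base/time/tick_clock.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"

scoped_refptr<media::cast::CastEnvironment> MakeReceiverEnvironment(
    base::TickClock* clock,
    base::Thread* main_thread,
    base::Thread* audio_thread,
    base::Thread* video_thread) {
  return new media::cast::CastEnvironment(
      clock,
      main_thread->message_loop_proxy(),   // Main thread.
      NULL,                                // Audio encode: unused by a receiver.
      audio_thread->message_loop_proxy(),  // Audio decode.
      NULL,                                // Video encode: unused by a receiver.
      video_thread->message_loop_proxy(),  // Video decode.
      media::cast::GetDefaultCastLoggingConfig());
}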
diff --git a/media/cast/test/sender.cc b/media/cast/test/sender.cc
new file mode 100644
index 0000000000..4e491bf6bf
--- /dev/null
+++ b/media/cast/test/sender.cc
@@ -0,0 +1,346 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test application that simulates a cast sender - Data can be either generated
+// or read from a file.
+
+#include "base/at_exit.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread.h"
+#include "base/time/default_tick_clock.h"
+#include "media/base/video_frame.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/cast_sender.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/test/audio_utility.h"
+#include "media/cast/test/transport/transport.h"
+#include "media/cast/test/utility/input_helper.h"
+#include "media/cast/test/video_utility.h"
+#include "ui/gfx/size.h"
+
+#define DEFAULT_SEND_PORT "2344"
+#define DEFAULT_RECEIVE_PORT "2346"
+#define DEFAULT_SEND_IP "127.0.0.1"
+#define DEFAULT_READ_FROM_FILE "0"
+#define DEFAULT_PACKET_LOSS "0"
+#define DEFAULT_AUDIO_SENDER_SSRC "1"
+#define DEFAULT_AUDIO_RECEIVER_SSRC "2"
+#define DEFAULT_AUDIO_PAYLOAD_TYPE "127"
+#define DEFAULT_VIDEO_SENDER_SSRC "11"
+#define DEFAULT_VIDEO_RECEIVER_SSRC "12"
+#define DEFAULT_VIDEO_PAYLOAD_TYPE "96"
+#define DEFAULT_VIDEO_CODEC_WIDTH "1280"
+#define DEFAULT_VIDEO_CODEC_HEIGHT "720"
+#define DEFAULT_VIDEO_CODEC_BITRATE "2000"
+#define DEFAULT_VIDEO_CODEC_MAX_BITRATE "4000"
+#define DEFAULT_VIDEO_CODEC_MIN_BITRATE "1000"
+
+namespace media {
+namespace cast {
+
+namespace {
+static const int kAudioChannels = 2;
+static const int kAudioSamplingFrequency = 48000;
+static const int kSoundFrequency = 1234; // Frequency of sinusoid wave.
+// The tests are commonly implemented with |kFrameTimerMs| RunTask function;
+// a normal video is 30 fps hence the 33 ms between frames.
+static const float kSoundVolume = 0.5f;
+static const int kFrameTimerMs = 33;
+} // namespace
+
+void GetPorts(int* tx_port, int* rx_port) {
+ test::InputBuilder tx_input("Enter send port.",
+ DEFAULT_SEND_PORT, 1, INT_MAX);
+ *tx_port = tx_input.GetIntInput();
+
+ test::InputBuilder rx_input("Enter receive port.",
+ DEFAULT_RECEIVE_PORT, 1, INT_MAX);
+ *rx_port = rx_input.GetIntInput();
+}
+
+int GetPacketLoss() {
+ test::InputBuilder input("Enter send side packet loss %.",
+ DEFAULT_PACKET_LOSS, 0, 99);
+ return input.GetIntInput();
+}
+
+std::string GetIpAddress(const std::string display_text) {
+ test::InputBuilder input(display_text, DEFAULT_SEND_IP,
+ INT_MIN, INT_MAX);
+ std::string ip_address = input.GetStringInput();
+ // Verify correct form:
+ while (std::count(ip_address.begin(), ip_address.end(), '.') != 3) {
+ ip_address = input.GetStringInput();
+ }
+ return ip_address;
+}
+
+bool ReadFromFile() {
+ test::InputBuilder input("Enter 1 to read from file.", DEFAULT_READ_FROM_FILE,
+ 0, 1);
+ return (1 == input.GetIntInput());
+}
+
+std::string GetVideoFile() {
+ test::InputBuilder input("Enter file and path to raw video file.","",
+ INT_MIN, INT_MAX);
+ return input.GetStringInput();
+}
+
+void GetSsrcs(AudioSenderConfig* audio_config) {
+ test::InputBuilder input_tx("Choose audio sender SSRC.",
+ DEFAULT_AUDIO_SENDER_SSRC, 1, INT_MAX);
+ audio_config->sender_ssrc = input_tx.GetIntInput();
+
+ test::InputBuilder input_rx("Choose audio receiver SSRC.",
+ DEFAULT_AUDIO_RECEIVER_SSRC, 1, INT_MAX);
+ audio_config->incoming_feedback_ssrc = input_rx.GetIntInput();
+}
+
+void GetSsrcs(VideoSenderConfig* video_config) {
+ test::InputBuilder input_tx("Choose video sender SSRC.",
+ DEFAULT_VIDEO_SENDER_SSRC, 1, INT_MAX);
+ video_config->sender_ssrc = input_tx.GetIntInput();
+
+ test::InputBuilder input_rx("Choose video receiver SSRC.",
+ DEFAULT_VIDEO_RECEIVER_SSRC, 1, INT_MAX);
+ video_config->incoming_feedback_ssrc = input_rx.GetIntInput();
+}
+
+void GetPayloadtype(AudioSenderConfig* audio_config) {
+ test::InputBuilder input("Choose audio sender payload type.",
+ DEFAULT_AUDIO_PAYLOAD_TYPE, 96, 127);
+ audio_config->rtp_payload_type = input.GetIntInput();
+}
+
+AudioSenderConfig GetAudioSenderConfig() {
+ AudioSenderConfig audio_config;
+
+ GetSsrcs(&audio_config);
+ GetPayloadtype(&audio_config);
+
+ audio_config.rtcp_c_name = "audio_sender@a.b.c.d";
+
+ VLOG(0) << "Using OPUS 48Khz stereo at 64kbit/s";
+ audio_config.use_external_encoder = false;
+ audio_config.frequency = kAudioSamplingFrequency;
+ audio_config.channels = kAudioChannels;
+ audio_config.bitrate = 64000;
+ audio_config.codec = kOpus;
+ return audio_config;
+}
+
+void GetPayloadtype(VideoSenderConfig* video_config) {
+ test::InputBuilder input("Choose video sender payload type.",
+ DEFAULT_VIDEO_PAYLOAD_TYPE, 96, 127);
+ video_config->rtp_payload_type = input.GetIntInput();
+}
+
+void GetVideoCodecSize(VideoSenderConfig* video_config) {
+ test::InputBuilder input_width("Choose video width.",
+ DEFAULT_VIDEO_CODEC_WIDTH, 144, 1920);
+ video_config->width = input_width.GetIntInput();
+
+ test::InputBuilder input_height("Choose video height.",
+ DEFAULT_VIDEO_CODEC_HEIGHT, 176, 1080);
+ video_config->height = input_height.GetIntInput();
+}
+
+void GetVideoBitrates(VideoSenderConfig* video_config) {
+ test::InputBuilder input_start_br("Choose start bitrate[kbps].",
+ DEFAULT_VIDEO_CODEC_BITRATE, 0, INT_MAX);
+ video_config->start_bitrate = input_start_br.GetIntInput() * 1000;
+
+ test::InputBuilder input_max_br("Choose max bitrate[kbps].",
+ DEFAULT_VIDEO_CODEC_MAX_BITRATE, 0, INT_MAX);
+ video_config->max_bitrate = input_max_br.GetIntInput() * 1000;
+
+ test::InputBuilder input_min_br("Choose min bitrate[kbps].",
+ DEFAULT_VIDEO_CODEC_MIN_BITRATE, 0, INT_MAX);
+ video_config->min_bitrate = input_min_br.GetIntInput() * 1000;
+}
+
+VideoSenderConfig GetVideoSenderConfig() {
+ VideoSenderConfig video_config;
+
+ GetSsrcs(&video_config);
+ GetPayloadtype(&video_config);
+ GetVideoCodecSize(&video_config);
+ GetVideoBitrates(&video_config);
+
+ video_config.rtcp_c_name = "video_sender@a.b.c.d";
+
+ video_config.use_external_encoder = false;
+
+ VLOG(0) << "Using VP8 at 30 fps";
+ video_config.min_qp = 4;
+ video_config.max_qp = 40;
+ video_config.max_frame_rate = 30;
+ video_config.codec = kVp8;
+ video_config.max_number_of_video_buffers_used = 1;
+ video_config.number_of_cores = 1;
+ return video_config;
+}
+
+class SendProcess {
+ public:
+ SendProcess(scoped_refptr<CastEnvironment> cast_environment,
+ const VideoSenderConfig& video_config,
+ FrameInput* frame_input)
+ : video_config_(video_config),
+ audio_diff_(kFrameTimerMs),
+ frame_input_(frame_input),
+ synthetic_count_(0),
+ clock_(cast_environment->Clock()),
+ start_time_(),
+ send_time_(),
+ weak_factory_(this) {
+ audio_bus_factory_.reset(new TestAudioBusFactory(kAudioChannels,
+ kAudioSamplingFrequency, kSoundFrequency, kSoundVolume));
+ if (ReadFromFile()) {
+ std::string video_file_name = GetVideoFile();
+ video_file_ = fopen(video_file_name.c_str(), "r");
+ if (video_file_ == NULL) {
+ VLOG(1) << "Failed to open file";
+ exit(-1);
+ }
+ } else {
+ video_file_ = NULL;
+ }
+ }
+
+ ~SendProcess() {
+ if (video_file_)
+ fclose(video_file_);
+ }
+
+ void ReleaseVideoFrame(const scoped_refptr<media::VideoFrame>&) {
+ SendFrame();
+ }
+
+ void SendFrame() {
+    // Generate audio in whole 10 ms blocks and carry the remainder forward in
+    // |audio_diff_| so that audio and video do not drift apart over time.
+    int num_10ms_blocks = audio_diff_ / 10;
+    audio_diff_ += kFrameTimerMs - num_10ms_blocks * 10;
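+    // For example, |audio_diff_| starts at kFrameTimerMs (33): the first
+    // frame produces 3 blocks (30 ms) and carries 3 ms over; every few frames
+    // the carry yields a 4th block, so audio averages 33 ms per video frame.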
+
+ scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
+ base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
+ AudioBus* const audio_bus_ptr = audio_bus.get();
+ frame_input_->InsertAudio(audio_bus_ptr, clock_->NowTicks(),
+ base::Bind(base::DoNothing));
+
+ gfx::Size size(video_config_.width, video_config_.height);
+ // TODO(mikhal): Use the provided timestamp.
+ if (start_time_.is_null())
+ start_time_ = clock_->NowTicks();
+ base::TimeDelta time_diff = clock_->NowTicks() - start_time_;
+ scoped_refptr<media::VideoFrame> video_frame =
+ media::VideoFrame::CreateFrame(
+ VideoFrame::I420, size, gfx::Rect(size), size, time_diff);
+ if (video_file_) {
+ if (!PopulateVideoFrameFromFile(video_frame, video_file_))
+ return;
+ } else {
+ PopulateVideoFrame(video_frame, synthetic_count_);
+ ++synthetic_count_;
+ }
+
+ // Time the sending of the frame to match the set frame rate.
+ // Sleep if that time has yet to elapse.
+ base::TimeTicks now = clock_->NowTicks();
+ base::TimeDelta video_frame_time =
+ base::TimeDelta::FromMilliseconds(kFrameTimerMs);
+ base::TimeDelta elapsed_time = now - send_time_;
+ if (elapsed_time < video_frame_time) {
+ base::PlatformThread::Sleep(video_frame_time - elapsed_time);
+ VLOG(1) << "Sleep" <<
+ (video_frame_time - elapsed_time).InMilliseconds();
+ }
+
+ send_time_ = clock_->NowTicks();
+ frame_input_->InsertRawVideoFrame(video_frame, send_time_,
+ base::Bind(&SendProcess::ReleaseVideoFrame, weak_factory_.GetWeakPtr(),
+ video_frame));
+ }
+
+ private:
+ const VideoSenderConfig video_config_;
+ int audio_diff_;
+ const scoped_refptr<FrameInput> frame_input_;
+ FILE* video_file_;
+ uint8 synthetic_count_;
+ base::TickClock* const clock_; // Not owned by this class.
+ base::TimeTicks start_time_;
+ base::TimeTicks send_time_;
+ scoped_ptr<TestAudioBusFactory> audio_bus_factory_;
+ base::WeakPtrFactory<SendProcess> weak_factory_;
+};
+
+} // namespace cast
+} // namespace media
+
+
+int main(int argc, char** argv) {
+ base::AtExitManager at_exit;
+ VLOG(1) << "Cast Sender";
+ base::Thread main_thread("Cast main send thread");
+ base::Thread audio_thread("Cast audio encoder thread");
+ base::Thread video_thread("Cast video encoder thread");
+ main_thread.Start();
+ audio_thread.Start();
+ video_thread.Start();
+
+ base::DefaultTickClock clock;
+ base::MessageLoopForIO io_message_loop;
+
+ // Enable main and send side threads only. Disable logging.
+ scoped_refptr<media::cast::CastEnvironment> cast_environment(new
+ media::cast::CastEnvironment(
+ &clock,
+ main_thread.message_loop_proxy(),
+ audio_thread.message_loop_proxy(),
+ NULL,
+ video_thread.message_loop_proxy(),
+ NULL,
+ media::cast::GetDefaultCastLoggingConfig()));
+
+ media::cast::AudioSenderConfig audio_config =
+ media::cast::GetAudioSenderConfig();
+ media::cast::VideoSenderConfig video_config =
+ media::cast::GetVideoSenderConfig();
+
+ scoped_ptr<media::cast::test::Transport> transport(
+ new media::cast::test::Transport(io_message_loop.message_loop_proxy()));
+ scoped_ptr<media::cast::CastSender> cast_sender(
+ media::cast::CastSender::CreateCastSender(cast_environment,
+ audio_config,
+ video_config,
+ NULL, // VideoEncoderController.
+ transport->packet_sender()));
+
+ media::cast::PacketReceiver* packet_receiver = cast_sender->packet_receiver();
+
+ int send_to_port, receive_port;
+ media::cast::GetPorts(&send_to_port, &receive_port);
+ std::string ip_address = media::cast::GetIpAddress("Enter destination IP.");
+ std::string local_ip_address = media::cast::GetIpAddress("Enter local IP.");
+ int packet_loss_percentage = media::cast::GetPacketLoss();
+
+ transport->SetLocalReceiver(packet_receiver, ip_address, local_ip_address,
+ receive_port);
+ transport->SetSendDestination(ip_address, send_to_port);
+ transport->SetSendSidePacketLoss(packet_loss_percentage);
+
+ media::cast::FrameInput* frame_input = cast_sender->frame_input();
+ scoped_ptr<media::cast::SendProcess> send_process(new
+ media::cast::SendProcess(cast_environment, video_config, frame_input));
+
+ send_process->SendFrame();
+ io_message_loop.Run();
+ transport->StopReceiving();
+ return 0;
+}
diff --git a/media/cast/test/transport/transport.cc b/media/cast/test/transport/transport.cc
new file mode 100644
index 0000000000..22f41ba32e
--- /dev/null
+++ b/media/cast/test/transport/transport.cc
@@ -0,0 +1,218 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/test/transport/transport.h"
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/rand_util.h"
+#include "net/base/completion_callback.h"
+#include "net/base/io_buffer.h"
+#include "net/base/rand_callback.h"
+#include "net/base/test_completion_callback.h"
+
+namespace media {
+namespace cast {
+namespace test {
+
+const int kMaxPacketSize = 1500;
+
+class LocalUdpTransportData;
+
+void CreateUDPAddress(std::string ip_str, int port, net::IPEndPoint* address) {
+ net::IPAddressNumber ip_number;
+ bool rv = net::ParseIPLiteralToNumber(ip_str, &ip_number);
+ if (!rv)
+ return;
+ *address = net::IPEndPoint(ip_number, port);
+}
+
+class LocalUdpTransportData
+ : public base::RefCountedThreadSafe<LocalUdpTransportData> {
+ public:
+ LocalUdpTransportData(net::DatagramServerSocket* udp_socket,
+ scoped_refptr<base::TaskRunner> io_thread_proxy)
+ : udp_socket_(udp_socket),
+ buffer_(new net::IOBufferWithSize(kMaxPacketSize)),
+ io_thread_proxy_(io_thread_proxy) {
+ }
+
+ void ListenTo(net::IPEndPoint bind_address) {
+ DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
+
+ bind_address_ = bind_address;
+ io_thread_proxy_->PostTask(FROM_HERE,
+ base::Bind(&LocalUdpTransportData::RecvFromSocketLoop, this));
+ }
+
+ void DeletePacket(uint8* data) {
+ // Should be called from the receiver (not on the transport thread).
+ DCHECK(!(io_thread_proxy_->RunsTasksOnCurrentThread()));
+ delete [] data;
+ }
+
+ void PacketReceived(int size) {
+ DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
+    // Got a packet of |size| bytes; copy it out of |buffer_| before reuse.
+ uint8* data = new uint8[size];
+ memcpy(data, buffer_->data(), size);
+ packet_receiver_->ReceivedPacket(data, size,
+ base::Bind(&LocalUdpTransportData::DeletePacket, this, data));
+ RecvFromSocketLoop();
+
+ }
+
+ void RecvFromSocketLoop() {
+ DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
+    // RecvFrom either completes synchronously (returning the packet size) or
+    // returns ERR_IO_PENDING and later invokes PacketReceived() with the size.
+ int res = udp_socket_->RecvFrom(buffer_.get(), kMaxPacketSize,
+ &bind_address_, base::Bind(&LocalUdpTransportData::PacketReceived,
+ this));
+ DCHECK(res >= net::ERR_IO_PENDING);
+ if (res > 0) {
+ PacketReceived(res);
+ }
+ }
+
+ void set_packet_receiver(PacketReceiver* packet_receiver) {
+ packet_receiver_ = packet_receiver;
+ }
+
+ void Close() {
+ udp_socket_->Close();
+ }
+
+ protected:
+ virtual ~LocalUdpTransportData() {}
+
+ private:
+ friend class base::RefCountedThreadSafe<LocalUdpTransportData>;
+
+ net::DatagramServerSocket* udp_socket_;
+ net::IPEndPoint bind_address_;
+ PacketReceiver* packet_receiver_;
+ scoped_refptr<net::IOBufferWithSize> buffer_;
+ scoped_refptr<base::TaskRunner> io_thread_proxy_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocalUdpTransportData);
+};
+
+class LocalPacketSender : public PacketSender,
+ public base::RefCountedThreadSafe<LocalPacketSender> {
+ public:
+ LocalPacketSender(net::DatagramServerSocket* udp_socket,
+ scoped_refptr<base::TaskRunner> io_thread_proxy)
+ : udp_socket_(udp_socket),
+ send_address_(),
+ loss_limit_(0),
+ io_thread_proxy_(io_thread_proxy) {}
+
+ virtual bool SendPacket(const Packet& packet) OVERRIDE {
+ io_thread_proxy_->PostTask(FROM_HERE,
+ base::Bind(&LocalPacketSender::SendPacketToNetwork,
+ this, packet));
+ return true;
+ }
+
+ virtual void SendPacketToNetwork(const Packet& packet) {
+ DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
+ const uint8* data = packet.data();
+ if (loss_limit_ > 0) {
+ int r = base::RandInt(0, 100);
+      if (r < loss_limit_) {
+        VLOG(1) << "Drop packet f:" << static_cast<int>(data[12 + 1])
+                << " p:" << static_cast<int>(data[12 + 3])
+                << " m:" << static_cast<int>(data[12 + 5]);
+        return;  // Simulate loss by not sending the packet.
+      }
+ }
+ net::TestCompletionCallback callback;
+ scoped_refptr<net::WrappedIOBuffer> buffer(
+ new net::WrappedIOBuffer(reinterpret_cast<const char*>(data)));
+ udp_socket_->SendTo(buffer.get(), static_cast<int>(packet.size()),
+ send_address_, callback.callback());
+ }
+
+ virtual bool SendPackets(const PacketList& packets) OVERRIDE {
+ bool out_val = true;
+ for (size_t i = 0; i < packets.size(); ++i) {
+ const Packet& packet = packets[i];
+ out_val |= SendPacket(packet);
+ }
+ return out_val;
+ }
+
+ void SetPacketLoss(int percentage) {
+ DCHECK_GE(percentage, 0);
+ DCHECK_LT(percentage, 100);
+ loss_limit_ = percentage;
+ }
+
+ void SetSendAddress(const net::IPEndPoint& send_address) {
+ send_address_ = send_address;
+ }
+
+ protected:
+ virtual ~LocalPacketSender() {}
+
+ private:
+ friend class base::RefCountedThreadSafe<LocalPacketSender>;
+
+ net::DatagramServerSocket* udp_socket_; // Not owned by this class.
+ net::IPEndPoint send_address_;
+ int loss_limit_;
+ scoped_refptr<base::TaskRunner> io_thread_proxy_;
+};
+
+Transport::Transport(
+ scoped_refptr<base::TaskRunner> io_thread_proxy)
+ : udp_socket_(new net::UDPServerSocket(NULL, net::NetLog::Source())),
+ local_udp_transport_data_(new LocalUdpTransportData(udp_socket_.get(),
+ io_thread_proxy)),
+ packet_sender_(new LocalPacketSender(udp_socket_.get(), io_thread_proxy)),
+ io_thread_proxy_(io_thread_proxy) {}
+
+Transport::~Transport() {}
+
+PacketSender* Transport::packet_sender() {
+ return static_cast<PacketSender*>(packet_sender_.get());
+}
+
+void Transport::SetSendSidePacketLoss(int percentage) {
+ packet_sender_->SetPacketLoss(percentage);
+}
+
+void Transport::StopReceiving() {
+ local_udp_transport_data_->Close();
+}
+
+void Transport::SetLocalReceiver(PacketReceiver* packet_receiver,
+ std::string ip_address,
+ std::string local_ip_address,
+ int port) {
+ DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
+ net::IPEndPoint bind_address, local_bind_address;
+ CreateUDPAddress(ip_address, port, &bind_address);
+ CreateUDPAddress(local_ip_address, port, &local_bind_address);
+ local_udp_transport_data_->set_packet_receiver(packet_receiver);
+ udp_socket_->AllowAddressReuse();
+ udp_socket_->SetMulticastLoopbackMode(true);
+ udp_socket_->Listen(local_bind_address);
+
+ // Start listening once receiver has been set.
+ local_udp_transport_data_->ListenTo(bind_address);
+}
+
+void Transport::SetSendDestination(std::string ip_address, int port) {
+ net::IPEndPoint send_address;
+ CreateUDPAddress(ip_address, port, &send_address);
+ packet_sender_->SetSendAddress(send_address);
+}
+
+} // namespace test
+} // namespace cast
+} // namespace media
diff --git a/media/cast/test/transport/transport.gyp b/media/cast/test/transport/transport.gyp
new file mode 100644
index 0000000000..79be3d28e6
--- /dev/null
+++ b/media/cast/test/transport/transport.gyp
@@ -0,0 +1,22 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_transport',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'sources': [
+ 'transport.cc',
+ 'transport.h',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/net/net.gyp:net',
+ ],
+ },
+ ],
+}
\ No newline at end of file
diff --git a/media/cast/test/transport/transport.h b/media/cast/test/transport/transport.h
new file mode 100644
index 0000000000..288fa13bd3
--- /dev/null
+++ b/media/cast/test/transport/transport.h
@@ -0,0 +1,57 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TEST_TRANSPORT_TRANSPORT_H_
+#define MEDIA_CAST_TEST_TRANSPORT_TRANSPORT_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
+#include "net/udp/udp_server_socket.h"
+#include "net/udp/udp_socket.h"
+
+
+namespace media {
+namespace cast {
+namespace test {
+
+class LocalUdpTransportData;
+class LocalPacketSender;
+
+// Helper class for Cast test applications.
+class Transport {
+ public:
+ Transport(scoped_refptr<base::TaskRunner> io_thread_proxy);
+ ~Transport();
+
+  // Specifies the port and IP addresses to receive packets on.
+ // Will start listening immediately.
+ void SetLocalReceiver(PacketReceiver* packet_receiver,
+ std::string ip_address,
+ std::string local_ip_address,
+ int port);
+
+ // Specifies the destination port and IP address.
+ void SetSendDestination(std::string ip_address, int port);
+
+ PacketSender* packet_sender();
+
+ void SetSendSidePacketLoss(int percentage);
+
+ void StopReceiving();
+
+ private:
+ scoped_ptr<net::DatagramServerSocket> udp_socket_;
+ scoped_refptr<LocalPacketSender> packet_sender_;
+ scoped_refptr<LocalUdpTransportData> local_udp_transport_data_;
+ scoped_refptr<base::TaskRunner> io_thread_proxy_;
+
+ DISALLOW_COPY_AND_ASSIGN(Transport);
+};
+
+} // namespace test
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TEST_TRANSPORT_TRANSPORT_H_
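
A minimal usage sketch for this helper, mirroring how the sender application
above wires it up (the addresses, port numbers and loss value here are
illustrative, not defaults from the code):

    // Sketch only: assumes an IO MessageLoop and a CastSender already exist.
    scoped_ptr<media::cast::test::Transport> transport(
        new media::cast::test::Transport(io_message_loop.message_loop_proxy()));
    // The sender pushes outgoing packets through this interface.
    media::cast::PacketSender* packet_sender = transport->packet_sender();
    // Feed incoming packets (e.g. RTCP feedback) back to the cast sender.
    transport->SetLocalReceiver(cast_sender->packet_receiver(),
                                "192.168.0.2", "192.168.0.1", 2346);
    transport->SetSendDestination("192.168.0.2", 2344);
    transport->SetSendSidePacketLoss(5);  // Drop roughly 5% of sent packets.
    transport->StopReceiving();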
diff --git a/media/cast/test/utility/input_helper.cc b/media/cast/test/utility/input_helper.cc
new file mode 100644
index 0000000000..fa93926aa2
--- /dev/null
+++ b/media/cast/test/utility/input_helper.cc
@@ -0,0 +1,71 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/test/utility/input_helper.h"
+
+#include <stdlib.h>
+#include <cstdio>
+
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+
+namespace media {
+namespace cast {
+namespace test {
+
+InputBuilder::InputBuilder(const std::string& title,
+ const std::string& default_value,
+ int low_range,
+ int high_range)
+ : title_(title),
+ default_value_(default_value),
+ low_range_(low_range),
+ high_range_(high_range) {}
+
+InputBuilder::~InputBuilder() {}
+
+std::string InputBuilder::GetStringInput() const {
+ printf("\n%s\n", title_.c_str());
+ if (!default_value_.empty())
+ printf("Hit enter for default (%s):\n", default_value_.c_str());
+
+ printf("# ");
+ fflush(stdout);
+ char raw_input[128];
+ if (!fgets(raw_input, 128, stdin)) {
+ NOTREACHED();
+ return std::string();
+ }
+
+ std::string input = raw_input;
+ input = input.substr(0, input.size() - 1); // Strip last \n.
+ if (input.empty() && !default_value_.empty())
+ return default_value_;
+
+ if (!ValidateInput(input)) {
+ printf("Invalid input. Please try again.\n");
+ return GetStringInput();
+ }
+ return input;
+}
+
+int InputBuilder::GetIntInput() const {
+ std::string string_input = GetStringInput();
+ int int_value;
+ CHECK(base::StringToInt(string_input, &int_value));
+ return int_value;
+}
+
+bool InputBuilder::ValidateInput(const std::string& input) const {
+  // No range to enforce.
+  if (low_range_ == INT_MIN && high_range_ == INT_MAX)
+    return true;
+ int value;
+ if (!base::StringToInt(input, &value))
+ return false;
+ return value >= low_range_ && value <= high_range_;
+}
+
+} // namespace test
+} // namespace cast
+} // namespace media
diff --git a/media/cast/test/utility/input_helper.h b/media/cast/test/utility/input_helper.h
new file mode 100644
index 0000000000..baabe5151c
--- /dev/null
+++ b/media/cast/test/utility/input_helper.h
@@ -0,0 +1,49 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TEST_UTILITY_INPUT_HELPER_
+#define MEDIA_CAST_TEST_UTILITY_INPUT_HELPER_
+
+#include <string>
+
+namespace media {
+namespace cast {
+namespace test {
+
+// This class handles general user input to the application. The user is
+// presented with the title string and offered a default value. When a range
+// is enforced, input values must lie between low_range and high_range.
+// Setting low and high to INT_MIN/INT_MAX is equivalent to not setting a range.
+class InputBuilder {
+ public:
+ InputBuilder(const std::string& title,
+ const std::string& default_value,
+ int low_range,
+ int high_range);
+ virtual ~InputBuilder();
+
+  // Asks the user for input, reads it from the input source and returns
+  // the answer. This method keeps asking the user until a correct answer is
+  // given and is thereby guaranteed to return a response within the
+  // predefined range.
+  // Input is returned in either string or int format, based on the function
+  // called.
+ std::string GetStringInput() const;
+ int GetIntInput() const;
+
+ private:
+  bool ValidateInput(const std::string& input) const;
+
+ const std::string title_;
+ const std::string default_value_;
+ // Low and high range values for input validation.
+ const int low_range_;
+ const int high_range_;
+};
+
+} // namespace test
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TEST_UTILITY_INPUT_HELPER_
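
A minimal usage sketch, matching how the sender application above prompts for
its settings (the prompt text, default and range below are illustrative):

    // Asks on stdin, falls back to the default on an empty line, and keeps
    // asking until the answer parses as an int within [0, 99].
    media::cast::test::InputBuilder input("Enter send side packet loss %.",
                                          "0" /* default */, 0, 99);
    int packet_loss_percentage = input.GetIntInput();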
diff --git a/media/cast/test/utility/utility.gyp b/media/cast/test/utility/utility.gyp
new file mode 100644
index 0000000000..021c2d9a41
--- /dev/null
+++ b/media/cast/test/utility/utility.gyp
@@ -0,0 +1,28 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_test_utility',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ '<(DEPTH)/ui/gfx/gfx.gyp:gfx',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
+
+ ],
+ 'sources': [
+ 'input_helper.cc',
+ 'input_helper.h',
+ '<(DEPTH)/media/cast/test/audio_utility.cc',
+ '<(DEPTH)/media/cast/test/fake_task_runner.cc',
+ '<(DEPTH)/media/cast/test/video_utility.cc',
+ ], # source
+ },
+ ],
+}
\ No newline at end of file
diff --git a/media/cast/test/video_utility.cc b/media/cast/test/video_utility.cc
index 156329a305..30f00193dd 100644
--- a/media/cast/test/video_utility.cc
+++ b/media/cast/test/video_utility.cc
@@ -3,8 +3,12 @@
// found in the LICENSE file.
#include <math.h>
+#include <cstdio>
+#include "media/base/video_frame.h"
#include "media/cast/test/video_utility.h"
+#include "third_party/libyuv/include/libyuv/compare.h"
+#include "ui/gfx/size.h"
namespace media {
namespace cast {
@@ -12,55 +16,52 @@ namespace cast {
double I420PSNR(const I420VideoFrame& frame1, const I420VideoFrame& frame2) {
// Frames should have equal resolution.
if (frame1.width != frame2.width || frame1.height != frame2.height) return -1;
+ return libyuv::I420Psnr(frame1.y_plane.data, frame1.y_plane.stride,
+ frame1.u_plane.data, frame1.u_plane.stride,
+ frame1.v_plane.data, frame1.v_plane.stride,
+ frame2.y_plane.data, frame2.y_plane.stride,
+ frame2.u_plane.data, frame2.u_plane.stride,
+ frame2.v_plane.data, frame2.v_plane.stride,
+ frame1.width, frame1.height);
+}
- double y_mse = 0.0;
- // Y.
- uint8* data1 = frame1.y_plane.data;
- uint8* data2 = frame2.y_plane.data;
- for (int i = 0; i < frame1.height; ++i) {
- for (int j = 0; j < frame1.width; ++j) {
- y_mse += (data1[j] - data2[j]) * (data1[j] - data2[j]);
- }
- // Account for stride.
- data1 += frame1.y_plane.stride;
- data2 += frame2.y_plane.stride;
- }
- y_mse /= (frame1.width * frame1.height);
-
- int half_width = (frame1.width + 1) / 2;
- int half_height = (frame1.height + 1) / 2;
- // U.
- double u_mse = 0.0;
- data1 = frame1.u_plane.data;
- data2 = frame2.u_plane.data;
- for (int i = 0; i < half_height; ++i) {
- for (int j = 0; j < half_width; ++j) {
- u_mse += (data1[j] - data2[j]) * (data1[j] - data2[j]);
- }
- // Account for stride.
- data1 += frame1.u_plane.stride;
- data2 += frame2.u_plane.stride;
+double I420PSNR(const VideoFrame& frame1, const I420VideoFrame& frame2) {
+ if (frame1.coded_size().width() != frame2.width ||
+ frame1.coded_size().height() != frame2.height) return -1;
+
+ return libyuv::I420Psnr(
+ frame1.data(VideoFrame::kYPlane), frame1.stride(VideoFrame::kYPlane),
+ frame1.data(VideoFrame::kUPlane), frame1.stride(VideoFrame::kUPlane),
+ frame1.data(VideoFrame::kVPlane), frame1.stride(VideoFrame::kVPlane),
+ frame2.y_plane.data, frame2.y_plane.stride,
+ frame2.u_plane.data, frame2.u_plane.stride,
+ frame2.v_plane.data, frame2.v_plane.stride,
+ frame2.width, frame2.height);
+}
+
+void PopulateVideoFrame(VideoFrame* frame, int start_value) {
+ int width = frame->coded_size().width();
+ int height = frame->coded_size().height();
+ int half_width = (width + 1) / 2;
+ int half_height = (height + 1) / 2;
+ uint8* y_plane = frame->data(VideoFrame::kYPlane);
+ uint8* u_plane = frame->data(VideoFrame::kUPlane);
+ uint8* v_plane = frame->data(VideoFrame::kVPlane);
+
+ // Set Y.
+ for (int i = 0; i < width * height; ++i) {
+ y_plane[i] = static_cast<uint8>(start_value + i);
}
- u_mse /= half_width * half_height;
-
- // V.
- double v_mse = 0.0;
- data1 = frame1.v_plane.data;
- data2 = frame2.v_plane.data;
- for (int i = 0; i < half_height; ++i) {
- for (int j = 0; j < half_width; ++j) {
- v_mse += (data1[j] - data2[j]) * (data1[j] - data2[j]);
- }
- // Account for stride.
- data1 += frame1.v_plane.stride;
- data2 += frame2.v_plane.stride;
+
+ // Set U.
+ for (int i = 0; i < half_width * half_height; ++i) {
+ u_plane[i] = static_cast<uint8>(start_value + i);
}
- v_mse /= half_width * half_height;
- // Combine to one psnr value.
- static const double kVideoBitRange = 255.0;
- return 20.0 * log10(kVideoBitRange) -
- 10.0 * log10((y_mse + u_mse + v_mse) / 3.0);
+ // Set V.
+ for (int i = 0; i < half_width * half_height; ++i) {
+ v_plane[i] = static_cast<uint8>(start_value + i);
+ }
}
void PopulateVideoFrame(I420VideoFrame* frame, int start_value) {
@@ -94,5 +95,27 @@ void PopulateVideoFrame(I420VideoFrame* frame, int start_value) {
}
}
+bool PopulateVideoFrameFromFile(VideoFrame* frame, FILE* video_file) {
+ int width = frame->coded_size().width();
+ int height = frame->coded_size().height();
+ int half_width = (width + 1) / 2;
+ int half_height = (height + 1) / 2;
+ size_t frame_size = width * height + 2 * half_width * half_height;
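+  // I420 layout: full-resolution Y plane plus quarter-resolution U and V
+  // planes, e.g. 1280x720 -> 921600 + 2 * 640 * 360 = 1382400 bytes per frame.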
+ uint8* y_plane = frame->data(VideoFrame::kYPlane);
+ uint8* u_plane = frame->data(VideoFrame::kUPlane);
+ uint8* v_plane = frame->data(VideoFrame::kVPlane);
+
+  uint8* raw_data = new uint8[frame_size];
+  size_t count = fread(raw_data, 1, frame_size, video_file);
+  if (count != frame_size) {
+    delete [] raw_data;  // Avoid leaking the buffer on a short read / EOF.
+    return false;
+  }
+
+ memcpy(y_plane, raw_data, width * height);
+ memcpy(u_plane, raw_data + width * height, half_width * half_height);
+ memcpy(v_plane, raw_data + width * height +
+ half_width * half_height, half_width * half_height);
+ delete [] raw_data;
+ return true;
+}
+
} // namespace cast
} // namespace media
diff --git a/media/cast/test/video_utility.h b/media/cast/test/video_utility.h
index 547b72d0c1..464dff2893 100644
--- a/media/cast/test/video_utility.h
+++ b/media/cast/test/video_utility.h
@@ -4,6 +4,7 @@
// Utility functions for video testing.
+#include "media/base/video_frame.h"
#include "media/cast/cast_config.h"
namespace media {
@@ -12,10 +13,19 @@ namespace cast {
// Compute and return PSNR between two frames.
double I420PSNR(const I420VideoFrame& frame1, const I420VideoFrame& frame2);
+// Temporary function to handle the transition
+// from I420VideoFrame->media::VideoFrame.
+double I420PSNR(const VideoFrame& frame1, const I420VideoFrame& frame2);
+
// Populate a video frame with values starting with the given start value.
// Width, height and stride should be set in advance.
// Memory is allocated within the function.
+void PopulateVideoFrame(VideoFrame* frame, int start_value);
void PopulateVideoFrame(I420VideoFrame* frame, int start_value);
+// Populate a video frame from a file.
+// Returns true if frame was populated, false if not (EOF).
+bool PopulateVideoFrameFromFile(VideoFrame* frame, FILE* video_file);
+
} // namespace cast
} // namespace media
diff --git a/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc b/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc
index 26113cd3d6..bf56b0a2ad 100644
--- a/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc
+++ b/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc
@@ -4,14 +4,25 @@
#include "media/cast/video_receiver/codecs/vp8/vp8_decoder.h"
+#include "base/bind.h"
#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
#include "third_party/libvpx/source/libvpx/vpx/vp8dx.h"
namespace media {
namespace cast {
-Vp8Decoder::Vp8Decoder(int number_of_cores) {
- decoder_.reset(new vpx_dec_ctx_t());
+void LogFrameDecodedEvent(CastEnvironment* const cast_environment,
+ uint32 frame_id) {
+// TODO(mikhal): Sort out passing of rtp_timestamp.
+// cast_environment->Logging()->InsertFrameEvent(kVideoFrameDecoded,
+// 0, frame_id);
+}
+
+Vp8Decoder::Vp8Decoder(int number_of_cores,
+ scoped_refptr<CastEnvironment> cast_environment)
+ : decoder_(new vpx_dec_ctx_t()),
+ cast_environment_(cast_environment) {
InitDecode(number_of_cores);
}
@@ -27,31 +38,35 @@ void Vp8Decoder::InitDecode(int number_of_cores) {
}
}
-bool Vp8Decoder::Decode(const EncodedVideoFrame& input_image,
- I420VideoFrame* decoded_frame) {
- VLOG(1) << "VP8 decode frame:" << static_cast<int>(input_image.frame_id)
- << " sized:" << input_image.data.size();
+bool Vp8Decoder::Decode(const EncodedVideoFrame* encoded_frame,
+ const base::TimeTicks render_time,
+ const VideoFrameDecodedCallback& frame_decoded_cb) {
+ const int frame_id_int = static_cast<int>(encoded_frame->frame_id);
+ VLOG(1) << "VP8 decode frame:" << frame_id_int
+ << " sized:" << encoded_frame->data.size();
- if (input_image.data.empty()) return false;
+ if (encoded_frame->data.empty()) return false;
vpx_codec_iter_t iter = NULL;
vpx_image_t* img;
- if (vpx_codec_decode(decoder_.get(),
- input_image.data.data(),
- static_cast<unsigned int>(input_image.data.size()),
- 0,
- 1 /* real time*/)) {
+ if (vpx_codec_decode(
+ decoder_.get(),
+ reinterpret_cast<const uint8*>(encoded_frame->data.data()),
+ static_cast<unsigned int>(encoded_frame->data.size()),
+ 0,
+ 1 /* real time*/)) {
VLOG(1) << "Failed to decode VP8 frame.";
return false;
}
img = vpx_codec_get_frame(decoder_.get(), &iter);
if (img == NULL) {
- VLOG(1) << "Skip rendering VP8 frame:"
- << static_cast<int>(input_image.frame_id);
+ VLOG(1) << "Skip rendering VP8 frame:" << frame_id_int;
return false;
}
+ scoped_ptr<I420VideoFrame> decoded_frame(new I420VideoFrame());
+
// The img is only valid until the next call to vpx_codec_decode.
// Populate the decoded image.
decoded_frame->width = img->d_w;
@@ -76,6 +91,16 @@ bool Vp8Decoder::Decode(const EncodedVideoFrame& input_image,
memcpy(decoded_frame->v_plane.data, img->planes[VPX_PLANE_V],
decoded_frame->v_plane.length);
+ // Log:: Decoding complete (should be called from the main thread).
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, base::Bind(
+      LogFrameDecodedEvent, cast_environment_, encoded_frame->frame_id));
+ VLOG(1) << "Decoded frame " << frame_id_int;
+
+ // Frame decoded - return frame to the user via callback.
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(frame_decoded_cb, base::Passed(&decoded_frame),
+ render_time));
+
return true;
}
diff --git a/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp b/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp
index bed02c8454..4bc9434d2d 100644
--- a/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp
+++ b/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp
@@ -19,7 +19,6 @@
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
- '<(DEPTH)/base/base.gyp:test_support_base',
],
},
],
diff --git a/media/cast/video_receiver/codecs/vp8/vp8_decoder.h b/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
index c8d930bb2a..511ad37ec9 100644
--- a/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
+++ b/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
@@ -8,6 +8,8 @@
#include "base/memory/scoped_ptr.h"
#include "base/threading/non_thread_safe.h"
#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/cast_receiver.h"
#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
typedef struct vpx_codec_ctx vpx_dec_ctx_t;
@@ -19,18 +21,24 @@ namespace cast {
// thread.
class Vp8Decoder : public base::NonThreadSafe {
public:
- explicit Vp8Decoder(int number_of_cores);
+ Vp8Decoder(int number_of_cores,
+ scoped_refptr<CastEnvironment> cast_environment);
~Vp8Decoder();
- // Decode encoded image (as a part of a video stream).
- bool Decode(const EncodedVideoFrame& input_image,
- I420VideoFrame* decoded_frame);
+ // Decode frame - The decoded frame will be passed via the callback.
+ // Will return false in case of error, and then it's up to the caller to
+ // release the memory.
+ // Ownership of the encoded_frame does not pass to the Vp8Decoder.
+ bool Decode(const EncodedVideoFrame* encoded_frame,
+ const base::TimeTicks render_time,
+ const VideoFrameDecodedCallback& frame_decoded_cb);
private:
// Initialize the decoder.
void InitDecode(int number_of_cores);
scoped_ptr<vpx_dec_ctx_t> decoder_;
+ scoped_refptr<CastEnvironment> cast_environment_;
};
} // namespace cast
diff --git a/media/cast/video_receiver/video_decoder.cc b/media/cast/video_receiver/video_decoder.cc
index 4a76ae6842..03f7a25d44 100644
--- a/media/cast/video_receiver/video_decoder.cc
+++ b/media/cast/video_receiver/video_decoder.cc
@@ -12,13 +12,14 @@
namespace media {
namespace cast {
-VideoDecoder::VideoDecoder(const VideoReceiverConfig& video_config)
+VideoDecoder::VideoDecoder(const VideoReceiverConfig& video_config,
+ scoped_refptr<CastEnvironment> cast_environment)
: codec_(video_config.codec),
vp8_decoder_() {
switch (video_config.codec) {
case kVp8:
// Initializing to use one core.
- vp8_decoder_.reset(new Vp8Decoder(1));
+ vp8_decoder_.reset(new Vp8Decoder(1, cast_environment));
break;
case kH264:
NOTIMPLEMENTED();
@@ -31,13 +32,13 @@ VideoDecoder::VideoDecoder(const VideoReceiverConfig& video_config)
VideoDecoder::~VideoDecoder() {}
-bool VideoDecoder::DecodeVideoFrame(
- const EncodedVideoFrame* encoded_frame,
- const base::TimeTicks render_time,
- I420VideoFrame* video_frame) {
+bool VideoDecoder::DecodeVideoFrame(const EncodedVideoFrame* encoded_frame,
+ const base::TimeTicks render_time,
+ const VideoFrameDecodedCallback&
+ frame_decoded_cb) {
DCHECK(encoded_frame->codec == codec_) << "Invalid codec";
DCHECK_GT(encoded_frame->data.size(), GG_UINT64_C(0)) << "Empty video frame";
- return vp8_decoder_->Decode(*encoded_frame, video_frame);
+ return vp8_decoder_->Decode(encoded_frame, render_time, frame_decoded_cb);
}
} // namespace cast
diff --git a/media/cast/video_receiver/video_decoder.h b/media/cast/video_receiver/video_decoder.h
index e98768c215..e23a86b76b 100644
--- a/media/cast/video_receiver/video_decoder.h
+++ b/media/cast/video_receiver/video_decoder.h
@@ -19,14 +19,15 @@ class Vp8Decoder;
// thread.
class VideoDecoder : public base::NonThreadSafe {
public:
- explicit VideoDecoder(const VideoReceiverConfig& video_config);
+ VideoDecoder(const VideoReceiverConfig& video_config,
+ scoped_refptr<CastEnvironment> cast_environment);
virtual ~VideoDecoder();
- // Decode a video frame. Decoded (raw) frame will be returned in the
- // provided video_frame.
+ // Decode a video frame. Decoded (raw) frame will be returned via the
+ // provided callback
bool DecodeVideoFrame(const EncodedVideoFrame* encoded_frame,
const base::TimeTicks render_time,
- I420VideoFrame* video_frame);
+ const VideoFrameDecodedCallback& frame_decoded_cb);
private:
VideoCodec codec_;
diff --git a/media/cast/video_receiver/video_decoder_unittest.cc b/media/cast/video_receiver/video_decoder_unittest.cc
index cb40e12752..f03b467bd3 100644
--- a/media/cast/video_receiver/video_decoder_unittest.cc
+++ b/media/cast/video_receiver/video_decoder_unittest.cc
@@ -2,8 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/cast_receiver.h"
+#include "media/cast/test/fake_task_runner.h"
#include "media/cast/video_receiver/video_decoder.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -14,44 +21,71 @@ using testing::_;
// Random frame size for testing.
const int kFrameSize = 2345;
+static const int64 kStartMillisecond = GG_INT64_C(1245);
+
+namespace {
+class DecodeTestFrameCallback :
+ public base::RefCountedThreadSafe<DecodeTestFrameCallback> {
+ public:
+ DecodeTestFrameCallback() {}
+
+ void DecodeComplete(scoped_ptr<I420VideoFrame> decoded_frame,
+ const base::TimeTicks& render_time) {}
+ protected:
+ virtual ~DecodeTestFrameCallback() {}
+ private:
+ friend class base::RefCountedThreadSafe<DecodeTestFrameCallback>;
+};
+} // namespace
class VideoDecoderTest : public ::testing::Test {
protected:
- VideoDecoderTest() {
+ VideoDecoderTest()
+ : task_runner_(new test::FakeTaskRunner(&testing_clock_)),
+ cast_environment_(new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig())),
+ test_callback_(new DecodeTestFrameCallback()) {
// Configure to vp8.
config_.codec = kVp8;
config_.use_external_decoder = false;
- decoder_.reset(new VideoDecoder(config_));
+ decoder_.reset(new VideoDecoder(config_, cast_environment_));
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
}
virtual ~VideoDecoderTest() {}
scoped_ptr<VideoDecoder> decoder_;
VideoReceiverConfig config_;
+ base::SimpleTestTickClock testing_clock_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_refptr<DecodeTestFrameCallback> test_callback_;
};
// TODO(pwestin): EXPECT_DEATH tests can not pass valgrind.
TEST_F(VideoDecoderTest, DISABLED_SizeZero) {
EncodedVideoFrame encoded_frame;
- I420VideoFrame video_frame;
base::TimeTicks render_time;
encoded_frame.codec = kVp8;
-
EXPECT_DEATH(
- decoder_->DecodeVideoFrame(&encoded_frame, render_time, &video_frame),
- "Empty video frame");
+ decoder_->DecodeVideoFrame(
+ &encoded_frame, render_time,
+ base::Bind(&DecodeTestFrameCallback::DecodeComplete, test_callback_)),
+ "Empty frame");
}
// TODO(pwestin): EXPECT_DEATH tests can not pass valgrind.
TEST_F(VideoDecoderTest, DISABLED_InvalidCodec) {
EncodedVideoFrame encoded_frame;
- I420VideoFrame video_frame;
base::TimeTicks render_time;
encoded_frame.data.assign(kFrameSize, 0);
encoded_frame.codec = kExternalVideo;
EXPECT_DEATH(
- decoder_->DecodeVideoFrame(&encoded_frame, render_time, &video_frame),
- "Invalid codec");
+ decoder_->DecodeVideoFrame(&encoded_frame, render_time, base::Bind(
+ &DecodeTestFrameCallback::DecodeComplete, test_callback_)),
+ "Invalid codec");
}
// TODO(pwestin): Test decoding a real frame.
diff --git a/media/cast/video_receiver/video_receiver.cc b/media/cast/video_receiver/video_receiver.cc
index 03ec0ea956..98bed1fc69 100644
--- a/media/cast/video_receiver/video_receiver.cc
+++ b/media/cast/video_receiver/video_receiver.cc
@@ -5,9 +5,12 @@
#include "media/cast/video_receiver/video_receiver.h"
#include <algorithm>
+
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "crypto/encryptor.h"
+#include "crypto/symmetric_key.h"
#include "media/cast/cast_defines.h"
#include "media/cast/framer/framer.h"
#include "media/cast/video_receiver/video_decoder.h"
@@ -25,43 +28,20 @@ static const int64_t kMinProcessIntervalMs = 5;
// Used to pass payload data into the video receiver.
class LocalRtpVideoData : public RtpData {
public:
- explicit LocalRtpVideoData(base::TickClock* clock,
- VideoReceiver* video_receiver)
- : clock_(clock),
- video_receiver_(video_receiver),
- time_updated_(false),
- incoming_rtp_timestamp_(0) {
- }
+ explicit LocalRtpVideoData(VideoReceiver* video_receiver)
+ : video_receiver_(video_receiver) {}
+
virtual ~LocalRtpVideoData() {}
virtual void OnReceivedPayloadData(const uint8* payload_data,
size_t payload_size,
const RtpCastHeader* rtp_header) OVERRIDE {
- base::TimeTicks now = clock_->NowTicks();
- if (time_incoming_packet_.is_null() || now - time_incoming_packet_ >
- base::TimeDelta::FromMilliseconds(kMinTimeBetweenOffsetUpdatesMs)) {
- incoming_rtp_timestamp_ = rtp_header->webrtc.header.timestamp;
- time_incoming_packet_ = now;
- time_updated_ = true;
- }
- video_receiver_->IncomingRtpPacket(payload_data, payload_size, *rtp_header);
- }
-
- bool GetPacketTimeInformation(base::TimeTicks* time_incoming_packet,
- uint32* incoming_rtp_timestamp) {
- *time_incoming_packet = time_incoming_packet_;
- *incoming_rtp_timestamp = incoming_rtp_timestamp_;
- bool time_updated = time_updated_;
- time_updated_ = false;
- return time_updated;
+ video_receiver_->IncomingParsedRtpPacket(payload_data, payload_size,
+ *rtp_header);
}
private:
- base::TickClock* clock_; // Not owned by this class.
VideoReceiver* video_receiver_;
- bool time_updated_;
- base::TimeTicks time_incoming_packet_;
- uint32 incoming_rtp_timestamp_;
};
// Local implementation of RtpPayloadFeedback (defined in rtp_defines.h)
@@ -108,56 +88,70 @@ VideoReceiver::VideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
PacedPacketSender* const packet_sender)
: cast_environment_(cast_environment),
codec_(video_config.codec),
- incoming_ssrc_(video_config.incoming_ssrc),
target_delay_delta_(
base::TimeDelta::FromMilliseconds(video_config.rtp_max_delay_ms)),
frame_delay_(base::TimeDelta::FromMilliseconds(
1000 / video_config.max_frame_rate)),
- incoming_payload_callback_(
- new LocalRtpVideoData(cast_environment_->Clock(), this)),
+ incoming_payload_callback_(new LocalRtpVideoData(this)),
incoming_payload_feedback_(new LocalRtpVideoFeedback(this)),
rtp_receiver_(cast_environment_->Clock(), NULL, &video_config,
incoming_payload_callback_.get()),
rtp_video_receiver_statistics_(
new LocalRtpReceiverStatistics(&rtp_receiver_)),
+ time_incoming_packet_updated_(false),
+ incoming_rtp_timestamp_(0),
weak_factory_(this) {
int max_unacked_frames = video_config.rtp_max_delay_ms *
video_config.max_frame_rate / 1000;
DCHECK(max_unacked_frames) << "Invalid argument";
+ if (video_config.aes_iv_mask.size() == kAesKeySize &&
+ video_config.aes_key.size() == kAesKeySize) {
+ iv_mask_ = video_config.aes_iv_mask;
+ crypto::SymmetricKey* key = crypto::SymmetricKey::Import(
+ crypto::SymmetricKey::AES, video_config.aes_key);
+ decryptor_.reset(new crypto::Encryptor());
+ decryptor_->Init(key, crypto::Encryptor::CTR, std::string());
+ } else if (video_config.aes_iv_mask.size() != 0 ||
+ video_config.aes_key.size() != 0) {
+ DCHECK(false) << "Invalid crypto configuration";
+ }
+
framer_.reset(new Framer(cast_environment->Clock(),
incoming_payload_feedback_.get(),
video_config.incoming_ssrc,
video_config.decoder_faster_than_max_frame_rate,
max_unacked_frames));
if (!video_config.use_external_decoder) {
- video_decoder_.reset(new VideoDecoder(video_config));
+ video_decoder_.reset(new VideoDecoder(video_config, cast_environment));
}
rtcp_.reset(
- new Rtcp(cast_environment_->Clock(),
+ new Rtcp(cast_environment_,
NULL,
packet_sender,
NULL,
rtp_video_receiver_statistics_.get(),
video_config.rtcp_mode,
base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
- false,
video_config.feedback_ssrc,
+ video_config.incoming_ssrc,
video_config.rtcp_c_name));
+}
+
+VideoReceiver::~VideoReceiver() {}
- rtcp_->SetRemoteSSRC(video_config.incoming_ssrc);
+void VideoReceiver::InitializeTimers() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
ScheduleNextRtcpReport();
ScheduleNextCastMessage();
}
-VideoReceiver::~VideoReceiver() {}
-
void VideoReceiver::GetRawVideoFrame(
const VideoFrameDecodedCallback& callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
GetEncodedVideoFrame(base::Bind(&VideoReceiver::DecodeVideoFrame,
- weak_factory_.GetWeakPtr(),
- callback));
+ base::Unretained(this), callback));
}
// Called when we have a frame to decode.
@@ -165,11 +159,11 @@ void VideoReceiver::DecodeVideoFrame(
const VideoFrameDecodedCallback& callback,
scoped_ptr<EncodedVideoFrame> encoded_frame,
const base::TimeTicks& render_time) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
// Hand the ownership of the encoded frame to the decode thread.
cast_environment_->PostTask(CastEnvironment::VIDEO_DECODER, FROM_HERE,
- base::Bind(&VideoReceiver::DecodeVideoFrameThread,
- weak_factory_.GetWeakPtr(), base::Passed(&encoded_frame),
- render_time, callback));
+ base::Bind(&VideoReceiver::DecodeVideoFrameThread, base::Unretained(this),
+ base::Passed(&encoded_frame), render_time, callback));
}
// Utility function to run the decoder on a designated decoding thread.
@@ -180,29 +174,40 @@ void VideoReceiver::DecodeVideoFrameThread(
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::VIDEO_DECODER));
DCHECK(video_decoder_);
- // TODO(mikhal): Allow the application to allocate this memory.
- scoped_ptr<I420VideoFrame> video_frame(new I420VideoFrame());
-
- bool success = video_decoder_->DecodeVideoFrame(encoded_frame.get(),
- render_time, video_frame.get());
-
- if (success) {
- VLOG(1) << "Decoded frame " << static_cast<int>(encoded_frame->frame_id);
- // Frame decoded - return frame to the user via callback.
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(frame_decoded_callback,
- base::Passed(&video_frame), render_time));
- } else {
+ if (!(video_decoder_->DecodeVideoFrame(encoded_frame.get(), render_time,
+ frame_decoded_callback))) {
// This will happen if we decide to decode but not show a frame.
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoReceiver::GetRawVideoFrame,
- weak_factory_.GetWeakPtr(), frame_decoded_callback));
+ base::Bind(&VideoReceiver::GetRawVideoFrame, base::Unretained(this),
+ frame_decoded_callback));
+ }
+}
+
+bool VideoReceiver::DecryptVideoFrame(
+ scoped_ptr<EncodedVideoFrame>* video_frame) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(decryptor_) << "Invalid state";
+
+ if (!decryptor_->SetCounter(GetAesNonce((*video_frame)->frame_id,
+ iv_mask_))) {
+ NOTREACHED() << "Failed to set counter";
+ return false;
+ }
+ std::string decrypted_video_data;
+ if (!decryptor_->Decrypt((*video_frame)->data, &decrypted_video_data)) {
+ VLOG(1) << "Decryption error";
+ // Give up on this frame, release it from jitter buffer.
+ framer_->ReleaseFrame((*video_frame)->frame_id);
+ return false;
}
+ (*video_frame)->data.swap(decrypted_video_data);
+ return true;
}
// Called from the main cast thread.
void VideoReceiver::GetEncodedVideoFrame(
const VideoFrameEncodedCallback& callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
uint32 rtp_timestamp = 0;
bool next_frame = false;
@@ -213,6 +218,13 @@ void VideoReceiver::GetEncodedVideoFrame(
queued_encoded_callbacks_.push_back(callback);
return;
}
+
+ if (decryptor_ && !DecryptVideoFrame(&encoded_frame)) {
+ // Logging already done.
+ queued_encoded_callbacks_.push_back(callback);
+ return;
+ }
+
base::TimeTicks render_time;
if (PullEncodedVideoFrame(rtp_timestamp, next_frame, &encoded_frame,
&render_time)) {
@@ -228,14 +240,19 @@ void VideoReceiver::GetEncodedVideoFrame(
// Should we pull the encoded video frame from the framer? decided by if this is
// the next frame or we are running out of time and have to pull the following
// frame.
-// If the frame it too old to be rendered we set the don't show flag in the
+// If the frame is too old to be rendered we set the don't show flag in the
// video bitstream where possible.
bool VideoReceiver::PullEncodedVideoFrame(uint32 rtp_timestamp,
bool next_frame, scoped_ptr<EncodedVideoFrame>* encoded_frame,
base::TimeTicks* render_time) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeTicks now = cast_environment_->Clock()->NowTicks();
*render_time = GetRenderTime(now, rtp_timestamp);
+ // TODO(mikhal): Store actual render time and not diff.
+ cast_environment_->Logging()->InsertFrameEventWithDelay(kVideoRenderDelay,
+ rtp_timestamp, (*encoded_frame)->frame_id, now - *render_time);
+
// Minimum time before a frame is due to be rendered before we pull it for
// decode.
base::TimeDelta min_wait_delta = frame_delay_;
@@ -250,7 +267,7 @@ bool VideoReceiver::PullEncodedVideoFrame(uint32 rtp_timestamp,
cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&VideoReceiver::PlayoutTimeout, weak_factory_.GetWeakPtr()),
time_until_release);
- VLOG(0) << "Wait before releasing frame "
+ VLOG(1) << "Wait before releasing frame "
<< static_cast<int>((*encoded_frame)->frame_id)
<< " time " << time_until_release.InMilliseconds();
return false;
@@ -260,7 +277,7 @@ bool VideoReceiver::PullEncodedVideoFrame(uint32 rtp_timestamp,
base::TimeDelta::FromMilliseconds(-kDontShowTimeoutMs);
if (codec_ == kVp8 && time_until_render < dont_show_timeout_delta) {
(*encoded_frame)->data[0] &= 0xef;
- VLOG(0) << "Don't show frame "
+ VLOG(1) << "Don't show frame "
<< static_cast<int>((*encoded_frame)->frame_id)
<< " time_until_render:" << time_until_render.InMilliseconds();
} else {
@@ -275,6 +292,7 @@ bool VideoReceiver::PullEncodedVideoFrame(uint32 rtp_timestamp,
}
void VideoReceiver::PlayoutTimeout() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (queued_encoded_callbacks_.empty()) return;
uint32 rtp_timestamp = 0;
@@ -293,6 +311,11 @@ void VideoReceiver::PlayoutTimeout() {
VLOG(1) << "PlayoutTimeout retrieved frame "
<< static_cast<int>(encoded_frame->frame_id);
+ if (decryptor_ && !DecryptVideoFrame(&encoded_frame)) {
+ // Logging already done.
+ return;
+ }
+
base::TimeTicks render_time;
if (PullEncodedVideoFrame(rtp_timestamp, next_frame, &encoded_frame,
&render_time)) {
@@ -310,36 +333,33 @@ void VideoReceiver::PlayoutTimeout() {
base::TimeTicks VideoReceiver::GetRenderTime(base::TimeTicks now,
uint32 rtp_timestamp) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
// Senders time in ms when this frame was captured.
// Note: the senders clock and our local clock might not be synced.
base::TimeTicks rtp_timestamp_in_ticks;
- base::TimeTicks time_incoming_packet;
- uint32 incoming_rtp_timestamp;
if (time_offset_.InMilliseconds() == 0) {
- incoming_payload_callback_->GetPacketTimeInformation(
- &time_incoming_packet, &incoming_rtp_timestamp);
-
if (!rtcp_->RtpTimestampInSenderTime(kVideoFrequency,
- incoming_rtp_timestamp,
+ incoming_rtp_timestamp_,
&rtp_timestamp_in_ticks)) {
// We have not received any RTCP to sync the stream play it out as soon as
// possible.
return now;
}
- time_offset_ = time_incoming_packet - rtp_timestamp_in_ticks;
- } else if (incoming_payload_callback_->GetPacketTimeInformation(
- &time_incoming_packet, &incoming_rtp_timestamp)) {
+ time_offset_ = time_incoming_packet_ - rtp_timestamp_in_ticks;
+ } else if (time_incoming_packet_updated_) {
if (rtcp_->RtpTimestampInSenderTime(kVideoFrequency,
- incoming_rtp_timestamp,
+ incoming_rtp_timestamp_,
&rtp_timestamp_in_ticks)) {
// Time to update the time_offset.
base::TimeDelta time_offset =
- time_incoming_packet - rtp_timestamp_in_ticks;
+ time_incoming_packet_ - rtp_timestamp_in_ticks;
time_offset_ = ((kTimeOffsetFilter - 1) * time_offset_ + time_offset)
/ kTimeOffsetFilter;
}
}
+ // Reset |time_incoming_packet_updated_| to enable a future measurement.
+ time_incoming_packet_updated_ = false;
if (!rtcp_->RtpTimestampInSenderTime(kVideoFrequency,
rtp_timestamp,
&rtp_timestamp_in_ticks)) {
@@ -351,6 +371,7 @@ base::TimeTicks VideoReceiver::GetRenderTime(base::TimeTicks now,
void VideoReceiver::IncomingPacket(const uint8* packet, size_t length,
const base::Closure callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (Rtcp::IsRtcpPacket(packet, length)) {
rtcp_->IncomingRtcpPacket(packet, length);
} else {
@@ -359,9 +380,24 @@ void VideoReceiver::IncomingPacket(const uint8* packet, size_t length,
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
}
-void VideoReceiver::IncomingRtpPacket(const uint8* payload_data,
- size_t payload_size,
- const RtpCastHeader& rtp_header) {
+void VideoReceiver::IncomingParsedRtpPacket(const uint8* payload_data,
+ size_t payload_size,
+ const RtpCastHeader& rtp_header) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ if (time_incoming_packet_.is_null() || now - time_incoming_packet_ >
+ base::TimeDelta::FromMilliseconds(kMinTimeBetweenOffsetUpdatesMs)) {
+ if (time_incoming_packet_.is_null()) InitializeTimers();
+ incoming_rtp_timestamp_ = rtp_header.webrtc.header.timestamp;
+ time_incoming_packet_ = now;
+ time_incoming_packet_updated_ = true;
+ }
+
+ cast_environment_->Logging()->InsertPacketEvent(kPacketReceived,
+ rtp_header.webrtc.header.timestamp, rtp_header.frame_id,
+ rtp_header.packet_id, rtp_header.max_packet_id, payload_size);
+
bool complete = framer_->InsertPacket(payload_data, payload_size, rtp_header);
if (!complete) return; // Video frame not complete; wait for more packets.
@@ -377,18 +413,16 @@ void VideoReceiver::IncomingRtpPacket(const uint8* payload_data,
// Send a cast feedback message. Actual message created in the framer (cast
// message builder).
void VideoReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
- rtcp_->SendRtcpCast(cast_message);
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ // TODO(pwestin): wire up log messages.
+ rtcp_->SendRtcpFromRtpReceiver(&cast_message, NULL);
time_last_sent_cast_message_= cast_environment_->Clock()->NowTicks();
}
-// Send a key frame request to the sender.
-void VideoReceiver::RequestKeyFrame() {
- rtcp_->SendRtcpPli(incoming_ssrc_);
-}
-
// Cast messages should be sent within a maximum interval. Schedule a call
// if not triggered elsewhere, e.g. by the cast message_builder.
void VideoReceiver::ScheduleNextCastMessage() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeTicks send_time;
framer_->TimeToSendNextCastMessage(&send_time);
@@ -402,12 +436,14 @@ void VideoReceiver::ScheduleNextCastMessage() {
}
void VideoReceiver::SendNextCastMessage() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
framer_->SendCastMessage(); // Will only send a message if it is time.
ScheduleNextCastMessage();
}
// Schedule the next RTCP report to be sent back to the sender.
void VideoReceiver::ScheduleNextRtcpReport() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeDelta time_to_next = rtcp_->TimeToSendNextRtcpReport() -
cast_environment_->Clock()->NowTicks();
@@ -420,7 +456,8 @@ void VideoReceiver::ScheduleNextRtcpReport() {
}
void VideoReceiver::SendNextRtcpReport() {
- rtcp_->SendRtcpReport(incoming_ssrc_);
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ rtcp_->SendRtcpFromRtpReceiver(NULL, NULL);
ScheduleNextRtcpReport();
}
diff --git a/media/cast/video_receiver/video_receiver.gypi b/media/cast/video_receiver/video_receiver.gypi
index bbee92e5ca..e1a9902872 100644
--- a/media/cast/video_receiver/video_receiver.gypi
+++ b/media/cast/video_receiver/video_receiver.gypi
@@ -19,6 +19,7 @@
'video_receiver.cc',
], # source
'dependencies': [
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
'framer/framer.gyp:cast_framer',
'video_receiver/codecs/vp8/vp8_decoder.gyp:cast_vp8_decoder',
'rtp_receiver/rtp_receiver.gyp:cast_rtp_receiver',
diff --git a/media/cast/video_receiver/video_receiver.h b/media/cast/video_receiver/video_receiver.h
index 8b14aae566..2f92e71856 100644
--- a/media/cast/video_receiver/video_receiver.h
+++ b/media/cast/video_receiver/video_receiver.h
@@ -20,6 +20,10 @@
#include "media/cast/rtp_common/rtp_defines.h"
#include "media/cast/rtp_receiver/rtp_receiver.h"
+namespace crypto {
+ class Encryptor;
+}
+
namespace media {
namespace cast {
@@ -32,7 +36,6 @@ class Rtcp;
class RtpReceiverStatistics;
class VideoDecoder;
-
// Should only be called from the Main cast thread.
class VideoReceiver : public base::NonThreadSafe,
public base::SupportsWeakPtr<VideoReceiver> {
@@ -54,9 +57,9 @@ class VideoReceiver : public base::NonThreadSafe,
const base::Closure callback);
protected:
- void IncomingRtpPacket(const uint8* payload_data,
- size_t payload_size,
- const RtpCastHeader& rtp_header);
+ void IncomingParsedRtpPacket(const uint8* payload_data,
+ size_t payload_size,
+ const RtpCastHeader& rtp_header);
void DecodeVideoFrameThread(
scoped_ptr<EncodedVideoFrame> encoded_frame,
@@ -68,12 +71,13 @@ class VideoReceiver : public base::NonThreadSafe,
friend class LocalRtpVideoFeedback;
void CastFeedback(const RtcpCastMessage& cast_message);
- void RequestKeyFrame();
void DecodeVideoFrame(const VideoFrameDecodedCallback& callback,
scoped_ptr<EncodedVideoFrame> encoded_frame,
const base::TimeTicks& render_time);
+ bool DecryptVideoFrame(scoped_ptr<EncodedVideoFrame>* video_frame);
+
bool PullEncodedVideoFrame(uint32 rtp_timestamp,
bool next_frame,
scoped_ptr<EncodedVideoFrame>* encoded_frame,
@@ -84,6 +88,8 @@ class VideoReceiver : public base::NonThreadSafe,
// Returns Render time based on current time and the rtp timestamp.
base::TimeTicks GetRenderTime(base::TimeTicks now, uint32 rtp_timestamp);
+ void InitializeTimers();
+
// Schedule timing for the next cast message.
void ScheduleNextCastMessage();
@@ -100,7 +106,6 @@ class VideoReceiver : public base::NonThreadSafe,
scoped_refptr<CastEnvironment> cast_environment_;
scoped_ptr<Framer> framer_;
const VideoCodec codec_;
- const uint32 incoming_ssrc_;
base::TimeDelta target_delay_delta_;
base::TimeDelta frame_delay_;
scoped_ptr<LocalRtpVideoData> incoming_payload_callback_;
@@ -109,10 +114,13 @@ class VideoReceiver : public base::NonThreadSafe,
scoped_ptr<Rtcp> rtcp_;
scoped_ptr<RtpReceiverStatistics> rtp_video_receiver_statistics_;
base::TimeTicks time_last_sent_cast_message_;
- // Sender-receiver offset estimation.
- base::TimeDelta time_offset_;
-
+ base::TimeDelta time_offset_; // Sender-receiver offset estimation.
+ scoped_ptr<crypto::Encryptor> decryptor_;
+ std::string iv_mask_;
std::list<VideoFrameEncodedCallback> queued_encoded_callbacks_;
+ bool time_incoming_packet_updated_;
+ base::TimeTicks time_incoming_packet_;
+ uint32 incoming_rtp_timestamp_;
base::WeakPtrFactory<VideoReceiver> weak_factory_;
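The header now carries decryptor_ and iv_mask_, mirroring the sender-side encryption added later in this patch. The body of DecryptVideoFrame() is not part of this excerpt; the sketch below shows how these members would typically be used with Chromium's crypto::Encryptor in CTR mode, with the per-frame counter derived via GetAesNonce() as on the sender side.

// Sketch only, mirroring the sender-side EncryptVideoFrame() added later in
// this patch; the real DecryptVideoFrame() body is not shown in this excerpt.
bool VideoReceiver::DecryptVideoFrame(
    scoped_ptr<EncodedVideoFrame>* video_frame) {
  DCHECK(decryptor_) << "Invalid state";

  // Derive the AES-CTR counter from the frame id and the shared IV mask.
  if (!decryptor_->SetCounter(GetAesNonce((*video_frame)->frame_id,
                                          iv_mask_))) {
    VLOG(1) << "Failed to set counter";
    return false;
  }
  std::string decrypted;
  if (!decryptor_->Decrypt((*video_frame)->data, &decrypted)) {
    VLOG(1) << "Decryption error";
    return false;
  }
  (*video_frame)->data.swap(decrypted);
  return true;
}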
diff --git a/media/cast/video_receiver/video_receiver_unittest.cc b/media/cast/video_receiver/video_receiver_unittest.cc
index 31c5bdd6e7..b5dda54695 100644
--- a/media/cast/video_receiver/video_receiver_unittest.cc
+++ b/media/cast/video_receiver/video_receiver_unittest.cc
@@ -21,7 +21,8 @@ namespace cast {
using testing::_;
-// was thread counted thread safe.
+namespace {
+// Was ref-counted thread-safe.
class TestVideoReceiverCallback :
public base::RefCountedThreadSafe<TestVideoReceiverCallback> {
public:
@@ -41,7 +42,7 @@ class TestVideoReceiverCallback :
++num_called_;
}
- int number_times_called() { return num_called_;}
+ int number_times_called() const { return num_called_;}
protected:
virtual ~TestVideoReceiverCallback() {}
@@ -51,6 +52,7 @@ class TestVideoReceiverCallback :
int num_called_;
};
+} // namespace
class PeerVideoReceiver : public VideoReceiver {
public:
@@ -59,7 +61,7 @@ class PeerVideoReceiver : public VideoReceiver {
PacedPacketSender* const packet_sender)
: VideoReceiver(cast_environment, video_config, packet_sender) {
}
- using VideoReceiver::IncomingRtpPacket;
+ using VideoReceiver::IncomingParsedRtpPacket;
};
@@ -71,7 +73,8 @@ class VideoReceiverTest : public ::testing::Test {
config_.use_external_decoder = false;
task_runner_ = new test::FakeTaskRunner(&testing_clock_);
cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_);
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig());
receiver_.reset(new
PeerVideoReceiver(cast_environment_, config_, &mock_transport_));
testing_clock_.Advance(
@@ -108,7 +111,8 @@ class VideoReceiverTest : public ::testing::Test {
TEST_F(VideoReceiverTest, GetOnePacketEncodedframe) {
EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).WillRepeatedly(
testing::Return(true));
- receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+ receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
+ rtp_header_);
VideoFrameEncodedCallback frame_to_decode_callback =
base::Bind(&TestVideoReceiverCallback::FrameToDecode,
@@ -123,12 +127,15 @@ TEST_F(VideoReceiverTest, MultiplePackets) {
EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).WillRepeatedly(
testing::Return(true));
rtp_header_.max_packet_id = 2;
- receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+ receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
+ rtp_header_);
++rtp_header_.packet_id;
++rtp_header_.webrtc.header.sequenceNumber;
- receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+ receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
+ rtp_header_);
++rtp_header_.packet_id;
- receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+ receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
+ rtp_header_);
VideoFrameEncodedCallback frame_to_decode_callback =
base::Bind(&TestVideoReceiverCallback::FrameToDecode,
@@ -143,7 +150,8 @@ TEST_F(VideoReceiverTest, MultiplePackets) {
TEST_F(VideoReceiverTest, GetOnePacketRawframe) {
EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).WillRepeatedly(
testing::Return(true));
- receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+ receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
+ rtp_header_);
// Decode error - requires legal input.
VideoFrameDecodedCallback frame_decoded_callback =
base::Bind(&TestVideoReceiverCallback::DecodeComplete,
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.cc b/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
index 1021438020..5093d2512f 100644
--- a/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
+++ b/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
@@ -9,6 +9,7 @@
#include <vector>
#include "base/logging.h"
+#include "media/base/video_frame.h"
#include "media/cast/cast_defines.h"
#include "media/cast/rtp_common/rtp_defines.h"
#include "third_party/libvpx/source/libvpx/vpx/vp8cx.h"
@@ -121,17 +122,20 @@ void Vp8Encoder::InitEncode(int number_of_cores) {
rc_max_intra_target);
}
-bool Vp8Encoder::Encode(const I420VideoFrame& input_image,
+bool Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
EncodedVideoFrame* encoded_image) {
// Image in vpx_image_t format.
// Input image is const. VP8's raw image is not defined as const.
- raw_image_->planes[PLANE_Y] = const_cast<uint8*>(input_image.y_plane.data);
- raw_image_->planes[PLANE_U] = const_cast<uint8*>(input_image.u_plane.data);
- raw_image_->planes[PLANE_V] = const_cast<uint8*>(input_image.v_plane.data);
+ raw_image_->planes[PLANE_Y] =
+ const_cast<uint8*>(video_frame->data(VideoFrame::kYPlane));
+ raw_image_->planes[PLANE_U] =
+ const_cast<uint8*>(video_frame->data(VideoFrame::kUPlane));
+ raw_image_->planes[PLANE_V] =
+ const_cast<uint8*>(video_frame->data(VideoFrame::kVPlane));
- raw_image_->stride[VPX_PLANE_Y] = input_image.y_plane.stride;
- raw_image_->stride[VPX_PLANE_U] = input_image.u_plane.stride;
- raw_image_->stride[VPX_PLANE_V] = input_image.v_plane.stride;
+ raw_image_->stride[VPX_PLANE_Y] = video_frame->stride(VideoFrame::kYPlane);
+ raw_image_->stride[VPX_PLANE_U] = video_frame->stride(VideoFrame::kUPlane);
+ raw_image_->stride[VPX_PLANE_V] = video_frame->stride(VideoFrame::kVPlane);
uint8 latest_frame_id_to_reference;
Vp8Buffers buffer_to_update;
@@ -230,7 +234,7 @@ void Vp8Encoder::GetCodecReferenceFlags(vpx_codec_flags_t* flags) {
}
}
-uint8 Vp8Encoder::GetLatestFrameIdToReference() {
+uint32 Vp8Encoder::GetLatestFrameIdToReference() {
if (!use_multiple_video_buffers_) return last_encoded_frame_id_;
int latest_frame_id_to_reference = -1;
@@ -258,7 +262,7 @@ uint8 Vp8Encoder::GetLatestFrameIdToReference() {
}
}
DCHECK(latest_frame_id_to_reference != -1) << "Invalid state";
- return static_cast<uint8>(latest_frame_id_to_reference);
+ return static_cast<uint32>(latest_frame_id_to_reference);
}
Vp8Encoder::Vp8Buffers Vp8Encoder::GetNextBufferToUpdate() {
@@ -333,7 +337,7 @@ void Vp8Encoder::UpdateRates(uint32 new_bitrate) {
}
}
-void Vp8Encoder::LatestFrameIdToReference(uint8 frame_id) {
+void Vp8Encoder::LatestFrameIdToReference(uint32 frame_id) {
if (!use_multiple_video_buffers_) return;
VLOG(1) << "VP8 ok to reference frame:" << static_cast<int>(frame_id);
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi b/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi
index 0b12789aa0..fa9c2944a1 100644
--- a/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi
+++ b/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi
@@ -12,6 +12,7 @@
'vp8_encoder.h',
], # source
'dependencies': [
+ '<(DEPTH)/ui/gfx/gfx.gyp:gfx',
'<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
],
},
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.h b/media/cast/video_sender/codecs/vp8/vp8_encoder.h
index 777f862e4a..d09cc27dab 100644
--- a/media/cast/video_sender/codecs/vp8/vp8_encoder.h
+++ b/media/cast/video_sender/codecs/vp8/vp8_encoder.h
@@ -10,6 +10,10 @@
#include "media/cast/cast_config.h"
#include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h"
+namespace media {
+class VideoFrame;
+}
+
// VPX forward declaration.
typedef struct vpx_codec_ctx vpx_enc_ctx_t;
@@ -26,7 +30,7 @@ class Vp8Encoder {
~Vp8Encoder();
// Encode a raw image (as a part of a video stream).
- bool Encode(const I420VideoFrame& input_image,
+ bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
EncodedVideoFrame* encoded_image);
// Update the encoder with a new target bit rate.
@@ -35,7 +39,7 @@ class Vp8Encoder {
// Set the next frame to be a key frame.
void GenerateKeyFrame();
- void LatestFrameIdToReference(uint8 frame_id);
+ void LatestFrameIdToReference(uint32 frame_id);
private:
enum Vp8Buffers {
@@ -54,7 +58,7 @@ class Vp8Encoder {
Vp8Buffers GetNextBufferToUpdate();
// Calculate which previous frame to reference.
- uint8 GetLatestFrameIdToReference();
+ uint32 GetLatestFrameIdToReference();
// Get encoder flags for our referenced encoder buffers.
void GetCodecReferenceFlags(vpx_codec_flags_t* flags);
@@ -74,8 +78,8 @@ class Vp8Encoder {
bool key_frame_requested_;
int64 timestamp_;
- uint8 last_encoded_frame_id_;
- uint8 used_buffers_frame_id_[kNumberOfVp8VideoBuffers];
+ uint32 last_encoded_frame_id_;
+ uint32 used_buffers_frame_id_[kNumberOfVp8VideoBuffers];
bool acked_frame_buffers_[kNumberOfVp8VideoBuffers];
Vp8Buffers last_used_vp8_buffer_;
int number_of_repeated_buffers_;
diff --git a/media/cast/video_sender/mock_video_encoder_controller.h b/media/cast/video_sender/mock_video_encoder_controller.h
index a97bcb1eb9..cfc58a9eb8 100644
--- a/media/cast/video_sender/mock_video_encoder_controller.h
+++ b/media/cast/video_sender/mock_video_encoder_controller.h
@@ -22,7 +22,7 @@ class MockVideoEncoderController : public VideoEncoderController {
MOCK_METHOD0(GenerateKeyFrame, void());
- MOCK_METHOD1(LatestFrameIdToReference, void(uint8 frame_id));
+ MOCK_METHOD1(LatestFrameIdToReference, void(uint32 frame_id));
MOCK_CONST_METHOD0(NumberOfSkippedFrames, int());
};
diff --git a/media/cast/video_sender/video_encoder.cc b/media/cast/video_sender/video_encoder.cc
index 42d1a88cc2..46d82dddd9 100644
--- a/media/cast/video_sender/video_encoder.cc
+++ b/media/cast/video_sender/video_encoder.cc
@@ -6,10 +6,19 @@
#include "base/bind.h"
#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/base/video_frame.h"
+#include "media/cast/cast_defines.h"
namespace media {
namespace cast {
+void LogFrameEncodedEvent(CastEnvironment* const cast_environment,
+ const base::TimeTicks& capture_time) {
+ cast_environment->Logging()->InsertFrameEvent(kVideoFrameEncoded,
+ GetVideoRtpTimestamp(capture_time), kFrameIdUnknown);
+}
+
VideoEncoder::VideoEncoder(scoped_refptr<CastEnvironment> cast_environment,
const VideoSenderConfig& video_config,
uint8 max_unacked_frames)
@@ -31,10 +40,11 @@ VideoEncoder::VideoEncoder(scoped_refptr<CastEnvironment> cast_environment,
VideoEncoder::~VideoEncoder() {}
bool VideoEncoder::EncodeVideoFrame(
- const I420VideoFrame* video_frame,
+ const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& capture_time,
const FrameEncodedCallback& frame_encoded_callback,
const base::Closure frame_release_callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (video_config_.codec != kVp8) return false;
if (skip_next_frame_) {
@@ -43,21 +53,25 @@ bool VideoEncoder::EncodeVideoFrame(
return false;
}
+ cast_environment_->Logging()->InsertFrameEvent(kVideoFrameSentToEncoder,
+ GetVideoRtpTimestamp(capture_time), kFrameIdUnknown);
cast_environment_->PostTask(CastEnvironment::VIDEO_ENCODER, FROM_HERE,
- base::Bind(&VideoEncoder::EncodeVideoFrameEncoderThread, this,
- video_frame, capture_time, dynamic_config_, frame_encoded_callback,
- frame_release_callback));
+ base::Bind(&VideoEncoder::EncodeVideoFrameEncoderThread,
+ base::Unretained(this), video_frame, capture_time,
+ dynamic_config_, frame_encoded_callback,
+ frame_release_callback));
dynamic_config_.key_frame_requested = false;
return true;
}
void VideoEncoder::EncodeVideoFrameEncoderThread(
- const I420VideoFrame* video_frame,
+ const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& capture_time,
const CodecDynamicConfig& dynamic_config,
const FrameEncodedCallback& frame_encoded_callback,
const base::Closure frame_release_callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::VIDEO_ENCODER));
if (dynamic_config.key_frame_requested) {
vp8_encoder_->GenerateKeyFrame();
}
@@ -66,8 +80,10 @@ void VideoEncoder::EncodeVideoFrameEncoderThread(
vp8_encoder_->UpdateRates(dynamic_config.bit_rate);
scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
- bool retval = vp8_encoder_->Encode(*video_frame, encoded_frame.get());
+ bool retval = vp8_encoder_->Encode(video_frame, encoded_frame.get());
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(LogFrameEncodedEvent, cast_environment_, capture_time));
// We are done with the video frame; release it.
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
frame_release_callback);
@@ -101,7 +117,7 @@ void VideoEncoder::GenerateKeyFrame() {
}
// Inform the encoder to only reference frames older or equal to frame_id;
-void VideoEncoder::LatestFrameIdToReference(uint8 frame_id) {
+void VideoEncoder::LatestFrameIdToReference(uint32 frame_id) {
dynamic_config_.latest_frame_id_to_reference = frame_id;
}
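VideoEncoder is no longer RefCountedThreadSafe; it is owned by VideoSender through a scoped_ptr, which is why the encode task can now be bound with base::Unretained(this). The hop back to MAIN keeps logging single-threaded. Condensed from the hunks above, the pattern is sketched below; the lifetime guarantee is an assumption the owner must uphold.

// Sketch only: the thread-hopping pattern used by the encode path above.
// base::Unretained(this) relies on VideoSender (the owner) keeping this
// object alive until every posted encode task has run.
cast_environment_->PostTask(CastEnvironment::VIDEO_ENCODER, FROM_HERE,
    base::Bind(&VideoEncoder::EncodeVideoFrameEncoderThread,
               base::Unretained(this), video_frame, capture_time,
               dynamic_config_, frame_encoded_callback,
               frame_release_callback));

// Later, on the VIDEO_ENCODER thread, log events and the frame-release
// closure are posted back to the MAIN thread.
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
    base::Bind(LogFrameEncodedEvent, cast_environment_, capture_time));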
diff --git a/media/cast/video_sender/video_encoder.h b/media/cast/video_sender/video_encoder.h
index fa85468c1f..f8415f679a 100644
--- a/media/cast/video_sender/video_encoder.h
+++ b/media/cast/video_sender/video_encoder.h
@@ -5,20 +5,23 @@
#ifndef MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
#define MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
-#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
namespace media {
+class VideoFrame;
+}
+
+namespace media {
namespace cast {
// This object is called externally from the main cast thread and internally
// from the video encoder thread.
-class VideoEncoder : public VideoEncoderController,
- public base::RefCountedThreadSafe<VideoEncoder> {
+class VideoEncoder : public VideoEncoderController {
public:
typedef base::Callback<void(scoped_ptr<EncodedVideoFrame>,
const base::TimeTicks&)> FrameEncodedCallback;
@@ -27,6 +30,8 @@ class VideoEncoder : public VideoEncoderController,
const VideoSenderConfig& video_config,
uint8 max_unacked_frames);
+ virtual ~VideoEncoder();
+
// Called from the main cast thread. This function posts the encode task to
// the video encoder thread.
// The video_frame must be valid until the closure callback is called.
@@ -34,23 +39,21 @@ class VideoEncoder : public VideoEncoderController,
// the encoder is done with the frame; it does not mean that the encoded frame
// has been sent out.
// Once the encoded frame is ready the frame_encoded_callback is called.
- bool EncodeVideoFrame(const I420VideoFrame* video_frame,
+ bool EncodeVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& capture_time,
const FrameEncodedCallback& frame_encoded_callback,
const base::Closure frame_release_callback);
protected:
- virtual ~VideoEncoder();
-
struct CodecDynamicConfig {
bool key_frame_requested;
- uint8 latest_frame_id_to_reference;
+ uint32 latest_frame_id_to_reference;
int bit_rate;
};
// The actual encode, called from the video encoder thread.
void EncodeVideoFrameEncoderThread(
- const I420VideoFrame* video_frame,
+ const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& capture_time,
const CodecDynamicConfig& dynamic_config,
const FrameEncodedCallback& frame_encoded_callback,
@@ -60,7 +63,7 @@ class VideoEncoder : public VideoEncoderController,
virtual void SetBitRate(int new_bit_rate) OVERRIDE;
virtual void SkipNextFrame(bool skip_next_frame) OVERRIDE;
virtual void GenerateKeyFrame() OVERRIDE;
- virtual void LatestFrameIdToReference(uint8 frame_id) OVERRIDE;
+ virtual void LatestFrameIdToReference(uint32 frame_id) OVERRIDE;
virtual int NumberOfSkippedFrames() const OVERRIDE;
private:
diff --git a/media/cast/video_sender/video_encoder_unittest.cc b/media/cast/video_sender/video_encoder_unittest.cc
index 10391f2a8a..01ad50be01 100644
--- a/media/cast/video_sender/video_encoder_unittest.cc
+++ b/media/cast/video_sender/video_encoder_unittest.cc
@@ -7,9 +7,11 @@
#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
+#include "media/base/video_frame.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
#include "media/cast/test/fake_task_runner.h"
+#include "media/cast/test/video_utility.h"
#include "media/cast/video_sender/video_encoder.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -18,10 +20,7 @@ namespace cast {
using testing::_;
-static void ReleaseFrame(const I420VideoFrame* frame) {
- // Empty since we in this test send in the same frame.
-}
-
+namespace {
class TestVideoEncoderCallback :
public base::RefCountedThreadSafe<TestVideoEncoderCallback> {
public:
@@ -57,12 +56,12 @@ class TestVideoEncoderCallback :
uint8 expected_last_referenced_frame_id_;
base::TimeTicks expected_capture_time_;
};
+} // namespace
class VideoEncoderTest : public ::testing::Test {
protected:
VideoEncoderTest()
- : pixels_(320 * 240, 123),
- test_video_encoder_callback_(new TestVideoEncoderCallback()) {
+ : test_video_encoder_callback_(new TestVideoEncoderCallback()) {
video_config_.sender_ssrc = 1;
video_config_.incoming_feedback_ssrc = 2;
video_config_.rtp_payload_type = 127;
@@ -77,17 +76,10 @@ class VideoEncoderTest : public ::testing::Test {
video_config_.max_frame_rate = 30;
video_config_.max_number_of_video_buffers_used = 3;
video_config_.codec = kVp8;
- video_frame_.width = 320;
- video_frame_.height = 240;
- video_frame_.y_plane.stride = video_frame_.width;
- video_frame_.y_plane.length = video_frame_.width;
- video_frame_.y_plane.data = &(pixels_[0]);
- video_frame_.u_plane.stride = video_frame_.width / 2;
- video_frame_.u_plane.length = video_frame_.width / 2;
- video_frame_.u_plane.data = &(pixels_[0]);
- video_frame_.v_plane.stride = video_frame_.width / 2;
- video_frame_.v_plane.length = video_frame_.width / 2;
- video_frame_.v_plane.data = &(pixels_[0]);
+ gfx::Size size(video_config_.width, video_config_.height);
+ video_frame_ = media::VideoFrame::CreateFrame(VideoFrame::I420,
+ size, gfx::Rect(size), size, base::TimeDelta());
+ PopulateVideoFrame(video_frame_, 123);
}
virtual ~VideoEncoderTest() {}
@@ -95,23 +87,23 @@ class VideoEncoderTest : public ::testing::Test {
virtual void SetUp() {
task_runner_ = new test::FakeTaskRunner(&testing_clock_);
cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_);
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig());
}
void Configure(uint8 max_unacked_frames) {
- video_encoder_= new VideoEncoder(cast_environment_, video_config_,
- max_unacked_frames);
+ video_encoder_.reset(new VideoEncoder(cast_environment_, video_config_,
+ max_unacked_frames));
video_encoder_controller_ = video_encoder_.get();
}
base::SimpleTestTickClock testing_clock_;
- std::vector<uint8> pixels_;
scoped_refptr<TestVideoEncoderCallback> test_video_encoder_callback_;
VideoSenderConfig video_config_;
scoped_refptr<test::FakeTaskRunner> task_runner_;
- scoped_refptr<VideoEncoder> video_encoder_;
+ scoped_ptr<VideoEncoder> video_encoder_;
VideoEncoderController* video_encoder_controller_;
- I420VideoFrame video_frame_;
+ scoped_refptr<media::VideoFrame> video_frame_;
scoped_refptr<CastEnvironment> cast_environment_;
};
@@ -126,22 +118,22 @@ TEST_F(VideoEncoderTest, EncodePattern30fpsRunningOutOfAck) {
base::TimeTicks capture_time;
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(base::DoNothing)));
task_runner_->RunTasks();
capture_time += base::TimeDelta::FromMilliseconds(33);
video_encoder_controller_->LatestFrameIdToReference(0);
test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(base::DoNothing)));
task_runner_->RunTasks();
capture_time += base::TimeDelta::FromMilliseconds(33);
video_encoder_controller_->LatestFrameIdToReference(1);
test_video_encoder_callback_->SetExpectedResult(false, 2, 1, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(base::DoNothing)));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(2);
@@ -149,8 +141,8 @@ TEST_F(VideoEncoderTest, EncodePattern30fpsRunningOutOfAck) {
for (int i = 3; i < 6; ++i) {
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(base::DoNothing)));
task_runner_->RunTasks();
}
}
@@ -168,22 +160,22 @@ TEST_F(VideoEncoderTest,DISABLED_EncodePattern60fpsRunningOutOfAck) {
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(base::DoNothing)));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(0);
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(base::DoNothing)));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(1);
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(base::DoNothing)));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(2);
@@ -191,8 +183,8 @@ TEST_F(VideoEncoderTest,DISABLED_EncodePattern60fpsRunningOutOfAck) {
for (int i = 3; i < 9; ++i) {
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(base::DoNothing)));
task_runner_->RunTasks();
}
}
@@ -209,44 +201,44 @@ TEST_F(VideoEncoderTest, DISABLED_EncodePattern60fps200msDelayRunningOutOfAck) {
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(base::DoNothing)));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(0);
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(base::DoNothing)));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(1);
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(base::DoNothing)));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(2);
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, 3, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(base::DoNothing)));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(3);
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, 4, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(base::DoNothing)));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(4);
for (int i = 5; i < 17; ++i) {
test_video_encoder_callback_->SetExpectedResult(false, i, 4, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(base::DoNothing)));
task_runner_->RunTasks();
}
}
diff --git a/media/cast/video_sender/video_sender.cc b/media/cast/video_sender/video_sender.cc
index 52fa3e2625..b53247e743 100644
--- a/media/cast/video_sender/video_sender.cc
+++ b/media/cast/video_sender/video_sender.cc
@@ -9,6 +9,8 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "crypto/encryptor.h"
+#include "crypto/symmetric_key.h"
#include "media/cast/cast_defines.h"
#include "media/cast/pacing/paced_sender.h"
#include "media/cast/video_sender/video_encoder.h"
@@ -24,27 +26,6 @@ class LocalRtcpVideoSenderFeedback : public RtcpSenderFeedback {
: video_sender_(video_sender) {
}
- virtual void OnReceivedReportBlock(
- const RtcpReportBlock& report_block) OVERRIDE {}
-
- virtual void OnReceivedRpsi(uint8 payload_type,
- uint64 picture_id) OVERRIDE {
- NOTIMPLEMENTED();
- }
-
- virtual void OnReceivedRemb(uint32 bitrate) OVERRIDE {
- NOTIMPLEMENTED();
- }
-
- virtual void OnReceivedNackRequest(
- const std::list<uint16>& nack_sequence_numbers) OVERRIDE {
- NOTIMPLEMENTED();
- }
-
- virtual void OnReceivedIntraFrameRequest() OVERRIDE {
- video_sender_->OnReceivedIntraFrameRequest();
- }
-
virtual void OnReceivedCastFeedback(
const RtcpCastMessage& cast_feedback) OVERRIDE {
video_sender_->OnReceivedCastFeedback(cast_feedback);
@@ -74,17 +55,15 @@ VideoSender::VideoSender(
const VideoSenderConfig& video_config,
VideoEncoderController* const video_encoder_controller,
PacedPacketSender* const paced_packet_sender)
- : incoming_feedback_ssrc_(video_config.incoming_feedback_ssrc),
- rtp_max_delay_(
+ : rtp_max_delay_(
base::TimeDelta::FromMilliseconds(video_config.rtp_max_delay_ms)),
max_frame_rate_(video_config.max_frame_rate),
cast_environment_(cast_environment),
rtcp_feedback_(new LocalRtcpVideoSenderFeedback(this)),
- rtp_sender_(new RtpSender(cast_environment->Clock(), NULL, &video_config,
+ rtp_sender_(new RtpSender(cast_environment, NULL, &video_config,
paced_packet_sender)),
last_acked_frame_id_(-1),
last_sent_frame_id_(-1),
- last_sent_key_frame_id_(-1),
duplicate_ack_(0),
last_skip_count_(0),
congestion_control_(cast_environment->Clock(),
@@ -92,6 +71,7 @@ VideoSender::VideoSender(
video_config.max_bitrate,
video_config.min_bitrate,
video_config.start_bitrate),
+ initialized_(false),
weak_factory_(this) {
max_unacked_frames_ = static_cast<uint8>(video_config.rtp_max_delay_ms *
video_config.max_frame_rate / 1000) + 1;
@@ -105,40 +85,60 @@ VideoSender::VideoSender(
DCHECK(video_encoder_controller) << "Invalid argument";
video_encoder_controller_ = video_encoder_controller;
} else {
- video_encoder_ = new VideoEncoder(cast_environment, video_config,
- max_unacked_frames_);
+ video_encoder_.reset(new VideoEncoder(cast_environment, video_config,
+ max_unacked_frames_));
video_encoder_controller_ = video_encoder_.get();
}
+
+ if (video_config.aes_iv_mask.size() == kAesKeySize &&
+ video_config.aes_key.size() == kAesKeySize) {
+ iv_mask_ = video_config.aes_iv_mask;
+ crypto::SymmetricKey* key = crypto::SymmetricKey::Import(
+ crypto::SymmetricKey::AES, video_config.aes_key);
+ encryptor_.reset(new crypto::Encryptor());
+ encryptor_->Init(key, crypto::Encryptor::CTR, std::string());
+ } else if (video_config.aes_iv_mask.size() != 0 ||
+ video_config.aes_key.size() != 0) {
+ DCHECK(false) << "Invalid crypto configuration";
+ }
+
rtcp_.reset(new Rtcp(
- cast_environment_->Clock(),
+ cast_environment_,
rtcp_feedback_.get(),
paced_packet_sender,
rtp_video_sender_statistics_.get(),
NULL,
video_config.rtcp_mode,
base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
- true,
video_config.sender_ssrc,
+ video_config.incoming_feedback_ssrc,
video_config.rtcp_c_name));
-
- rtcp_->SetRemoteSSRC(video_config.incoming_feedback_ssrc);
- ScheduleNextRtcpReport();
- ScheduleNextResendCheck();
- ScheduleNextSkippedFramesCheck();
}
VideoSender::~VideoSender() {}
+void VideoSender::InitializeTimers() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (!initialized_) {
+ initialized_ = true;
+ ScheduleNextRtcpReport();
+ ScheduleNextResendCheck();
+ ScheduleNextSkippedFramesCheck();
+ }
+}
+
void VideoSender::InsertRawVideoFrame(
- const I420VideoFrame* video_frame,
+ const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& capture_time,
- const base::Closure callback) {
+ const base::Closure& callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(video_encoder_.get()) << "Invalid state";
+ cast_environment_->Logging()->InsertFrameEvent(kVideoFrameReceived,
+ GetVideoRtpTimestamp(capture_time), kFrameIdUnknown);
if (!video_encoder_->EncodeVideoFrame(video_frame, capture_time,
base::Bind(&VideoSender::SendEncodedVideoFrameMainThread,
weak_factory_.GetWeakPtr()), callback)) {
- VLOG(0) << "Failed to InsertRawVideoFrame";
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
}
}
@@ -148,6 +148,7 @@ void VideoSender::InsertCodedVideoFrame(const EncodedVideoFrame* encoded_frame,
const base::Closure callback) {
DCHECK(!video_encoder_.get()) << "Invalid state";
DCHECK(encoded_frame) << "Invalid argument";
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
SendEncodedVideoFrame(encoded_frame, capture_time);
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
@@ -159,40 +160,62 @@ void VideoSender::SendEncodedVideoFrameMainThread(
SendEncodedVideoFrame(video_frame.get(), capture_time);
}
+bool VideoSender::EncryptVideoFrame(const EncodedVideoFrame& video_frame,
+ EncodedVideoFrame* encrypted_frame) {
+ DCHECK(encryptor_) << "Invalid state";
+
+ if (!encryptor_->SetCounter(GetAesNonce(video_frame.frame_id, iv_mask_))) {
+ NOTREACHED() << "Failed to set counter";
+ return false;
+ }
+
+ if (!encryptor_->Encrypt(video_frame.data, &encrypted_frame->data)) {
+ NOTREACHED() << "Encrypt error";
+ return false;
+ }
+ encrypted_frame->codec = video_frame.codec;
+ encrypted_frame->key_frame = video_frame.key_frame;
+ encrypted_frame->frame_id = video_frame.frame_id;
+ encrypted_frame->last_referenced_frame_id =
+ video_frame.last_referenced_frame_id;
+ return true;
+}
+
void VideoSender::SendEncodedVideoFrame(const EncodedVideoFrame* encoded_frame,
const base::TimeTicks& capture_time) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
last_send_time_ = cast_environment_->Clock()->NowTicks();
- rtp_sender_->IncomingEncodedVideoFrame(encoded_frame, capture_time);
+
+ if (encryptor_) {
+ EncodedVideoFrame encrypted_video_frame;
+
+ if (!EncryptVideoFrame(*encoded_frame, &encrypted_video_frame)) {
+ // Logging already done.
+ return;
+ }
+ rtp_sender_->IncomingEncodedVideoFrame(&encrypted_video_frame,
+ capture_time);
+ } else {
+ rtp_sender_->IncomingEncodedVideoFrame(encoded_frame, capture_time);
+ }
if (encoded_frame->key_frame) {
VLOG(1) << "Send encoded key frame; frame_id:"
<< static_cast<int>(encoded_frame->frame_id);
- last_sent_key_frame_id_ = encoded_frame->frame_id;
}
- last_sent_frame_id_ = encoded_frame->frame_id;
+ last_sent_frame_id_ = static_cast<int>(encoded_frame->frame_id);
UpdateFramesInFlight();
-}
-
-void VideoSender::OnReceivedIntraFrameRequest() {
- if (last_sent_key_frame_id_ != -1) {
- DCHECK_GE(255, last_sent_key_frame_id_);
- DCHECK_LE(0, last_sent_key_frame_id_);
-
- uint8 frames_in_flight = static_cast<uint8>(last_sent_frame_id_) -
- static_cast<uint8>(last_sent_key_frame_id_);
- if (frames_in_flight < (max_unacked_frames_ - 1)) return;
- }
- video_encoder_controller_->GenerateKeyFrame();
- last_acked_frame_id_ = -1;
- last_sent_frame_id_ = -1;
+ InitializeTimers();
}
void VideoSender::IncomingRtcpPacket(const uint8* packet, size_t length,
const base::Closure callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
rtcp_->IncomingRtcpPacket(packet, length);
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
}
void VideoSender::ScheduleNextRtcpReport() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeDelta time_to_next = rtcp_->TimeToSendNextRtcpReport() -
cast_environment_->Clock()->NowTicks();
@@ -205,11 +228,13 @@ void VideoSender::ScheduleNextRtcpReport() {
}
void VideoSender::SendRtcpReport() {
- rtcp_->SendRtcpReport(incoming_feedback_ssrc_);
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ rtcp_->SendRtcpFromRtpSender(NULL); // TODO(pwestin): add logging.
ScheduleNextRtcpReport();
}
void VideoSender::ScheduleNextResendCheck() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeDelta time_to_next;
if (last_send_time_.is_null()) {
time_to_next = rtp_max_delay_;
@@ -226,6 +251,7 @@ void VideoSender::ScheduleNextResendCheck() {
}
void VideoSender::ResendCheck() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (!last_send_time_.is_null() && last_sent_frame_id_ != -1) {
base::TimeDelta time_since_last_send =
cast_environment_->Clock()->NowTicks() - last_send_time_;
@@ -237,10 +263,9 @@ void VideoSender::ResendCheck() {
last_sent_frame_id_ = -1;
UpdateFramesInFlight();
} else {
- DCHECK_GE(255, last_acked_frame_id_);
DCHECK_LE(0, last_acked_frame_id_);
- uint8 frame_id = static_cast<uint8>(last_acked_frame_id_ + 1);
+ uint32 frame_id = static_cast<uint32>(last_acked_frame_id_ + 1);
VLOG(1) << "ACK timeout resend frame:" << static_cast<int>(frame_id);
ResendFrame(frame_id);
}
@@ -250,6 +275,7 @@ void VideoSender::ResendCheck() {
}
void VideoSender::ScheduleNextSkippedFramesCheck() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeDelta time_to_next;
if (last_checked_skip_count_time_.is_null()) {
time_to_next =
@@ -268,6 +294,7 @@ void VideoSender::ScheduleNextSkippedFramesCheck() {
}
void VideoSender::SkippedFramesCheck() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
int skip_count = video_encoder_controller_->NumberOfSkippedFrames();
if (skip_count - last_skip_count_ >
kSkippedFramesThreshold * max_frame_rate_) {
@@ -279,12 +306,15 @@ void VideoSender::SkippedFramesCheck() {
}
void VideoSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeDelta rtt;
base::TimeDelta avg_rtt;
base::TimeDelta min_rtt;
base::TimeDelta max_rtt;
if (rtcp_->Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt)) {
+ cast_environment_->Logging()->InsertGenericEvent(kRttMs,
+ rtt.InMilliseconds());
// Don't use a RTT lower than our average.
rtt = std::max(rtt, avg_rtt);
} else {
@@ -299,30 +329,29 @@ void VideoSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
video_encoder_controller_->LatestFrameIdToReference(
cast_feedback.ack_frame_id_);
- if (static_cast<uint8>(last_acked_frame_id_ + 1) ==
+ if (static_cast<uint32>(last_acked_frame_id_ + 1) ==
cast_feedback.ack_frame_id_) {
uint32 new_bitrate = 0;
if (congestion_control_.OnAck(rtt, &new_bitrate)) {
video_encoder_controller_->SetBitRate(new_bitrate);
}
}
- if (last_acked_frame_id_ == cast_feedback.ack_frame_id_ &&
+ if (static_cast<uint32>(last_acked_frame_id_) == cast_feedback.ack_frame_id_
// We only count duplicate ACKs when we have sent newer frames.
- IsNewerFrameId(last_sent_frame_id_, last_acked_frame_id_)) {
+ && IsNewerFrameId(last_sent_frame_id_, last_acked_frame_id_)) {
duplicate_ack_++;
} else {
duplicate_ack_ = 0;
}
if (duplicate_ack_ >= 2 && duplicate_ack_ % 3 == 2) {
// Resend last ACK + 1 frame.
- resend_frame = static_cast<uint8>(last_acked_frame_id_ + 1);
+ resend_frame = static_cast<uint32>(last_acked_frame_id_ + 1);
}
if (resend_frame != -1) {
- DCHECK_GE(255, resend_frame);
DCHECK_LE(0, resend_frame);
VLOG(1) << "Received duplicate ACK for frame:"
<< static_cast<int>(resend_frame);
- ResendFrame(static_cast<uint8>(resend_frame));
+ ResendFrame(static_cast<uint32>(resend_frame));
}
} else {
rtp_sender_->ResendPackets(cast_feedback.missing_frames_and_packets_);
@@ -336,24 +365,28 @@ void VideoSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
ReceivedAck(cast_feedback.ack_frame_id_);
}
-void VideoSender::ReceivedAck(uint8 acked_frame_id) {
+void VideoSender::ReceivedAck(uint32 acked_frame_id) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ VLOG(1) << "ReceivedAck:" << acked_frame_id;
+ last_acked_frame_id_ = static_cast<int>(acked_frame_id);
+ cast_environment_->Logging()->InsertGenericEvent(kAckReceived,
+ acked_frame_id);
VLOG(1) << "ReceivedAck:" << static_cast<int>(acked_frame_id);
last_acked_frame_id_ = acked_frame_id;
UpdateFramesInFlight();
}
void VideoSender::UpdateFramesInFlight() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (last_sent_frame_id_ != -1) {
- DCHECK_GE(255, last_sent_frame_id_);
DCHECK_LE(0, last_sent_frame_id_);
- uint8 frames_in_flight;
+ uint32 frames_in_flight;
if (last_acked_frame_id_ != -1) {
- DCHECK_GE(255, last_acked_frame_id_);
DCHECK_LE(0, last_acked_frame_id_);
- frames_in_flight = static_cast<uint8>(last_sent_frame_id_) -
- static_cast<uint8>(last_acked_frame_id_);
+ frames_in_flight = static_cast<uint32>(last_sent_frame_id_) -
+ static_cast<uint32>(last_acked_frame_id_);
} else {
- frames_in_flight = last_sent_frame_id_ + 1;
+ frames_in_flight = static_cast<uint32>(last_sent_frame_id_) + 1;
}
VLOG(1) << "Frames in flight; last sent: " << last_sent_frame_id_
<< " last acked:" << last_acked_frame_id_;
@@ -365,7 +398,8 @@ void VideoSender::UpdateFramesInFlight() {
video_encoder_controller_->SkipNextFrame(false);
}
-void VideoSender::ResendFrame(uint8 resend_frame_id) {
+void VideoSender::ResendFrame(uint32 resend_frame_id) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
MissingFramesAndPacketsMap missing_frames_and_packets;
PacketIdSet missing;
missing_frames_and_packets.insert(std::make_pair(resend_frame_id, missing));
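The sender-side crypto added above amounts to a one-time CTR-mode setup from the config plus a per-frame counter derived from the frame id. The sketch below condenses the hunks above; kAesKeySize and GetAesNonce() live in cast_defines.h and are not shown in this excerpt.

// Sketch only: the AES-CTR flow added in video_sender.cc, condensed.
scoped_ptr<crypto::Encryptor> encryptor;
std::string iv_mask;

// One-time setup from the sender config.
if (config.aes_key.size() == kAesKeySize &&
    config.aes_iv_mask.size() == kAesKeySize) {
  iv_mask = config.aes_iv_mask;
  crypto::SymmetricKey* key = crypto::SymmetricKey::Import(
      crypto::SymmetricKey::AES, config.aes_key);
  encryptor.reset(new crypto::Encryptor());
  encryptor->Init(key, crypto::Encryptor::CTR, std::string());
}

// Per frame: derive the counter from the frame id, then encrypt the payload
// before handing it to the RTP sender.
EncodedVideoFrame encrypted;
if (encryptor &&
    encryptor->SetCounter(GetAesNonce(frame.frame_id, iv_mask)) &&
    encryptor->Encrypt(frame.data, &encrypted.data)) {
  encrypted.codec = frame.codec;
  encrypted.key_frame = frame.key_frame;
  encrypted.frame_id = frame.frame_id;
  encrypted.last_referenced_frame_id = frame.last_referenced_frame_id;
  rtp_sender->IncomingEncodedVideoFrame(&encrypted, capture_time);
}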
diff --git a/media/cast/video_sender/video_sender.gypi b/media/cast/video_sender/video_sender.gypi
index 9499066165..7b8b890f65 100644
--- a/media/cast/video_sender/video_sender.gypi
+++ b/media/cast/video_sender/video_sender.gypi
@@ -21,8 +21,11 @@
'video_sender.cc',
], # source
'dependencies': [
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
'<(DEPTH)/media/cast/rtcp/rtcp.gyp:*',
'<(DEPTH)/media/cast/rtp_sender/rtp_sender.gyp:*',
+ '<(DEPTH)/media/media.gyp:media',
+ '<(DEPTH)/media/media.gyp:shared_memory_support',
'congestion_control',
'cast_vp8_encoder',
],
diff --git a/media/cast/video_sender/video_sender.h b/media/cast/video_sender/video_sender.h
index 7bd60293b0..aa2a2c6828 100644
--- a/media/cast/video_sender/video_sender.h
+++ b/media/cast/video_sender/video_sender.h
@@ -18,6 +18,14 @@
#include "media/cast/rtcp/rtcp.h"
#include "media/cast/rtp_sender/rtp_sender.h"
+namespace crypto {
+ class Encryptor;
+}
+
+namespace media {
+class VideoFrame;
+}
+
namespace media {
namespace cast {
@@ -48,9 +56,9 @@ class VideoSender : public base::NonThreadSafe,
// the encoder is done with the frame; it does not mean that the encoded frame
// has been sent out.
void InsertRawVideoFrame(
- const I420VideoFrame* video_frame,
+ const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& capture_time,
- const base::Closure callback);
+ const base::Closure& callback);
// The video_frame must be valid until the closure callback is called.
// The closure callback is called from the main thread as soon as
@@ -92,36 +100,43 @@ class VideoSender : public base::NonThreadSafe,
void SendEncodedVideoFrame(const EncodedVideoFrame* video_frame,
const base::TimeTicks& capture_time);
- void OnReceivedIntraFrameRequest();
- void ResendFrame(uint8 resend_frame_id);
- void ReceivedAck(uint8 acked_frame_id);
+ void ResendFrame(uint32 resend_frame_id);
+ void ReceivedAck(uint32 acked_frame_id);
void UpdateFramesInFlight();
void SendEncodedVideoFrameMainThread(
scoped_ptr<EncodedVideoFrame> video_frame,
const base::TimeTicks& capture_time);
- const uint32 incoming_feedback_ssrc_;
+ void InitializeTimers();
+
+ // Caller must allocate the destination |encrypted_video_frame|; its data
+ // member will be resized to hold the encrypted size.
+ bool EncryptVideoFrame(const EncodedVideoFrame& encoded_frame,
+ EncodedVideoFrame* encrypted_video_frame);
+
const base::TimeDelta rtp_max_delay_;
const int max_frame_rate_;
scoped_refptr<CastEnvironment> cast_environment_;
scoped_ptr<LocalRtcpVideoSenderFeedback> rtcp_feedback_;
scoped_ptr<LocalRtpVideoSenderStatistics> rtp_video_sender_statistics_;
- scoped_refptr<VideoEncoder> video_encoder_;
+ scoped_ptr<VideoEncoder> video_encoder_;
scoped_ptr<Rtcp> rtcp_;
scoped_ptr<RtpSender> rtp_sender_;
VideoEncoderController* video_encoder_controller_;
uint8 max_unacked_frames_;
+ scoped_ptr<crypto::Encryptor> encryptor_;
+ std::string iv_mask_;
int last_acked_frame_id_;
int last_sent_frame_id_;
- int last_sent_key_frame_id_;
int duplicate_ack_;
base::TimeTicks last_send_time_;
base::TimeTicks last_checked_skip_count_time_;
int last_skip_count_;
CongestionControl congestion_control_;
+ bool initialized_;
base::WeakPtrFactory<VideoSender> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(VideoSender);
diff --git a/media/cast/video_sender/video_sender_unittest.cc b/media/cast/video_sender/video_sender_unittest.cc
index 46a11380d1..8c56602a68 100644
--- a/media/cast/video_sender/video_sender_unittest.cc
+++ b/media/cast/video_sender/video_sender_unittest.cc
@@ -7,10 +7,12 @@
#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "base/test/simple_test_tick_clock.h"
+#include "media/base/video_frame.h"
#include "media/cast/cast_environment.h"
#include "media/cast/pacing/mock_paced_packet_sender.h"
#include "media/cast/pacing/paced_sender.h"
#include "media/cast/test/fake_task_runner.h"
+#include "media/cast/test/video_utility.h"
#include "media/cast/video_sender/mock_video_encoder_controller.h"
#include "media/cast/video_sender/video_sender.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -19,11 +21,17 @@
namespace media {
namespace cast {
+namespace {
static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
static const uint8 kPixelValue = 123;
+static const int kWidth = 320;
+static const int kHeight = 240;
+}
using testing::_;
+using testing::AtLeast;
+namespace {
class PeerVideoSender : public VideoSender {
public:
PeerVideoSender(scoped_refptr<CastEnvironment> cast_environment,
@@ -35,17 +43,7 @@ class PeerVideoSender : public VideoSender {
}
using VideoSender::OnReceivedCastFeedback;
};
-
-static void ReleaseVideoFrame(const I420VideoFrame* frame) {
- delete [] frame->y_plane.data;
- delete [] frame->u_plane.data;
- delete [] frame->v_plane.data;
- delete frame;
-}
-
-static void ReleaseEncodedFrame(const EncodedVideoFrame* frame) {
- // Do nothing.
-}
+} // namespace
class VideoSenderTest : public ::testing::Test {
protected:
@@ -62,8 +60,8 @@ class VideoSenderTest : public ::testing::Test {
video_config.incoming_feedback_ssrc = 2;
video_config.rtp_payload_type = 127;
video_config.use_external_encoder = external;
- video_config.width = 320;
- video_config.height = 240;
+ video_config.width = kWidth;
+ video_config.height = kHeight;
video_config.max_bitrate = 5000000;
video_config.min_bitrate = 1000000;
video_config.start_bitrate = 1000000;
@@ -85,32 +83,16 @@ class VideoSenderTest : public ::testing::Test {
virtual void SetUp() {
task_runner_ = new test::FakeTaskRunner(&testing_clock_);
cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_);
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig());
}
- I420VideoFrame* AllocateNewVideoFrame() {
- I420VideoFrame* video_frame = new I420VideoFrame();
- video_frame->width = 320;
- video_frame->height = 240;
-
- video_frame->y_plane.stride = video_frame->width;
- video_frame->y_plane.length = video_frame->width;
- video_frame->y_plane.data =
- new uint8[video_frame->width * video_frame->height];
- memset(video_frame->y_plane.data, kPixelValue,
- video_frame->width * video_frame->height);
- video_frame->u_plane.stride = video_frame->width / 2;
- video_frame->u_plane.length = video_frame->width / 2;
- video_frame->u_plane.data =
- new uint8[video_frame->width * video_frame->height / 4];
- memset(video_frame->u_plane.data, kPixelValue,
- video_frame->width * video_frame->height / 4);
- video_frame->v_plane.stride = video_frame->width / 2;
- video_frame->v_plane.length = video_frame->width / 2;
- video_frame->v_plane.data =
- new uint8[video_frame->width * video_frame->height / 4];
- memset(video_frame->v_plane.data, kPixelValue,
- video_frame->width * video_frame->height / 4);
+ scoped_refptr<media::VideoFrame> GetNewVideoFrame() {
+ gfx::Size size(kWidth, kHeight);
+ scoped_refptr<media::VideoFrame> video_frame =
+ media::VideoFrame::CreateFrame(VideoFrame::I420, size, gfx::Rect(size),
+ size, base::TimeDelta());
+ PopulateVideoFrame(video_frame, kPixelValue);
return video_frame;
}
@@ -126,11 +108,11 @@ TEST_F(VideoSenderTest, BuiltInEncoder) {
EXPECT_CALL(mock_transport_, SendPackets(_)).Times(1);
InitEncoder(false);
- I420VideoFrame* video_frame = AllocateNewVideoFrame();
+ scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
base::TimeTicks capture_time;
video_sender_->InsertRawVideoFrame(video_frame, capture_time,
- base::Bind(&ReleaseVideoFrame, video_frame));
+ base::Bind(base::DoNothing));
task_runner_->RunTasks();
}
@@ -150,12 +132,27 @@ TEST_F(VideoSenderTest, ExternalEncoder) {
video_frame.data.insert(video_frame.data.begin(), 1000, kPixelValue);
video_sender_->InsertCodedVideoFrame(&video_frame, capture_time,
- base::Bind(&ReleaseEncodedFrame, &video_frame));
+ base::Bind(base::DoNothing));
}
TEST_F(VideoSenderTest, RtcpTimer) {
+ EXPECT_CALL(mock_transport_, SendPackets(_)).Times(AtLeast(1));
EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).Times(1);
- InitEncoder(false);
+ EXPECT_CALL(mock_video_encoder_controller_,
+ SkipNextFrame(false)).Times(AtLeast(1));
+ InitEncoder(true);
+
+ EncodedVideoFrame video_frame;
+ base::TimeTicks capture_time;
+
+ video_frame.codec = kVp8;
+ video_frame.key_frame = true;
+ video_frame.frame_id = 0;
+ video_frame.last_referenced_frame_id = 0;
+ video_frame.data.insert(video_frame.data.begin(), 1000, kPixelValue);
+
+ video_sender_->InsertCodedVideoFrame(&video_frame, capture_time,
+ base::Bind(base::DoNothing));
// Make sure that we send at least one RTCP packet.
base::TimeDelta max_rtcp_timeout =
@@ -171,11 +168,11 @@ TEST_F(VideoSenderTest, ResendTimer) {
InitEncoder(false);
- I420VideoFrame* video_frame = AllocateNewVideoFrame();
+ scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
base::TimeTicks capture_time;
video_sender_->InsertRawVideoFrame(video_frame, capture_time,
- base::Bind(&ReleaseVideoFrame, video_frame));
+ base::Bind(base::DoNothing));
task_runner_->RunTasks();
@@ -185,9 +182,9 @@ TEST_F(VideoSenderTest, ResendTimer) {
cast_feedback.ack_frame_id_ = 0;
video_sender_->OnReceivedCastFeedback(cast_feedback);
- video_frame = AllocateNewVideoFrame();
+ video_frame = GetNewVideoFrame();
video_sender_->InsertRawVideoFrame(video_frame, capture_time,
- base::Bind(&ReleaseVideoFrame, video_frame));
+ base::Bind(base::DoNothing));
task_runner_->RunTasks();