summaryrefslogtreecommitdiff
path: root/media/cast
diff options
context:
space:
mode:
authorBen Murdoch <benm@google.com>2014-04-03 12:29:45 +0100
committerBen Murdoch <benm@google.com>2014-04-03 12:29:45 +0100
commite5d81f57cb97b3b6b7fccc9c5610d21eb81db09d (patch)
treef266aab56db899073b21c1edd1d0e00055b9a2cf /media/cast
parent67e8dac6e410a019f58fc452b262a184e8e7fd12 (diff)
downloadchromium_org-e5d81f57cb97b3b6b7fccc9c5610d21eb81db09d.tar.gz
Merge from Chromium at DEPS revision 261286
This commit was generated by merge_to_master.py. Change-Id: Iea9643ce91618057f128e9a5b62c07be152f2b89
Diffstat (limited to 'media/cast')
-rw-r--r--media/cast/audio_receiver/audio_decoder.cc352
-rw-r--r--media/cast/audio_receiver/audio_decoder.h75
-rw-r--r--media/cast/audio_receiver/audio_decoder_unittest.cc395
-rw-r--r--media/cast/audio_receiver/audio_receiver.cc442
-rw-r--r--media/cast/audio_receiver/audio_receiver.gypi4
-rw-r--r--media/cast/audio_receiver/audio_receiver.h133
-rw-r--r--media/cast/audio_receiver/audio_receiver_unittest.cc134
-rw-r--r--media/cast/audio_sender/audio_encoder_unittest.cc4
-rw-r--r--media/cast/audio_sender/audio_sender_unittest.cc4
-rw-r--r--media/cast/cast_defines.h6
-rw-r--r--media/cast/cast_receiver.gyp3
-rw-r--r--media/cast/cast_receiver.h26
-rw-r--r--media/cast/cast_receiver_impl.cc13
-rw-r--r--media/cast/cast_sender.gyp3
-rw-r--r--media/cast/congestion_control/congestion_control_unittest.cc16
-rw-r--r--media/cast/framer/cast_message_builder_unittest.cc4
-rw-r--r--media/cast/framer/framer.gyp3
-rw-r--r--media/cast/logging/logging.gyp3
-rw-r--r--media/cast/logging/logging_impl_unittest.cc4
-rw-r--r--media/cast/rtcp/rtcp.cc2
-rw-r--r--media/cast/rtcp/rtcp.gyp3
-rw-r--r--media/cast/rtcp/rtcp_sender.cc8
-rw-r--r--media/cast/rtcp/rtcp_unittest.cc10
-rw-r--r--media/cast/rtp_receiver/receiver_stats_unittest.cc4
-rw-r--r--media/cast/rtp_receiver/rtp_parser/rtp_parser.cc4
-rw-r--r--media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp3
-rw-r--r--media/cast/rtp_receiver/rtp_receiver.gyp3
-rw-r--r--media/cast/test/encode_decode_test.cc5
-rw-r--r--media/cast/test/end2end_unittest.cc544
-rw-r--r--media/cast/test/utility/audio_utility.cc24
-rw-r--r--media/cast/test/utility/audio_utility.h2
-rw-r--r--media/cast/test/utility/barcode.cc1
-rw-r--r--media/cast/test/utility/in_process_receiver.cc56
-rw-r--r--media/cast/test/utility/in_process_receiver.h17
-rw-r--r--media/cast/test/utility/standalone_cast_environment.h2
-rw-r--r--media/cast/test/utility/udp_proxy.cc15
-rw-r--r--media/cast/test/utility/utility.gyp3
-rw-r--r--media/cast/test/utility/video_utility.cc1
-rw-r--r--media/cast/transport/cast_transport_defines.h4
-rw-r--r--media/cast/transport/cast_transport_sender_impl_unittest.cc3
-rw-r--r--media/cast/transport/pacing/paced_sender_unittest.cc4
-rw-r--r--media/cast/transport/rtp_sender/packet_storage/packet_storage_unittest.cc4
-rw-r--r--media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc4
-rw-r--r--media/cast/transport/utility/utility.gyp5
-rw-r--r--media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp3
-rw-r--r--media/cast/video_receiver/video_decoder.cc4
-rw-r--r--media/cast/video_receiver/video_decoder_unittest.cc4
-rw-r--r--media/cast/video_receiver/video_receiver.cc2
-rw-r--r--media/cast/video_receiver/video_receiver_unittest.cc4
-rw-r--r--media/cast/video_sender/codecs/vp8/vp8_encoder.cc2
-rw-r--r--media/cast/video_sender/external_video_encoder.cc81
-rw-r--r--media/cast/video_sender/video_sender_unittest.cc4
52 files changed, 1275 insertions, 1184 deletions
diff --git a/media/cast/audio_receiver/audio_decoder.cc b/media/cast/audio_receiver/audio_decoder.cc
index b1a8256f2e..4e75473a6b 100644
--- a/media/cast/audio_receiver/audio_decoder.cc
+++ b/media/cast/audio_receiver/audio_decoder.cc
@@ -2,165 +2,257 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/logging.h"
#include "media/cast/audio_receiver/audio_decoder.h"
-#include "third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
-#include "third_party/webrtc/modules/interface/module_common_types.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/stl_util.h"
+#include "base/sys_byteorder.h"
+#include "media/cast/cast_defines.h"
+#include "third_party/opus/src/include/opus.h"
namespace media {
namespace cast {
-AudioDecoder::AudioDecoder(scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
- RtpPayloadFeedback* incoming_payload_feedback)
- : cast_environment_(cast_environment),
- audio_decoder_(webrtc::AudioCodingModule::Create(0)),
- cast_message_builder_(cast_environment->Clock(),
- incoming_payload_feedback,
- &frame_id_map_,
- audio_config.incoming_ssrc,
- true,
- 0),
- have_received_packets_(false),
- last_played_out_timestamp_(0) {
- audio_decoder_->InitializeReceiver();
-
- webrtc::CodecInst receive_codec;
- switch (audio_config.codec) {
- case transport::kPcm16:
- receive_codec.pltype = audio_config.rtp_payload_type;
- strncpy(receive_codec.plname, "L16", 4);
- receive_codec.plfreq = audio_config.frequency;
- receive_codec.pacsize = -1;
- receive_codec.channels = audio_config.channels;
- receive_codec.rate = -1;
- break;
- case transport::kOpus:
- receive_codec.pltype = audio_config.rtp_payload_type;
- strncpy(receive_codec.plname, "opus", 5);
- receive_codec.plfreq = audio_config.frequency;
- receive_codec.pacsize = -1;
- receive_codec.channels = audio_config.channels;
- receive_codec.rate = -1;
- break;
- case transport::kExternalAudio:
- NOTREACHED() << "Codec must be specified for audio decoder";
- break;
- }
- if (audio_decoder_->RegisterReceiveCodec(receive_codec) != 0) {
- NOTREACHED() << "Failed to register receive codec";
+// Base class that handles the common problem of detecting dropped frames, and
+// then invoking the Decode() method implemented by the subclasses to convert
+// the encoded payload data into usable audio data.
+class AudioDecoder::ImplBase
+ : public base::RefCountedThreadSafe<AudioDecoder::ImplBase> {
+ public:
+ ImplBase(const scoped_refptr<CastEnvironment>& cast_environment,
+ transport::AudioCodec codec,
+ int num_channels,
+ int sampling_rate)
+ : cast_environment_(cast_environment),
+ codec_(codec),
+ num_channels_(num_channels),
+ cast_initialization_status_(STATUS_AUDIO_UNINITIALIZED),
+ seen_first_frame_(false) {
+ if (num_channels_ <= 0 || sampling_rate <= 0 || sampling_rate % 100 != 0)
+ cast_initialization_status_ = STATUS_INVALID_AUDIO_CONFIGURATION;
}
- audio_decoder_->SetMaximumPlayoutDelay(audio_config.rtp_max_delay_ms);
- audio_decoder_->SetPlayoutMode(webrtc::streaming);
-}
+ CastInitializationStatus InitializationResult() const {
+ return cast_initialization_status_;
+ }
-AudioDecoder::~AudioDecoder() {}
+ void DecodeFrame(scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
+ const DecodeFrameCallback& callback) {
+ DCHECK_EQ(cast_initialization_status_, STATUS_AUDIO_INITIALIZED);
-bool AudioDecoder::GetRawAudioFrame(int number_of_10ms_blocks,
- int desired_frequency,
- PcmAudioFrame* audio_frame,
- uint32* rtp_timestamp) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::AUDIO));
- // We don't care about the race case where a packet arrives at the same time
- // as this function in called. The data will be there the next time this
- // function is called.
- lock_.Acquire();
- // Get a local copy under lock.
- bool have_received_packets = have_received_packets_;
- lock_.Release();
-
- if (!have_received_packets)
- return false;
-
- audio_frame->samples.clear();
-
- for (int i = 0; i < number_of_10ms_blocks; ++i) {
- webrtc::AudioFrame webrtc_audio_frame;
- if (0 != audio_decoder_->PlayoutData10Ms(desired_frequency,
- &webrtc_audio_frame)) {
- return false;
+ scoped_ptr<AudioBus> decoded_audio;
+ if (encoded_frame->codec != codec_) {
+ NOTREACHED();
+ cast_environment_->PostTask(CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(callback,
+ base::Passed(&decoded_audio),
+ false));
}
- if (webrtc_audio_frame.speech_type_ == webrtc::AudioFrame::kPLCCNG ||
- webrtc_audio_frame.speech_type_ == webrtc::AudioFrame::kUndefined) {
- // We are only interested in real decoded audio.
- return false;
- }
- audio_frame->frequency = webrtc_audio_frame.sample_rate_hz_;
- audio_frame->channels = webrtc_audio_frame.num_channels_;
- if (i == 0) {
- // Use the timestamp from the first 10ms block.
- if (0 != audio_decoder_->PlayoutTimestamp(rtp_timestamp)) {
- return false;
+ COMPILE_ASSERT(sizeof(encoded_frame->frame_id) == sizeof(last_frame_id_),
+ size_of_frame_id_types_do_not_match);
+ bool is_continuous = true;
+ if (seen_first_frame_) {
+ const uint32 frames_ahead = encoded_frame->frame_id - last_frame_id_;
+ if (frames_ahead > 1) {
+ RecoverBecauseFramesWereDropped();
+ is_continuous = false;
}
- lock_.Acquire();
- last_played_out_timestamp_ = *rtp_timestamp;
- lock_.Release();
+ } else {
+ seen_first_frame_ = true;
}
- int samples_per_10ms = webrtc_audio_frame.samples_per_channel_;
+ last_frame_id_ = encoded_frame->frame_id;
- audio_frame->samples.insert(
- audio_frame->samples.end(),
- &webrtc_audio_frame.data_[0],
- &webrtc_audio_frame.data_[samples_per_10ms * audio_frame->channels]);
+ decoded_audio = Decode(
+ reinterpret_cast<uint8*>(string_as_array(&encoded_frame->data)),
+ static_cast<int>(encoded_frame->data.size()));
+ cast_environment_->PostTask(CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(callback,
+ base::Passed(&decoded_audio),
+ is_continuous));
}
- return true;
-}
-void AudioDecoder::IncomingParsedRtpPacket(const uint8* payload_data,
- size_t payload_size,
- const RtpCastHeader& rtp_header) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK_LE(payload_size, kMaxIpPacketSize);
- audio_decoder_->IncomingPacket(
- payload_data, static_cast<int32>(payload_size), rtp_header.webrtc);
- lock_.Acquire();
- have_received_packets_ = true;
- uint32 last_played_out_timestamp = last_played_out_timestamp_;
- lock_.Release();
-
- PacketType packet_type = frame_id_map_.InsertPacket(rtp_header);
- if (packet_type != kNewPacketCompletingFrame)
- return;
+ protected:
+ friend class base::RefCountedThreadSafe<ImplBase>;
+ virtual ~ImplBase() {}
- cast_message_builder_.CompleteFrameReceived(rtp_header.frame_id,
- rtp_header.is_key_frame);
+ virtual void RecoverBecauseFramesWereDropped() {}
- frame_id_rtp_timestamp_map_[rtp_header.frame_id] =
- rtp_header.webrtc.header.timestamp;
+ // Note: Implementation of Decode() is allowed to mutate |data|.
+ virtual scoped_ptr<AudioBus> Decode(uint8* data, int len) = 0;
- if (last_played_out_timestamp == 0)
- return; // Nothing is played out yet.
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ const transport::AudioCodec codec_;
+ const int num_channels_;
- uint32 latest_frame_id_to_remove = 0;
- bool frame_to_remove = false;
+ // Subclass' ctor is expected to set this to STATUS_AUDIO_INITIALIZED.
+ CastInitializationStatus cast_initialization_status_;
- FrameIdRtpTimestampMap::iterator it = frame_id_rtp_timestamp_map_.begin();
- while (it != frame_id_rtp_timestamp_map_.end()) {
- if (IsNewerRtpTimestamp(it->second, last_played_out_timestamp)) {
- break;
+ private:
+ bool seen_first_frame_;
+ uint32 last_frame_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(ImplBase);
+};
+
+class AudioDecoder::OpusImpl : public AudioDecoder::ImplBase {
+ public:
+ OpusImpl(const scoped_refptr<CastEnvironment>& cast_environment,
+ int num_channels,
+ int sampling_rate)
+ : ImplBase(cast_environment,
+ transport::kOpus,
+ num_channels,
+ sampling_rate),
+ decoder_memory_(new uint8[opus_decoder_get_size(num_channels)]),
+ opus_decoder_(reinterpret_cast<OpusDecoder*>(decoder_memory_.get())),
+ max_samples_per_frame_(
+ kOpusMaxFrameDurationMillis * sampling_rate / 1000),
+ buffer_(new float[max_samples_per_frame_ * num_channels]) {
+ if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED)
+ return;
+ if (opus_decoder_init(opus_decoder_, sampling_rate, num_channels) !=
+ OPUS_OK) {
+ ImplBase::cast_initialization_status_ =
+ STATUS_INVALID_AUDIO_CONFIGURATION;
+ return;
}
- frame_to_remove = true;
- latest_frame_id_to_remove = it->first;
- frame_id_rtp_timestamp_map_.erase(it);
- it = frame_id_rtp_timestamp_map_.begin();
+ ImplBase::cast_initialization_status_ = STATUS_AUDIO_INITIALIZED;
}
- if (!frame_to_remove)
- return;
- frame_id_map_.RemoveOldFrames(latest_frame_id_to_remove);
+ private:
+ virtual ~OpusImpl() {}
+
+ virtual void RecoverBecauseFramesWereDropped() OVERRIDE {
+ // Passing NULL for the input data notifies the decoder of frame loss.
+ const opus_int32 result =
+ opus_decode_float(
+ opus_decoder_, NULL, 0, buffer_.get(), max_samples_per_frame_, 0);
+ DCHECK_GE(result, 0);
+ }
+
+ virtual scoped_ptr<AudioBus> Decode(uint8* data, int len) OVERRIDE {
+ scoped_ptr<AudioBus> audio_bus;
+ const opus_int32 num_samples_decoded = opus_decode_float(
+ opus_decoder_, data, len, buffer_.get(), max_samples_per_frame_, 0);
+ if (num_samples_decoded <= 0)
+ return audio_bus.Pass(); // Decode error.
+
+ // Copy interleaved samples from |buffer_| into a new AudioBus (where
+ // samples are stored in planar format, for each channel).
+ audio_bus = AudioBus::Create(num_channels_, num_samples_decoded).Pass();
+ // TODO(miu): This should be moved into AudioBus::FromInterleaved().
+ for (int ch = 0; ch < num_channels_; ++ch) {
+ const float* src = buffer_.get() + ch;
+ const float* const src_end = src + num_samples_decoded * num_channels_;
+ float* dest = audio_bus->channel(ch);
+ for (; src < src_end; src += num_channels_, ++dest)
+ *dest = *src;
+ }
+ return audio_bus.Pass();
+ }
+
+ const scoped_ptr<uint8[]> decoder_memory_;
+ OpusDecoder* const opus_decoder_;
+ const int max_samples_per_frame_;
+ const scoped_ptr<float[]> buffer_;
+
+ // According to documentation in third_party/opus/src/include/opus.h, we must
+ // provide enough space in |buffer_| to contain 120ms of samples. At 48 kHz,
+ // then, that means 5760 samples times the number of channels.
+ static const int kOpusMaxFrameDurationMillis = 120;
+
+ DISALLOW_COPY_AND_ASSIGN(OpusImpl);
+};
+
+class AudioDecoder::Pcm16Impl : public AudioDecoder::ImplBase {
+ public:
+ Pcm16Impl(const scoped_refptr<CastEnvironment>& cast_environment,
+ int num_channels,
+ int sampling_rate)
+ : ImplBase(cast_environment,
+ transport::kPcm16,
+ num_channels,
+ sampling_rate) {
+ if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED)
+ return;
+ ImplBase::cast_initialization_status_ = STATUS_AUDIO_INITIALIZED;
+ }
+
+ private:
+ virtual ~Pcm16Impl() {}
+
+ virtual scoped_ptr<AudioBus> Decode(uint8* data, int len) OVERRIDE {
+ scoped_ptr<AudioBus> audio_bus;
+ const int num_samples = len / sizeof(int16) / num_channels_;
+ if (num_samples <= 0)
+ return audio_bus.Pass();
+
+ int16* const pcm_data = reinterpret_cast<int16*>(data);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+ // Convert endianness.
+ const int num_elements = num_samples * num_channels_;
+ for (int i = 0; i < num_elements; ++i)
+ pcm_data[i] = static_cast<int16>(base::NetToHost16(pcm_data[i]));
+#endif
+ audio_bus = AudioBus::Create(num_channels_, num_samples).Pass();
+ audio_bus->FromInterleaved(pcm_data, num_samples, sizeof(int16));
+ return audio_bus.Pass();
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(Pcm16Impl);
+};
+
+AudioDecoder::AudioDecoder(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const AudioReceiverConfig& audio_config)
+ : cast_environment_(cast_environment) {
+ switch (audio_config.codec) {
+ case transport::kOpus:
+ impl_ = new OpusImpl(cast_environment,
+ audio_config.channels,
+ audio_config.frequency);
+ break;
+ case transport::kPcm16:
+ impl_ = new Pcm16Impl(cast_environment,
+ audio_config.channels,
+ audio_config.frequency);
+ break;
+ default:
+ NOTREACHED() << "Unknown or unspecified codec.";
+ break;
+ }
}
-bool AudioDecoder::TimeToSendNextCastMessage(base::TimeTicks* time_to_send) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- return cast_message_builder_.TimeToSendNextCastMessage(time_to_send);
+AudioDecoder::~AudioDecoder() {}
+
+CastInitializationStatus AudioDecoder::InitializationResult() const {
+ if (impl_)
+ return impl_->InitializationResult();
+ return STATUS_UNSUPPORTED_AUDIO_CODEC;
}
-void AudioDecoder::SendCastMessage() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- cast_message_builder_.UpdateCastMessage();
+void AudioDecoder::DecodeFrame(
+ scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
+ const DecodeFrameCallback& callback) {
+ DCHECK(encoded_frame.get());
+ DCHECK(!callback.is_null());
+ if (!impl_ || impl_->InitializationResult() != STATUS_AUDIO_INITIALIZED) {
+ callback.Run(make_scoped_ptr<AudioBus>(NULL), false);
+ return;
+ }
+ cast_environment_->PostTask(CastEnvironment::AUDIO,
+ FROM_HERE,
+ base::Bind(&AudioDecoder::ImplBase::DecodeFrame,
+ impl_,
+ base::Passed(&encoded_frame),
+ callback));
}
} // namespace cast
diff --git a/media/cast/audio_receiver/audio_decoder.h b/media/cast/audio_receiver/audio_decoder.h
index 6bafdd1465..0e10ebaf04 100644
--- a/media/cast/audio_receiver/audio_decoder.h
+++ b/media/cast/audio_receiver/audio_decoder.h
@@ -6,61 +6,52 @@
#define MEDIA_CAST_AUDIO_RECEIVER_AUDIO_DECODER_H_
#include "base/callback.h"
-#include "base/synchronization/lock.h"
+#include "base/memory/ref_counted.h"
+#include "media/base/audio_bus.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/framer/cast_message_builder.h"
-#include "media/cast/framer/frame_id_map.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
-
-namespace webrtc {
-class AudioCodingModule;
-}
+#include "media/cast/transport/cast_transport_config.h"
namespace media {
namespace cast {
-typedef std::map<uint32, uint32> FrameIdRtpTimestampMap;
-
-// Thread safe class.
class AudioDecoder {
public:
- AudioDecoder(scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
- RtpPayloadFeedback* incoming_payload_feedback);
+ // Callback passed to DecodeFrame, to deliver decoded audio data from the
+ // decoder. The number of samples in |audio_bus| may vary, and |audio_bus|
+ // can be NULL when errors occur. |is_continuous| is normally true, but will
+ // be false if the decoder has detected a frame skip since the last decode
+ // operation; and the client should take steps to smooth audio discontinuities
+ // in this case.
+ typedef base::Callback<void(scoped_ptr<AudioBus> audio_bus,
+ bool is_continuous)> DecodeFrameCallback;
+
+ AudioDecoder(const scoped_refptr<CastEnvironment>& cast_environment,
+ const AudioReceiverConfig& audio_config);
virtual ~AudioDecoder();
- // Extract a raw audio frame from the decoder.
- // Set the number of desired 10ms blocks and frequency.
- // Should be called from the cast audio decoder thread; however that is not
- // required.
- bool GetRawAudioFrame(int number_of_10ms_blocks,
- int desired_frequency,
- PcmAudioFrame* audio_frame,
- uint32* rtp_timestamp);
-
- // Insert an RTP packet to the decoder.
- // Should be called from the main cast thread; however that is not required.
- void IncomingParsedRtpPacket(const uint8* payload_data,
- size_t payload_size,
- const RtpCastHeader& rtp_header);
-
- bool TimeToSendNextCastMessage(base::TimeTicks* time_to_send);
- void SendCastMessage();
+ // Returns STATUS_AUDIO_INITIALIZED if the decoder was successfully
+ // constructed from the given AudioReceiverConfig. If this method returns any
+ // other value, calls to DecodeFrame() will not succeed.
+ CastInitializationStatus InitializationResult() const;
+
+ // Decode the payload in |encoded_frame| asynchronously. |callback| will be
+ // invoked on the CastEnvironment::MAIN thread with the result.
+ //
+ // In the normal case, |encoded_frame->frame_id| will be
+ // monotonically-increasing by 1 for each successive call to this method.
+ // When it is not, the decoder will assume one or more frames have been
+ // dropped (e.g., due to packet loss), and will perform recovery actions.
+ void DecodeFrame(scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
+ const DecodeFrameCallback& callback);
private:
- scoped_refptr<CastEnvironment> cast_environment_;
-
- // The webrtc AudioCodingModule is thread safe.
- scoped_ptr<webrtc::AudioCodingModule> audio_decoder_;
-
- FrameIdMap frame_id_map_;
- CastMessageBuilder cast_message_builder_;
+ class ImplBase;
+ class OpusImpl;
+ class Pcm16Impl;
- base::Lock lock_;
- bool have_received_packets_;
- FrameIdRtpTimestampMap frame_id_rtp_timestamp_map_;
- uint32 last_played_out_timestamp_;
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_refptr<ImplBase> impl_;
DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
};
diff --git a/media/cast/audio_receiver/audio_decoder_unittest.cc b/media/cast/audio_receiver/audio_decoder_unittest.cc
index 46629b9081..d32dbe19b7 100644
--- a/media/cast/audio_receiver/audio_decoder_unittest.cc
+++ b/media/cast/audio_receiver/audio_decoder_unittest.cc
@@ -2,216 +2,243 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/test/simple_test_tick_clock.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/stl_util.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/sys_byteorder.h"
+#include "base/time/time.h"
#include "media/cast/audio_receiver/audio_decoder.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/test/fake_single_thread_task_runner.h"
-#include "testing/gmock/include/gmock/gmock.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/test/utility/audio_utility.h"
+#include "media/cast/test/utility/standalone_cast_environment.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/opus/src/include/opus.h"
namespace media {
namespace cast {
namespace {
-class TestRtpPayloadFeedback : public RtpPayloadFeedback {
- public:
- TestRtpPayloadFeedback() {}
- virtual ~TestRtpPayloadFeedback() {}
+struct TestScenario {
+ transport::AudioCodec codec;
+ int num_channels;
+ int sampling_rate;
- virtual void CastFeedback(const RtcpCastMessage& cast_feedback) OVERRIDE {
- EXPECT_EQ(1u, cast_feedback.ack_frame_id_);
- EXPECT_EQ(0u, cast_feedback.missing_frames_and_packets_.size());
- }
+ TestScenario(transport::AudioCodec c, int n, int s)
+ : codec(c), num_channels(n), sampling_rate(s) {}
};
-} // namespace.
+} // namespace
+
+class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
+ public:
+ AudioDecoderTest()
+ : cast_environment_(new StandaloneCastEnvironment()),
+ cond_(&lock_) {}
-class AudioDecoderTest : public ::testing::Test {
protected:
- AudioDecoderTest() {
- testing_clock_ = new base::SimpleTestTickClock();
- testing_clock_->Advance(base::TimeDelta::FromMilliseconds(1234));
- task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
- cast_environment_ =
- new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
- task_runner_,
- task_runner_,
- task_runner_);
+ virtual void SetUp() OVERRIDE {
+ AudioReceiverConfig decoder_config;
+ decoder_config.use_external_decoder = false;
+ decoder_config.frequency = GetParam().sampling_rate;
+ decoder_config.channels = GetParam().num_channels;
+ decoder_config.codec = GetParam().codec;
+ audio_decoder_.reset(new AudioDecoder(cast_environment_, decoder_config));
+ CHECK_EQ(STATUS_AUDIO_INITIALIZED, audio_decoder_->InitializationResult());
+
+ audio_bus_factory_.reset(
+ new TestAudioBusFactory(GetParam().num_channels,
+ GetParam().sampling_rate,
+ TestAudioBusFactory::kMiddleANoteFreq,
+ 0.5f));
+ last_frame_id_ = 0;
+ seen_a_decoded_frame_ = false;
+
+ if (GetParam().codec == transport::kOpus) {
+ opus_encoder_memory_.reset(
+ new uint8[opus_encoder_get_size(GetParam().num_channels)]);
+ OpusEncoder* const opus_encoder =
+ reinterpret_cast<OpusEncoder*>(opus_encoder_memory_.get());
+ CHECK_EQ(OPUS_OK, opus_encoder_init(opus_encoder,
+ GetParam().sampling_rate,
+ GetParam().num_channels,
+ OPUS_APPLICATION_AUDIO));
+ CHECK_EQ(OPUS_OK,
+ opus_encoder_ctl(opus_encoder, OPUS_SET_BITRATE(OPUS_AUTO)));
+ }
+
+ total_audio_feed_in_ = base::TimeDelta();
+ total_audio_decoded_ = base::TimeDelta();
+ }
+
+ // Called from the unit test thread to create another EncodedAudioFrame and
+ // push it into the decoding pipeline.
+ void FeedMoreAudio(const base::TimeDelta& duration,
+ int num_dropped_frames) {
+ // Prepare a simulated EncodedAudioFrame to feed into the AudioDecoder.
+ scoped_ptr<transport::EncodedAudioFrame> encoded_frame(
+ new transport::EncodedAudioFrame());
+ encoded_frame->codec = GetParam().codec;
+ encoded_frame->frame_id = last_frame_id_ + 1 + num_dropped_frames;
+ last_frame_id_ = encoded_frame->frame_id;
+
+ const scoped_ptr<AudioBus> audio_bus(
+ audio_bus_factory_->NextAudioBus(duration).Pass());
+
+ // Encode |audio_bus| into |encoded_frame->data|.
+ const int num_elements = audio_bus->channels() * audio_bus->frames();
+ std::vector<int16> interleaved(num_elements);
+ audio_bus->ToInterleaved(
+ audio_bus->frames(), sizeof(int16), &interleaved.front());
+ if (GetParam().codec == transport::kPcm16) {
+ encoded_frame->data.resize(num_elements * sizeof(int16));
+ int16* const pcm_data =
+ reinterpret_cast<int16*>(string_as_array(&encoded_frame->data));
+ for (size_t i = 0; i < interleaved.size(); ++i)
+ pcm_data[i] = static_cast<int16>(base::HostToNet16(interleaved[i]));
+ } else if (GetParam().codec == transport::kOpus) {
+ OpusEncoder* const opus_encoder =
+ reinterpret_cast<OpusEncoder*>(opus_encoder_memory_.get());
+ const int kOpusEncodeBufferSize = 4000;
+ encoded_frame->data.resize(kOpusEncodeBufferSize);
+ const int payload_size =
+ opus_encode(opus_encoder,
+ &interleaved.front(),
+ audio_bus->frames(),
+ reinterpret_cast<unsigned char*>(
+ string_as_array(&encoded_frame->data)),
+ encoded_frame->data.size());
+ CHECK_GT(payload_size, 1);
+ encoded_frame->data.resize(payload_size);
+ } else {
+ ASSERT_TRUE(false); // Not reached.
+ }
+
+ {
+ base::AutoLock auto_lock(lock_);
+ total_audio_feed_in_ += duration;
+ }
+
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&AudioDecoder::DecodeFrame,
+ base::Unretained(audio_decoder_.get()),
+ base::Passed(&encoded_frame),
+ base::Bind(&AudioDecoderTest::OnDecodedFrame,
+ base::Unretained(this),
+ num_dropped_frames == 0)));
+ }
+
+ // Blocks the caller until all audio that has been feed in has been decoded.
+ void WaitForAllAudioToBeDecoded() {
+ DCHECK(!cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ base::AutoLock auto_lock(lock_);
+ while (total_audio_decoded_ < total_audio_feed_in_)
+ cond_.Wait();
+ EXPECT_EQ(total_audio_feed_in_.InMicroseconds(),
+ total_audio_decoded_.InMicroseconds());
}
- virtual ~AudioDecoderTest() {}
- void Configure(const AudioReceiverConfig& audio_config) {
- audio_decoder_.reset(
- new AudioDecoder(cast_environment_, audio_config, &cast_feedback_));
+ private:
+ // Called by |audio_decoder_| to deliver each frame of decoded audio.
+ void OnDecodedFrame(bool should_be_continuous,
+ scoped_ptr<AudioBus> audio_bus,
+ bool is_continuous) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ // A NULL |audio_bus| indicates a decode error, which we don't expect.
+ ASSERT_FALSE(!audio_bus);
+
+ // Did the decoder detect whether frames were dropped?
+ EXPECT_EQ(should_be_continuous, is_continuous);
+
+ // Does the audio data seem to be intact? For Opus, we have to ignore the
+ // first frame seen at the start (and immediately after dropped packet
+ // recovery) because it introduces a tiny, significant delay.
+ bool examine_signal = true;
+ if (GetParam().codec == transport::kOpus) {
+ examine_signal = seen_a_decoded_frame_ && should_be_continuous;
+ seen_a_decoded_frame_ = true;
+ }
+ if (examine_signal) {
+ for (int ch = 0; ch < audio_bus->channels(); ++ch) {
+ EXPECT_NEAR(
+ TestAudioBusFactory::kMiddleANoteFreq * 2 * audio_bus->frames() /
+ GetParam().sampling_rate,
+ CountZeroCrossings(audio_bus->channel(ch), audio_bus->frames()),
+ 1);
+ }
+ }
+
+ // Signal the main test thread that more audio was decoded.
+ base::AutoLock auto_lock(lock_);
+ total_audio_decoded_ += base::TimeDelta::FromSeconds(1) *
+ audio_bus->frames() / GetParam().sampling_rate;
+ cond_.Signal();
}
- TestRtpPayloadFeedback cast_feedback_;
- // Owned by CastEnvironment.
- base::SimpleTestTickClock* testing_clock_;
- scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
- scoped_refptr<CastEnvironment> cast_environment_;
+ const scoped_refptr<StandaloneCastEnvironment> cast_environment_;
scoped_ptr<AudioDecoder> audio_decoder_;
+ scoped_ptr<TestAudioBusFactory> audio_bus_factory_;
+ uint32 last_frame_id_;
+ bool seen_a_decoded_frame_;
+ scoped_ptr<uint8[]> opus_encoder_memory_;
+
+ base::Lock lock_;
+ base::ConditionVariable cond_;
+ base::TimeDelta total_audio_feed_in_;
+ base::TimeDelta total_audio_decoded_;
DISALLOW_COPY_AND_ASSIGN(AudioDecoderTest);
};
-TEST_F(AudioDecoderTest, Pcm16MonoNoResampleOnePacket) {
- AudioReceiverConfig audio_config;
- audio_config.rtp_payload_type = 127;
- audio_config.frequency = 16000;
- audio_config.channels = 1;
- audio_config.codec = transport::kPcm16;
- audio_config.use_external_decoder = false;
- Configure(audio_config);
-
- RtpCastHeader rtp_header;
- rtp_header.webrtc.header.payloadType = 127;
- rtp_header.webrtc.header.sequenceNumber = 1234;
- rtp_header.webrtc.header.timestamp = 0x87654321;
- rtp_header.webrtc.header.ssrc = 0x12345678;
- rtp_header.webrtc.header.paddingLength = 0;
- rtp_header.webrtc.header.headerLength = 12;
- rtp_header.webrtc.type.Audio.channel = 1;
- rtp_header.webrtc.type.Audio.isCNG = false;
-
- std::vector<int16> payload(640, 0x1234);
- int number_of_10ms_blocks = 4;
- int desired_frequency = 16000;
- PcmAudioFrame audio_frame;
- uint32 rtp_timestamp;
-
- EXPECT_FALSE(audio_decoder_->GetRawAudioFrame(
- number_of_10ms_blocks, desired_frequency, &audio_frame, &rtp_timestamp));
-
- uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
- size_t payload_size = payload.size() * sizeof(int16);
-
- audio_decoder_->IncomingParsedRtpPacket(
- payload_data, payload_size, rtp_header);
-
- EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(
- number_of_10ms_blocks, desired_frequency, &audio_frame, &rtp_timestamp));
- EXPECT_EQ(1, audio_frame.channels);
- EXPECT_EQ(16000, audio_frame.frequency);
- EXPECT_EQ(640ul, audio_frame.samples.size());
- // First 10 samples per channel are 0 from NetEq.
- for (size_t i = 10; i < audio_frame.samples.size(); ++i) {
- EXPECT_EQ(0x3412, audio_frame.samples[i]);
- }
+TEST_P(AudioDecoderTest, DecodesFramesWithSameDuration) {
+ const base::TimeDelta kTenMilliseconds =
+ base::TimeDelta::FromMilliseconds(10);
+ const int kNumFrames = 10;
+ for (int i = 0; i < kNumFrames; ++i)
+ FeedMoreAudio(kTenMilliseconds, 0);
+ WaitForAllAudioToBeDecoded();
}
-TEST_F(AudioDecoderTest, Pcm16StereoNoResampleTwoPackets) {
- AudioReceiverConfig audio_config;
- audio_config.rtp_payload_type = 127;
- audio_config.frequency = 16000;
- audio_config.channels = 2;
- audio_config.codec = transport::kPcm16;
- audio_config.use_external_decoder = false;
- Configure(audio_config);
-
- RtpCastHeader rtp_header;
- rtp_header.frame_id = 0;
- rtp_header.webrtc.header.payloadType = 127;
- rtp_header.webrtc.header.sequenceNumber = 1234;
- rtp_header.webrtc.header.timestamp = 0x87654321;
- rtp_header.webrtc.header.ssrc = 0x12345678;
- rtp_header.webrtc.header.paddingLength = 0;
- rtp_header.webrtc.header.headerLength = 12;
-
- rtp_header.webrtc.type.Audio.isCNG = false;
- rtp_header.webrtc.type.Audio.channel = 2;
-
- std::vector<int16> payload(640, 0x1234);
-
- uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
- size_t payload_size = payload.size() * sizeof(int16);
-
- audio_decoder_->IncomingParsedRtpPacket(
- payload_data, payload_size, rtp_header);
-
- int number_of_10ms_blocks = 2;
- int desired_frequency = 16000;
- PcmAudioFrame audio_frame;
- uint32 rtp_timestamp;
-
- EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(
- number_of_10ms_blocks, desired_frequency, &audio_frame, &rtp_timestamp));
- EXPECT_EQ(2, audio_frame.channels);
- EXPECT_EQ(16000, audio_frame.frequency);
- EXPECT_EQ(640ul, audio_frame.samples.size());
- // First 10 samples per channel are 0 from NetEq.
- for (size_t i = 10 * audio_config.channels; i < audio_frame.samples.size();
- ++i) {
- EXPECT_EQ(0x3412, audio_frame.samples[i]);
- }
-
- rtp_header.frame_id++;
- rtp_header.webrtc.header.sequenceNumber++;
- rtp_header.webrtc.header.timestamp += (audio_config.frequency / 100) * 2 * 2;
+TEST_P(AudioDecoderTest, DecodesFramesWithVaryingDuration) {
+ // These are the set of frame durations supported by the Opus encoder.
+ const int kFrameDurationMs[] = { 5, 10, 20, 40, 60 };
- audio_decoder_->IncomingParsedRtpPacket(
- payload_data, payload_size, rtp_header);
-
- EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(
- number_of_10ms_blocks, desired_frequency, &audio_frame, &rtp_timestamp));
- EXPECT_EQ(2, audio_frame.channels);
- EXPECT_EQ(16000, audio_frame.frequency);
- EXPECT_EQ(640ul, audio_frame.samples.size());
- for (size_t i = 0; i < audio_frame.samples.size(); ++i) {
- EXPECT_NEAR(0x3412, audio_frame.samples[i], 1000);
- }
- // Test cast callback.
- audio_decoder_->SendCastMessage();
- testing_clock_->Advance(base::TimeDelta::FromMilliseconds(33));
- audio_decoder_->SendCastMessage();
+ const int kNumFrames = 10;
+ for (size_t i = 0; i < arraysize(kFrameDurationMs); ++i)
+ for (int j = 0; j < kNumFrames; ++j)
+ FeedMoreAudio(base::TimeDelta::FromMilliseconds(kFrameDurationMs[i]), 0);
+ WaitForAllAudioToBeDecoded();
}
-TEST_F(AudioDecoderTest, Pcm16Resample) {
- AudioReceiverConfig audio_config;
- audio_config.rtp_payload_type = 127;
- audio_config.frequency = 16000;
- audio_config.channels = 2;
- audio_config.codec = transport::kPcm16;
- audio_config.use_external_decoder = false;
- Configure(audio_config);
-
- RtpCastHeader rtp_header;
- rtp_header.webrtc.header.payloadType = 127;
- rtp_header.webrtc.header.sequenceNumber = 1234;
- rtp_header.webrtc.header.timestamp = 0x87654321;
- rtp_header.webrtc.header.ssrc = 0x12345678;
- rtp_header.webrtc.header.paddingLength = 0;
- rtp_header.webrtc.header.headerLength = 12;
-
- rtp_header.webrtc.type.Audio.isCNG = false;
- rtp_header.webrtc.type.Audio.channel = 2;
-
- std::vector<int16> payload(640, 0x1234);
-
- uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
- size_t payload_size = payload.size() * sizeof(int16);
-
- audio_decoder_->IncomingParsedRtpPacket(
- payload_data, payload_size, rtp_header);
-
- int number_of_10ms_blocks = 2;
- int desired_frequency = 48000;
- PcmAudioFrame audio_frame;
- uint32 rtp_timestamp;
-
- EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(
- number_of_10ms_blocks, desired_frequency, &audio_frame, &rtp_timestamp));
-
- EXPECT_EQ(2, audio_frame.channels);
- EXPECT_EQ(48000, audio_frame.frequency);
- EXPECT_EQ(1920ul, audio_frame.samples.size()); // Upsampled to 48 KHz.
- int count = 0;
- // Resampling makes the variance worse.
- for (size_t i = 100 * audio_config.channels; i < audio_frame.samples.size();
- ++i) {
- EXPECT_NEAR(0x3412, audio_frame.samples[i], 400);
- if (0x3412 == audio_frame.samples[i])
- count++;
+TEST_P(AudioDecoderTest, RecoversFromDroppedFrames) {
+ const base::TimeDelta kTenMilliseconds =
+ base::TimeDelta::FromMilliseconds(10);
+ const int kNumFrames = 100;
+ int next_drop_at = 3;
+ int next_num_dropped = 1;
+ for (int i = 0; i < kNumFrames; ++i) {
+ if (i == next_drop_at) {
+ const int num_dropped = next_num_dropped++;
+ next_drop_at *= 2;
+ i += num_dropped;
+ FeedMoreAudio(kTenMilliseconds, num_dropped);
+ } else {
+ FeedMoreAudio(kTenMilliseconds, 0);
+ }
}
+ WaitForAllAudioToBeDecoded();
}
+INSTANTIATE_TEST_CASE_P(AudioDecoderTestScenarios,
+ AudioDecoderTest,
+ ::testing::Values(
+ TestScenario(transport::kPcm16, 1, 8000),
+ TestScenario(transport::kPcm16, 2, 48000),
+ TestScenario(transport::kOpus, 1, 8000),
+ TestScenario(transport::kOpus, 2, 48000)));
+
} // namespace cast
} // namespace media
diff --git a/media/cast/audio_receiver/audio_receiver.cc b/media/cast/audio_receiver/audio_receiver.cc
index e7b126c6ca..b214c52c6d 100644
--- a/media/cast/audio_receiver/audio_receiver.cc
+++ b/media/cast/audio_receiver/audio_receiver.cc
@@ -7,45 +7,17 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
-#include "base/strings/string_piece.h"
#include "media/cast/audio_receiver/audio_decoder.h"
-#include "media/cast/framer/framer.h"
-#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
-#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_receiver/rtp_receiver.h"
#include "media/cast/transport/cast_transport_defines.h"
namespace {
-
-// Max time we wait until an audio frame is due to be played out is released.
-static const int64 kMaxAudioFrameWaitMs = 20;
-static const int64 kMinSchedulingDelayMs = 1;
-
+const int kTypicalAudioFrameDurationMs = 10;
+const int kMinSchedulingDelayMs = 1;
} // namespace
namespace media {
namespace cast {
-DecodedAudioCallbackData::DecodedAudioCallbackData()
- : number_of_10ms_blocks(0), desired_frequency(0), callback() {}
-
-DecodedAudioCallbackData::~DecodedAudioCallbackData() {}
-
-// Local implementation of RtpPayloadFeedback (defined in rtp_defines.h)
-// Used to convey cast-specific feedback from receiver to sender.
-class LocalRtpAudioFeedback : public RtpPayloadFeedback {
- public:
- explicit LocalRtpAudioFeedback(AudioReceiver* audio_receiver)
- : audio_receiver_(audio_receiver) {}
-
- virtual void CastFeedback(const RtcpCastMessage& cast_message) OVERRIDE {
- audio_receiver_->CastFeedback(cast_message);
- }
-
- private:
- AudioReceiver* audio_receiver_;
-};
-
AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
const AudioReceiverConfig& audio_config,
transport::PacedPacketSender* const packet_sender)
@@ -55,36 +27,45 @@ AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
ReceiverRtcpEventSubscriber::kAudioEventSubscriber),
codec_(audio_config.codec),
frequency_(audio_config.frequency),
- audio_buffer_(),
- audio_decoder_(),
- time_offset_(),
+ framer_(cast_environment->Clock(),
+ this,
+ audio_config.incoming_ssrc,
+ true,
+ 0),
+ rtcp_(cast_environment,
+ NULL,
+ NULL,
+ packet_sender,
+ GetStatistics(),
+ audio_config.rtcp_mode,
+ base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
+ audio_config.feedback_ssrc,
+ audio_config.incoming_ssrc,
+ audio_config.rtcp_c_name),
+ is_waiting_for_consecutive_frame_(false),
weak_factory_(this) {
target_delay_delta_ =
base::TimeDelta::FromMilliseconds(audio_config.rtp_max_delay_ms);
- incoming_payload_feedback_.reset(new LocalRtpAudioFeedback(this));
- if (audio_config.use_external_decoder) {
- audio_buffer_.reset(new Framer(cast_environment->Clock(),
- incoming_payload_feedback_.get(),
- audio_config.incoming_ssrc, true, 0));
- } else {
- audio_decoder_.reset(new AudioDecoder(cast_environment, audio_config,
- incoming_payload_feedback_.get()));
- }
+ if (!audio_config.use_external_decoder)
+ audio_decoder_.reset(new AudioDecoder(cast_environment, audio_config));
decryptor_.Initialize(audio_config.aes_key, audio_config.aes_iv_mask);
- base::TimeDelta rtcp_interval_delta =
- base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval);
- rtcp_.reset(new Rtcp(cast_environment, NULL, NULL, packet_sender,
- GetStatistics(),
- audio_config.rtcp_mode, rtcp_interval_delta,
- audio_config.feedback_ssrc, audio_config.incoming_ssrc,
- audio_config.rtcp_c_name));
- // Set the target delay that will be conveyed to the sender.
- rtcp_->SetTargetDelay(target_delay_delta_);
+ rtcp_.SetTargetDelay(target_delay_delta_);
cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_);
memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_));
}
AudioReceiver::~AudioReceiver() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ // If any callbacks for encoded audio frames are queued, flush them out now.
+ // This is critical because some Closures in |frame_request_queue_| may have
+ // Unretained references to |this|.
+ while (!frame_request_queue_.empty()) {
+ frame_request_queue_.front().Run(
+ make_scoped_ptr<transport::EncodedAudioFrame>(NULL), base::TimeTicks());
+ frame_request_queue_.pop_front();
+ }
+
cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_);
}
@@ -114,41 +95,9 @@ void AudioReceiver::OnReceivedPayloadData(const uint8* payload_data,
time_first_incoming_packet_ = now;
}
- if (audio_decoder_) {
- DCHECK(!audio_buffer_) << "Invalid internal state";
- std::string plaintext;
- if (decryptor_.initialized()) {
- if (!decryptor_.Decrypt(
- rtp_header.frame_id,
- base::StringPiece(reinterpret_cast<const char*>(payload_data),
- payload_size),
- &plaintext))
- return;
- } else {
- plaintext.append(reinterpret_cast<const char*>(payload_data),
- payload_size);
- }
- audio_decoder_->IncomingParsedRtpPacket(
- reinterpret_cast<const uint8*>(plaintext.data()), plaintext.size(),
- rtp_header);
- if (!queued_decoded_callbacks_.empty()) {
- DecodedAudioCallbackData decoded_data = queued_decoded_callbacks_.front();
- queued_decoded_callbacks_.pop_front();
- cast_environment_->PostTask(
- CastEnvironment::AUDIO, FROM_HERE,
- base::Bind(&AudioReceiver::DecodeAudioFrameThread,
- base::Unretained(this), decoded_data.number_of_10ms_blocks,
- decoded_data.desired_frequency, decoded_data.callback));
- }
- return;
- }
-
- DCHECK(audio_buffer_) << "Invalid internal state";
- DCHECK(!audio_decoder_) << "Invalid internal state";
-
bool duplicate = false;
- bool complete = audio_buffer_->InsertPacket(payload_data, payload_size,
- rtp_header, &duplicate);
+ const bool complete =
+ framer_.InsertPacket(payload_data, payload_size, rtp_header, &duplicate);
if (duplicate) {
cast_environment_->Logging()->InsertPacketEvent(
now, kDuplicateAudioPacketReceived, rtp_header.webrtc.header.timestamp,
@@ -157,164 +106,145 @@ void AudioReceiver::OnReceivedPayloadData(const uint8* payload_data,
// Duplicate packets are ignored.
return;
}
- if (!complete) return; // Audio frame not complete; wait for more packets.
- if (queued_encoded_callbacks_.empty()) return;
- AudioFrameEncodedCallback callback = queued_encoded_callbacks_.front();
- queued_encoded_callbacks_.pop_front();
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioReceiver::GetEncodedAudioFrame,
- weak_factory_.GetWeakPtr(), callback));
+ if (!complete)
+ return;
+
+ EmitAvailableEncodedFrames();
}
void AudioReceiver::GetRawAudioFrame(
- int number_of_10ms_blocks, int desired_frequency,
const AudioFrameDecodedCallback& callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK(audio_decoder_) << "Invalid function call in this configuration";
- // TODO(pwestin): we can skip this function by posting direct to the decoder.
- cast_environment_->PostTask(
- CastEnvironment::AUDIO, FROM_HERE,
- base::Bind(&AudioReceiver::DecodeAudioFrameThread, base::Unretained(this),
- number_of_10ms_blocks, desired_frequency, callback));
+ DCHECK(!callback.is_null());
+ DCHECK(audio_decoder_.get());
+ GetEncodedAudioFrame(base::Bind(
+ &AudioReceiver::DecodeEncodedAudioFrame,
+ // Note: Use of Unretained is safe since this Closure is guaranteed to be
+ // invoked before destruction of |this|.
+ base::Unretained(this),
+ callback));
}
-void AudioReceiver::DecodeAudioFrameThread(
- int number_of_10ms_blocks, int desired_frequency,
- const AudioFrameDecodedCallback callback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::AUDIO));
- // TODO(mikhal): Allow the application to allocate this memory.
- scoped_ptr<PcmAudioFrame> audio_frame(new PcmAudioFrame());
-
- uint32 rtp_timestamp = 0;
- if (!audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
- desired_frequency, audio_frame.get(),
- &rtp_timestamp)) {
- DecodedAudioCallbackData callback_data;
- callback_data.number_of_10ms_blocks = number_of_10ms_blocks;
- callback_data.desired_frequency = desired_frequency;
- callback_data.callback = callback;
- queued_decoded_callbacks_.push_back(callback_data);
+void AudioReceiver::DecodeEncodedAudioFrame(
+ const AudioFrameDecodedCallback& callback,
+ scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
+ const base::TimeTicks& playout_time) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (!encoded_frame) {
+ callback.Run(make_scoped_ptr<AudioBus>(NULL), playout_time, false);
return;
}
+ const uint32 frame_id = encoded_frame->frame_id;
+ const uint32 rtp_timestamp = encoded_frame->rtp_timestamp;
+ audio_decoder_->DecodeFrame(encoded_frame.Pass(),
+ base::Bind(&AudioReceiver::EmitRawAudioFrame,
+ cast_environment_,
+ callback,
+ frame_id,
+ rtp_timestamp,
+ playout_time));
+}
- cast_environment_->PostTask(
- CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioReceiver::ReturnDecodedFrameWithPlayoutDelay,
- base::Unretained(this), base::Passed(&audio_frame),
- rtp_timestamp, callback));
+// static
+void AudioReceiver::EmitRawAudioFrame(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const AudioFrameDecodedCallback& callback,
+ uint32 frame_id,
+ uint32 rtp_timestamp,
+ const base::TimeTicks& playout_time,
+ scoped_ptr<AudioBus> audio_bus,
+ bool is_continuous) {
+ DCHECK(cast_environment->CurrentlyOn(CastEnvironment::MAIN));
+ if (audio_bus.get()) {
+ const base::TimeTicks now = cast_environment->Clock()->NowTicks();
+ cast_environment->Logging()->InsertFrameEvent(
+ now, kAudioFrameDecoded, rtp_timestamp, frame_id);
+ cast_environment->Logging()->InsertFrameEventWithDelay(
+ now, kAudioPlayoutDelay, rtp_timestamp, frame_id,
+ playout_time - now);
+ }
+ callback.Run(audio_bus.Pass(), playout_time, is_continuous);
}
-void AudioReceiver::ReturnDecodedFrameWithPlayoutDelay(
- scoped_ptr<PcmAudioFrame> audio_frame, uint32 rtp_timestamp,
- const AudioFrameDecodedCallback callback) {
+void AudioReceiver::GetEncodedAudioFrame(
+ const AudioFrameEncodedCallback& callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- cast_environment_->Logging()->InsertFrameEvent(
- now, kAudioFrameDecoded, rtp_timestamp, kFrameIdUnknown);
-
- base::TimeTicks playout_time = GetPlayoutTime(now, rtp_timestamp);
-
- cast_environment_->Logging()->InsertFrameEventWithDelay(
- now, kAudioPlayoutDelay, rtp_timestamp, kFrameIdUnknown,
- playout_time - now);
-
- // Frame is ready - Send back to the caller.
- cast_environment_->PostTask(
- CastEnvironment::MAIN, FROM_HERE,
- base::Bind(callback, base::Passed(&audio_frame), playout_time));
+ frame_request_queue_.push_back(callback);
+ EmitAvailableEncodedFrames();
}
-void AudioReceiver::PlayoutTimeout() {
+void AudioReceiver::EmitAvailableEncodedFrames() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK(audio_buffer_) << "Invalid function call in this configuration";
- if (queued_encoded_callbacks_.empty()) {
- // Already released by incoming packet.
- return;
- }
- bool next_frame = false;
- scoped_ptr<transport::EncodedAudioFrame> encoded_frame(
- new transport::EncodedAudioFrame());
-
- if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(), &next_frame)) {
- // We have no audio frames. Wait for new packet(s).
- // Since the application can post multiple AudioFrameEncodedCallback and
- // we only check the next frame to play out we might have multiple timeout
- // events firing after each other; however this should be a rare event.
- VLOG(1) << "Failed to retrieved a complete frame at this point in time";
- return;
- }
-
- if (decryptor_.initialized() && !DecryptAudioFrame(&encoded_frame)) {
- // Logging already done.
- return;
- }
- if (PostEncodedAudioFrame(
- queued_encoded_callbacks_.front(), next_frame, &encoded_frame)) {
- // Call succeed remove callback from list.
- queued_encoded_callbacks_.pop_front();
- }
-}
+ while (!frame_request_queue_.empty()) {
+ // Attempt to peek at the next completed frame from the |framer_|.
+ // TODO(miu): We should only be peeking at the metadata, and not copying the
+ // payload yet! Or, at least, peek using a StringPiece instead of a copy.
+ scoped_ptr<transport::EncodedAudioFrame> encoded_frame(
+ new transport::EncodedAudioFrame());
+ bool is_consecutively_next_frame = false;
+ if (!framer_.GetEncodedAudioFrame(encoded_frame.get(),
+ &is_consecutively_next_frame)) {
+ VLOG(1) << "Wait for more audio packets to produce a completed frame.";
+ return; // OnReceivedPayloadData() will invoke this method in the future.
+ }
-void AudioReceiver::GetEncodedAudioFrame(
- const AudioFrameEncodedCallback& callback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK(audio_buffer_) << "Invalid function call in this configuration";
+ // If |framer_| has a frame ready that is out of sequence, examine the
+ // playout time to determine whether it's acceptable to continue, thereby
+ // skipping one or more frames. Skip if the missing frame wouldn't complete
+ // playing before the start of playback of the available frame.
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ const base::TimeTicks playout_time =
+ GetPlayoutTime(now, encoded_frame->rtp_timestamp);
+ if (!is_consecutively_next_frame) {
+ const base::TimeTicks earliest_possible_end_time_of_missing_frame =
+ now + base::TimeDelta::FromMilliseconds(kTypicalAudioFrameDurationMs);
+ if (earliest_possible_end_time_of_missing_frame < playout_time) {
+ VLOG(1) << "Wait for next consecutive frame instead of skipping.";
+ if (!is_waiting_for_consecutive_frame_) {
+ is_waiting_for_consecutive_frame_ = true;
+ cast_environment_->PostDelayedTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&AudioReceiver::EmitAvailableEncodedFramesAfterWaiting,
+ weak_factory_.GetWeakPtr()),
+ playout_time - now);
+ }
+ return;
+ }
+ }
- bool next_frame = false;
- scoped_ptr<transport::EncodedAudioFrame> encoded_frame(
- new transport::EncodedAudioFrame());
+ // Decrypt the payload data in the frame, if crypto is being used.
+ if (decryptor_.initialized()) {
+ std::string decrypted_audio_data;
+ if (!decryptor_.Decrypt(encoded_frame->frame_id,
+ encoded_frame->data,
+ &decrypted_audio_data)) {
+ // Decryption failed. Give up on this frame, releasing it from the
+ // jitter buffer.
+ framer_.ReleaseFrame(encoded_frame->frame_id);
+ continue;
+ }
+ encoded_frame->data.swap(decrypted_audio_data);
+ }
- if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(), &next_frame)) {
- // We have no audio frames. Wait for new packet(s).
- VLOG(1) << "Wait for more audio packets in frame";
- queued_encoded_callbacks_.push_back(callback);
- return;
- }
- if (decryptor_.initialized() && !DecryptAudioFrame(&encoded_frame)) {
- // Logging already done.
- queued_encoded_callbacks_.push_back(callback);
- return;
- }
- if (!PostEncodedAudioFrame(callback, next_frame, &encoded_frame)) {
- // We have an audio frame; however we are missing packets and we have time
- // to wait for new packet(s).
- queued_encoded_callbacks_.push_back(callback);
+ // At this point, we have a decrypted EncodedAudioFrame ready to be emitted.
+ encoded_frame->codec = codec_;
+ framer_.ReleaseFrame(encoded_frame->frame_id);
+ cast_environment_->PostTask(CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(frame_request_queue_.front(),
+ base::Passed(&encoded_frame),
+ playout_time));
+ frame_request_queue_.pop_front();
}
}
-bool AudioReceiver::PostEncodedAudioFrame(
- const AudioFrameEncodedCallback& callback,
- bool next_frame,
- scoped_ptr<transport::EncodedAudioFrame>* encoded_frame) {
+void AudioReceiver::EmitAvailableEncodedFramesAfterWaiting() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK(audio_buffer_) << "Invalid function call in this configuration";
- DCHECK(encoded_frame) << "Invalid encoded_frame";
-
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- base::TimeTicks playout_time =
- GetPlayoutTime(now, (*encoded_frame)->rtp_timestamp);
- base::TimeDelta time_until_playout = playout_time - now;
- base::TimeDelta min_wait_delta =
- base::TimeDelta::FromMilliseconds(kMaxAudioFrameWaitMs);
-
- if (!next_frame && (time_until_playout > min_wait_delta)) {
- base::TimeDelta time_until_release = time_until_playout - min_wait_delta;
- cast_environment_->PostDelayedTask(
- CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioReceiver::PlayoutTimeout, weak_factory_.GetWeakPtr()),
- time_until_release);
- VLOG(1) << "Wait until time to playout:"
- << time_until_release.InMilliseconds();
- return false;
- }
- (*encoded_frame)->codec = codec_;
- audio_buffer_->ReleaseFrame((*encoded_frame)->frame_id);
-
- cast_environment_->PostTask(
- CastEnvironment::MAIN, FROM_HERE,
- base::Bind(callback, base::Passed(encoded_frame), playout_time));
- return true;
+ DCHECK(is_waiting_for_consecutive_frame_);
+ is_waiting_for_consecutive_frame_ = false;
+ EmitAvailableEncodedFrames();
}
void AudioReceiver::IncomingPacket(scoped_ptr<Packet> packet) {
@@ -323,23 +253,25 @@ void AudioReceiver::IncomingPacket(scoped_ptr<Packet> packet) {
if (!rtcp_packet) {
ReceivedPacket(&packet->front(), packet->size());
} else {
- rtcp_->IncomingRtcpPacket(&packet->front(), packet->size());
+ rtcp_.IncomingRtcpPacket(&packet->front(), packet->size());
}
}
void AudioReceiver::SetTargetDelay(base::TimeDelta target_delay) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
target_delay_delta_ = target_delay;
- rtcp_->SetTargetDelay(target_delay_delta_);
+ rtcp_.SetTargetDelay(target_delay_delta_);
}
void AudioReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeTicks now = cast_environment_->Clock()->NowTicks();
RtpTimestamp rtp_timestamp =
frame_id_to_rtp_timestamp_[cast_message.ack_frame_id_ & 0xff];
cast_environment_->Logging()->InsertFrameEvent(
now, kAudioAckSent, rtp_timestamp, cast_message.ack_frame_id_);
- rtcp_->SendRtcpFromRtpReceiver(&cast_message, &event_subscriber_);
+ rtcp_.SendRtcpFromRtpReceiver(&cast_message, &event_subscriber_);
}
base::TimeTicks AudioReceiver::GetPlayoutTime(base::TimeTicks now,
@@ -350,13 +282,21 @@ base::TimeTicks AudioReceiver::GetPlayoutTime(base::TimeTicks now,
base::TimeTicks rtp_timestamp_in_ticks;
base::TimeTicks playout_time;
if (time_offset_ == base::TimeDelta()) {
- if (rtcp_->RtpTimestampInSenderTime(frequency_,
- first_incoming_rtp_timestamp_,
- &rtp_timestamp_in_ticks)) {
+ if (rtcp_.RtpTimestampInSenderTime(frequency_,
+ first_incoming_rtp_timestamp_,
+ &rtp_timestamp_in_ticks)) {
time_offset_ = time_first_incoming_packet_ - rtp_timestamp_in_ticks;
+ // TODO(miu): As clocks drift w.r.t. each other, and other factors take
+ // effect, |time_offset_| should be updated. Otherwise, we might as well
+ // always compute the time offsets agnostic of RTCP's time data.
} else {
// We have not received any RTCP to sync the stream play it out as soon as
// possible.
+
+ // BUG: This means we're literally switching to a different timeline a
+ // short time after a cast receiver has been running. Re-enable
+ // End2EndTest.StartSenderBeforeReceiver once this is fixed.
+ // http://crbug.com/356942
uint32 rtp_timestamp_diff = rtp_timestamp - first_incoming_rtp_timestamp_;
int frequency_khz = frequency_ / 1000;
@@ -370,40 +310,31 @@ base::TimeTicks AudioReceiver::GetPlayoutTime(base::TimeTicks now,
}
if (playout_time.is_null()) {
// This can fail if we have not received any RTCP packets in a long time.
- if (rtcp_->RtpTimestampInSenderTime(frequency_, rtp_timestamp,
- &rtp_timestamp_in_ticks)) {
+ if (rtcp_.RtpTimestampInSenderTime(frequency_, rtp_timestamp,
+ &rtp_timestamp_in_ticks)) {
playout_time =
rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_;
} else {
playout_time = now;
}
}
+
+ // TODO(miu): This is broken since we literally switch timelines once |rtcp_|
+ // can provide us the |time_offset_|. Furthermore, this "getter" method may
+ // be called on frames received out-of-order, which means the playout times
+ // for earlier frames will be computed incorrectly.
+#if 0
// Don't allow the playout time to go backwards.
if (last_playout_time_ > playout_time) playout_time = last_playout_time_;
last_playout_time_ = playout_time;
- return playout_time;
-}
+#endif
-bool AudioReceiver::DecryptAudioFrame(
- scoped_ptr<transport::EncodedAudioFrame>* audio_frame) {
- if (!decryptor_.initialized())
- return false;
-
- std::string decrypted_audio_data;
- if (!decryptor_.Decrypt((*audio_frame)->frame_id,
- (*audio_frame)->data,
- &decrypted_audio_data)) {
- // Give up on this frame, release it from the jitter buffer.
- audio_buffer_->ReleaseFrame((*audio_frame)->frame_id);
- return false;
- }
- (*audio_frame)->data.swap(decrypted_audio_data);
- return true;
+ return playout_time;
}
void AudioReceiver::ScheduleNextRtcpReport() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeDelta time_to_send = rtcp_->TimeToSendNextRtcpReport() -
+ base::TimeDelta time_to_send = rtcp_.TimeToSendNextRtcpReport() -
cast_environment_->Clock()->NowTicks();
time_to_send = std::max(
@@ -419,7 +350,7 @@ void AudioReceiver::ScheduleNextRtcpReport() {
void AudioReceiver::SendNextRtcpReport() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
// TODO(pwestin): add logging.
- rtcp_->SendRtcpFromRtpReceiver(NULL, NULL);
+ rtcp_.SendRtcpFromRtpReceiver(NULL, NULL);
ScheduleNextRtcpReport();
}
@@ -428,13 +359,7 @@ void AudioReceiver::SendNextRtcpReport() {
void AudioReceiver::ScheduleNextCastMessage() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeTicks send_time;
- if (audio_buffer_) {
- audio_buffer_->TimeToSendNextCastMessage(&send_time);
- } else if (audio_decoder_) {
- audio_decoder_->TimeToSendNextCastMessage(&send_time);
- } else {
- NOTREACHED();
- }
+ framer_.TimeToSendNextCastMessage(&send_time);
base::TimeDelta time_to_send =
send_time - cast_environment_->Clock()->NowTicks();
time_to_send = std::max(
@@ -448,15 +373,8 @@ void AudioReceiver::ScheduleNextCastMessage() {
void AudioReceiver::SendNextCastMessage() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-
- if (audio_buffer_) {
- // Will only send a message if it is time.
- audio_buffer_->SendCastMessage();
- }
- if (audio_decoder_) {
- // Will only send a message if it is time.
- audio_decoder_->SendCastMessage();
- }
+ // Will only send a message if it is time.
+ framer_.SendCastMessage();
ScheduleNextCastMessage();
}
diff --git a/media/cast/audio_receiver/audio_receiver.gypi b/media/cast/audio_receiver/audio_receiver.gypi
index 16b425cd21..048227cd64 100644
--- a/media/cast/audio_receiver/audio_receiver.gypi
+++ b/media/cast/audio_receiver/audio_receiver.gypi
@@ -19,10 +19,12 @@
'audio_receiver.cc',
], # source
'dependencies': [
+ '<(DEPTH)/media/media.gyp:media',
+ '<(DEPTH)/media/media.gyp:shared_memory_support',
'<(DEPTH)/media/cast/transport/utility/utility.gyp:transport_utility',
'<(DEPTH)/media/cast/rtcp/rtcp.gyp:cast_rtcp',
'<(DEPTH)/media/cast/rtp_receiver/rtp_receiver.gyp:cast_rtp_receiver',
- '<(DEPTH)/third_party/webrtc/webrtc.gyp:webrtc',
+ '<(DEPTH)/third_party/opus/opus.gyp:opus',
],
},
],
diff --git a/media/cast/audio_receiver/audio_receiver.h b/media/cast/audio_receiver/audio_receiver.h
index cbf52f9e7c..feed5e98ca 100644
--- a/media/cast/audio_receiver/audio_receiver.h
+++ b/media/cast/audio_receiver/audio_receiver.h
@@ -17,34 +17,37 @@
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/cast_receiver.h"
+#include "media/cast/framer/framer.h"
#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
-#include "media/cast/rtcp/rtcp.h" // RtcpCastMessage
+#include "media/cast/rtcp/rtcp.h"
#include "media/cast/rtp_receiver/rtp_receiver.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h" // RtpCastHeader
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
#include "media/cast/transport/utility/transport_encryption_handler.h"
namespace media {
namespace cast {
class AudioDecoder;
-class Framer;
-class LocalRtpAudioFeedback;
-class RtpReceiver;
-class RtpReceiverStatistics;
-
-struct DecodedAudioCallbackData {
- DecodedAudioCallbackData();
- ~DecodedAudioCallbackData();
- int number_of_10ms_blocks;
- int desired_frequency;
- AudioFrameDecodedCallback callback;
-};
-// This class is not thread safe. Should only be called from the Main cast
+// AudioReceiver receives packets out-of-order while clients make requests for
+// complete frames in-order. (A frame consists of one or more packets.)
+// AudioReceiver also includes logic for mapping RTP timestamps to the local
+// base::TimeTicks clock for each frame.
+//
+// Two types of frames can be requested: 1) A frame of decoded audio data; or 2)
+// a frame of still-encoded audio data, to be passed into an external audio
+// decoder. Each request for a frame includes a callback which AudioReceiver
+// guarantees will be called at some point in the future. Clients should
+// generally limit the number of outstanding requests (perhaps to just one or
+// two). When AudioReceiver is destroyed, any outstanding requests will be
+// immediately invoked with a NULL frame.
+//
+// This class is not thread safe. Should only be called from the Main cast
// thread.
-class AudioReceiver : public base::NonThreadSafe,
- public base::SupportsWeakPtr<AudioReceiver>,
- public RtpReceiver {
+class AudioReceiver : public RtpReceiver,
+ public RtpPayloadFeedback,
+ public base::NonThreadSafe,
+ public base::SupportsWeakPtr<AudioReceiver> {
public:
AudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
const AudioReceiverConfig& audio_config,
@@ -52,58 +55,60 @@ class AudioReceiver : public base::NonThreadSafe,
virtual ~AudioReceiver();
- // Extract a raw audio frame from the cast receiver.
- // Actual decoding will be preformed on a designated audio_decoder thread.
- void GetRawAudioFrame(int number_of_10ms_blocks,
- int desired_frequency,
- const AudioFrameDecodedCallback& callback);
+ // Request a decoded audio frame. The audio signal data returned in the
+ // callback will have the sampling rate and number of channels as requested in
+ // the configuration that was passed to the ctor.
+ //
+ // The given |callback| is guaranteed to be run at some point in the future,
+ // even if to respond with NULL at shutdown time.
+ void GetRawAudioFrame(const AudioFrameDecodedCallback& callback);
// Extract an encoded audio frame from the cast receiver.
+ //
+ // The given |callback| is guaranteed to be run at some point in the future,
+ // even if to respond with NULL at shutdown time.
void GetEncodedAudioFrame(const AudioFrameEncodedCallback& callback);
- // Should only be called from the main cast thread.
+ // Deliver another packet, possibly a duplicate, and possibly out-of-order.
void IncomingPacket(scoped_ptr<Packet> packet);
// Update target audio delay used to compute the playout time. Rtcp
// will also be updated (will be included in all outgoing reports).
void SetTargetDelay(base::TimeDelta target_delay);
+ protected:
+ friend class AudioReceiverTest; // Invokes OnReceivedPayloadData().
+
virtual void OnReceivedPayloadData(const uint8* payload_data,
size_t payload_size,
const RtpCastHeader& rtp_header) OVERRIDE;
- private:
- friend class LocalRtpAudioFeedback;
-
- void CastFeedback(const RtcpCastMessage& cast_message);
-
- // Time to pull out the audio even though we are missing data.
- void PlayoutTimeout();
-
- bool PostEncodedAudioFrame(
- const AudioFrameEncodedCallback& callback,
- bool next_frame,
- scoped_ptr<transport::EncodedAudioFrame>* encoded_frame);
+ // RtpPayloadFeedback implementation.
+ virtual void CastFeedback(const RtcpCastMessage& cast_message) OVERRIDE;
- // Actual decoding implementation - should be called under the audio decoder
- // thread.
- void DecodeAudioFrameThread(int number_of_10ms_blocks,
- int desired_frequency,
- const AudioFrameDecodedCallback callback);
- void ReturnDecodedFrameWithPlayoutDelay(
- scoped_ptr<PcmAudioFrame> audio_frame,
- uint32 rtp_timestamp,
- const AudioFrameDecodedCallback callback);
+ private:
+ // Processes ready-to-consume packets from |framer_|, decrypting each packet's
+ // payload data, and then running the enqueued callbacks in order (one for
+ // each packet). This method may post a delayed task to re-invoke itself in
+ // the future to wait for missing/incomplete frames.
+ void EmitAvailableEncodedFrames();
+
+ // Clears the |is_waiting_for_consecutive_frame_| flag and invokes
+ // EmitAvailableEncodedFrames().
+ void EmitAvailableEncodedFramesAfterWaiting();
+
+ // Feeds an EncodedAudioFrame into |audio_decoder_|. GetRawAudioFrame() uses
+ // this as a callback for GetEncodedAudioFrame().
+ void DecodeEncodedAudioFrame(
+ const AudioFrameDecodedCallback& callback,
+ scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
+ const base::TimeTicks& playout_time);
// Return the playout time based on the current time and rtp timestamp.
base::TimeTicks GetPlayoutTime(base::TimeTicks now, uint32 rtp_timestamp);
void InitializeTimers();
- // Decrypts the data within the |audio_frame| and replaces the data with the
- // decrypted string.
- bool DecryptAudioFrame(scoped_ptr<transport::EncodedAudioFrame>* audio_frame);
-
// Schedule the next RTCP report.
void ScheduleNextRtcpReport();
@@ -116,7 +121,21 @@ class AudioReceiver : public base::NonThreadSafe,
// Actually send the next cast message.
void SendNextCastMessage();
- scoped_refptr<CastEnvironment> cast_environment_;
+ // Receives an AudioBus from |audio_decoder_|, logs the event, and passes the
+ // data on by running the given |callback|. This method is static to ensure
+ // it can be called after an AudioReceiver instance is destroyed.
+ // DecodeEncodedAudioFrame() uses this as a callback for
+ // AudioDecoder::DecodeFrame().
+ static void EmitRawAudioFrame(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const AudioFrameDecodedCallback& callback,
+ uint32 frame_id,
+ uint32 rtp_timestamp,
+ const base::TimeTicks& playout_time,
+ scoped_ptr<AudioBus> audio_bus,
+ bool is_continuous);
+
+ const scoped_refptr<CastEnvironment> cast_environment_;
// Subscribes to raw events.
// Processes raw audio events to be sent over to the cast sender via RTCP.
@@ -125,18 +144,20 @@ class AudioReceiver : public base::NonThreadSafe,
const transport::AudioCodec codec_;
const int frequency_;
base::TimeDelta target_delay_delta_;
- scoped_ptr<Framer> audio_buffer_;
+ Framer framer_;
scoped_ptr<AudioDecoder> audio_decoder_;
- scoped_ptr<LocalRtpAudioFeedback> incoming_payload_feedback_;
- scoped_ptr<Rtcp> rtcp_;
+ Rtcp rtcp_;
base::TimeDelta time_offset_;
base::TimeTicks time_first_incoming_packet_;
uint32 first_incoming_rtp_timestamp_;
transport::TransportEncryptionHandler decryptor_;
- base::TimeTicks last_playout_time_;
- std::list<AudioFrameEncodedCallback> queued_encoded_callbacks_;
- std::list<DecodedAudioCallbackData> queued_decoded_callbacks_;
+ // Outstanding callbacks to run to deliver on client requests for frames.
+ std::list<AudioFrameEncodedCallback> frame_request_queue_;
+
+ // True while there's an outstanding task to re-invoke
+ // EmitAvailableEncodedFrames().
+ bool is_waiting_for_consecutive_frame_;
// This mapping allows us to log kAudioAckSent as a frame event. In addition
// it allows the event to be transmitted via RTCP.
diff --git a/media/cast/audio_receiver/audio_receiver_unittest.cc b/media/cast/audio_receiver/audio_receiver_unittest.cc
index 17721da618..bf2c39c5d7 100644
--- a/media/cast/audio_receiver/audio_receiver_unittest.cc
+++ b/media/cast/audio_receiver/audio_receiver_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
@@ -18,16 +20,16 @@
namespace media {
namespace cast {
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
namespace {
-class TestAudioEncoderCallback
- : public base::RefCountedThreadSafe<TestAudioEncoderCallback> {
+class FakeAudioClient {
public:
- TestAudioEncoderCallback() : num_called_(0) {}
+ FakeAudioClient() : num_called_(0) {}
+ virtual ~FakeAudioClient() {}
- void SetExpectedResult(uint8 expected_frame_id,
- const base::TimeTicks& expected_playout_time) {
+ void SetNextExpectedResult(uint8 expected_frame_id,
+ const base::TimeTicks& expected_playout_time) {
expected_frame_id_ = expected_frame_id;
expected_playout_time_ = expected_playout_time;
}
@@ -35,6 +37,8 @@ class TestAudioEncoderCallback
void DeliverEncodedAudioFrame(
scoped_ptr<transport::EncodedAudioFrame> audio_frame,
const base::TimeTicks& playout_time) {
+ ASSERT_FALSE(!audio_frame)
+ << "If at shutdown: There were unsatisfied requests enqueued.";
EXPECT_EQ(expected_frame_id_, audio_frame->frame_id);
EXPECT_EQ(transport::kPcm16, audio_frame->codec);
EXPECT_EQ(expected_playout_time_, playout_time);
@@ -43,17 +47,12 @@ class TestAudioEncoderCallback
int number_times_called() const { return num_called_; }
- protected:
- virtual ~TestAudioEncoderCallback() {}
-
private:
- friend class base::RefCountedThreadSafe<TestAudioEncoderCallback>;
-
int num_called_;
uint8 expected_frame_id_;
base::TimeTicks expected_playout_time_;
- DISALLOW_COPY_AND_ASSIGN(TestAudioEncoderCallback);
+ DISALLOW_COPY_AND_ASSIGN(FakeAudioClient);
};
} // namespace
@@ -77,8 +76,6 @@ class AudioReceiverTest : public ::testing::Test {
task_runner_,
task_runner_,
task_runner_);
-
- test_audio_encoder_callback_ = new TestAudioEncoderCallback();
}
void Configure(bool use_external_decoder) {
@@ -89,8 +86,6 @@ class AudioReceiverTest : public ::testing::Test {
virtual ~AudioReceiverTest() {}
- static void DummyDeletePacket(const uint8* packet) {};
-
virtual void SetUp() {
payload_.assign(kMaxIpPacketSize, 0);
rtp_header_.is_key_frame = true;
@@ -102,15 +97,23 @@ class AudioReceiverTest : public ::testing::Test {
rtp_header_.webrtc.header.timestamp = 0;
}
+ void FeedOneFrameIntoReceiver() {
+ receiver_->OnReceivedPayloadData(
+ payload_.data(), payload_.size(), rtp_header_);
+ }
+
AudioReceiverConfig audio_config_;
std::vector<uint8> payload_;
RtpCastHeader rtp_header_;
base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
transport::MockPacedPacketSender mock_transport_;
scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
- scoped_ptr<AudioReceiver> receiver_;
scoped_refptr<CastEnvironment> cast_environment_;
- scoped_refptr<TestAudioEncoderCallback> test_audio_encoder_callback_;
+ FakeAudioClient fake_audio_client_;
+
+ // Important for the AudioReceiver to be declared last, since its dependencies
+ // must remain alive until after its destruction.
+ scoped_ptr<AudioReceiver> receiver_;
};
TEST_F(AudioReceiverTest, GetOnePacketEncodedframe) {
@@ -120,20 +123,20 @@ TEST_F(AudioReceiverTest, GetOnePacketEncodedframe) {
Configure(true);
EXPECT_CALL(mock_transport_, SendRtcpPacket(testing::_)).Times(1);
- receiver_->OnReceivedPayloadData(
- payload_.data(), payload_.size(), rtp_header_);
- transport::EncodedAudioFrame audio_frame;
- base::TimeTicks playout_time;
- test_audio_encoder_callback_->SetExpectedResult(0,
- testing_clock_->NowTicks());
+ // Enqueue a request for an audio frame.
+ receiver_->GetEncodedAudioFrame(
+ base::Bind(&FakeAudioClient::DeliverEncodedAudioFrame,
+ base::Unretained(&fake_audio_client_)));
- AudioFrameEncodedCallback frame_encoded_callback =
- base::Bind(&TestAudioEncoderCallback::DeliverEncodedAudioFrame,
- test_audio_encoder_callback_.get());
+ // The request should not be satisfied since no packets have been received.
+ task_runner_->RunTasks();
+ EXPECT_EQ(0, fake_audio_client_.number_times_called());
- receiver_->GetEncodedAudioFrame(frame_encoded_callback);
+ // Deliver one audio frame to the receiver and expect to get one frame back.
+ fake_audio_client_.SetNextExpectedResult(0, testing_clock_->NowTicks());
+ FeedOneFrameIntoReceiver();
task_runner_->RunTasks();
- EXPECT_EQ(1, test_audio_encoder_callback_->number_times_called());
+ EXPECT_EQ(1, fake_audio_client_.number_times_called());
std::vector<FrameEvent> frame_events;
event_subscriber.GetFrameEventsAndReset(&frame_events);
@@ -152,22 +155,19 @@ TEST_F(AudioReceiverTest, MultiplePendingGetCalls) {
EXPECT_CALL(mock_transport_, SendRtcpPacket(testing::_))
.WillRepeatedly(testing::Return(true));
- AudioFrameEncodedCallback frame_encoded_callback =
- base::Bind(&TestAudioEncoderCallback::DeliverEncodedAudioFrame,
- test_audio_encoder_callback_.get());
-
+ // Enqueue a request for an audio frame.
+ const AudioFrameEncodedCallback frame_encoded_callback =
+ base::Bind(&FakeAudioClient::DeliverEncodedAudioFrame,
+ base::Unretained(&fake_audio_client_));
receiver_->GetEncodedAudioFrame(frame_encoded_callback);
+ task_runner_->RunTasks();
+ EXPECT_EQ(0, fake_audio_client_.number_times_called());
- receiver_->OnReceivedPayloadData(
- payload_.data(), payload_.size(), rtp_header_);
-
- transport::EncodedAudioFrame audio_frame;
- base::TimeTicks playout_time;
- test_audio_encoder_callback_->SetExpectedResult(0,
- testing_clock_->NowTicks());
-
+ // Receive one audio frame and expect to see the first request satisfied.
+ fake_audio_client_.SetNextExpectedResult(0, testing_clock_->NowTicks());
+ FeedOneFrameIntoReceiver();
task_runner_->RunTasks();
- EXPECT_EQ(1, test_audio_encoder_callback_->number_times_called());
+ EXPECT_EQ(1, fake_audio_client_.number_times_called());
TestRtcpPacketBuilder rtcp_packet;
@@ -181,50 +181,54 @@ TEST_F(AudioReceiverTest, MultiplePendingGetCalls) {
receiver_->IncomingPacket(rtcp_packet.GetPacket().Pass());
- // Make sure that we are not continuous and that the RTP timestamp represent a
- // time in the future.
+ // Enqueue a second request for an audio frame, but it should not be
+ // fulfilled yet.
+ receiver_->GetEncodedAudioFrame(frame_encoded_callback);
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, fake_audio_client_.number_times_called());
+
+ // Receive one audio frame out-of-order: Make sure that we are not continuous
+ // and that the RTP timestamp represents a time in the future.
rtp_header_.is_key_frame = false;
rtp_header_.frame_id = 2;
rtp_header_.is_reference = true;
rtp_header_.reference_frame_id = 0;
rtp_header_.webrtc.header.timestamp = 960;
- test_audio_encoder_callback_->SetExpectedResult(
+ fake_audio_client_.SetNextExpectedResult(
2, testing_clock_->NowTicks() + base::TimeDelta::FromMilliseconds(100));
-
- receiver_->OnReceivedPayloadData(
- payload_.data(), payload_.size(), rtp_header_);
- receiver_->GetEncodedAudioFrame(frame_encoded_callback);
- task_runner_->RunTasks();
+ FeedOneFrameIntoReceiver();
// Frame 2 should not come out at this point in time.
- EXPECT_EQ(1, test_audio_encoder_callback_->number_times_called());
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, fake_audio_client_.number_times_called());
- // Through on one more pending callback.
+ // Enqueue a third request for an audio frame.
receiver_->GetEncodedAudioFrame(frame_encoded_callback);
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, fake_audio_client_.number_times_called());
+ // After 100 ms has elapsed, Frame 2 is emitted (to satisfy the second
+ // request) because a decision was made to skip over the no-show Frame 1.
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(100));
-
task_runner_->RunTasks();
- EXPECT_EQ(2, test_audio_encoder_callback_->number_times_called());
+ EXPECT_EQ(2, fake_audio_client_.number_times_called());
- test_audio_encoder_callback_->SetExpectedResult(3,
- testing_clock_->NowTicks());
-
- // Through on one more pending audio frame.
+ // Receive Frame 3 and expect it to fulfill the third request immediately.
rtp_header_.frame_id = 3;
rtp_header_.is_reference = false;
rtp_header_.reference_frame_id = 0;
rtp_header_.webrtc.header.timestamp = 1280;
- receiver_->OnReceivedPayloadData(
- payload_.data(), payload_.size(), rtp_header_);
+ fake_audio_client_.SetNextExpectedResult(3, testing_clock_->NowTicks());
+ FeedOneFrameIntoReceiver();
+ task_runner_->RunTasks();
+ EXPECT_EQ(3, fake_audio_client_.number_times_called());
- receiver_->GetEncodedAudioFrame(frame_encoded_callback);
+ // Move forward another 100 ms and run any pending tasks (there should be
+ // none). Expect no additional frames where emitted.
+ testing_clock_->Advance(base::TimeDelta::FromMilliseconds(100));
task_runner_->RunTasks();
- EXPECT_EQ(3, test_audio_encoder_callback_->number_times_called());
+ EXPECT_EQ(3, fake_audio_client_.number_times_called());
}
-// TODO(mikhal): Add encoded frames.
-TEST_F(AudioReceiverTest, GetRawFrame) {}
-
} // namespace cast
} // namespace media
diff --git a/media/cast/audio_sender/audio_encoder_unittest.cc b/media/cast/audio_sender/audio_encoder_unittest.cc
index 52b8e56334..e2a467ed8d 100644
--- a/media/cast/audio_sender/audio_encoder_unittest.cc
+++ b/media/cast/audio_sender/audio_encoder_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include <sstream>
#include <string>
@@ -20,7 +22,7 @@
namespace media {
namespace cast {
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
namespace {
diff --git a/media/cast/audio_sender/audio_sender_unittest.cc b/media/cast/audio_sender/audio_sender_unittest.cc
index ad0f3db87b..80443cb5fb 100644
--- a/media/cast/audio_sender/audio_sender_unittest.cc
+++ b/media/cast/audio_sender/audio_sender_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/memory/scoped_ptr.h"
@@ -20,7 +22,7 @@
namespace media {
namespace cast {
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
class TestPacketSender : public transport::PacketSender {
public:
diff --git a/media/cast/cast_defines.h b/media/cast/cast_defines.h
index 8826e5d111..802df18515 100644
--- a/media/cast/cast_defines.h
+++ b/media/cast/cast_defines.h
@@ -5,6 +5,8 @@
#ifndef MEDIA_CAST_CAST_DEFINES_H_
#define MEDIA_CAST_CAST_DEFINES_H_
+#include <stdint.h>
+
#include <map>
#include <set>
@@ -21,7 +23,7 @@ const int64 kDontShowTimeoutMs = 33;
const float kDefaultCongestionControlBackOff = 0.875f;
const uint32 kVideoFrequency = 90000;
const int64 kSkippedFramesCheckPeriodkMs = 10000;
-const uint32 kStartFrameId = GG_UINT32_C(0xffffffff);
+const uint32 kStartFrameId = UINT32_C(0xffffffff);
// Number of skipped frames threshold in fps (as configured) per period above.
const int kSkippedFramesThreshold = 3;
@@ -81,7 +83,7 @@ typedef std::map<uint8, PacketIdSet> MissingFramesAndPacketsMap;
// January 1970, in NTP seconds.
// Network Time Protocol (NTP), which is in seconds relative to 0h UTC on
// 1 January 1900.
-static const int64 kUnixEpochInNtpSeconds = GG_INT64_C(2208988800);
+static const int64 kUnixEpochInNtpSeconds = INT64_C(2208988800);
// Magic fractional unit. Used to convert time (in microseconds) to/from
// fractional NTP seconds.
diff --git a/media/cast/cast_receiver.gyp b/media/cast/cast_receiver.gyp
index c6087ac1b2..087b219795 100644
--- a/media/cast/cast_receiver.gyp
+++ b/media/cast/cast_receiver.gyp
@@ -3,6 +3,9 @@
# found in the LICENSE file.
{
+ 'variables': {
+ 'chromium_code': 1,
+ },
'includes': [
'audio_receiver/audio_receiver.gypi',
'video_receiver/video_receiver.gypi',
diff --git a/media/cast/cast_receiver.h b/media/cast/cast_receiver.h
index 581a27171d..b88827cb7a 100644
--- a/media/cast/cast_receiver.h
+++ b/media/cast/cast_receiver.h
@@ -13,6 +13,7 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
+#include "media/base/audio_bus.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
@@ -25,12 +26,21 @@ namespace transport {
class PacketSender;
}
-// Callback in which the raw audio frame and play-out time will be returned
-// once decoding is complete.
-typedef base::Callback<void(scoped_ptr<PcmAudioFrame>, const base::TimeTicks&)>
- AudioFrameDecodedCallback;
-
-// Callback in which the encoded audio frame and play-out time will be returned.
+// Callback in which the raw audio frame, play-out time, and a continuity flag
+// will be returned. |is_continuous| will be false to indicate the loss of
+// audio data due to a loss of frames (or decoding errors). This allows the
+// client to take steps to smooth discontinuities for playback. Note: A NULL
+// AudioBus can be returned when data is not available (e.g., bad packet or when
+// flushing callbacks during shutdown).
+typedef base::Callback<void(scoped_ptr<AudioBus> audio_bus,
+ const base::TimeTicks& playout_time,
+ bool is_continuous)> AudioFrameDecodedCallback;
+
+// Callback in which the encoded audio frame and play-out time will be
+// returned. The client should examine the EncodedAudioFrame::frame_id field to
+// determine whether any frames have been dropped (i.e., frame_id should be
+// incrementing by one each time). Note: A NULL EncodedAudioFrame can be
+// returned on error/shutdown.
typedef base::Callback<void(scoped_ptr<transport::EncodedAudioFrame>,
const base::TimeTicks&)> AudioFrameEncodedCallback;
@@ -46,9 +56,7 @@ typedef base::Callback<void(scoped_ptr<transport::EncodedVideoFrame>,
// This Class is thread safe.
class FrameReceiver : public base::RefCountedThreadSafe<FrameReceiver> {
public:
- virtual void GetRawAudioFrame(int number_of_10ms_blocks,
- int desired_frequency,
- const AudioFrameDecodedCallback& callback) = 0;
+ virtual void GetRawAudioFrame(const AudioFrameDecodedCallback& callback) = 0;
virtual void GetCodedAudioFrame(
const AudioFrameEncodedCallback& callback) = 0;
diff --git a/media/cast/cast_receiver_impl.cc b/media/cast/cast_receiver_impl.cc
index cee29ecb06..2712d7a1d1 100644
--- a/media/cast/cast_receiver_impl.cc
+++ b/media/cast/cast_receiver_impl.cc
@@ -12,13 +12,6 @@
namespace media {
namespace cast {
-// The callback should not be used, as the receiver is using the external
-// transport. Implementation is required as the pacer is common to sender and
-// receiver.
-static void DoNothingCastTransportStatus(
- transport::CastTransportStatus status) {
- NOTREACHED() << "Internal transport used in CastReceiver";
-}
// The video and audio receivers should only be called from the main thread.
// LocalFrameReciever posts tasks to the main thread, making the cast interface
// thread safe.
@@ -49,16 +42,12 @@ class LocalFrameReceiver : public FrameReceiver {
callback));
}
- virtual void GetRawAudioFrame(int number_of_10ms_blocks,
- int desired_frequency,
- const AudioFrameDecodedCallback& callback)
+ virtual void GetRawAudioFrame(const AudioFrameDecodedCallback& callback)
OVERRIDE {
cast_environment_->PostTask(CastEnvironment::MAIN,
FROM_HERE,
base::Bind(&AudioReceiver::GetRawAudioFrame,
audio_receiver_->AsWeakPtr(),
- number_of_10ms_blocks,
- desired_frequency,
callback));
}
diff --git a/media/cast/cast_sender.gyp b/media/cast/cast_sender.gyp
index 5738106aa9..bc4e825bdd 100644
--- a/media/cast/cast_sender.gyp
+++ b/media/cast/cast_sender.gyp
@@ -3,6 +3,9 @@
# found in the LICENSE file.
{
+ 'variables': {
+ 'chromium_code': 1,
+ },
'includes': [
'audio_sender/audio_sender.gypi',
'congestion_control/congestion_control.gypi',
diff --git a/media/cast/congestion_control/congestion_control_unittest.cc b/media/cast/congestion_control/congestion_control_unittest.cc
index edd1327861..20e023e1e4 100644
--- a/media/cast/congestion_control/congestion_control_unittest.cc
+++ b/media/cast/congestion_control/congestion_control_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
#include "media/cast/congestion_control/congestion_control.h"
@@ -13,7 +15,7 @@ namespace cast {
static const uint32 kMaxBitrateConfigured = 5000000;
static const uint32 kMinBitrateConfigured = 500000;
static const uint32 kStartBitrate = 2000000;
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
static const int64 kRttMs = 20;
static const int64 kAckRateMs = 33;
@@ -35,7 +37,7 @@ class CongestionControlTest : public ::testing::Test {
int runtime_in_seconds) {
const base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(rtt_ms);
const base::TimeDelta ack_rate =
- base::TimeDelta::FromMilliseconds(GG_INT64_C(1000) / fps);
+ base::TimeDelta::FromMilliseconds(INT64_C(1000) / fps);
uint32 new_bitrate = 0;
EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
@@ -160,21 +162,19 @@ TEST_F(CongestionControlTest, Timing) {
}
TEST_F(CongestionControlTest, Convergence24fps) {
- EXPECT_GE(RunWithOneLossEventPerSecond(24, kRttMs, 100),
- GG_UINT32_C(3000000));
+ EXPECT_GE(RunWithOneLossEventPerSecond(24, kRttMs, 100), UINT32_C(3000000));
}
TEST_F(CongestionControlTest, Convergence24fpsLongRtt) {
- EXPECT_GE(RunWithOneLossEventPerSecond(24, 100, 100), GG_UINT32_C(500000));
+ EXPECT_GE(RunWithOneLossEventPerSecond(24, 100, 100), UINT32_C(500000));
}
TEST_F(CongestionControlTest, Convergence60fps) {
- EXPECT_GE(RunWithOneLossEventPerSecond(60, kRttMs, 100),
- GG_UINT32_C(3500000));
+ EXPECT_GE(RunWithOneLossEventPerSecond(60, kRttMs, 100), UINT32_C(3500000));
}
TEST_F(CongestionControlTest, Convergence60fpsLongRtt) {
- EXPECT_GE(RunWithOneLossEventPerSecond(60, 100, 100), GG_UINT32_C(500000));
+ EXPECT_GE(RunWithOneLossEventPerSecond(60, 100, 100), UINT32_C(500000));
}
} // namespace cast
diff --git a/media/cast/framer/cast_message_builder_unittest.cc b/media/cast/framer/cast_message_builder_unittest.cc
index 445b04dea5..4c3e67bd5b 100644
--- a/media/cast/framer/cast_message_builder_unittest.cc
+++ b/media/cast/framer/cast_message_builder_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include "base/memory/scoped_ptr.h"
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/framer/cast_message_builder.h"
@@ -16,7 +18,7 @@ namespace {
static const uint32 kSsrc = 0x1234;
static const uint32 kShortTimeIncrementMs = 10;
static const uint32 kLongTimeIncrementMs = 40;
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
typedef std::map<uint32, size_t> MissingPacketsMap;
diff --git a/media/cast/framer/framer.gyp b/media/cast/framer/framer.gyp
index e1f35cd2f9..e72ac84f3a 100644
--- a/media/cast/framer/framer.gyp
+++ b/media/cast/framer/framer.gyp
@@ -3,6 +3,9 @@
# found in the LICENSE file.
{
+ 'variables': {
+ 'chromium_code': 1,
+ },
'targets': [
{
'target_name': 'cast_framer',
diff --git a/media/cast/logging/logging.gyp b/media/cast/logging/logging.gyp
index 30e8119227..29a96d8a68 100644
--- a/media/cast/logging/logging.gyp
+++ b/media/cast/logging/logging.gyp
@@ -2,6 +2,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
+ 'variables': {
+ 'chromium_code': 1,
+ },
'targets': [
{
'target_name': 'cast_common_logging',
diff --git a/media/cast/logging/logging_impl_unittest.cc b/media/cast/logging/logging_impl_unittest.cc
index d9a67ddefa..4aceecb226 100644
--- a/media/cast/logging/logging_impl_unittest.cc
+++ b/media/cast/logging/logging_impl_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include <vector>
#include "base/rand_util.h"
@@ -21,7 +23,7 @@ const int64 kIntervalTime1S = 1;
// Test frame rate goal - 30fps.
const int kFrameIntervalMs = 33;
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
class LoggingImplTest : public ::testing::Test {
protected:
diff --git a/media/cast/rtcp/rtcp.cc b/media/cast/rtcp/rtcp.cc
index bb19f3e912..c5fbbadc42 100644
--- a/media/cast/rtcp/rtcp.cc
+++ b/media/cast/rtcp/rtcp.cc
@@ -169,8 +169,8 @@ Rtcp::Rtcp(scoped_refptr<CastEnvironment> cast_environment,
remote_ssrc_(remote_ssrc),
c_name_(c_name),
rtp_receiver_statistics_(rtp_receiver_statistics),
- receiver_feedback_(new LocalRtcpReceiverFeedback(this, cast_environment)),
rtt_feedback_(new LocalRtcpRttFeedback(this)),
+ receiver_feedback_(new LocalRtcpReceiverFeedback(this, cast_environment)),
rtcp_sender_(new RtcpSender(cast_environment, paced_packet_sender,
local_ssrc, c_name)),
last_report_received_(0),
diff --git a/media/cast/rtcp/rtcp.gyp b/media/cast/rtcp/rtcp.gyp
index 8ce90d48fb..d6e3e644a6 100644
--- a/media/cast/rtcp/rtcp.gyp
+++ b/media/cast/rtcp/rtcp.gyp
@@ -3,6 +3,9 @@
# found in the LICENSE file.
{
+ 'variables': {
+ 'chromium_code': 1,
+ },
'targets': [
{
'target_name': 'cast_rtcp',
diff --git a/media/cast/rtcp/rtcp_sender.cc b/media/cast/rtcp/rtcp_sender.cc
index dc5b8c8b79..11759457b5 100644
--- a/media/cast/rtcp/rtcp_sender.cc
+++ b/media/cast/rtcp/rtcp_sender.cc
@@ -4,6 +4,8 @@
#include "media/cast/rtcp/rtcp_sender.h"
+#include <stdint.h>
+
#include <algorithm>
#include <vector>
@@ -21,7 +23,7 @@ namespace {
// Max delta is 4095 milliseconds because we need to be able to encode it in
// 12 bits.
-const int64 kMaxWireFormatTimeDeltaMs = GG_INT64_C(0xfff);
+const int64 kMaxWireFormatTimeDeltaMs = INT64_C(0xfff);
// Converts a log event type to an integer value.
// NOTE: We have only allocated 4 bits to represent the type of event over the
@@ -129,7 +131,7 @@ bool BuildRtcpReceiverLogMessage(
std::vector<RtcpReceiverEventLogMessage>::reverse_iterator sorted_rit =
sorted_log_messages.rbegin();
base::TimeTicks first_event_timestamp = sorted_rit->event_timestamp;
- int events_in_frame = 0;
+ size_t events_in_frame = 0;
while (sorted_rit != sorted_log_messages.rend() &&
events_in_frame < kRtcpMaxReceiverLogMessages &&
remaining_space >= kRtcpReceiverEventLogSize) {
@@ -803,7 +805,7 @@ void RtcpSender::BuildReceiverLog(
receiver_log_message.pop_front();
}
}
- DCHECK_EQ(total_number_of_messages_to_send, 0);
+ DCHECK_EQ(total_number_of_messages_to_send, 0u);
}
} // namespace cast
diff --git a/media/cast/rtcp/rtcp_unittest.cc b/media/cast/rtcp/rtcp_unittest.cc
index 628f0d21cb..bdc96bd22c 100644
--- a/media/cast/rtcp/rtcp_unittest.cc
+++ b/media/cast/rtcp/rtcp_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
@@ -24,7 +26,7 @@ static const uint32 kSenderSsrc = 0x10203;
static const uint32 kReceiverSsrc = 0x40506;
static const std::string kCName("test@10.1.1.1");
static const uint32 kRtcpIntervalMs = 500;
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
static const int64 kAddedDelay = 123;
static const int64 kAddedShortDelay = 100;
@@ -487,8 +489,8 @@ TEST_F(RtcpTest, RttWithPacketLoss) {
}
TEST_F(RtcpTest, NtpAndTime) {
- const int64 kSecondsbetweenYear1900and2010 = GG_INT64_C(40176 * 24 * 60 * 60);
- const int64 kSecondsbetweenYear1900and2030 = GG_INT64_C(47481 * 24 * 60 * 60);
+ const int64 kSecondsbetweenYear1900and2010 = INT64_C(40176 * 24 * 60 * 60);
+ const int64 kSecondsbetweenYear1900and2030 = INT64_C(47481 * 24 * 60 * 60);
uint32 ntp_seconds_1 = 0;
uint32 ntp_fractions_1 = 0;
@@ -514,7 +516,7 @@ TEST_F(RtcpTest, NtpAndTime) {
// Verify delta.
EXPECT_EQ((out_2 - out_1), time_delta);
- EXPECT_EQ((ntp_seconds_2 - ntp_seconds_1), GG_UINT32_C(1));
+ EXPECT_EQ((ntp_seconds_2 - ntp_seconds_1), UINT32_C(1));
EXPECT_NEAR(ntp_fractions_2, ntp_fractions_1, 1);
time_delta = base::TimeDelta::FromMilliseconds(500);
diff --git a/media/cast/rtp_receiver/receiver_stats_unittest.cc b/media/cast/rtp_receiver/receiver_stats_unittest.cc
index b2da051a62..eb4b58985e 100644
--- a/media/cast/rtp_receiver/receiver_stats_unittest.cc
+++ b/media/cast/rtp_receiver/receiver_stats_unittest.cc
@@ -4,6 +4,8 @@
#include <gtest/gtest.h>
+#include <stdint.h>
+
#include "base/test/simple_test_tick_clock.h"
#include "base/time/time.h"
#include "media/cast/rtp_receiver/receiver_stats.h"
@@ -12,7 +14,7 @@
namespace media {
namespace cast {
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
static const uint32 kStdTimeIncrementMs = 33;
class ReceiverStatsTest : public ::testing::Test {
diff --git a/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc b/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
index 31fae9a02b..b0d27a2624 100644
--- a/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
+++ b/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
@@ -88,8 +88,8 @@ bool RtpParser::ParseCast(const uint8* packet,
// Extract header.
const uint8* data_ptr = packet;
size_t data_length = length;
- rtp_header->is_key_frame = (data_ptr[0] & kCastKeyFrameBitMask);
- rtp_header->is_reference = (data_ptr[0] & kCastReferenceFrameIdBitMask);
+ rtp_header->is_key_frame = !!(data_ptr[0] & kCastKeyFrameBitMask);
+ rtp_header->is_reference = !!(data_ptr[0] & kCastReferenceFrameIdBitMask);
rtp_header->frame_id = frame_id_wrap_helper_.MapTo32bitsFrameId(data_ptr[1]);
base::BigEndianReader big_endian_reader(
diff --git a/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp b/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp
index 54bb93d4f3..533533fb4c 100644
--- a/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp
+++ b/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp
@@ -3,6 +3,9 @@
# found in the LICENSE file.
{
+ 'variables': {
+ 'chromium_code': 1,
+ },
'targets': [
{
'target_name': 'cast_rtp_parser',
diff --git a/media/cast/rtp_receiver/rtp_receiver.gyp b/media/cast/rtp_receiver/rtp_receiver.gyp
index 3fb454af3a..bd1c1fa5c9 100644
--- a/media/cast/rtp_receiver/rtp_receiver.gyp
+++ b/media/cast/rtp_receiver/rtp_receiver.gyp
@@ -3,6 +3,9 @@
# found in the LICENSE file.
{
+ 'variables': {
+ 'chromium_code': 1,
+ },
'targets': [
{
'target_name': 'cast_rtp_receiver',
diff --git a/media/cast/test/encode_decode_test.cc b/media/cast/test/encode_decode_test.cc
index 3151833e8e..20b1348cb0 100644
--- a/media/cast/test/encode_decode_test.cc
+++ b/media/cast/test/encode_decode_test.cc
@@ -7,6 +7,7 @@
// transport layer, and are targeted at validating the bit stream.
#include <gtest/gtest.h>
+#include <stdint.h>
#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
@@ -20,7 +21,7 @@
namespace media {
namespace cast {
-static const int64 kStartMillisecond = GG_INT64_C(1245);
+static const int64 kStartMillisecond = INT64_C(1245);
static const int kWidth = 1280;
static const int kHeight = 720;
static const int kStartbitrate = 4000000;
@@ -123,7 +124,7 @@ TEST_F(EncodeDecodeTest, BasicEncodeDecode) {
encoder_->Initialize();
// Encode frame.
encoder_->Encode(video_frame_, &encoded_frame);
- EXPECT_GT(encoded_frame.data.size(), GG_UINT64_C(0));
+ EXPECT_GT(encoded_frame.data.size(), UINT64_C(0));
// Decode frame.
decoder_->Decode(&encoded_frame,
base::TimeTicks(),
diff --git a/media/cast/test/end2end_unittest.cc b/media/cast/test/end2end_unittest.cc
index 6949fa88ae..7299501257 100644
--- a/media/cast/test/end2end_unittest.cc
+++ b/media/cast/test/end2end_unittest.cc
@@ -10,6 +10,7 @@
// that moves across the screen
#include <math.h>
+#include <stdint.h>
#include <functional>
#include <list>
@@ -17,9 +18,12 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
+#include "base/sys_byteorder.h"
#include "base/test/simple_test_tick_clock.h"
#include "base/time/tick_clock.h"
+#include "media/base/audio_bus.h"
#include "media/base/video_frame.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
@@ -41,7 +45,7 @@ namespace cast {
namespace {
-static const int64 kStartMillisecond = GG_INT64_C(1245);
+static const int64 kStartMillisecond = INT64_C(1245);
static const int kAudioChannels = 2;
static const double kSoundFrequency = 314.15926535897; // Freq of sine wave.
static const float kSoundVolume = 0.5f;
@@ -73,6 +77,10 @@ static const int kTimerErrorMs = 20;
// effects cause by encoding and quantization.
static const int kVideoStart = 100;
+// The size of audio frames. The encoder joins/breaks all inserted audio into
+// chunks of this size.
+static const int kAudioFrameDurationMs = 10;
+
std::string ConvertFromBase16String(const std::string base_16) {
std::string compressed;
DCHECK_EQ(base_16.size() % 2, 0u) << "Must be a multiple of 2";
@@ -209,8 +217,7 @@ class TestReceiverAudioCallback
: public base::RefCountedThreadSafe<TestReceiverAudioCallback> {
public:
struct ExpectedAudioFrame {
- PcmAudioFrame audio_frame;
- int num_10ms_blocks;
+ scoped_ptr<AudioBus> audio_bus;
base::TimeTicks record_time;
};
@@ -220,111 +227,98 @@ class TestReceiverAudioCallback
expected_sampling_frequency_ = expected_sampling_frequency;
}
- void AddExpectedResult(scoped_ptr<PcmAudioFrame> audio_frame,
- int expected_num_10ms_blocks,
+ void AddExpectedResult(const AudioBus& audio_bus,
const base::TimeTicks& record_time) {
- ExpectedAudioFrame expected_audio_frame;
- expected_audio_frame.audio_frame = *audio_frame;
- expected_audio_frame.num_10ms_blocks = expected_num_10ms_blocks;
- expected_audio_frame.record_time = record_time;
- expected_frame_.push_back(expected_audio_frame);
+ scoped_ptr<ExpectedAudioFrame> expected_audio_frame(
+ new ExpectedAudioFrame());
+ expected_audio_frame->audio_bus =
+ AudioBus::Create(audio_bus.channels(), audio_bus.frames()).Pass();
+ audio_bus.CopyTo(expected_audio_frame->audio_bus.get());
+ expected_audio_frame->record_time = record_time;
+ expected_frames_.push_back(expected_audio_frame.release());
}
- void IgnoreAudioFrame(scoped_ptr<PcmAudioFrame> audio_frame,
- const base::TimeTicks& playout_time) {}
+ void IgnoreAudioFrame(scoped_ptr<AudioBus> audio_bus,
+ const base::TimeTicks& playout_time,
+ bool is_continuous) {
+ ++num_called_;
+ }
- // Check the audio frame parameters but not the audio samples.
- void CheckBasicAudioFrame(const scoped_ptr<PcmAudioFrame>& audio_frame,
- const base::TimeTicks& playout_time) {
- EXPECT_FALSE(expected_frame_.empty()); // Test for bug in test code.
- ExpectedAudioFrame expected_audio_frame = expected_frame_.front();
- EXPECT_EQ(audio_frame->channels, kAudioChannels);
- EXPECT_EQ(audio_frame->frequency, expected_sampling_frequency_);
- EXPECT_EQ(static_cast<int>(audio_frame->samples.size()),
- expected_audio_frame.num_10ms_blocks * kAudioChannels *
- expected_sampling_frequency_ / 100);
+ void CheckAudioFrame(scoped_ptr<AudioBus> audio_bus,
+ const base::TimeTicks& playout_time,
+ bool is_continuous) {
+ ++num_called_;
+
+ ASSERT_FALSE(expected_frames_.empty());
+ const scoped_ptr<ExpectedAudioFrame> expected_audio_frame(
+ expected_frames_.front());
+ expected_frames_.pop_front();
+
+ EXPECT_EQ(audio_bus->channels(), kAudioChannels);
+ EXPECT_EQ(audio_bus->frames(), expected_audio_frame->audio_bus->frames());
+ for (int ch = 0; ch < audio_bus->channels(); ++ch) {
+ EXPECT_NEAR(CountZeroCrossings(
+ expected_audio_frame->audio_bus->channel(ch),
+ expected_audio_frame->audio_bus->frames()),
+ CountZeroCrossings(audio_bus->channel(ch),
+ audio_bus->frames()),
+ 1);
+ }
+ // TODO(miu): This is a "fuzzy" way to check the timestamps. We should be
+ // able to compute exact offsets with "omnipotent" knowledge of the system.
const base::TimeTicks upper_bound =
- expected_audio_frame.record_time +
+ expected_audio_frame->record_time +
base::TimeDelta::FromMilliseconds(kDefaultRtpMaxDelayMs +
kTimerErrorMs);
EXPECT_GE(upper_bound, playout_time)
<< "playout_time - upper_bound == "
<< (playout_time - upper_bound).InMicroseconds() << " usec";
- EXPECT_LT(expected_audio_frame.record_time, playout_time)
- << "playout_time - expected == "
- << (playout_time - expected_audio_frame.record_time).InMilliseconds()
- << " mS";
-
- EXPECT_EQ(audio_frame->samples.size(),
- expected_audio_frame.audio_frame.samples.size());
- }
-
- void CheckPcmAudioFrame(scoped_ptr<PcmAudioFrame> audio_frame,
- const base::TimeTicks& playout_time) {
- ++num_called_;
-
- CheckBasicAudioFrame(audio_frame, playout_time);
- ExpectedAudioFrame expected_audio_frame = expected_frame_.front();
- expected_frame_.pop_front();
- if (audio_frame->samples.size() == 0)
- return; // No more checks needed.
- EXPECT_NEAR(CountZeroCrossings(expected_audio_frame.audio_frame.samples),
- CountZeroCrossings(audio_frame->samples),
- 1);
+ EXPECT_TRUE(is_continuous);
}
- void CheckCodedPcmAudioFrame(
+ void CheckCodedAudioFrame(
scoped_ptr<transport::EncodedAudioFrame> audio_frame,
const base::TimeTicks& playout_time) {
- ++num_called_;
-
- EXPECT_FALSE(expected_frame_.empty()); // Test for bug in test code.
- ExpectedAudioFrame expected_audio_frame = expected_frame_.front();
- expected_frame_.pop_front();
-
- EXPECT_EQ(static_cast<int>(audio_frame->data.size()),
- 2 * kAudioChannels * expected_sampling_frequency_ / 100);
-
- base::TimeDelta time_since_recording =
- playout_time - expected_audio_frame.record_time;
-
- EXPECT_LE(time_since_recording,
- base::TimeDelta::FromMilliseconds(kDefaultRtpMaxDelayMs +
- kTimerErrorMs));
-
- EXPECT_LT(expected_audio_frame.record_time, playout_time);
- if (audio_frame->data.size() == 0)
- return; // No more checks needed.
-
- // We need to convert our "coded" audio frame to our raw format.
- std::vector<int16> output_audio_samples;
- size_t number_of_samples = audio_frame->data.size() / 2;
-
- for (size_t i = 0; i < number_of_samples; ++i) {
- uint16 sample =
- static_cast<uint8>(audio_frame->data[1 + i * sizeof(uint16)]) +
- (static_cast<uint16>(audio_frame->data[i * sizeof(uint16)]) << 8);
- output_audio_samples.push_back(static_cast<int16>(sample));
- }
-
- EXPECT_NEAR(CountZeroCrossings(expected_audio_frame.audio_frame.samples),
- CountZeroCrossings(output_audio_samples),
- 1);
+ ASSERT_FALSE(expected_frames_.empty());
+ const ExpectedAudioFrame& expected_audio_frame =
+ *(expected_frames_.front());
+ // Note: Just peeking here. Will delegate to CheckAudioFrame() to pop.
+
+ // We need to "decode" the encoded audio frame. The codec is simply to
+ // swizzle the bytes of each int16 from host-->network-->host order to get
+ // interleaved int16 PCM. Then, make an AudioBus out of that.
+ const int num_elements = audio_frame->data.size() / sizeof(int16);
+ ASSERT_EQ(expected_audio_frame.audio_bus->channels() *
+ expected_audio_frame.audio_bus->frames(),
+ num_elements);
+ int16* const pcm_data =
+ reinterpret_cast<int16*>(string_as_array(&audio_frame->data));
+ for (int i = 0; i < num_elements; ++i)
+ pcm_data[i] = static_cast<int16>(base::NetToHost16(pcm_data[i]));
+ scoped_ptr<AudioBus> audio_bus(
+ AudioBus::Create(expected_audio_frame.audio_bus->channels(),
+ expected_audio_frame.audio_bus->frames()));
+ audio_bus->FromInterleaved(pcm_data, audio_bus->frames(), sizeof(int16));
+
+ // Delegate the checking from here...
+ CheckAudioFrame(audio_bus.Pass(), playout_time, true);
}
int number_times_called() const { return num_called_; }
protected:
- virtual ~TestReceiverAudioCallback() {}
+ virtual ~TestReceiverAudioCallback() {
+ STLDeleteElements(&expected_frames_);
+ }
private:
friend class base::RefCountedThreadSafe<TestReceiverAudioCallback>;
int num_called_;
int expected_sampling_frequency_;
- std::list<ExpectedAudioFrame> expected_frame_;
+ std::list<ExpectedAudioFrame*> expected_frames_;
};
// Class that verifies the video frames coming out of the receiver.
@@ -365,9 +359,11 @@ class TestReceiverVideoCallback
const base::TimeDelta upper_bound = base::TimeDelta::FromMilliseconds(
kDefaultRtpMaxDelayMs + kTimerErrorMs);
+ // TODO(miu): This is a "fuzzy" way to check the timestamps. We should be
+ // able to compute exact offsets with "omnipotent" knowledge of the system.
EXPECT_GE(upper_bound, time_since_capture)
<< "time_since_capture - upper_bound == "
- << (time_since_capture - upper_bound).InMilliseconds() << " mS";
+ << (time_since_capture - upper_bound).InMicroseconds() << " usec";
EXPECT_LE(expected_video_frame.capture_time, render_time);
EXPECT_EQ(expected_video_frame.width, video_frame->visible_rect().width());
EXPECT_EQ(expected_video_frame.height,
@@ -426,11 +422,10 @@ class End2EndTest : public ::testing::Test {
&event_subscriber_sender_);
}
- void SetupConfig(transport::AudioCodec audio_codec,
- int audio_sampling_frequency,
- // TODO(miu): 3rd arg is meaningless?!?
- bool external_audio_decoder,
- int max_number_of_video_buffers_used) {
+ void Configure(transport::AudioCodec audio_codec,
+ int audio_sampling_frequency,
+ bool external_audio_decoder,
+ int max_number_of_video_buffers_used) {
audio_sender_config_.sender_ssrc = 1;
audio_sender_config_.incoming_feedback_ssrc = 2;
audio_sender_config_.rtp_config.payload_type = 96;
@@ -488,6 +483,42 @@ class End2EndTest : public ::testing::Test {
transport_video_config_.base.rtp_config = video_sender_config_.rtp_config;
}
+ void FeedAudioFrames(int count, bool will_be_checked) {
+ for (int i = 0; i < count; ++i) {
+ scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
+ base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs)));
+ const base::TimeTicks send_time =
+ testing_clock_sender_->NowTicks() +
+ i * base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs);
+ if (will_be_checked)
+ test_receiver_audio_callback_->AddExpectedResult(*audio_bus, send_time);
+ audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
+ }
+ }
+
+ void FeedAudioFramesWithExpectedDelay(int count,
+ const base::TimeDelta& delay) {
+ for (int i = 0; i < count; ++i) {
+ scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
+ base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs)));
+ const base::TimeTicks send_time =
+ testing_clock_sender_->NowTicks() +
+ i * base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs);
+ test_receiver_audio_callback_->AddExpectedResult(*audio_bus,
+ send_time + delay);
+ audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
+ }
+ }
+
+ void RequestAudioFrames(int count, bool with_check) {
+ for (int i = 0; i < count; ++i) {
+ frame_receiver_->GetRawAudioFrame(
+ base::Bind(with_check ? &TestReceiverAudioCallback::CheckAudioFrame :
+ &TestReceiverAudioCallback::IgnoreAudioFrame,
+ test_receiver_audio_callback_));
+ }
+ }
+
void Create() {
cast_receiver_ = CastReceiver::Create(cast_environment_receiver_,
audio_receiver_config_,
@@ -620,146 +651,103 @@ class End2EndTest : public ::testing::Test {
};
TEST_F(End2EndTest, LoopNoLossPcm16) {
- SetupConfig(transport::kPcm16, 32000, false, 1);
+ Configure(transport::kPcm16, 32000, false, 1);
// Reduce video resolution to allow processing multiple frames within a
// reasonable time frame.
video_sender_config_.width = kVideoQcifWidth;
video_sender_config_.height = kVideoQcifHeight;
Create();
+ const int kNumIterations = 50;
int video_start = kVideoStart;
int audio_diff = kFrameTimerMs;
- int i = 0;
-
- for (; i < 300; ++i) {
- int num_10ms_blocks = audio_diff / 10;
- audio_diff -= num_10ms_blocks * 10;
-
- scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
- base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
+ int num_audio_frames_requested = 0;
+ for (int i = 0; i < kNumIterations; ++i) {
+ const int num_audio_frames = audio_diff / kAudioFrameDurationMs;
+ audio_diff -= num_audio_frames * kAudioFrameDurationMs;
- base::TimeTicks send_time = testing_clock_sender_->NowTicks();
- if (i != 0) {
- // Due to the re-sampler and NetEq in the webrtc AudioCodingModule the
- // first samples will be 0 and then slowly ramp up to its real
- // amplitude;
- // ignore the first frame.
- test_receiver_audio_callback_->AddExpectedResult(
- ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
- num_10ms_blocks,
- send_time);
- }
-
- audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
+ if (num_audio_frames > 0)
+ FeedAudioFrames(1, true);
test_receiver_video_callback_->AddExpectedResult(
video_start,
video_sender_config_.width,
video_sender_config_.height,
- send_time);
- SendVideoFrame(video_start, send_time);
+ testing_clock_sender_->NowTicks());
+ SendVideoFrame(video_start, testing_clock_sender_->NowTicks());
- if (i == 0) {
- frame_receiver_->GetRawAudioFrame(
- num_10ms_blocks,
- audio_sender_config_.frequency,
- base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame,
- test_receiver_audio_callback_));
- } else {
- frame_receiver_->GetRawAudioFrame(
- num_10ms_blocks,
- audio_sender_config_.frequency,
- base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame,
- test_receiver_audio_callback_));
- }
+ if (num_audio_frames > 0)
+ RunTasks(kAudioFrameDurationMs); // Advance clock forward.
+ if (num_audio_frames > 1)
+ FeedAudioFrames(num_audio_frames - 1, true);
+
+ RequestAudioFrames(num_audio_frames, true);
+ num_audio_frames_requested += num_audio_frames;
frame_receiver_->GetRawVideoFrame(
base::Bind(&TestReceiverVideoCallback::CheckVideoFrame,
test_receiver_video_callback_));
- RunTasks(kFrameTimerMs);
+ RunTasks(kFrameTimerMs - kAudioFrameDurationMs);
audio_diff += kFrameTimerMs;
video_start++;
}
RunTasks(2 * kFrameTimerMs + 1); // Empty the receiver pipeline.
- EXPECT_EQ(i - 1, test_receiver_audio_callback_->number_times_called());
- EXPECT_EQ(i, test_receiver_video_callback_->number_times_called());
+ EXPECT_EQ(num_audio_frames_requested,
+ test_receiver_audio_callback_->number_times_called());
+ EXPECT_EQ(kNumIterations,
+ test_receiver_video_callback_->number_times_called());
}
// This tests our external decoder interface for Audio.
// Audio test without packet loss using raw PCM 16 audio "codec";
TEST_F(End2EndTest, LoopNoLossPcm16ExternalDecoder) {
- SetupConfig(transport::kPcm16, 32000, true, 1);
+ Configure(transport::kPcm16, 32000, true, 1);
Create();
- int i = 0;
- for (; i < 10; ++i) {
- base::TimeTicks send_time = testing_clock_sender_->NowTicks();
- scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
- base::TimeDelta::FromMilliseconds(10)));
- test_receiver_audio_callback_->AddExpectedResult(
- ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
- 1,
- send_time);
-
- audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
-
- RunTasks(10);
+ const int kNumIterations = 10;
+ for (int i = 0; i < kNumIterations; ++i) {
+ FeedAudioFrames(1, true);
+ RunTasks(kAudioFrameDurationMs);
frame_receiver_->GetCodedAudioFrame(
- base::Bind(&TestReceiverAudioCallback::CheckCodedPcmAudioFrame,
+ base::Bind(&TestReceiverAudioCallback::CheckCodedAudioFrame,
test_receiver_audio_callback_));
}
RunTasks(2 * kFrameTimerMs + 1); // Empty the receiver pipeline.
- EXPECT_EQ(10, test_receiver_audio_callback_->number_times_called());
+ EXPECT_EQ(kNumIterations,
+ test_receiver_audio_callback_->number_times_called());
}
// This tests our Opus audio codec without video.
TEST_F(End2EndTest, LoopNoLossOpus) {
- SetupConfig(transport::kOpus, kDefaultAudioSamplingRate, false, 1);
+ Configure(transport::kOpus, kDefaultAudioSamplingRate, false, 1);
Create();
- int i = 0;
- for (; i < 10; ++i) {
- int num_10ms_blocks = 3;
- base::TimeTicks send_time = testing_clock_sender_->NowTicks();
-
- scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
- base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
-
- if (i != 0) {
- test_receiver_audio_callback_->AddExpectedResult(
- ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
- num_10ms_blocks,
- send_time);
- }
-
- audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
-
- RunTasks(30);
-
- if (i == 0) {
- frame_receiver_->GetRawAudioFrame(
- num_10ms_blocks,
- audio_sender_config_.frequency,
- base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame,
- test_receiver_audio_callback_));
- } else {
- frame_receiver_->GetRawAudioFrame(
- num_10ms_blocks,
- audio_sender_config_.frequency,
- base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame,
- test_receiver_audio_callback_));
- }
+ const int kNumIterations = 300;
+ for (int i = 0; i < kNumIterations; ++i) {
+ // Opus introduces a tiny delay before the sinewave starts; so don't examine
+ // the first frame.
+ const bool examine_audio_data = i > 0;
+ FeedAudioFrames(1, examine_audio_data);
+ RunTasks(kAudioFrameDurationMs);
+ RequestAudioFrames(1, examine_audio_data);
}
RunTasks(2 * kFrameTimerMs + 1); // Empty the receiver pipeline.
- EXPECT_EQ(i - 1, test_receiver_audio_callback_->number_times_called());
+ EXPECT_EQ(kNumIterations,
+ test_receiver_audio_callback_->number_times_called());
}
// This tests start sending audio and video at start-up time before the receiver
// is ready; it sends 2 frames before the receiver comes online.
-TEST_F(End2EndTest, StartSenderBeforeReceiver) {
- SetupConfig(transport::kOpus, kDefaultAudioSamplingRate, false, 1);
+//
+// Test disabled due to flakiness: It appears that the RTCP synchronization
+// sometimes kicks in, and sometimes doesn't. When it does, there's a sharp
+// discontinuity in the timeline, throwing off the test expectations. See TODOs
+// in audio_receiver.cc for likely cause(s) of this bug.
+// http://crbug.com/356942
+TEST_F(End2EndTest, DISABLED_StartSenderBeforeReceiver) {
+ Configure(transport::kPcm16, kDefaultAudioSamplingRate, false, 1);
Create();
int video_start = kVideoStart;
@@ -769,18 +757,16 @@ TEST_F(End2EndTest, StartSenderBeforeReceiver) {
const int test_delay_ms = 100;
- base::TimeTicks initial_send_time;
- for (int i = 0; i < 2; ++i) {
- int num_10ms_blocks = audio_diff / 10;
- audio_diff -= num_10ms_blocks * 10;
-
- base::TimeTicks send_time = testing_clock_sender_->NowTicks();
- if (initial_send_time.is_null())
- initial_send_time = send_time;
- scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
- base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
+ const int kNumVideoFramesBeforeReceiverStarted = 2;
+ const base::TimeTicks initial_send_time = testing_clock_sender_->NowTicks();
+ const base::TimeDelta expected_delay =
+ base::TimeDelta::FromMilliseconds(test_delay_ms + kFrameTimerMs);
+ for (int i = 0; i < kNumVideoFramesBeforeReceiverStarted; ++i) {
+ const int num_audio_frames = audio_diff / kAudioFrameDurationMs;
+ audio_diff -= num_audio_frames * kAudioFrameDurationMs;
- audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
+ if (num_audio_frames > 0)
+ FeedAudioFramesWithExpectedDelay(1, expected_delay);
// Frame will be rendered with 100mS delay, as the transmission is delayed.
// The receiver at this point cannot be synced to the sender's clock, as no
@@ -789,11 +775,15 @@ TEST_F(End2EndTest, StartSenderBeforeReceiver) {
video_start,
video_sender_config_.width,
video_sender_config_.height,
- initial_send_time +
- base::TimeDelta::FromMilliseconds(test_delay_ms + kFrameTimerMs));
+ initial_send_time + expected_delay);
+ SendVideoFrame(video_start, testing_clock_sender_->NowTicks());
- SendVideoFrame(video_start, send_time);
- RunTasks(kFrameTimerMs);
+ if (num_audio_frames > 0)
+ RunTasks(kAudioFrameDurationMs); // Advance clock forward.
+ if (num_audio_frames > 1)
+ FeedAudioFramesWithExpectedDelay(num_audio_frames - 1, expected_delay);
+
+ RunTasks(kFrameTimerMs - kAudioFrameDurationMs);
audio_diff += kFrameTimerMs;
video_start++;
}
@@ -801,63 +791,47 @@ TEST_F(End2EndTest, StartSenderBeforeReceiver) {
RunTasks(test_delay_ms);
sender_to_receiver_.SetSendPackets(true);
- int j = 0;
- const int number_of_audio_frames_to_ignore = 2;
- for (; j < 10; ++j) {
- int num_10ms_blocks = audio_diff / 10;
- audio_diff -= num_10ms_blocks * 10;
- base::TimeTicks send_time = testing_clock_sender_->NowTicks();
-
- scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
- base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
-
- if (j >= number_of_audio_frames_to_ignore) {
- test_receiver_audio_callback_->AddExpectedResult(
- ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
- num_10ms_blocks,
- send_time);
- }
+ int num_audio_frames_requested = 0;
+ for (int j = 0; j < 10; ++j) {
+ const int num_audio_frames = audio_diff / kAudioFrameDurationMs;
+ audio_diff -= num_audio_frames * kAudioFrameDurationMs;
- audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
+ if (num_audio_frames > 0)
+ FeedAudioFrames(1, true);
test_receiver_video_callback_->AddExpectedResult(
video_start,
video_sender_config_.width,
video_sender_config_.height,
- send_time);
+ testing_clock_sender_->NowTicks());
+ SendVideoFrame(video_start, testing_clock_sender_->NowTicks());
- SendVideoFrame(video_start, send_time);
- RunTasks(kFrameTimerMs);
- audio_diff += kFrameTimerMs;
+ if (num_audio_frames > 0)
+ RunTasks(kAudioFrameDurationMs); // Advance clock forward.
+ if (num_audio_frames > 1)
+ FeedAudioFrames(num_audio_frames - 1, true);
+
+ RequestAudioFrames(num_audio_frames, true);
+ num_audio_frames_requested += num_audio_frames;
- if (j < number_of_audio_frames_to_ignore) {
- frame_receiver_->GetRawAudioFrame(
- num_10ms_blocks,
- audio_sender_config_.frequency,
- base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame,
- test_receiver_audio_callback_));
- } else {
- frame_receiver_->GetRawAudioFrame(
- num_10ms_blocks,
- audio_sender_config_.frequency,
- base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame,
- test_receiver_audio_callback_));
- }
frame_receiver_->GetRawVideoFrame(
base::Bind(&TestReceiverVideoCallback::CheckVideoFrame,
test_receiver_video_callback_));
+
+ RunTasks(kFrameTimerMs - kAudioFrameDurationMs);
+ audio_diff += kFrameTimerMs;
video_start++;
}
RunTasks(2 * kFrameTimerMs + 1); // Empty the receiver pipeline.
- EXPECT_EQ(j - number_of_audio_frames_to_ignore,
+ EXPECT_EQ(num_audio_frames_requested,
test_receiver_audio_callback_->number_times_called());
- EXPECT_EQ(j, test_receiver_video_callback_->number_times_called());
+ EXPECT_EQ(10, test_receiver_video_callback_->number_times_called());
}
// This tests a network glitch lasting for 10 video frames.
// Flaky. See crbug.com/351596.
TEST_F(End2EndTest, DISABLED_GlitchWith3Buffers) {
- SetupConfig(transport::kOpus, kDefaultAudioSamplingRate, false, 3);
+ Configure(transport::kOpus, kDefaultAudioSamplingRate, false, 3);
video_sender_config_.rtp_config.max_delay_ms = 67;
video_receiver_config_.rtp_max_delay_ms = 67;
Create();
@@ -916,7 +890,7 @@ TEST_F(End2EndTest, DISABLED_GlitchWith3Buffers) {
}
TEST_F(End2EndTest, DropEveryOtherFrame3Buffers) {
- SetupConfig(transport::kOpus, kDefaultAudioSamplingRate, false, 3);
+ Configure(transport::kOpus, kDefaultAudioSamplingRate, false, 3);
video_sender_config_.rtp_config.max_delay_ms = 67;
video_receiver_config_.rtp_max_delay_ms = 67;
Create();
@@ -952,7 +926,7 @@ TEST_F(End2EndTest, DropEveryOtherFrame3Buffers) {
}
TEST_F(End2EndTest, ResetReferenceFrameId) {
- SetupConfig(transport::kOpus, kDefaultAudioSamplingRate, false, 3);
+ Configure(transport::kOpus, kDefaultAudioSamplingRate, false, 3);
video_sender_config_.rtp_config.max_delay_ms = 67;
video_receiver_config_.rtp_max_delay_ms = 67;
Create();
@@ -982,7 +956,7 @@ TEST_F(End2EndTest, ResetReferenceFrameId) {
}
TEST_F(End2EndTest, CryptoVideo) {
- SetupConfig(transport::kPcm16, 32000, false, 1);
+ Configure(transport::kPcm16, 32000, false, 1);
transport_video_config_.base.aes_iv_mask =
ConvertFromBase16String("1234567890abcdeffedcba0987654321");
@@ -1019,7 +993,7 @@ TEST_F(End2EndTest, CryptoVideo) {
}
TEST_F(End2EndTest, CryptoAudio) {
- SetupConfig(transport::kPcm16, 32000, false, 1);
+ Configure(transport::kPcm16, 32000, false, 1);
transport_audio_config_.base.aes_iv_mask =
ConvertFromBase16String("abcdeffedcba12345678900987654321");
@@ -1031,52 +1005,22 @@ TEST_F(End2EndTest, CryptoAudio) {
Create();
- int frames_counter = 0;
- for (; frames_counter < 3; ++frames_counter) {
- int num_10ms_blocks = 2;
-
- const base::TimeTicks send_time = testing_clock_sender_->NowTicks();
-
- scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
- base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
-
- if (frames_counter != 0) {
- // Due to the re-sampler and NetEq in the webrtc AudioCodingModule the
- // first samples will be 0 and then slowly ramp up to its real
- // amplitude;
- // ignore the first frame.
- test_receiver_audio_callback_->AddExpectedResult(
- ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
- num_10ms_blocks,
- send_time);
- }
- audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
-
- RunTasks(num_10ms_blocks * 10);
-
- if (frames_counter == 0) {
- frame_receiver_->GetRawAudioFrame(
- num_10ms_blocks,
- 32000,
- base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame,
- test_receiver_audio_callback_));
- } else {
- frame_receiver_->GetRawAudioFrame(
- num_10ms_blocks,
- 32000,
- base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame,
- test_receiver_audio_callback_));
- }
+ const int kNumIterations = 3;
+ const int kNumAudioFramesPerIteration = 2;
+ for (int i = 0; i < kNumIterations; ++i) {
+ FeedAudioFrames(kNumAudioFramesPerIteration, true);
+ RunTasks(kNumAudioFramesPerIteration * kAudioFrameDurationMs);
+ RequestAudioFrames(kNumAudioFramesPerIteration, true);
}
RunTasks(2 * kFrameTimerMs + 1); // Empty the pipeline.
- EXPECT_EQ(frames_counter - 1,
+ EXPECT_EQ(kNumIterations * kNumAudioFramesPerIteration,
test_receiver_audio_callback_->number_times_called());
}
// Video test without packet loss - tests the logging aspects of the end2end,
// but is basically equivalent to LoopNoLossPcm16.
TEST_F(End2EndTest, VideoLogging) {
- SetupConfig(transport::kPcm16, 32000, false, 1);
+ Configure(transport::kPcm16, 32000, false, 1);
Create();
int video_start = kVideoStart;
@@ -1197,58 +1141,30 @@ TEST_F(End2EndTest, VideoLogging) {
// Audio test without packet loss - tests the logging aspects of the end2end,
// but is basically equivalent to LoopNoLossPcm16.
TEST_F(End2EndTest, AudioLogging) {
- SetupConfig(transport::kPcm16, 32000, false, 1);
+ Configure(transport::kPcm16, 32000, false, 1);
Create();
int audio_diff = kFrameTimerMs;
- const int num_audio_buses = 10;
- int num_frames = 0;
- for (int i = 0; i < num_audio_buses; ++i) {
- int num_10ms_blocks = audio_diff / 10;
- audio_diff -= num_10ms_blocks * 10;
- base::TimeTicks send_time = testing_clock_sender_->NowTicks();
-
- // Each audio bus can contain more than one frame.
- scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
- base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
- num_frames += num_10ms_blocks;
-
- if (i != 0) {
- // Due to the re-sampler and NetEq in the webrtc AudioCodingModule the
- // first samples will be 0 and then slowly ramp up to its real
- // amplitude;
- // ignore the first frame.
- test_receiver_audio_callback_->AddExpectedResult(
- ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
- num_10ms_blocks,
- send_time);
- }
+ const int kNumVideoFrames = 10;
+ int num_audio_frames_requested = 0;
+ for (int i = 0; i < kNumVideoFrames; ++i) {
+ const int num_audio_frames = audio_diff / kAudioFrameDurationMs;
+ audio_diff -= num_audio_frames * kAudioFrameDurationMs;
- audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
+ FeedAudioFrames(num_audio_frames, true);
RunTasks(kFrameTimerMs);
audio_diff += kFrameTimerMs;
- if (i == 0) {
- frame_receiver_->GetRawAudioFrame(
- num_10ms_blocks,
- audio_sender_config_.frequency,
- base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame,
- test_receiver_audio_callback_));
- } else {
- frame_receiver_->GetRawAudioFrame(
- num_10ms_blocks,
- audio_sender_config_.frequency,
- base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame,
- test_receiver_audio_callback_));
- }
+ RequestAudioFrames(num_audio_frames, true);
+ num_audio_frames_requested += num_audio_frames;
}
// Basic tests.
RunTasks(2 * kFrameTimerMs + 1); // Empty the receiver pipeline.
- int num_times_called = test_receiver_audio_callback_->number_times_called();
- EXPECT_EQ(num_audio_buses - 1, num_times_called);
+ EXPECT_EQ(num_audio_frames_requested,
+ test_receiver_audio_callback_->number_times_called());
// Logging tests.
// Verify that all frames and all required events were logged.
@@ -1271,8 +1187,8 @@ TEST_F(End2EndTest, AudioLogging) {
encoded_count += it->second.counter[kAudioFrameEncoded];
}
- EXPECT_EQ(num_frames, received_count);
- EXPECT_EQ(num_frames, encoded_count);
+ EXPECT_EQ(num_audio_frames_requested, received_count);
+ EXPECT_EQ(num_audio_frames_requested, encoded_count);
std::map<RtpTimestamp, LoggingEventCounts>::iterator map_it =
event_counter_for_frame.begin();
diff --git a/media/cast/test/utility/audio_utility.cc b/media/cast/test/utility/audio_utility.cc
index f8e4957b7c..e3a913d1dd 100644
--- a/media/cast/test/utility/audio_utility.cc
+++ b/media/cast/test/utility/audio_utility.cc
@@ -53,6 +53,26 @@ scoped_ptr<PcmAudioFrame> ToPcmAudioFrame(const AudioBus& audio_bus,
return audio_frame.Pass();
}
+int CountZeroCrossings(const float* samples, int len) {
+ // The sample values must pass beyond |kAmplitudeThreshold| on the opposite
+ // side of zero before a crossing will be counted.
+ const float kAmplitudeThreshold = 0.03f; // 3% of max amplitude.
+
+ int count = 0;
+ int i = 0;
+ float last = 0.0f;
+ for (; i < len && fabsf(last) < kAmplitudeThreshold; ++i)
+ last = samples[i];
+ for (; i < len; ++i) {
+ if (fabsf(samples[i]) >= kAmplitudeThreshold &&
+ (last < 0) != (samples[i] < 0)) {
+ ++count;
+ last = samples[i];
+ }
+ }
+ return count;
+}
+
int CountZeroCrossings(const std::vector<int16>& samples) {
// The sample values must pass beyond |kAmplitudeThreshold| on the opposite
// side of zero before a crossing will be counted.
@@ -113,7 +133,7 @@ bool EncodeTimestamp(uint16 timestamp,
// gray-code the number
timestamp = (timestamp >> 1) ^ timestamp;
std::vector<double> frequencies;
- for (int i = 0; i < kNumBits; i++) {
+ for (size_t i = 0; i < kNumBits; i++) {
if ((timestamp >> i) & 1) {
frequencies.push_back(kBaseFrequency * (i+1));
}
@@ -165,7 +185,7 @@ bool DecodeTimestamp(const std::vector<int16>& samples, uint16* timestamp) {
if (sense < kMinSense) continue;
bool success = true;
uint16 gray_coded = 0;
- for (int bit = 0; success && bit < kNumBits; bit++) {
+ for (size_t bit = 0; success && bit < kNumBits; bit++) {
double signal_strength = DecodeOneFrequency(
&samples[start],
kSamplesToAnalyze,
diff --git a/media/cast/test/utility/audio_utility.h b/media/cast/test/utility/audio_utility.h
index 7cc9b7d74c..1cb0585df0 100644
--- a/media/cast/test/utility/audio_utility.h
+++ b/media/cast/test/utility/audio_utility.h
@@ -56,6 +56,8 @@ scoped_ptr<PcmAudioFrame> ToPcmAudioFrame(const AudioBus& audio_bus,
// Assuming |samples| contains a single-frequency sine wave (and maybe some
// low-amplitude noise), count the number of times the sine wave crosses
// zero.
+int CountZeroCrossings(const float* samples, int len);
+// DEPRECATED:
int CountZeroCrossings(const std::vector<int16>& samples);
// Encode |timestamp| into the samples pointed to by 'samples' in a way
diff --git a/media/cast/test/utility/barcode.cc b/media/cast/test/utility/barcode.cc
index fbb17f18e8..cf279375b1 100644
--- a/media/cast/test/utility/barcode.cc
+++ b/media/cast/test/utility/barcode.cc
@@ -123,7 +123,6 @@ bool DecodeBarcode(const scoped_refptr<VideoFrame>& frame,
if (i[2] > unit_size * 2 || i[2] < unit_size / 2) valid = false;
if (i[3] > unit_size * 2 || i[3] < unit_size / 2) valid = false;
i += 4;
- uint64 return_value = 0;
for (size_t bit = 0; valid && bit < output->size(); bit++) {
if (i[0] > unit_size / 2 && i[0] <= unit_size * 1.5 &&
i[1] > unit_size * 1.5 && i[1] <= unit_size * 3) {
diff --git a/media/cast/test/utility/in_process_receiver.cc b/media/cast/test/utility/in_process_receiver.cc
index c8dfc28502..2b67666e73 100644
--- a/media/cast/test/utility/in_process_receiver.cc
+++ b/media/cast/test/utility/in_process_receiver.cc
@@ -5,6 +5,7 @@
#include "media/cast/test/utility/in_process_receiver.h"
#include "base/bind_helpers.h"
+#include "base/synchronization/waitable_event.h"
#include "base/time/time.h"
#include "media/base/video_frame.h"
#include "media/cast/cast_config.h"
@@ -33,7 +34,7 @@ InProcessReceiver::InProcessReceiver(
weak_factory_(this) {}
InProcessReceiver::~InProcessReceiver() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ Stop();
}
void InProcessReceiver::Start() {
@@ -43,6 +44,28 @@ void InProcessReceiver::Start() {
base::Unretained(this)));
}
+void InProcessReceiver::Stop() {
+ base::WaitableEvent event(false, false);
+ if (cast_environment_->CurrentlyOn(CastEnvironment::MAIN)) {
+ StopOnMainThread(&event);
+ } else {
+ cast_environment_->PostTask(CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&InProcessReceiver::StopOnMainThread,
+ base::Unretained(this),
+ &event));
+ event.Wait();
+ }
+}
+
+void InProcessReceiver::StopOnMainThread(base::WaitableEvent* event) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ cast_receiver_.reset(NULL);
+ transport_.reset(NULL);
+ weak_factory_.InvalidateWeakPtrs();
+ event->Signal();
+}
+
void InProcessReceiver::DestroySoon() {
cast_environment_->PostTask(
CastEnvironment::MAIN,
@@ -77,11 +100,22 @@ void InProcessReceiver::StartOnMainThread() {
PullNextVideoFrame();
}
-void InProcessReceiver::GotAudioFrame(scoped_ptr<PcmAudioFrame> audio_frame,
- const base::TimeTicks& playout_time) {
+void InProcessReceiver::GotAudioFrame(scoped_ptr<AudioBus> audio_frame,
+ const base::TimeTicks& playout_time,
+ bool is_continuous) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- OnAudioFrame(audio_frame.Pass(), playout_time);
- // TODO(miu): Put this back here: PullNextAudioFrame();
+ if (audio_frame.get()) {
+ // TODO(miu): Remove use of deprecated PcmAudioFrame and also pass
+ // |is_continuous| flag.
+ scoped_ptr<PcmAudioFrame> pcm_frame(new PcmAudioFrame());
+ pcm_frame->channels = audio_frame->channels();
+ pcm_frame->frequency = audio_config_.frequency;
+ pcm_frame->samples.resize(audio_frame->channels() * audio_frame->frames());
+ audio_frame->ToInterleaved(
+ audio_frame->frames(), sizeof(int16), &pcm_frame->samples.front());
+ OnAudioFrame(pcm_frame.Pass(), playout_time);
+ }
+ PullNextAudioFrame();
}
void InProcessReceiver::GotVideoFrame(
@@ -95,20 +129,8 @@ void InProcessReceiver::GotVideoFrame(
void InProcessReceiver::PullNextAudioFrame() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
cast_receiver_->frame_receiver()->GetRawAudioFrame(
- 1 /* 10 ms of samples */,
- audio_config_.frequency,
base::Bind(&InProcessReceiver::GotAudioFrame,
weak_factory_.GetWeakPtr()));
- // TODO(miu): Fix audio decoder so that it never drops a request for the next
- // frame of audio. Once fixed, remove this, and add PullNextAudioFrame() to
- // the end of GotAudioFrame(), so that it behaves just like GotVideoFrame().
- // http://crbug.com/347361
- cast_environment_->PostDelayedTask(
- CastEnvironment::MAIN,
- FROM_HERE,
- base::Bind(&InProcessReceiver::PullNextAudioFrame,
- weak_factory_.GetWeakPtr()),
- base::TimeDelta::FromMilliseconds(10));
}
void InProcessReceiver::PullNextVideoFrame() {
diff --git a/media/cast/test/utility/in_process_receiver.h b/media/cast/test/utility/in_process_receiver.h
index a5b13cee43..a5a2addd5c 100644
--- a/media/cast/test/utility/in_process_receiver.h
+++ b/media/cast/test/utility/in_process_receiver.h
@@ -8,11 +8,13 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
+#include "media/base/audio_bus.h"
#include "media/cast/cast_config.h"
#include "media/cast/transport/cast_transport_config.h"
namespace base {
class TimeTicks;
+class WaitableEvent;
} // namespace base
namespace net {
@@ -59,8 +61,14 @@ class InProcessReceiver {
// Schedules destruction on the cast MAIN thread. Any external references to
// the InProcessReceiver instance become invalid.
+ // Deprecated: Use Stop instead.
+ // TODO(hubbe): Remove this function and change callers to use Stop.
void DestroySoon();
+ // Destroy the sub-components of this class.
+ // After this call, it is safe to destroy this object on any thread.
+ void Stop();
+
protected:
// To be implemented by subclasses. These are called on the Cast MAIN thread
// as each frame is received.
@@ -74,6 +82,10 @@ class InProcessReceiver {
// Subclasses may override to provide additional start-up functionality.
virtual void StartOnMainThread();
+ // Helper method that destroys |transport_| and |cast_receiver_|.
+ // Subclasses may override to provide additional shutdown functionality.
+ virtual void StopOnMainThread(base::WaitableEvent* event);
+
// Callback for the transport to notify of status changes. A default
// implementation is provided here that simply logs socket errors.
virtual void UpdateCastTransportStatus(transport::CastTransportStatus status);
@@ -82,8 +94,9 @@ class InProcessReceiver {
friend class base::RefCountedThreadSafe<InProcessReceiver>;
// CastReceiver callbacks that receive a frame and then request another.
- void GotAudioFrame(scoped_ptr<PcmAudioFrame> audio_frame,
- const base::TimeTicks& playout_time);
+ void GotAudioFrame(scoped_ptr<AudioBus> audio_frame,
+ const base::TimeTicks& playout_time,
+ bool is_continuous);
void GotVideoFrame(const scoped_refptr<VideoFrame>& video_frame,
const base::TimeTicks& render_time);
void PullNextAudioFrame();
diff --git a/media/cast/test/utility/standalone_cast_environment.h b/media/cast/test/utility/standalone_cast_environment.h
index 0deab6dd7c..cf2674e61e 100644
--- a/media/cast/test/utility/standalone_cast_environment.h
+++ b/media/cast/test/utility/standalone_cast_environment.h
@@ -17,7 +17,7 @@ namespace cast {
class StandaloneCastEnvironment : public CastEnvironment,
public base::ThreadChecker {
public:
- explicit StandaloneCastEnvironment();
+ StandaloneCastEnvironment();
// Stops all threads backing the task runners, blocking the caller until
// complete.
diff --git a/media/cast/test/utility/udp_proxy.cc b/media/cast/test/utility/udp_proxy.cc
index 8f8c41bf78..62c64b1b7b 100644
--- a/media/cast/test/utility/udp_proxy.cc
+++ b/media/cast/test/utility/udp_proxy.cc
@@ -23,7 +23,11 @@ Packet::~Packet() {}
PacketPipe::PacketPipe() {}
PacketPipe::~PacketPipe() {}
-void PacketPipe::InitOnIOThread() {}
+void PacketPipe::InitOnIOThread() {
+ if (pipe_) {
+ pipe_->InitOnIOThread();
+ }
+}
void PacketPipe::AppendToPipe(scoped_ptr<PacketPipe> pipe) {
if (pipe_) {
pipe_->AppendToPipe(pipe.Pass());
@@ -188,6 +192,7 @@ class RandomSortedDelay : public PacketPipe {
}
}
virtual void InitOnIOThread() OVERRIDE {
+ PacketPipe::InitOnIOThread();
// As we start the stream, assume that we are in a random
// place between two extra delays, thus multiplier = 1.0;
ScheduleExtraDelay(1.0);
@@ -269,6 +274,7 @@ class NetworkGlitchPipe : public PacketPipe {
}
virtual void InitOnIOThread() OVERRIDE {
+ PacketPipe::InitOnIOThread();
Flip();
}
@@ -326,6 +332,7 @@ class PacketSender : public PacketPipe {
int result;
if (destination_->address().empty()) {
VLOG(1) << "Destination has not been set yet.";
+ result = net::ERR_INVALID_ARGUMENT;
} else {
VLOG(1) << "Destination:" << destination_->ToString();
result = udp_socket_->SendTo(buf,
@@ -410,12 +417,12 @@ class UDPProxyImpl : public UDPProxy {
scoped_ptr<PacketPipe> to_dest_pipe,
scoped_ptr<PacketPipe> from_dest_pipe,
net::NetLog* net_log) :
- proxy_thread_("media::cast::test::UdpProxy Thread"),
local_port_(local_port),
destination_(destination),
- start_event_(false, false),
+ proxy_thread_("media::cast::test::UdpProxy Thread"),
to_dest_pipe_(to_dest_pipe.Pass()),
- from_dest_pipe_(to_dest_pipe.Pass()) {
+ from_dest_pipe_(from_dest_pipe.Pass()),
+ start_event_(false, false) {
proxy_thread_.StartWithOptions(
base::Thread::Options(base::MessageLoop::TYPE_IO, 0));
proxy_thread_.message_loop_proxy()->PostTask(
diff --git a/media/cast/test/utility/utility.gyp b/media/cast/test/utility/utility.gyp
index 698579caee..e8deb809b2 100644
--- a/media/cast/test/utility/utility.gyp
+++ b/media/cast/test/utility/utility.gyp
@@ -3,6 +3,9 @@
# found in the LICENSE file.
{
+ 'variables': {
+ 'chromium_code': 1,
+ },
'targets': [
{
'target_name': 'cast_test_utility',
diff --git a/media/cast/test/utility/video_utility.cc b/media/cast/test/utility/video_utility.cc
index 742be6fba3..81475d85f1 100644
--- a/media/cast/test/utility/video_utility.cc
+++ b/media/cast/test/utility/video_utility.cc
@@ -36,7 +36,6 @@ double I420PSNR(const scoped_refptr<media::VideoFrame>& frame1,
}
void PopulateVideoFrame(VideoFrame* frame, int start_value) {
- int width = frame->coded_size().width();
int height = frame->coded_size().height();
int stride_y = frame->stride(VideoFrame::kYPlane);
int stride_u = frame->stride(VideoFrame::kUPlane);
diff --git a/media/cast/transport/cast_transport_defines.h b/media/cast/transport/cast_transport_defines.h
index 252883d24d..328b4bd543 100644
--- a/media/cast/transport/cast_transport_defines.h
+++ b/media/cast/transport/cast_transport_defines.h
@@ -5,6 +5,8 @@
#ifndef MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_DEFINES_H_
#define MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_DEFINES_H_
+#include <stdint.h>
+
#include <map>
#include <set>
#include <string>
@@ -142,7 +144,7 @@ class FrameIdWrapHelper {
static const uint8 kLowRangeThreshold = 63;
static const uint8 kHighRangeThreshold = 192;
- static const uint32 kStartFrameId = GG_UINT32_C(0xffffffff);
+ static const uint32 kStartFrameId = UINT32_C(0xffffffff);
bool first_;
uint32 frame_id_wrap_count_;
diff --git a/media/cast/transport/cast_transport_sender_impl_unittest.cc b/media/cast/transport/cast_transport_sender_impl_unittest.cc
index 7077bf620b..6df5304d4a 100644
--- a/media/cast/transport/cast_transport_sender_impl_unittest.cc
+++ b/media/cast/transport/cast_transport_sender_impl_unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include <gtest/gtest.h>
+#include <stdint.h>
#include "base/bind.h"
#include "base/bind_helpers.h"
@@ -21,7 +22,7 @@ namespace media {
namespace cast {
namespace transport {
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
class FakePacketSender : public transport::PacketSender {
public:
diff --git a/media/cast/transport/pacing/paced_sender_unittest.cc b/media/cast/transport/pacing/paced_sender_unittest.cc
index bd558dc5cd..16dcd0f838 100644
--- a/media/cast/transport/pacing/paced_sender_unittest.cc
+++ b/media/cast/transport/pacing/paced_sender_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include "base/big_endian.h"
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/logging/simple_event_subscriber.h"
@@ -21,7 +23,7 @@ static const size_t kSize2 = 101;
static const size_t kSize3 = 102;
static const size_t kSize4 = 103;
static const size_t kNackSize = 104;
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
static const uint32 kVideoSsrc = 0x1234;
static const uint32 kAudioSsrc = 0x5678;
diff --git a/media/cast/transport/rtp_sender/packet_storage/packet_storage_unittest.cc b/media/cast/transport/rtp_sender/packet_storage/packet_storage_unittest.cc
index c83a528691..73faad0d64 100644
--- a/media/cast/transport/rtp_sender/packet_storage/packet_storage_unittest.cc
+++ b/media/cast/transport/rtp_sender/packet_storage/packet_storage_unittest.cc
@@ -4,6 +4,8 @@
#include "media/cast/transport/rtp_sender/packet_storage/packet_storage.h"
+#include <stdint.h>
+
#include <vector>
#include "base/test/simple_test_tick_clock.h"
@@ -18,7 +20,7 @@ static const int kMaxDeltaStoredMs = 500;
static const base::TimeDelta kDeltaBetweenFrames =
base::TimeDelta::FromMilliseconds(33);
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
class PacketStorageTest : public ::testing::Test {
protected:
diff --git a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc b/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
index 8aa8ffff5e..4b7d0966a9 100644
--- a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
+++ b/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
@@ -4,6 +4,8 @@
#include "media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h"
+#include <stdint.h>
+
#include "base/memory/scoped_ptr.h"
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/logging/simple_event_subscriber.h"
@@ -25,7 +27,7 @@ static const int kMaxPacketLength = 1500;
static const int kSsrc = 0x12345;
static const unsigned int kFrameSize = 5000;
static const int kMaxPacketStorageTimeMs = 300;
-static const uint32 kStartFrameId = GG_UINT32_C(0xffffffff);
+static const uint32 kStartFrameId = UINT32_C(0xffffffff);
}
class TestRtpPacketTransport : public PacketSender {
diff --git a/media/cast/transport/utility/utility.gyp b/media/cast/transport/utility/utility.gyp
index 35b6aa0d14..9be40683ae 100644
--- a/media/cast/transport/utility/utility.gyp
+++ b/media/cast/transport/utility/utility.gyp
@@ -3,6 +3,9 @@
# found in the LICENSE file.
{
+ 'variables': {
+ 'chromium_code': 1,
+ },
'targets': [
{
'target_name': 'transport_utility',
@@ -20,4 +23,4 @@
],
},
],
-} \ No newline at end of file
+}
diff --git a/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp b/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp
index 4bc9434d2d..c1209ff741 100644
--- a/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp
+++ b/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp
@@ -3,6 +3,9 @@
# found in the LICENSE file.
{
+ 'variables': {
+ 'chromium_code': 1,
+ },
'targets': [
{
'target_name': 'cast_vp8_decoder',
diff --git a/media/cast/video_receiver/video_decoder.cc b/media/cast/video_receiver/video_decoder.cc
index 230b6e18a8..f85e117232 100644
--- a/media/cast/video_receiver/video_decoder.cc
+++ b/media/cast/video_receiver/video_decoder.cc
@@ -4,6 +4,8 @@
#include "media/cast/video_receiver/video_decoder.h"
+#include <stdint.h>
+
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
@@ -32,7 +34,7 @@ bool VideoDecoder::DecodeVideoFrame(
const base::TimeTicks render_time,
const VideoFrameDecodedCallback& frame_decoded_cb) {
DCHECK(encoded_frame->codec == codec_) << "Invalid codec";
- DCHECK_GT(encoded_frame->data.size(), GG_UINT64_C(0)) << "Empty video frame";
+ DCHECK_GT(encoded_frame->data.size(), UINT64_C(0)) << "Empty video frame";
return vp8_decoder_->Decode(encoded_frame, render_time, frame_decoded_cb);
}
diff --git a/media/cast/video_receiver/video_decoder_unittest.cc b/media/cast/video_receiver/video_decoder_unittest.cc
index dd1c0e6024..44de8809f8 100644
--- a/media/cast/video_receiver/video_decoder_unittest.cc
+++ b/media/cast/video_receiver/video_decoder_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "base/test/simple_test_tick_clock.h"
@@ -20,7 +22,7 @@ namespace cast {
using testing::_;
// Random frame size for testing.
-static const int64 kStartMillisecond = GG_INT64_C(1245);
+static const int64 kStartMillisecond = INT64_C(1245);
namespace {
class DecodeTestFrameCallback
diff --git a/media/cast/video_receiver/video_receiver.cc b/media/cast/video_receiver/video_receiver.cc
index bf8844bbfb..95b1d5127a 100644
--- a/media/cast/video_receiver/video_receiver.cc
+++ b/media/cast/video_receiver/video_receiver.cc
@@ -21,7 +21,6 @@ namespace {
static const int64 kMinSchedulingDelayMs = 1;
static const int64 kMinTimeBetweenOffsetUpdatesMs = 1000;
static const int kTimeOffsetMaxCounter = 10;
-static const int64_t kMinProcessIntervalMs = 5;
} // namespace
@@ -176,7 +175,6 @@ void VideoReceiver::GetEncodedVideoFrame(
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
scoped_ptr<transport::EncodedVideoFrame> encoded_frame(
new transport::EncodedVideoFrame());
- uint32 rtp_timestamp = 0;
bool next_frame = false;
if (!framer_->GetEncodedVideoFrame(encoded_frame.get(), &next_frame)) {
diff --git a/media/cast/video_receiver/video_receiver_unittest.cc b/media/cast/video_receiver/video_receiver_unittest.cc
index 94a7841911..7bea501320 100644
--- a/media/cast/video_receiver/video_receiver_unittest.cc
+++ b/media/cast/video_receiver/video_receiver_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
@@ -15,7 +17,7 @@
#include "testing/gmock/include/gmock/gmock.h"
static const int kPacketSize = 1500;
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
namespace media {
namespace cast {
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.cc b/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
index a073b9f080..a822dc91f1 100644
--- a/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
+++ b/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
@@ -294,7 +294,7 @@ uint32 Vp8Encoder::GetLatestFrameIdToReference() {
Vp8Encoder::Vp8Buffers Vp8Encoder::GetNextBufferToUpdate() {
// Update at most one buffer, except for key-frames.
- Vp8Buffers buffer_to_update;
+ Vp8Buffers buffer_to_update = kNoBuffer;
if (number_of_repeated_buffers_ < max_number_of_repeated_buffers_in_a_row_) {
// TODO(pwestin): experiment with this. The issue with only this change is
// that we can end up with only 4 frames in flight when we expect 6.
diff --git a/media/cast/video_sender/external_video_encoder.cc b/media/cast/video_sender/external_video_encoder.cc
index 4b5937903a..b051c1b5e0 100644
--- a/media/cast/video_sender/external_video_encoder.cc
+++ b/media/cast/video_sender/external_video_encoder.cc
@@ -23,7 +23,7 @@ class LocalVideoEncodeAcceleratorClient;
} // namespace media
namespace {
-static const int kOutputBufferCount = 3;
+static const size_t kOutputBufferCount = 3;
void LogFrameEncodedEvent(
const scoped_refptr<media::cast::CastEnvironment>& cast_environment,
@@ -181,7 +181,7 @@ class LocalVideoEncodeAcceleratorClient
DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
DCHECK(video_encode_accelerator_);
- for (int j = 0; j < kOutputBufferCount; ++j) {
+ for (size_t j = 0; j < kOutputBufferCount; ++j) {
create_video_encode_memory_cb_.Run(
output_buffer_size,
base::Bind(&LocalVideoEncodeAcceleratorClient::OnCreateSharedMemory,
@@ -212,47 +212,46 @@ class LocalVideoEncodeAcceleratorClient
NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
return;
}
- if (encoded_frame_data_storage_.empty()) {
- NOTREACHED();
- NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
- return;
- }
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame(
- new transport::EncodedVideoFrame());
-
- encoded_frame->codec = codec_;
- encoded_frame->key_frame = key_frame;
- encoded_frame->last_referenced_frame_id = last_encoded_frame_id_;
- last_encoded_frame_id_++;
- encoded_frame->frame_id = last_encoded_frame_id_;
- encoded_frame->rtp_timestamp =
- GetVideoRtpTimestamp(encoded_frame_data_storage_.front().capture_time);
- if (key_frame) {
- // Self referenced.
- encoded_frame->last_referenced_frame_id = encoded_frame->frame_id;
+ if (!encoded_frame_data_storage_.empty()) {
+ scoped_ptr<transport::EncodedVideoFrame> encoded_frame(
+ new transport::EncodedVideoFrame());
+
+ encoded_frame->codec = codec_;
+ encoded_frame->key_frame = key_frame;
+ encoded_frame->last_referenced_frame_id = last_encoded_frame_id_;
+ last_encoded_frame_id_++;
+ encoded_frame->frame_id = last_encoded_frame_id_;
+ encoded_frame->rtp_timestamp = GetVideoRtpTimestamp(
+ encoded_frame_data_storage_.front().capture_time);
+ if (key_frame) {
+ // Self referenced.
+ encoded_frame->last_referenced_frame_id = encoded_frame->frame_id;
+ }
+
+ encoded_frame->data.insert(
+ 0, static_cast<const char*>(output_buffer->memory()), payload_size);
+
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&LogFrameEncodedEvent,
+ cast_environment_,
+ cast_environment_->Clock()->NowTicks(),
+ encoded_frame->rtp_timestamp,
+ encoded_frame->frame_id));
+
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(encoded_frame_data_storage_.front().frame_encoded_callback,
+ base::Passed(&encoded_frame),
+ encoded_frame_data_storage_.front().capture_time));
+
+ encoded_frame_data_storage_.pop_front();
+ } else {
+ VLOG(1) << "BitstreamBufferReady(): no encoded frame data available";
}
- encoded_frame->data.insert(
- 0, static_cast<const char*>(output_buffer->memory()), payload_size);
-
- cast_environment_->PostTask(
- CastEnvironment::MAIN,
- FROM_HERE,
- base::Bind(&LogFrameEncodedEvent,
- cast_environment_,
- cast_environment_->Clock()->NowTicks(),
- encoded_frame->rtp_timestamp,
- encoded_frame->frame_id));
-
- cast_environment_->PostTask(
- CastEnvironment::MAIN,
- FROM_HERE,
- base::Bind(encoded_frame_data_storage_.front().frame_encoded_callback,
- base::Passed(&encoded_frame),
- encoded_frame_data_storage_.front().capture_time));
-
- encoded_frame_data_storage_.pop_front();
-
// We need to re-add the output buffer to the encoder after we are done
// with it.
video_encode_accelerator_->UseOutputBitstreamBuffer(media::BitstreamBuffer(
diff --git a/media/cast/video_sender/video_sender_unittest.cc b/media/cast/video_sender/video_sender_unittest.cc
index 1eafe408ad..56d38c0c72 100644
--- a/media/cast/video_sender/video_sender_unittest.cc
+++ b/media/cast/video_sender/video_sender_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include <vector>
#include "base/bind.h"
@@ -25,7 +27,7 @@ namespace media {
namespace cast {
namespace {
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
static const uint8 kPixelValue = 123;
static const int kWidth = 320;
static const int kHeight = 240;