summaryrefslogtreecommitdiff
path: root/media
diff options
context:
space:
mode:
authorTorne (Richard Coles) <torne@google.com>2013-09-12 12:10:22 +0100
committerTorne (Richard Coles) <torne@google.com>2013-09-12 12:10:22 +0100
commit58537e28ecd584eab876aee8be7156509866d23a (patch)
tree8988984e52090aaadf33cff139d7dd212cd13656 /media
parent0a1b11dee8e5cb2520121c300858fea6138e3c54 (diff)
downloadchromium_org-58537e28ecd584eab876aee8be7156509866d23a.tar.gz
Merge from Chromium at DEPS revision 222756
This commit was generated by merge_to_master.py. Change-Id: I40d7f32f195f328f005f230ea80d07092d48040e
Diffstat (limited to 'media')
-rw-r--r--media/DEPS1
-rw-r--r--media/audio/android/audio_manager_android.cc17
-rw-r--r--media/audio/android/audio_manager_android.h3
-rw-r--r--media/audio/audio_input_controller.cc8
-rw-r--r--media/audio/audio_input_device.cc4
-rw-r--r--media/audio/audio_input_device_unittest.cc2
-rw-r--r--media/audio/audio_low_latency_input_output_unittest.cc3
-rw-r--r--media/audio/audio_manager.h39
-rw-r--r--media/audio/audio_manager_base.cc78
-rw-r--r--media/audio/audio_manager_base.h24
-rw-r--r--media/audio/audio_manager_unittest.cc145
-rw-r--r--media/audio/audio_output_controller.cc23
-rw-r--r--media/audio/audio_output_controller.h15
-rw-r--r--media/audio/audio_output_controller_unittest.cc4
-rw-r--r--media/audio/audio_output_dispatcher.cc2
-rw-r--r--media/audio/audio_output_dispatcher.h2
-rw-r--r--media/audio/audio_output_dispatcher_impl.cc6
-rw-r--r--media/audio/audio_output_dispatcher_impl.h1
-rw-r--r--media/audio/audio_output_proxy_unittest.cc57
-rw-r--r--media/audio/audio_output_resampler.cc8
-rw-r--r--media/audio/audio_output_resampler.h4
-rw-r--r--media/audio/cras/audio_manager_cras.cc8
-rw-r--r--media/audio/cras/audio_manager_cras.h2
-rw-r--r--media/audio/cras/cras_input.cc1
-rw-r--r--media/audio/ios/audio_manager_ios.h56
-rw-r--r--media/audio/ios/audio_manager_ios.mm140
-rw-r--r--media/audio/ios/audio_manager_ios_unittest.cc34
-rw-r--r--media/audio/ios/audio_session_util_ios.h17
-rw-r--r--media/audio/ios/audio_session_util_ios.mm40
-rw-r--r--media/audio/linux/alsa_output_unittest.cc6
-rw-r--r--media/audio/linux/audio_manager_linux.cc89
-rw-r--r--media/audio/linux/audio_manager_linux.h22
-rw-r--r--media/audio/mac/audio_auhal_mac_unittest.cc2
-rw-r--r--media/audio/mac/audio_input_mac.cc5
-rw-r--r--media/audio/mac/audio_low_latency_input_mac.cc32
-rw-r--r--media/audio/mac/audio_low_latency_input_mac.h3
-rw-r--r--media/audio/mac/audio_manager_mac.cc171
-rw-r--r--media/audio/mac/audio_manager_mac.h7
-rw-r--r--media/audio/mock_audio_manager.cc20
-rw-r--r--media/audio/mock_audio_manager.h9
-rw-r--r--media/audio/openbsd/audio_manager_openbsd.cc5
-rw-r--r--media/audio/openbsd/audio_manager_openbsd.h2
-rw-r--r--media/audio/pulse/audio_manager_pulse.cc52
-rw-r--r--media/audio/pulse/audio_manager_pulse.h6
-rw-r--r--media/audio/win/audio_low_latency_output_win.cc44
-rw-r--r--media/audio/win/audio_low_latency_output_win.h9
-rw-r--r--media/audio/win/audio_low_latency_output_win_unittest.cc4
-rw-r--r--media/audio/win/audio_manager_win.cc129
-rw-r--r--media/audio/win/audio_manager_win.h13
-rw-r--r--media/audio/win/audio_output_win_unittest.cc40
-rw-r--r--media/audio/win/audio_unified_win_unittest.cc15
-rw-r--r--media/audio/win/core_audio_util_win.cc76
-rw-r--r--media/audio/win/core_audio_util_win.h10
-rw-r--r--media/audio/win/core_audio_util_win_unittest.cc44
-rw-r--r--media/audio/win/device_enumeration_win.cc59
-rw-r--r--media/audio/win/device_enumeration_win.h16
-rw-r--r--media/base/android/audio_decoder_job.cc4
-rw-r--r--media/base/android/audio_decoder_job.h2
-rw-r--r--media/base/android/demuxer_android.h77
-rw-r--r--media/base/android/demuxer_stream_player_params.h5
-rw-r--r--media/base/android/java/src/org/chromium/media/MediaCodecBridge.java222
-rw-r--r--media/base/android/java/src/org/chromium/media/MediaDrmBridge.java90
-rw-r--r--media/base/android/media_codec_bridge.cc206
-rw-r--r--media/base/android/media_codec_bridge.h99
-rw-r--r--media/base/android/media_codec_bridge_unittest.cc57
-rw-r--r--media/base/android/media_decoder_job.cc224
-rw-r--r--media/base/android/media_decoder_job.h34
-rw-r--r--media/base/android/media_drm_bridge.cc88
-rw-r--r--media/base/android/media_drm_bridge.h20
-rw-r--r--media/base/android/media_player_android.cc21
-rw-r--r--media/base/android/media_player_android.h37
-rw-r--r--media/base/android/media_player_bridge.cc27
-rw-r--r--media/base/android/media_player_manager.h11
-rw-r--r--media/base/android/media_source_player.cc179
-rw-r--r--media/base/android/media_source_player.h39
-rw-r--r--media/base/android/media_source_player_unittest.cc327
-rw-r--r--media/base/android/video_decoder_job.cc18
-rw-r--r--media/base/android/video_decoder_job.h14
-rw-r--r--media/base/audio_decoder_config.cc18
-rw-r--r--media/base/audio_decoder_config.h16
-rw-r--r--media/base/decoder_buffer.cc3
-rw-r--r--media/base/decoder_buffer.h11
-rw-r--r--media/base/keyboard_event_counter.cc3
-rw-r--r--media/base/media.cc2
-rw-r--r--media/base/media_stub.cc2
-rw-r--r--media/base/media_switches.cc13
-rw-r--r--media/base/media_switches.h8
-rw-r--r--media/base/run_all_unittests.cc17
-rw-r--r--media/base/simd/convert_rgb_to_yuv_sse2.cc13
-rw-r--r--media/base/simd/convert_yuv_to_rgb_c.cc32
-rw-r--r--media/base/simd/yuv_to_rgb_table.cc22
-rw-r--r--media/base/sinc_resampler.cc5
-rw-r--r--media/base/vector_math.cc5
-rw-r--r--media/cast/OWNERS1
-rw-r--r--media/cast/audio_sender/audio_encoder.cc172
-rw-r--r--media/cast/audio_sender/audio_encoder.h63
-rw-r--r--media/cast/audio_sender/audio_encoder_unittest.cc73
-rw-r--r--media/cast/audio_sender/audio_sender.cc168
-rw-r--r--media/cast/audio_sender/audio_sender.gypi30
-rw-r--r--media/cast/audio_sender/audio_sender.h100
-rw-r--r--media/cast/audio_sender/audio_sender_unittest.cc96
-rw-r--r--media/cast/cast.gyp9
-rw-r--r--media/cast/cast_config.h9
-rw-r--r--media/cast/cast_defines.h81
-rw-r--r--media/cast/cast_receiver.gyp1
-rw-r--r--media/cast/cast_receiver.h17
-rw-r--r--media/cast/cast_sender.gyp17
-rw-r--r--media/cast/cast_sender.h65
-rw-r--r--media/cast/cast_sender_impl.cc176
-rw-r--r--media/cast/cast_sender_impl.h55
-rw-r--r--media/cast/cast_thread.cc62
-rw-r--r--media/cast/cast_thread.h73
-rw-r--r--media/cast/pacing/paced_sender.cc44
-rw-r--r--media/cast/pacing/paced_sender.h29
-rw-r--r--media/cast/pacing/paced_sender_unittest.cc207
-rw-r--r--media/cast/rtcp/rtcp.cc60
-rw-r--r--media/cast/rtcp/rtcp.h32
-rw-r--r--media/cast/rtcp/rtcp_defines.h5
-rw-r--r--media/cast/rtcp/rtcp_unittest.cc18
-rw-r--r--media/cast/rtp_common/rtp_defines.h22
-rw-r--r--media/cast/rtp_receiver/receiver_stats.cc120
-rw-r--r--media/cast/rtp_receiver/receiver_stats.h53
-rw-r--r--media/cast/rtp_receiver/receiver_stats_unittest.cc157
-rw-r--r--media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h37
-rw-r--r--media/cast/rtp_receiver/rtp_parser/rtp_parser.cc107
-rw-r--r--media/cast/rtp_receiver/rtp_parser/rtp_parser.gypi25
-rw-r--r--media/cast/rtp_receiver/rtp_parser/rtp_parser.h53
-rw-r--r--media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc201
-rw-r--r--media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.cc104
-rw-r--r--media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h51
-rw-r--r--media/cast/rtp_receiver/rtp_receiver.cc57
-rw-r--r--media/cast/rtp_receiver/rtp_receiver.gyp27
-rw-r--r--media/cast/rtp_receiver/rtp_receiver.h53
-rw-r--r--media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc30
-rw-r--r--media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h28
-rw-r--r--media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc14
-rw-r--r--media/cast/rtp_sender/rtp_sender.cc41
-rw-r--r--media/cast/rtp_sender/rtp_sender.h25
-rw-r--r--media/cast/video_sender/codecs/vp8/vp8_encoder.cc352
-rw-r--r--media/cast/video_sender/codecs/vp8/vp8_encoder.gypi19
-rw-r--r--media/cast/video_sender/codecs/vp8/vp8_encoder.h87
-rw-r--r--media/cast/video_sender/mock_video_encoder_controller.h31
-rw-r--r--media/cast/video_sender/video_encoder.cc112
-rw-r--r--media/cast/video_sender/video_encoder.h80
-rw-r--r--media/cast/video_sender/video_encoder_unittest.cc282
-rw-r--r--media/cast/video_sender/video_sender.cc346
-rw-r--r--media/cast/video_sender/video_sender.gypi31
-rw-r--r--media/cast/video_sender/video_sender.h145
-rw-r--r--media/cast/video_sender/video_sender_unittest.cc226
-rw-r--r--media/cdm/aes_decryptor.cc203
-rw-r--r--media/cdm/aes_decryptor.h6
-rw-r--r--media/cdm/aes_decryptor_unittest.cc945
-rw-r--r--media/ffmpeg/ffmpeg_common.cc4
-rw-r--r--media/filters/decrypting_audio_decoder.cc8
-rw-r--r--media/filters/decrypting_audio_decoder_unittest.cc3
-rw-r--r--media/filters/decrypting_demuxer_stream.cc4
-rw-r--r--media/filters/ffmpeg_audio_decoder.cc14
-rw-r--r--media/filters/ffmpeg_demuxer.cc4
-rw-r--r--media/filters/opus_audio_decoder.cc60
-rw-r--r--media/filters/opus_audio_decoder.h4
-rw-r--r--media/filters/pipeline_integration_test.cc72
-rw-r--r--media/filters/source_buffer_stream.cc22
-rw-r--r--media/filters/source_buffer_stream_unittest.cc17
-rw-r--r--media/filters/stream_parser_factory.cc56
-rw-r--r--media/filters/stream_parser_factory.h2
-rw-r--r--media/filters/vpx_video_decoder.cc2
-rw-r--r--media/media.gyp229
-rw-r--r--media/media.target.darwin-arm.mk16
-rw-r--r--media/media.target.darwin-mips.mk16
-rw-r--r--media/media.target.darwin-x86.mk12
-rw-r--r--media/media.target.linux-arm.mk16
-rw-r--r--media/media.target.linux-mips.mk16
-rw-r--r--media/media.target.linux-x86.mk12
-rw-r--r--media/media_android_jni_headers.target.darwin-arm.mk4
-rw-r--r--media/media_android_jni_headers.target.darwin-mips.mk4
-rw-r--r--media/media_android_jni_headers.target.darwin-x86.mk4
-rw-r--r--media/media_android_jni_headers.target.linux-arm.mk4
-rw-r--r--media/media_android_jni_headers.target.linux-mips.mk4
-rw-r--r--media/media_android_jni_headers.target.linux-x86.mk4
-rw-r--r--media/media_asm.target.darwin-x86.mk10
-rw-r--r--media/media_asm.target.linux-x86.mk10
-rw-r--r--media/media_cdm.gypi6
-rw-r--r--media/media_mmx.target.darwin-x86.mk10
-rw-r--r--media/media_mmx.target.linux-x86.mk10
-rw-r--r--media/media_sse.target.darwin-x86.mk10
-rw-r--r--media/media_sse.target.linux-x86.mk10
-rw-r--r--media/media_sse2.target.darwin-x86.mk10
-rw-r--r--media/media_sse2.target.linux-x86.mk10
-rw-r--r--media/mp3/mp3_stream_parser.cc566
-rw-r--r--media/mp3/mp3_stream_parser.h119
-rw-r--r--media/mp4/mp4_stream_parser.cc3
-rw-r--r--media/player_android.target.darwin-arm.mk10
-rw-r--r--media/player_android.target.darwin-mips.mk10
-rw-r--r--media/player_android.target.darwin-x86.mk10
-rw-r--r--media/player_android.target.linux-arm.mk10
-rw-r--r--media/player_android.target.linux-mips.mk10
-rw-r--r--media/player_android.target.linux-x86.mk10
-rw-r--r--media/shared_memory_support.target.darwin-arm.mk10
-rw-r--r--media/shared_memory_support.target.darwin-mips.mk10
-rw-r--r--media/shared_memory_support.target.darwin-x86.mk10
-rw-r--r--media/shared_memory_support.target.linux-arm.mk10
-rw-r--r--media/shared_memory_support.target.linux-mips.mk10
-rw-r--r--media/shared_memory_support.target.linux-x86.mk10
-rw-r--r--media/shared_memory_support_sse.target.darwin-x86.mk10
-rw-r--r--media/shared_memory_support_sse.target.linux-x86.mk10
-rw-r--r--media/test/data/icy_sfx.mp3bin0 -> 2445 bytes
-rw-r--r--media/video/capture/android/video_capture_device_android.cc2
-rw-r--r--media/video/capture/fake_video_capture_device.cc10
-rw-r--r--media/video/capture/linux/video_capture_device_linux.cc12
-rw-r--r--media/video/capture/mac/video_capture_device_mac.mm2
-rw-r--r--media/video/capture/mac/video_capture_device_qtkit_mac.mm4
-rw-r--r--media/video/capture/video_capture_device_unittest.cc20
-rw-r--r--media/video/capture/video_capture_types.cc60
-rw-r--r--media/video/capture/video_capture_types.h89
-rw-r--r--media/video/capture/win/sink_input_pin_win.cc8
-rw-r--r--media/video/capture/win/video_capture_device_mf_win.cc18
-rw-r--r--media/video/capture/win/video_capture_device_win.cc20
-rw-r--r--media/video_capture_android_jni_headers.target.darwin-arm.mk4
-rw-r--r--media/video_capture_android_jni_headers.target.darwin-mips.mk4
-rw-r--r--media/video_capture_android_jni_headers.target.darwin-x86.mk4
-rw-r--r--media/video_capture_android_jni_headers.target.linux-arm.mk4
-rw-r--r--media/video_capture_android_jni_headers.target.linux-mips.mk4
-rw-r--r--media/video_capture_android_jni_headers.target.linux-x86.mk4
-rw-r--r--media/webm/webm_audio_client.cc15
-rw-r--r--media/webm/webm_audio_client.h2
-rw-r--r--media/webm/webm_cluster_parser.cc30
-rw-r--r--media/webm/webm_cluster_parser.h8
-rw-r--r--media/webm/webm_constants.h4
-rw-r--r--media/webm/webm_parser.cc4
-rw-r--r--media/webm/webm_tracks_parser.cc12
-rw-r--r--media/webm/webm_tracks_parser.h2
231 files changed, 8844 insertions, 2367 deletions
diff --git a/media/DEPS b/media/DEPS
index 495c8049c7..61ad8cf657 100644
--- a/media/DEPS
+++ b/media/DEPS
@@ -1,6 +1,7 @@
include_rules = [
"+gpu",
"+jni",
+ "+net/http",
"+third_party/ffmpeg",
"+third_party/libvpx",
"+third_party/opus",
diff --git a/media/audio/android/audio_manager_android.cc b/media/audio/android/audio_manager_android.cc
index 164344aba0..e6eed7fea4 100644
--- a/media/audio/android/audio_manager_android.cc
+++ b/media/audio/android/audio_manager_android.cc
@@ -74,9 +74,12 @@ AudioParameters AudioManagerAndroid::GetInputStreamParameters(
}
AudioOutputStream* AudioManagerAndroid::MakeAudioOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
AudioOutputStream* stream =
- AudioManagerBase::MakeAudioOutputStream(params, std::string());
+ AudioManagerBase::MakeAudioOutputStream(params, std::string(),
+ std::string());
if (stream && output_stream_count() == 1) {
SetAudioMode(kAudioModeInCommunication);
RegisterHeadsetReceiver();
@@ -104,13 +107,16 @@ void AudioManagerAndroid::ReleaseInputStream(AudioInputStream* stream) {
}
AudioOutputStream* AudioManagerAndroid::MakeLinearOutputStream(
- const AudioParameters& params) {
+ const AudioParameters& params) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
return new OpenSLESOutputStream(this, params);
}
AudioOutputStream* AudioManagerAndroid::MakeLowLatencyOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
return new OpenSLESOutputStream(this, params);
}
@@ -140,7 +146,10 @@ int AudioManagerAndroid::GetOptimalOutputFrameSize(int sample_rate,
}
AudioParameters AudioManagerAndroid::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) {
+ // TODO(tommi): Support |output_device_id|.
+ DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
int sample_rate = GetNativeOutputSampleRate();
int buffer_size = GetOptimalOutputFrameSize(sample_rate, 2);
diff --git a/media/audio/android/audio_manager_android.h b/media/audio/android/audio_manager_android.h
index fa1c3736a3..ba5bed61e3 100644
--- a/media/audio/android/audio_manager_android.h
+++ b/media/audio/android/audio_manager_android.h
@@ -25,6 +25,7 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
virtual AudioOutputStream* MakeAudioOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeAudioInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
@@ -36,6 +37,7 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
@@ -48,6 +50,7 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
virtual ~AudioManagerAndroid();
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) OVERRIDE;
private:
diff --git a/media/audio/audio_input_controller.cc b/media/audio/audio_input_controller.cc
index d701337b51..ef94d1274d 100644
--- a/media/audio/audio_input_controller.cc
+++ b/media/audio/audio_input_controller.cc
@@ -19,16 +19,10 @@ const int kMaxInputChannels = 2;
// breakage (very hard to repro bugs!) on other platforms: See
// http://crbug.com/226327 and http://crbug.com/230972.
const int kTimerResetIntervalSeconds = 1;
-#if defined(OS_IOS)
-// The first callback on iOS is received after the current background
-// audio has faded away.
-const int kTimerInitialIntervalSeconds = 4;
-#else
// We have received reports that the timer can be too trigger happy on some
// Mac devices and the initial timer interval has therefore been increased
// from 1 second to 5 seconds.
const int kTimerInitialIntervalSeconds = 5;
-#endif // defined(OS_IOS)
}
namespace media {
@@ -179,7 +173,7 @@ void AudioInputController::DoCreate(AudioManager* audio_manager,
SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioInputController.CreateTime");
// TODO(miu): See TODO at top of file. Until that's resolved, assume all
// platform audio input requires the |no_data_timer_| be used to auto-detect
- // errors. In reality, probably only Windows and IOS need to be treated as
+ // errors. In reality, probably only Windows needs to be treated as
// unreliable here.
DoCreateForStream(audio_manager->MakeAudioInputStream(params, device_id),
true);
diff --git a/media/audio/audio_input_device.cc b/media/audio/audio_input_device.cc
index 5477be6e63..d7685840ec 100644
--- a/media/audio/audio_input_device.cc
+++ b/media/audio/audio_input_device.cc
@@ -291,7 +291,9 @@ void AudioInputDevice::AudioThreadCallback::Process(int pending_data) {
uint8* ptr = static_cast<uint8*>(shared_memory_.memory());
ptr += current_segment_id_ * segment_length_;
AudioInputBuffer* buffer = reinterpret_cast<AudioInputBuffer*>(ptr);
- DCHECK_EQ(buffer->params.size,
+ // Usually this will be equal but in the case of low sample rate (e.g. 8kHz,
+ // the buffer may be bigger (on mac at least)).
+ DCHECK_GE(buffer->params.size,
segment_length_ - sizeof(AudioInputBufferParameters));
double volume = buffer->params.volume;
bool key_pressed = buffer->params.key_pressed;
diff --git a/media/audio/audio_input_device_unittest.cc b/media/audio/audio_input_device_unittest.cc
index dc211a48a9..61a97832f6 100644
--- a/media/audio/audio_input_device_unittest.cc
+++ b/media/audio/audio_input_device_unittest.cc
@@ -164,7 +164,7 @@ TEST_F(AudioInputDeviceTest, WinXPDeviceIdUnchanged) {
}
}
-TEST_F(AudioInputDeviceTest, ConvertToWinXPDeviceId) {
+TEST_F(AudioInputDeviceTest, ConvertToWinXPInputDeviceId) {
if (!CanRunAudioTest())
return;
diff --git a/media/audio/audio_low_latency_input_output_unittest.cc b/media/audio/audio_low_latency_input_output_unittest.cc
index 33729c45a0..a616761294 100644
--- a/media/audio/audio_low_latency_input_output_unittest.cc
+++ b/media/audio/audio_low_latency_input_output_unittest.cc
@@ -308,7 +308,8 @@ class AudioOutputStreamTraits {
static StreamType* CreateStream(AudioManager* audio_manager,
const AudioParameters& params) {
- return audio_manager->MakeAudioOutputStream(params, std::string());
+ return audio_manager->MakeAudioOutputStream(params, std::string(),
+ std::string());
}
};
diff --git a/media/audio/audio_manager.h b/media/audio/audio_manager.h
index cc5b95c819..891d2a2658 100644
--- a/media/audio/audio_manager.h
+++ b/media/audio/audio_manager.h
@@ -58,11 +58,16 @@ class MEDIA_EXPORT AudioManager {
// threads to avoid blocking the rest of the application.
virtual void ShowAudioInputSettings() = 0;
- // Appends a list of available input devices. It is not guaranteed that
- // all the devices in the list support all formats and sample rates for
+ // Appends a list of available input devices to |device_names|,
+ // which must initially be empty. It is not guaranteed that all the
+ // devices in the list support all formats and sample rates for
// recording.
virtual void GetAudioInputDeviceNames(AudioDeviceNames* device_names) = 0;
+ // Appends a list of available output devices to |device_names|,
+ // which must initially be empty.
+ virtual void GetAudioOutputDeviceNames(AudioDeviceNames* device_names) = 0;
+
// Factory for all the supported stream formats. |params| defines parameters
// of the audio stream to be created.
//
@@ -71,6 +76,14 @@ class MEDIA_EXPORT AudioManager {
// or three buffers are created, one will be locked for playback and one will
// be ready to be filled in the call to AudioSourceCallback::OnMoreData().
//
+ // To create a stream for the default output device, pass an empty string
+ // for |device_id|, otherwise the specified audio device will be opened.
+ //
+ // The |input_device_id| is used for low-latency unified streams
+ // (input+output) only and then only if the audio parameters specify a >0
+ // input channel count. In other cases this id is ignored and should be
+ // empty.
+ //
// Returns NULL if the combination of the parameters is not supported, or if
// we have reached some other platform specific limit.
//
@@ -82,14 +95,18 @@ class MEDIA_EXPORT AudioManager {
//
// Do not free the returned AudioOutputStream. It is owned by AudioManager.
virtual AudioOutputStream* MakeAudioOutputStream(
- const AudioParameters& params, const std::string& input_device_id) = 0;
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) = 0;
// Creates new audio output proxy. A proxy implements
// AudioOutputStream interface, but unlike regular output stream
// created with MakeAudioOutputStream() it opens device only when a
// sound is actually playing.
virtual AudioOutputStream* MakeAudioOutputStreamProxy(
- const AudioParameters& params, const std::string& input_device_id) = 0;
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) = 0;
// Factory to create audio recording streams.
// |channels| can be 1 or 2.
@@ -130,14 +147,28 @@ class MEDIA_EXPORT AudioManager {
// streams. It is a convenience interface to
// AudioManagerBase::GetPreferredOutputStreamParameters and each AudioManager
// does not need their own implementation to this interface.
+ // TODO(tommi): Remove this method and use GetOutputStreamParameteres instead.
virtual AudioParameters GetDefaultOutputStreamParameters() = 0;
+ // Returns the output hardware audio parameters for a specific output device.
+ virtual AudioParameters GetOutputStreamParameters(
+ const std::string& device_id) = 0;
+
// Returns the input hardware audio parameters of the specific device
// for opening input streams. Each AudioManager needs to implement their own
// version of this interface.
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) = 0;
+ // Returns the device id of an output device that belongs to the same hardware
+ // as the specified input device.
+ // If the hardware has only an input device (e.g. a webcam), the return value
+ // will be empty (which the caller can then interpret to be the default output
+ // device). Implementations that don't yet support this feature, must return
+ // an empty string.
+ virtual std::string GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) = 0;
+
protected:
AudioManager();
diff --git a/media/audio/audio_manager_base.cc b/media/audio/audio_manager_base.cc
index db77f004e3..1477ce3c5a 100644
--- a/media/audio/audio_manager_base.cc
+++ b/media/audio/audio_manager_base.cc
@@ -38,15 +38,18 @@ const char AudioManagerBase::kDefaultDeviceId[] = "default";
struct AudioManagerBase::DispatcherParams {
DispatcherParams(const AudioParameters& input,
const AudioParameters& output,
- const std::string& device_id)
+ const std::string& output_device_id,
+ const std::string& input_device_id)
: input_params(input),
output_params(output),
- input_device_id(device_id) {}
+ input_device_id(input_device_id),
+ output_device_id(output_device_id) {}
~DispatcherParams() {}
const AudioParameters input_params;
const AudioParameters output_params;
const std::string input_device_id;
+ const std::string output_device_id;
scoped_refptr<AudioOutputDispatcher> dispatcher;
private:
@@ -65,6 +68,7 @@ class AudioManagerBase::CompareByParams {
// of the existing dispatcher are the same as the request dispatcher.
return (dispatcher_->input_params == dispatcher_in->input_params &&
dispatcher_->output_params == dispatcher_in->output_params &&
+ dispatcher_->output_device_id == dispatcher_in->output_device_id &&
(!dispatcher_->input_params.input_channels() ||
dispatcher_->input_device_id == dispatcher_in->input_device_id));
}
@@ -134,6 +138,7 @@ scoped_refptr<base::MessageLoopProxy> AudioManagerBase::GetWorkerLoop() {
AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) {
// TODO(miu): Fix ~50 call points across several unit test modules to call
// this method on the audio thread, then uncomment the following:
@@ -159,10 +164,12 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
AudioOutputStream* stream;
switch (params.format()) {
case AudioParameters::AUDIO_PCM_LINEAR:
+ DCHECK(device_id.empty())
+ << "AUDIO_PCM_LINEAR supports only the default device.";
stream = MakeLinearOutputStream(params);
break;
case AudioParameters::AUDIO_PCM_LOW_LATENCY:
- stream = MakeLowLatencyOutputStream(params, input_device_id);
+ stream = MakeLowLatencyOutputStream(params, device_id, input_device_id);
break;
case AudioParameters::AUDIO_FAKE:
stream = FakeAudioOutputStream::MakeFakeStream(this, params);
@@ -180,7 +187,8 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
}
AudioInputStream* AudioManagerBase::MakeAudioInputStream(
- const AudioParameters& params, const std::string& device_id) {
+ const AudioParameters& params,
+ const std::string& device_id) {
// TODO(miu): Fix ~20 call points across several unit test modules to call
// this method on the audio thread, then uncomment the following:
// DCHECK(message_loop_->BelongsToCurrentThread());
@@ -222,19 +230,26 @@ AudioInputStream* AudioManagerBase::MakeAudioInputStream(
}
AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
- const AudioParameters& params, const std::string& input_device_id) {
-#if defined(OS_IOS)
- // IOS implements audio input only.
- NOTIMPLEMENTED();
- return NULL;
-#else
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
DCHECK(message_loop_->BelongsToCurrentThread());
+ // If the caller supplied an empty device id to select the default device,
+ // we fetch the actual device id of the default device so that the lookup
+ // will find the correct device regardless of whether it was opened as
+ // "default" or via the specific id.
+ // NOTE: Implementations that don't yet support opening non-default output
+ // devices may return an empty string from GetDefaultOutputDeviceID().
+ std::string output_device_id = device_id.empty() ?
+ GetDefaultOutputDeviceID() : device_id;
+
// If we're not using AudioOutputResampler our output parameters are the same
// as our input parameters.
AudioParameters output_params = params;
if (params.format() == AudioParameters::AUDIO_PCM_LOW_LATENCY) {
- output_params = GetPreferredOutputStreamParameters(params);
+ output_params =
+ GetPreferredOutputStreamParameters(output_device_id, params);
// Ensure we only pass on valid output parameters.
if (!output_params.IsValid()) {
@@ -257,7 +272,8 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
}
DispatcherParams* dispatcher_params =
- new DispatcherParams(params, output_params, input_device_id);
+ new DispatcherParams(params, output_params, output_device_id,
+ input_device_id);
AudioOutputDispatchers::iterator it =
std::find_if(output_dispatchers_.begin(), output_dispatchers_.end(),
@@ -272,23 +288,30 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
scoped_refptr<AudioOutputDispatcher> dispatcher;
if (output_params.format() != AudioParameters::AUDIO_FAKE) {
dispatcher = new AudioOutputResampler(this, params, output_params,
- input_device_id, kCloseDelay);
+ output_device_id, input_device_id,
+ kCloseDelay);
} else {
dispatcher = new AudioOutputDispatcherImpl(this, output_params,
+ output_device_id,
input_device_id, kCloseDelay);
}
dispatcher_params->dispatcher = dispatcher;
output_dispatchers_.push_back(dispatcher_params);
return new AudioOutputProxy(dispatcher.get());
-#endif // defined(OS_IOS)
}
void AudioManagerBase::ShowAudioInputSettings() {
}
void AudioManagerBase::GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) {
+ AudioDeviceNames* device_names) {
+}
+
+void AudioManagerBase::GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) {
+ // TODO(joi): Remove this and keep pure virtual once implemented everywhere.
+ NOTREACHED() << "Don't use this yet, it's not ready on all platforms!";
}
void AudioManagerBase::ReleaseOutputStream(AudioOutputStream* stream) {
@@ -333,10 +356,6 @@ void AudioManagerBase::Shutdown() {
}
void AudioManagerBase::ShutdownOnAudioThread() {
-// IOS implements audio input only.
-#if defined(OS_IOS)
- return;
-#else
// This should always be running on the audio thread, but since we've cleared
// the audio_thread_ member pointer when we get here, we can't verify exactly
// what thread we're running on. The method is not public though and only
@@ -357,7 +376,6 @@ void AudioManagerBase::ShutdownOnAudioThread() {
}
output_dispatchers_.clear();
-#endif // defined(OS_IOS)
}
void AudioManagerBase::AddOutputDeviceChangeListener(
@@ -379,7 +397,14 @@ void AudioManagerBase::NotifyAllOutputDeviceChangeListeners() {
}
AudioParameters AudioManagerBase::GetDefaultOutputStreamParameters() {
- return GetPreferredOutputStreamParameters(AudioParameters());
+ return GetPreferredOutputStreamParameters(GetDefaultOutputDeviceID(),
+ AudioParameters());
+}
+
+AudioParameters AudioManagerBase::GetOutputStreamParameters(
+ const std::string& device_id) {
+ return GetPreferredOutputStreamParameters(device_id,
+ AudioParameters());
}
AudioParameters AudioManagerBase::GetInputStreamParameters(
@@ -388,4 +413,15 @@ AudioParameters AudioManagerBase::GetInputStreamParameters(
return AudioParameters();
}
+std::string AudioManagerBase::GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) {
+ NOTIMPLEMENTED();
+ return "";
+}
+
+std::string AudioManagerBase::GetDefaultOutputDeviceID() {
+ NOTIMPLEMENTED();
+ return "";
+}
+
} // namespace media
diff --git a/media/audio/audio_manager_base.h b/media/audio/audio_manager_base.h
index 8b34d9fcf9..15c1b24066 100644
--- a/media/audio/audio_manager_base.h
+++ b/media/audio/audio_manager_base.h
@@ -47,10 +47,14 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual void ShowAudioInputSettings() OVERRIDE;
virtual void GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) OVERRIDE;
+ AudioDeviceNames* device_names) OVERRIDE;
+
+ virtual void GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) OVERRIDE;
virtual AudioOutputStream* MakeAudioOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeAudioInputStream(
@@ -58,6 +62,7 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual AudioOutputStream* MakeAudioOutputStreamProxy(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
// Called internally by the audio stream when it has been closed.
@@ -72,7 +77,9 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
// Creates the output stream for the |AUDIO_PCM_LOW_LATENCY| format.
// |input_device_id| is used by unified IO to open the correct input device.
virtual AudioOutputStream* MakeLowLatencyOutputStream(
- const AudioParameters& params, const std::string& input_device_id) = 0;
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) = 0;
// Creates the input stream for the |AUDIO_PCM_LINEAR| format. The legacy
// name is also from |AUDIO_PCM_LINEAR|.
@@ -90,9 +97,15 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
AudioDeviceListener* listener) OVERRIDE;
virtual AudioParameters GetDefaultOutputStreamParameters() OVERRIDE;
+ virtual AudioParameters GetOutputStreamParameters(
+ const std::string& device_id) OVERRIDE;
+
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) OVERRIDE;
+ virtual std::string GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) OVERRIDE;
+
protected:
AudioManagerBase();
@@ -115,9 +128,16 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
// will decide if they should return the values from |input_params| or the
// default hardware values. If the |input_params| is invalid, it will return
// the default hardware audio parameters.
+ // If |output_device_id| is empty, the implementation must treat that as
+ // a request for the default output device.
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) = 0;
+ // Returns the ID of the default audio output device.
+ // Implementations that don't yet support this should return an empty string.
+ virtual std::string GetDefaultOutputDeviceID();
+
// Get number of input or output streams.
int input_stream_count() { return num_input_streams_; }
int output_stream_count() { return num_output_streams_; }
diff --git a/media/audio/audio_manager_unittest.cc b/media/audio/audio_manager_unittest.cc
new file mode 100644
index 0000000000..96300c9a83
--- /dev/null
+++ b/media/audio/audio_manager_unittest.cc
@@ -0,0 +1,145 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/audio/audio_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_LINUX)
+#include "media/audio/linux/audio_manager_linux.h"
+#endif // defined(OS_LINUX)
+
+#if defined(OS_WIN)
+#include "media/audio/win/audio_manager_win.h"
+#endif // defined(OS_WIN)
+
+#if defined(USE_PULSEAUDIO)
+#include "media/audio/pulse/audio_manager_pulse.h"
+#endif // defined(USE_PULSEAUDIO)
+
+namespace media {
+
+void GetAudioOutputDeviceNamesImpl(AudioManager* audio_manager) {
+ AudioDeviceNames device_names;
+ audio_manager->GetAudioOutputDeviceNames(&device_names);
+
+ VLOG(2) << "Got " << device_names.size() << " audio output devices.";
+ for (AudioDeviceNames::iterator it = device_names.begin();
+ it != device_names.end();
+ ++it) {
+ EXPECT_FALSE(it->unique_id.empty());
+ EXPECT_FALSE(it->device_name.empty());
+ VLOG(2) << "Device ID(" << it->unique_id << "), label: " << it->device_name;
+ }
+}
+
+// So that tests herein can be friends of AudioManagerWin.
+//
+// TODO(joi): Make this go away by unifying audio_manager_unittest.cc
+// and audio_input_device_unittest.cc
+class AudioManagerTest : public ::testing::Test {
+ public:
+ bool SetupForSecondTest(AudioManager* amw) {
+#if defined(OS_WIN)
+ AudioManagerWin* audio_manager_win = static_cast<AudioManagerWin*>(amw);
+ if (audio_manager_win->enumeration_type() ==
+ AudioManagerWin::kWaveEnumeration) {
+ // This will be true only if running on Windows XP.
+ VLOG(2) << "AudioManagerWin on WinXP; nothing more to test.";
+ } else {
+ VLOG(2) << "Testing AudioManagerWin in fallback WinXP mode.";
+ audio_manager_win->SetEnumerationType(AudioManagerWin::kWaveEnumeration);
+ return true;
+ }
+#endif // defined(OS_WIN)
+ return false;
+ }
+};
+
+TEST_F(AudioManagerTest, GetAudioOutputDeviceNames) {
+ // On Linux, we may be able to test both the Alsa and Pulseaudio
+ // versions of the audio manager.
+#if defined(USE_PULSEAUDIO)
+ {
+ VLOG(2) << "Testing AudioManagerPulse.";
+ scoped_ptr<AudioManager> pulse_audio_manager(AudioManagerPulse::Create());
+ if (pulse_audio_manager.get())
+ GetAudioOutputDeviceNamesImpl(pulse_audio_manager.get());
+ else
+ LOG(WARNING) << "No pulseaudio on this system.";
+ }
+#endif // defined(USE_PULSEAUDIO)
+#if defined(USE_ALSA)
+ {
+ VLOG(2) << "Testing AudioManagerLinux.";
+ scoped_ptr<AudioManager> alsa_audio_manager(new AudioManagerLinux());
+ GetAudioOutputDeviceNamesImpl(alsa_audio_manager.get());
+ }
+#endif // defined(USE_ALSA)
+
+#if defined(OS_MACOSX)
+ VLOG(2) << "Testing platform-default AudioManager.";
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ GetAudioOutputDeviceNamesImpl(audio_manager.get());
+#endif // defined(OS_MACOSX)
+
+#if defined(OS_WIN)
+ {
+ // TODO(joi): Unify the tests in audio_input_device_unittest.cc
+ // with the tests in this file, and reuse the Windows-specific
+ // bits from that file.
+ VLOG(2) << "Testing AudioManagerWin in its default mode.";
+ scoped_ptr<AudioManager> audio_manager_win(AudioManager::Create());
+ GetAudioOutputDeviceNamesImpl(audio_manager_win.get());
+
+ if (SetupForSecondTest(audio_manager_win.get())) {
+ GetAudioOutputDeviceNamesImpl(audio_manager_win.get());
+ }
+ }
+#endif // defined(OS_WIN)
+}
+
+TEST_F(AudioManagerTest, GetDefaultOutputStreamParameters) {
+#if defined(OS_WIN) || defined(OS_MACOSX)
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ ASSERT_TRUE(audio_manager);
+ if (!audio_manager->HasAudioOutputDevices())
+ return;
+
+ AudioParameters params = audio_manager->GetDefaultOutputStreamParameters();
+ EXPECT_TRUE(params.IsValid());
+#endif // defined(OS_WIN) || defined(OS_MACOSX)
+}
+
+TEST_F(AudioManagerTest, GetAssociatedOutputDeviceID) {
+#if defined(OS_WIN) || defined(OS_MACOSX)
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ ASSERT_TRUE(audio_manager);
+ if (!audio_manager->HasAudioOutputDevices() ||
+ !audio_manager->HasAudioInputDevices()) {
+ return;
+ }
+
+ AudioDeviceNames device_names;
+ audio_manager->GetAudioInputDeviceNames(&device_names);
+ bool found_an_associated_device = false;
+ for (AudioDeviceNames::iterator it = device_names.begin();
+ it != device_names.end();
+ ++it) {
+ EXPECT_FALSE(it->unique_id.empty());
+ EXPECT_FALSE(it->device_name.empty());
+ std::string output_device_id(
+ audio_manager->GetAssociatedOutputDeviceID(it->unique_id));
+ if (!output_device_id.empty()) {
+ VLOG(2) << it->unique_id << " matches with " << output_device_id;
+ found_an_associated_device = true;
+ }
+ }
+
+ EXPECT_TRUE(found_an_associated_device);
+#endif // defined(OS_WIN) || defined(OS_MACOSX)
+}
+
+} // namespace media
diff --git a/media/audio/audio_output_controller.cc b/media/audio/audio_output_controller.cc
index f7f4cf8240..1784c9b69a 100644
--- a/media/audio/audio_output_controller.cc
+++ b/media/audio/audio_output_controller.cc
@@ -32,14 +32,17 @@ static const int kPowerMeasurementsPerSecond = 30;
const int AudioOutputController::kPollNumAttempts = 3;
const int AudioOutputController::kPollPauseInMilliseconds = 3;
-AudioOutputController::AudioOutputController(AudioManager* audio_manager,
- EventHandler* handler,
- const AudioParameters& params,
- const std::string& input_device_id,
- SyncReader* sync_reader)
+AudioOutputController::AudioOutputController(
+ AudioManager* audio_manager,
+ EventHandler* handler,
+ const AudioParameters& params,
+ const std::string& output_device_id,
+ const std::string& input_device_id,
+ SyncReader* sync_reader)
: audio_manager_(audio_manager),
params_(params),
handler_(handler),
+ output_device_id_(output_device_id),
input_device_id_(input_device_id),
stream_(NULL),
diverting_to_stream_(NULL),
@@ -67,6 +70,7 @@ scoped_refptr<AudioOutputController> AudioOutputController::Create(
AudioManager* audio_manager,
EventHandler* event_handler,
const AudioParameters& params,
+ const std::string& output_device_id,
const std::string& input_device_id,
SyncReader* sync_reader) {
DCHECK(audio_manager);
@@ -76,7 +80,8 @@ scoped_refptr<AudioOutputController> AudioOutputController::Create(
return NULL;
scoped_refptr<AudioOutputController> controller(new AudioOutputController(
- audio_manager, event_handler, params, input_device_id, sync_reader));
+ audio_manager, event_handler, params, output_device_id, input_device_id,
+ sync_reader));
controller->message_loop_->PostTask(FROM_HERE, base::Bind(
&AudioOutputController::DoCreate, controller, false));
return controller;
@@ -114,8 +119,10 @@ void AudioOutputController::DoCreate(bool is_for_device_change) {
DoStopCloseAndClearStream(); // Calls RemoveOutputDeviceChangeListener().
DCHECK_EQ(kEmpty, state_);
- stream_ = diverting_to_stream_ ? diverting_to_stream_ :
- audio_manager_->MakeAudioOutputStreamProxy(params_, input_device_id_);
+ stream_ = diverting_to_stream_ ?
+ diverting_to_stream_ :
+ audio_manager_->MakeAudioOutputStreamProxy(params_, output_device_id_,
+ input_device_id_);
if (!stream_) {
state_ = kError;
handler_->OnError();
diff --git a/media/audio/audio_output_controller.h b/media/audio/audio_output_controller.h
index 38a2c03f59..387214e1d0 100644
--- a/media/audio/audio_output_controller.h
+++ b/media/audio/audio_output_controller.h
@@ -101,10 +101,14 @@ class MEDIA_EXPORT AudioOutputController
// thread, and if this is successful, the |event_handler| will receive an
// OnCreated() call from the same audio manager thread. |audio_manager| must
// outlive AudioOutputController.
+ // The |output_device_id| can be either empty (default device) or specify a
+ // specific hardware device for audio output. The |input_device_id| is
+ // used only for unified audio when opening up input and output at the same
+ // time (controlled by |params.input_channel_count()|).
static scoped_refptr<AudioOutputController> Create(
AudioManager* audio_manager, EventHandler* event_handler,
- const AudioParameters& params, const std::string& input_device_id,
- SyncReader* sync_reader);
+ const AudioParameters& params, const std::string& output_device_id,
+ const std::string& input_device_id, SyncReader* sync_reader);
// Methods to control playback of the stream.
@@ -166,6 +170,7 @@ class MEDIA_EXPORT AudioOutputController
AudioOutputController(AudioManager* audio_manager, EventHandler* handler,
const AudioParameters& params,
+ const std::string& output_device_id,
const std::string& input_device_id,
SyncReader* sync_reader);
@@ -198,8 +203,12 @@ class MEDIA_EXPORT AudioOutputController
const AudioParameters params_;
EventHandler* const handler_;
+ // Specifies the device id of the output device to open or empty for the
+ // default output device.
+ const std::string output_device_id_;
+
// Used by the unified IO to open the correct input device.
- std::string input_device_id_;
+ const std::string input_device_id_;
AudioOutputStream* stream_;
diff --git a/media/audio/audio_output_controller_unittest.cc b/media/audio/audio_output_controller_unittest.cc
index 128cc07716..37305c3978 100644
--- a/media/audio/audio_output_controller_unittest.cc
+++ b/media/audio/audio_output_controller_unittest.cc
@@ -29,8 +29,6 @@ static const int kSampleRate = AudioParameters::kAudioCDSampleRate;
static const int kBitsPerSample = 16;
static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
static const int kSamplesPerPacket = kSampleRate / 100;
-static const int kHardwareBufferSize = kSamplesPerPacket *
- ChannelLayoutToChannelCount(kChannelLayout) * kBitsPerSample / 8;
static const double kTestVolume = 0.25;
class MockAudioOutputControllerEventHandler
@@ -122,7 +120,7 @@ class AudioOutputControllerTest : public testing::Test {
controller_ = AudioOutputController::Create(
audio_manager_.get(), &mock_event_handler_, params_, std::string(),
- &mock_sync_reader_);
+ std::string(), &mock_sync_reader_);
if (controller_.get())
controller_->SetVolume(kTestVolume);
diff --git a/media/audio/audio_output_dispatcher.cc b/media/audio/audio_output_dispatcher.cc
index 06206d7be7..a151c449f0 100644
--- a/media/audio/audio_output_dispatcher.cc
+++ b/media/audio/audio_output_dispatcher.cc
@@ -11,10 +11,12 @@ namespace media {
AudioOutputDispatcher::AudioOutputDispatcher(
AudioManager* audio_manager,
const AudioParameters& params,
+ const std::string& output_device_id,
const std::string& input_device_id)
: audio_manager_(audio_manager),
message_loop_(base::MessageLoop::current()),
params_(params),
+ output_device_id_(output_device_id),
input_device_id_(input_device_id) {
// We expect to be instantiated on the audio thread. Otherwise the
// message_loop_ member will point to the wrong message loop!
diff --git a/media/audio/audio_output_dispatcher.h b/media/audio/audio_output_dispatcher.h
index a79fd94477..30266ed6a9 100644
--- a/media/audio/audio_output_dispatcher.h
+++ b/media/audio/audio_output_dispatcher.h
@@ -38,6 +38,7 @@ class MEDIA_EXPORT AudioOutputDispatcher
public:
AudioOutputDispatcher(AudioManager* audio_manager,
const AudioParameters& params,
+ const std::string& output_device_id,
const std::string& input_device_id);
// Called by AudioOutputProxy to open the stream.
@@ -79,6 +80,7 @@ class MEDIA_EXPORT AudioOutputDispatcher
AudioManager* audio_manager_;
base::MessageLoop* message_loop_;
AudioParameters params_;
+ const std::string output_device_id_;
const std::string input_device_id_;
private:
diff --git a/media/audio/audio_output_dispatcher_impl.cc b/media/audio/audio_output_dispatcher_impl.cc
index 1df8e7ddd5..bcdcd65146 100644
--- a/media/audio/audio_output_dispatcher_impl.cc
+++ b/media/audio/audio_output_dispatcher_impl.cc
@@ -19,9 +19,11 @@ namespace media {
AudioOutputDispatcherImpl::AudioOutputDispatcherImpl(
AudioManager* audio_manager,
const AudioParameters& params,
+ const std::string& output_device_id,
const std::string& input_device_id,
const base::TimeDelta& close_delay)
- : AudioOutputDispatcher(audio_manager, params, input_device_id),
+ : AudioOutputDispatcher(audio_manager, params, output_device_id,
+ input_device_id),
pause_delay_(base::TimeDelta::FromMicroseconds(
2 * params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
static_cast<float>(params.sample_rate()))),
@@ -168,7 +170,7 @@ void AudioOutputDispatcherImpl::Shutdown() {
bool AudioOutputDispatcherImpl::CreateAndOpenStream() {
DCHECK_EQ(base::MessageLoop::current(), message_loop_);
AudioOutputStream* stream = audio_manager_->MakeAudioOutputStream(
- params_, input_device_id_);
+ params_, output_device_id_, input_device_id_);
if (!stream)
return false;
diff --git a/media/audio/audio_output_dispatcher_impl.h b/media/audio/audio_output_dispatcher_impl.h
index 06fe3ebeaf..b59f835f9b 100644
--- a/media/audio/audio_output_dispatcher_impl.h
+++ b/media/audio/audio_output_dispatcher_impl.h
@@ -35,6 +35,7 @@ class MEDIA_EXPORT AudioOutputDispatcherImpl : public AudioOutputDispatcher {
// the audio device is closed.
AudioOutputDispatcherImpl(AudioManager* audio_manager,
const AudioParameters& params,
+ const std::string& output_device_id,
const std::string& input_device_id,
const base::TimeDelta& close_delay);
diff --git a/media/audio/audio_output_proxy_unittest.cc b/media/audio/audio_output_proxy_unittest.cc
index de95b0661e..1806ce6613 100644
--- a/media/audio/audio_output_proxy_unittest.cc
+++ b/media/audio/audio_output_proxy_unittest.cc
@@ -95,10 +95,14 @@ class MockAudioManager : public AudioManagerBase {
MOCK_METHOD0(HasAudioOutputDevices, bool());
MOCK_METHOD0(HasAudioInputDevices, bool());
MOCK_METHOD0(GetAudioInputDeviceModel, string16());
- MOCK_METHOD2(MakeAudioOutputStream, AudioOutputStream*(
- const AudioParameters& params, const std::string& input_device_id));
- MOCK_METHOD2(MakeAudioOutputStreamProxy, AudioOutputStream*(
- const AudioParameters& params, const std::string& input_device_id));
+ MOCK_METHOD3(MakeAudioOutputStream, AudioOutputStream*(
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id));
+ MOCK_METHOD3(MakeAudioOutputStreamProxy, AudioOutputStream*(
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id));
MOCK_METHOD2(MakeAudioInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
MOCK_METHOD0(ShowAudioInputSettings, void());
@@ -108,14 +112,15 @@ class MockAudioManager : public AudioManagerBase {
MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
const AudioParameters& params));
- MOCK_METHOD2(MakeLowLatencyOutputStream, AudioOutputStream*(
- const AudioParameters& params, const std::string& input_device_id));
+ MOCK_METHOD3(MakeLowLatencyOutputStream, AudioOutputStream*(
+ const AudioParameters& params, const std::string& device_id,
+ const std::string& input_device_id));
MOCK_METHOD2(MakeLinearInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
- MOCK_METHOD1(GetPreferredOutputStreamParameters, AudioParameters(
- const AudioParameters& params));
+ MOCK_METHOD2(GetPreferredOutputStreamParameters, AudioParameters(
+ const std::string& device_id, const AudioParameters& params));
};
class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
@@ -161,6 +166,7 @@ class AudioOutputProxyTest : public testing::Test {
dispatcher_impl_ = new AudioOutputDispatcherImpl(&manager(),
params_,
std::string(),
+ std::string(),
close_delay);
// Necessary to know how long the dispatcher will wait before posting
@@ -186,7 +192,7 @@ class AudioOutputProxyTest : public testing::Test {
void OpenAndClose(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -203,7 +209,7 @@ class AudioOutputProxyTest : public testing::Test {
void StartAndStop(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -229,7 +235,7 @@ class AudioOutputProxyTest : public testing::Test {
void CloseAfterStop(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -261,7 +267,7 @@ class AudioOutputProxyTest : public testing::Test {
void TwoStreams(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -283,7 +289,7 @@ class AudioOutputProxyTest : public testing::Test {
void OpenFailed(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(false));
@@ -301,7 +307,7 @@ class AudioOutputProxyTest : public testing::Test {
void CreateAndWait(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -328,7 +334,7 @@ class AudioOutputProxyTest : public testing::Test {
MockAudioOutputStream stream1(&manager_, params_);
MockAudioOutputStream stream2(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream1))
.WillOnce(Return(&stream2));
@@ -366,7 +372,7 @@ class AudioOutputProxyTest : public testing::Test {
MockAudioOutputStream stream1(&manager_, params_);
MockAudioOutputStream stream2(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream1))
.WillOnce(Return(&stream2));
@@ -406,7 +412,7 @@ class AudioOutputProxyTest : public testing::Test {
void StartFailed(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -425,7 +431,7 @@ class AudioOutputProxyTest : public testing::Test {
Mock::VerifyAndClear(&stream);
// |stream| is closed at this point. Start() should reopen it again.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.Times(2)
.WillRepeatedly(Return(reinterpret_cast<AudioOutputStream*>(NULL)));
@@ -467,7 +473,8 @@ class AudioOutputResamplerTest : public AudioOutputProxyTest {
AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
16000, 16, 1024);
resampler_ = new AudioOutputResampler(
- &manager(), params_, resampler_params_, std::string(), close_delay);
+ &manager(), params_, resampler_params_, std::string(), std::string(),
+ close_delay);
}
virtual void OnStart() OVERRIDE {
@@ -568,7 +575,7 @@ TEST_F(AudioOutputResamplerTest, StartFailed) { StartFailed(resampler_.get()); }
// ensure AudioOutputResampler falls back to the high latency path.
TEST_F(AudioOutputResamplerTest, LowLatencyCreateFailedFallback) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.Times(2)
.WillOnce(Return(static_cast<AudioOutputStream*>(NULL)))
.WillRepeatedly(Return(&stream));
@@ -588,7 +595,7 @@ TEST_F(AudioOutputResamplerTest, LowLatencyCreateFailedFallback) {
TEST_F(AudioOutputResamplerTest, LowLatencyOpenFailedFallback) {
MockAudioOutputStream failed_stream(&manager_, params_);
MockAudioOutputStream okay_stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.Times(2)
.WillOnce(Return(&failed_stream))
.WillRepeatedly(Return(&okay_stream));
@@ -619,7 +626,7 @@ TEST_F(AudioOutputResamplerTest, HighLatencyFallbackFailed) {
#else
static const int kFallbackCount = 1;
#endif
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.Times(kFallbackCount)
.WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
@@ -630,7 +637,7 @@ TEST_F(AudioOutputResamplerTest, HighLatencyFallbackFailed) {
testing::Property(&AudioParameters::sample_rate, params_.sample_rate()),
testing::Property(
&AudioParameters::frames_per_buffer, params_.frames_per_buffer())),
- _))
+ _, _))
.Times(1)
.WillOnce(Return(&okay_stream));
EXPECT_CALL(okay_stream, Open())
@@ -655,7 +662,7 @@ TEST_F(AudioOutputResamplerTest, AllFallbackFailed) {
#else
static const int kFallbackCount = 2;
#endif
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.Times(kFallbackCount)
.WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
@@ -673,7 +680,7 @@ TEST_F(AudioOutputResamplerTest, LowLatencyOpenEventuallyFails) {
MockAudioOutputStream stream3(&manager_, params_);
// Setup the mock such that all three streams are successfully created.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream1))
.WillOnce(Return(&stream2))
.WillOnce(Return(&stream3))
diff --git a/media/audio/audio_output_resampler.cc b/media/audio/audio_output_resampler.cc
index 6db0e2fb2f..da424ec124 100644
--- a/media/audio/audio_output_resampler.cc
+++ b/media/audio/audio_output_resampler.cc
@@ -147,12 +147,13 @@ static AudioParameters SetupFallbackParams(
AudioOutputResampler::AudioOutputResampler(AudioManager* audio_manager,
const AudioParameters& input_params,
const AudioParameters& output_params,
+ const std::string& output_device_id,
const std::string& input_device_id,
const base::TimeDelta& close_delay)
- : AudioOutputDispatcher(audio_manager, input_params, input_device_id),
+ : AudioOutputDispatcher(audio_manager, input_params, output_device_id,
+ input_device_id),
close_delay_(close_delay),
output_params_(output_params),
- input_device_id_(input_device_id),
streams_opened_(false) {
DCHECK(input_params.IsValid());
DCHECK(output_params.IsValid());
@@ -172,7 +173,8 @@ void AudioOutputResampler::Initialize() {
DCHECK(!streams_opened_);
DCHECK(callbacks_.empty());
dispatcher_ = new AudioOutputDispatcherImpl(
- audio_manager_, output_params_, input_device_id_, close_delay_);
+ audio_manager_, output_params_, output_device_id_, input_device_id_,
+ close_delay_);
}
bool AudioOutputResampler::OpenStream() {
diff --git a/media/audio/audio_output_resampler.h b/media/audio/audio_output_resampler.h
index df9e4320b5..f9a75ac38f 100644
--- a/media/audio/audio_output_resampler.h
+++ b/media/audio/audio_output_resampler.h
@@ -40,6 +40,7 @@ class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
AudioOutputResampler(AudioManager* audio_manager,
const AudioParameters& input_params,
const AudioParameters& output_params,
+ const std::string& output_device_id,
const std::string& input_device_id,
const base::TimeDelta& close_delay);
@@ -74,9 +75,6 @@ class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
// AudioParameters used to setup the output stream.
AudioParameters output_params_;
- // Device ID to be used by the unified IO to open the correct input device.
- const std::string input_device_id_;
-
// Whether any streams have been opened through |dispatcher_|, if so we can't
// fallback on future OpenStream() failures.
bool streams_opened_;
diff --git a/media/audio/cras/audio_manager_cras.cc b/media/audio/cras/audio_manager_cras.cc
index 165d642922..276487557e 100644
--- a/media/audio/cras/audio_manager_cras.cc
+++ b/media/audio/cras/audio_manager_cras.cc
@@ -76,7 +76,10 @@ AudioOutputStream* AudioManagerCras::MakeLinearOutputStream(
}
AudioOutputStream* AudioManagerCras::MakeLowLatencyOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
// TODO(dgreid): Open the correct input device for unified IO.
return MakeOutputStream(params);
@@ -95,7 +98,10 @@ AudioInputStream* AudioManagerCras::MakeLowLatencyInputStream(
}
AudioParameters AudioManagerCras::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) {
+ // TODO(tommi): Support |output_device_id|.
+ DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
static const int kDefaultOutputBufferSize = 512;
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
diff --git a/media/audio/cras/audio_manager_cras.h b/media/audio/cras/audio_manager_cras.h
index fdc5b02688..41e1876ac8 100644
--- a/media/audio/cras/audio_manager_cras.h
+++ b/media/audio/cras/audio_manager_cras.h
@@ -35,6 +35,7 @@ class MEDIA_EXPORT AudioManagerCras : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
@@ -45,6 +46,7 @@ class MEDIA_EXPORT AudioManagerCras : public AudioManagerBase {
virtual ~AudioManagerCras();
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) OVERRIDE;
private:
diff --git a/media/audio/cras/cras_input.cc b/media/audio/cras/cras_input.cc
index a82fe283f7..363dc68227 100644
--- a/media/audio/cras/cras_input.cc
+++ b/media/audio/cras/cras_input.cc
@@ -114,7 +114,6 @@ void CrasInputStream::Start(AudioInputCallback* callback) {
StartAgc();
callback_ = callback;
- LOG(ERROR) << "Input Start";
// Prepare |audio_format| and |stream_params| for the stream we
// will create.
diff --git a/media/audio/ios/audio_manager_ios.h b/media/audio/ios/audio_manager_ios.h
deleted file mode 100644
index 19751502fd..0000000000
--- a/media/audio/ios/audio_manager_ios.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_IOS_AUDIO_MANAGER_IOS_H_
-#define MEDIA_AUDIO_IOS_AUDIO_MANAGER_IOS_H_
-
-#include "base/basictypes.h"
-#include "media/audio/audio_manager_base.h"
-
-namespace media {
-
-class PCMQueueInAudioInputStream;
-
-// iOS implementation of the AudioManager singleton. Supports only audio input.
-class MEDIA_EXPORT AudioManagerIOS : public AudioManagerBase {
- public:
- AudioManagerIOS();
-
- // Implementation of AudioManager.
- virtual bool HasAudioOutputDevices() OVERRIDE;
- virtual bool HasAudioInputDevices() OVERRIDE;
- virtual AudioOutputStream* MakeAudioOutputStream(
- const AudioParameters& params,
- const std::string& input_device_id) OVERRIDE;
- virtual AudioInputStream* MakeAudioInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual AudioParameters GetInputStreamParameters(
- const std::string& device_id) OVERRIDE;
-
- // Implementation of AudioManagerBase.
- virtual AudioOutputStream* MakeLinearOutputStream(
- const AudioParameters& params) OVERRIDE;
- virtual AudioOutputStream* MakeLowLatencyOutputStream(
- const AudioParameters& params,
- const std::string& input_device_id) OVERRIDE;
- virtual AudioInputStream* MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual AudioInputStream* MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual void ReleaseOutputStream(AudioOutputStream* stream) OVERRIDE;
- virtual void ReleaseInputStream(AudioInputStream* stream) OVERRIDE;
-
- protected:
- virtual ~AudioManagerIOS();
-
- virtual AudioParameters GetPreferredOutputStreamParameters(
- const AudioParameters& input_params) OVERRIDE;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AudioManagerIOS);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_IOS_AUDIO_MANAGER_IOS_H_
diff --git a/media/audio/ios/audio_manager_ios.mm b/media/audio/ios/audio_manager_ios.mm
deleted file mode 100644
index 49479302ef..0000000000
--- a/media/audio/ios/audio_manager_ios.mm
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/ios/audio_manager_ios.h"
-
-#import <AudioToolbox/AudioToolbox.h>
-#import <AVFoundation/AVFoundation.h>
-
-#include "base/sys_info.h"
-#include "media/audio/audio_parameters.h"
-#include "media/audio/fake_audio_input_stream.h"
-#include "media/audio/ios/audio_session_util_ios.h"
-#include "media/audio/mac/audio_input_mac.h"
-#include "media/base/channel_layout.h"
-#include "media/base/limits.h"
-
-namespace media {
-
-enum { kMaxInputChannels = 2 };
-
-AudioManagerIOS::AudioManagerIOS() {
-}
-
-AudioManagerIOS::~AudioManagerIOS() {
- Shutdown();
-}
-
-bool AudioManagerIOS::HasAudioOutputDevices() {
- return false;
-}
-
-bool AudioManagerIOS::HasAudioInputDevices() {
- if (!InitAudioSessionIOS())
- return false;
- // Note that the |kAudioSessionProperty_AudioInputAvailable| property is a
- // 32-bit integer, not a boolean.
- UInt32 property_size;
- OSStatus error =
- AudioSessionGetPropertySize(kAudioSessionProperty_AudioInputAvailable,
- &property_size);
- if (error != kAudioSessionNoError)
- return false;
- UInt32 audio_input_is_available = false;
- DCHECK(property_size == sizeof(audio_input_is_available));
- error = AudioSessionGetProperty(kAudioSessionProperty_AudioInputAvailable,
- &property_size,
- &audio_input_is_available);
- return error == kAudioSessionNoError ? audio_input_is_available : false;
-}
-
-AudioParameters AudioManagerIOS::GetInputStreamParameters(
- const std::string& device_id) {
- // TODO(xians): figure out the right input sample rate and buffer size to
- // achieve the best audio performance for iOS devices.
- // TODO(xians): query the native channel layout for the specific device.
- static const int kDefaultSampleRate = 48000;
- static const int kDefaultBufferSize = 2048;
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- kDefaultSampleRate, 16, kDefaultBufferSize);
-}
-
-AudioOutputStream* AudioManagerIOS::MakeAudioOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
- NOTIMPLEMENTED(); // Only input is supported on iOS.
- return NULL;
-}
-
-AudioInputStream* AudioManagerIOS::MakeAudioInputStream(
- const AudioParameters& params, const std::string& device_id) {
- // Current line of iOS devices has only one audio input.
- // Ignore the device_id (unittest uses a test value in it).
- if (!params.IsValid() || (params.channels() > kMaxInputChannels))
- return NULL;
-
- if (params.format() == AudioParameters::AUDIO_FAKE)
- return FakeAudioInputStream::MakeFakeStream(this, params);
- else if (params.format() == AudioParameters::AUDIO_PCM_LINEAR)
- return new PCMQueueInAudioInputStream(this, params);
- return NULL;
-}
-
-AudioOutputStream* AudioManagerIOS::MakeLinearOutputStream(
- const AudioParameters& params) {
- NOTIMPLEMENTED(); // Only input is supported on iOS.
- return NULL;
-}
-
-AudioOutputStream* AudioManagerIOS::MakeLowLatencyOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
- NOTIMPLEMENTED(); // Only input is supported on iOS.
- return NULL;
-}
-
-AudioInputStream* AudioManagerIOS::MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) {
- return MakeAudioInputStream(params, device_id);
-}
-
-AudioInputStream* AudioManagerIOS::MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) {
- NOTIMPLEMENTED(); // Only linear audio input is supported on iOS.
- return MakeAudioInputStream(params, device_id);
-}
-
-
-AudioParameters AudioManagerIOS::GetPreferredOutputStreamParameters(
- const AudioParameters& input_params) {
- // TODO(xians): handle the case when input_params is valid.
- // TODO(xians): figure out the right output sample rate and sample rate to
- // achieve the best audio performance for iOS devices.
- // TODO(xians): add support to --audio-buffer-size flag.
- static const int kDefaultSampleRate = 48000;
- static const int kDefaultBufferSize = 2048;
- if (input_params.IsValid()) {
- NOTREACHED();
- }
-
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- kDefaultSampleRate, 16, kDefaultBufferSize);
-}
-
-// Called by the stream when it has been released by calling Close().
-void AudioManagerIOS::ReleaseOutputStream(AudioOutputStream* stream) {
- NOTIMPLEMENTED(); // Only input is supported on iOS.
-}
-
-// Called by the stream when it has been released by calling Close().
-void AudioManagerIOS::ReleaseInputStream(AudioInputStream* stream) {
- delete stream;
-}
-
-// static
-AudioManager* CreateAudioManager() {
- return new AudioManagerIOS();
-}
-
-} // namespace media
diff --git a/media/audio/ios/audio_manager_ios_unittest.cc b/media/audio/ios/audio_manager_ios_unittest.cc
deleted file mode 100644
index 30ebc04f20..0000000000
--- a/media/audio/ios/audio_manager_ios_unittest.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using namespace media;
-
-// Test that input is supported and output is not.
-TEST(IOSAudioTest, AudioSupport) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- ASSERT_TRUE(NULL != audio_manager.get());
- ASSERT_FALSE(audio_manager->HasAudioOutputDevices());
- ASSERT_TRUE(audio_manager->HasAudioInputDevices());
-}
-
-// Test that input stream can be opened and closed.
-TEST(IOSAudioTest, InputStreamOpenAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- ASSERT_TRUE(NULL != audio_manager.get());
- if (!audio_manager->HasAudioInputDevices())
- return;
- AudioInputStream* ias = audio_manager->MakeAudioInputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 8000, 16, 1024),
- std::string("test_device"));
- ASSERT_TRUE(NULL != ias);
- EXPECT_TRUE(ias->Open());
- ias->Close();
-}
diff --git a/media/audio/ios/audio_session_util_ios.h b/media/audio/ios/audio_session_util_ios.h
deleted file mode 100644
index 175db91fae..0000000000
--- a/media/audio/ios/audio_session_util_ios.h
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_IOS_AUDIO_SESSION_UTIL_IOS_H_
-#define MEDIA_AUDIO_IOS_AUDIO_SESSION_UTIL_IOS_H_
-
-namespace media {
-
-// Initializes and configures the audio session, returning a bool indicating
-// whether initialization was successful. Can be called multiple times.
-// Safe to call from any thread.
-bool InitAudioSessionIOS();
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_IOS_AUDIO_SESSION_UTIL_IOS_H_
diff --git a/media/audio/ios/audio_session_util_ios.mm b/media/audio/ios/audio_session_util_ios.mm
deleted file mode 100644
index a4071a04cc..0000000000
--- a/media/audio/ios/audio_session_util_ios.mm
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/ios/audio_session_util_ios.h"
-
-#include <AVFoundation/AVFoundation.h>
-
-#include "base/logging.h"
-
-namespace media {
-
-bool InitAudioSessionIOS() {
- static bool kSessionInitialized = false;
- static dispatch_once_t once = 0;
- dispatch_once(&once, ^{
- OSStatus error = AudioSessionInitialize(NULL, NULL, NULL, NULL);
- if (error != kAudioSessionNoError)
- DLOG(ERROR) << "AudioSessionInitialize OSStatus error: " << error;
- BOOL result = [[AVAudioSession sharedInstance]
- setCategory:AVAudioSessionCategoryPlayAndRecord
- error:nil];
- if (!result)
- DLOG(ERROR) << "AVAudioSession setCategory failed";
- UInt32 allowMixing = true;
- AudioSessionSetProperty(
- kAudioSessionProperty_OverrideCategoryMixWithOthers,
- sizeof(allowMixing), &allowMixing);
- UInt32 defaultToSpeaker = true;
- AudioSessionSetProperty(
- kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
- sizeof(defaultToSpeaker),
- &defaultToSpeaker);
- // Speech input cannot be used if either of these two conditions fail.
- kSessionInitialized = (error == kAudioSessionNoError) && result;
- });
- return kSessionInitialized;
-}
-
-} // namespace media
diff --git a/media/audio/linux/alsa_output_unittest.cc b/media/audio/linux/alsa_output_unittest.cc
index 32456360f4..82fbab94c1 100644
--- a/media/audio/linux/alsa_output_unittest.cc
+++ b/media/audio/linux/alsa_output_unittest.cc
@@ -83,8 +83,10 @@ class MockAudioManagerLinux : public AudioManagerLinux {
MOCK_METHOD0(HasAudioInputDevices, bool());
MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
const AudioParameters& params));
- MOCK_METHOD2(MakeLowLatencyOutputStream, AudioOutputStream*(
- const AudioParameters& params, const std::string& input_device_id));
+ MOCK_METHOD3(MakeLowLatencyOutputStream, AudioOutputStream*(
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id));
MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
diff --git a/media/audio/linux/audio_manager_linux.cc b/media/audio/linux/audio_manager_linux.cc
index 38253e2e65..7596c2fe25 100644
--- a/media/audio/linux/audio_manager_linux.cc
+++ b/media/audio/linux/audio_manager_linux.cc
@@ -42,9 +42,9 @@ static const int kDefaultSampleRate = 48000;
// hence surround devices are not stored in the list.
static const char* kInvalidAudioInputDevices[] = {
"default",
+ "dmix",
"null",
"pulse",
- "dmix",
"surround",
};
@@ -105,7 +105,13 @@ void AudioManagerLinux::ShowAudioInputSettings() {
void AudioManagerLinux::GetAudioInputDeviceNames(
media::AudioDeviceNames* device_names) {
DCHECK(device_names->empty());
- GetAlsaAudioInputDevices(device_names);
+ GetAlsaAudioDevices(kStreamCapture, device_names);
+}
+
+void AudioManagerLinux::GetAudioOutputDeviceNames(
+ media::AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
+ GetAlsaAudioDevices(kStreamPlayback, device_names);
}
AudioParameters AudioManagerLinux::GetInputStreamParameters(
@@ -117,7 +123,8 @@ AudioParameters AudioManagerLinux::GetInputStreamParameters(
kDefaultSampleRate, 16, kDefaultInputBufferSize);
}
-void AudioManagerLinux::GetAlsaAudioInputDevices(
+void AudioManagerLinux::GetAlsaAudioDevices(
+ StreamType type,
media::AudioDeviceNames* device_names) {
// Constants specified by the ALSA API for device hints.
static const char kPcmInterfaceName[] = "pcm";
@@ -128,37 +135,40 @@ void AudioManagerLinux::GetAlsaAudioInputDevices(
void** hints = NULL;
int error = wrapper_->DeviceNameHint(card, kPcmInterfaceName, &hints);
if (!error) {
- GetAlsaDevicesInfo(hints, device_names);
+ GetAlsaDevicesInfo(type, hints, device_names);
// Destroy the hints now that we're done with it.
wrapper_->DeviceNameFreeHint(hints);
} else {
- DLOG(WARNING) << "GetAudioInputDevices: unable to get device hints: "
+ DLOG(WARNING) << "GetAlsaAudioDevices: unable to get device hints: "
<< wrapper_->StrError(error);
}
}
}
void AudioManagerLinux::GetAlsaDevicesInfo(
- void** hints, media::AudioDeviceNames* device_names) {
+ AudioManagerLinux::StreamType type,
+ void** hints,
+ media::AudioDeviceNames* device_names) {
static const char kIoHintName[] = "IOID";
static const char kNameHintName[] = "NAME";
static const char kDescriptionHintName[] = "DESC";
- static const char kOutputDevice[] = "Output";
+
+ const char* unwanted_device_type = UnwantedDeviceTypeWhenEnumerating(type);
for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
- // Only examine devices that are input capable. Valid values are
+ // Only examine devices of the right type. Valid values are
// "Input", "Output", and NULL which means both input and output.
scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
kIoHintName));
- if (io != NULL && strcmp(kOutputDevice, io.get()) == 0)
+ if (io != NULL && strcmp(unwanted_device_type, io.get()) == 0)
continue;
- // Found an input device, prepend the default device since we always want
- // it to be on the top of the list for all platforms. And there is no
- // duplicate counting here since it is only done if the list is still empty.
- // Note, pulse has exclusively opened the default device, so we must open
- // the device via the "default" moniker.
+ // Found a device, prepend the default device since we always want
+ // it to be on the top of the list for all platforms. And there is
+ // no duplicate counting here since it is only done if the list is
+ // still empty. Note, pulse has exclusively opened the default
+ // device, so we must open the device via the "default" moniker.
if (device_names->empty()) {
device_names->push_front(media::AudioDeviceName(
AudioManagerBase::kDefaultDeviceName,
@@ -170,7 +180,7 @@ void AudioManagerLinux::GetAlsaDevicesInfo(
wrapper_->DeviceNameGetHint(*hint_iter, kNameHintName));
// Find out if the device is available.
- if (IsAlsaDeviceAvailable(unique_device_name.get())) {
+ if (IsAlsaDeviceAvailable(type, unique_device_name.get())) {
// Get the description for the device.
scoped_ptr_malloc<char> desc(wrapper_->DeviceNameGetHint(
*hint_iter, kDescriptionHintName));
@@ -196,25 +206,46 @@ void AudioManagerLinux::GetAlsaDevicesInfo(
}
}
-bool AudioManagerLinux::IsAlsaDeviceAvailable(const char* device_name) {
+// static
+bool AudioManagerLinux::IsAlsaDeviceAvailable(
+ AudioManagerLinux::StreamType type,
+ const char* device_name) {
if (!device_name)
return false;
- // Check if the device is in the list of invalid devices.
- for (size_t i = 0; i < arraysize(kInvalidAudioInputDevices); ++i) {
- if (strncmp(kInvalidAudioInputDevices[i], device_name,
- strlen(kInvalidAudioInputDevices[i])) == 0)
- return false;
+ // We do prefix matches on the device name to see whether to include
+ // it or not.
+ if (type == kStreamCapture) {
+ // Check if the device is in the list of invalid devices.
+ for (size_t i = 0; i < arraysize(kInvalidAudioInputDevices); ++i) {
+ if (strncmp(kInvalidAudioInputDevices[i], device_name,
+ strlen(kInvalidAudioInputDevices[i])) == 0)
+ return false;
+ }
+ return true;
+ } else {
+ DCHECK_EQ(kStreamPlayback, type);
+ // We prefer the device type that maps straight to hardware but
+ // goes through software conversion if needed (e.g. incompatible
+ // sample rate).
+ // TODO(joi): Should we prefer "hw" instead?
+ static const char kDeviceTypeDesired[] = "plughw";
+ return strncmp(kDeviceTypeDesired,
+ device_name,
+ arraysize(kDeviceTypeDesired) - 1) == 0;
}
+}
- return true;
+// static
+const char* AudioManagerLinux::UnwantedDeviceTypeWhenEnumerating(
+ AudioManagerLinux::StreamType wanted_type) {
+ return wanted_type == kStreamPlayback ? "Input" : "Output";
}
-bool AudioManagerLinux::HasAnyAlsaAudioDevice(StreamType stream) {
+bool AudioManagerLinux::HasAnyAlsaAudioDevice(
+ AudioManagerLinux::StreamType stream) {
static const char kPcmInterfaceName[] = "pcm";
static const char kIoHintName[] = "IOID";
- const char* kNotWantedDevice =
- (stream == kStreamPlayback ? "Input" : "Output");
void** hints = NULL;
bool has_device = false;
int card = -1;
@@ -230,7 +261,8 @@ bool AudioManagerLinux::HasAnyAlsaAudioDevice(StreamType stream) {
// "Input", "Output", and NULL which means both input and output.
scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
kIoHintName));
- if (io != NULL && strcmp(kNotWantedDevice, io.get()) == 0)
+ const char* unwanted_type = UnwantedDeviceTypeWhenEnumerating(stream);
+ if (io != NULL && strcmp(unwanted_type, io.get()) == 0)
continue; // Wrong type, skip the device.
// Found an input device.
@@ -258,7 +290,9 @@ AudioOutputStream* AudioManagerLinux::MakeLinearOutputStream(
AudioOutputStream* AudioManagerLinux::MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) {
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
// TODO(xians): Use input_device_id for unified IO.
return MakeOutputStream(params);
@@ -277,7 +311,10 @@ AudioInputStream* AudioManagerLinux::MakeLowLatencyInputStream(
}
AudioParameters AudioManagerLinux::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) {
+ // TODO(tommi): Support |output_device_id|.
+ DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
static const int kDefaultOutputBufferSize = 2048;
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
int sample_rate = kDefaultSampleRate;
diff --git a/media/audio/linux/audio_manager_linux.h b/media/audio/linux/audio_manager_linux.h
index 28abaa116e..2258e81eb9 100644
--- a/media/audio/linux/audio_manager_linux.h
+++ b/media/audio/linux/audio_manager_linux.h
@@ -27,6 +27,8 @@ class MEDIA_EXPORT AudioManagerLinux : public AudioManagerBase {
virtual void ShowAudioInputSettings() OVERRIDE;
virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
OVERRIDE;
+ virtual void GetAudioOutputDeviceNames(media::AudioDeviceNames* device_names)
+ OVERRIDE;
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) OVERRIDE;
@@ -35,6 +37,7 @@ class MEDIA_EXPORT AudioManagerLinux : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
@@ -45,6 +48,7 @@ class MEDIA_EXPORT AudioManagerLinux : public AudioManagerBase {
virtual ~AudioManagerLinux();
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) OVERRIDE;
private:
@@ -53,14 +57,22 @@ class MEDIA_EXPORT AudioManagerLinux : public AudioManagerBase {
kStreamCapture,
};
- // Gets a list of available ALSA input devices.
- void GetAlsaAudioInputDevices(media::AudioDeviceNames* device_names);
+ // Gets a list of available ALSA devices.
+ void GetAlsaAudioDevices(StreamType type,
+ media::AudioDeviceNames* device_names);
- // Gets the ALSA devices' names and ids.
- void GetAlsaDevicesInfo(void** hint, media::AudioDeviceNames* device_names);
+ // Gets the ALSA devices' names and ids that support streams of the
+ // given type.
+ void GetAlsaDevicesInfo(StreamType type,
+ void** hint,
+ media::AudioDeviceNames* device_names);
// Checks if the specific ALSA device is available.
- bool IsAlsaDeviceAvailable(const char* device_name);
+ static bool IsAlsaDeviceAvailable(StreamType type,
+ const char* device_name);
+
+ static const char* UnwantedDeviceTypeWhenEnumerating(
+ StreamType wanted_type);
// Returns true if a device is present for the given stream type.
bool HasAnyAlsaAudioDevice(StreamType stream);
diff --git a/media/audio/mac/audio_auhal_mac_unittest.cc b/media/audio/mac/audio_auhal_mac_unittest.cc
index b4cf8c64cc..9b699ff10f 100644
--- a/media/audio/mac/audio_auhal_mac_unittest.cc
+++ b/media/audio/mac/audio_auhal_mac_unittest.cc
@@ -101,7 +101,7 @@ class AudioOutputStreamWrapper {
samples_per_packet_);
AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params,
- std::string());
+ std::string(), std::string());
EXPECT_TRUE(aos);
return aos;
}
diff --git a/media/audio/mac/audio_input_mac.cc b/media/audio/mac/audio_input_mac.cc
index 06af6d11c1..7930567fd9 100644
--- a/media/audio/mac/audio_input_mac.cc
+++ b/media/audio/mac/audio_input_mac.cc
@@ -4,15 +4,14 @@
#include "media/audio/mac/audio_input_mac.h"
+#include <CoreServices/CoreServices.h>
+
#include "base/basictypes.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "media/audio/audio_manager_base.h"
#include "media/audio/audio_util.h"
-#if !defined(OS_IOS)
-#include <CoreServices/CoreServices.h>
-#endif
namespace media {
diff --git a/media/audio/mac/audio_low_latency_input_mac.cc b/media/audio/mac/audio_low_latency_input_mac.cc
index 17a87b0a7d..d97f453ca9 100644
--- a/media/audio/mac/audio_low_latency_input_mac.cc
+++ b/media/audio/mac/audio_low_latency_input_mac.cc
@@ -35,7 +35,9 @@ static std::ostream& operator<<(std::ostream& os,
// for more details and background regarding this implementation.
AUAudioInputStream::AUAudioInputStream(
- AudioManagerMac* manager, const AudioParameters& params,
+ AudioManagerMac* manager,
+ const AudioParameters& input_params,
+ const AudioParameters& output_params,
AudioDeviceID audio_device_id)
: manager_(manager),
sink_(NULL),
@@ -48,15 +50,15 @@ AUAudioInputStream::AUAudioInputStream(
DCHECK(manager_);
// Set up the desired (output) format specified by the client.
- format_.mSampleRate = params.sample_rate();
+ format_.mSampleRate = input_params.sample_rate();
format_.mFormatID = kAudioFormatLinearPCM;
format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
kLinearPCMFormatFlagIsSignedInteger;
- format_.mBitsPerChannel = params.bits_per_sample();
- format_.mChannelsPerFrame = params.channels();
+ format_.mBitsPerChannel = input_params.bits_per_sample();
+ format_.mChannelsPerFrame = input_params.channels();
format_.mFramesPerPacket = 1; // uncompressed audio
format_.mBytesPerPacket = (format_.mBitsPerChannel *
- params.channels()) / 8;
+ input_params.channels()) / 8;
format_.mBytesPerFrame = format_.mBytesPerPacket;
format_.mReserved = 0;
@@ -68,10 +70,7 @@ AUAudioInputStream::AUAudioInputStream(
// Note that we use the same native buffer size as for the output side here
// since the AUHAL implementation requires that both capture and render side
// use the same buffer size. See http://crbug.com/154352 for more details.
- // TODO(xians): Get the audio parameters from the right device.
- const AudioParameters parameters =
- manager_->GetInputStreamParameters(AudioManagerBase::kDefaultDeviceId);
- number_of_frames_ = parameters.frames_per_buffer();
+ number_of_frames_ = output_params.frames_per_buffer();
DVLOG(1) << "Size of data buffer in frames : " << number_of_frames_;
// Derive size (in bytes) of the buffers that we will render to.
@@ -85,7 +84,7 @@ AUAudioInputStream::AUAudioInputStream(
audio_buffer_list_.mNumberBuffers = 1;
AudioBuffer* audio_buffer = audio_buffer_list_.mBuffers;
- audio_buffer->mNumberChannels = params.channels();
+ audio_buffer->mNumberChannels = input_params.channels();
audio_buffer->mDataByteSize = data_byte_size;
audio_buffer->mData = audio_data_buffer_.get();
@@ -93,9 +92,16 @@ AUAudioInputStream::AUAudioInputStream(
// until a requested size is ready to be sent to the client.
// It is not possible to ask for less than |kAudioFramesPerCallback| number of
// audio frames.
- const size_t requested_size_frames =
- params.GetBytesPerBuffer() / format_.mBytesPerPacket;
- DCHECK_GE(requested_size_frames, number_of_frames_);
+ size_t requested_size_frames =
+ input_params.GetBytesPerBuffer() / format_.mBytesPerPacket;
+ if (requested_size_frames < number_of_frames_) {
+ // For devices that only support a low sample rate like 8kHz, we adjust the
+ // buffer size to match number_of_frames_. The value of number_of_frames_
+ // in this case has not been calculated based on hardware settings but
+ // rather our hardcoded defaults (see ChooseBufferSize).
+ requested_size_frames = number_of_frames_;
+ }
+
requested_size_bytes_ = requested_size_frames * format_.mBytesPerFrame;
DVLOG(1) << "Requested buffer size in bytes : " << requested_size_bytes_;
DLOG_IF(INFO, requested_size_frames > number_of_frames_) << "FIFO is used";
diff --git a/media/audio/mac/audio_low_latency_input_mac.h b/media/audio/mac/audio_low_latency_input_mac.h
index 736bf082f5..04592d2cec 100644
--- a/media/audio/mac/audio_low_latency_input_mac.h
+++ b/media/audio/mac/audio_low_latency_input_mac.h
@@ -57,7 +57,8 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
// The ctor takes all the usual parameters, plus |manager| which is the
// the audio manager who is creating this object.
AUAudioInputStream(AudioManagerMac* manager,
- const AudioParameters& params,
+ const AudioParameters& input_params,
+ const AudioParameters& output_params,
AudioDeviceID audio_device_id);
// The dtor is typically called by the AudioManager only and it is usually
// triggered by calling AudioInputStream::Close().
diff --git a/media/audio/mac/audio_manager_mac.cc b/media/audio/mac/audio_manager_mac.cc
index c0c18ee2cc..8e4b969854 100644
--- a/media/audio/mac/audio_manager_mac.cc
+++ b/media/audio/mac/audio_manager_mac.cc
@@ -81,11 +81,10 @@ bool AudioManagerMac::HasUnifiedDefaultIO() {
return input_id == output_id;
}
+// Retrieves information on audio devices, and prepends the default
+// device to the list if the list is non-empty.
static void GetAudioDeviceInfo(bool is_input,
media::AudioDeviceNames* device_names) {
- DCHECK(device_names);
- device_names->clear();
-
// Query the number of total devices.
AudioObjectPropertyAddress property_address = {
kAudioHardwarePropertyDevices,
@@ -176,6 +175,16 @@ static void GetAudioDeviceInfo(bool is_input,
if (name)
CFRelease(name);
}
+
+ if (!device_names->empty()) {
+ // Prepend the default device to the list since we always want it to be
+ // on the top of the list for all platforms. There is no duplicate
+ // counting here since the default device has been abstracted out before.
+ media::AudioDeviceName name;
+ name.device_name = AudioManagerBase::kDefaultDeviceName;
+ name.unique_id = AudioManagerBase::kDefaultDeviceId;
+ device_names->push_front(name);
+ }
}
static AudioDeviceID GetAudioDeviceIdByUId(bool is_input,
@@ -189,7 +198,7 @@ static AudioDeviceID GetAudioDeviceIdByUId(bool is_input,
UInt32 device_size = sizeof(audio_device_id);
OSStatus result = -1;
- if (device_id == AudioManagerBase::kDefaultDeviceId) {
+ if (device_id == AudioManagerBase::kDefaultDeviceId || device_id.empty()) {
// Default Device.
property_address.mSelector = is_input ?
kAudioHardwarePropertyDefaultInputDevice :
@@ -263,7 +272,7 @@ bool AudioManagerMac::HasAudioInputDevices() {
return HasAudioHardware(kAudioHardwarePropertyDefaultInputDevice);
}
-// TODO(crogers): There are several places on the OSX specific code which
+// TODO(xians): There are several places on the OSX specific code which
// could benefit from these helper functions.
bool AudioManagerMac::GetDefaultInputDevice(
AudioDeviceID* device) {
@@ -397,16 +406,14 @@ int AudioManagerMac::HardwareSampleRate() {
void AudioManagerMac::GetAudioInputDeviceNames(
media::AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
GetAudioDeviceInfo(true, device_names);
- if (!device_names->empty()) {
- // Prepend the default device to the list since we always want it to be
- // on the top of the list for all platforms. There is no duplicate
- // counting here since the default device has been abstracted out before.
- media::AudioDeviceName name;
- name.device_name = AudioManagerBase::kDefaultDeviceName;
- name.unique_id = AudioManagerBase::kDefaultDeviceId;
- device_names->push_front(name);
- }
+}
+
+void AudioManagerMac::GetAudioOutputDeviceNames(
+ media::AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
+ GetAudioDeviceInfo(false, device_names);
}
AudioParameters AudioManagerMac::GetInputStreamParameters(
@@ -443,21 +450,86 @@ AudioParameters AudioManagerMac::GetInputStreamParameters(
sample_rate, 16, buffer_size);
}
+std::string AudioManagerMac::GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) {
+ AudioDeviceID device = GetAudioDeviceIdByUId(true, input_device_id);
+ if (device == kAudioObjectUnknown)
+ return std::string();
+
+ UInt32 size = 0;
+ AudioObjectPropertyAddress pa = {
+ kAudioDevicePropertyRelatedDevices,
+ kAudioDevicePropertyScopeOutput,
+ kAudioObjectPropertyElementMaster
+ };
+ OSStatus result = AudioObjectGetPropertyDataSize(device, &pa, 0, 0, &size);
+ if (result || !size)
+ return std::string();
+
+ int device_count = size / sizeof(AudioDeviceID);
+ scoped_ptr_malloc<AudioDeviceID>
+ devices(reinterpret_cast<AudioDeviceID*>(malloc(size)));
+ result = AudioObjectGetPropertyData(
+ device, &pa, 0, NULL, &size, devices.get());
+ if (result)
+ return std::string();
+
+ for (int i = 0; i < device_count; ++i) {
+ // Get the number of output channels of the device.
+ pa.mSelector = kAudioDevicePropertyStreams;
+ size = 0;
+ result = AudioObjectGetPropertyDataSize(devices.get()[i],
+ &pa,
+ 0,
+ NULL,
+ &size);
+ if (result || !size)
+ continue; // Skip if there aren't any output channels.
+
+ // Get device UID.
+ CFStringRef uid = NULL;
+ size = sizeof(uid);
+ pa.mSelector = kAudioDevicePropertyDeviceUID;
+ result = AudioObjectGetPropertyData(devices.get()[i],
+ &pa,
+ 0,
+ NULL,
+ &size,
+ &uid);
+ if (result || !uid)
+ continue;
+
+ std::string ret(base::SysCFStringRefToUTF8(uid));
+ CFRelease(uid);
+ return ret;
+ }
+
+ // No matching device found.
+ return std::string();
+}
+
AudioOutputStream* AudioManagerMac::MakeLinearOutputStream(
const AudioParameters& params) {
- return MakeLowLatencyOutputStream(params, std::string());
+ return MakeLowLatencyOutputStream(params, std::string(), std::string());
}
AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
// Handle basic output with no input channels.
if (params.input_channels() == 0) {
- AudioDeviceID device = kAudioObjectUnknown;
- GetDefaultOutputDevice(&device);
+ AudioDeviceID device = GetAudioDeviceIdByUId(false, device_id);
+ if (device == kAudioObjectUnknown) {
+ DLOG(ERROR) << "Failed to open output device: " << device_id;
+ return NULL;
+ }
return new AUHALStream(this, params, device);
}
- // TODO(crogers): support more than stereo input.
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
+
+ // TODO(xians): support more than stereo input.
if (params.input_channels() != 2) {
// WebAudio is currently hard-coded to 2 channels so we should not
// see this case.
@@ -494,7 +566,7 @@ AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
// different and arbitrary combinations of input and output devices
// even running at different sample-rates.
// kAudioDeviceUnknown translates to "use default" here.
- // TODO(crogers): consider tracking UMA stats on AUHALStream
+ // TODO(xians): consider tracking UMA stats on AUHALStream
// versus AudioSynchronizedStream.
AudioDeviceID audio_device_id = GetAudioDeviceIdByUId(true, input_device_id);
if (audio_device_id == kAudioObjectUnknown)
@@ -506,6 +578,33 @@ AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
kAudioDeviceUnknown);
}
+std::string AudioManagerMac::GetDefaultOutputDeviceID() {
+ AudioDeviceID device_id = kAudioObjectUnknown;
+ if (!GetDefaultOutputDevice(&device_id))
+ return std::string();
+
+ const AudioObjectPropertyAddress property_address = {
+ kAudioDevicePropertyDeviceUID,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+ CFStringRef device_uid = NULL;
+ UInt32 size = sizeof(device_uid);
+ OSStatus status = AudioObjectGetPropertyData(device_id,
+ &property_address,
+ 0,
+ NULL,
+ &size,
+ &device_uid);
+ if (status != kAudioHardwareNoError || !device_uid)
+ return std::string();
+
+ std::string ret(base::SysCFStringRefToUTF8(device_uid));
+ CFRelease(device_uid);
+
+ return ret;
+}
+
AudioInputStream* AudioManagerMac::MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
@@ -515,27 +614,47 @@ AudioInputStream* AudioManagerMac::MakeLinearInputStream(
AudioInputStream* AudioManagerMac::MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- // Gets the AudioDeviceID that refers to the AudioOutputDevice with the device
+ // Gets the AudioDeviceID that refers to the AudioInputDevice with the device
// unique id. This AudioDeviceID is used to set the device for Audio Unit.
AudioDeviceID audio_device_id = GetAudioDeviceIdByUId(true, device_id);
AudioInputStream* stream = NULL;
- if (audio_device_id != kAudioObjectUnknown)
- stream = new AUAudioInputStream(this, params, audio_device_id);
+ if (audio_device_id != kAudioObjectUnknown) {
+ // AUAudioInputStream needs to be fed the preferred audio output parameters
+ // of the matching device so that the buffer size of both input and output
+ // can be matched. See constructor of AUAudioInputStream for more.
+ const std::string associated_output_device(
+ GetAssociatedOutputDeviceID(device_id));
+ const AudioParameters output_params =
+ GetPreferredOutputStreamParameters(
+ associated_output_device.empty() ?
+ AudioManagerBase::kDefaultDeviceId : associated_output_device,
+ params);
+ stream = new AUAudioInputStream(this, params, output_params,
+ audio_device_id);
+ }
return stream;
}
AudioParameters AudioManagerMac::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) {
+ AudioDeviceID device = GetAudioDeviceIdByUId(false, output_device_id);
+ if (device == kAudioObjectUnknown) {
+ DLOG(ERROR) << "Invalid output device " << output_device_id;
+ return AudioParameters();
+ }
+
int hardware_channels = 2;
- if (!GetDefaultOutputChannels(&hardware_channels)) {
+ if (!GetDeviceChannels(device, kAudioDevicePropertyScopeOutput,
+ &hardware_channels)) {
// Fallback to stereo.
hardware_channels = 2;
}
ChannelLayout channel_layout = GuessChannelLayout(hardware_channels);
- const int hardware_sample_rate = AUAudioOutputStream::HardwareSampleRate();
+ const int hardware_sample_rate = HardwareSampleRateForDevice(device);
const int buffer_size = ChooseBufferSize(hardware_sample_rate);
int input_channels = 0;
@@ -543,7 +662,7 @@ AudioParameters AudioManagerMac::GetPreferredOutputStreamParameters(
input_channels = input_params.input_channels();
if (input_channels > 0) {
- // TODO(crogers): given the limitations of the AudioOutputStream
+ // TODO(xians): given the limitations of the AudioOutputStream
// back-ends used with synchronized I/O, we hard-code to stereo.
// Specifically, this is a limitation of AudioSynchronizedStream which
// can be removed as part of the work to consolidate these back-ends.
diff --git a/media/audio/mac/audio_manager_mac.h b/media/audio/mac/audio_manager_mac.h
index cd3cc2e94b..9757315920 100644
--- a/media/audio/mac/audio_manager_mac.h
+++ b/media/audio/mac/audio_manager_mac.h
@@ -29,19 +29,25 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
virtual bool HasAudioInputDevices() OVERRIDE;
virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
OVERRIDE;
+ virtual void GetAudioOutputDeviceNames(media::AudioDeviceNames* device_names)
+ OVERRIDE;
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) OVERRIDE;
+ virtual std::string GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) OVERRIDE;
// Implementation of AudioManagerBase.
virtual AudioOutputStream* MakeLinearOutputStream(
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ virtual std::string GetDefaultOutputDeviceID() OVERRIDE;
static bool GetDefaultInputDevice(AudioDeviceID* device);
static bool GetDefaultOutputDevice(AudioDeviceID* device);
@@ -64,6 +70,7 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
virtual ~AudioManagerMac();
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) OVERRIDE;
private:
diff --git a/media/audio/mock_audio_manager.cc b/media/audio/mock_audio_manager.cc
index 60898bd61b..2ab2b708da 100644
--- a/media/audio/mock_audio_manager.cc
+++ b/media/audio/mock_audio_manager.cc
@@ -36,15 +36,21 @@ void MockAudioManager::GetAudioInputDeviceNames(
media::AudioDeviceNames* device_names) {
}
+void MockAudioManager::GetAudioOutputDeviceNames(
+ media::AudioDeviceNames* device_names) {
+}
+
media::AudioOutputStream* MockAudioManager::MakeAudioOutputStream(
- const media::AudioParameters& params,
- const std::string& input_device_id) {
+ const media::AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
NOTREACHED();
return NULL;
}
media::AudioOutputStream* MockAudioManager::MakeAudioOutputStreamProxy(
const media::AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) {
NOTREACHED();
return NULL;
@@ -77,9 +83,19 @@ AudioParameters MockAudioManager::GetDefaultOutputStreamParameters() {
return AudioParameters();
}
+AudioParameters MockAudioManager::GetOutputStreamParameters(
+ const std::string& device_id) {
+ return AudioParameters();
+}
+
AudioParameters MockAudioManager::GetInputStreamParameters(
const std::string& device_id) {
return AudioParameters();
}
+std::string MockAudioManager::GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) {
+ return std::string();
+}
+
} // namespace media.
diff --git a/media/audio/mock_audio_manager.h b/media/audio/mock_audio_manager.h
index eee84b1643..7bc30f578e 100644
--- a/media/audio/mock_audio_manager.h
+++ b/media/audio/mock_audio_manager.h
@@ -34,12 +34,17 @@ class MockAudioManager : public media::AudioManager {
virtual void GetAudioInputDeviceNames(
media::AudioDeviceNames* device_names) OVERRIDE;
+ virtual void GetAudioOutputDeviceNames(
+ media::AudioDeviceNames* device_names) OVERRIDE;
+
virtual media::AudioOutputStream* MakeAudioOutputStream(
const media::AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual media::AudioOutputStream* MakeAudioOutputStreamProxy(
const media::AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual media::AudioInputStream* MakeAudioInputStream(
@@ -55,8 +60,12 @@ class MockAudioManager : public media::AudioManager {
AudioDeviceListener* listener) OVERRIDE;
virtual AudioParameters GetDefaultOutputStreamParameters() OVERRIDE;
+ virtual AudioParameters GetOutputStreamParameters(
+ const std::string& device_id) OVERRIDE;
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) OVERRIDE;
+ virtual std::string GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) OVERRIDE;
private:
virtual ~MockAudioManager();
diff --git a/media/audio/openbsd/audio_manager_openbsd.cc b/media/audio/openbsd/audio_manager_openbsd.cc
index 4005aeb98f..a97ea8f625 100644
--- a/media/audio/openbsd/audio_manager_openbsd.cc
+++ b/media/audio/openbsd/audio_manager_openbsd.cc
@@ -92,7 +92,9 @@ AudioOutputStream* AudioManagerOpenBSD::MakeLinearOutputStream(
AudioOutputStream* AudioManagerOpenBSD::MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) {
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
return MakeOutputStream(params);
}
@@ -112,7 +114,10 @@ AudioInputStream* AudioManagerOpenBSD::MakeLowLatencyInputStream(
}
AudioParameters AudioManagerOpenBSD::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) {
+ // TODO(tommi): Support |output_device_id|.
+ DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
static const int kDefaultOutputBufferSize = 512;
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
diff --git a/media/audio/openbsd/audio_manager_openbsd.h b/media/audio/openbsd/audio_manager_openbsd.h
index a1adcb6c86..e4bb3948d2 100644
--- a/media/audio/openbsd/audio_manager_openbsd.h
+++ b/media/audio/openbsd/audio_manager_openbsd.h
@@ -27,6 +27,7 @@ class MEDIA_EXPORT AudioManagerOpenBSD : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
@@ -37,6 +38,7 @@ class MEDIA_EXPORT AudioManagerOpenBSD : public AudioManagerBase {
virtual ~AudioManagerOpenBSD();
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) OVERRIDE;
private:
diff --git a/media/audio/pulse/audio_manager_pulse.cc b/media/audio/pulse/audio_manager_pulse.cc
index dcdd328222..5c09f14905 100644
--- a/media/audio/pulse/audio_manager_pulse.cc
+++ b/media/audio/pulse/audio_manager_pulse.cc
@@ -66,19 +66,13 @@ AudioManagerPulse::~AudioManagerPulse() {
// Implementation of AudioManager.
bool AudioManagerPulse::HasAudioOutputDevices() {
- DCHECK(input_mainloop_);
- DCHECK(input_context_);
- media::AudioDeviceNames devices;
- AutoPulseLock auto_lock(input_mainloop_);
- devices_ = &devices;
- pa_operation* operation = pa_context_get_sink_info_list(
- input_context_, OutputDevicesInfoCallback, this);
- WaitForOperationCompletion(input_mainloop_, operation);
+ AudioDeviceNames devices;
+ GetAudioOutputDeviceNames(&devices);
return !devices.empty();
}
bool AudioManagerPulse::HasAudioInputDevices() {
- media::AudioDeviceNames devices;
+ AudioDeviceNames devices;
GetAudioInputDeviceNames(&devices);
return !devices.empty();
}
@@ -87,18 +81,24 @@ void AudioManagerPulse::ShowAudioInputSettings() {
AudioManagerLinux::ShowLinuxAudioInputSettings();
}
-void AudioManagerPulse::GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) {
+void AudioManagerPulse::GetAudioDeviceNames(
+ bool input, media::AudioDeviceNames* device_names) {
DCHECK(device_names->empty());
DCHECK(input_mainloop_);
DCHECK(input_context_);
AutoPulseLock auto_lock(input_mainloop_);
devices_ = device_names;
- pa_operation* operation = pa_context_get_source_info_list(
+ pa_operation* operation = NULL;
+ if (input) {
+ operation = pa_context_get_source_info_list(
input_context_, InputDevicesInfoCallback, this);
+ } else {
+ operation = pa_context_get_sink_info_list(
+ input_context_, OutputDevicesInfoCallback, this);
+ }
WaitForOperationCompletion(input_mainloop_, operation);
- // Append the default device on the top of the list if the list is not empty.
+ // Prepend the default device if the list is not empty.
if (!device_names->empty()) {
device_names->push_front(
AudioDeviceName(AudioManagerBase::kDefaultDeviceName,
@@ -106,6 +106,16 @@ void AudioManagerPulse::GetAudioInputDeviceNames(
}
}
+void AudioManagerPulse::GetAudioInputDeviceNames(
+ AudioDeviceNames* device_names) {
+ GetAudioDeviceNames(true, device_names);
+}
+
+void AudioManagerPulse::GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) {
+ GetAudioDeviceNames(false, device_names);
+}
+
AudioParameters AudioManagerPulse::GetInputStreamParameters(
const std::string& device_id) {
static const int kDefaultInputBufferSize = 1024;
@@ -123,7 +133,10 @@ AudioOutputStream* AudioManagerPulse::MakeLinearOutputStream(
}
AudioOutputStream* AudioManagerPulse::MakeLowLatencyOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
return MakeOutputStream(params, input_device_id);
}
@@ -141,7 +154,10 @@ AudioInputStream* AudioManagerPulse::MakeLowLatencyInputStream(
}
AudioParameters AudioManagerPulse::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) {
+ // TODO(tommi): Support |output_device_id|.
+ DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
static const int kDefaultOutputBufferSize = 512;
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
@@ -286,8 +302,8 @@ void AudioManagerPulse::InputDevicesInfoCallback(pa_context* context,
// Exclude the output devices.
if (info->monitor_of_sink == PA_INVALID_INDEX) {
- manager->devices_->push_back(media::AudioDeviceName(info->description,
- info->name));
+ manager->devices_->push_back(AudioDeviceName(info->description,
+ info->name));
}
}
@@ -302,8 +318,8 @@ void AudioManagerPulse::OutputDevicesInfoCallback(pa_context* context,
return;
}
- manager->devices_->push_back(media::AudioDeviceName(info->description,
- info->name));
+ manager->devices_->push_back(AudioDeviceName(info->description,
+ info->name));
}
void AudioManagerPulse::SampleRateInfoCallback(pa_context* context,
diff --git a/media/audio/pulse/audio_manager_pulse.h b/media/audio/pulse/audio_manager_pulse.h
index 6dfebaeff3..8fc4310cba 100644
--- a/media/audio/pulse/audio_manager_pulse.h
+++ b/media/audio/pulse/audio_manager_pulse.h
@@ -27,6 +27,8 @@ class MEDIA_EXPORT AudioManagerPulse : public AudioManagerBase {
virtual void ShowAudioInputSettings() OVERRIDE;
virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
OVERRIDE;
+ virtual void GetAudioOutputDeviceNames(media::AudioDeviceNames* device_names)
+ OVERRIDE;
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) OVERRIDE;
@@ -35,6 +37,7 @@ class MEDIA_EXPORT AudioManagerPulse : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
@@ -43,12 +46,15 @@ class MEDIA_EXPORT AudioManagerPulse : public AudioManagerBase {
protected:
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) OVERRIDE;
private:
bool Init();
void DestroyPulse();
+ void GetAudioDeviceNames(bool input, media::AudioDeviceNames* device_names);
+
// Callback to get the devices' info like names, used by GetInputDevices().
static void InputDevicesInfoCallback(pa_context* context,
const pa_source_info* info,
diff --git a/media/audio/win/audio_low_latency_output_win.cc b/media/audio/win/audio_low_latency_output_win.cc
index b2098b0209..c889c03ef2 100644
--- a/media/audio/win/audio_low_latency_output_win.cc
+++ b/media/audio/win/audio_low_latency_output_win.cc
@@ -111,14 +111,26 @@ ChannelLayout WASAPIAudioOutputStream::HardwareChannelLayout() {
}
// static
-int WASAPIAudioOutputStream::HardwareSampleRate() {
+int WASAPIAudioOutputStream::HardwareSampleRate(const std::string& device_id) {
WAVEFORMATPCMEX format;
- return SUCCEEDED(CoreAudioUtil::GetDefaultSharedModeMixFormat(
- eRender, eConsole, &format)) ?
- static_cast<int>(format.Format.nSamplesPerSec) : 0;
+ ScopedComPtr<IAudioClient> client;
+ if (device_id.empty()) {
+ client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
+ } else {
+ ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id));
+ if (!device)
+ return 0;
+ client = CoreAudioUtil::CreateClient(device);
+ }
+
+ if (!client || FAILED(CoreAudioUtil::GetSharedModeMixFormat(client, &format)))
+ return 0;
+
+ return static_cast<int>(format.Format.nSamplesPerSec);
}
WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
+ const std::string& device_id,
const AudioParameters& params,
ERole device_role)
: creating_thread_id_(base::PlatformThread::CurrentId()),
@@ -127,6 +139,7 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
audio_parameters_are_valid_(false),
volume_(1.0),
endpoint_buffer_size_frames_(0),
+ device_id_(device_id),
device_role_(device_role),
share_mode_(GetShareMode()),
num_written_frames_(0),
@@ -142,12 +155,16 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
// channel count are excluded) to the preferred (native) audio parameters.
// Open() will fail if this is not the case.
AudioParameters preferred_params;
- HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(
- eRender, device_role, &preferred_params);
+ HRESULT hr = device_id_.empty() ?
+ CoreAudioUtil::GetPreferredAudioParameters(eRender, device_role,
+ &preferred_params) :
+ CoreAudioUtil::GetPreferredAudioParameters(device_id_,
+ &preferred_params);
audio_parameters_are_valid_ = SUCCEEDED(hr) &&
CompareAudioParametersNoBitDepthOrChannels(params, preferred_params);
LOG_IF(WARNING, !audio_parameters_are_valid_)
- << "Input and preferred parameters are not identical.";
+ << "Input and preferred parameters are not identical. "
+ << "Device id: " << device_id_;
}
// Load the Avrt DLL if not already loaded. Required to support MMCSS.
@@ -203,7 +220,6 @@ bool WASAPIAudioOutputStream::Open() {
if (opened_)
return true;
-
// Audio parameters must be identical to the preferred set of parameters
// if shared mode (default) is utilized.
if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
@@ -214,8 +230,16 @@ bool WASAPIAudioOutputStream::Open() {
}
// Create an IAudioClient interface for the default rendering IMMDevice.
- ScopedComPtr<IAudioClient> audio_client =
- CoreAudioUtil::CreateDefaultClient(eRender, device_role_);
+ ScopedComPtr<IAudioClient> audio_client;
+ if (device_id_.empty()) {
+ audio_client = CoreAudioUtil::CreateDefaultClient(eRender, device_role_);
+ } else {
+ ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id_));
+ DLOG_IF(ERROR, !device) << "Failed to open device: " << device_id_;
+ if (device)
+ audio_client = CoreAudioUtil::CreateClient(device);
+ }
+
if (!audio_client)
return false;
diff --git a/media/audio/win/audio_low_latency_output_win.h b/media/audio/win/audio_low_latency_output_win.h
index b0e990bb1a..7884d8840f 100644
--- a/media/audio/win/audio_low_latency_output_win.h
+++ b/media/audio/win/audio_low_latency_output_win.h
@@ -122,6 +122,7 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
// The ctor takes all the usual parameters, plus |manager| which is the
// the audio manager who is creating this object.
WASAPIAudioOutputStream(AudioManagerWin* manager,
+ const std::string& device_id,
const AudioParameters& params,
ERole device_role);
@@ -149,8 +150,9 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
static ChannelLayout HardwareChannelLayout();
// Retrieves the sample rate the audio engine uses for its internal
- // processing/mixing of shared-mode streams for the default endpoint device.
- static int HardwareSampleRate();
+ // processing/mixing of shared-mode streams. To fetch the settings for the
+ // default device, pass an empty string as the |device_id|.
+ static int HardwareSampleRate(const std::string& device_id);
// Returns AUDCLNT_SHAREMODE_EXCLUSIVE if --enable-exclusive-mode is used
// as command-line flag and AUDCLNT_SHAREMODE_SHARED otherwise (default).
@@ -219,6 +221,9 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
// Length of the audio endpoint buffer.
uint32 endpoint_buffer_size_frames_;
+ // The target device id or an empty string for the default device.
+ const std::string device_id_;
+
// Defines the role that the system has assigned to an audio endpoint device.
ERole device_role_;
diff --git a/media/audio/win/audio_low_latency_output_win_unittest.cc b/media/audio/win/audio_low_latency_output_win_unittest.cc
index 8c3e366c0c..1f78facf91 100644
--- a/media/audio/win/audio_low_latency_output_win_unittest.cc
+++ b/media/audio/win/audio_low_latency_output_win_unittest.cc
@@ -234,7 +234,7 @@ class AudioOutputStreamWrapper {
AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(
AudioParameters(format_, channel_layout_, sample_rate_,
bits_per_sample_, samples_per_packet_),
- std::string());
+ std::string(), std::string());
EXPECT_TRUE(aos);
return aos;
}
@@ -268,7 +268,7 @@ TEST(WASAPIAudioOutputStreamTest, HardwareSampleRate) {
// Default device intended for games, system notification sounds,
// and voice commands.
int fs = static_cast<int>(
- WASAPIAudioOutputStream::HardwareSampleRate());
+ WASAPIAudioOutputStream::HardwareSampleRate(std::string()));
EXPECT_GE(fs, 0);
}
diff --git a/media/audio/win/audio_manager_win.cc b/media/audio/win/audio_manager_win.cc
index a753e554cb..e3a95883b2 100644
--- a/media/audio/win/audio_manager_win.cc
+++ b/media/audio/win/audio_manager_win.cc
@@ -240,27 +240,44 @@ void AudioManagerWin::ShowAudioInputSettings() {
base::LaunchProcess(command_line, base::LaunchOptions(), NULL);
}
-void AudioManagerWin::GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) {
+void AudioManagerWin::GetAudioDeviceNamesImpl(
+ bool input,
+ AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
DCHECK(enumeration_type() != kUninitializedEnumeration);
// Enumerate all active audio-endpoint capture devices.
if (enumeration_type() == kWaveEnumeration) {
// Utilize the Wave API for Windows XP.
- media::GetInputDeviceNamesWinXP(device_names);
+ if (input)
+ GetInputDeviceNamesWinXP(device_names);
+ else
+ GetOutputDeviceNamesWinXP(device_names);
} else {
// Utilize the MMDevice API (part of Core Audio) for Vista and higher.
- media::GetInputDeviceNamesWin(device_names);
+ if (input)
+ GetInputDeviceNamesWin(device_names);
+ else
+ GetOutputDeviceNamesWin(device_names);
}
// Always add default device parameters as first element.
if (!device_names->empty()) {
- media::AudioDeviceName name;
+ AudioDeviceName name;
name.device_name = AudioManagerBase::kDefaultDeviceName;
name.unique_id = AudioManagerBase::kDefaultDeviceId;
device_names->push_front(name);
}
}
+void AudioManagerWin::GetAudioInputDeviceNames(AudioDeviceNames* device_names) {
+ GetAudioDeviceNamesImpl(true, device_names);
+}
+
+void AudioManagerWin::GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) {
+ GetAudioDeviceNamesImpl(false, device_names);
+}
+
AudioParameters AudioManagerWin::GetInputStreamParameters(
const std::string& device_id) {
int sample_rate = 48000;
@@ -280,6 +297,11 @@ AudioParameters AudioManagerWin::GetInputStreamParameters(
sample_rate, 16, kFallbackBufferSize);
}
+std::string AudioManagerWin::GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) {
+ return CoreAudioUtil::GetMatchingOutputDeviceID(input_device_id);
+}
+
// Factory for the implementations of AudioOutputStream for AUDIO_PCM_LINEAR
// mode.
// - PCMWaveOutAudioOutputStream: Based on the waveOut API.
@@ -291,7 +313,7 @@ AudioOutputStream* AudioManagerWin::MakeLinearOutputStream(
return new PCMWaveOutAudioOutputStream(this,
params,
- media::NumberOfWaveOutBuffers(),
+ NumberOfWaveOutBuffers(),
WAVE_MAPPER);
}
@@ -301,25 +323,31 @@ AudioOutputStream* AudioManagerWin::MakeLinearOutputStream(
// - PCMWaveOutAudioOutputStream: Based on the waveOut API.
// - WASAPIAudioOutputStream: Based on Core Audio (WASAPI) API.
AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
if (params.channels() > kWinMaxChannels)
return NULL;
if (!CoreAudioUtil::IsSupported()) {
// Fall back to Windows Wave implementation on Windows XP or lower.
+ DLOG_IF(ERROR, !device_id.empty())
+ << "Opening by device id not supported by PCMWaveOutAudioOutputStream";
DVLOG(1) << "Using WaveOut since WASAPI requires at least Vista.";
return new PCMWaveOutAudioOutputStream(
- this, params, media::NumberOfWaveOutBuffers(), WAVE_MAPPER);
+ this, params, NumberOfWaveOutBuffers(), WAVE_MAPPER);
}
- // TODO(crogers): support more than stereo input.
+ // TODO(rtoy): support more than stereo input.
if (params.input_channels() > 0) {
DVLOG(1) << "WASAPIUnifiedStream is created.";
+ DLOG_IF(ERROR, !device_id.empty())
+ << "Opening by device id not supported by WASAPIUnifiedStream";
return new WASAPIUnifiedStream(this, params, input_device_id);
}
- return new WASAPIAudioOutputStream(this, params, eConsole);
+ return new WASAPIAudioOutputStream(this, device_id, params, eConsole);
}
// Factory for the implementations of AudioInputStream for AUDIO_PCM_LINEAR
@@ -347,55 +375,68 @@ AudioInputStream* AudioManagerWin::MakeLowLatencyInputStream(
return stream;
}
+std::string AudioManagerWin::GetDefaultOutputDeviceID() {
+ if (!CoreAudioUtil::IsSupported())
+ return std::string();
+ return CoreAudioUtil::GetDefaultOutputDeviceID();
+}
+
AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) {
+ const bool core_audio_supported = CoreAudioUtil::IsSupported();
+ DLOG_IF(ERROR, !core_audio_supported && !output_device_id.empty())
+ << "CoreAudio is required to open non-default devices.";
+
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
int sample_rate = 48000;
int buffer_size = kFallbackBufferSize;
int bits_per_sample = 16;
int input_channels = 0;
- bool use_input_params = !CoreAudioUtil::IsSupported();
- if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) {
- // TODO(crogers): tune these values for best possible WebAudio performance.
- // WebRTC works well at 48kHz and a buffer size of 480 samples will be used
- // for this case. Note that exclusive mode is experimental.
- // This sample rate will be combined with a buffer size of 256 samples,
- // which corresponds to an output delay of ~5.33ms.
- sample_rate = 48000;
- buffer_size = 256;
- if (input_params.IsValid())
- channel_layout = input_params.channel_layout();
- } else if (!use_input_params) {
- // Hardware sample-rate on Windows can be configured, so we must query.
- // TODO(henrika): improve possibility to specify an audio endpoint.
- // Use the default device (same as for Wave) for now to be compatible.
- int hw_sample_rate = WASAPIAudioOutputStream::HardwareSampleRate();
-
- AudioParameters params;
- HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(eRender, eConsole,
- &params);
- int hw_buffer_size =
- FAILED(hr) ? kFallbackBufferSize : params.frames_per_buffer();
- channel_layout = WASAPIAudioOutputStream::HardwareChannelLayout();
-
- // TODO(henrika): Figure out the right thing to do here.
- if (hw_sample_rate && hw_buffer_size) {
- sample_rate = hw_sample_rate;
- buffer_size = hw_buffer_size;
+ bool use_input_params = !core_audio_supported;
+ if (core_audio_supported) {
+ if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) {
+ // TODO(rtoy): tune these values for best possible WebAudio
+ // performance. WebRTC works well at 48kHz and a buffer size of 480
+ // samples will be used for this case. Note that exclusive mode is
+ // experimental. This sample rate will be combined with a buffer size of
+ // 256 samples, which corresponds to an output delay of ~5.33ms.
+ sample_rate = 48000;
+ buffer_size = 256;
+ if (input_params.IsValid())
+ channel_layout = input_params.channel_layout();
} else {
- use_input_params = true;
+ AudioParameters params;
+ HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(
+ output_device_id.empty() ?
+ GetDefaultOutputDeviceID() : output_device_id,
+ &params);
+ if (SUCCEEDED(hr)) {
+ bits_per_sample = params.bits_per_sample();
+ buffer_size = params.frames_per_buffer();
+ channel_layout = params.channel_layout();
+ sample_rate = params.sample_rate();
+ } else {
+ use_input_params = true;
+ }
}
}
if (input_params.IsValid()) {
- if (cmd_line->HasSwitch(switches::kTrySupportedChannelLayouts) &&
- CoreAudioUtil::IsSupported()) {
+ if (core_audio_supported &&
+ cmd_line->HasSwitch(switches::kTrySupportedChannelLayouts)) {
// Check if it is possible to open up at the specified input channel
// layout but avoid checking if the specified layout is the same as the
// hardware (preferred) layout. We do this extra check to avoid the
// CoreAudioUtil::IsChannelLayoutSupported() overhead in most cases.
if (input_params.channel_layout() != channel_layout) {
+ // TODO(henrika): Use |output_device_id| here.
+ // Internally, IsChannelLayoutSupported does many of the operations
+ // that have already been done such as opening up a client and fetching
+ // the WAVEFORMATPCMEX format. Ideally we should only do that once and
+ // do it for the requested device. Then here, we can check the layout
+ // from the data we already hold.
if (CoreAudioUtil::IsChannelLayoutSupported(
eRender, eConsole, input_params.channel_layout())) {
// Open up using the same channel layout as the source if it is
@@ -413,10 +454,10 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
// equal to the input values, AudioOutputResampler will skip resampling
// and bit per sample differences (since the input parameters will match
// the output parameters).
- sample_rate = input_params.sample_rate();
bits_per_sample = input_params.bits_per_sample();
- channel_layout = input_params.channel_layout();
buffer_size = input_params.frames_per_buffer();
+ channel_layout = input_params.channel_layout();
+ sample_rate = input_params.sample_rate();
}
}
@@ -435,7 +476,7 @@ AudioInputStream* AudioManagerWin::CreatePCMWaveInAudioInputStream(
std::string xp_device_id = device_id;
if (device_id != AudioManagerBase::kDefaultDeviceId &&
enumeration_type_ == kMMDeviceEnumeration) {
- xp_device_id = media::ConvertToWinXPDeviceId(device_id);
+ xp_device_id = ConvertToWinXPInputDeviceId(device_id);
if (xp_device_id.empty()) {
DLOG(ERROR) << "Cannot find a waveIn device which matches the device ID "
<< device_id;
diff --git a/media/audio/win/audio_manager_win.h b/media/audio/win/audio_manager_win.h
index 65cc73bbd6..b3e8de9286 100644
--- a/media/audio/win/audio_manager_win.h
+++ b/media/audio/win/audio_manager_win.h
@@ -25,26 +25,33 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
virtual bool HasAudioInputDevices() OVERRIDE;
virtual string16 GetAudioInputDeviceModel() OVERRIDE;
virtual void ShowAudioInputSettings() OVERRIDE;
- virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
+ virtual void GetAudioInputDeviceNames(AudioDeviceNames* device_names)
+ OVERRIDE;
+ virtual void GetAudioOutputDeviceNames(AudioDeviceNames* device_names)
OVERRIDE;
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) OVERRIDE;
+ virtual std::string GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) OVERRIDE;
// Implementation of AudioManagerBase.
virtual AudioOutputStream* MakeLinearOutputStream(
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ virtual std::string GetDefaultOutputDeviceID() OVERRIDE;
protected:
virtual ~AudioManagerWin();
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) OVERRIDE;
private:
@@ -55,6 +62,8 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
};
// Allow unit test to modify the utilized enumeration API.
+ // TODO(joi): Collapse these tests into one.
+ friend class AudioManagerTest;
friend class AudioInputDeviceTest;
EnumerationType enumeration_type_;
@@ -76,6 +85,8 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
void CreateDeviceListener();
void DestroyDeviceListener();
+ void GetAudioDeviceNamesImpl(bool input, AudioDeviceNames* device_names);
+
// Listen for output device changes.
scoped_ptr<AudioDeviceListenerWin> output_device_listener_;
diff --git a/media/audio/win/audio_output_win_unittest.cc b/media/audio/win/audio_output_win_unittest.cc
index 4e13d84f3d..7ce146b0ab 100644
--- a/media/audio/win/audio_output_win_unittest.cc
+++ b/media/audio/win/audio_output_win_unittest.cc
@@ -185,7 +185,7 @@ TEST(WinAudioTest, PCMWaveStreamGetAndClose) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
8000, 16, 256),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
oas->Close();
}
@@ -201,29 +201,29 @@ TEST(WinAudioTest, SanityOnMakeParams) {
AudioParameters::Format fmt = AudioParameters::AUDIO_PCM_LINEAR;
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16, 256),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 1024 * 1024, 16, 256),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 80, 256),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16, 256),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, -8000, 16, 256),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16, -100),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16, 0),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16,
media::limits::kMaxSamplesPerPacket + 1),
- std::string()));
+ std::string(), std::string()));
}
// Test that it can be opened and closed.
@@ -237,7 +237,7 @@ TEST(WinAudioTest, PCMWaveStreamOpenAndClose) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
8000, 16, 256),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
EXPECT_TRUE(oas->Open());
oas->Close();
@@ -254,7 +254,7 @@ TEST(WinAudioTest, PCMWaveStreamOpenLimit) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
8000, 16, 1024 * 1024 * 1024),
- std::string());
+ std::string(), std::string());
EXPECT_TRUE(NULL == oas);
if (oas)
oas->Close();
@@ -273,7 +273,7 @@ TEST(WinAudioTest, PCMWaveSlowSource) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
16000, 16, 256),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
TestSourceLaggy test_laggy(2, 90);
EXPECT_TRUE(oas->Open());
@@ -302,7 +302,7 @@ TEST(WinAudioTest, PCMWaveStreamPlaySlowLoop) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
@@ -333,7 +333,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzTone44Kss) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
@@ -362,7 +362,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzTone22Kss) {
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate / 2, 16,
samples_100_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate/2);
@@ -402,7 +402,7 @@ TEST(WinAudioTest, PushSourceFile16KHz) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
kSampleRate, 16, kSamples100ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
EXPECT_TRUE(oas->Open());
@@ -439,7 +439,7 @@ TEST(WinAudioTest, PCMWaveStreamPlayTwice200HzTone44Kss) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
@@ -486,7 +486,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzToneLowLatency) {
AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
CHANNEL_LAYOUT_MONO, sample_rate,
16, n * samples_10_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200, sample_rate);
@@ -520,7 +520,7 @@ TEST(WinAudioTest, PCMWaveStreamPendingBytes) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
NiceMock<MockAudioSource> source;
@@ -680,7 +680,7 @@ TEST(WinAudioTest, SyncSocketBasic) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(params,
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
ASSERT_TRUE(oas->Open());
diff --git a/media/audio/win/audio_unified_win_unittest.cc b/media/audio/win/audio_unified_win_unittest.cc
index cfd17aea14..011c36348b 100644
--- a/media/audio/win/audio_unified_win_unittest.cc
+++ b/media/audio/win/audio_unified_win_unittest.cc
@@ -196,13 +196,13 @@ class AudioUnifiedStreamWrapper {
// Creates an AudioOutputStream object using default parameters.
WASAPIUnifiedStream* Create() {
- return static_cast<WASAPIUnifiedStream*> (CreateOutputStream());
+ return static_cast<WASAPIUnifiedStream*>(CreateOutputStream());
}
// Creates an AudioOutputStream object using default parameters but a
// specified input device.
WASAPIUnifiedStream* Create(const std::string device_id) {
- return static_cast<WASAPIUnifiedStream*> (CreateOutputStream(device_id));
+ return static_cast<WASAPIUnifiedStream*>(CreateOutputStream(device_id));
}
AudioParameters::Format format() const { return params_.format(); }
@@ -223,20 +223,21 @@ class AudioUnifiedStreamWrapper {
CoreAudioUtil::CreateDefaultDevice(eCapture, eConsole);
AudioDeviceName name;
EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device, &name)));
- const std::string& device_id = name.unique_id;
- EXPECT_TRUE(CoreAudioUtil::DeviceIsDefault(eCapture, eConsole, device_id));
+ const std::string& input_device_id = name.unique_id;
+ EXPECT_TRUE(CoreAudioUtil::DeviceIsDefault(eCapture, eConsole,
+ input_device_id));
// Create the unified audio I/O stream using the default input device.
AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params_,
- device_id);
+ "", input_device_id);
EXPECT_TRUE(aos);
return aos;
}
- AudioOutputStream* CreateOutputStream(const std::string& device_id) {
+ AudioOutputStream* CreateOutputStream(const std::string& input_device_id) {
// Create the unified audio I/O stream using the specified input device.
AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params_,
- device_id);
+ "", input_device_id);
EXPECT_TRUE(aos);
return aos;
}
diff --git a/media/audio/win/core_audio_util_win.cc b/media/audio/win/core_audio_util_win.cc
index 73d7b11171..4adfdda090 100644
--- a/media/audio/win/core_audio_util_win.cc
+++ b/media/audio/win/core_audio_util_win.cc
@@ -123,7 +123,7 @@ static std::ostream& operator<<(std::ostream& os,
return os;
}
-bool LoadAudiosesDll() {
+static bool LoadAudiosesDll() {
static const wchar_t* const kAudiosesDLL =
L"%WINDIR%\\system32\\audioses.dll";
@@ -132,7 +132,7 @@ bool LoadAudiosesDll() {
return (LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH) != NULL);
}
-bool CanCreateDeviceEnumerator() {
+static bool CanCreateDeviceEnumerator() {
ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
NULL, CLSCTX_INPROC_SERVER);
@@ -144,6 +144,14 @@ bool CanCreateDeviceEnumerator() {
return SUCCEEDED(hr);
}
+static std::string GetDeviceID(IMMDevice* device) {
+ ScopedCoMem<WCHAR> device_id_com;
+ std::string device_id;
+ if (SUCCEEDED(device->GetId(&device_id_com)))
+ WideToUTF8(device_id_com, wcslen(device_id_com), &device_id);
+ return device_id;
+}
+
bool CoreAudioUtil::IsSupported() {
// It is possible to force usage of WaveXxx APIs by using a command line flag.
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
@@ -263,6 +271,12 @@ ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDefaultDevice(EDataFlow data_flow,
return endpoint_device;
}
+std::string CoreAudioUtil::GetDefaultOutputDeviceID() {
+ DCHECK(IsSupported());
+ ScopedComPtr<IMMDevice> device(CreateDefaultDevice(eRender, eConsole));
+ return device ? GetDeviceID(device) : std::string();
+}
+
ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDevice(
const std::string& device_id) {
DCHECK(IsSupported());
@@ -289,17 +303,14 @@ HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) {
// Retrieve unique name of endpoint device.
// Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
AudioDeviceName device_name;
- ScopedCoMem<WCHAR> endpoint_device_id;
- HRESULT hr = device->GetId(&endpoint_device_id);
- if (FAILED(hr))
- return hr;
- WideToUTF8(endpoint_device_id, wcslen(endpoint_device_id),
- &device_name.unique_id);
+ device_name.unique_id = GetDeviceID(device);
+ if (device_name.unique_id.empty())
+ return E_FAIL;
// Retrieve user-friendly name of endpoint device.
// Example: "Microphone (Realtek High Definition Audio)".
ScopedComPtr<IPropertyStore> properties;
- hr = device->OpenPropertyStore(STGM_READ, properties.Receive());
+ HRESULT hr = device->OpenPropertyStore(STGM_READ, properties.Receive());
if (FAILED(hr))
return hr;
base::win::ScopedPropVariant friendly_name;
@@ -365,6 +376,41 @@ std::string CoreAudioUtil::GetAudioControllerID(IMMDevice* device,
return controller_id;
}
+std::string CoreAudioUtil::GetMatchingOutputDeviceID(
+ const std::string& input_device_id) {
+ ScopedComPtr<IMMDevice> input_device(CreateDevice(input_device_id));
+ if (!input_device)
+ return std::string();
+
+ // See if we can get id of the associated controller.
+ ScopedComPtr<IMMDeviceEnumerator> enumerator(CreateDeviceEnumerator());
+ std::string controller_id(GetAudioControllerID(input_device, enumerator));
+ if (controller_id.empty())
+ return std::string();
+
+ // Now enumerate the available (and active) output devices and see if any of
+ // them is associated with the same controller.
+ ScopedComPtr<IMMDeviceCollection> collection;
+ enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE,
+ collection.Receive());
+ if (!collection)
+ return std::string();
+
+ UINT count = 0;
+ collection->GetCount(&count);
+ ScopedComPtr<IMMDevice> output_device;
+ for (UINT i = 0; i < count; ++i) {
+ collection->Item(i, output_device.Receive());
+ std::string output_controller_id(CoreAudioUtil::GetAudioControllerID(
+ output_device, enumerator));
+ if (output_controller_id == controller_id)
+ break;
+ output_device = NULL;
+ }
+
+ return output_device ? GetDeviceID(output_device) : std::string();
+}
+
std::string CoreAudioUtil::GetFriendlyName(const std::string& device_id) {
DCHECK(IsSupported());
ScopedComPtr<IMMDevice> audio_device = CreateDevice(device_id);
@@ -387,16 +433,8 @@ bool CoreAudioUtil::DeviceIsDefault(EDataFlow flow,
if (!device)
return false;
- ScopedCoMem<WCHAR> default_device_id;
- HRESULT hr = device->GetId(&default_device_id);
- if (FAILED(hr))
- return false;
-
- std::string str_default;
- WideToUTF8(default_device_id, wcslen(default_device_id), &str_default);
- if (device_id.compare(str_default) != 0)
- return false;
- return true;
+ std::string str_default(GetDeviceID(device));
+ return device_id.compare(str_default) == 0;
}
EDataFlow CoreAudioUtil::GetDataFlow(IMMDevice* device) {
diff --git a/media/audio/win/core_audio_util_win.h b/media/audio/win/core_audio_util_win.h
index 154a33a8a4..cdf6dfb11d 100644
--- a/media/audio/win/core_audio_util_win.h
+++ b/media/audio/win/core_audio_util_win.h
@@ -59,6 +59,10 @@ class MEDIA_EXPORT CoreAudioUtil {
static ScopedComPtr<IMMDevice> CreateDefaultDevice(
EDataFlow data_flow, ERole role);
+ // Returns the device id of the default output device or an empty string
+ // if no such device exists or if the default device has been disabled.
+ static std::string GetDefaultOutputDeviceID();
+
// Creates an endpoint device that is specified by a unique endpoint device-
// identification string.
static ScopedComPtr<IMMDevice> CreateDevice(const std::string& device_id);
@@ -80,6 +84,12 @@ class MEDIA_EXPORT CoreAudioUtil {
static std::string GetAudioControllerID(IMMDevice* device,
IMMDeviceEnumerator* enumerator);
+ // Accepts an id of an input device and finds a matching output device id.
+ // If the associated hardware does not have an audio output device (e.g.
+ // a webcam with a mic), an empty string is returned.
+ static std::string GetMatchingOutputDeviceID(
+ const std::string& input_device_id);
+
// Gets the user-friendly name of the endpoint device which is represented
// by a unique id in |device_id|.
static std::string GetFriendlyName(const std::string& device_id);
diff --git a/media/audio/win/core_audio_util_win_unittest.cc b/media/audio/win/core_audio_util_win_unittest.cc
index f18c6118e1..abef868202 100644
--- a/media/audio/win/core_audio_util_win_unittest.cc
+++ b/media/audio/win/core_audio_util_win_unittest.cc
@@ -3,7 +3,9 @@
// found in the LICENSE file.
#include "base/memory/scoped_ptr.h"
+#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/waitable_event.h"
+#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_com_initializer.h"
#include "base/win/scoped_handle.h"
#include "media/audio/win/core_audio_util_win.h"
@@ -475,6 +477,46 @@ TEST_F(CoreAudioUtilWinTest, FillRenderEndpointBufferWithSilence) {
EXPECT_EQ(num_queued_frames, endpoint_buffer_size);
}
-//
+// This test can only succeed on a machine that has audio hardware
+// that has both input and output devices. Currently this is the case
+// with our test bots and the CanRunAudioTest() method should make sure
+// that the test won't run in unsupported environments, but be warned.
+TEST_F(CoreAudioUtilWinTest, GetMatchingOutputDeviceID) {
+ if (!CanRunAudioTest())
+ return;
+
+ bool found_a_pair = false;
+
+ ScopedComPtr<IMMDeviceEnumerator> enumerator(
+ CoreAudioUtil::CreateDeviceEnumerator());
+ ASSERT_TRUE(enumerator);
+
+ // Enumerate all active input and output devices and fetch the ID of
+ // the associated device.
+ ScopedComPtr<IMMDeviceCollection> collection;
+ ASSERT_TRUE(SUCCEEDED(enumerator->EnumAudioEndpoints(eCapture,
+ DEVICE_STATE_ACTIVE, collection.Receive())));
+ UINT count = 0;
+ collection->GetCount(&count);
+ for (UINT i = 0; i < count && !found_a_pair; ++i) {
+ ScopedComPtr<IMMDevice> device;
+ collection->Item(i, device.Receive());
+ base::win::ScopedCoMem<WCHAR> wide_id;
+ device->GetId(&wide_id);
+ std::string id;
+ WideToUTF8(wide_id, wcslen(wide_id), &id);
+ found_a_pair = !CoreAudioUtil::GetMatchingOutputDeviceID(id).empty();
+ }
+
+ EXPECT_TRUE(found_a_pair);
+}
+
+TEST_F(CoreAudioUtilWinTest, GetDefaultOutputDeviceID) {
+ if (!CanRunAudioTest())
+ return;
+
+ std::string default_device_id(CoreAudioUtil::GetDefaultOutputDeviceID());
+ EXPECT_FALSE(default_device_id.empty());
+}
} // namespace media
diff --git a/media/audio/win/device_enumeration_win.cc b/media/audio/win/device_enumeration_win.cc
index 36ed2913ff..50d0b7aa0e 100644
--- a/media/audio/win/device_enumeration_win.cc
+++ b/media/audio/win/device_enumeration_win.cc
@@ -8,13 +8,13 @@
#include "media/audio/win/audio_manager_win.h"
+#include "base/basictypes.h"
#include "base/logging.h"
#include "base/strings/utf_string_conversions.h"
#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_comptr.h"
#include "base/win/scoped_propvariant.h"
-using media::AudioDeviceNames;
using base::win::ScopedComPtr;
using base::win::ScopedCoMem;
@@ -25,7 +25,10 @@ using base::win::ScopedCoMem;
namespace media {
-bool GetInputDeviceNamesWin(AudioDeviceNames* device_names) {
+namespace {
+
+bool GetDeviceNamesWinImpl(EDataFlow data_flow,
+ AudioDeviceNames* device_names) {
// It is assumed that this method is called from a COM thread, i.e.,
// CoInitializeEx() is not called here again to avoid STA/MTA conflicts.
ScopedComPtr<IMMDeviceEnumerator> enumerator;
@@ -37,24 +40,24 @@ bool GetInputDeviceNamesWin(AudioDeviceNames* device_names) {
return false;
}
- // Generate a collection of active audio capture endpoint devices.
+ // Generate a collection of active audio endpoint devices.
// This method will succeed even if all devices are disabled.
ScopedComPtr<IMMDeviceCollection> collection;
- hr = enumerator->EnumAudioEndpoints(eCapture,
+ hr = enumerator->EnumAudioEndpoints(data_flow,
DEVICE_STATE_ACTIVE,
collection.Receive());
if (FAILED(hr))
return false;
- // Retrieve the number of active capture devices.
+ // Retrieve the number of active devices.
UINT number_of_active_devices = 0;
collection->GetCount(&number_of_active_devices);
if (number_of_active_devices == 0)
return true;
- media::AudioDeviceName device;
+ AudioDeviceName device;
- // Loop over all active capture devices and add friendly name and
+ // Loop over all active devices and add friendly name and
// unique ID to the |device_names| list.
for (UINT i = 0; i < number_of_active_devices; ++i) {
// Retrieve unique name of endpoint device.
@@ -92,14 +95,22 @@ bool GetInputDeviceNamesWin(AudioDeviceNames* device_names) {
return true;
}
-bool GetInputDeviceNamesWinXP(AudioDeviceNames* device_names) {
+// The waveform API is weird in that it has completely separate but
+// almost identical functions and structs for input devices vs. output
+// devices. We deal with this by implementing the logic as a templated
+// function that takes the functions and struct type to use as
+// template parameters.
+template <UINT (__stdcall *NumDevsFunc)(),
+ typename CAPSSTRUCT,
+ MMRESULT (__stdcall *DevCapsFunc)(UINT_PTR, CAPSSTRUCT*, UINT)>
+bool GetDeviceNamesWinXPImpl(AudioDeviceNames* device_names) {
// Retrieve the number of active waveform input devices.
- UINT number_of_active_devices = waveInGetNumDevs();
+ UINT number_of_active_devices = NumDevsFunc();
if (number_of_active_devices == 0)
return true;
- media::AudioDeviceName device;
- WAVEINCAPS capabilities;
+ AudioDeviceName device;
+ CAPSSTRUCT capabilities;
MMRESULT err = MMSYSERR_NOERROR;
// Loop over all active capture devices and add friendly name and
@@ -108,7 +119,7 @@ bool GetInputDeviceNamesWinXP(AudioDeviceNames* device_names) {
// there is no safe method to retrieve a unique device name on XP.
for (UINT i = 0; i < number_of_active_devices; ++i) {
// Retrieve the capabilities of the specified waveform-audio input device.
- err = waveInGetDevCaps(i, &capabilities, sizeof(capabilities));
+ err = DevCapsFunc(i, &capabilities, sizeof(capabilities));
if (err != MMSYSERR_NOERROR)
continue;
@@ -118,7 +129,7 @@ bool GetInputDeviceNamesWinXP(AudioDeviceNames* device_names) {
device.device_name = WideToUTF8(capabilities.szPname);
// Store the "unique" name (we use same as friendly name on Windows XP).
- device.unique_id = WideToUTF8(capabilities.szPname);
+ device.unique_id = device.device_name;
// Add combination of user-friendly and unique name to the output list.
device_names->push_back(device);
@@ -127,7 +138,27 @@ bool GetInputDeviceNamesWinXP(AudioDeviceNames* device_names) {
return true;
}
-std::string ConvertToWinXPDeviceId(const std::string& device_id) {
+} // namespace
+
+bool GetInputDeviceNamesWin(AudioDeviceNames* device_names) {
+ return GetDeviceNamesWinImpl(eCapture, device_names);
+}
+
+bool GetOutputDeviceNamesWin(AudioDeviceNames* device_names) {
+ return GetDeviceNamesWinImpl(eRender, device_names);
+}
+
+bool GetInputDeviceNamesWinXP(AudioDeviceNames* device_names) {
+ return GetDeviceNamesWinXPImpl<
+ waveInGetNumDevs, WAVEINCAPSW, waveInGetDevCapsW>(device_names);
+}
+
+bool GetOutputDeviceNamesWinXP(AudioDeviceNames* device_names) {
+ return GetDeviceNamesWinXPImpl<
+ waveOutGetNumDevs, WAVEOUTCAPSW, waveOutGetDevCapsW>(device_names);
+}
+
+std::string ConvertToWinXPInputDeviceId(const std::string& device_id) {
UINT number_of_active_devices = waveInGetNumDevs();
MMRESULT result = MMSYSERR_NOERROR;
diff --git a/media/audio/win/device_enumeration_win.h b/media/audio/win/device_enumeration_win.h
index 3d44670a6d..e61a331842 100644
--- a/media/audio/win/device_enumeration_win.h
+++ b/media/audio/win/device_enumeration_win.h
@@ -11,28 +11,32 @@
namespace media {
-// Returns a list of audio input device structures (name and unique device ID)
-// using the MMDevice API which is supported on Windows Vista and higher.
+// Returns a list of audio input or output device structures (name and
+// unique device ID) using the MMDevice API which is supported on
+// Windows Vista and higher.
// Example record in the output list:
// - device_name: "Microphone (Realtek High Definition Audio)".
// - unique_id: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}"
// This method must be called from a COM thread using MTA.
bool GetInputDeviceNamesWin(media::AudioDeviceNames* device_names);
+bool GetOutputDeviceNamesWin(media::AudioDeviceNames* device_names);
-// Returns a list of audio input device structures (name and unique device ID)
-// using the WaveIn API which is supported on Windows XP and higher.
+// Returns a list of audio input or output device structures (name and
+// unique device ID) using the WaveIn API which is supported on
+// Windows XP and higher.
// Example record in the output list:
// - device_name: "Microphone (Realtek High Defini".
// - unique_id: "Microphone (Realtek High Defini" (same as friendly name).
bool GetInputDeviceNamesWinXP(media::AudioDeviceNames* device_names);
+bool GetOutputDeviceNamesWinXP(media::AudioDeviceNames* device_names);
-// Converts a device ID generated by |GetInputDeviceNamesWin()| to the
+// Converts an input device ID generated by |GetInputDeviceNamesWin()| to the
// corresponding ID by |GetInputDeviceNamesWinXP()|. Returns an empty string on
// failure.
// Example input and output:
// - input ID: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}"
// - output ID: "Microphone (Realtek High Defini"
-std::string ConvertToWinXPDeviceId(const std::string& device_id);
+std::string ConvertToWinXPInputDeviceId(const std::string& device_id);
} // namespace media
diff --git a/media/base/android/audio_decoder_job.cc b/media/base/android/audio_decoder_job.cc
index 59a1a630bc..b0d371be69 100644
--- a/media/base/android/audio_decoder_job.cc
+++ b/media/base/android/audio_decoder_job.cc
@@ -59,10 +59,10 @@ void AudioDecoderJob::ReleaseOutputBuffer(
int outputBufferIndex, size_t size,
const base::TimeDelta& presentation_timestamp,
const MediaDecoderJob::DecoderCallback& callback,
- DecodeStatus status) {
+ MediaCodecStatus status) {
audio_codec_bridge_->PlayOutputBuffer(outputBufferIndex, size);
- if (status != DECODE_OUTPUT_END_OF_STREAM || size != 0u)
+ if (status != MEDIA_CODEC_OUTPUT_END_OF_STREAM || size != 0u)
audio_codec_bridge_->ReleaseOutputBuffer(outputBufferIndex, false);
callback.Run(status, presentation_timestamp, size);
diff --git a/media/base/android/audio_decoder_job.h b/media/base/android/audio_decoder_job.h
index e74f5b8d79..6ad8c28e25 100644
--- a/media/base/android/audio_decoder_job.h
+++ b/media/base/android/audio_decoder_job.h
@@ -43,7 +43,7 @@ class AudioDecoderJob : public MediaDecoderJob {
int outputBufferIndex, size_t size,
const base::TimeDelta& presentation_timestamp,
const MediaDecoderJob::DecoderCallback& callback,
- DecodeStatus status) OVERRIDE;
+ MediaCodecStatus status) OVERRIDE;
virtual bool ComputeTimeToRender() const OVERRIDE;
diff --git a/media/base/android/demuxer_android.h b/media/base/android/demuxer_android.h
new file mode 100644
index 0000000000..33902db728
--- /dev/null
+++ b/media/base/android/demuxer_android.h
@@ -0,0 +1,77 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_DEMUXER_ANDROID_H_
+#define MEDIA_BASE_ANDROID_DEMUXER_ANDROID_H_
+
+#include "base/basictypes.h"
+#include "base/time/time.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class DemuxerAndroidClient;
+struct DemuxerConfigs;
+struct DemuxerData;
+
+// Defines a demuxer with ID-based asynchronous operations.
+//
+// TODO(scherkus): Remove |demuxer_client_id| and Add/RemoveDemuxerClient().
+// It's required in the interim as the Android Media Source implementation uses
+// the MediaPlayerAndroid interface and associated IPC messages.
+class MEDIA_EXPORT DemuxerAndroid {
+ public:
+ // Associates |client| with the demuxer using |demuxer_client_id| as the
+ // identifier. Must be called prior to calling any other methods.
+ virtual void AddDemuxerClient(int demuxer_client_id,
+ DemuxerAndroidClient* client) = 0;
+
+ // Removes the association created by AddDemuxerClient(). Must be called when the
+ // client no longer wants to receive updates.
+ virtual void RemoveDemuxerClient(int demuxer_client_id) = 0;
+
+ // Called to request the current audio/video decoder configurations.
+ virtual void RequestDemuxerConfigs(int demuxer_client_id) = 0;
+
+ // Called to request additional data from the demuxer.
+ virtual void RequestDemuxerData(int demuxer_client_id,
+ media::DemuxerStream::Type type) = 0;
+
+ // Called to request the demuxer to seek to a particular media time.
+ virtual void RequestDemuxerSeek(int demuxer_client_id,
+ base::TimeDelta time_to_seek,
+ unsigned seek_request_id) = 0;
+
+ protected:
+ virtual ~DemuxerAndroid() {}
+};
+
+// Defines the client callback interface.
+class MEDIA_EXPORT DemuxerAndroidClient {
+ public:
+ // Called in response to RequestDemuxerConfigs() and also when the demuxer has
+ // initialized.
+ //
+ // TODO(scherkus): Perhaps clients should be required to call
+ // RequestDemuxerConfigs() to initialize themselves instead of the demuxer
+ // calling this method without being prompted.
+ virtual void OnDemuxerConfigsAvailable(const DemuxerConfigs& params) = 0;
+
+ // Called in response to RequestDemuxerData().
+ virtual void OnDemuxerDataAvailable(const DemuxerData& params) = 0;
+
+ // Called in response to RequestDemuxerSeek().
+ virtual void OnDemuxerSeeked(unsigned seek_request_id) = 0;
+
+ // Called whenever the demuxer has detected a duration change.
+ virtual void OnDemuxerDurationChanged(base::TimeDelta duration) = 0;
+
+ protected:
+ virtual ~DemuxerAndroidClient() {}
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_DEMUXER_ANDROID_H_
diff --git a/media/base/android/demuxer_stream_player_params.h b/media/base/android/demuxer_stream_player_params.h
index 92ef74f210..4a3a04d10e 100644
--- a/media/base/android/demuxer_stream_player_params.h
+++ b/media/base/android/demuxer_stream_player_params.h
@@ -5,7 +5,9 @@
#ifndef MEDIA_BASE_ANDROID_DEMUXER_STREAM_PLAYER_PARAMS_H_
#define MEDIA_BASE_ANDROID_DEMUXER_STREAM_PLAYER_PARAMS_H_
+#if defined(GOOGLE_TV)
#include <string>
+#endif // defined(GOOGLE_TV)
#include <vector>
#include "media/base/audio_decoder_config.h"
@@ -33,7 +35,10 @@ struct MEDIA_EXPORT DemuxerConfigs {
std::vector<uint8> video_extra_data;
int duration_ms;
+
+#if defined(GOOGLE_TV)
std::string key_system;
+#endif // defined(GOOGLE_TV)
};
struct MEDIA_EXPORT AccessUnit {
diff --git a/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java b/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
index b1580d17dc..db73598550 100644
--- a/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
+++ b/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
@@ -8,6 +8,8 @@ import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;
import android.media.MediaCodec;
+import android.media.MediaCodecInfo;
+import android.media.MediaCodecList;
import android.media.MediaCrypto;
import android.media.MediaFormat;
import android.view.Surface;
@@ -15,6 +17,9 @@ import android.util.Log;
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
import org.chromium.base.CalledByNative;
import org.chromium.base.JNINamespace;
@@ -25,12 +30,20 @@ import org.chromium.base.JNINamespace;
*/
@JNINamespace("media")
class MediaCodecBridge {
-
private static final String TAG = "MediaCodecBridge";
// Error code for MediaCodecBridge. Keep this value in sync with
- // INFO_MEDIA_CODEC_ERROR in media_codec_bridge.h.
- private static final int MEDIA_CODEC_ERROR = -1000;
+ // MediaCodecStatus in media_codec_bridge.h.
+ private static final int MEDIA_CODEC_OK = 0;
+ private static final int MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER = 1;
+ private static final int MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER = 2;
+ private static final int MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED = 3;
+ private static final int MEDIA_CODEC_OUTPUT_FORMAT_CHANGED = 4;
+ private static final int MEDIA_CODEC_INPUT_END_OF_STREAM = 5;
+ private static final int MEDIA_CODEC_OUTPUT_END_OF_STREAM = 6;
+ private static final int MEDIA_CODEC_NO_KEY = 7;
+ private static final int MEDIA_CODEC_STOPPED = 8;
+ private static final int MEDIA_CODEC_ERROR = 9;
// After a flush(), dequeueOutputBuffer() can often produce empty presentation timestamps
// for several frames. As a result, the player may find that the time does not increase
@@ -48,15 +61,52 @@ class MediaCodecBridge {
private boolean mFlushed;
private long mLastPresentationTimeUs;
+ private static class DequeueInputResult {
+ private final int mStatus;
+ private final int mIndex;
+
+ private DequeueInputResult(int status, int index) {
+ mStatus = status;
+ mIndex = index;
+ }
+
+ @CalledByNative("DequeueInputResult")
+ private int status() { return mStatus; }
+
+ @CalledByNative("DequeueInputResult")
+ private int index() { return mIndex; }
+ }
+
+ /**
+ * This class represents supported android codec information.
+ */
+ private static class CodecInfo {
+ private final String mCodecType;
+ private final boolean mIsSecureDecoderSupported;
+
+ private CodecInfo(String codecType, boolean isSecureDecoderSupported) {
+ mCodecType = codecType;
+ mIsSecureDecoderSupported = isSecureDecoderSupported;
+ }
+
+ @CalledByNative("CodecInfo")
+ private String codecType() { return mCodecType; }
+
+ @CalledByNative("CodecInfo")
+ private boolean isSecureDecoderSupported() { return mIsSecureDecoderSupported; }
+ }
+
private static class DequeueOutputResult {
+ private final int mStatus;
private final int mIndex;
private final int mFlags;
private final int mOffset;
private final long mPresentationTimeMicroseconds;
private final int mNumBytes;
- private DequeueOutputResult(int index, int flags, int offset,
+ private DequeueOutputResult(int status, int index, int flags, int offset,
long presentationTimeMicroseconds, int numBytes) {
+ mStatus = status;
mIndex = index;
mFlags = flags;
mOffset = offset;
@@ -65,6 +115,9 @@ class MediaCodecBridge {
}
@CalledByNative("DequeueOutputResult")
+ private int status() { return mStatus; }
+
+ @CalledByNative("DequeueOutputResult")
private int index() { return mIndex; }
@CalledByNative("DequeueOutputResult")
@@ -80,22 +133,87 @@ class MediaCodecBridge {
private int numBytes() { return mNumBytes; }
}
- private MediaCodecBridge(String mime) throws IOException {
- mMediaCodec = MediaCodec.createDecoderByType(mime);
+ /**
+ * Get a list of supported android codec mimes.
+ */
+ @CalledByNative
+ private static CodecInfo[] getCodecsInfo() {
+ Map<String, CodecInfo> CodecInfoMap = new HashMap<String, CodecInfo>();
+ int count = MediaCodecList.getCodecCount();
+ for (int i = 0; i < count; ++i) {
+ MediaCodecInfo info = MediaCodecList.getCodecInfoAt(i);
+ if (info.isEncoder()) {
+ continue;
+ }
+
+ String[] supportedTypes = info.getSupportedTypes();
+ String codecString = info.getName();
+ String secureCodecName = codecString + ".secure";
+ boolean secureDecoderSupported = false;
+ try {
+ MediaCodec secureCodec = MediaCodec.createByCodecName(secureCodecName);
+ secureDecoderSupported = true;
+ secureCodec.release();
+ } catch (Exception e) {
+ Log.e(TAG, "Failed to create " + secureCodecName);
+ }
+ for (int j = 0; j < supportedTypes.length; ++j) {
+ if (!CodecInfoMap.containsKey(supportedTypes[j]) || secureDecoderSupported) {
+ CodecInfoMap.put(supportedTypes[j],
+ new CodecInfo(supportedTypes[j], secureDecoderSupported));
+ }
+ }
+ }
+ return CodecInfoMap.values().toArray(
+ new CodecInfo[CodecInfoMap.size()]);
+ }
+
+ private static String getSecureDecoderNameForMime(String mime) {
+ int count = MediaCodecList.getCodecCount();
+ for (int i = 0; i < count; ++i) {
+ MediaCodecInfo info = MediaCodecList.getCodecInfoAt(i);
+ if (info.isEncoder()) {
+ continue;
+ }
+
+ String[] supportedTypes = info.getSupportedTypes();
+ for (int j = 0; j < supportedTypes.length; ++j) {
+ if (supportedTypes[j].equalsIgnoreCase(mime)) {
+ return info.getName() + ".secure";
+ }
+ }
+ }
+
+ return null;
+ }
+
+ private MediaCodecBridge(MediaCodec mediaCodec) {
+ assert(mediaCodec != null);
+ mMediaCodec = mediaCodec;
mLastPresentationTimeUs = 0;
mFlushed = true;
}
@CalledByNative
- private static MediaCodecBridge create(String mime) {
- MediaCodecBridge mediaCodecBridge = null;
+ private static MediaCodecBridge create(String mime, boolean isSecure) {
+ MediaCodec mediaCodec = null;
try {
- mediaCodecBridge = new MediaCodecBridge(mime);
- } catch (IOException e) {
- Log.e(TAG, "Failed to create MediaCodecBridge " + e.toString());
+ // |isSecure| only applies to video decoders.
+ if (mime.startsWith("video") && isSecure) {
+ mediaCodec = MediaCodec.createByCodecName(getSecureDecoderNameForMime(mime));
+ } else {
+ mediaCodec = MediaCodec.createDecoderByType(mime);
+ }
+ } catch (Exception e) {
+ Log.e(TAG, "Failed to create MediaCodec: " + mime + ", isSecure: "
+ + isSecure + ", " + e.toString());
+ }
+
+ if (mediaCodec == null) {
+ return null;
}
- return mediaCodecBridge;
+ return new MediaCodecBridge(mediaCodec);
}
@CalledByNative
@@ -113,22 +231,39 @@ class MediaCodecBridge {
}
@CalledByNative
- private int dequeueInputBuffer(long timeoutUs) {
+ private DequeueInputResult dequeueInputBuffer(long timeoutUs) {
+ int status = MEDIA_CODEC_ERROR;
+ int index = -1;
try {
- return mMediaCodec.dequeueInputBuffer(timeoutUs);
+ int index_or_status = mMediaCodec.dequeueInputBuffer(timeoutUs);
+ if (index_or_status >= 0) { // index!
+ status = MEDIA_CODEC_OK;
+ index = index_or_status;
+ } else if (index_or_status == MediaCodec.INFO_TRY_AGAIN_LATER) {
+ Log.e(TAG, "dequeueInputBuffer: MediaCodec.INFO_TRY_AGAIN_LATER");
+ status = MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER;
+ } else {
+ assert(false);
+ }
} catch(Exception e) {
- Log.e(TAG, "Cannot dequeue Input buffer " + e.toString());
+ Log.e(TAG, "Failed to dequeue input buffer: " + e.toString());
}
- return MEDIA_CODEC_ERROR;
+ return new DequeueInputResult(status, index);
}
@CalledByNative
- private void flush() {
- mMediaCodec.flush();
- mFlushed = true;
- if (mAudioTrack != null) {
- mAudioTrack.flush();
+ private int flush() {
+ try {
+ mFlushed = true;
+ if (mAudioTrack != null) {
+ mAudioTrack.flush();
+ }
+ mMediaCodec.flush();
+ } catch(IllegalStateException e) {
+ Log.e(TAG, "Failed to flush MediaCodec " + e.toString());
+ return MEDIA_CODEC_ERROR;
}
+ return MEDIA_CODEC_OK;
}
@CalledByNative
@@ -160,18 +295,20 @@ class MediaCodecBridge {
}
@CalledByNative
- private void queueInputBuffer(
+ private int queueInputBuffer(
int index, int offset, int size, long presentationTimeUs, int flags) {
resetLastPresentationTimeIfNeeded(presentationTimeUs);
try {
mMediaCodec.queueInputBuffer(index, offset, size, presentationTimeUs, flags);
- } catch(IllegalStateException e) {
- Log.e(TAG, "Failed to queue input buffer " + e.toString());
+ } catch(Exception e) {
+ Log.e(TAG, "Failed to queue input buffer: " + e.toString());
+ return MEDIA_CODEC_ERROR;
}
+ return MEDIA_CODEC_OK;
}
@CalledByNative
- private void queueSecureInputBuffer(
+ private int queueSecureInputBuffer(
int index, int offset, byte[] iv, byte[] keyId, int[] numBytesOfClearData,
int[] numBytesOfEncryptedData, int numSubSamples, long presentationTimeUs) {
resetLastPresentationTimeIfNeeded(presentationTimeUs);
@@ -180,9 +317,19 @@ class MediaCodecBridge {
cryptoInfo.set(numSubSamples, numBytesOfClearData, numBytesOfEncryptedData,
keyId, iv, MediaCodec.CRYPTO_MODE_AES_CTR);
mMediaCodec.queueSecureInputBuffer(index, offset, cryptoInfo, presentationTimeUs, 0);
+ } catch (MediaCodec.CryptoException e) {
+ Log.e(TAG, "Failed to queue secure input buffer: " + e.toString());
+ // TODO(xhwang): Replace hard coded value with constant/enum.
+ if (e.getErrorCode() == 1) {
+ Log.e(TAG, "No key available.");
+ return MEDIA_CODEC_NO_KEY;
+ }
+ return MEDIA_CODEC_ERROR;
} catch(IllegalStateException e) {
- Log.e(TAG, "Failed to queue secure input buffer " + e.toString());
+ Log.e(TAG, "Failed to queue secure input buffer: " + e.toString());
+ return MEDIA_CODEC_ERROR;
}
+ return MEDIA_CODEC_OK;
}
@CalledByNative
@@ -198,9 +345,10 @@ class MediaCodecBridge {
@CalledByNative
private DequeueOutputResult dequeueOutputBuffer(long timeoutUs) {
MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
- int index = MEDIA_CODEC_ERROR;
+ int status = MEDIA_CODEC_ERROR;
+ int index = -1;
try {
- index = mMediaCodec.dequeueOutputBuffer(info, timeoutUs);
+ int index_or_status = mMediaCodec.dequeueOutputBuffer(info, timeoutUs);
if (info.presentationTimeUs < mLastPresentationTimeUs) {
// TODO(qinmin): return a special code through DequeueOutputResult
                // to notify the native code that the frame has a wrong presentation
@@ -208,11 +356,25 @@ class MediaCodecBridge {
info.presentationTimeUs = mLastPresentationTimeUs;
}
mLastPresentationTimeUs = info.presentationTimeUs;
+
+ if (index_or_status >= 0) { // index!
+ status = MEDIA_CODEC_OK;
+ index = index_or_status;
+ } else if (index_or_status == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
+ status = MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED;
+ } else if (index_or_status == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
+ status = MEDIA_CODEC_OUTPUT_FORMAT_CHANGED;
+ } else if (index_or_status == MediaCodec.INFO_TRY_AGAIN_LATER) {
+ status = MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER;
+ } else {
+ assert(false);
+ }
} catch (IllegalStateException e) {
- Log.e(TAG, "Cannot dequeue output buffer " + e.toString());
+ Log.e(TAG, "Failed to dequeue output buffer: " + e.toString());
}
+
return new DequeueOutputResult(
- index, info.flags, info.offset, info.presentationTimeUs, info.size);
+ status, index, info.flags, info.offset, info.presentationTimeUs, info.size);
}
@CalledByNative
diff --git a/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java b/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java
index d588b005f8..1118dd0a32 100644
--- a/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java
+++ b/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java
@@ -31,6 +31,8 @@ import java.util.UUID;
class MediaDrmBridge {
private static final String TAG = "MediaDrmBridge";
+ private static final String SECURITY_LEVEL = "securityLevel";
+ private static final String PRIVACY_MODE = "privacyMode";
private MediaDrm mMediaDrm;
private UUID mSchemeUUID;
private int mNativeMediaDrmBridge;
@@ -57,17 +59,18 @@ class MediaDrmBridge {
return new UUID(mostSigBits, leastSigBits);
}
- private MediaDrmBridge(UUID schemeUUID, int nativeMediaDrmBridge) {
- try {
- mSchemeUUID = schemeUUID;
- mMediaDrm = new MediaDrm(schemeUUID);
- mNativeMediaDrmBridge = nativeMediaDrmBridge;
- mMediaDrm.setPropertyString("privacyMode", "enable");
- mMediaDrm.setOnEventListener(new MediaDrmListener());
- mHandler = new Handler();
- } catch (android.media.UnsupportedSchemeException e) {
- Log.e(TAG, "Unsupported DRM scheme " + e.toString());
- }
+ private MediaDrmBridge(UUID schemeUUID, String securityLevel, int nativeMediaDrmBridge)
+ throws android.media.UnsupportedSchemeException {
+ mSchemeUUID = schemeUUID;
+ mMediaDrm = new MediaDrm(schemeUUID);
+ mHandler = new Handler();
+ mNativeMediaDrmBridge = nativeMediaDrmBridge;
+ mMediaDrm.setOnEventListener(new MediaDrmListener());
+ mMediaDrm.setPropertyString(PRIVACY_MODE, "enable");
+ String currentSecurityLevel = mMediaDrm.getPropertyString(SECURITY_LEVEL);
+ Log.e(TAG, "Security level: current " + currentSecurityLevel + ", new " + securityLevel);
+ if (!securityLevel.equals(currentSecurityLevel))
+ mMediaDrm.setPropertyString(SECURITY_LEVEL, securityLevel);
}
/**
@@ -113,10 +116,10 @@ class MediaDrmBridge {
final byte[] sessionId = mMediaDrm.openSession();
mSessionId = new String(sessionId, "UTF-8");
} catch (android.media.NotProvisionedException e) {
- Log.e(TAG, "Cannot open a new session " + e.toString());
+ Log.e(TAG, "Cannot open a new session: " + e.toString());
return true;
} catch (Exception e) {
- Log.e(TAG, "Cannot open a new session " + e.toString());
+ Log.e(TAG, "Cannot open a new session: " + e.toString());
return false;
}
@@ -124,19 +127,40 @@ class MediaDrmBridge {
return createMediaCrypto();
}
+ @CalledByNative
+ private static boolean isCryptoSchemeSupported(byte[] schemeUUID, String containerMimeType) {
+ UUID cryptoScheme = getUUIDFromBytes(schemeUUID);
+ return MediaDrm.isCryptoSchemeSupported(cryptoScheme);
+ }
+
/**
* Create a new MediaDrmBridge from the crypto scheme UUID.
*
* @param schemeUUID Crypto scheme UUID.
+ * @param securityLevel Security level to be used.
* @param nativeMediaDrmBridge Native object of this class.
*/
@CalledByNative
- private static MediaDrmBridge create(byte[] schemeUUID, int nativeMediaDrmBridge) {
+ private static MediaDrmBridge create(
+ byte[] schemeUUID, String securityLevel, int nativeMediaDrmBridge) {
UUID cryptoScheme = getUUIDFromBytes(schemeUUID);
- if (cryptoScheme != null && MediaDrm.isCryptoSchemeSupported(cryptoScheme)) {
- return new MediaDrmBridge(cryptoScheme, nativeMediaDrmBridge);
+ if (cryptoScheme == null || !MediaDrm.isCryptoSchemeSupported(cryptoScheme)) {
+ return null;
}
- return null;
+
+ MediaDrmBridge media_drm_bridge = null;
+ try {
+ media_drm_bridge = new MediaDrmBridge(
+ cryptoScheme, securityLevel, nativeMediaDrmBridge);
+ } catch (android.media.UnsupportedSchemeException e) {
+ Log.e(TAG, "Unsupported DRM scheme: " + e.toString());
+ } catch (java.lang.IllegalArgumentException e) {
+ Log.e(TAG, "Failed to create MediaDrmBridge: " + e.toString());
+ } catch (java.lang.IllegalStateException e) {
+ Log.e(TAG, "Failed to create MediaDrmBridge: " + e.toString());
+ }
+
+ return media_drm_bridge;
}
/**
@@ -154,17 +178,21 @@ class MediaDrmBridge {
private void release() {
if (mMediaCrypto != null) {
mMediaCrypto.release();
+ mMediaCrypto = null;
}
if (mSessionId != null) {
try {
final byte[] session = mSessionId.getBytes("UTF-8");
mMediaDrm.closeSession(session);
} catch (java.io.UnsupportedEncodingException e) {
- Log.e(TAG, "Failed to close session " + e.toString());
+ Log.e(TAG, "Failed to close session: " + e.toString());
}
+ mSessionId = null;
+ }
+ if (mMediaDrm != null) {
+ mMediaDrm.release();
+ mMediaDrm = null;
}
- mMediaDrm.release();
- mMediaDrm = null;
}
/**
@@ -194,7 +222,7 @@ class MediaDrmBridge {
// NotProvisionedException happened during openSession().
if (mSessionId == null) {
if (mPendingInitData != null) {
- Log.e(TAG, "generateKeyRequest is called when another call is pending.");
+ Log.e(TAG, "generateKeyRequest called when another call is pending.");
onKeyError();
return;
}
@@ -224,10 +252,10 @@ class MediaDrmBridge {
} catch (android.media.NotProvisionedException e) {
// MediaDrm.EVENT_PROVISION_REQUIRED is also fired in this case.
// Provisioning is handled in the handler of that event.
- Log.e(TAG, "Cannot get key request " + e.toString());
+ Log.e(TAG, "Cannot get key request: " + e.toString());
return;
} catch (java.io.UnsupportedEncodingException e) {
- Log.e(TAG, "Cannot get key request " + e.toString());
+ Log.e(TAG, "Cannot get key request: " + e.toString());
}
onKeyError();
}
@@ -246,7 +274,7 @@ class MediaDrmBridge {
final byte[] session = sessionId.getBytes("UTF-8");
mMediaDrm.removeKeys(session);
} catch (java.io.UnsupportedEncodingException e) {
- Log.e(TAG, "Cannot cancel key request " + e.toString());
+ Log.e(TAG, "Cannot cancel key request: " + e.toString());
}
}
@@ -279,11 +307,11 @@ class MediaDrmBridge {
});
return;
} catch (android.media.NotProvisionedException e) {
- Log.e(TAG, "failed to provide key response " + e.toString());
+ Log.e(TAG, "failed to provide key response: " + e.toString());
} catch (android.media.DeniedByServerException e) {
- Log.e(TAG, "failed to provide key response " + e.toString());
+ Log.e(TAG, "failed to provide key response: " + e.toString());
} catch (java.io.UnsupportedEncodingException e) {
- Log.e(TAG, "failed to provide key response " + e.toString());
+ Log.e(TAG, "failed to provide key response: " + e.toString());
}
onKeyError();
}
@@ -302,7 +330,7 @@ class MediaDrmBridge {
* @param response Response data from the provision server.
*/
private void onProvisionResponse(byte[] response) {
- Log.d(TAG, "provide key response.");
+ Log.d(TAG, "onProvisionResponse()");
if (response == null || response.length == 0) {
Log.e(TAG, "Invalid provision response.");
@@ -313,7 +341,11 @@ class MediaDrmBridge {
try {
mMediaDrm.provideProvisionResponse(response);
} catch (android.media.DeniedByServerException e) {
- Log.e(TAG, "failed to provide provision response " + e.toString());
+ Log.e(TAG, "failed to provide provision response: " + e.toString());
+ onKeyError();
+ return;
+ } catch (java.lang.IllegalStateException e) {
+ Log.e(TAG, "failed to provide provision response: " + e.toString());
onKeyError();
return;
}
diff --git a/media/base/android/media_codec_bridge.cc b/media/base/android/media_codec_bridge.cc
index bb5ddccfd7..266372efcd 100644
--- a/media/base/android/media_codec_bridge.cc
+++ b/media/base/android/media_codec_bridge.cc
@@ -5,6 +5,7 @@
#include "media/base/android/media_codec_bridge.h"
#include <jni.h>
+#include <string>
#include "base/android/build_info.h"
#include "base/android/jni_android.h"
@@ -20,6 +21,7 @@
#include "media/base/decrypt_config.h"
using base::android::AttachCurrentThread;
+using base::android::ConvertJavaStringToUTF8;
using base::android::ConvertUTF8ToJavaString;
using base::android::ScopedJavaLocalRef;
@@ -27,7 +29,7 @@ namespace media {
enum { kBufferFlagEndOfStream = 4 };
-static const char* AudioCodecToMimeType(const AudioCodec codec) {
+static const std::string AudioCodecToAndroidMimeType(const AudioCodec codec) {
switch (codec) {
case kCodecMP3:
return "audio/mpeg";
@@ -36,21 +38,53 @@ static const char* AudioCodecToMimeType(const AudioCodec codec) {
case kCodecAAC:
return "audio/mp4a-latm";
default:
- return NULL;
+ return std::string();
}
}
-static const char* VideoCodecToMimeType(const VideoCodec codec) {
+static const std::string VideoCodecToAndroidMimeType(const VideoCodec codec) {
switch (codec) {
case kCodecH264:
return "video/avc";
case kCodecVP8:
return "video/x-vnd.on2.vp8";
default:
- return NULL;
+ return std::string();
}
}
+static const std::string CodecTypeToAndroidMimeType(const std::string& codec) {
+ // TODO(xhwang): Shall we handle more detailed strings like "mp4a.40.2"?
+ if (codec == "avc1")
+ return "video/avc";
+ if (codec == "mp4a")
+ return "audio/mp4a-latm";
+ if (codec == "vp8" || codec == "vp8.0")
+ return "video/x-vnd.on2.vp8";
+ if (codec == "vorbis")
+ return "audio/vorbis";
+ return std::string();
+}
+
+// TODO(qinmin): using a map to help all the conversions in this class.
+static const std::string AndroidMimeTypeToCodecType(const std::string& mime) {
+ if (mime == "video/mp4v-es")
+ return "mp4v";
+ if (mime == "video/avc")
+ return "avc1";
+ if (mime == "video/x-vnd.on2.vp8")
+ return "vp8";
+ if (mime == "video/x-vnd.on2.vp9")
+ return "vp9";
+ if (mime == "audio/mp4a-latm")
+ return "mp4a";
+ if (mime == "audio/mpeg")
+ return "mp3";
+ if (mime == "audio/vorbis")
+ return "vorbis";
+ return std::string();
+}
+
static ScopedJavaLocalRef<jintArray> ToJavaIntArray(
JNIEnv* env, scoped_ptr<jint[]> native_array, int size) {
ScopedJavaLocalRef<jintArray> j_array(env, env->NewIntArray(size));
@@ -59,27 +93,53 @@ static ScopedJavaLocalRef<jintArray> ToJavaIntArray(
}
// static
-const base::TimeDelta MediaCodecBridge::kTimeOutInfinity =
- base::TimeDelta::FromMicroseconds(-1);
+bool MediaCodecBridge::IsAvailable() {
+ // MediaCodec is only available on JB and greater.
+ return base::android::BuildInfo::GetInstance()->sdk_int() >= 16;
+}
// static
-const base::TimeDelta MediaCodecBridge::kTimeOutNoWait =
- base::TimeDelta::FromMicroseconds(0);
+void MediaCodecBridge::GetCodecsInfo(
+ std::vector<CodecsInfo>* codecs_info) {
+ JNIEnv* env = AttachCurrentThread();
+ if (!IsAvailable())
+ return;
+
+ std::string mime_type;
+ ScopedJavaLocalRef<jobjectArray> j_codec_info_array =
+ Java_MediaCodecBridge_getCodecsInfo(env);
+ jsize len = env->GetArrayLength(j_codec_info_array.obj());
+ for (jsize i = 0; i < len; ++i) {
+ ScopedJavaLocalRef<jobject> j_info(
+ env, env->GetObjectArrayElement(j_codec_info_array.obj(), i));
+ ScopedJavaLocalRef<jstring> j_codec_type =
+ Java_CodecInfo_codecType(env, j_info.obj());
+ ConvertJavaStringToUTF8(env, j_codec_type.obj(), &mime_type);
+ CodecsInfo info;
+ info.codecs = AndroidMimeTypeToCodecType(mime_type);
+ info.secure_decoder_supported =
+ Java_CodecInfo_isSecureDecoderSupported(env, j_info.obj());
+ codecs_info->push_back(info);
+ }
+}
// static
-bool MediaCodecBridge::IsAvailable() {
- // MediaCodec is only available on JB and greater.
- return base::android::BuildInfo::GetInstance()->sdk_int() >= 16;
+bool MediaCodecBridge::CanDecode(const std::string& codec, bool is_secure) {
+ JNIEnv* env = AttachCurrentThread();
+ std::string mime = CodecTypeToAndroidMimeType(codec);
+ if (mime.empty())
+ return false;
+ ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, mime);
+ return !Java_MediaCodecBridge_create(env, j_mime.obj(), is_secure).is_null();
}
-MediaCodecBridge::MediaCodecBridge(const char* mime) {
+MediaCodecBridge::MediaCodecBridge(const std::string& mime, bool is_secure) {
JNIEnv* env = AttachCurrentThread();
CHECK(env);
- DCHECK(mime);
-
- ScopedJavaLocalRef<jstring> j_type = ConvertUTF8ToJavaString(env, mime);
- j_media_codec_.Reset(Java_MediaCodecBridge_create(
- env, j_type.obj()));
+ DCHECK(!mime.empty());
+ ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, mime);
+ j_media_codec_.Reset(
+ Java_MediaCodecBridge_create(env, j_mime.obj(), is_secure));
}
MediaCodecBridge::~MediaCodecBridge() {
@@ -95,9 +155,10 @@ void MediaCodecBridge::StartInternal() {
GetOutputBuffers();
}
-void MediaCodecBridge::Reset() {
+MediaCodecStatus MediaCodecBridge::Reset() {
JNIEnv* env = AttachCurrentThread();
- Java_MediaCodecBridge_flush(env, j_media_codec_.obj());
+ return static_cast<MediaCodecStatus>(
+ Java_MediaCodecBridge_flush(env, j_media_codec_.obj()));
}
void MediaCodecBridge::Stop() {
@@ -112,23 +173,24 @@ void MediaCodecBridge::GetOutputFormat(int* width, int* height) {
*height = Java_MediaCodecBridge_getOutputHeight(env, j_media_codec_.obj());
}
-size_t MediaCodecBridge::QueueInputBuffer(
- int index, const uint8* data, int size,
+MediaCodecStatus MediaCodecBridge::QueueInputBuffer(
+ int index, const uint8* data, int data_size,
const base::TimeDelta& presentation_time) {
- size_t size_to_copy = FillInputBuffer(index, data, size);
+ int size_to_copy = FillInputBuffer(index, data, data_size);
+ DCHECK_EQ(size_to_copy, data_size);
JNIEnv* env = AttachCurrentThread();
- Java_MediaCodecBridge_queueInputBuffer(
+ return static_cast<MediaCodecStatus>(Java_MediaCodecBridge_queueInputBuffer(
env, j_media_codec_.obj(),
- index, 0, size_to_copy, presentation_time.InMicroseconds(), 0);
- return size_to_copy;
+ index, 0, size_to_copy, presentation_time.InMicroseconds(), 0));
}
-size_t MediaCodecBridge::QueueSecureInputBuffer(
+MediaCodecStatus MediaCodecBridge::QueueSecureInputBuffer(
int index, const uint8* data, int data_size, const uint8* key_id,
int key_id_size, const uint8* iv, int iv_size,
const SubsampleEntry* subsamples, int subsamples_size,
const base::TimeDelta& presentation_time) {
- size_t size_to_copy = FillInputBuffer(index, data, data_size);
+ int size_to_copy = FillInputBuffer(index, data, data_size);
+ DCHECK_EQ(size_to_copy, data_size);
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jbyteArray> j_key_id =
@@ -146,12 +208,11 @@ size_t MediaCodecBridge::QueueSecureInputBuffer(
ScopedJavaLocalRef<jintArray> cypher_array = ToJavaIntArray(
env, native_cypher_array.Pass(), subsamples_size);
- Java_MediaCodecBridge_queueSecureInputBuffer(
- env, j_media_codec_.obj(), index, 0, j_iv.obj(), j_key_id.obj(),
- clear_array.obj(), cypher_array.obj(), subsamples_size,
- presentation_time.InMicroseconds());
-
- return size_to_copy;
+ return static_cast<MediaCodecStatus>(
+ Java_MediaCodecBridge_queueSecureInputBuffer(
+ env, j_media_codec_.obj(), index, 0, j_iv.obj(), j_key_id.obj(),
+ clear_array.obj(), cypher_array.obj(), subsamples_size,
+ presentation_time.InMicroseconds()));
}
void MediaCodecBridge::QueueEOS(int input_buffer_index) {
@@ -161,36 +222,34 @@ void MediaCodecBridge::QueueEOS(int input_buffer_index) {
input_buffer_index, 0, 0, 0, kBufferFlagEndOfStream);
}
-int MediaCodecBridge::DequeueInputBuffer(base::TimeDelta timeout) {
+MediaCodecStatus MediaCodecBridge::DequeueInputBuffer(
+ const base::TimeDelta& timeout, int* index) {
JNIEnv* env = AttachCurrentThread();
- return Java_MediaCodecBridge_dequeueInputBuffer(
+ ScopedJavaLocalRef<jobject> result = Java_MediaCodecBridge_dequeueInputBuffer(
env, j_media_codec_.obj(), timeout.InMicroseconds());
+ *index = Java_DequeueInputResult_index(env, result.obj());
+ return static_cast<MediaCodecStatus>(
+ Java_DequeueInputResult_status(env, result.obj()));
}
-int MediaCodecBridge::DequeueOutputBuffer(
- base::TimeDelta timeout, size_t* offset, size_t* size,
+MediaCodecStatus MediaCodecBridge::DequeueOutputBuffer(
+ const base::TimeDelta& timeout, int* index, size_t* offset, size_t* size,
base::TimeDelta* presentation_time, bool* end_of_stream) {
JNIEnv* env = AttachCurrentThread();
-
ScopedJavaLocalRef<jobject> result =
Java_MediaCodecBridge_dequeueOutputBuffer(env, j_media_codec_.obj(),
timeout.InMicroseconds());
-
- int j_buffer = Java_DequeueOutputResult_index(env, result.obj());
- if (j_buffer >= 0) {
- int64 presentation_time_us =
- Java_DequeueOutputResult_presentationTimeMicroseconds(
- env, result.obj());
- int flags = Java_DequeueOutputResult_flags(env, result.obj());
- *offset = base::checked_numeric_cast<size_t>(
- Java_DequeueOutputResult_offset(env, result.obj()));
- *size = base::checked_numeric_cast<size_t>(
- Java_DequeueOutputResult_numBytes(env, result.obj()));
- *presentation_time =
- base::TimeDelta::FromMicroseconds(presentation_time_us);
- *end_of_stream = flags & kBufferFlagEndOfStream;
- }
- return j_buffer;
+  *index = Java_DequeueOutputResult_index(env, result.obj());
+ *offset = base::checked_numeric_cast<size_t>(
+ Java_DequeueOutputResult_offset(env, result.obj()));
+ *size = base::checked_numeric_cast<size_t>(
+ Java_DequeueOutputResult_numBytes(env, result.obj()));
+ *presentation_time = base::TimeDelta::FromMicroseconds(
+ Java_DequeueOutputResult_presentationTimeMicroseconds(env, result.obj()));
+ int flags = Java_DequeueOutputResult_flags(env, result.obj());
+ *end_of_stream = flags & kBufferFlagEndOfStream;
+ return static_cast<MediaCodecStatus>(
+ Java_DequeueOutputResult_status(env, result.obj()));
}
void MediaCodecBridge::ReleaseOutputBuffer(int index, bool render) {
@@ -227,8 +286,9 @@ size_t MediaCodecBridge::FillInputBuffer(
return size_to_copy;
}
-AudioCodecBridge::AudioCodecBridge(const char* mime)
- : MediaCodecBridge(mime) {
+AudioCodecBridge::AudioCodecBridge(const std::string& mime)
+ // Audio codec doesn't care about security level.
+ : MediaCodecBridge(mime, false) {
}
bool AudioCodecBridge::Start(
@@ -236,13 +296,16 @@ bool AudioCodecBridge::Start(
const uint8* extra_data, size_t extra_data_size, bool play_audio,
jobject media_crypto) {
JNIEnv* env = AttachCurrentThread();
- DCHECK(AudioCodecToMimeType(codec));
if (!media_codec())
return false;
+ std::string codec_string = AudioCodecToAndroidMimeType(codec);
+ if (codec_string.empty())
+ return false;
+
ScopedJavaLocalRef<jstring> j_mime =
- ConvertUTF8ToJavaString(env, AudioCodecToMimeType(codec));
+ ConvertUTF8ToJavaString(env, codec_string);
ScopedJavaLocalRef<jobject> j_format(
Java_MediaCodecBridge_createAudioFormat(
env, j_mime.obj(), sample_rate, channel_count));
@@ -255,6 +318,7 @@ bool AudioCodecBridge::Start(
env, media_codec(), j_format.obj(), media_crypto, 0, play_audio)) {
return false;
}
+
StartInternal();
return true;
}
@@ -266,7 +330,7 @@ bool AudioCodecBridge::ConfigureMediaFormat(
return true;
JNIEnv* env = AttachCurrentThread();
- switch(codec) {
+ switch (codec) {
case kCodecVorbis:
{
if (extra_data[0] != 2) {
@@ -357,7 +421,7 @@ bool AudioCodecBridge::ConfigureMediaFormat(
}
default:
LOG(ERROR) << "Invalid header encountered for codec: "
- << AudioCodecToMimeType(codec);
+ << AudioCodecToAndroidMimeType(codec);
return false;
}
return true;
@@ -382,21 +446,24 @@ void AudioCodecBridge::SetVolume(double volume) {
Java_MediaCodecBridge_setVolume(env, media_codec(), volume);
}
-VideoCodecBridge::VideoCodecBridge(const char* mime)
- : MediaCodecBridge(mime) {
+VideoCodecBridge::VideoCodecBridge(const std::string& mime, bool is_secure)
+ : MediaCodecBridge(mime, is_secure) {
}
bool VideoCodecBridge::Start(
const VideoCodec codec, const gfx::Size& size, jobject surface,
jobject media_crypto) {
JNIEnv* env = AttachCurrentThread();
- DCHECK(VideoCodecToMimeType(codec));
if (!media_codec())
return false;
+ std::string codec_string = VideoCodecToAndroidMimeType(codec);
+ if (codec_string.empty())
+ return false;
+
ScopedJavaLocalRef<jstring> j_mime =
- ConvertUTF8ToJavaString(env, VideoCodecToMimeType(codec));
+ ConvertUTF8ToJavaString(env, codec_string);
ScopedJavaLocalRef<jobject> j_format(
Java_MediaCodecBridge_createVideoFormat(
env, j_mime.obj(), size.width(), size.height()));
@@ -410,13 +477,14 @@ bool VideoCodecBridge::Start(
}
AudioCodecBridge* AudioCodecBridge::Create(const AudioCodec codec) {
- const char* mime = AudioCodecToMimeType(codec);
- return mime ? new AudioCodecBridge(mime) : NULL;
+ const std::string mime = AudioCodecToAndroidMimeType(codec);
+ return mime.empty() ? NULL : new AudioCodecBridge(mime);
}
-VideoCodecBridge* VideoCodecBridge::Create(const VideoCodec codec) {
- const char* mime = VideoCodecToMimeType(codec);
- return mime ? new VideoCodecBridge(mime) : NULL;
+VideoCodecBridge* VideoCodecBridge::Create(const VideoCodec codec,
+ bool is_secure) {
+ const std::string mime = VideoCodecToAndroidMimeType(codec);
+ return mime.empty() ? NULL : new VideoCodecBridge(mime, is_secure);
}
bool MediaCodecBridge::RegisterMediaCodecBridge(JNIEnv* env) {
diff --git a/media/base/android/media_codec_bridge.h b/media/base/android/media_codec_bridge.h
index 3469b1804e..4bb4f1e748 100644
--- a/media/base/android/media_codec_bridge.h
+++ b/media/base/android/media_codec_bridge.h
@@ -18,6 +18,21 @@ namespace media {
struct SubsampleEntry;
+// These must be in sync with MediaCodecBridge.MEDIA_CODEC_XXX constants in
+// MediaCodecBridge.java.
+enum MediaCodecStatus {
+ MEDIA_CODEC_OK,
+ MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER,
+ MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER,
+ MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED,
+ MEDIA_CODEC_OUTPUT_FORMAT_CHANGED,
+ MEDIA_CODEC_INPUT_END_OF_STREAM,
+ MEDIA_CODEC_OUTPUT_END_OF_STREAM,
+ MEDIA_CODEC_NO_KEY,
+ MEDIA_CODEC_STOPPED,
+ MEDIA_CODEC_ERROR
+};
+
// This class serves as a bridge for native code to call java functions inside
// Android MediaCodec class. For more information on Android MediaCodec, check
// http://developer.android.com/reference/android/media/MediaCodec.html
@@ -26,26 +41,34 @@ struct SubsampleEntry;
// object.
class MEDIA_EXPORT MediaCodecBridge {
public:
- enum DequeueBufferInfo {
- INFO_OUTPUT_BUFFERS_CHANGED = -3,
- INFO_OUTPUT_FORMAT_CHANGED = -2,
- INFO_TRY_AGAIN_LATER = -1,
- INFO_MEDIA_CODEC_ERROR = -1000,
- };
-
- static const base::TimeDelta kTimeOutInfinity;
- static const base::TimeDelta kTimeOutNoWait;
-
// Returns true if MediaCodec is available on the device.
static bool IsAvailable();
+ // Returns whether MediaCodecBridge has a decoder that |is_secure| and can
+ // decode |codec| type.
+ static bool CanDecode(const std::string& codec, bool is_secure);
+
+ // Represents supported codecs on android. |secure_decoder_supported| is true
+ // if secure decoder is available for the codec type.
+  // TODO(qinmin): Currently the codecs string only contains one codec. Do we
+  // need more specific codecs separated by commas? (e.g. "vp8" -> "vp8, vp8.0")
+ struct CodecsInfo {
+ std::string codecs;
+ bool secure_decoder_supported;
+ };
+
+ // Get a list of supported codecs.
+ static void GetCodecsInfo(std::vector<CodecsInfo>* codecs_info);
+
virtual ~MediaCodecBridge();
// Resets both input and output, all indices previously returned in calls to
// DequeueInputBuffer() and DequeueOutputBuffer() become invalid.
// Please note that this clears all the inputs in the media codec. In other
// words, there will be no outputs until new input is provided.
- void Reset();
+  // Returns MEDIA_CODEC_ERROR if an unexpected error happens, or MEDIA_CODEC_OK
+  // otherwise.
+ MediaCodecStatus Reset();
// Finishes the decode/encode session. The instance remains active
// and ready to be StartAudio/Video()ed again. HOWEVER, due to the buggy
@@ -59,14 +82,16 @@ class MEDIA_EXPORT MediaCodecBridge {
void GetOutputFormat(int* width, int* height);
// Submits a byte array to the given input buffer. Call this after getting an
- // available buffer from DequeueInputBuffer(). Returns the number of bytes
- // put to the input buffer.
- size_t QueueInputBuffer(int index, const uint8* data, int size,
- const base::TimeDelta& presentation_time);
+ // available buffer from DequeueInputBuffer().
+ MediaCodecStatus QueueInputBuffer(int index,
+ const uint8* data,
+ int size,
+ const base::TimeDelta& presentation_time);
// Similar to the above call, but submits a buffer that is encrypted.
- size_t QueueSecureInputBuffer(
- int index, const uint8* data, int data_size,
+ MediaCodecStatus QueueSecureInputBuffer(
+ int index,
+ const uint8* data, int data_size,
const uint8* key_id, int key_id_size,
const uint8* iv, int iv_size,
const SubsampleEntry* subsamples, int subsamples_size,
@@ -75,19 +100,29 @@ class MEDIA_EXPORT MediaCodecBridge {
// Submits an empty buffer with a EOS (END OF STREAM) flag.
void QueueEOS(int input_buffer_index);
- // Returns an index (>=0) of an input buffer to be filled with valid data,
- // INFO_TRY_AGAIN_LATER if no such buffer is currently available, or
- // INFO_MEDIA_CODEC_ERROR if unexpected error happens.
- // Use kTimeOutInfinity for infinite timeout.
- int DequeueInputBuffer(base::TimeDelta timeout);
+ // Returns:
+ // MEDIA_CODEC_OK if an input buffer is ready to be filled with valid data,
+ // MEDIA_CODEC_ENQUEUE_INPUT_AGAIN_LATER if no such buffer is available, or
+ // MEDIA_CODEC_ERROR if unexpected error happens.
+ // Note: Never use infinite timeout as this would block the decoder thread and
+ // prevent the decoder job from being released.
+ MediaCodecStatus DequeueInputBuffer(const base::TimeDelta& timeout,
+ int* index);
// Dequeues an output buffer, block at most timeout_us microseconds.
- // Returns the index of an output buffer that has been successfully decoded
- // or one of DequeueBufferInfo above.
- // Use kTimeOutInfinity for infinite timeout.
- int DequeueOutputBuffer(
- base::TimeDelta timeout, size_t* offset, size_t* size,
- base::TimeDelta* presentation_time, bool* end_of_stream);
+ // Returns the status of this operation. If OK is returned, the output
+ // parameters should be populated. Otherwise, the values of output parameters
+ // should not be used.
+ // Note: Never use infinite timeout as this would block the decoder thread and
+ // prevent the decoder job from being released.
+ // TODO(xhwang): Can we drop |end_of_stream| and return
+ // MEDIA_CODEC_OUTPUT_END_OF_STREAM?
+ MediaCodecStatus DequeueOutputBuffer(const base::TimeDelta& timeout,
+ int* index,
+ size_t* offset,
+ size_t* size,
+ base::TimeDelta* presentation_time,
+ bool* end_of_stream);
// Returns the buffer to the codec. If you previously specified a surface
// when configuring this video decoder you can optionally render the buffer.
@@ -100,7 +135,7 @@ class MEDIA_EXPORT MediaCodecBridge {
static bool RegisterMediaCodecBridge(JNIEnv* env);
protected:
- explicit MediaCodecBridge(const char* mime);
+ MediaCodecBridge(const std::string& mime, bool is_secure);
// Calls start() against the media codec instance. Used in StartXXX() after
// configuring media codec.
@@ -137,7 +172,7 @@ class AudioCodecBridge : public MediaCodecBridge {
void SetVolume(double volume);
private:
- explicit AudioCodecBridge(const char* mime);
+ explicit AudioCodecBridge(const std::string& mime);
// Configure the java MediaFormat object with the extra codec data passed in.
bool ConfigureMediaFormat(jobject j_format, const AudioCodec codec,
@@ -148,7 +183,7 @@ class MEDIA_EXPORT VideoCodecBridge : public MediaCodecBridge {
public:
// Returns an VideoCodecBridge instance if |codec| is supported, or a NULL
// pointer otherwise.
- static VideoCodecBridge* Create(const VideoCodec codec);
+ static VideoCodecBridge* Create(const VideoCodec codec, bool is_secure);
// Start the video codec bridge.
// TODO(qinmin): Pass codec specific data if available.
@@ -156,7 +191,7 @@ class MEDIA_EXPORT VideoCodecBridge : public MediaCodecBridge {
jobject media_crypto);
private:
- explicit VideoCodecBridge(const char* mime);
+ VideoCodecBridge(const std::string& mime, bool is_secure);
};
} // namespace media
diff --git a/media/base/android/media_codec_bridge_unittest.cc b/media/base/android/media_codec_bridge_unittest.cc
index ee38e6d1a9..3ade9d64a2 100644
--- a/media/base/android/media_codec_bridge_unittest.cc
+++ b/media/base/android/media_codec_bridge_unittest.cc
@@ -95,6 +95,10 @@ namespace media {
static const int kPresentationTimeBase = 100;
+static inline const base::TimeDelta InfiniteTimeOut() {
+ return base::TimeDelta::FromMicroseconds(-1);
+}
+
void DecodeMediaFrame(
VideoCodecBridge* media_codec, const uint8* data, size_t data_size,
const base::TimeDelta input_presentation_timestamp,
@@ -103,17 +107,22 @@ void DecodeMediaFrame(
base::TimeDelta timestamp = initial_timestamp_lower_bound;
base::TimeDelta new_timestamp;
for (int i = 0; i < 10; ++i) {
- int input_buf_index = media_codec->DequeueInputBuffer(
- MediaCodecBridge::kTimeOutInfinity);
+ int input_buf_index = -1;
+ MediaCodecStatus status =
+ media_codec->DequeueInputBuffer(InfiniteTimeOut(), &input_buf_index);
+ ASSERT_EQ(MEDIA_CODEC_OK, status);
+
media_codec->QueueInputBuffer(
input_buf_index, data, data_size, input_presentation_timestamp);
+
size_t unused_offset = 0;
size_t size = 0;
bool eos = false;
- int output_buf_index = media_codec->DequeueOutputBuffer(
- MediaCodecBridge::kTimeOutInfinity,
- &unused_offset, &size, &new_timestamp, &eos);
- if (output_buf_index > 0)
+ int output_buf_index = -1;
+ status = media_codec->DequeueOutputBuffer(InfiniteTimeOut(),
+ &output_buf_index, &unused_offset, &size, &new_timestamp, &eos);
+
+ if (status == MEDIA_CODEC_OK && output_buf_index > 0)
media_codec->ReleaseOutputBuffer(output_buf_index, false);
// Output time stamp should not be smaller than old timestamp.
ASSERT_TRUE(new_timestamp >= timestamp);
@@ -127,7 +136,7 @@ TEST(MediaCodecBridgeTest, Initialize) {
return;
scoped_ptr<media::MediaCodecBridge> media_codec;
- media_codec.reset(VideoCodecBridge::Create(kCodecH264));
+ media_codec.reset(VideoCodecBridge::Create(kCodecH264, false));
}
TEST(MediaCodecBridgeTest, DoNormal) {
@@ -139,8 +148,10 @@ TEST(MediaCodecBridgeTest, DoNormal) {
media_codec->Start(kCodecMP3, 44100, 2, NULL, 0, false, NULL);
- int input_buf_index = media_codec->DequeueInputBuffer(
- MediaCodecBridge::kTimeOutInfinity);
+ int input_buf_index = -1;
+ MediaCodecStatus status =
+ media_codec->DequeueInputBuffer(InfiniteTimeOut(), &input_buf_index);
+ ASSERT_EQ(MEDIA_CODEC_OK, status);
ASSERT_GE(input_buf_index, 0);
int64 input_pts = kPresentationTimeBase;
@@ -148,14 +159,12 @@ TEST(MediaCodecBridgeTest, DoNormal) {
input_buf_index, test_mp3, sizeof(test_mp3),
base::TimeDelta::FromMicroseconds(++input_pts));
- input_buf_index = media_codec->DequeueInputBuffer(
- MediaCodecBridge::kTimeOutInfinity);
+ status = media_codec->DequeueInputBuffer(InfiniteTimeOut(), &input_buf_index);
media_codec->QueueInputBuffer(
input_buf_index, test_mp3, sizeof(test_mp3),
base::TimeDelta::FromMicroseconds(++input_pts));
- input_buf_index = media_codec->DequeueInputBuffer(
- MediaCodecBridge::kTimeOutInfinity);
+ status = media_codec->DequeueInputBuffer(InfiniteTimeOut(), &input_buf_index);
media_codec->QueueEOS(input_buf_index);
input_pts = kPresentationTimeBase;
@@ -164,21 +173,25 @@ TEST(MediaCodecBridgeTest, DoNormal) {
size_t unused_offset = 0;
size_t size = 0;
base::TimeDelta timestamp;
- int output_buf_index = media_codec->DequeueOutputBuffer(
- MediaCodecBridge::kTimeOutInfinity,
- &unused_offset, &size, &timestamp, &eos);
- switch (output_buf_index) {
- case MediaCodecBridge::INFO_TRY_AGAIN_LATER:
+ int output_buf_index = -1;
+ status = media_codec->DequeueOutputBuffer(InfiniteTimeOut(),
+ &output_buf_index, &unused_offset, &size, &timestamp, &eos);
+ switch (status) {
+ case MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER:
FAIL();
return;
- case MediaCodecBridge::INFO_OUTPUT_FORMAT_CHANGED:
+ case MEDIA_CODEC_OUTPUT_FORMAT_CHANGED:
continue;
- case MediaCodecBridge::INFO_OUTPUT_BUFFERS_CHANGED:
+ case MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
media_codec->GetOutputBuffers();
continue;
+
+ default:
+ break;
}
+ ASSERT_GE(output_buf_index, 0);
EXPECT_LE(1u, size);
if (!eos)
EXPECT_EQ(++input_pts, timestamp.InMicroseconds());
@@ -222,7 +235,7 @@ TEST(MediaCodecBridgeTest, PresentationTimestampsDoNotDecrease) {
return;
scoped_ptr<VideoCodecBridge> media_codec;
- media_codec.reset(VideoCodecBridge::Create(kCodecVP8));
+ media_codec.reset(VideoCodecBridge::Create(kCodecVP8, false));
EXPECT_TRUE(media_codec->Start(
kCodecVP8, gfx::Size(320, 240), NULL, NULL));
scoped_refptr<DecoderBuffer> buffer =
@@ -250,7 +263,7 @@ TEST(MediaCodecBridgeTest, PresentationTimestampsDoNotDecrease) {
TEST(MediaCodecBridgeTest, CreateUnsupportedCodec) {
EXPECT_EQ(NULL, AudioCodecBridge::Create(kUnknownAudioCodec));
- EXPECT_EQ(NULL, VideoCodecBridge::Create(kUnknownVideoCodec));
+ EXPECT_EQ(NULL, VideoCodecBridge::Create(kUnknownVideoCodec, false));
}
} // namespace media
diff --git a/media/base/android/media_decoder_job.cc b/media/base/android/media_decoder_job.cc
index 0a2e1476ef..c5b6cbc57a 100644
--- a/media/base/android/media_decoder_job.cc
+++ b/media/base/android/media_decoder_job.cc
@@ -29,6 +29,7 @@ MediaDecoderJob::MediaDecoderJob(
weak_this_(this),
request_data_cb_(request_data_cb),
access_unit_index_(0),
+ input_buf_index_(-1),
stop_decode_pending_(false),
destroy_pending_(false) {
}
@@ -36,13 +37,14 @@ MediaDecoderJob::MediaDecoderJob(
MediaDecoderJob::~MediaDecoderJob() {}
void MediaDecoderJob::OnDataReceived(const DemuxerData& data) {
+ DVLOG(1) << __FUNCTION__ << ": " << data.access_units.size() << " units";
DCHECK(ui_loop_->BelongsToCurrentThread());
DCHECK(!on_data_received_cb_.is_null());
base::Closure done_cb = base::ResetAndReturn(&on_data_received_cb_);
if (stop_decode_pending_) {
- OnDecodeCompleted(DECODE_STOPPED, kNoTimestamp(), 0);
+ OnDecodeCompleted(MEDIA_CODEC_STOPPED, kNoTimestamp(), 0);
return;
}
@@ -51,11 +53,6 @@ void MediaDecoderJob::OnDataReceived(const DemuxerData& data) {
done_cb.Run();
}
-bool MediaDecoderJob::HasData() const {
- DCHECK(ui_loop_->BelongsToCurrentThread());
- return access_unit_index_ < received_data_.access_units.size();
-}
-
void MediaDecoderJob::Prefetch(const base::Closure& prefetch_cb) {
DCHECK(ui_loop_->BelongsToCurrentThread());
DCHECK(on_data_received_cb_.is_null());
@@ -132,44 +129,73 @@ void MediaDecoderJob::Release() {
delete this;
}
-MediaDecoderJob::DecodeStatus MediaDecoderJob::QueueInputBuffer(
- const AccessUnit& unit) {
- base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(
- kMediaCodecTimeoutInMilliseconds);
- int input_buf_index = media_codec_bridge_->DequeueInputBuffer(timeout);
- if (input_buf_index == MediaCodecBridge::INFO_MEDIA_CODEC_ERROR)
- return DECODE_FAILED;
- if (input_buf_index == MediaCodecBridge::INFO_TRY_AGAIN_LATER)
- return DECODE_TRY_ENQUEUE_INPUT_AGAIN_LATER;
+MediaCodecStatus MediaDecoderJob::QueueInputBuffer(const AccessUnit& unit) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(decoder_loop_->BelongsToCurrentThread());
+
+ int input_buf_index = input_buf_index_;
+ input_buf_index_ = -1;
+
+ // TODO(xhwang): Hide DequeueInputBuffer() and the index in MediaCodecBridge.
+ if (input_buf_index == -1) {
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(
+ kMediaCodecTimeoutInMilliseconds);
+ MediaCodecStatus status =
+ media_codec_bridge_->DequeueInputBuffer(timeout, &input_buf_index);
+ if (status != MEDIA_CODEC_OK) {
+ DVLOG(1) << "DequeueInputBuffer fails: " << status;
+ return status;
+ }
+ }
// TODO(qinmin): skip frames if video is falling far behind.
- DCHECK(input_buf_index >= 0);
+ DCHECK_GE(input_buf_index, 0);
if (unit.end_of_stream || unit.data.empty()) {
media_codec_bridge_->QueueEOS(input_buf_index);
- return DECODE_INPUT_END_OF_STREAM;
+ return MEDIA_CODEC_INPUT_END_OF_STREAM;
}
+
if (unit.key_id.empty()) {
- media_codec_bridge_->QueueInputBuffer(
+ return media_codec_bridge_->QueueInputBuffer(
input_buf_index, &unit.data[0], unit.data.size(), unit.timestamp);
- } else {
- if (unit.iv.empty() || unit.subsamples.empty()) {
- LOG(ERROR) << "The access unit doesn't have iv or subsamples while it "
- << "has key IDs!";
- return DECODE_FAILED;
- }
- media_codec_bridge_->QueueSecureInputBuffer(
- input_buf_index, &unit.data[0], unit.data.size(),
- reinterpret_cast<const uint8*>(&unit.key_id[0]), unit.key_id.size(),
- reinterpret_cast<const uint8*>(&unit.iv[0]), unit.iv.size(),
- &unit.subsamples[0], unit.subsamples.size(), unit.timestamp);
}
- return DECODE_SUCCEEDED;
+ if (unit.iv.empty() || unit.subsamples.empty()) {
+ DVLOG(1) << "The access unit doesn't have iv or subsamples while it "
+ << "has key IDs!";
+ return MEDIA_CODEC_ERROR;
+ }
+
+ MediaCodecStatus status = media_codec_bridge_->QueueSecureInputBuffer(
+ input_buf_index, &unit.data[0], unit.data.size(),
+ reinterpret_cast<const uint8*>(&unit.key_id[0]), unit.key_id.size(),
+ reinterpret_cast<const uint8*>(&unit.iv[0]), unit.iv.size(),
+ &unit.subsamples[0], unit.subsamples.size(), unit.timestamp);
+
+ // In case of MEDIA_CODEC_NO_KEY, we must reuse the |input_buf_index_|.
+ // Otherwise MediaDrm will report errors.
+ if (status == MEDIA_CODEC_NO_KEY)
+ input_buf_index_ = input_buf_index;
+
+ return status;
+}
+
+bool MediaDecoderJob::HasData() const {
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+ DCHECK(!input_eos_encountered_ ||
+ (received_data_.access_units.size() > 0 &&
+ access_unit_index_ < received_data_.access_units.size()))
+ << "access_unit_index_.size() " << received_data_.access_units.size()
+ << " access_unit_index_ " << access_unit_index_;
+ return access_unit_index_ < received_data_.access_units.size() ||
+ input_eos_encountered_;
}
void MediaDecoderJob::RequestData(const base::Closure& done_cb) {
+ DVLOG(1) << __FUNCTION__;
DCHECK(ui_loop_->BelongsToCurrentThread());
DCHECK(on_data_received_cb_.is_null());
+ DCHECK(!input_eos_encountered_);
received_data_ = DemuxerData();
access_unit_index_ = 0;
@@ -199,90 +225,88 @@ void MediaDecoderJob::DecodeInternal(
const base::TimeDelta& start_presentation_timestamp,
bool needs_flush,
const MediaDecoderJob::DecoderCallback& callback) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(decoder_loop_->BelongsToCurrentThread());
+
if (needs_flush) {
DVLOG(1) << "DecodeInternal needs flush.";
input_eos_encountered_ = false;
- media_codec_bridge_->Reset();
+ MediaCodecStatus reset_status = media_codec_bridge_->Reset();
+ if (MEDIA_CODEC_OK != reset_status) {
+ callback.Run(reset_status, start_presentation_timestamp, 0);
+ return;
+ }
}
- DecodeStatus decode_status = DECODE_INPUT_END_OF_STREAM;
+ MediaCodecStatus input_status = MEDIA_CODEC_INPUT_END_OF_STREAM;
if (!input_eos_encountered_) {
- decode_status = QueueInputBuffer(unit);
- if (decode_status == DECODE_INPUT_END_OF_STREAM) {
+ input_status = QueueInputBuffer(unit);
+ if (input_status == MEDIA_CODEC_INPUT_END_OF_STREAM) {
input_eos_encountered_ = true;
- } else if (decode_status != DECODE_SUCCEEDED) {
- callback.Run(decode_status, start_presentation_timestamp, 0);
+ } else if (input_status != MEDIA_CODEC_OK) {
+ callback.Run(input_status, start_presentation_timestamp, 0);
return;
}
}
+ int buffer_index = 0;
size_t offset = 0;
size_t size = 0;
base::TimeDelta presentation_timestamp;
- bool end_of_stream = false;
+ bool output_eos_encountered = false;
base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(
kMediaCodecTimeoutInMilliseconds);
- int output_buffer_index = media_codec_bridge_->DequeueOutputBuffer(
- timeout, &offset, &size, &presentation_timestamp, &end_of_stream);
-
- if (end_of_stream)
- decode_status = DECODE_OUTPUT_END_OF_STREAM;
-
- if (output_buffer_index < 0) {
- MediaCodecBridge::DequeueBufferInfo buffer_info =
- static_cast<MediaCodecBridge::DequeueBufferInfo>(output_buffer_index);
- switch (buffer_info) {
- case MediaCodecBridge::INFO_OUTPUT_BUFFERS_CHANGED:
- DCHECK_NE(decode_status, DECODE_INPUT_END_OF_STREAM);
+
+ MediaCodecStatus status = media_codec_bridge_->DequeueOutputBuffer(
+ timeout, &buffer_index, &offset, &size, &presentation_timestamp,
+ &output_eos_encountered);
+
+ if (status != MEDIA_CODEC_OK) {
+ if (status == MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED) {
media_codec_bridge_->GetOutputBuffers();
- break;
- case MediaCodecBridge::INFO_OUTPUT_FORMAT_CHANGED:
- DCHECK_NE(decode_status, DECODE_INPUT_END_OF_STREAM);
- // TODO(qinmin): figure out what we should do if format changes.
- decode_status = DECODE_FORMAT_CHANGED;
- break;
- case MediaCodecBridge::INFO_TRY_AGAIN_LATER:
- decode_status = DECODE_TRY_DEQUEUE_OUTPUT_AGAIN_LATER;
- break;
- case MediaCodecBridge::INFO_MEDIA_CODEC_ERROR:
- decode_status = DECODE_FAILED;
- break;
+ status = MEDIA_CODEC_OK;
}
- } else {
- base::TimeDelta time_to_render;
- DCHECK(!start_time_ticks.is_null());
- if (ComputeTimeToRender()) {
- time_to_render = presentation_timestamp - (base::TimeTicks::Now() -
- start_time_ticks + start_presentation_timestamp);
- }
-
- // TODO(acolwell): Change to > since the else will never run for audio.
- if (time_to_render >= base::TimeDelta()) {
- base::MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&MediaDecoderJob::ReleaseOutputBuffer,
- weak_this_.GetWeakPtr(), output_buffer_index, size,
- presentation_timestamp, callback, decode_status),
- time_to_render);
- } else {
- // TODO(qinmin): The codec is lagging behind, need to recalculate the
- // |start_presentation_timestamp_| and |start_time_ticks_|.
- DVLOG(1) << "codec is lagging behind :"
- << time_to_render.InMicroseconds();
- ReleaseOutputBuffer(output_buffer_index, size, presentation_timestamp,
- callback, decode_status);
- }
+ callback.Run(status, start_presentation_timestamp, 0);
+ return;
+ }
- return;
+ // TODO(xhwang/qinmin): This logic is correct but strange. Clean it up.
+ if (output_eos_encountered)
+ status = MEDIA_CODEC_OUTPUT_END_OF_STREAM;
+ else if (input_status == MEDIA_CODEC_INPUT_END_OF_STREAM)
+ status = MEDIA_CODEC_INPUT_END_OF_STREAM;
+
+ base::TimeDelta time_to_render;
+ DCHECK(!start_time_ticks.is_null());
+ if (ComputeTimeToRender()) {
+ time_to_render = presentation_timestamp - (base::TimeTicks::Now() -
+ start_time_ticks + start_presentation_timestamp);
+ }
+
+ // TODO(acolwell): Change to > since the else will never run for audio.
+ if (time_to_render >= base::TimeDelta()) {
+ decoder_loop_->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&MediaDecoderJob::ReleaseOutputBuffer,
+ weak_this_.GetWeakPtr(), buffer_index, size,
+ presentation_timestamp, callback, status),
+ time_to_render);
+ return;
}
- callback.Run(decode_status, start_presentation_timestamp, 0);
+
+ // TODO(qinmin): The codec is lagging behind, need to recalculate the
+ // |start_presentation_timestamp_| and |start_time_ticks_|.
+ DVLOG(1) << "codec is lagging behind :" << time_to_render.InMicroseconds();
+ ReleaseOutputBuffer(buffer_index, size, presentation_timestamp,
+ callback, status);
}
void MediaDecoderJob::OnDecodeCompleted(
- DecodeStatus status, const base::TimeDelta& presentation_timestamp,
+ MediaCodecStatus status, const base::TimeDelta& presentation_timestamp,
size_t audio_output_bytes) {
DCHECK(ui_loop_->BelongsToCurrentThread());
+ DCHECK(status != MEDIA_CODEC_STOPPED || received_data_.access_units.empty());
if (destroy_pending_) {
delete this;
@@ -290,12 +314,24 @@ void MediaDecoderJob::OnDecodeCompleted(
}
DCHECK(!decode_cb_.is_null());
-
- if (status != MediaDecoderJob::DECODE_FAILED &&
- status != MediaDecoderJob::DECODE_TRY_ENQUEUE_INPUT_AGAIN_LATER &&
- status != MediaDecoderJob::DECODE_INPUT_END_OF_STREAM) {
- access_unit_index_++;
- }
+ switch (status) {
+ case MEDIA_CODEC_OK:
+ case MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER:
+ case MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
+ case MEDIA_CODEC_OUTPUT_FORMAT_CHANGED:
+ case MEDIA_CODEC_OUTPUT_END_OF_STREAM:
+ if (!input_eos_encountered_)
+ access_unit_index_++;
+ break;
+
+ case MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER:
+ case MEDIA_CODEC_INPUT_END_OF_STREAM:
+ case MEDIA_CODEC_NO_KEY:
+ case MEDIA_CODEC_STOPPED:
+ case MEDIA_CODEC_ERROR:
+ // Do nothing.
+ break;
+ };
stop_decode_pending_ = false;
base::ResetAndReturn(&decode_cb_).Run(status, presentation_timestamp,
diff --git a/media/base/android/media_decoder_job.h b/media/base/android/media_decoder_job.h
index 00e20cf508..d5a93b977c 100644
--- a/media/base/android/media_decoder_job.h
+++ b/media/base/android/media_decoder_job.h
@@ -9,6 +9,7 @@
#include "base/memory/weak_ptr.h"
#include "base/time/time.h"
#include "media/base/android/demuxer_stream_player_params.h"
+#include "media/base/android/media_codec_bridge.h"
namespace base {
class MessageLoopProxy;
@@ -16,30 +17,17 @@ class MessageLoopProxy;
namespace media {
-class MediaCodecBridge;
-
// Class for managing all the decoding tasks. Each decoding task will be posted
// onto the same thread. The thread will be stopped once Stop() is called.
class MediaDecoderJob {
public:
- enum DecodeStatus {
- DECODE_SUCCEEDED,
- DECODE_TRY_ENQUEUE_INPUT_AGAIN_LATER,
- DECODE_TRY_DEQUEUE_OUTPUT_AGAIN_LATER,
- DECODE_FORMAT_CHANGED,
- DECODE_INPUT_END_OF_STREAM,
- DECODE_OUTPUT_END_OF_STREAM,
- DECODE_FAILED,
- DECODE_STOPPED
- };
-
struct Deleter {
inline void operator()(MediaDecoderJob* ptr) const { ptr->Release(); }
};
// Callback when a decoder job finishes its work. Args: whether decode
// finished successfully, presentation time, audio output bytes.
- typedef base::Callback<void(DecodeStatus, const base::TimeDelta&,
+ typedef base::Callback<void(MediaCodecStatus, const base::TimeDelta&,
size_t)> DecoderCallback;
virtual ~MediaDecoderJob();
@@ -47,9 +35,6 @@ class MediaDecoderJob {
// Called by MediaSourcePlayer when more data for this object has arrived.
void OnDataReceived(const DemuxerData& data);
- // Returns true if this object has data to decode.
- bool HasData() const;
-
// Prefetch so we know the decoder job has data when we call Decode().
// |prefetch_cb| - Run when prefetching has completed.
void Prefetch(const base::Closure& prefetch_cb);
@@ -70,7 +55,7 @@ class MediaDecoderJob {
// this method will just allow the decode to complete as normal. If
// this object is waiting for a data request to complete, then this method
// will wait for the data to arrive and then call the |callback|
- // passed to Decode() with a status of DECODE_STOPPED. This ensures that
+ // passed to Decode() with a status of MEDIA_CODEC_STOPPED. This ensures that
// the |callback| passed to Decode() is always called and the status
// reflects whether data was actually decoded or the decode terminated early.
void StopDecode();
@@ -90,7 +75,7 @@ class MediaDecoderJob {
int outputBufferIndex, size_t size,
const base::TimeDelta& presentation_timestamp,
const DecoderCallback& callback,
- DecodeStatus status) = 0;
+ MediaCodecStatus status) = 0;
// Returns true if the "time to render" needs to be computed for frames in
// this decoder job.
@@ -100,7 +85,10 @@ class MediaDecoderJob {
// Causes this instance to be deleted on the thread it is bound to.
void Release();
- DecodeStatus QueueInputBuffer(const AccessUnit& unit);
+ MediaCodecStatus QueueInputBuffer(const AccessUnit& unit);
+
+ // Returns true if this object has data to decode.
+ bool HasData() const;
// Initiates a request for more data.
// |done_cb| is called when more data is available in |received_data_|.
@@ -124,7 +112,7 @@ class MediaDecoderJob {
const DecoderCallback& callback);
// Called on the UI thread to indicate that one decode cycle has completed.
- void OnDecodeCompleted(DecodeStatus status,
+ void OnDecodeCompleted(MediaCodecStatus status,
const base::TimeDelta& presentation_timestamp,
size_t audio_output_bytes);
@@ -163,6 +151,10 @@ class MediaDecoderJob {
// Data received over IPC from last RequestData() operation.
DemuxerData received_data_;
+ // The index of the input buffer that can be used by QueueInputBuffer().
+ // If the index is uninitialized or invalid, it must be -1.
+ int input_buf_index_;
+
bool stop_decode_pending_;
// Indicates that this object should be destroyed once the current
diff --git a/media/base/android/media_drm_bridge.cc b/media/base/android/media_drm_bridge.cc
index e0e4ec5472..5c8ef25c70 100644
--- a/media/base/android/media_drm_bridge.cc
+++ b/media/base/android/media_drm_bridge.cc
@@ -141,27 +141,74 @@ static bool GetPsshData(const uint8* data, int data_size,
return false;
}
+static MediaDrmBridge::SecurityLevel GetSecurityLevelFromString(
+ const std::string& security_level_str) {
+ if (0 == security_level_str.compare("L1"))
+ return MediaDrmBridge::SECURITY_LEVEL_1;
+ if (0 == security_level_str.compare("L3"))
+ return MediaDrmBridge::SECURITY_LEVEL_3;
+ DCHECK(security_level_str.empty());
+ return MediaDrmBridge::SECURITY_LEVEL_NONE;
+}
+
// static
-MediaDrmBridge* MediaDrmBridge::Create(int media_keys_id,
- const std::vector<uint8>& scheme_uuid,
- MediaPlayerManager* manager) {
- if (!IsAvailable() || scheme_uuid.empty())
- return NULL;
-
- // TODO(qinmin): check whether the uuid is valid.
- return new MediaDrmBridge(media_keys_id, scheme_uuid, manager);
+scoped_ptr<MediaDrmBridge> MediaDrmBridge::Create(
+ int media_keys_id,
+ const std::vector<uint8>& scheme_uuid,
+ const std::string& security_level,
+ MediaPlayerManager* manager) {
+ scoped_ptr<MediaDrmBridge> media_drm_bridge;
+
+ if (IsAvailable() && !scheme_uuid.empty()) {
+ // TODO(qinmin): check whether the uuid is valid.
+ media_drm_bridge.reset(
+ new MediaDrmBridge(media_keys_id, scheme_uuid, security_level, manager));
+ if (media_drm_bridge->j_media_drm_.is_null())
+ media_drm_bridge.reset();
+ }
+
+ return media_drm_bridge.Pass();
}
+// static
bool MediaDrmBridge::IsAvailable() {
return base::android::BuildInfo::GetInstance()->sdk_int() >= 18;
}
+// static
+bool MediaDrmBridge::IsSecureDecoderRequired(
+ const std::string& security_level_str) {
+ return IsSecureDecoderRequired(
+ GetSecurityLevelFromString(security_level_str));
+}
+
+bool MediaDrmBridge::IsSecurityLevelSupported(
+ const std::vector<uint8>& scheme_uuid,
+ const std::string& security_level) {
+ // Pass 0 as |media_keys_id| and NULL as |manager| as they are not used in
+ // creation time of MediaDrmBridge.
+ return MediaDrmBridge::Create(0, scheme_uuid, security_level, NULL) != NULL;
+}
+
+bool MediaDrmBridge::IsCryptoSchemeSupported(
+ const std::vector<uint8>& scheme_uuid,
+ const std::string& container_mime_type) {
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jbyteArray> j_scheme_uuid =
+ base::android::ToJavaByteArray(env, &scheme_uuid[0], scheme_uuid.size());
+ ScopedJavaLocalRef<jstring> j_container_mime_type =
+ ConvertUTF8ToJavaString(env, container_mime_type);
+ return Java_MediaDrmBridge_isCryptoSchemeSupported(
+ env, j_scheme_uuid.obj(), j_container_mime_type.obj());
+}
+
bool MediaDrmBridge::RegisterMediaDrmBridge(JNIEnv* env) {
return RegisterNativesImpl(env);
}
MediaDrmBridge::MediaDrmBridge(int media_keys_id,
const std::vector<uint8>& scheme_uuid,
+ const std::string& security_level,
MediaPlayerManager* manager)
: media_keys_id_(media_keys_id),
scheme_uuid_(scheme_uuid),
@@ -171,13 +218,17 @@ MediaDrmBridge::MediaDrmBridge(int media_keys_id,
ScopedJavaLocalRef<jbyteArray> j_scheme_uuid =
base::android::ToJavaByteArray(env, &scheme_uuid[0], scheme_uuid.size());
+ ScopedJavaLocalRef<jstring> j_security_level =
+ ConvertUTF8ToJavaString(env, security_level);
j_media_drm_.Reset(Java_MediaDrmBridge_create(
- env, j_scheme_uuid.obj(), reinterpret_cast<intptr_t>(this)));
+ env, j_scheme_uuid.obj(), j_security_level.obj(),
+ reinterpret_cast<intptr_t>(this)));
}
MediaDrmBridge::~MediaDrmBridge() {
JNIEnv* env = AttachCurrentThread();
- Java_MediaDrmBridge_release(env, j_media_drm_.obj());
+ if (!j_media_drm_.is_null())
+ Java_MediaDrmBridge_release(env, j_media_drm_.obj());
}
bool MediaDrmBridge::GenerateKeyRequest(const std::string& type,
@@ -199,6 +250,7 @@ bool MediaDrmBridge::GenerateKeyRequest(const std::string& type,
void MediaDrmBridge::AddKey(const uint8* key, int key_length,
const uint8* init_data, int init_data_length,
const std::string& session_id) {
+ DVLOG(1) << __FUNCTION__;
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jbyteArray> j_key_data =
base::android::ToJavaByteArray(env, key, key_length);
@@ -267,22 +319,22 @@ ScopedJavaLocalRef<jobject> MediaDrmBridge::GetMediaCrypto() {
return Java_MediaDrmBridge_getMediaCrypto(env, j_media_drm_.obj());
}
+// static
+bool MediaDrmBridge::IsSecureDecoderRequired(SecurityLevel security_level) {
+ return MediaDrmBridge::SECURITY_LEVEL_1 == security_level;
+}
+
MediaDrmBridge::SecurityLevel MediaDrmBridge::GetSecurityLevel() {
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jstring> j_security_level =
Java_MediaDrmBridge_getSecurityLevel(env, j_media_drm_.obj());
- std::string security_level =
+ std::string security_level_str =
ConvertJavaStringToUTF8(env, j_security_level.obj());
- if (0 == security_level.compare("L1"))
- return SECURITY_LEVEL_1;
- if (0 == security_level.compare("L3"))
- return SECURITY_LEVEL_3;
- DCHECK(security_level.empty());
- return SECURITY_LEVEL_NONE;
+ return GetSecurityLevelFromString(security_level_str);
}
bool MediaDrmBridge::IsProtectedSurfaceRequired() {
- return MediaDrmBridge::SECURITY_LEVEL_1 == GetSecurityLevel();
+ return IsSecureDecoderRequired(GetSecurityLevel());
}
} // namespace media
diff --git a/media/base/android/media_drm_bridge.h b/media/base/android/media_drm_bridge.h
index 821e9e8f14..e58d9edd6b 100644
--- a/media/base/android/media_drm_bridge.h
+++ b/media/base/android/media_drm_bridge.h
@@ -11,6 +11,7 @@
#include "base/android/scoped_java_ref.h"
#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
#include "media/base/media_export.h"
#include "media/base/media_keys.h"
@@ -32,13 +33,23 @@ class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
// Returns a MediaDrmBridge instance if |scheme_uuid| is supported, or a NULL
// pointer otherwise.
- static MediaDrmBridge* Create(int media_keys_id,
- const std::vector<uint8>& scheme_uuid,
- MediaPlayerManager* manager);
+ static scoped_ptr<MediaDrmBridge> Create(
+ int media_keys_id,
+ const std::vector<uint8>& scheme_uuid,
+ const std::string& security_level,
+ MediaPlayerManager* manager);
// Checks whether MediaDRM is available.
static bool IsAvailable();
+ static bool IsSecurityLevelSupported(const std::vector<uint8>& scheme_uuid,
+ const std::string& security_level);
+
+ static bool IsCryptoSchemeSupported(const std::vector<uint8>& scheme_uuid,
+ const std::string& container_mime_type);
+
+ static bool IsSecureDecoderRequired(const std::string& security_level_str);
+
static bool RegisterMediaDrmBridge(JNIEnv* env);
// MediaKeys implementations.
@@ -78,8 +89,11 @@ class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
int media_keys_id() const { return media_keys_id_; }
private:
+ static bool IsSecureDecoderRequired(SecurityLevel security_level);
+
MediaDrmBridge(int media_keys_id,
const std::vector<uint8>& scheme_uuid,
+ const std::string& security_level,
MediaPlayerManager* manager);
// Get the security level of the media.
diff --git a/media/base/android/media_player_android.cc b/media/base/android/media_player_android.cc
index 6b1626a2b6..101ab436df 100644
--- a/media/base/android/media_player_android.cc
+++ b/media/base/android/media_player_android.cc
@@ -63,22 +63,6 @@ void MediaPlayerAndroid::ReleaseMediaResourcesFromManager() {
manager_->ReleaseMediaResources(player_id_);
}
-void MediaPlayerAndroid::DemuxerReady(const DemuxerConfigs& configs) {
- NOTREACHED() << "Unexpected ipc received";
-}
-
-void MediaPlayerAndroid::ReadFromDemuxerAck(const DemuxerData& data) {
- NOTREACHED() << "Unexpected ipc received";
-}
-
-void MediaPlayerAndroid::OnSeekRequestAck(unsigned seek_request_id) {
- NOTREACHED() << "Unexpected ipc received";
-}
-
-void MediaPlayerAndroid::DurationChanged(const base::TimeDelta& duration) {
- NOTREACHED() << "Unexpected ipc received";
-}
-
GURL MediaPlayerAndroid::GetUrl() {
return GURL();
}
@@ -92,4 +76,9 @@ void MediaPlayerAndroid::SetDrmBridge(MediaDrmBridge* drm_bridge) {
return;
}
+void MediaPlayerAndroid::OnKeyAdded() {
+ // Not all players care about the decryption key. Do nothing by default.
+ return;
+}
+
} // namespace media
diff --git a/media/base/android/media_player_android.h b/media/base/android/media_player_android.h
index 06cb573a43..464ad89c4d 100644
--- a/media/base/android/media_player_android.h
+++ b/media/base/android/media_player_android.h
@@ -10,7 +10,6 @@
#include "base/callback.h"
#include "base/time/time.h"
-#include "media/base/android/demuxer_stream_player_params.h"
#include "media/base/media_export.h"
#include "ui/gl/android/scoped_java_surface.h"
#include "url/gurl.h"
@@ -35,25 +34,6 @@ class MEDIA_EXPORT MediaPlayerAndroid {
MEDIA_ERROR_INVALID_CODE,
};
- // Types of media source that this object will play.
- enum SourceType {
- SOURCE_TYPE_URL,
- SOURCE_TYPE_MSE, // W3C Media Source Extensions
- SOURCE_TYPE_STREAM, // W3C Media Stream, e.g. getUserMedia().
- };
-
- // Construct a MediaPlayerAndroid object with all the needed media player
- // callbacks. This object needs to call |manager_|'s RequestMediaResources()
- // before decoding the media stream. This allows |manager_| to track
- // unused resources and free them when needed. On the other hand, it needs
- // to call ReleaseMediaResources() when it is done with decoding.
- static MediaPlayerAndroid* Create(int player_id,
- const GURL& url,
- SourceType source_type,
- const GURL& first_party_for_cookies,
- bool hide_url_log,
- MediaPlayerManager* manager);
-
// Passing an external java surface object to the player.
virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) = 0;
@@ -86,22 +66,13 @@ class MEDIA_EXPORT MediaPlayerAndroid {
virtual GURL GetUrl();
virtual GURL GetFirstPartyForCookies();
- // Methods for DemuxerStreamPlayer.
- // Informs DemuxerStreamPlayer that the demuxer is ready.
- virtual void DemuxerReady(const DemuxerConfigs& configs);
-
- // Called when the requested data is received from the demuxer.
- virtual void ReadFromDemuxerAck(const DemuxerData& data);
-
- // Called when a seek request is acked by the render process.
- virtual void OnSeekRequestAck(unsigned seek_request_id);
-
- // Called when the demuxer has changed the duration.
- virtual void DurationChanged(const base::TimeDelta& duration);
-
// Pass a drm bridge to a player.
virtual void SetDrmBridge(MediaDrmBridge* drm_bridge);
+ // Notifies the player that a decryption key has been added. The player
+ // may want to start/resume playback if it is waiting for a key.
+ virtual void OnKeyAdded();
+
int player_id() { return player_id_; }
protected:
diff --git a/media/base/android/media_player_bridge.cc b/media/base/android/media_player_bridge.cc
index 49c3563c12..750dd4bc02 100644
--- a/media/base/android/media_player_bridge.cc
+++ b/media/base/android/media_player_bridge.cc
@@ -12,7 +12,6 @@
#include "jni/MediaPlayerBridge_jni.h"
#include "media/base/android/media_player_manager.h"
#include "media/base/android/media_resource_getter.h"
-#include "media/base/android/media_source_player.h"
using base::android::ConvertUTF8ToJavaString;
using base::android::ScopedJavaLocalRef;
@@ -22,32 +21,6 @@ static const int kTimeUpdateInterval = 250;
namespace media {
-#if !defined(GOOGLE_TV)
-// static
-MediaPlayerAndroid* MediaPlayerAndroid::Create(
- int player_id,
- const GURL& url,
- SourceType source_type,
- const GURL& first_party_for_cookies,
- bool hide_url_log,
- MediaPlayerManager* manager) {
- if (source_type == SOURCE_TYPE_URL) {
- MediaPlayerBridge* media_player_bridge = new MediaPlayerBridge(
- player_id,
- url,
- first_party_for_cookies,
- hide_url_log,
- manager);
- media_player_bridge->Initialize();
- return media_player_bridge;
- } else {
- return new MediaSourcePlayer(
- player_id,
- manager);
- }
-}
-#endif
-
MediaPlayerBridge::MediaPlayerBridge(
int player_id,
const GURL& url,
diff --git a/media/base/android/media_player_manager.h b/media/base/android/media_player_manager.h
index 46ea8cab3a..4ecac22518 100644
--- a/media/base/android/media_player_manager.h
+++ b/media/base/android/media_player_manager.h
@@ -79,17 +79,6 @@ class MEDIA_EXPORT MediaPlayerManager {
// Release all the players managed by this object.
virtual void DestroyAllMediaPlayers() = 0;
- // Callback when DemuxerStreamPlayer wants to read data from the demuxer.
- virtual void OnReadFromDemuxer(int player_id,
- media::DemuxerStream::Type type) = 0;
-
- // Called when player wants the media element to initiate a seek.
- virtual void OnMediaSeekRequest(int player_id, base::TimeDelta time_to_seek,
- unsigned seek_request_id) = 0;
-
- // Called when player wants to read the config data from the demuxer.
- virtual void OnMediaConfigRequest(int player_id) = 0;
-
// Get the MediaDrmBridge object for the given media key Id.
virtual media::MediaDrmBridge* GetDrmBridge(int media_keys_id) = 0;
diff --git a/media/base/android/media_source_player.cc b/media/base/android/media_source_player.cc
index ba87bf0b38..835a7ae84b 100644
--- a/media/base/android/media_source_player.cc
+++ b/media/base/android/media_source_player.cc
@@ -4,6 +4,8 @@
#include "media/base/android/media_source_player.h"
+#include <limits>
+
#include "base/android/jni_android.h"
#include "base/android/jni_string.h"
#include "base/barrier_closure.h"
@@ -25,10 +27,43 @@ const int kBytesPerAudioOutputSample = 2;
namespace media {
+// static
+bool MediaSourcePlayer::IsTypeSupported(
+ const std::vector<uint8>& scheme_uuid,
+ const std::string& security_level,
+ const std::string& container,
+ const std::vector<std::string>& codecs) {
+ if (!MediaDrmBridge::IsCryptoSchemeSupported(scheme_uuid, container)) {
+ DVLOG(1) << "UUID and container '" << container << "' not supported.";
+ return false;
+ }
+
+ if (!MediaDrmBridge::IsSecurityLevelSupported(scheme_uuid, security_level)) {
+ DVLOG(1) << "UUID and security level '" << security_level
+ << "' not supported.";
+ return false;
+ }
+
+ bool is_secure = MediaDrmBridge::IsSecureDecoderRequired(security_level);
+ for (size_t i = 0; i < codecs.size(); ++i) {
+ if (!MediaCodecBridge::CanDecode(codecs[i], is_secure)) {
+ DVLOG(1) << "Codec '" << codecs[i] << "' "
+ << (is_secure ? "in secure mode " : "") << "not supported.";
+ return false;
+ }
+ }
+
+ return true;
+}
+
MediaSourcePlayer::MediaSourcePlayer(
int player_id,
- MediaPlayerManager* manager)
+ MediaPlayerManager* manager,
+ int demuxer_client_id,
+ DemuxerAndroid* demuxer)
: MediaPlayerAndroid(player_id, manager),
+ demuxer_client_id_(demuxer_client_id),
+ demuxer_(demuxer),
pending_event_(NO_EVENT_PENDING),
seek_request_id_(0),
width_(0),
@@ -48,9 +83,11 @@ MediaSourcePlayer::MediaSourcePlayer(
reconfig_video_decoder_(false),
weak_this_(this),
drm_bridge_(NULL) {
+ demuxer_->AddDemuxerClient(demuxer_client_id_, this);
}
MediaSourcePlayer::~MediaSourcePlayer() {
+ demuxer_->RemoveDemuxerClient(demuxer_client_id_);
Release();
}
@@ -104,7 +141,7 @@ void MediaSourcePlayer::Start() {
playing_ = true;
if (IsProtectedSurfaceRequired())
- manager()->OnProtectedSurfaceRequested(player_id());
+ manager()->OnProtectedSurfaceRequested(demuxer_client_id_);
StartInternal();
}
@@ -152,13 +189,13 @@ base::TimeDelta MediaSourcePlayer::GetDuration() {
void MediaSourcePlayer::Release() {
DVLOG(1) << __FUNCTION__;
- ClearDecodingData();
audio_decoder_job_.reset();
video_decoder_job_.reset();
reconfig_audio_decoder_ = false;
reconfig_video_decoder_ = false;
playing_ = false;
pending_event_ = NO_EVENT_PENDING;
+ decoder_starvation_callback_.Cancel();
surface_ = gfx::ScopedJavaSurface();
ReleaseMediaResourcesFromManager();
}
@@ -168,6 +205,12 @@ void MediaSourcePlayer::SetVolume(double volume) {
SetVolumeInternal();
}
+void MediaSourcePlayer::OnKeyAdded() {
+ DVLOG(1) << __FUNCTION__;
+ if (playing_)
+ StartInternal();
+}
+
bool MediaSourcePlayer::CanPause() {
return Seekable();
}
@@ -194,7 +237,6 @@ void MediaSourcePlayer::StartInternal() {
ConfigureAudioDecoderJob();
ConfigureVideoDecoderJob();
-
// If one of the decoder job is not ready, do nothing.
if ((HasAudio() && !audio_decoder_job_) ||
(HasVideo() && !video_decoder_job_)) {
@@ -207,7 +249,8 @@ void MediaSourcePlayer::StartInternal() {
ProcessPendingEvents();
}
-void MediaSourcePlayer::DemuxerReady(const DemuxerConfigs& configs) {
+void MediaSourcePlayer::OnDemuxerConfigsAvailable(
+ const DemuxerConfigs& configs) {
DVLOG(1) << __FUNCTION__;
duration_ = base::TimeDelta::FromMilliseconds(configs.duration_ms);
clock_.SetDuration(duration_);
@@ -252,7 +295,7 @@ void MediaSourcePlayer::DemuxerReady(const DemuxerConfigs& configs) {
}
}
-void MediaSourcePlayer::ReadFromDemuxerAck(const DemuxerData& data) {
+void MediaSourcePlayer::OnDemuxerDataAvailable(const DemuxerData& data) {
DVLOG(1) << __FUNCTION__ << "(" << data.type << ")";
DCHECK_LT(0u, data.access_units.size());
if (data.type == DemuxerStream::AUDIO)
@@ -261,7 +304,7 @@ void MediaSourcePlayer::ReadFromDemuxerAck(const DemuxerData& data) {
video_decoder_job_->OnDataReceived(data);
}
-void MediaSourcePlayer::DurationChanged(const base::TimeDelta& duration) {
+void MediaSourcePlayer::OnDemuxerDurationChanged(base::TimeDelta duration) {
duration_ = duration;
clock_.SetDuration(duration_);
}
@@ -303,7 +346,7 @@ void MediaSourcePlayer::SetDrmBridge(MediaDrmBridge* drm_bridge) {
StartInternal();
}
-void MediaSourcePlayer::OnSeekRequestAck(unsigned seek_request_id) {
+void MediaSourcePlayer::OnDemuxerSeeked(unsigned seek_request_id) {
DVLOG(1) << __FUNCTION__ << "(" << seek_request_id << ")";
// Do nothing until the most recent seek request is processed.
if (seek_request_id_ != seek_request_id)
@@ -317,24 +360,28 @@ void MediaSourcePlayer::OnSeekRequestAck(unsigned seek_request_id) {
void MediaSourcePlayer::UpdateTimestamps(
const base::TimeDelta& presentation_timestamp, size_t audio_output_bytes) {
+ base::TimeDelta new_max_time = presentation_timestamp;
+
if (audio_output_bytes > 0) {
audio_timestamp_helper_->AddFrames(
audio_output_bytes / (kBytesPerAudioOutputSample * num_channels_));
- clock_.SetMaxTime(audio_timestamp_helper_->GetTimestamp());
- } else {
- clock_.SetMaxTime(presentation_timestamp);
+ new_max_time = audio_timestamp_helper_->GetTimestamp();
}
+ clock_.SetMaxTime(new_max_time);
OnTimeUpdated();
}
void MediaSourcePlayer::ProcessPendingEvents() {
- DVLOG(1) << __FUNCTION__ << " : 0x"
- << std::hex << pending_event_;
+ DVLOG(1) << __FUNCTION__ << " : 0x" << std::hex << pending_event_;
// Wait for all the decoding jobs to finish before processing pending tasks.
- if ((audio_decoder_job_ && audio_decoder_job_->is_decoding()) ||
- (video_decoder_job_ && video_decoder_job_->is_decoding())) {
- DVLOG(1) << __FUNCTION__ << " : A job is still decoding.";
+ if (video_decoder_job_ && video_decoder_job_->is_decoding()) {
+ DVLOG(1) << __FUNCTION__ << " : A video job is still decoding.";
+ return;
+ }
+
+ if (audio_decoder_job_ && audio_decoder_job_->is_decoding()) {
+ DVLOG(1) << __FUNCTION__ << " : An audio job is still decoding.";
return;
}
@@ -346,8 +393,8 @@ void MediaSourcePlayer::ProcessPendingEvents() {
if (IsEventPending(SEEK_EVENT_PENDING)) {
DVLOG(1) << __FUNCTION__ << " : Handling SEEK_EVENT.";
ClearDecodingData();
- manager()->OnMediaSeekRequest(
- player_id(), GetCurrentTime(), ++seek_request_id_);
+ demuxer_->RequestDemuxerSeek(
+ demuxer_client_id_, GetCurrentTime(), ++seek_request_id_);
return;
}
@@ -355,7 +402,7 @@ void MediaSourcePlayer::ProcessPendingEvents() {
if (IsEventPending(CONFIG_CHANGE_EVENT_PENDING)) {
DVLOG(1) << __FUNCTION__ << " : Handling CONFIG_CHANGE_EVENT.";
DCHECK(reconfig_audio_decoder_ || reconfig_video_decoder_);
- manager()->OnMediaConfigRequest(player_id());
+ demuxer_->RequestDemuxerConfigs(demuxer_client_id_);
return;
}
@@ -393,13 +440,16 @@ void MediaSourcePlayer::ProcessPendingEvents() {
}
void MediaSourcePlayer::MediaDecoderCallback(
- bool is_audio, MediaDecoderJob::DecodeStatus decode_status,
+ bool is_audio, MediaCodecStatus status,
const base::TimeDelta& presentation_timestamp, size_t audio_output_bytes) {
- DVLOG(1) << __FUNCTION__;
- if (is_audio)
+ DVLOG(1) << __FUNCTION__ << ": " << is_audio << ", " << status;
+
+ bool is_clock_manager = is_audio || !HasAudio();
+
+ if (is_clock_manager)
decoder_starvation_callback_.Cancel();
- if (decode_status == MediaDecoderJob::DECODE_FAILED) {
+ if (status == MEDIA_CODEC_ERROR) {
Release();
OnMediaError(MEDIA_ERROR_DECODE);
return;
@@ -410,41 +460,29 @@ void MediaSourcePlayer::MediaDecoderCallback(
return;
}
- if (decode_status == MediaDecoderJob::DECODE_SUCCEEDED &&
- (is_audio || !HasAudio())) {
- UpdateTimestamps(presentation_timestamp, audio_output_bytes);
- }
-
- if (decode_status == MediaDecoderJob::DECODE_OUTPUT_END_OF_STREAM) {
+ if (status == MEDIA_CODEC_OUTPUT_END_OF_STREAM) {
PlaybackCompleted(is_audio);
return;
}
+ if (status == MEDIA_CODEC_OK && is_clock_manager)
+ UpdateTimestamps(presentation_timestamp, audio_output_bytes);
+
if (!playing_) {
- if (is_audio || !HasAudio())
+ if (is_clock_manager)
clock_.Pause();
return;
}
- base::TimeDelta current_timestamp = GetCurrentTime();
- if (is_audio) {
- if (decode_status == MediaDecoderJob::DECODE_SUCCEEDED) {
- base::TimeDelta timeout =
- audio_timestamp_helper_->GetTimestamp() - current_timestamp;
- StartStarvationCallback(timeout);
- }
- DecodeMoreAudio();
+ if (status == MEDIA_CODEC_NO_KEY)
return;
- }
- if (!HasAudio() && decode_status == MediaDecoderJob::DECODE_SUCCEEDED) {
- DCHECK(current_timestamp <= presentation_timestamp);
- // For video only streams, fps can be estimated from the difference
- // between the previous and current presentation timestamps. The
- // previous presentation timestamp is equal to current_timestamp.
- // TODO(qinmin): determine whether 2 is a good coefficient for estimating
- // video frame timeout.
- StartStarvationCallback(2 * (presentation_timestamp - current_timestamp));
+ if (status == MEDIA_CODEC_OK && is_clock_manager)
+ StartStarvationCallback(presentation_timestamp);
+
+ if (is_audio) {
+ DecodeMoreAudio();
+ return;
}
DecodeMoreVideo();
@@ -537,8 +575,8 @@ void MediaSourcePlayer::ConfigureAudioDecoderJob() {
audio_decoder_job_.reset(AudioDecoderJob::Create(
audio_codec_, sampling_rate_, num_channels_, &audio_extra_data_[0],
audio_extra_data_.size(), media_crypto.obj(),
- base::Bind(&MediaPlayerManager::OnReadFromDemuxer,
- base::Unretained(manager()), player_id(),
+ base::Bind(&DemuxerAndroid::RequestDemuxerData,
+ base::Unretained(demuxer_), demuxer_client_id_,
DemuxerStream::AUDIO)));
if (audio_decoder_job_) {
@@ -567,13 +605,17 @@ void MediaSourcePlayer::ConfigureVideoDecoderJob() {
// Android does not allow 2 MediaCodec instances use the same surface.
video_decoder_job_.reset();
// Create the new VideoDecoderJob.
- video_decoder_job_.reset(VideoDecoderJob::Create(
- video_codec_, gfx::Size(width_, height_), surface_.j_surface().obj(),
- media_crypto.obj(),
- base::Bind(&MediaPlayerManager::OnReadFromDemuxer,
- base::Unretained(manager()),
- player_id(),
- DemuxerStream::VIDEO)));
+ bool is_secure = IsProtectedSurfaceRequired();
+ video_decoder_job_.reset(
+ VideoDecoderJob::Create(video_codec_,
+ is_secure,
+ gfx::Size(width_, height_),
+ surface_.j_surface().obj(),
+ media_crypto.obj(),
+ base::Bind(&DemuxerAndroid::RequestDemuxerData,
+ base::Unretained(demuxer_),
+ demuxer_client_id_,
+ DemuxerStream::VIDEO)));
if (video_decoder_job_)
reconfig_video_decoder_ = false;
@@ -590,8 +632,29 @@ void MediaSourcePlayer::OnDecoderStarved() {
}
void MediaSourcePlayer::StartStarvationCallback(
- const base::TimeDelta& timeout) {
- DVLOG(1) << __FUNCTION__ << "(" << timeout.InSecondsF() << ")";
+ const base::TimeDelta& presentation_timestamp) {
+ // 20ms was chosen because it is the typical size of a compressed audio frame.
+ // Anything smaller than this would likely cause unnecessary cycling in and
+ // out of the prefetch state.
+ const base::TimeDelta kMinStarvationTimeout =
+ base::TimeDelta::FromMilliseconds(20);
+
+ base::TimeDelta current_timestamp = GetCurrentTime();
+ base::TimeDelta timeout;
+ if (HasAudio()) {
+ timeout = audio_timestamp_helper_->GetTimestamp() - current_timestamp;
+ } else {
+ DCHECK(current_timestamp <= presentation_timestamp);
+
+ // For video only streams, fps can be estimated from the difference
+ // between the previous and current presentation timestamps. The
+ // previous presentation timestamp is equal to current_timestamp.
+ // TODO(qinmin): determine whether 2 is a good coefficient for estimating
+ // video frame timeout.
+ timeout = 2 * (presentation_timestamp - current_timestamp);
+ }
+
+ timeout = std::max(timeout, kMinStarvationTimeout);
decoder_starvation_callback_.Reset(
base::Bind(&MediaSourcePlayer::OnDecoderStarved,
diff --git a/media/base/android/media_source_player.h b/media/base/android/media_source_player.h
index decc5cc7c0..cb6b2cd2d0 100644
--- a/media/base/android/media_source_player.h
+++ b/media/base/android/media_source_player.h
@@ -18,6 +18,7 @@
#include "base/threading/thread.h"
#include "base/time/default_tick_clock.h"
#include "base/time/time.h"
+#include "media/base/android/demuxer_android.h"
#include "media/base/android/media_codec_bridge.h"
#include "media/base/android/media_decoder_job.h"
#include "media/base/android/media_player_android.h"
@@ -34,13 +35,22 @@ class VideoDecoderJob;
// MediaCodec to decode audio and video streams in two separate threads.
// IPC is being used to send data from the render process to this object.
// TODO(qinmin): use shared memory to send data between processes.
-class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
+class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
+ public DemuxerAndroidClient {
public:
- // Construct a MediaSourcePlayer object with all the needed media player
- // callbacks.
- MediaSourcePlayer(int player_id, MediaPlayerManager* manager);
+ // Constructs a player with the given IDs. |manager| and |demuxer| must
+ // outlive the lifetime of this object.
+ MediaSourcePlayer(int player_id,
+ MediaPlayerManager* manager,
+ int demuxer_client_id,
+ DemuxerAndroid* demuxer);
virtual ~MediaSourcePlayer();
+ static bool IsTypeSupported(const std::vector<uint8>& scheme_uuid,
+ const std::string& security_level,
+ const std::string& container,
+ const std::vector<std::string>& codecs);
+
// MediaPlayerAndroid implementation.
virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) OVERRIDE;
virtual void Start() OVERRIDE;
@@ -57,11 +67,14 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
virtual bool CanSeekForward() OVERRIDE;
virtual bool CanSeekBackward() OVERRIDE;
virtual bool IsPlayerReady() OVERRIDE;
- virtual void OnSeekRequestAck(unsigned seek_request_id) OVERRIDE;
- virtual void DemuxerReady(const DemuxerConfigs& configs) OVERRIDE;
- virtual void ReadFromDemuxerAck(const DemuxerData& data) OVERRIDE;
- virtual void DurationChanged(const base::TimeDelta& duration) OVERRIDE;
virtual void SetDrmBridge(MediaDrmBridge* drm_bridge) OVERRIDE;
+ virtual void OnKeyAdded() OVERRIDE;
+
+ // DemuxerAndroidClient implementation.
+ virtual void OnDemuxerConfigsAvailable(const DemuxerConfigs& params) OVERRIDE;
+ virtual void OnDemuxerDataAvailable(const DemuxerData& params) OVERRIDE;
+ virtual void OnDemuxerSeeked(unsigned seek_request_id) OVERRIDE;
+ virtual void OnDemuxerDurationChanged(base::TimeDelta duration) OVERRIDE;
private:
// Update the current timestamp.
@@ -76,7 +89,7 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
// Called when the decoder finishes its task.
void MediaDecoderCallback(
- bool is_audio, MediaDecoderJob::DecodeStatus decode_status,
+ bool is_audio, MediaCodecStatus status,
const base::TimeDelta& presentation_timestamp,
size_t audio_output_bytes);
@@ -111,7 +124,10 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
void OnDecoderStarved();
// Starts the |decoder_starvation_callback_| task with the timeout value.
- void StartStarvationCallback(const base::TimeDelta& timeout);
+ // |presentation_timestamp| - The presentation timestamp used for starvation
+ // timeout computations. It represents the timestamp of the last piece of
+ // decoded data.
+ void StartStarvationCallback(const base::TimeDelta& presentation_timestamp);
// Schedules a seek event in |pending_events_| and calls StopDecode() on all
// the MediaDecoderJobs.
@@ -144,6 +160,9 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
void SetPendingEvent(PendingEventFlags event);
void ClearPendingEvent(PendingEventFlags event);
+ int demuxer_client_id_;
+ DemuxerAndroid* demuxer_;
+
// Pending event that the player needs to do.
unsigned pending_event_;
diff --git a/media/base/android/media_source_player_unittest.cc b/media/base/android/media_source_player_unittest.cc
index e3dbf20465..4c499c4cb0 100644
--- a/media/base/android/media_source_player_unittest.cc
+++ b/media/base/android/media_source_player_unittest.cc
@@ -8,6 +8,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/strings/stringprintf.h"
#include "media/base/android/media_codec_bridge.h"
+#include "media/base/android/media_drm_bridge.h"
#include "media/base/android/media_player_manager.h"
#include "media/base/android/media_source_player.h"
#include "media/base/decoder_buffer.h"
@@ -19,11 +20,17 @@ namespace media {
static const int kDefaultDurationInMs = 10000;
+static const char kAudioMp4[] = "audio/mp4";
+static const char kVideoMp4[] = "video/mp4";
+static const char kAudioWebM[] = "audio/webm";
+static const char kVideoWebM[] = "video/webm";
+
// Mock of MediaPlayerManager for testing purpose
class MockMediaPlayerManager : public MediaPlayerManager {
public:
- MockMediaPlayerManager() : num_requests_(0), last_seek_request_id_(0) {}
- virtual ~MockMediaPlayerManager() {};
+ explicit MockMediaPlayerManager(base::MessageLoop* message_loop)
+ : message_loop_(message_loop) {}
+ virtual ~MockMediaPlayerManager() {}
// MediaPlayerManager implementation.
virtual void RequestMediaResources(int player_id) OVERRIDE {}
@@ -37,8 +44,8 @@ class MockMediaPlayerManager : public MediaPlayerManager {
int player_id, base::TimeDelta duration, int width, int height,
bool success) OVERRIDE {}
virtual void OnPlaybackComplete(int player_id) OVERRIDE {
- if (message_loop_.is_running())
- message_loop_.Quit();
+ if (message_loop_->is_running())
+ message_loop_->Quit();
}
virtual void OnMediaInterrupted(int player_id) OVERRIDE {}
virtual void OnBufferingUpdate(int player_id, int percentage) OVERRIDE {}
@@ -50,17 +57,6 @@ class MockMediaPlayerManager : public MediaPlayerManager {
virtual MediaPlayerAndroid* GetFullscreenPlayer() OVERRIDE { return NULL; }
virtual MediaPlayerAndroid* GetPlayer(int player_id) OVERRIDE { return NULL; }
virtual void DestroyAllMediaPlayers() OVERRIDE {}
- virtual void OnReadFromDemuxer(int player_id,
- media::DemuxerStream::Type type) OVERRIDE {
- num_requests_++;
- if (message_loop_.is_running())
- message_loop_.Quit();
- }
- virtual void OnMediaSeekRequest(int player_id, base::TimeDelta time_to_seek,
- unsigned seek_request_id) OVERRIDE {
- last_seek_request_id_ = seek_request_id;
- }
- virtual void OnMediaConfigRequest(int player_id) OVERRIDE {}
virtual media::MediaDrmBridge* GetDrmBridge(int media_keys_id) OVERRIDE {
return NULL;
}
@@ -76,23 +72,55 @@ class MockMediaPlayerManager : public MediaPlayerManager {
const std::vector<uint8>& message,
const std::string& destination_url) OVERRIDE {}
+ private:
+ base::MessageLoop* message_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockMediaPlayerManager);
+};
+
+class MockDemuxerAndroid : public DemuxerAndroid {
+ public:
+ explicit MockDemuxerAndroid(base::MessageLoop* message_loop)
+ : message_loop_(message_loop),
+ num_requests_(0),
+ last_seek_request_id_(0) {}
+ virtual ~MockDemuxerAndroid() {}
+
+ virtual void AddDemuxerClient(int demuxer_client_id,
+ DemuxerAndroidClient* client) OVERRIDE {}
+ virtual void RemoveDemuxerClient(int demuxer_client_id) OVERRIDE {}
+ virtual void RequestDemuxerConfigs(int demuxer_client_id) OVERRIDE {}
+ virtual void RequestDemuxerData(int demuxer_client_id,
+ media::DemuxerStream::Type type) OVERRIDE {
+ num_requests_++;
+ if (message_loop_->is_running())
+ message_loop_->Quit();
+ }
+ virtual void RequestDemuxerSeek(int demuxer_client_id,
+ base::TimeDelta time_to_seek,
+ unsigned seek_request_id) OVERRIDE {
+ last_seek_request_id_ = seek_request_id;
+ }
+
int num_requests() const { return num_requests_; }
unsigned last_seek_request_id() const { return last_seek_request_id_; }
- base::MessageLoop* message_loop() { return &message_loop_; }
private:
- // The number of request this object sents for decoding data.
+ base::MessageLoop* message_loop_;
+
+ // The number of request this object has requested for decoding data.
int num_requests_;
unsigned last_seek_request_id_;
- base::MessageLoop message_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockDemuxerAndroid);
};
class MediaSourcePlayerTest : public testing::Test {
public:
- MediaSourcePlayerTest() {
- manager_.reset(new MockMediaPlayerManager());
- player_.reset(new MediaSourcePlayer(0, manager_.get()));
- }
+ MediaSourcePlayerTest()
+ : manager_(&message_loop_),
+ demuxer_(&message_loop_),
+ player_(0, &manager_, 0, &demuxer_) {}
virtual ~MediaSourcePlayerTest() {}
protected:
@@ -100,10 +128,10 @@ class MediaSourcePlayerTest : public testing::Test {
MediaDecoderJob* GetMediaDecoderJob(bool is_audio) {
if (is_audio) {
return reinterpret_cast<MediaDecoderJob*>(
- player_->audio_decoder_job_.get());
+ player_.audio_decoder_job_.get());
}
return reinterpret_cast<MediaDecoderJob*>(
- player_->video_decoder_job_.get());
+ player_.video_decoder_job_.get());
}
// Starts an audio decoder job.
@@ -132,8 +160,8 @@ class MediaSourcePlayerTest : public testing::Test {
// Starts decoding the data.
void Start(const DemuxerConfigs& configs) {
- player_->DemuxerReady(configs);
- player_->Start();
+ player_.OnDemuxerConfigsAvailable(configs);
+ player_.Start();
}
DemuxerData CreateReadFromDemuxerAckForAudio(int packet_id) {
@@ -175,12 +203,22 @@ class MediaSourcePlayerTest : public testing::Test {
}
base::TimeTicks StartTimeTicks() {
- return player_->start_time_ticks_;
+ return player_.start_time_ticks_;
+ }
+
+ bool IsTypeSupported(const std::vector<uint8>& scheme_uuid,
+ const std::string& security_level,
+ const std::string& container,
+ const std::vector<std::string>& codecs) {
+ return MediaSourcePlayer::IsTypeSupported(
+ scheme_uuid, security_level, container, codecs);
}
protected:
- scoped_ptr<MockMediaPlayerManager> manager_;
- scoped_ptr<MediaSourcePlayer> player_;
+ base::MessageLoop message_loop_;
+ MockMediaPlayerManager manager_;
+ MockDemuxerAndroid demuxer_;
+ MediaSourcePlayer player_;
DISALLOW_COPY_AND_ASSIGN(MediaSourcePlayerTest);
};
@@ -192,7 +230,7 @@ TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithValidConfig) {
// Test audio decoder job will be created when codec is successfully started.
StartAudioDecoderJob();
EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
- EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_EQ(1, demuxer_.num_requests());
}
TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithInvalidConfig) {
@@ -211,7 +249,7 @@ TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithInvalidConfig) {
invalid_codec_data, invalid_codec_data + 4);
Start(configs);
EXPECT_EQ(NULL, GetMediaDecoderJob(true));
- EXPECT_EQ(0, manager_->num_requests());
+ EXPECT_EQ(0, demuxer_.num_requests());
}
TEST_F(MediaSourcePlayerTest, StartVideoCodecWithValidSurface) {
@@ -225,14 +263,14 @@ TEST_F(MediaSourcePlayerTest, StartVideoCodecWithValidSurface) {
StartVideoDecoderJob();
// Video decoder job will not be created until surface is available.
EXPECT_EQ(NULL, GetMediaDecoderJob(false));
- EXPECT_EQ(0, manager_->num_requests());
+ EXPECT_EQ(0, demuxer_.num_requests());
- player_->SetVideoSurface(surface.Pass());
- EXPECT_EQ(1u, manager_->last_seek_request_id());
- player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ player_.SetVideoSurface(surface.Pass());
+ EXPECT_EQ(1u, demuxer_.last_seek_request_id());
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
// The decoder job should be ready now.
EXPECT_TRUE(NULL != GetMediaDecoderJob(false));
- EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_EQ(1, demuxer_.num_requests());
}
TEST_F(MediaSourcePlayerTest, StartVideoCodecWithInvalidSurface) {
@@ -246,15 +284,15 @@ TEST_F(MediaSourcePlayerTest, StartVideoCodecWithInvalidSurface) {
StartVideoDecoderJob();
// Video decoder job will not be created until surface is available.
EXPECT_EQ(NULL, GetMediaDecoderJob(false));
- EXPECT_EQ(0, manager_->num_requests());
+ EXPECT_EQ(0, demuxer_.num_requests());
// Release the surface texture.
surface_texture = NULL;
- player_->SetVideoSurface(surface.Pass());
- EXPECT_EQ(1u, manager_->last_seek_request_id());
- player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ player_.SetVideoSurface(surface.Pass());
+ EXPECT_EQ(1u, demuxer_.last_seek_request_id());
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
EXPECT_EQ(NULL, GetMediaDecoderJob(false));
- EXPECT_EQ(0, manager_->num_requests());
+ EXPECT_EQ(0, demuxer_.num_requests());
}
TEST_F(MediaSourcePlayerTest, ReadFromDemuxerAfterSeek) {
@@ -264,15 +302,31 @@ TEST_F(MediaSourcePlayerTest, ReadFromDemuxerAfterSeek) {
// Test decoder job will resend a ReadFromDemuxer request after seek.
StartAudioDecoderJob();
EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
- EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_EQ(1, demuxer_.num_requests());
// Initiate a seek
- player_->SeekTo(base::TimeDelta());
- EXPECT_EQ(1u, manager_->last_seek_request_id());
+ player_.SeekTo(base::TimeDelta());
+
+ // Verify that the seek does not occur until the initial prefetch
+ // completes.
+ EXPECT_EQ(0u, demuxer_.last_seek_request_id());
+
+ // Simulate aborted read caused by the seek. This aborts the initial
+ // prefetch.
+ DemuxerData data;
+ data.type = DemuxerStream::AUDIO;
+ data.access_units.resize(1);
+ data.access_units[0].status = DemuxerStream::kAborted;
+ player_.OnDemuxerDataAvailable(data);
+
+ // Verify that the seek is requested now that the initial prefetch
+ // has completed.
+ EXPECT_EQ(1u, demuxer_.last_seek_request_id());
+
// Sending back the seek ACK, this should trigger the player to call
// OnReadFromDemuxer() again.
- player_->OnSeekRequestAck(manager_->last_seek_request_id());
- EXPECT_EQ(2, manager_->num_requests());
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
+ EXPECT_EQ(2, demuxer_.num_requests());
}
TEST_F(MediaSourcePlayerTest, SetSurfaceWhileSeeking) {
@@ -286,18 +340,18 @@ TEST_F(MediaSourcePlayerTest, SetSurfaceWhileSeeking) {
gfx::ScopedJavaSurface surface(surface_texture.get());
StartVideoDecoderJob();
// Player is still waiting for SetVideoSurface(), so no request is sent.
- EXPECT_EQ(0, manager_->num_requests());
- player_->SeekTo(base::TimeDelta());
- EXPECT_EQ(1u, manager_->last_seek_request_id());
+ EXPECT_EQ(0, demuxer_.num_requests());
+ player_.SeekTo(base::TimeDelta());
+ EXPECT_EQ(1u, demuxer_.last_seek_request_id());
- player_->SetVideoSurface(surface.Pass());
+ player_.SetVideoSurface(surface.Pass());
EXPECT_TRUE(NULL == GetMediaDecoderJob(false));
- EXPECT_EQ(1u, manager_->last_seek_request_id());
+ EXPECT_EQ(1u, demuxer_.last_seek_request_id());
// Send the seek ack, player should start requesting data afterwards.
- player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
EXPECT_TRUE(NULL != GetMediaDecoderJob(false));
- EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_EQ(1, demuxer_.num_requests());
}
TEST_F(MediaSourcePlayerTest, StartAfterSeekFinish) {
@@ -311,22 +365,22 @@ TEST_F(MediaSourcePlayerTest, StartAfterSeekFinish) {
configs.audio_sampling_rate = 44100;
configs.is_audio_encrypted = false;
configs.duration_ms = kDefaultDurationInMs;
- player_->DemuxerReady(configs);
+ player_.OnDemuxerConfigsAvailable(configs);
EXPECT_EQ(NULL, GetMediaDecoderJob(true));
- EXPECT_EQ(0, manager_->num_requests());
+ EXPECT_EQ(0, demuxer_.num_requests());
// Initiate a seek
- player_->SeekTo(base::TimeDelta());
- EXPECT_EQ(1u, manager_->last_seek_request_id());
+ player_.SeekTo(base::TimeDelta());
+ EXPECT_EQ(1u, demuxer_.last_seek_request_id());
- player_->Start();
+ player_.Start();
EXPECT_EQ(NULL, GetMediaDecoderJob(true));
- EXPECT_EQ(0, manager_->num_requests());
+ EXPECT_EQ(0, demuxer_.num_requests());
// Sending back the seek ACK.
- player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
- EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_EQ(1, demuxer_.num_requests());
}
TEST_F(MediaSourcePlayerTest, StartImmediatelyAfterPause) {
@@ -339,27 +393,27 @@ TEST_F(MediaSourcePlayerTest, StartImmediatelyAfterPause) {
MediaDecoderJob* decoder_job = GetMediaDecoderJob(true);
EXPECT_TRUE(NULL != decoder_job);
- EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_EQ(1, demuxer_.num_requests());
EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
// Sending data to player.
- player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForAudio(0));
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
// Decoder job will not immediately stop after Pause() since it is
// running on another thread.
- player_->Pause();
+ player_.Pause();
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
// Nothing happens when calling Start() again.
- player_->Start();
+ player_.Start();
// Verify that Start() will not destroy and recreate the decoder job.
EXPECT_EQ(decoder_job, GetMediaDecoderJob(true));
- EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_EQ(1, demuxer_.num_requests());
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
- manager_->message_loop()->Run();
+ message_loop_.Run();
// The decoder job should finish and a new request will be sent.
- EXPECT_EQ(2, manager_->num_requests());
+ EXPECT_EQ(2, demuxer_.num_requests());
EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
}
@@ -383,27 +437,27 @@ TEST_F(MediaSourcePlayerTest, DecoderJobsCannotStartWithoutAudio) {
configs.is_video_encrypted = false;
configs.duration_ms = kDefaultDurationInMs;
Start(configs);
- EXPECT_EQ(0, manager_->num_requests());
+ EXPECT_EQ(0, demuxer_.num_requests());
scoped_refptr<gfx::SurfaceTexture> surface_texture(
new gfx::SurfaceTexture(0));
gfx::ScopedJavaSurface surface(surface_texture.get());
- player_->SetVideoSurface(surface.Pass());
- EXPECT_EQ(1u, manager_->last_seek_request_id());
- player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ player_.SetVideoSurface(surface.Pass());
+ EXPECT_EQ(1u, demuxer_.last_seek_request_id());
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
MediaDecoderJob* audio_decoder_job = GetMediaDecoderJob(true);
MediaDecoderJob* video_decoder_job = GetMediaDecoderJob(false);
- EXPECT_EQ(2, manager_->num_requests());
+ EXPECT_EQ(2, demuxer_.num_requests());
EXPECT_FALSE(audio_decoder_job->is_decoding());
EXPECT_FALSE(video_decoder_job->is_decoding());
// Sending audio data to player, audio decoder should not start.
- player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForVideo());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
EXPECT_FALSE(video_decoder_job->is_decoding());
// Sending video data to player, both decoders should start now.
- player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForAudio(0));
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
EXPECT_TRUE(audio_decoder_job->is_decoding());
EXPECT_TRUE(video_decoder_job->is_decoding());
}
@@ -415,27 +469,43 @@ TEST_F(MediaSourcePlayerTest, StartTimeTicksResetAfterDecoderUnderruns) {
// Test start time ticks will reset after decoder job underruns.
StartAudioDecoderJob();
EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
- EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_EQ(1, demuxer_.num_requests());
// For the first couple chunks, the decoder job may return
// DECODE_FORMAT_CHANGED status instead of DECODE_SUCCEEDED status. Decode
// more frames to guarantee that DECODE_SUCCEEDED will be returned.
for (int i = 0; i < 4; ++i) {
- player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForAudio(i));
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(i));
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
- manager_->message_loop()->Run();
+ message_loop_.Run();
}
// The decoder job should finish and a new request will be sent.
- EXPECT_EQ(5, manager_->num_requests());
- EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
+ EXPECT_EQ(5, demuxer_.num_requests());
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
base::TimeTicks previous = StartTimeTicks();
// Let the decoder timeout and execute the OnDecoderStarved() callback.
base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100));
- manager_->message_loop()->RunUntilIdle();
- // Send new data to the decoder. This should reset the start time ticks.
- player_->ReadFromDemuxerAck(CreateEOSAck(true));
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+ EXPECT_TRUE(StartTimeTicks() != base::TimeTicks());
+ message_loop_.RunUntilIdle();
+
+ // Send new data to the decoder so it can finish the currently
+ // pending decode.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(3));
+ while(GetMediaDecoderJob(true)->is_decoding())
+ message_loop_.RunUntilIdle();
+
+ // Verify the start time ticks is cleared at this point because the
+ // player is prefetching.
+ EXPECT_TRUE(StartTimeTicks() == base::TimeTicks());
+
+ // Send new data to the decoder so it can finish prefetching. This should
+ // reset the start time ticks.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(3));
+ EXPECT_TRUE(StartTimeTicks() != base::TimeTicks());
+
base::TimeTicks current = StartTimeTicks();
EXPECT_LE(100.0, (current - previous).InMillisecondsF());
}
@@ -449,20 +519,93 @@ TEST_F(MediaSourcePlayerTest, NoRequestForDataAfterInputEOS) {
scoped_refptr<gfx::SurfaceTexture> surface_texture(
new gfx::SurfaceTexture(0));
gfx::ScopedJavaSurface surface(surface_texture.get());
- player_->SetVideoSurface(surface.Pass());
+ player_.SetVideoSurface(surface.Pass());
StartVideoDecoderJob();
- player_->OnSeekRequestAck(manager_->last_seek_request_id());
- EXPECT_EQ(1, manager_->num_requests());
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
+ EXPECT_EQ(1, demuxer_.num_requests());
// Send the first input chunk.
- player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForVideo());
- manager_->message_loop()->Run();
- EXPECT_EQ(2, manager_->num_requests());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ message_loop_.Run();
+ EXPECT_EQ(2, demuxer_.num_requests());
// Send EOS.
- player_->ReadFromDemuxerAck(CreateEOSAck(false));
- manager_->message_loop()->Run();
+ player_.OnDemuxerDataAvailable(CreateEOSAck(false));
+ message_loop_.Run();
// No more request for data should be made.
- EXPECT_EQ(2, manager_->num_requests());
+ EXPECT_EQ(2, demuxer_.num_requests());
}
+// TODO(xhwang): Enable this test when the test devices are updated.
+TEST_F(MediaSourcePlayerTest, DISABLED_IsTypeSupported_Widevine) {
+ if (!MediaCodecBridge::IsAvailable() || !MediaDrmBridge::IsAvailable()) {
+ LOG(INFO) << "Could not run test - not supported on device.";
+ return;
+ }
+
+ uint8 kWidevineUUID[] = { 0xED, 0xEF, 0x8B, 0xA9, 0x79, 0xD6, 0x4A, 0xCE,
+ 0xA3, 0xC8, 0x27, 0xDC, 0xD5, 0x1D, 0x21, 0xED };
+
+ std::vector<uint8> widevine_uuid(kWidevineUUID,
+ kWidevineUUID + arraysize(kWidevineUUID));
+
+ // We test "L3" fully. But for "L1" we don't check the result as it depend on
+ // whether the test device supports "L1" decoding.
+
+ std::vector<std::string> codec_avc(1, "avc1");
+ std::vector<std::string> codec_aac(1, "mp4a");
+ std::vector<std::string> codec_avc_aac(1, "avc1");
+ codec_avc_aac.push_back("mp4a");
+
+ EXPECT_TRUE(IsTypeSupported(widevine_uuid, "L3", kVideoMp4, codec_avc));
+ IsTypeSupported(widevine_uuid, "L1", kVideoMp4, codec_avc);
+
+ // TODO(xhwang): L1/L3 doesn't apply to audio, so the result is messy.
+ // Clean this up after we have a solution to specifying decoding mode.
+ EXPECT_TRUE(IsTypeSupported(widevine_uuid, "L3", kAudioMp4, codec_aac));
+ IsTypeSupported(widevine_uuid, "L1", kAudioMp4, codec_aac);
+
+ EXPECT_TRUE(IsTypeSupported(widevine_uuid, "L3", kVideoMp4, codec_avc_aac));
+ IsTypeSupported(widevine_uuid, "L1", kVideoMp4, codec_avc_aac);
+
+ std::vector<std::string> codec_vp8(1, "vp8");
+ std::vector<std::string> codec_vorbis(1, "vorbis");
+ std::vector<std::string> codec_vp8_vorbis(1, "vp8");
+ codec_vp8_vorbis.push_back("vorbis");
+
+ // TODO(xhwang): WebM is actually not supported but currently
+ // MediaDrmBridge.isCryptoSchemeSupported() doesn't check the container type.
+ // Fix isCryptoSchemeSupported() and update this test as necessary.
+ EXPECT_TRUE(IsTypeSupported(widevine_uuid, "L3", kVideoWebM, codec_vp8));
+ IsTypeSupported(widevine_uuid, "L1", kVideoWebM, codec_vp8);
+
+ // TODO(xhwang): L1/L3 doesn't apply to audio, so the result is messy.
+ // Clean this up after we have a solution to specifying decoding mode.
+ EXPECT_TRUE(IsTypeSupported(widevine_uuid, "L3", kAudioWebM, codec_vorbis));
+ IsTypeSupported(widevine_uuid, "L1", kAudioWebM, codec_vorbis);
+
+ EXPECT_TRUE(
+ IsTypeSupported(widevine_uuid, "L3", kVideoWebM, codec_vp8_vorbis));
+ IsTypeSupported(widevine_uuid, "L1", kVideoWebM, codec_vp8_vorbis);
+}
+
+TEST_F(MediaSourcePlayerTest, IsTypeSupported_InvalidUUID) {
+ if (!MediaCodecBridge::IsAvailable() || !MediaDrmBridge::IsAvailable()) {
+ LOG(INFO) << "Could not run test - not supported on device.";
+ return;
+ }
+
+ uint8 kInvalidUUID[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+ 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF };
+
+ std::vector<uint8> invalid_uuid(kInvalidUUID,
+ kInvalidUUID + arraysize(kInvalidUUID));
+
+ std::vector<std::string> codec_avc(1, "avc1");
+ EXPECT_FALSE(IsTypeSupported(invalid_uuid, "L3", kVideoMp4, codec_avc));
+ EXPECT_FALSE(IsTypeSupported(invalid_uuid, "L1", kVideoMp4, codec_avc));
+}
+
+// TODO(xhwang): Are these IsTypeSupported tests device specific?
+// TODO(xhwang): Add more IsTypeSupported tests.
+
} // namespace media
diff --git a/media/base/android/video_decoder_job.cc b/media/base/android/video_decoder_job.cc
index 6ee595c0c0..d337f19dcb 100644
--- a/media/base/android/video_decoder_job.cc
+++ b/media/base/android/video_decoder_job.cc
@@ -24,11 +24,14 @@ class VideoDecoderThread : public base::Thread {
base::LazyInstance<VideoDecoderThread>::Leaky
g_video_decoder_thread = LAZY_INSTANCE_INITIALIZER;
-
-VideoDecoderJob* VideoDecoderJob::Create(
- const VideoCodec video_codec, const gfx::Size& size, jobject surface,
- jobject media_crypto, const base::Closure& request_data_cb) {
- scoped_ptr<VideoCodecBridge> codec(VideoCodecBridge::Create(video_codec));
+VideoDecoderJob* VideoDecoderJob::Create(const VideoCodec video_codec,
+ bool is_secure,
+ const gfx::Size& size,
+ jobject surface,
+ jobject media_crypto,
+ const base::Closure& request_data_cb) {
+ scoped_ptr<VideoCodecBridge> codec(
+ VideoCodecBridge::Create(video_codec, is_secure));
if (codec && codec->Start(video_codec, size, surface, media_crypto))
return new VideoDecoderJob(codec.Pass(), request_data_cb);
return NULL;
@@ -49,9 +52,8 @@ void VideoDecoderJob::ReleaseOutputBuffer(
int outputBufferIndex, size_t size,
const base::TimeDelta& presentation_timestamp,
const MediaDecoderJob::DecoderCallback& callback,
- DecodeStatus status) {
-
- if (status != DECODE_OUTPUT_END_OF_STREAM || size != 0u)
+ MediaCodecStatus status) {
+ if (status != MEDIA_CODEC_OUTPUT_END_OF_STREAM || size != 0u)
video_codec_bridge_->ReleaseOutputBuffer(outputBufferIndex, true);
callback.Run(status, presentation_timestamp, 0);
diff --git a/media/base/android/video_decoder_job.h b/media/base/android/video_decoder_job.h
index a26fa2bb14..27a3957c68 100644
--- a/media/base/android/video_decoder_job.h
+++ b/media/base/android/video_decoder_job.h
@@ -20,14 +20,18 @@ class VideoDecoderJob : public MediaDecoderJob {
// Create a new VideoDecoderJob instance.
// |video_codec| - The video format the object needs to decode.
- // |size| - The natrual size of the output frames.
+ // |is_secure| - Whether secure decoding is required.
+ // |size| - The natural size of the output frames.
// |surface| - The surface to render the frames to.
// |media_crypto| - Handle to a Java object responsible for decrypting the
// video data.
// |request_data_cb| - Callback used to request more data for the decoder.
- static VideoDecoderJob* Create(
- const VideoCodec video_codec, const gfx::Size& size, jobject surface,
- jobject media_crypto, const base::Closure& request_data_cb);
+ static VideoDecoderJob* Create(const VideoCodec video_codec,
+ bool is_secure,
+ const gfx::Size& size,
+ jobject surface,
+ jobject media_crypto,
+ const base::Closure& request_data_cb);
private:
VideoDecoderJob(scoped_ptr<VideoCodecBridge> video_codec_bridge,
@@ -38,7 +42,7 @@ class VideoDecoderJob : public MediaDecoderJob {
int outputBufferIndex, size_t size,
const base::TimeDelta& presentation_timestamp,
const MediaDecoderJob::DecoderCallback& callback,
- DecodeStatus status) OVERRIDE;
+ MediaCodecStatus status) OVERRIDE;
virtual bool ComputeTimeToRender() const OVERRIDE;
diff --git a/media/base/audio_decoder_config.cc b/media/base/audio_decoder_config.cc
index 38db05d3a5..dfaf94a268 100644
--- a/media/base/audio_decoder_config.cc
+++ b/media/base/audio_decoder_config.cc
@@ -6,6 +6,7 @@
#include "base/logging.h"
#include "base/metrics/histogram.h"
+#include "base/time/time.h"
#include "media/audio/sample_rates.h"
#include "media/base/limits.h"
#include "media/base/sample_format.h"
@@ -30,7 +31,8 @@ AudioDecoderConfig::AudioDecoderConfig(AudioCodec codec,
size_t extra_data_size,
bool is_encrypted) {
Initialize(codec, sample_format, channel_layout, samples_per_second,
- extra_data, extra_data_size, is_encrypted, true);
+ extra_data, extra_data_size, is_encrypted, true,
+ base::TimeDelta(), base::TimeDelta());
}
void AudioDecoderConfig::Initialize(AudioCodec codec,
@@ -40,7 +42,9 @@ void AudioDecoderConfig::Initialize(AudioCodec codec,
const uint8* extra_data,
size_t extra_data_size,
bool is_encrypted,
- bool record_stats) {
+ bool record_stats,
+ base::TimeDelta seek_preroll,
+ base::TimeDelta codec_delay) {
CHECK((extra_data_size != 0) == (extra_data != NULL));
if (record_stats) {
@@ -66,6 +70,8 @@ void AudioDecoderConfig::Initialize(AudioCodec codec,
bytes_per_channel_ = SampleFormatToBytesPerChannel(sample_format);
extra_data_.assign(extra_data, extra_data + extra_data_size);
is_encrypted_ = is_encrypted;
+ seek_preroll_ = seek_preroll;
+ codec_delay_ = codec_delay;
int channels = ChannelLayoutToChannelCount(channel_layout_);
bytes_per_frame_ = channels * bytes_per_channel_;
@@ -80,7 +86,9 @@ bool AudioDecoderConfig::IsValidConfig() const {
bytes_per_channel_ <= limits::kMaxBytesPerSample &&
samples_per_second_ > 0 &&
samples_per_second_ <= limits::kMaxSampleRate &&
- sample_format_ != kUnknownSampleFormat;
+ sample_format_ != kUnknownSampleFormat &&
+ seek_preroll_ >= base::TimeDelta() &&
+ codec_delay_ >= base::TimeDelta();
}
bool AudioDecoderConfig::Matches(const AudioDecoderConfig& config) const {
@@ -92,7 +100,9 @@ bool AudioDecoderConfig::Matches(const AudioDecoderConfig& config) const {
(!extra_data() || !memcmp(extra_data(), config.extra_data(),
extra_data_size())) &&
(is_encrypted() == config.is_encrypted()) &&
- (sample_format() == config.sample_format()));
+ (sample_format() == config.sample_format()) &&
+ (seek_preroll() == config.seek_preroll()) &&
+ (codec_delay() == config.codec_delay()));
}
} // namespace media
diff --git a/media/base/audio_decoder_config.h b/media/base/audio_decoder_config.h
index 1c61e70c3a..a17d2215b9 100644
--- a/media/base/audio_decoder_config.h
+++ b/media/base/audio_decoder_config.h
@@ -8,6 +8,7 @@
#include <vector>
#include "base/basictypes.h"
+#include "base/time/time.h"
#include "media/base/channel_layout.h"
#include "media/base/media_export.h"
#include "media/base/sample_format.h"
@@ -63,7 +64,9 @@ class MEDIA_EXPORT AudioDecoderConfig {
void Initialize(AudioCodec codec, SampleFormat sample_format,
ChannelLayout channel_layout, int samples_per_second,
const uint8* extra_data, size_t extra_data_size,
- bool is_encrypted, bool record_stats);
+ bool is_encrypted, bool record_stats,
+ base::TimeDelta seek_preroll,
+ base::TimeDelta codec_delay);
// Returns true if this object has appropriate configuration values, false
// otherwise.
@@ -80,6 +83,8 @@ class MEDIA_EXPORT AudioDecoderConfig {
int samples_per_second() const { return samples_per_second_; }
SampleFormat sample_format() const { return sample_format_; }
int bytes_per_frame() const { return bytes_per_frame_; }
+ base::TimeDelta seek_preroll() const { return seek_preroll_; }
+ base::TimeDelta codec_delay() const { return codec_delay_; }
// Optional byte data required to initialize audio decoders such as Vorbis
// codebooks.
@@ -103,6 +108,15 @@ class MEDIA_EXPORT AudioDecoderConfig {
std::vector<uint8> extra_data_;
bool is_encrypted_;
+ // |seek_preroll_| is the duration of the data that the decoder must decode
+ // before the decoded data is valid.
+ base::TimeDelta seek_preroll_;
+
+ // |codec_delay_| is the overall delay overhead added by the codec while
+ // encoding. This value should be subtracted from each block's timestamp to
+ // get the actual timestamp.
+ base::TimeDelta codec_delay_;
+
// Not using DISALLOW_COPY_AND_ASSIGN here intentionally to allow the compiler
// generated copy constructor and assignment operator. Since the extra data is
// typically small, the performance impact is minimal.
diff --git a/media/base/decoder_buffer.cc b/media/base/decoder_buffer.cc
index 9eaa128ceb..d4e75410ab 100644
--- a/media/base/decoder_buffer.cc
+++ b/media/base/decoder_buffer.cc
@@ -80,7 +80,8 @@ std::string DecoderBuffer::AsHumanReadableString() {
<< " duration: " << duration_.InMicroseconds()
<< " size: " << size_
<< " side_data_size: " << side_data_size_
- << " encrypted: " << (decrypt_config_ != NULL);
+ << " encrypted: " << (decrypt_config_ != NULL)
+ << " discard_padding (ms): " << discard_padding_.InMilliseconds();
return s.str();
}
diff --git a/media/base/decoder_buffer.h b/media/base/decoder_buffer.h
index 6cf519f4c1..393e586d06 100644
--- a/media/base/decoder_buffer.h
+++ b/media/base/decoder_buffer.h
@@ -105,6 +105,16 @@ class MEDIA_EXPORT DecoderBuffer
return side_data_size_;
}
+ base::TimeDelta discard_padding() const {
+ DCHECK(!end_of_stream());
+ return discard_padding_;
+ }
+
+ void set_discard_padding(const base::TimeDelta discard_padding) {
+ DCHECK(!end_of_stream());
+ discard_padding_ = discard_padding;
+ }
+
const DecryptConfig* decrypt_config() const {
DCHECK(!end_of_stream());
return decrypt_config_.get();
@@ -142,6 +152,7 @@ class MEDIA_EXPORT DecoderBuffer
int side_data_size_;
scoped_ptr<uint8, base::ScopedPtrAlignedFree> side_data_;
scoped_ptr<DecryptConfig> decrypt_config_;
+ base::TimeDelta discard_padding_;
// Constructor helper method for memory allocations.
void Initialize();
diff --git a/media/base/keyboard_event_counter.cc b/media/base/keyboard_event_counter.cc
index a4ae1097f8..8432aec37e 100644
--- a/media/base/keyboard_event_counter.cc
+++ b/media/base/keyboard_event_counter.cc
@@ -15,7 +15,8 @@ KeyboardEventCounter::~KeyboardEventCounter() {}
void KeyboardEventCounter::Reset() {
pressed_keys_.clear();
- total_key_presses_ = 0;
+ base::subtle::NoBarrier_Store(
+ reinterpret_cast<base::subtle::AtomicWord*>(&total_key_presses_), 0);
}
void KeyboardEventCounter::OnKeyboardEvent(ui::EventType event,
diff --git a/media/base/media.cc b/media/base/media.cc
index e1bb4b2c10..75625fe5f3 100644
--- a/media/base/media.cc
+++ b/media/base/media.cc
@@ -46,10 +46,8 @@ class MediaInitializer {
// Perform initialization of libraries which require runtime CPU detection.
// TODO(dalecurtis): Add initialization of YUV, SincResampler.
vector_math::Initialize();
-#if !defined(OS_IOS)
SincResampler::InitializeCPUSpecificFeatures();
InitializeCPUSpecificYUVConversions();
-#endif
}
~MediaInitializer() {
diff --git a/media/base/media_stub.cc b/media/base/media_stub.cc
index 9efb37e2a0..e3e02e40d6 100644
--- a/media/base/media_stub.cc
+++ b/media/base/media_stub.cc
@@ -7,7 +7,7 @@
#include "base/files/file_path.h"
// This file is intended for platforms that don't need to load any media
-// libraries (e.g., iOS).
+// libraries (e.g., Android).
namespace media {
namespace internal {
diff --git a/media/base/media_switches.cc b/media/base/media_switches.cc
index 2ebf5dfc7b..96998cc491 100644
--- a/media/base/media_switches.cc
+++ b/media/base/media_switches.cc
@@ -15,8 +15,8 @@ const char kEnableEac3Playback[] = "enable-eac3-playback";
// Enables Opus playback in media elements.
const char kEnableOpusPlayback[] = "enable-opus-playback";
-// Enables VP8 Alpha playback in media elements.
-const char kEnableVp8AlphaPlayback[] = "enable-vp8-alpha-playback";
+// Disables VP8 Alpha playback in media elements.
+const char kDisableVp8AlphaPlayback[] = "disable-vp8-alpha-playback";
// Set number of threads to use for video decoding.
const char kVideoThreads[] = "video-threads";
@@ -25,6 +25,15 @@ const char kVideoThreads[] = "video-threads";
const char kOverrideEncryptedMediaCanPlayType[] =
"override-encrypted-media-canplaytype";
+// Enables MP3 stream parser for Media Source Extensions.
+const char kEnableMP3StreamParser[] = "enable-mp3-stream-parser";
+
+#if defined(OS_ANDROID)
+// Enables use of non-compositing MediaDrm decoding by default for Encrypted
+// Media Extensions implementation.
+const char kMediaDrmEnableNonCompositing[] = "mediadrm-enable-non-compositing";
+#endif
+
#if defined(GOOGLE_TV)
// Use external video surface for video with more than or equal pixels to
// specified value. For example, value of 0 will enable external video surface
diff --git a/media/base/media_switches.h b/media/base/media_switches.h
index e6c1de02fe..01f550233d 100644
--- a/media/base/media_switches.h
+++ b/media/base/media_switches.h
@@ -18,12 +18,18 @@ MEDIA_EXPORT extern const char kEnableEac3Playback[];
MEDIA_EXPORT extern const char kEnableOpusPlayback[];
-MEDIA_EXPORT extern const char kEnableVp8AlphaPlayback[];
+MEDIA_EXPORT extern const char kDisableVp8AlphaPlayback[];
MEDIA_EXPORT extern const char kVideoThreads[];
MEDIA_EXPORT extern const char kOverrideEncryptedMediaCanPlayType[];
+MEDIA_EXPORT extern const char kEnableMP3StreamParser[];
+
+#if defined(OS_ANDROID)
+MEDIA_EXPORT extern const char kMediaDrmEnableNonCompositing[];
+#endif
+
#if defined(GOOGLE_TV)
MEDIA_EXPORT extern const char kUseExternalVideoSurfaceThresholdInPixels[];
#endif
diff --git a/media/base/run_all_unittests.cc b/media/base/run_all_unittests.cc
index 28ef5c68f7..1c4da93047 100644
--- a/media/base/run_all_unittests.cc
+++ b/media/base/run_all_unittests.cc
@@ -2,8 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "base/bind.h"
#include "base/command_line.h"
#include "base/test/test_suite.h"
+#include "base/test/unit_test_launcher.h"
#include "build/build_config.h"
#include "media/base/media.h"
#include "media/base/media_switches.h"
@@ -37,13 +39,18 @@ void TestSuiteNoAtExit::Initialize() {
// Run this here instead of main() to ensure an AtExitManager is already
// present.
media::InitializeMediaLibraryForTesting();
- // Enable VP8 alpha support for all media tests.
- // TODO(tomfinegan): Remove this once the VP8 alpha flag is removed or
- // negated.
CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- cmd_line->AppendSwitch(switches::kEnableVp8AlphaPlayback);
+ cmd_line->AppendSwitch(switches::kEnableMP3StreamParser);
+
+ // Enable Opus support for all media tests.
+ // TODO(vigneshv): Remove this once the Opus flag is removed or negated.
+ cmd_line->AppendSwitch(switches::kEnableOpusPlayback);
}
int main(int argc, char** argv) {
- return TestSuiteNoAtExit(argc, argv).Run();
+ TestSuiteNoAtExit test_suite(argc, argv);
+
+ return base::LaunchUnitTests(
+ argc, argv, base::Bind(&TestSuiteNoAtExit::Run,
+ base::Unretained(&test_suite)));
}
diff --git a/media/base/simd/convert_rgb_to_yuv_sse2.cc b/media/base/simd/convert_rgb_to_yuv_sse2.cc
index f99a2fef84..124671c0c0 100644
--- a/media/base/simd/convert_rgb_to_yuv_sse2.cc
+++ b/media/base/simd/convert_rgb_to_yuv_sse2.cc
@@ -21,6 +21,18 @@ namespace media {
// Define a convenient macro to do static cast.
#define INT16_FIX(x) static_cast<int16>(FIX(x))
+// Android's pixel layout is RGBA, while other platforms
+// are BGRA.
+#if defined(OS_ANDROID)
+SIMD_ALIGNED(const int16 ConvertRGBAToYUV_kTable[8 * 3]) = {
+ INT16_FIX(0.257), INT16_FIX(0.504), INT16_FIX(0.098), 0,
+ INT16_FIX(0.257), INT16_FIX(0.504), INT16_FIX(0.098), 0,
+ -INT16_FIX(0.148), -INT16_FIX(0.291), INT16_FIX(0.439), 0,
+ -INT16_FIX(0.148), -INT16_FIX(0.291), INT16_FIX(0.439), 0,
+ INT16_FIX(0.439), -INT16_FIX(0.368), -INT16_FIX(0.071), 0,
+ INT16_FIX(0.439), -INT16_FIX(0.368), -INT16_FIX(0.071), 0,
+};
+#else
SIMD_ALIGNED(const int16 ConvertRGBAToYUV_kTable[8 * 3]) = {
INT16_FIX(0.098), INT16_FIX(0.504), INT16_FIX(0.257), 0,
INT16_FIX(0.098), INT16_FIX(0.504), INT16_FIX(0.257), 0,
@@ -29,6 +41,7 @@ SIMD_ALIGNED(const int16 ConvertRGBAToYUV_kTable[8 * 3]) = {
-INT16_FIX(0.071), -INT16_FIX(0.368), INT16_FIX(0.439), 0,
-INT16_FIX(0.071), -INT16_FIX(0.368), INT16_FIX(0.439), 0,
};
+#endif
#undef INT16_FIX
diff --git a/media/base/simd/convert_yuv_to_rgb_c.cc b/media/base/simd/convert_yuv_to_rgb_c.cc
index b8ebd1eeb1..0466112918 100644
--- a/media/base/simd/convert_yuv_to_rgb_c.cc
+++ b/media/base/simd/convert_yuv_to_rgb_c.cc
@@ -20,31 +20,39 @@ namespace media {
#define SK_G32_SHIFT 8
#define SK_B32_SHIFT 16
#define SK_A32_SHIFT 24
+#define R_INDEX 0
+#define G_INDEX 1
+#define B_INDEX 2
+#define A_INDEX 3
#else
#define SK_B32_SHIFT 0
#define SK_G32_SHIFT 8
#define SK_R32_SHIFT 16
#define SK_A32_SHIFT 24
+#define B_INDEX 0
+#define G_INDEX 1
+#define R_INDEX 2
+#define A_INDEX 3
#endif
static inline void ConvertYUVToRGB32_C(uint8 y,
uint8 u,
uint8 v,
uint8* rgb_buf) {
- int b = kCoefficientsRgbY[256+u][0];
- int g = kCoefficientsRgbY[256+u][1];
- int r = kCoefficientsRgbY[256+u][2];
- int a = kCoefficientsRgbY[256+u][3];
+ int b = kCoefficientsRgbY[256+u][B_INDEX];
+ int g = kCoefficientsRgbY[256+u][G_INDEX];
+ int r = kCoefficientsRgbY[256+u][R_INDEX];
+ int a = kCoefficientsRgbY[256+u][A_INDEX];
- b = paddsw(b, kCoefficientsRgbY[512+v][0]);
- g = paddsw(g, kCoefficientsRgbY[512+v][1]);
- r = paddsw(r, kCoefficientsRgbY[512+v][2]);
- a = paddsw(a, kCoefficientsRgbY[512+v][3]);
+ b = paddsw(b, kCoefficientsRgbY[512+v][B_INDEX]);
+ g = paddsw(g, kCoefficientsRgbY[512+v][G_INDEX]);
+ r = paddsw(r, kCoefficientsRgbY[512+v][R_INDEX]);
+ a = paddsw(a, kCoefficientsRgbY[512+v][A_INDEX]);
- b = paddsw(b, kCoefficientsRgbY[y][0]);
- g = paddsw(g, kCoefficientsRgbY[y][1]);
- r = paddsw(r, kCoefficientsRgbY[y][2]);
- a = paddsw(a, kCoefficientsRgbY[y][3]);
+ b = paddsw(b, kCoefficientsRgbY[y][B_INDEX]);
+ g = paddsw(g, kCoefficientsRgbY[y][G_INDEX]);
+ r = paddsw(r, kCoefficientsRgbY[y][R_INDEX]);
+ a = paddsw(a, kCoefficientsRgbY[y][A_INDEX]);
b >>= 6;
g >>= 6;
diff --git a/media/base/simd/yuv_to_rgb_table.cc b/media/base/simd/yuv_to_rgb_table.cc
index 00735655f5..253280da95 100644
--- a/media/base/simd/yuv_to_rgb_table.cc
+++ b/media/base/simd/yuv_to_rgb_table.cc
@@ -17,20 +17,42 @@ extern "C" {
// Defines the R,G,B,A contributions from U.
// The contribution to A is the same for any value of U
// causing the final A value to be 255 in every conversion.
+// Android's pixel layout is RGBA, while other platforms
+// are BGRA.
+#if defined(OS_ANDROID)
+#define RGBU(i) { \
+ 0, \
+ static_cast<int16>(-0.391 * 64 * (i - 128) + 0.5), \
+ static_cast<int16>(2.018 * 64 * (i - 128) + 0.5), \
+ static_cast<int16>(256 * 64 - 1) \
+}
+#else
#define RGBU(i) { \
static_cast<int16>(2.018 * 64 * (i - 128) + 0.5), \
static_cast<int16>(-0.391 * 64 * (i - 128) + 0.5), \
0, \
static_cast<int16>(256 * 64 - 1) \
}
+#endif
// Defines the R,G,B,A contributions from V.
+// Android's pixel layout is RGBA, while other platforms
+// are BGRA.
+#if defined(OS_ANDROID)
+#define RGBV(i) { \
+ static_cast<int16>(1.596 * 64 * (i - 128) + 0.5), \
+ static_cast<int16>(-0.813 * 64 * (i - 128) + 0.5), \
+ 0, \
+ 0 \
+}
+#else
#define RGBV(i) { \
0, \
static_cast<int16>(-0.813 * 64 * (i - 128) + 0.5), \
static_cast<int16>(1.596 * 64 * (i - 128) + 0.5), \
0 \
}
+#endif
// Used to define a set of multiplier words for each alpha level.
#define ALPHA(i) { \
diff --git a/media/base/sinc_resampler.cc b/media/base/sinc_resampler.cc
index a2918c3f0d..5566f64ce8 100644
--- a/media/base/sinc_resampler.cc
+++ b/media/base/sinc_resampler.cc
@@ -108,9 +108,8 @@ static double SincScaleFactor(double io_ratio) {
// If we know the minimum architecture at compile time, avoid CPU detection.
// Force NaCl code to use C routines since (at present) nothing there uses these
-// methods and plumbing the -msse built library is non-trivial. iOS lies
-// about its architecture, so we also need to exclude it here.
-#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL) && !defined(OS_IOS)
+// methods and plumbing the -msse built library is non-trivial.
+#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL)
#if defined(__SSE__)
#define CONVOLVE_FUNC Convolve_SSE
void SincResampler::InitializeCPUSpecificFeatures() {}
diff --git a/media/base/vector_math.cc b/media/base/vector_math.cc
index ac6de92ad8..de946ca8cb 100644
--- a/media/base/vector_math.cc
+++ b/media/base/vector_math.cc
@@ -18,9 +18,8 @@ namespace vector_math {
// If we know the minimum architecture at compile time, avoid CPU detection.
// Force NaCl code to use C routines since (at present) nothing there uses these
-// methods and plumbing the -msse built library is non-trivial. iOS lies about
-// its architecture, so we also need to exclude it here.
-#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL) && !defined(OS_IOS)
+// methods and plumbing the -msse built library is non-trivial.
+#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL)
#if defined(__SSE__)
#define FMAC_FUNC FMAC_SSE
#define FMUL_FUNC FMUL_SSE
diff --git a/media/cast/OWNERS b/media/cast/OWNERS
index c7233bd75b..22e814b0a7 100644
--- a/media/cast/OWNERS
+++ b/media/cast/OWNERS
@@ -1 +1,2 @@
hclam@chromium.org
+hubbe@chromium.org
diff --git a/media/cast/audio_sender/audio_encoder.cc b/media/cast/audio_sender/audio_encoder.cc
new file mode 100644
index 0000000000..175f82b312
--- /dev/null
+++ b/media/cast/audio_sender/audio_encoder.cc
@@ -0,0 +1,172 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/audio_sender/audio_encoder.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/cast_thread.h"
+#include "third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
+#include "third_party/webrtc/modules/interface/module_common_types.h"
+
+namespace media {
+namespace cast {
+
+// 48KHz, 2 channels and 100 ms.
+static const int kMaxNumberOfSamples = 48 * 2 * 100;
+
+// This class is only called from the cast audio encoder thread.
+class WebrtEncodedDataCallback : public webrtc::AudioPacketizationCallback {
+ public:
+ WebrtEncodedDataCallback(scoped_refptr<CastThread> cast_thread,
+ AudioCodec codec,
+ int frequency)
+ : codec_(codec),
+ frequency_(frequency),
+ cast_thread_(cast_thread),
+ last_timestamp_(0) {}
+
+ virtual int32 SendData(
+ webrtc::FrameType /*frame_type*/,
+ uint8 /*payload_type*/,
+ uint32 timestamp,
+ const uint8* payload_data,
+ uint16 payload_size,
+ const webrtc::RTPFragmentationHeader* /*fragmentation*/) {
+ scoped_ptr<EncodedAudioFrame> audio_frame(new EncodedAudioFrame());
+ audio_frame->codec = codec_;
+ audio_frame->samples = timestamp - last_timestamp_;
+ DCHECK(audio_frame->samples <= kMaxNumberOfSamples);
+ last_timestamp_ = timestamp;
+ audio_frame->data.insert(audio_frame->data.begin(),
+ payload_data,
+ payload_data + payload_size);
+
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(*frame_encoded_callback_, base::Passed(&audio_frame),
+ recorded_time_));
+ return 0;
+ }
+
+ void SetEncodedCallbackInfo(
+ const base::TimeTicks& recorded_time,
+ const AudioEncoder::FrameEncodedCallback* frame_encoded_callback) {
+ recorded_time_ = recorded_time;
+ frame_encoded_callback_ = frame_encoded_callback;
+ }
+
+ private:
+ const AudioCodec codec_;
+ const int frequency_;
+ scoped_refptr<CastThread> cast_thread_;
+ uint32 last_timestamp_;
+ base::TimeTicks recorded_time_;
+ const AudioEncoder::FrameEncodedCallback* frame_encoded_callback_;
+};
+
+AudioEncoder::AudioEncoder(scoped_refptr<CastThread> cast_thread,
+ const AudioSenderConfig& audio_config)
+ : cast_thread_(cast_thread),
+ audio_encoder_(webrtc::AudioCodingModule::Create(0)),
+ webrtc_encoder_callback_(
+ new WebrtEncodedDataCallback(cast_thread, audio_config.codec,
+ audio_config.frequency)),
+ timestamp_(0) { // Must start at 0; used above.
+
+ if (audio_encoder_->InitializeSender() != 0) {
+ DCHECK(false) << "Invalid webrtc return value";
+ }
+ if (audio_encoder_->RegisterTransportCallback(
+ webrtc_encoder_callback_.get()) != 0) {
+ DCHECK(false) << "Invalid webrtc return value";
+ }
+ webrtc::CodecInst send_codec;
+ send_codec.pltype = audio_config.rtp_payload_type;
+ send_codec.plfreq = audio_config.frequency;
+ send_codec.channels = audio_config.channels;
+
+ switch (audio_config.codec) {
+ case kOpus:
+ strncpy(send_codec.plname, "opus", sizeof(send_codec.plname));
+ send_codec.pacsize = audio_config.frequency / 50; // 20 ms
+ send_codec.rate = audio_config.bitrate; // 64000
+ break;
+ case kPcm16:
+ strncpy(send_codec.plname, "L16", sizeof(send_codec.plname));
+ send_codec.pacsize = audio_config.frequency / 100; // 10 ms
+ // TODO(pwestin) bug in webrtc; it should take audio_config.channels into
+ // account.
+ send_codec.rate = 8 * 2 * audio_config.frequency;
+ break;
+ default:
+ DCHECK(false) << "Codec must be specified for audio encoder";
+ return;
+ }
+ if (audio_encoder_->RegisterSendCodec(send_codec) != 0) {
+ DCHECK(false) << "Invalid webrtc return value; failed to register codec";
+ }
+}
+
+AudioEncoder::~AudioEncoder() {
+ webrtc::AudioCodingModule::Destroy(audio_encoder_);
+}
+
+// Called from main cast thread.
+void AudioEncoder::InsertRawAudioFrame(
+ const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure release_callback) {
+ cast_thread_->PostTask(CastThread::AUDIO_ENCODER, FROM_HERE,
+ base::Bind(&AudioEncoder::EncodeAudioFrameThread, this, audio_frame,
+ recorded_time, frame_encoded_callback, release_callback));
+}
+
+// Called from cast audio encoder thread.
+void AudioEncoder::EncodeAudioFrameThread(
+ const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure release_callback) {
+ int samples_per_10ms = audio_frame->frequency / 100;
+ int number_of_10ms_blocks = audio_frame->samples.size() /
+ (samples_per_10ms * audio_frame->channels);
+ DCHECK(webrtc::AudioFrame::kMaxDataSizeSamples > samples_per_10ms)
+ << "webrtc sanity check failed";
+
+ for (int i = 0; i < number_of_10ms_blocks; ++i) {
+ webrtc::AudioFrame webrtc_audio_frame;
+ webrtc_audio_frame.timestamp_ = timestamp_;
+
+ // Due to the webrtc::AudioFrame declaration we need to copy our data into
+ // the webrtc structure.
+ memcpy(&webrtc_audio_frame.data_[0],
+ &audio_frame->samples[i * samples_per_10ms * audio_frame->channels],
+ samples_per_10ms * audio_frame->channels * sizeof(int16));
+ webrtc_audio_frame.samples_per_channel_ = samples_per_10ms;
+ webrtc_audio_frame.sample_rate_hz_ = audio_frame->frequency;
+ webrtc_audio_frame.num_channels_ = audio_frame->channels;
+
+ // webrtc::AudioCodingModule is thread safe.
+ if (audio_encoder_->Add10MsData(webrtc_audio_frame) != 0) {
+ DCHECK(false) << "Invalid webrtc return value";
+ }
+ timestamp_ += samples_per_10ms;
+ }
+ // We are done with the audio frame release it.
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, release_callback);
+
+ // Note:
+ // Not all insert of 10 ms will generate a callback with encoded data.
+ webrtc_encoder_callback_->SetEncodedCallbackInfo(recorded_time,
+ &frame_encoded_callback);
+ for (int i = 0; i < number_of_10ms_blocks; ++i) {
+ audio_encoder_->Process();
+ }
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/audio_sender/audio_encoder.h b/media/cast/audio_sender/audio_encoder.h
new file mode 100644
index 0000000000..8aacb0b475
--- /dev/null
+++ b/media/cast/audio_sender/audio_encoder.h
@@ -0,0 +1,63 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_AUDIO_SENDER_AUDIO_ENCODER_H_
+#define MEDIA_CAST_AUDIO_SENDER_AUDIO_ENCODER_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/rtp_sender/rtp_sender.h"
+
+namespace webrtc {
+class AudioCodingModule;
+}
+
+namespace media {
+namespace cast {
+
+class WebrtEncodedDataCallback;
+
+// Thread safe class.
+// It should be called from the main cast thread; however that is not required.
+class AudioEncoder : public base::RefCountedThreadSafe<AudioEncoder> {
+ public:
+ typedef base::Callback<void(scoped_ptr<EncodedAudioFrame>,
+ const base::TimeTicks&)> FrameEncodedCallback;
+
+ AudioEncoder(scoped_refptr<CastThread> cast_thread,
+ const AudioSenderConfig& audio_config);
+
+ virtual ~AudioEncoder();
+
+ // The audio_frame must be valid until the closure callback is called.
+ // The closure callback is called from the main cast thread as soon as
+ // the encoder is done with the frame; it does not mean that the encoded frame
+ // has been sent out.
+ void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure callback);
+
+ private:
+ void EncodeAudioFrameThread(
+ const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure release_callback);
+
+ scoped_refptr<CastThread> cast_thread_;
+ // Can't use scoped_ptr due to protected constructor within webrtc.
+ webrtc::AudioCodingModule* audio_encoder_;
+ scoped_ptr<WebrtEncodedDataCallback> webrtc_encoder_callback_;
+ uint32 timestamp_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioEncoder);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_AUDIO_SENDER_AUDIO_ENCODER_H_
diff --git a/media/cast/audio_sender/audio_encoder_unittest.cc b/media/cast/audio_sender/audio_encoder_unittest.cc
new file mode 100644
index 0000000000..5903ab6a07
--- /dev/null
+++ b/media/cast/audio_sender/audio_encoder_unittest.cc
@@ -0,0 +1,73 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "media/cast/audio_sender/audio_encoder.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+static const int64 kStartMillisecond = 123456789;
+
+using base::RunLoop;
+
+static void RelaseFrame(const PcmAudioFrame* frame) {
+ delete frame;
+};
+
+static void FrameEncoded(scoped_ptr<EncodedAudioFrame> encoded_frame,
+ const base::TimeTicks& recorded_time) {
+}
+
+class AudioEncoderTest : public ::testing::Test {
+ protected:
+ AudioEncoderTest() {}
+
+ virtual void SetUp() {
+ cast_thread_ = new CastThread(MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current());
+ AudioSenderConfig audio_config;
+ audio_config.codec = kOpus;
+ audio_config.use_external_encoder = false;
+ audio_config.frequency = 48000;
+ audio_config.channels = 2;
+ audio_config.bitrate = 64000;
+ audio_config.rtp_payload_type = 127;
+
+ audio_encoder_ = new AudioEncoder(cast_thread_, audio_config);
+ }
+
+ ~AudioEncoderTest() {}
+
+ base::MessageLoop loop_;
+ scoped_refptr<AudioEncoder> audio_encoder_;
+ scoped_refptr<CastThread> cast_thread_;
+};
+
+TEST_F(AudioEncoderTest, Encode20ms) {
+ RunLoop run_loop;
+
+ PcmAudioFrame* audio_frame = new PcmAudioFrame();
+ audio_frame->channels = 2;
+ audio_frame->frequency = 48000;
+ audio_frame->samples.insert(audio_frame->samples.begin(), 480 * 2 * 2, 123);
+
+ base::TimeTicks recorded_time;
+ audio_encoder_->InsertRawAudioFrame(audio_frame, recorded_time,
+ base::Bind(&FrameEncoded),
+ base::Bind(&RelaseFrame, audio_frame));
+ run_loop.RunUntilIdle();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/audio_sender/audio_sender.cc b/media/cast/audio_sender/audio_sender.cc
new file mode 100644
index 0000000000..39fccda637
--- /dev/null
+++ b/media/cast/audio_sender/audio_sender.cc
@@ -0,0 +1,168 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/audio_sender/audio_sender.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/cast/audio_sender/audio_encoder.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_sender/rtp_sender.h"
+
+namespace media {
+namespace cast {
+
+const int64 kMinSchedulingDelayMs = 1;
+
+class LocalRtcpAudioSenderFeedback : public RtcpSenderFeedback {
+ public:
+ explicit LocalRtcpAudioSenderFeedback(AudioSender* audio_sender)
+ : audio_sender_(audio_sender) {
+ }
+
+ virtual void OnReceivedSendReportRequest() OVERRIDE {
+ DCHECK(false) << "Invalid callback";
+ }
+
+ virtual void OnReceivedReportBlock(
+ const RtcpReportBlock& report_block) OVERRIDE {
+ DCHECK(false) << "Invalid callback";
+ }
+
+ virtual void OnReceivedIntraFrameRequest() OVERRIDE {
+ DCHECK(false) << "Invalid callback";
+ }
+
+
+ virtual void OnReceivedRpsi(uint8 payload_type,
+ uint64 picture_id) OVERRIDE {
+ DCHECK(false) << "Invalid callback";
+ }
+
+ virtual void OnReceivedRemb(uint32 bitrate) OVERRIDE {
+ DCHECK(false) << "Invalid callback";
+ }
+
+ virtual void OnReceivedNackRequest(
+ const std::list<uint16>& nack_sequence_numbers) OVERRIDE {
+ DCHECK(false) << "Invalid callback";
+ }
+
+ virtual void OnReceivedCastFeedback(
+ const RtcpCastMessage& cast_feedback) OVERRIDE {
+ if (!cast_feedback.missing_frames_and_packets_.empty()) {
+ audio_sender_->ResendPackets(cast_feedback.missing_frames_and_packets_);
+ }
+ VLOG(1) << "Received audio ACK "
+ << static_cast<int>(cast_feedback.ack_frame_id_);
+ }
+
+ private:
+ AudioSender* audio_sender_;
+};
+
+class LocalRtpSenderStatistics : public RtpSenderStatistics {
+ public:
+ explicit LocalRtpSenderStatistics(RtpSender* rtp_sender)
+ : rtp_sender_(rtp_sender) {
+ }
+
+ virtual void GetStatistics(const base::TimeTicks& now,
+ RtcpSenderInfo* sender_info) OVERRIDE {
+ rtp_sender_->RtpStatistics(now, sender_info);
+ }
+
+ private:
+ RtpSender* rtp_sender_;
+};
+
+AudioSender::AudioSender(scoped_refptr<CastThread> cast_thread,
+ const AudioSenderConfig& audio_config,
+ PacedPacketSender* const paced_packet_sender)
+ : incoming_feedback_ssrc_(audio_config.incoming_feedback_ssrc),
+ cast_thread_(cast_thread),
+ rtp_sender_(&audio_config, NULL, paced_packet_sender),
+ rtcp_feedback_(new LocalRtcpAudioSenderFeedback(this)),
+ rtp_audio_sender_statistics_(
+ new LocalRtpSenderStatistics(&rtp_sender_)),
+ rtcp_(rtcp_feedback_.get(),
+ paced_packet_sender,
+ rtp_audio_sender_statistics_.get(),
+ NULL,
+ audio_config.rtcp_mode,
+ base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
+ true,
+ audio_config.sender_ssrc,
+ audio_config.rtcp_c_name),
+ clock_(&default_tick_clock_),
+ weak_factory_(this) {
+
+ rtcp_.SetRemoteSSRC(audio_config.incoming_feedback_ssrc);
+
+ if (!audio_config.use_external_encoder) {
+ audio_encoder_ = new AudioEncoder(cast_thread, audio_config);
+ }
+ ScheduleNextRtcpReport();
+}
+
+AudioSender::~AudioSender() {}
+
+void AudioSender::InsertRawAudioFrame(
+ const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback) {
+ DCHECK(audio_encoder_.get()) << "Invalid internal state";
+
+
+ audio_encoder_->InsertRawAudioFrame(audio_frame, recorded_time,
+ base::Bind(&AudioSender::SendEncodedAudioFrame,
+ weak_factory_.GetWeakPtr()),
+ callback);
+}
+
+void AudioSender::InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback) {
+ DCHECK(audio_encoder_.get() == NULL) << "Invalid internal state";
+ rtp_sender_.IncomingEncodedAudioFrame(audio_frame, recorded_time);
+ callback.Run();
+}
+
+void AudioSender::SendEncodedAudioFrame(
+ scoped_ptr<EncodedAudioFrame> audio_frame,
+ const base::TimeTicks& recorded_time) {
+ rtp_sender_.IncomingEncodedAudioFrame(audio_frame.get(), recorded_time);
+}
+
+void AudioSender::ResendPackets(
+ const MissingFramesAndPacketsMap& missing_frames_and_packets) {
+ rtp_sender_.ResendPackets(missing_frames_and_packets);
+}
+
+void AudioSender::IncomingRtcpPacket(const uint8* packet, int length,
+ const base::Closure callback) {
+ rtcp_.IncomingRtcpPacket(packet, length);
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, callback);
+}
+
+void AudioSender::ScheduleNextRtcpReport() {
+ base::TimeDelta time_to_next =
+ rtcp_.TimeToSendNextRtcpReport() - clock_->NowTicks();
+
+ time_to_next = std::max(time_to_next,
+ base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+
+ cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&AudioSender::SendRtcpReport, weak_factory_.GetWeakPtr()),
+ time_to_next);
+}
+
+void AudioSender::SendRtcpReport() {
+ rtcp_.SendRtcpReport(incoming_feedback_ssrc_);
+ ScheduleNextRtcpReport();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/audio_sender/audio_sender.gypi b/media/cast/audio_sender/audio_sender.gypi
new file mode 100644
index 0000000000..3e2a56345b
--- /dev/null
+++ b/media/cast/audio_sender/audio_sender.gypi
@@ -0,0 +1,30 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'audio_sender',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ '<(DEPTH)/third_party/webrtc',
+ ],
+ 'sources': [
+ 'audio_encoder.h',
+ 'audio_encoder.cc',
+ 'audio_sender.h',
+ 'audio_sender.cc',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/media/cast/rtcp/rtcp.gyp:cast_rtcp',
+ '<(DEPTH)/media/cast/rtp_sender/rtp_sender.gyp:*',
+ '<(DEPTH)/third_party/webrtc/webrtc.gyp:webrtc',
+ ],
+ },
+ ],
+}
+
+
diff --git a/media/cast/audio_sender/audio_sender.h b/media/cast/audio_sender/audio_sender.h
new file mode 100644
index 0000000000..3d389b381f
--- /dev/null
+++ b/media/cast/audio_sender/audio_sender.h
@@ -0,0 +1,100 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_AUDIO_SENDER_H_
+#define MEDIA_CAST_AUDIO_SENDER_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_sender/rtp_sender.h"
+
+namespace media {
+namespace cast {
+
+class AudioEncoder;
+class LocalRtcpAudioSenderFeedback;
+class LocalRtpSenderStatistics;
+class PacedPacketSender;
+
+// This class is not thread safe.
+// It's only called from the main cast thread.
+class AudioSender : public base::NonThreadSafe,
+ public base::SupportsWeakPtr<AudioSender> {
+ public:
+ AudioSender(scoped_refptr<CastThread> cast_thread,
+ const AudioSenderConfig& audio_config,
+ PacedPacketSender* const paced_packet_sender);
+
+ virtual ~AudioSender();
+
+ // The audio_frame must be valid until the closure callback is called.
+ // The closure callback is called from the main cast thread as soon as
+ // the encoder is done with the frame; it does not mean that the encoded frame
+ // has been sent out.
+ void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback);
+
+ // The audio_frame must be valid until the closure callback is called.
+ // The closure callback is called from the main cast thread as soon as
+ // the cast sender is done with the frame; it does not mean that the encoded
+ // frame has been sent out.
+ void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback);
+
+ // Only called from the main cast thread.
+ void IncomingRtcpPacket(const uint8* packet, int length,
+ const base::Closure callback);
+
+ // Only used for testing.
+ void set_clock(base::TickClock* clock) {
+ clock_ = clock;
+ rtcp_.set_clock(clock);
+ rtp_sender_.set_clock(clock);
+ }
+
+ protected:
+ void SendEncodedAudioFrame(scoped_ptr<EncodedAudioFrame> audio_frame,
+ const base::TimeTicks& recorded_time);
+
+ private:
+ friend class LocalRtcpAudioSenderFeedback;
+
+ void ResendPackets(
+ const MissingFramesAndPacketsMap& missing_frames_and_packets);
+
+ void ScheduleNextRtcpReport();
+ void SendRtcpReport();
+
+ base::DefaultTickClock default_tick_clock_;
+ base::TickClock* clock_;
+
+ base::WeakPtrFactory<AudioSender> weak_factory_;
+
+ const uint32 incoming_feedback_ssrc_;
+ scoped_refptr<CastThread> cast_thread_;
+ scoped_refptr<AudioEncoder> audio_encoder_;
+ RtpSender rtp_sender_;
+ scoped_ptr<LocalRtpSenderStatistics> rtp_audio_sender_statistics_;
+ scoped_ptr<LocalRtcpAudioSenderFeedback> rtcp_feedback_;
+ Rtcp rtcp_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioSender);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_AUDIO_SENDER_H_
+
diff --git a/media/cast/audio_sender/audio_sender_unittest.cc b/media/cast/audio_sender/audio_sender_unittest.cc
new file mode 100644
index 0000000000..c08b3c81b8
--- /dev/null
+++ b/media/cast/audio_sender/audio_sender_unittest.cc
@@ -0,0 +1,96 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/threading/platform_thread.h"
+#include "media/cast/audio_sender/audio_sender.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/pacing/mock_paced_packet_sender.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+static const int64 kStartMillisecond = 123456789;
+
+using base::RunLoop;
+using testing::_;
+
+static void RelaseFrame(const PcmAudioFrame* frame) {
+ delete frame;
+};
+
+class AudioSenderTest : public ::testing::Test {
+ protected:
+ AudioSenderTest() {
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ }
+
+ virtual void SetUp() {
+ cast_thread_ = new CastThread(MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current());
+ AudioSenderConfig audio_config;
+ audio_config.codec = kOpus;
+ audio_config.use_external_encoder = false;
+ audio_config.frequency = 48000;
+ audio_config.channels = 2;
+ audio_config.bitrate = 64000;
+ audio_config.rtp_payload_type = 127;
+
+ audio_sender_.reset(
+ new AudioSender(cast_thread_, audio_config, &mock_transport_));
+ audio_sender_->set_clock(&testing_clock_);
+ }
+
+ ~AudioSenderTest() {}
+
+ base::MessageLoop loop_;
+ MockPacedPacketSender mock_transport_;
+ base::SimpleTestTickClock testing_clock_;
+ scoped_ptr<AudioSender> audio_sender_;
+ scoped_refptr<CastThread> cast_thread_;
+};
+
+TEST_F(AudioSenderTest, Encode20ms) {
+ EXPECT_CALL(mock_transport_, SendPacket(_, _)).Times(1);
+
+ RunLoop run_loop;
+
+ PcmAudioFrame* audio_frame = new PcmAudioFrame();
+ audio_frame->channels = 2;
+ audio_frame->frequency = 48000;
+ audio_frame->samples.insert(audio_frame->samples.begin(), 480 * 2 * 2, 123);
+
+ base::TimeTicks recorded_time;
+ audio_sender_->InsertRawAudioFrame(audio_frame, recorded_time,
+ base::Bind(&RelaseFrame, audio_frame));
+ run_loop.RunUntilIdle();
+}
+
+TEST_F(AudioSenderTest, RtcpTimer) {
+ EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).Times(1);
+
+ RunLoop run_loop;
+ // Make sure that we send at least one RTCP packet.
+ base::TimeDelta max_rtcp_timeout =
+ base::TimeDelta::FromMilliseconds(1 + kDefaultRtcpIntervalMs * 3 / 2);
+ testing_clock_.Advance(max_rtcp_timeout);
+
+ // TODO(pwestin): haven't found a way to make the post delayed task to go
+ // faster than a real-time.
+ base::PlatformThread::Sleep(max_rtcp_timeout);
+ run_loop.RunUntilIdle();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/cast.gyp b/media/cast/cast.gyp
index 776bb46889..9b632f4f4c 100644
--- a/media/cast/cast.gyp
+++ b/media/cast/cast.gyp
@@ -16,6 +16,8 @@
'sources': [
'cast_config.h',
'cast_config.cc',
+ 'cast_thread.h',
+ 'cast_thread.cc',
], # source
},
{
@@ -56,10 +58,15 @@
'<(DEPTH)/third_party/webrtc/',
],
'sources': [
+ 'audio_sender/audio_encoder_unittest.cc',
+ 'audio_sender/audio_sender_unittest.cc',
'congestion_control/congestion_control_unittest.cc',
'framer/cast_message_builder_unittest.cc',
'framer/frame_buffer_unittest.cc',
'framer/framer_unittest.cc',
+ 'rtp_receiver/receiver_stats_unittest.cc',
+ 'rtp_receiver/rtp_parser/test/rtp_packet_builder.cc',
+ 'rtp_receiver/rtp_parser/rtp_parser_unittest.cc',
'rtp_sender/packet_storage/packet_storage_unittest.cc',
'rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc',
'rtp_sender/rtp_packetizer/test/rtp_header_parser.cc',
@@ -68,6 +75,8 @@
'rtcp/rtcp_receiver_unittest.cc',
'rtcp/rtcp_sender_unittest.cc',
'rtcp/rtcp_unittest.cc',
+ 'video_sender/video_encoder_unittest.cc',
+ 'video_sender/video_sender_unittest.cc',
], # source
},
], # targets
diff --git a/media/cast/cast_config.h b/media/cast/cast_config.h
index e1280cd03b..988924aab4 100644
--- a/media/cast/cast_config.h
+++ b/media/cast/cast_config.h
@@ -9,6 +9,8 @@
#include <vector>
#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
#include "media/cast/cast_defines.h"
namespace media {
@@ -175,17 +177,16 @@ class PacketSender {
// All packets to be sent to the network will be delivered via this function.
virtual bool SendPacket(const uint8* packet, int length) = 0;
- protected:
virtual ~PacketSender() {}
};
-class PacketReceiver {
+class PacketReceiver : public base::RefCountedThreadSafe<PacketReceiver> {
public:
// All packets received from the network should be delivered via this
// function.
- virtual void ReceivedPacket(const uint8* packet, int length) = 0;
+ virtual void ReceivedPacket(const uint8* packet, int length,
+ const base::Closure callback) = 0;
- protected:
virtual ~PacketReceiver() {}
};
diff --git a/media/cast/cast_defines.h b/media/cast/cast_defines.h
index 7239148644..1371732340 100644
--- a/media/cast/cast_defines.h
+++ b/media/cast/cast_defines.h
@@ -5,6 +5,9 @@
#ifndef MEDIA_CAST_CAST_DEFINES_H_
#define MEDIA_CAST_CAST_DEFINES_H_
+#include <map>
+#include <set>
+
#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "base/time/time.h"
@@ -35,6 +38,84 @@ enum DefaultSettings {
kDefaultRtpMaxDelayMs = 100,
};
+const uint16 kRtcpCastAllPacketsLost = 0xffff;
+
+// Each uint16 represents one packet id within a cast frame.
+typedef std::set<uint16> PacketIdSet;
+// Each uint8 represents one cast frame.
+typedef std::map<uint8, PacketIdSet> MissingFramesAndPacketsMap;
+
+// TODO(pwestin): Re-factor the functions below into a class with static
+// methods.
+
+// Magic fractional unit. Used to convert time (in microseconds) to/from
+// fractional NTP seconds.
+static const double kMagicFractionalUnit = 4.294967296E3;
+
+// Network Time Protocol (NTP), which is in seconds relative to 0h UTC on
+// 1 January 1900.
+static const int64 kNtpEpochDeltaSeconds = GG_INT64_C(9435484800);
+static const int64 kNtpEpochDeltaMicroseconds =
+ kNtpEpochDeltaSeconds * base::Time::kMicrosecondsPerSecond;
+
+inline bool IsNewerFrameId(uint8 frame_id, uint8 prev_frame_id) {
+ return (frame_id != prev_frame_id) &&
+ static_cast<uint8>(frame_id - prev_frame_id) < 0x80;
+}
+
+inline bool IsOlderFrameId(uint8 frame_id, uint8 prev_frame_id) {
+ return (frame_id == prev_frame_id) || IsNewerFrameId(prev_frame_id, frame_id);
+}
+
+inline bool IsNewerPacketId(uint16 packet_id, uint16 prev_packet_id) {
+ return (packet_id != prev_packet_id) &&
+ static_cast<uint16>(packet_id - prev_packet_id) < 0x8000;
+}
+
+inline bool IsNewerSequenceNumber(uint16 sequence_number,
+ uint16 prev_sequence_number) {
+ // Same function as IsNewerPacketId just different data and name.
+ return IsNewerPacketId(sequence_number, prev_sequence_number);
+}
+
+// Create a NTP diff from seconds and fractions of seconds; delay_fraction is
+// fractions of a second where 0x80000000 is half a second.
+inline uint32 ConvertToNtpDiff(uint32 delay_seconds, uint32 delay_fraction) {
+ return ((delay_seconds & 0x0000FFFF) << 16) +
+ ((delay_fraction & 0xFFFF0000) >> 16);
+}
+
+inline base::TimeDelta ConvertFromNtpDiff(uint32 ntp_delay) {
+ uint32 delay_ms = (ntp_delay & 0x0000ffff) * 1000;
+ delay_ms >>= 16;
+ delay_ms += ((ntp_delay & 0xffff0000) >> 16) * 1000;
+ return base::TimeDelta::FromMilliseconds(delay_ms);
+}
+
+inline void ConvertTimeToFractions(int64 time_us,
+ uint32* seconds,
+ uint32* fractions) {
+ *seconds = static_cast<uint32>(time_us / base::Time::kMicrosecondsPerSecond);
+ *fractions = static_cast<uint32>(
+ (time_us % base::Time::kMicrosecondsPerSecond) * kMagicFractionalUnit);
+}
+
+inline void ConvertTimeToNtp(const base::TimeTicks& time,
+ uint32* ntp_seconds,
+ uint32* ntp_fractions) {
+ int64 time_us = time.ToInternalValue() - kNtpEpochDeltaMicroseconds;
+ ConvertTimeToFractions(time_us, ntp_seconds, ntp_fractions);
+}
+
+inline base::TimeTicks ConvertNtpToTime(uint32 ntp_seconds,
+ uint32 ntp_fractions) {
+ int64 ntp_time_us = static_cast<int64>(ntp_seconds) *
+ base::Time::kMicrosecondsPerSecond;
+ ntp_time_us += static_cast<int64>(ntp_fractions) / kMagicFractionalUnit;
+ return base::TimeTicks::FromInternalValue(ntp_time_us +
+ kNtpEpochDeltaMicroseconds);
+}
+
} // namespace cast
} // namespace media
diff --git a/media/cast/cast_receiver.gyp b/media/cast/cast_receiver.gyp
index 0fb5e38ae8..32bc5c7b78 100644
--- a/media/cast/cast_receiver.gyp
+++ b/media/cast/cast_receiver.gyp
@@ -17,6 +17,7 @@
# 'cast_receiver_impl.h',
], # source
'dependencies': [
+ 'rtp_receiver/rtp_receiver.gyp:*',
# 'audio_receiver',
# 'video_receiver',
'framer/framer.gyp:cast_framer',
diff --git a/media/cast/cast_receiver.h b/media/cast/cast_receiver.h
index 3dafbe5105..fa09721d32 100644
--- a/media/cast/cast_receiver.h
+++ b/media/cast/cast_receiver.h
@@ -1,6 +1,9 @@
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// This is the main interface for the cast receiver. All configuration is done
+// at creation.
#ifndef MEDIA_CAST_CAST_RECEIVER_H_
#define MEDIA_CAST_CAST_RECEIVER_H_
@@ -12,8 +15,10 @@
namespace media {
namespace cast {
-class FrameReceiver {
+// This Class is thread safe.
+class FrameReceiver : public base::RefCountedThreadSafe<FrameReceiver>{
public:
+ // TODO(pwestin): These functions must be updated.
virtual bool GetRawVideoFrame(I420VideoFrame* video_frame,
base::TimeTicks* render_time) = 0;
@@ -32,24 +37,24 @@ class FrameReceiver {
virtual void ReleaseCodedAudioFrame(uint8 frame_id) = 0;
-protected:
virtual ~FrameReceiver() {}
};
+// This Class is thread safe.
class CastReceiver {
public:
static CastReceiver* CreateCastReceiver(
+ scoped_refptr<CastThread> cast_thread,
const AudioReceiverConfig& audio_config,
const VideoReceiverConfig& video_config,
PacketSender* const packet_sender);
// All received RTP and RTCP packets for the call should be inserted to this
- // PacketReceiver. The PacketReceiver pointer is valid as long as the
- // CastReceiver instance exists.
- virtual PacketReceiver* packet_receiver() = 0;
+ // PacketReceiver.
+ virtual scoped_refptr<PacketReceiver> packet_receiver() = 0;
// Polling interface to get audio and video frames from the CastReceiver.
- virtual FrameReceiver* frame_receiver() = 0;
+ virtual scoped_refptr<FrameReceiver> frame_receiver() = 0;
virtual ~CastReceiver() {};
};
diff --git a/media/cast/cast_sender.gyp b/media/cast/cast_sender.gyp
index c41bd64f4f..fe99f80382 100644
--- a/media/cast/cast_sender.gyp
+++ b/media/cast/cast_sender.gyp
@@ -4,26 +4,31 @@
{
'includes': [
-# 'audio_sender/audio_sender.gypi',
+ 'audio_sender/audio_sender.gypi',
'congestion_control/congestion_control.gypi',
-# 'video_sender/video_sender.gypi',
+ 'video_sender/video_sender.gypi',
],
'targets': [
{
'target_name': 'cast_sender_impl',
'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ '<(DEPTH)/third_party/webrtc/',
+ ],
'sources': [
'cast_sender.h',
-# 'cast_sender_impl.cc',
-# 'cast_sender_impl.h',
+ 'cast_sender_impl.cc',
+ 'cast_sender_impl.h',
], # source
'dependencies': [
-# 'audio_sender',
+ 'audio_sender',
'congestion_control',
'pacing/paced_sender.gyp:paced_sender',
'rtcp/rtcp.gyp:cast_rtcp',
'rtp_sender/rtp_sender.gyp:cast_rtp_sender',
-# 'video_sender',
+ 'video_sender',
], # dependencies
},
],
diff --git a/media/cast/cast_sender.h b/media/cast/cast_sender.h
index b5a3bcba70..f4d36539b4 100644
--- a/media/cast/cast_sender.h
+++ b/media/cast/cast_sender.h
@@ -1,38 +1,71 @@
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// This is the main interface for the cast sender. All configuration is done
+// at creation.
+//
+// The FrameInput and PacketReceiver interfaces should normally be accessed
+// from the IO thread. However, they are allowed to be called from any thread.
#ifndef MEDIA_CAST_CAST_SENDER_H_
#define MEDIA_CAST_CAST_SENDER_H_
#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
namespace media {
namespace cast {
-class FrameInput {
+// This class is thread safe.
+class FrameInput : public base::RefCountedThreadSafe<PacketReceiver> {
public:
- virtual void InsertRawVideoFrame(const I420VideoFrame& video_frame,
- base::TimeTicks capture_time) = 0;
+ // The video_frame must be valid until the callback is called.
+ // The callback is called from the main cast thread as soon as
+ // the encoder is done with the frame; it does not mean that the encoded frame
+ // has been sent out.
+ virtual void InsertRawVideoFrame(const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback) = 0;
- virtual void InsertCodedVideoFrame(const EncodedVideoFrame& video_frame,
- base::TimeTicks capture_time) = 0;
+ // The video_frame must be valid until the callback is called.
+ // The callback is called from the main cast thread as soon as
+ // the cast sender is done with the frame; it does not mean that the encoded
+ // frame has been sent out.
+ virtual void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback) = 0;
- virtual void InsertRawAudioFrame(const PcmAudioFrame& audio_frame,
- base::TimeTicks recorded_time) = 0;
+ // The audio_frame must be valid until the callback is called.
+ // The callback is called from the main cast thread as soon as
+ // the encoder is done with the frame; it does not mean that the encoded frame
+ // has been sent out.
+ virtual void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback) = 0;
- virtual void InsertCodedAudioFrame(const EncodedAudioFrame& audio_frame,
- base::TimeTicks recorded_time) = 0;
+ // The audio_frame must be valid until the callback is called.
+ // The callback is called from the main cast thread as soon as
+ // the cast sender is done with the frame; it does not mean that the encoded
+ // frame has been sent out.
+ virtual void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback) = 0;
- protected:
virtual ~FrameInput() {}
};
+// This class is thread safe.
+// The provided PacketSender object will always be called from the main cast
+// thread.
class CastSender {
public:
static CastSender* CreateCastSender(
+ scoped_refptr<CastThread> cast_thread,
const AudioSenderConfig& audio_config,
const VideoSenderConfig& video_config,
VideoEncoderController* const video_encoder_controller,
@@ -40,12 +73,14 @@ class CastSender {
virtual ~CastSender() {};
- virtual FrameInput* frame_input() = 0;
+ // All audio and video frames for the session should be inserted to this
+ // object.
+ // Can be called from any thread.
+ virtual scoped_refptr<FrameInput> frame_input() = 0;
- // All RTCP packets for the call should be inserted to this
- // PacketReceiver. The PacketReceiver pointer is valid as long as the
- // CastSender instance exists.
- virtual PacketReceiver* packet_receiver() = 0;
+ // All RTCP packets for the session should be inserted to this object.
+ // Can be called from any thread.
+ virtual scoped_refptr<PacketReceiver> packet_receiver() = 0;
};
} // namespace cast
diff --git a/media/cast/cast_sender_impl.cc b/media/cast/cast_sender_impl.cc
new file mode 100644
index 0000000000..76f2f99765
--- /dev/null
+++ b/media/cast/cast_sender_impl.cc
@@ -0,0 +1,176 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "media/cast/cast_sender_impl.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+
+namespace media {
+namespace cast {
+
+// The LocalFrameInput class posts all incoming frames, audio and video, to
+// the main cast thread for processing.
+// This makes the cast sender interface thread safe.
+class LocalFrameInput : public FrameInput {
+ public:
+ LocalFrameInput(scoped_refptr<CastThread> cast_thread,
+ base::WeakPtr<AudioSender> audio_sender,
+ base::WeakPtr<VideoSender> video_sender)
+ : cast_thread_(cast_thread),
+ audio_sender_(audio_sender),
+ video_sender_(video_sender) {}
+
+ virtual void InsertRawVideoFrame(const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback) OVERRIDE {
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&VideoSender::InsertRawVideoFrame, video_sender_,
+ video_frame, capture_time, callback));
+ }
+
+ virtual void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback) OVERRIDE {
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&VideoSender::InsertCodedVideoFrame, video_sender_,
+ video_frame, capture_time, callback));
+ }
+
+ virtual void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback) OVERRIDE {
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&AudioSender::InsertRawAudioFrame, audio_sender_,
+ audio_frame, recorded_time, callback));
+ }
+
+ virtual void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback) OVERRIDE {
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&AudioSender::InsertCodedAudioFrame, audio_sender_,
+ audio_frame, recorded_time, callback));
+ }
+
+ private:
+ scoped_refptr<CastThread> cast_thread_;
+ base::WeakPtr<AudioSender> audio_sender_;
+ base::WeakPtr<VideoSender> video_sender_;
+};
+
+// LocalCastSenderPacketReceiver handles the incoming packets to the cast
+// sender; it's only expected to receive RTCP feedback packets from the remote
+// cast receiver. The class verifies that it is an RTCP packet and, based on
+// the SSRC of the incoming packet, routes the packet to the correct sender;
+// audio or video.
+//
+// Definition of SSRC as defined in RFC 3550.
+// Synchronization source (SSRC): The source of a stream of RTP
+// packets, identified by a 32-bit numeric SSRC identifier carried in
+// the RTP header so as not to be dependent upon the network address.
+// All packets from a synchronization source form part of the same
+// timing and sequence number space, so a receiver groups packets by
+// synchronization source for playback. Examples of synchronization
+// sources include the sender of a stream of packets derived from a
+// signal source such as a microphone or a camera, or an RTP mixer
+// (see below). A synchronization source may change its data format,
+// e.g., audio encoding, over time. The SSRC identifier is a
+// randomly chosen value meant to be globally unique within a
+// particular RTP session (see Section 8). A participant need not
+// use the same SSRC identifier for all the RTP sessions in a
+// multimedia session; the binding of the SSRC identifiers is
+// provided through RTCP (see Section 6.5.1). If a participant
+// generates multiple streams in one RTP session, for example from
+// separate video cameras, each MUST be identified as a different
+// SSRC.
+
+class LocalCastSenderPacketReceiver : public PacketReceiver {
+ public:
+ LocalCastSenderPacketReceiver(scoped_refptr<CastThread> cast_thread,
+ base::WeakPtr<AudioSender> audio_sender,
+ base::WeakPtr<VideoSender> video_sender,
+ uint32 ssrc_of_audio_sender,
+ uint32 ssrc_of_video_sender)
+ : cast_thread_(cast_thread),
+ audio_sender_(audio_sender),
+ video_sender_(video_sender),
+ ssrc_of_audio_sender_(ssrc_of_audio_sender),
+ ssrc_of_video_sender_(ssrc_of_video_sender) {}
+
+ virtual ~LocalCastSenderPacketReceiver() {}
+
+ virtual void ReceivedPacket(const uint8* packet,
+ int length,
+ const base::Closure callback) OVERRIDE {
+ if (!Rtcp::IsRtcpPacket(packet, length)) {
+ // We should have no incoming RTP packets.
+ // No action; just log and call the callback informing that we are done
+ // with the packet.
+ VLOG(1) << "Unexpectedly received a RTP packet in the cast sender";
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, callback);
+ return;
+ }
+ uint32 ssrc_of_sender = Rtcp::GetSsrcOfSender(packet, length);
+ if (ssrc_of_sender == ssrc_of_audio_sender_) {
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&AudioSender::IncomingRtcpPacket, audio_sender_,
+ packet, length, callback));
+ } else if (ssrc_of_sender == ssrc_of_video_sender_) {
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&VideoSender::IncomingRtcpPacket, video_sender_,
+ packet, length, callback));
+ } else {
+ // No action; just log and call the callback informing that we are done
+ // with the packet.
+ VLOG(1) << "Received a RTCP packet with a non matching sender SSRC "
+ << ssrc_of_sender;
+
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, callback);
+ }
+ }
+
+ private:
+ scoped_refptr<CastThread> cast_thread_;
+ base::WeakPtr<AudioSender> audio_sender_;
+ base::WeakPtr<VideoSender> video_sender_;
+ uint32 ssrc_of_audio_sender_;
+ uint32 ssrc_of_video_sender_;
+};
+
+CastSender* CastSender::CreateCastSender(
+ scoped_refptr<CastThread> cast_thread,
+ const AudioSenderConfig& audio_config,
+ const VideoSenderConfig& video_config,
+ VideoEncoderController* const video_encoder_controller,
+ PacketSender* const packet_sender) {
+ return new CastSenderImpl(cast_thread,
+ audio_config,
+ video_config,
+ video_encoder_controller,
+ packet_sender);
+}
+
+CastSenderImpl::CastSenderImpl(
+ scoped_refptr<CastThread> cast_thread,
+ const AudioSenderConfig& audio_config,
+ const VideoSenderConfig& video_config,
+ VideoEncoderController* const video_encoder_controller,
+ PacketSender* const packet_sender)
+ : pacer_(cast_thread, packet_sender),
+ audio_sender_(cast_thread, audio_config, &pacer_),
+ video_sender_(cast_thread, video_config, video_encoder_controller,
+ &pacer_),
+ frame_input_(new LocalFrameInput(cast_thread, audio_sender_.AsWeakPtr(),
+ video_sender_.AsWeakPtr())),
+ packet_receiver_(new LocalCastSenderPacketReceiver(cast_thread,
+ audio_sender_.AsWeakPtr(), video_sender_.AsWeakPtr(),
+ audio_config.incoming_feedback_ssrc,
+ video_config.incoming_feedback_ssrc)) {}
+
+CastSenderImpl::~CastSenderImpl() {}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/cast_sender_impl.h b/media/cast/cast_sender_impl.h
new file mode 100644
index 0000000000..eb19caa247
--- /dev/null
+++ b/media/cast/cast_sender_impl.h
@@ -0,0 +1,55 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef MEDIA_CAST_CAST_SENDER_IMPL_H_
+#define MEDIA_CAST_CAST_SENDER_IMPL_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/audio_sender/audio_sender.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_sender.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/video_sender/video_sender.h"
+
+namespace media {
+namespace cast {
+
+class AudioSender;
+class PacedSender;
+class VideoSender;
+
+// This class is a pure owner class that groups all required sending objects
+// together, such as pacer, packet receiver, frame input, audio and video sender.
+class CastSenderImpl : public CastSender {
+ public:
+ CastSenderImpl(scoped_refptr<CastThread> cast_thread,
+ const AudioSenderConfig& audio_config,
+ const VideoSenderConfig& video_config,
+ VideoEncoderController* const video_encoder_controller,
+ PacketSender* const packet_sender);
+
+ virtual ~CastSenderImpl();
+
+ virtual scoped_refptr<FrameInput> frame_input() OVERRIDE {
+ return frame_input_;
+ }
+
+ virtual scoped_refptr<PacketReceiver> packet_receiver() OVERRIDE {
+ return packet_receiver_;
+ }
+
+ private:
+ PacedSender pacer_;
+ AudioSender audio_sender_;
+ VideoSender video_sender_;
+ scoped_refptr<FrameInput> frame_input_;
+ scoped_refptr<PacketReceiver> packet_receiver_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_CAST_SENDER_IMPL_H_
+
diff --git a/media/cast/cast_thread.cc b/media/cast/cast_thread.cc
new file mode 100644
index 0000000000..62de8f1e76
--- /dev/null
+++ b/media/cast/cast_thread.cc
@@ -0,0 +1,62 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/cast_thread.h"
+
+#include "base/logging.h"
+
+namespace media {
+namespace cast {
+
+CastThread::CastThread(
+ scoped_refptr<MessageLoopProxy> main_thread_proxy,
+ scoped_refptr<MessageLoopProxy> audio_encode_thread_proxy,
+ scoped_refptr<MessageLoopProxy> audio_decode_thread_proxy,
+ scoped_refptr<MessageLoopProxy> video_encode_thread_proxy,
+ scoped_refptr<MessageLoopProxy> video_decode_thread_proxy)
+ : main_thread_proxy_(main_thread_proxy),
+ audio_encode_thread_proxy_(audio_encode_thread_proxy),
+ audio_decode_thread_proxy_(audio_decode_thread_proxy),
+ video_encode_thread_proxy_(video_encode_thread_proxy),
+ video_decode_thread_proxy_(video_decode_thread_proxy) {
+ DCHECK(main_thread_proxy) << "Main thread required";
+}
+
+bool CastThread::PostTask(ThreadId identifier,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task) {
+ scoped_refptr<base::MessageLoopProxy> message_proxy =
+ GetMessageLoopProxyForThread(identifier);
+
+ return message_proxy->PostTask(from_here, task);
+}
+
+bool CastThread::PostDelayedTask(ThreadId identifier,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) {
+ scoped_refptr<base::MessageLoopProxy> message_proxy =
+ GetMessageLoopProxyForThread(identifier);
+
+ return message_proxy->PostDelayedTask(from_here, task, delay);
+}
+
+scoped_refptr<base::MessageLoopProxy> CastThread::GetMessageLoopProxyForThread(
+ ThreadId identifier) {
+ switch (identifier) {
+ case CastThread::MAIN:
+ return main_thread_proxy_;
+ case CastThread::AUDIO_ENCODER:
+ return audio_encode_thread_proxy_;
+ case CastThread::AUDIO_DECODER:
+ return audio_decode_thread_proxy_;
+ case CastThread::VIDEO_ENCODER:
+ return video_encode_thread_proxy_;
+ case CastThread::VIDEO_DECODER:
+ return video_decode_thread_proxy_;
+ }
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/cast_thread.h b/media/cast/cast_thread.h
new file mode 100644
index 0000000000..7ce8588b64
--- /dev/null
+++ b/media/cast/cast_thread.h
@@ -0,0 +1,73 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_CAST_THREAD_H_
+#define MEDIA_CAST_CAST_THREAD_H_
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+
+using base::MessageLoopProxy;
+
+class CastThread : public base::RefCountedThreadSafe<CastThread> {
+ public:
+ // An enumeration of the cast threads.
+ enum ThreadId {
+ // The main thread is where the cast system is configured and where timers
+ // and network IO is performed.
+ MAIN,
+ // The audio encoder thread is where all send side audio processing is done,
+ // primarily encoding but also re-sampling.
+ AUDIO_ENCODER,
+ // The audio decoder thread is where all receive side audio processing is
+ // done, primarily decoding but also error concealment and re-sampling.
+ AUDIO_DECODER,
+ // The video encoder thread is where the video encode processing is done.
+ VIDEO_ENCODER,
+ // The video decoder thread is where the video decode processing is done.
+ VIDEO_DECODER,
+ };
+
+ CastThread(scoped_refptr<MessageLoopProxy> main_thread_proxy,
+ scoped_refptr<MessageLoopProxy> audio_encode_thread_proxy,
+ scoped_refptr<MessageLoopProxy> audio_decode_thread_proxy,
+ scoped_refptr<MessageLoopProxy> video_encode_thread_proxy,
+ scoped_refptr<MessageLoopProxy> video_decode_thread_proxy);
+
+ // These are the same methods in message_loop.h, but are guaranteed to either
+ // get posted to the MessageLoop if it's still alive, or be deleted otherwise.
+ // They return true iff the thread existed and the task was posted. Note that
+ // even if the task is posted, there's no guarantee that it will run, since
+ // the target thread may already have a Quit message in its queue.
+ bool PostTask(ThreadId identifier,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task);
+
+ bool PostDelayedTask(ThreadId identifier,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay);
+
+ private:
+ scoped_refptr<base::MessageLoopProxy> GetMessageLoopProxyForThread(
+ ThreadId identifier);
+
+ scoped_refptr<MessageLoopProxy> main_thread_proxy_;
+ scoped_refptr<MessageLoopProxy> audio_encode_thread_proxy_;
+ scoped_refptr<MessageLoopProxy> audio_decode_thread_proxy_;
+ scoped_refptr<MessageLoopProxy> video_encode_thread_proxy_;
+ scoped_refptr<MessageLoopProxy> video_decode_thread_proxy_;
+
+ DISALLOW_COPY_AND_ASSIGN(CastThread);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_CAST_THREAD_H_
diff --git a/media/cast/pacing/paced_sender.cc b/media/cast/pacing/paced_sender.cc
index f89361bedf..d2935f3e65 100644
--- a/media/cast/pacing/paced_sender.cc
+++ b/media/cast/pacing/paced_sender.cc
@@ -4,19 +4,25 @@
#include "media/cast/pacing/paced_sender.h"
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+
namespace media {
namespace cast {
static const int64 kPacingIntervalMs = 10;
-static const int64 kPacingMinIntervalMs = 7;
static const int kPacingMaxBurstsPerFrame = 3;
-PacedSender::PacedSender(PacketSender* transport)
- : burst_size_(1),
+PacedSender::PacedSender(scoped_refptr<CastThread> cast_thread,
+ PacketSender* transport)
+ : cast_thread_(cast_thread),
+ burst_size_(1),
packets_sent_in_burst_(0),
transport_(transport),
- default_tick_clock_(new base::DefaultTickClock()),
- clock_(default_tick_clock_.get()) {
+ clock_(&default_tick_clock_),
+ weak_factory_(this) {
+ ScheduleNextSend();
}
PacedSender::~PacedSender() {}
@@ -62,31 +68,29 @@ bool PacedSender::SendRtcpPacket(const std::vector<uint8>& packet) {
return transport_->SendPacket(&(packet[0]), packet.size());
}
-base::TimeTicks PacedSender::TimeNextProcess() {
- return time_last_process_ +
+void PacedSender::ScheduleNextSend() {
+ base::TimeDelta time_to_next = time_last_process_ - clock_->NowTicks() +
base::TimeDelta::FromMilliseconds(kPacingIntervalMs);
-}
-void PacedSender::Process() {
- int packets_to_send = 0;
- base::TimeTicks now = clock_->NowTicks();
+ time_to_next = std::max(time_to_next,
+ base::TimeDelta::FromMilliseconds(0));
- base::TimeDelta min_pacing_interval =
- base::TimeDelta::FromMilliseconds(kPacingMinIntervalMs);
-
- // Have enough time have passed?
- if (now - time_last_process_ < min_pacing_interval) return;
+ cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&PacedSender::SendNextPacketBurst, weak_factory_.GetWeakPtr()),
+ time_to_next);
+}
- time_last_process_ = now;
- packets_to_send = burst_size_;
- // Allow new packets to be inserted while we loop over our packets to send.
+void PacedSender::SendNextPacketBurst() {
+ int packets_to_send = burst_size_;
+ time_last_process_ = clock_->NowTicks();
for (int i = 0; i < packets_to_send; ++i) {
SendStoredPacket();
}
+ ScheduleNextSend();
}
void PacedSender::SendStoredPacket() {
- if (packet_list_.empty() && resend_packet_list_.empty()) return;
+ if (packet_list_.empty() && resend_packet_list_.empty()) return;
if (!resend_packet_list_.empty()) {
// Send our re-send packets first.
diff --git a/media/cast/pacing/paced_sender.h b/media/cast/pacing/paced_sender.h
index 45c8dc1996..9dcd03e846 100644
--- a/media/cast/pacing/paced_sender.h
+++ b/media/cast/pacing/paced_sender.h
@@ -10,10 +10,13 @@
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/non_thread_safe.h"
#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
namespace media {
namespace cast {
@@ -29,21 +32,16 @@ class PacedPacketSender {
virtual bool SendRtcpPacket(const std::vector<uint8>& packet) = 0;
- protected:
virtual ~PacedPacketSender() {}
};
-class PacedSender : public PacedPacketSender {
+class PacedSender : public PacedPacketSender,
+ public base::NonThreadSafe,
+ public base::SupportsWeakPtr<PacedSender> {
public:
- explicit PacedSender(PacketSender* transport);
+ PacedSender(scoped_refptr<CastThread> cast_thread, PacketSender* transport);
virtual ~PacedSender();
- // Returns the time when the pacer want a worker thread to call Process.
- base::TimeTicks TimeNextProcess();
-
- // Process any pending packets in the queue(s).
- void Process();
-
virtual bool SendPacket(const std::vector<uint8>& packet,
int num_of_packets) OVERRIDE;
@@ -56,12 +54,21 @@ class PacedSender : public PacedPacketSender {
clock_ = clock;
}
+ protected:
+ // Schedule a delayed task on the main cast thread when it's time to send the
+ // next packet burst.
+ void ScheduleNextSend();
+
+ // Process any pending packets in the queue(s).
+ void SendNextPacketBurst();
+
private:
void SendStoredPacket();
void UpdateBurstSize(int num_of_packets);
typedef std::list<std::vector<uint8> > PacketList;
+ scoped_refptr<CastThread> cast_thread_;
int burst_size_;
int packets_sent_in_burst_;
base::TimeTicks time_last_process_;
@@ -69,9 +76,11 @@ class PacedSender : public PacedPacketSender {
PacketList resend_packet_list_;
PacketSender* transport_;
- scoped_ptr<base::TickClock> default_tick_clock_;
+ base::DefaultTickClock default_tick_clock_;
base::TickClock* clock_;
+ base::WeakPtrFactory<PacedSender> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(PacedSender);
};
diff --git a/media/cast/pacing/paced_sender_unittest.cc b/media/cast/pacing/paced_sender_unittest.cc
index 9108965dda..b731d601e2 100644
--- a/media/cast/pacing/paced_sender_unittest.cc
+++ b/media/cast/pacing/paced_sender_unittest.cc
@@ -2,16 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/pacing/mock_packet_sender.h"
#include "media/cast/pacing/paced_sender.h"
#include "testing/gmock/include/gmock/gmock.h"
-using testing::_;
-
namespace media {
namespace cast {
+using base::RunLoop;
+using testing::_;
+
static const uint8 kValue = 123;
static const size_t kSize1 = 100;
static const size_t kSize2 = 101;
@@ -22,18 +25,29 @@ static const int64 kStartMillisecond = 123456789;
class PacedSenderTest : public ::testing::Test {
protected:
- PacedSenderTest()
- : paced_sender_(&mock_transport_) {
+ PacedSenderTest() {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
- paced_sender_.set_clock(&testing_clock_);
}
virtual ~PacedSenderTest() {}
+ virtual void SetUp() {
+ // TODO(pwestin): Write a generic message loop that runs with a mock clock.
+ cast_thread_ = new CastThread(MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current());
+ paced_sender_.reset(new PacedSender(cast_thread_, &mock_transport_));
+ paced_sender_->set_clock(&testing_clock_);
+ }
+
+ base::MessageLoop loop_;
MockPacketSender mock_transport_;
- PacedSender paced_sender_;
+ scoped_ptr<PacedSender> paced_sender_;
base::SimpleTestTickClock testing_clock_;
+ scoped_refptr<CastThread> cast_thread_;
};
TEST_F(PacedSenderTest, PassThroughRtcp) {
@@ -42,14 +56,14 @@ TEST_F(PacedSenderTest, PassThroughRtcp) {
std::vector<uint8> packet(kSize1, kValue);
int num_of_packets = 1;
- EXPECT_TRUE(paced_sender_.SendPacket(packet, num_of_packets));
+ EXPECT_TRUE(paced_sender_->SendPacket(packet, num_of_packets));
EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(0);
- EXPECT_TRUE(paced_sender_.ResendPacket(packet, num_of_packets));
+ EXPECT_TRUE(paced_sender_->ResendPacket(packet, num_of_packets));
EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(1).WillRepeatedly(
testing::Return(true));
- EXPECT_TRUE(paced_sender_.SendRtcpPacket(packet));
+ EXPECT_TRUE(paced_sender_->SendRtcpPacket(packet));
}
TEST_F(PacedSenderTest, BasicPace) {
@@ -59,30 +73,52 @@ TEST_F(PacedSenderTest, BasicPace) {
EXPECT_CALL(mock_transport_,
SendPacket(_, kSize1)).Times(3).WillRepeatedly(testing::Return(true));
for (int i = 0; i < num_of_packets; ++i) {
- EXPECT_TRUE(paced_sender_.SendPacket(packet, num_of_packets));
+ EXPECT_TRUE(paced_sender_->SendPacket(packet, num_of_packets));
}
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(10);
+ testing_clock_.Advance(timeout);
// Check that we get the next burst.
EXPECT_CALL(mock_transport_,
SendPacket(_, kSize1)).Times(3).WillRepeatedly(testing::Return(true));
- paced_sender_.Process();
+
+ // TODO(pwestin): haven't found a way to make the post delayed task to go
+ // faster than a real-time.
+ base::PlatformThread::Sleep(timeout);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
// If we call process too early make sure we don't send any packets.
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(5));
+ timeout = base::TimeDelta::FromMilliseconds(5);
+ testing_clock_.Advance(timeout);
EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(0);
- paced_sender_.Process();
+ base::PlatformThread::Sleep(timeout);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
// Check that we get the next burst.
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(5));
+ testing_clock_.Advance(timeout);
EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(3).WillRepeatedly(
testing::Return(true));
- paced_sender_.Process();
+ base::PlatformThread::Sleep(timeout);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
// Check that we don't get any more packets.
EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(0);
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
- paced_sender_.Process();
+ timeout = base::TimeDelta::FromMilliseconds(10);
+ testing_clock_.Advance(timeout);
+ base::PlatformThread::Sleep(timeout);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
}
TEST_F(PacedSenderTest, PaceWithNack) {
@@ -98,65 +134,95 @@ TEST_F(PacedSenderTest, PaceWithNack) {
EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(3).WillRepeatedly(
testing::Return(true));
for (int i = 0; i < num_of_packets_in_frame; ++i) {
- EXPECT_TRUE(paced_sender_.SendPacket(firts_packet,
+ EXPECT_TRUE(paced_sender_->SendPacket(firts_packet,
num_of_packets_in_frame));
}
// Add first NACK request.
for (int i = 0; i < num_of_packets_in_nack; ++i) {
- EXPECT_TRUE(paced_sender_.ResendPacket(nack_packet,
+ EXPECT_TRUE(paced_sender_->ResendPacket(nack_packet,
num_of_packets_in_nack));
}
// Check that we get the first NACK burst.
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
EXPECT_CALL(mock_transport_, SendPacket(_, kNackSize)).Times(5).
WillRepeatedly(testing::Return(true));
- paced_sender_.Process();
+
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(10);
+ testing_clock_.Advance(timeout);
+ base::PlatformThread::Sleep(timeout);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
// Add second NACK request.
for (int i = 0; i < num_of_packets_in_nack; ++i) {
- EXPECT_TRUE(paced_sender_.ResendPacket(nack_packet,
+ EXPECT_TRUE(paced_sender_->ResendPacket(nack_packet,
num_of_packets_in_nack));
}
// Check that we get the next NACK burst.
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
EXPECT_CALL(mock_transport_, SendPacket(_, kNackSize)).Times(7)
.WillRepeatedly(testing::Return(true));
- paced_sender_.Process();
+
+ testing_clock_.Advance(timeout);
+ base::PlatformThread::Sleep(timeout);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
// End of NACK plus a packet from the oldest frame.
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
EXPECT_CALL(mock_transport_, SendPacket(_, kNackSize)).Times(6)
.WillRepeatedly(testing::Return(true));
EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(1)
.WillRepeatedly(testing::Return(true));
- paced_sender_.Process();
+
+ testing_clock_.Advance(timeout);
+ base::PlatformThread::Sleep(timeout);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
// Add second frame.
// Make sure we don't delay the second frame due to the previous packets.
for (int i = 0; i < num_of_packets_in_frame; ++i) {
- EXPECT_TRUE(paced_sender_.SendPacket(second_packet,
+ EXPECT_TRUE(paced_sender_->SendPacket(second_packet,
num_of_packets_in_frame));
}
// Last packets of frame 1 and the first packets of frame 2.
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(5).WillRepeatedly(
testing::Return(true));
EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(2).WillRepeatedly(
testing::Return(true));
- paced_sender_.Process();
+
+ testing_clock_.Advance(timeout);
+ base::PlatformThread::Sleep(timeout);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
// Last packets of frame 2.
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(7).WillRepeatedly(
testing::Return(true));
- paced_sender_.Process();
+
+ testing_clock_.Advance(timeout);
+ base::PlatformThread::Sleep(timeout);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
// No more packets.
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(0);
- paced_sender_.Process();
+ testing_clock_.Advance(timeout);
+ base::PlatformThread::Sleep(timeout);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
}
TEST_F(PacedSenderTest, PaceWith60fps) {
@@ -172,19 +238,24 @@ TEST_F(PacedSenderTest, PaceWith60fps) {
EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(3).WillRepeatedly(
testing::Return(true));
for (int i = 0; i < num_of_packets_in_frame; ++i) {
- EXPECT_TRUE(paced_sender_.SendPacket(firts_packet,
+ EXPECT_TRUE(paced_sender_->SendPacket(firts_packet,
num_of_packets_in_frame));
}
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
+ base::TimeDelta timeout_10ms = base::TimeDelta::FromMilliseconds(10);
EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(3).
WillRepeatedly(testing::Return(true));
- paced_sender_.Process();
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(6));
+ testing_clock_.Advance(timeout_10ms);
+ base::PlatformThread::Sleep(timeout_10ms);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(6));
// Add second frame, after 16 ms.
for (int i = 0; i < num_of_packets_in_frame; ++i) {
- EXPECT_TRUE(paced_sender_.SendPacket(second_packet,
+ EXPECT_TRUE(paced_sender_->SendPacket(second_packet,
num_of_packets_in_frame));
}
@@ -193,53 +264,83 @@ TEST_F(PacedSenderTest, PaceWith60fps) {
.WillRepeatedly(testing::Return(true));
EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(1)
.WillRepeatedly(testing::Return(true));
- paced_sender_.Process();
+
+ base::PlatformThread::Sleep(timeout_10ms);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(4)
.WillRepeatedly(testing::Return(true));
- paced_sender_.Process();
+
+ base::PlatformThread::Sleep(timeout_10ms);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
testing_clock_.Advance(base::TimeDelta::FromMilliseconds(3));
// Add third frame, after 33 ms.
for (int i = 0; i < num_of_packets_in_frame; ++i) {
- EXPECT_TRUE(paced_sender_.SendPacket(third_packet,
+ EXPECT_TRUE(paced_sender_->SendPacket(third_packet,
num_of_packets_in_frame));
}
-
testing_clock_.Advance(base::TimeDelta::FromMilliseconds(7));
EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(4)
.WillRepeatedly(testing::Return(true));
EXPECT_CALL(mock_transport_, SendPacket(_, kSize3)).Times(1)
.WillRepeatedly(testing::Return(true));
- paced_sender_.Process();
+
+ base::PlatformThread::Sleep(timeout_10ms);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
// Add fourth frame, after 50 ms.
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
for (int i = 0; i < num_of_packets_in_frame; ++i) {
- EXPECT_TRUE(paced_sender_.SendPacket(fourth_packet,
+ EXPECT_TRUE(paced_sender_->SendPacket(fourth_packet,
num_of_packets_in_frame));
}
EXPECT_CALL(mock_transport_, SendPacket(_, kSize3)).Times(6)
.WillRepeatedly(testing::Return(true));
- paced_sender_.Process();
+ testing_clock_.Advance(timeout_10ms);
+ base::PlatformThread::Sleep(timeout_10ms);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
EXPECT_CALL(mock_transport_, SendPacket(_, kSize3)).Times(2)
.WillRepeatedly(testing::Return(true));
EXPECT_CALL(mock_transport_, SendPacket(_, kSize4)).Times(4)
.WillRepeatedly(testing::Return(true));
- paced_sender_.Process();
+ testing_clock_.Advance(timeout_10ms);
+ base::PlatformThread::Sleep(timeout_10ms);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
EXPECT_CALL(mock_transport_, SendPacket(_, kSize4)).Times(5)
.WillRepeatedly(testing::Return(true));
- paced_sender_.Process();
+ testing_clock_.Advance(timeout_10ms);
+ base::PlatformThread::Sleep(timeout_10ms);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
EXPECT_CALL(mock_transport_, SendPacket(_, kSize4)).Times(0);
- paced_sender_.Process();
+ testing_clock_.Advance(timeout_10ms);
+ base::PlatformThread::Sleep(timeout_10ms);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
}
} // namespace cast
diff --git a/media/cast/rtcp/rtcp.cc b/media/cast/rtcp/rtcp.cc
index 2ee2631fdc..c3e2c8e4d8 100644
--- a/media/cast/rtcp/rtcp.cc
+++ b/media/cast/rtcp/rtcp.cc
@@ -12,6 +12,7 @@
#include "media/cast/rtcp/rtcp_receiver.h"
#include "media/cast/rtcp/rtcp_sender.h"
#include "media/cast/rtcp/rtcp_utility.h"
+#include "net/base/big_endian.h"
namespace media {
namespace cast {
@@ -21,9 +22,6 @@ static const int kMaxRttMs = 1000000; // 1000 seconds.
// Time limit for received RTCP messages when we stop using it for lip-sync.
static const int64 kMaxDiffSinceReceivedRtcpMs = 100000; // 100 seconds.
-// Magic fractional unit.
-static const double kMagicFractionalUnit = 4.294967296E3;
-
class LocalRtcpRttFeedback : public RtcpRttFeedback {
public:
explicit LocalRtcpRttFeedback(Rtcp* rtcp)
@@ -120,17 +118,7 @@ Rtcp::Rtcp(RtcpSenderFeedback* sender_feedback,
Rtcp::~Rtcp() {}
-base::TimeTicks Rtcp::TimeToSendNextRtcpReport() {
- if (next_time_to_send_rtcp_.is_null()) {
- UpdateNextTimeToSendRtcp();
- }
- return next_time_to_send_rtcp_;
-}
-
-void Rtcp::SetRemoteSSRC(uint32 ssrc) {
- rtcp_receiver_->SetRemoteSSRC(ssrc);
-}
-
+// static
bool Rtcp::IsRtcpPacket(const uint8* packet, int length) {
DCHECK_GE(length, 8) << "Invalid RTCP packet";
if (length < 8) return false;
@@ -142,6 +130,26 @@ bool Rtcp::IsRtcpPacket(const uint8* packet, int length) {
return false;
}
+// static
+uint32 Rtcp::GetSsrcOfSender(const uint8* rtcp_buffer, int length) {
+ uint32 ssrc_of_sender;
+ net::BigEndianReader big_endian_reader(rtcp_buffer, length);
+ big_endian_reader.Skip(4); // Skip header
+ big_endian_reader.ReadU32(&ssrc_of_sender);
+ return ssrc_of_sender;
+}
+
+base::TimeTicks Rtcp::TimeToSendNextRtcpReport() {
+ if (next_time_to_send_rtcp_.is_null()) {
+ UpdateNextTimeToSendRtcp();
+ }
+ return next_time_to_send_rtcp_;
+}
+
+void Rtcp::SetRemoteSSRC(uint32 ssrc) {
+ rtcp_receiver_->SetRemoteSSRC(ssrc);
+}
+
void Rtcp::IncomingRtcpPacket(const uint8* rtcp_buffer, int length) {
RtcpParser rtcp_parser(rtcp_buffer, length);
if (!rtcp_parser.IsValid()) {
@@ -378,30 +386,6 @@ bool Rtcp::Rtt(base::TimeDelta* rtt,
return true;
}
-void Rtcp::ConvertTimeToFractions(int64 time_us,
- uint32* seconds,
- uint32* fractions) const {
- *seconds = static_cast<uint32>(time_us / base::Time::kMicrosecondsPerSecond);
- *fractions = static_cast<uint32>(
- (time_us % base::Time::kMicrosecondsPerSecond) * kMagicFractionalUnit);
-}
-
-void Rtcp::ConvertTimeToNtp(const base::TimeTicks& time,
- uint32* ntp_seconds,
- uint32* ntp_fractions) const {
- int64 time_us = time.ToInternalValue() - kNtpEpochDeltaMicroseconds;
- ConvertTimeToFractions(time_us, ntp_seconds, ntp_fractions);
-}
-
-base::TimeTicks Rtcp::ConvertNtpToTime(uint32 ntp_seconds,
- uint32 ntp_fractions) const {
- int64 ntp_time_us = static_cast<int64>(ntp_seconds) *
- base::Time::kMicrosecondsPerSecond;
- ntp_time_us += static_cast<int64>(ntp_fractions) / kMagicFractionalUnit;
- return base::TimeTicks::FromInternalValue(ntp_time_us +
- kNtpEpochDeltaMicroseconds);
-}
-
int Rtcp::CheckForWrapAround(uint32 new_timestamp,
uint32 old_timestamp) const {
if (new_timestamp < old_timestamp) {
diff --git a/media/cast/rtcp/rtcp.h b/media/cast/rtcp/rtcp.h
index 9cf9708a47..31962a526c 100644
--- a/media/cast/rtcp/rtcp.h
+++ b/media/cast/rtcp/rtcp.h
@@ -66,12 +66,6 @@ class RtpReceiverStatistics {
class Rtcp {
public:
- // Network Time Protocol (NTP), which is in seconds relative to 0h UTC on
- // 1 January 1900.
- static const int64 kNtpEpochDeltaSeconds = GG_INT64_C(9435484800);
- static const int64 kNtpEpochDeltaMicroseconds =
- kNtpEpochDeltaSeconds * base::Time::kMicrosecondsPerSecond;
-
Rtcp(RtcpSenderFeedback* sender_feedback,
PacedPacketSender* paced_packet_sender,
RtpSenderStatistics* rtp_sender_statistics,
@@ -86,6 +80,8 @@ class Rtcp {
static bool IsRtcpPacket(const uint8* rtcp_buffer, int length);
+ static uint32 GetSsrcOfSender(const uint8* rtcp_buffer, int length);
+
base::TimeTicks TimeToSendNextRtcpReport();
void SendRtcpReport(uint32 media_ssrc);
void SendRtcpPli(uint32 media_ssrc);
@@ -104,19 +100,13 @@ class Rtcp {
}
protected:
- void ConvertTimeToNtp(const base::TimeTicks& time,
- uint32* ntp_seconds,
- uint32* ntp_fractions) const;
-
- base::TimeTicks ConvertNtpToTime(uint32 ntp_seconds,
- uint32 ntp_fractions) const;
-
int CheckForWrapAround(uint32 new_timestamp,
uint32 old_timestamp) const;
void OnReceivedLipSyncInfo(uint32 rtp_timestamp,
uint32 ntp_seconds,
uint32 ntp_fraction);
+
private:
friend class LocalRtcpRttFeedback;
friend class LocalRtcpReceiverFeedback;
@@ -137,24 +127,8 @@ class Rtcp {
void UpdateRtt(const base::TimeDelta& sender_delay,
const base::TimeDelta& receiver_delay);
- void ConvertTimeToFractions(int64 time_us,
- uint32* seconds,
- uint32* fractions) const;
-
void UpdateNextTimeToSendRtcp();
- inline uint32 ConvertToNtpDiff(uint32 delay_seconds, uint32 delay_fraction) {
- return ((delay_seconds & 0x0000FFFF) << 16) +
- ((delay_fraction & 0xFFFF0000) >> 16);
- }
-
- inline base::TimeDelta ConvertFromNtpDiff(uint32 ntp_delay) {
- uint32 delay_ms = (ntp_delay & 0x0000ffff) * 1000;
- delay_ms /= 65536;
- delay_ms += ((ntp_delay & 0xffff0000) >> 16) * 1000;
- return base::TimeDelta::FromMilliseconds(delay_ms);
- }
-
const base::TimeDelta rtcp_interval_;
const RtcpMode rtcp_mode_;
const bool sending_media_;
diff --git a/media/cast/rtcp/rtcp_defines.h b/media/cast/rtcp/rtcp_defines.h
index 102e321ada..f0635f8ca8 100644
--- a/media/cast/rtcp/rtcp_defines.h
+++ b/media/cast/rtcp/rtcp_defines.h
@@ -15,11 +15,6 @@
namespace media {
namespace cast {
-const uint16 kRtcpCastAllPacketsLost = 0xffff;
-
-typedef std::set<uint16> PacketIdSet;
-typedef std::map<uint8, PacketIdSet> MissingFramesAndPacketsMap;
-
class RtcpCastMessage {
public:
explicit RtcpCastMessage(uint32 media_ssrc);
diff --git a/media/cast/rtcp/rtcp_unittest.cc b/media/cast/rtcp/rtcp_unittest.cc
index 049fbeb800..dfcc6ea910 100644
--- a/media/cast/rtcp/rtcp_unittest.cc
+++ b/media/cast/rtcp/rtcp_unittest.cc
@@ -84,8 +84,6 @@ class RtcpPeer : public Rtcp {
c_name) {
}
- using Rtcp::ConvertTimeToNtp;
- using Rtcp::ConvertNtpToTime;
using Rtcp::CheckForWrapAround;
using Rtcp::OnReceivedLipSyncInfo;
};
@@ -308,15 +306,14 @@ TEST_F(RtcpTest, NtpAndTime) {
kReceiverSsrc,
kCName);
rtcp_peer.set_clock(&testing_clock_);
- int64 input_time_us = 12345678901000LL + Rtcp::kNtpEpochDeltaMicroseconds;
uint32 ntp_seconds = 0;
uint32 ntp_fractions = 0;
- base::TimeTicks input_time =
- base::TimeTicks::FromInternalValue(input_time_us);
- rtcp_peer.ConvertTimeToNtp(input_time, &ntp_seconds, &ntp_fractions);
+ base::TimeTicks input_time = base::TimeTicks::FromInternalValue(
+ 12345678901000LL + kNtpEpochDeltaMicroseconds);
+ ConvertTimeToNtp(input_time, &ntp_seconds, &ntp_fractions);
EXPECT_EQ(12345678u, ntp_seconds);
EXPECT_EQ(input_time,
- rtcp_peer.ConvertNtpToTime(ntp_seconds, ntp_fractions));
+ ConvertNtpToTime(ntp_seconds, ntp_fractions));
}
TEST_F(RtcpTest, WrapAround) {
@@ -366,14 +363,13 @@ TEST_F(RtcpTest, RtpTimestampInSenderTime) {
EXPECT_FALSE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
&rtp_timestamp_in_ticks));
- int64 input_time_us = 12345678901000LL + Rtcp::kNtpEpochDeltaMicroseconds;
uint32 ntp_seconds = 0;
uint32 ntp_fractions = 0;
- base::TimeTicks input_time =
- base::TimeTicks::FromInternalValue(input_time_us);
+ base::TimeTicks input_time = base::TimeTicks::FromInternalValue(
+ 12345678901000LL + kNtpEpochDeltaMicroseconds);
// Test exact match.
- rtcp_peer.ConvertTimeToNtp(input_time, &ntp_seconds, &ntp_fractions);
+ ConvertTimeToNtp(input_time, &ntp_seconds, &ntp_fractions);
rtcp_peer.OnReceivedLipSyncInfo(rtp_timestamp, ntp_seconds, ntp_fractions);
EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
&rtp_timestamp_in_ticks));
diff --git a/media/cast/rtp_common/rtp_defines.h b/media/cast/rtp_common/rtp_defines.h
index 89ee019427..2268fa9a31 100644
--- a/media/cast/rtp_common/rtp_defines.h
+++ b/media/cast/rtp_common/rtp_defines.h
@@ -5,8 +5,6 @@
#ifndef MEDIA_CAST_RTP_COMMON_RTP_DEFINES_H_
#define MEDIA_CAST_RTP_COMMON_RTP_DEFINES_H_
-#include <cstring>
-
#include "base/basictypes.h"
#include "media/cast/cast_config.h"
#include "media/cast/rtcp/rtcp_defines.h"
@@ -45,26 +43,6 @@ class RtpPayloadFeedback {
virtual ~RtpPayloadFeedback() {}
};
-inline bool IsNewerFrameId(uint8 frame_id, uint8 prev_frame_id) {
- return (frame_id != prev_frame_id) &&
- static_cast<uint8>(frame_id - prev_frame_id) < 0x80;
-}
-
-inline bool IsOlderFrameId(uint8 frame_id, uint8 prev_frame_id) {
- return (frame_id == prev_frame_id) || IsNewerFrameId(prev_frame_id, frame_id);
-}
-
-inline bool IsNewerPacketId(uint16 packet_id, uint16 prev_packet_id) {
- return (packet_id != prev_packet_id) &&
- static_cast<uint16>(packet_id - prev_packet_id) < 0x8000;
-}
-
-inline bool IsNewerSequenceNumber(uint16 sequence_number,
- uint16 prev_sequence_number) {
- // Same function as IsNewerPacketId just different data and name.
- return IsNewerPacketId(sequence_number, prev_sequence_number);
-}
-
} // namespace cast
} // namespace media
diff --git a/media/cast/rtp_receiver/receiver_stats.cc b/media/cast/rtp_receiver/receiver_stats.cc
new file mode 100644
index 0000000000..44a9b81007
--- /dev/null
+++ b/media/cast/rtp_receiver/receiver_stats.cc
@@ -0,0 +1,120 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_receiver/receiver_stats.h"
+
+#include "base/logging.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace media {
+namespace cast {
+
+static const uint32 kMaxSequenceNumber = 65536;
+
+ReceiverStats::ReceiverStats(uint32 ssrc)
+ : ssrc_(ssrc),
+ min_sequence_number_(0),
+ max_sequence_number_(0),
+ total_number_packets_(0),
+ sequence_number_cycles_(0),
+ interval_min_sequence_number_(0),
+ interval_number_packets_(0),
+ interval_wrap_count_(0),
+ default_tick_clock_(),
+ clock_(&default_tick_clock_) {}
+
+ReceiverStats::~ReceiverStats() {}
+
+void ReceiverStats::GetStatistics(uint8* fraction_lost,
+ uint32* cumulative_lost,
+ uint32* extended_high_sequence_number,
+ uint32* jitter) {
+ // Compute losses.
+ if (interval_number_packets_ == 0) {
+ *fraction_lost = 0;
+ } else {
+ int diff = 0;
+ if (interval_wrap_count_ == 0) {
+ diff = max_sequence_number_ - interval_min_sequence_number_ + 1;
+ } else {
+ diff = kMaxSequenceNumber * (interval_wrap_count_ - 1) +
+ (max_sequence_number_ - interval_min_sequence_number_ +
+ kMaxSequenceNumber + 1);
+ }
+
+ if (diff < 1) {
+ *fraction_lost = 0;
+ } else {
+ *fraction_lost = static_cast<uint8>((256 * (1 -
+ static_cast<float>(interval_number_packets_) / abs(diff))));
+ }
+ }
+
+ int expected_packets_num = max_sequence_number_ - min_sequence_number_ + 1;
+ if (total_number_packets_ == 0) {
+ *cumulative_lost = 0;
+ } else if (sequence_number_cycles_ == 0) {
+ *cumulative_lost = expected_packets_num - total_number_packets_;
+ } else {
+ *cumulative_lost = kMaxSequenceNumber * (sequence_number_cycles_ - 1) +
+ (expected_packets_num - total_number_packets_ + kMaxSequenceNumber);
+ }
+
+ // Extended high sequence number consists of the highest seq number and the
+ // number of cycles (wrap).
+ *extended_high_sequence_number = (sequence_number_cycles_ << 16) +
+ max_sequence_number_;
+
+ *jitter = static_cast<uint32>(abs(jitter_.InMilliseconds()));
+
+ // Reset interval values.
+ interval_min_sequence_number_ = 0;
+ interval_number_packets_ = 0;
+ interval_wrap_count_ = 0;
+}
+
+void ReceiverStats::UpdateStatistics(const RtpCastHeader& header) {
+ if (ssrc_ != header.webrtc.header.ssrc) return;
+
+ uint16 new_seq_num = header.webrtc.header.sequenceNumber;
+
+ if (interval_number_packets_ == 0) {
+ // First packet in the interval.
+ interval_min_sequence_number_ = new_seq_num;
+ }
+ if (total_number_packets_ == 0) {
+ // First incoming packet.
+ min_sequence_number_ = new_seq_num;
+ max_sequence_number_ = new_seq_num;
+ }
+
+ if (IsNewerSequenceNumber(new_seq_num, max_sequence_number_)) {
+ // Check wrap.
+ if (new_seq_num < max_sequence_number_) {
+ ++sequence_number_cycles_;
+ ++interval_wrap_count_;
+ }
+ max_sequence_number_ = new_seq_num;
+ }
+
+ // Compute Jitter.
+ base::TimeTicks now = clock_->NowTicks();
+ base::TimeDelta delta_new_timestamp =
+ base::TimeDelta::FromMilliseconds(header.webrtc.header.timestamp);
+ if (total_number_packets_ > 0) {
+ // Update jitter.
+ base::TimeDelta delta = (now - last_received_packet_time_) -
+ ((delta_new_timestamp - last_received_timestamp_) / 90000);
+ jitter_ += (delta - jitter_) / 16;
+ }
+ last_received_timestamp_ = delta_new_timestamp;
+ last_received_packet_time_ = now;
+
+ // Increment counters.
+ ++total_number_packets_;
+ ++interval_number_packets_;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/rtp_receiver/receiver_stats.h b/media/cast/rtp_receiver/receiver_stats.h
new file mode 100644
index 0000000000..610f515c0e
--- /dev/null
+++ b/media/cast/rtp_receiver/receiver_stats.h
@@ -0,0 +1,53 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTP_RECEIVER_RECEIVER_STATS_H_
+#define MEDIA_CAST_RTP_RECEIVER_RECEIVER_STATS_H_
+
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace media {
+namespace cast {
+
+class ReceiverStats {
+ public:
+ explicit ReceiverStats(uint32 ssrc);
+ ~ReceiverStats();
+ void GetStatistics(uint8* fraction_lost,
+ uint32* cumulative_lost, // 24 bits valid.
+ uint32* extended_high_sequence_number,
+ uint32* jitter);
+ void UpdateStatistics(const RtpCastHeader& header);
+
+ void set_clock(base::TickClock* clock) {
+ clock_ = clock;
+ }
+
+ private:
+ const uint32 ssrc_;
+
+ // Global metrics.
+ uint16 min_sequence_number_;
+ uint16 max_sequence_number_;
+ uint32 total_number_packets_;
+ uint16 sequence_number_cycles_;
+ base::TimeDelta last_received_timestamp_;
+ base::TimeTicks last_received_packet_time_;
+ base::TimeDelta jitter_;
+
+ // Intermediate metrics - between RTCP reports.
+ int interval_min_sequence_number_;
+ int interval_number_packets_;
+ int interval_wrap_count_;
+ base::DefaultTickClock default_tick_clock_;
+ base::TickClock* clock_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_RECEIVER_RECEIVER_STATS_H_
diff --git a/media/cast/rtp_receiver/receiver_stats_unittest.cc b/media/cast/rtp_receiver/receiver_stats_unittest.cc
new file mode 100644
index 0000000000..c6cf91ab07
--- /dev/null
+++ b/media/cast/rtp_receiver/receiver_stats_unittest.cc
@@ -0,0 +1,157 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <gtest/gtest.h>
+
+#include "base/test/simple_test_tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/receiver_stats.h"
+
+namespace media {
+namespace cast {
+
+static const int64 kStartMillisecond = 123456789;
+static const uint32 kStdTimeIncrementMs = 33;
+static const uint32 kSsrc = 0x1234;
+
+class ReceiverStatsTest : public ::testing::Test {
+ protected:
+ ReceiverStatsTest()
+ : stats_(kSsrc),
+ rtp_header_(),
+ fraction_lost_(0),
+ cumulative_lost_(0),
+ extended_high_sequence_number_(0),
+ jitter_(0) {
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ start_time_ = testing_clock_.NowTicks();
+ delta_increments_ = base::TimeDelta::FromMilliseconds(kStdTimeIncrementMs);
+ }
+ ~ReceiverStatsTest() {}
+
+ virtual void SetUp() {
+ rtp_header_.webrtc.header.sequenceNumber = 0;
+ rtp_header_.webrtc.header.timestamp = 0;
+ rtp_header_.webrtc.header.ssrc = kSsrc;
+ }
+
+ uint32 ExpectedJitter(uint32 const_interval, int num_packets) {
+ float jitter = 0;
+ // Assume timestamps have a constant kStdTimeIncrementMs interval.
+ float float_interval =
+ static_cast<float>(const_interval - kStdTimeIncrementMs);
+ for (int i = 0; i < num_packets; ++i) {
+ jitter += (float_interval - jitter) / 16;
+ }
+ return static_cast<uint32>(jitter + 0.5f);
+ }
+
+ uint32 Timestamp() {
+ base::TimeDelta delta = testing_clock_.NowTicks() - start_time_;
+ return static_cast<uint32>(delta.InMilliseconds() * 90);
+ }
+
+ ReceiverStats stats_;
+ RtpCastHeader rtp_header_;
+ uint8 fraction_lost_;
+ uint32 cumulative_lost_;
+ uint32 extended_high_sequence_number_;
+ uint32 jitter_;
+ base::SimpleTestTickClock testing_clock_;
+ base::TimeTicks start_time_;
+ base::TimeDelta delta_increments_;
+};
+
+TEST_F(ReceiverStatsTest, ResetState) {
+ stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
+ &extended_high_sequence_number_, &jitter_);
+ EXPECT_EQ(0u, fraction_lost_);
+ EXPECT_EQ(0u, cumulative_lost_);
+ EXPECT_EQ(0u, extended_high_sequence_number_);
+ EXPECT_EQ(0u, jitter_);
+}
+
+TEST_F(ReceiverStatsTest, LossCount) {
+ for (int i = 0; i < 300; ++i) {
+ if (i % 4)
+ stats_.UpdateStatistics(rtp_header_);
+ if (i % 3) {
+ rtp_header_.webrtc.header.timestamp = Timestamp();
+ }
+ ++rtp_header_.webrtc.header.sequenceNumber;
+ testing_clock_.Advance(delta_increments_);
+ }
+ stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
+ &extended_high_sequence_number_, &jitter_);
+ EXPECT_EQ(63u, fraction_lost_);
+ EXPECT_EQ(74u, cumulative_lost_);
+ // Build extended sequence number.
+ uint32 extended_seq_num = rtp_header_.webrtc.header.sequenceNumber - 1;
+ EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
+}
+
+TEST_F(ReceiverStatsTest, NoLossWrap) {
+ rtp_header_.webrtc.header.sequenceNumber = 65500;
+ for (int i = 0; i < 300; ++i) {
+ stats_.UpdateStatistics(rtp_header_);
+ if (i % 3) {
+ rtp_header_.webrtc.header.timestamp = Timestamp();
+ }
+ ++rtp_header_.webrtc.header.sequenceNumber;
+ testing_clock_.Advance(delta_increments_);
+ }
+ stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
+ &extended_high_sequence_number_, &jitter_);
+ EXPECT_EQ(0u, fraction_lost_);
+ EXPECT_EQ(0u, cumulative_lost_);
+ // Build extended sequence number (one wrap cycle).
+ uint32 extended_seq_num = (1 << 16) +
+ rtp_header_.webrtc.header.sequenceNumber - 1;
+ EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
+}
+
+TEST_F(ReceiverStatsTest, LossCountWrap) {
+ const uint32 start_sequence_number = 65500;
+ rtp_header_.webrtc.header.sequenceNumber = start_sequence_number;
+ for (int i = 0; i < 300; ++i) {
+ if (i % 4)
+ stats_.UpdateStatistics(rtp_header_);
+ if (i % 3)
+ // Update timestamp.
+ ++rtp_header_.webrtc.header.timestamp;
+ ++rtp_header_.webrtc.header.sequenceNumber;
+ testing_clock_.Advance(delta_increments_);
+ }
+ stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
+ &extended_high_sequence_number_, &jitter_);
+ EXPECT_EQ(63u, fraction_lost_);
+ EXPECT_EQ(74u, cumulative_lost_);
+ // Build extended sequence number (one wrap cycle).
+ uint32 extended_seq_num = (1 << 16) +
+ rtp_header_.webrtc.header.sequenceNumber - 1;
+ EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
+}
+
+TEST_F(ReceiverStatsTest, Jitter) {
+ rtp_header_.webrtc.header.timestamp = Timestamp();
+ for (int i = 0; i < 300; ++i) {
+ stats_.UpdateStatistics(rtp_header_);
+ ++rtp_header_.webrtc.header.sequenceNumber;
+ rtp_header_.webrtc.header.timestamp += 33 * 90;
+ testing_clock_.Advance(delta_increments_);
+ }
+ stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
+ &extended_high_sequence_number_, &jitter_);
+ EXPECT_FALSE(fraction_lost_);
+ EXPECT_FALSE(cumulative_lost_);
+ // Build extended sequence number (one wrap cycle).
+ uint32 extended_seq_num = rtp_header_.webrtc.header.sequenceNumber - 1;
+ EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
+ EXPECT_EQ(ExpectedJitter(kStdTimeIncrementMs, 300), jitter_);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h b/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h
new file mode 100644
index 0000000000..d39bc2a255
--- /dev/null
+++ b/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h
@@ -0,0 +1,37 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTP_INCLUDE_MOCK_RTP_FEEDBACK_H_
+#define MEDIA_CAST_RTP_INCLUDE_MOCK_RTP_FEEDBACK_H_
+
+#include "media/cast/rtp_receiver/rtp_parser/rtp_feedback.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+class MockRtpFeedback : public RtpFeedback {
+ public:
+ MOCK_METHOD4(OnInitializeDecoder,
+ int32(const int8 payloadType,
+ const int frequency,
+ const uint8 channels,
+ const uint32 rate));
+
+ MOCK_METHOD1(OnPacketTimeout,
+ void(const int32 id));
+ MOCK_METHOD2(OnReceivedPacket,
+ void(const int32 id, const RtpRtcpPacketType packet_type));
+ MOCK_METHOD2(OnPeriodicDeadOrAlive,
+ void(const int32 id, const RTPAliveType alive));
+ MOCK_METHOD2(OnIncomingSSRCChanged,
+ void(const int32 id, const uint32 ssrc));
+ MOCK_METHOD3(OnIncomingCSRCChanged,
+ void(const int32 id, const uint32 csrc, const bool added));
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_INCLUDE_MOCK_RTP_FEEDBACK_H_ \ No newline at end of file
diff --git a/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc b/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
new file mode 100644
index 0000000000..0eb691be7a
--- /dev/null
+++ b/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
@@ -0,0 +1,107 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_receiver/rtp_parser/rtp_parser.h"
+
+#include "base/logging.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/rtp_receiver/rtp_receiver.h"
+#include "net/base/big_endian.h"
+
+namespace media {
+namespace cast {
+
+static const int kRtpCommonHeaderLength = 12;
+static const int kRtpCastHeaderLength = 7;
+static const uint8 kCastKeyFrameBitMask = 0x80;
+static const uint8 kCastReferenceFrameIdBitMask = 0x40;
+
+RtpParser::RtpParser(RtpData* incoming_payload_callback,
+ const RtpParserConfig parser_config)
+ : data_callback_(incoming_payload_callback),
+ parser_config_(parser_config) {
+}
+
+RtpParser::~RtpParser() {
+}
+
+bool RtpParser::ParsePacket(const uint8* packet, int length,
+ RtpCastHeader* rtp_header) {
+ if (length == 0) return false;
+ // Get RTP general header.
+ if (!ParseCommon(packet, length, rtp_header)) return false;
+ if (rtp_header->webrtc.header.payloadType == parser_config_.payload_type &&
+ rtp_header->webrtc.header.ssrc == parser_config_.ssrc) {
+ return ParseCast(packet + kRtpCommonHeaderLength,
+ length - kRtpCommonHeaderLength, rtp_header);
+ }
+ // Not a valid payload type / ssrc combination.
+ return false;
+}
+
+bool RtpParser::ParseCommon(const uint8* packet,
+ int length,
+ RtpCastHeader* rtp_header) {
+ if (length < kRtpCommonHeaderLength) return false;
+ uint8 version = packet[0] >> 6;
+ if (version != 2) return false;
+ uint8 cc = packet[0] & 0x0f;
+ bool marker = ((packet[1] & 0x80) != 0);
+ int payload_type = packet[1] & 0x7f;
+
+ uint16 sequence_number;
+ uint32 rtp_timestamp, ssrc;
+ net::BigEndianReader big_endian_reader(packet + 2, 80);
+ big_endian_reader.ReadU16(&sequence_number);
+ big_endian_reader.ReadU32(&rtp_timestamp);
+ big_endian_reader.ReadU32(&ssrc);
+
+ rtp_header->webrtc.header.markerBit = marker;
+ rtp_header->webrtc.header.payloadType = payload_type;
+ rtp_header->webrtc.header.sequenceNumber = sequence_number;
+ rtp_header->webrtc.header.timestamp = rtp_timestamp;
+ rtp_header->webrtc.header.ssrc = ssrc;
+ rtp_header->webrtc.header.numCSRCs = cc;
+
+ uint8 csrc_octs = cc * 4;
+ rtp_header->webrtc.type.Audio.numEnergy = rtp_header->webrtc.header.numCSRCs;
+ rtp_header->webrtc.header.headerLength = kRtpCommonHeaderLength + csrc_octs;
+ rtp_header->webrtc.type.Audio.isCNG = false;
+ rtp_header->webrtc.type.Audio.channel = parser_config_.audio_channels;
+ return true;
+}
+
+bool RtpParser::ParseCast(const uint8* packet,
+ int length,
+ RtpCastHeader* rtp_header) {
+ if (length < kRtpCastHeaderLength) return false;
+ // Extract header.
+ const uint8* data_ptr = packet;
+ int data_length = length;
+ rtp_header->is_key_frame = (data_ptr[0] & kCastKeyFrameBitMask);
+ rtp_header->is_reference = (data_ptr[0] & kCastReferenceFrameIdBitMask);
+ rtp_header->frame_id = data_ptr[1];
+
+ net::BigEndianReader big_endian_reader(data_ptr + 2, 32);
+ big_endian_reader.ReadU16(&rtp_header->packet_id);
+ big_endian_reader.ReadU16(&rtp_header->max_packet_id);
+
+ if (rtp_header->is_reference) {
+ rtp_header->reference_frame_id = data_ptr[6];
+ data_ptr += kRtpCastHeaderLength;
+ data_length -= kRtpCastHeaderLength;
+ } else {
+ data_ptr += kRtpCastHeaderLength - 1;
+ data_length -= kRtpCastHeaderLength - 1;
+ }
+
+ if (rtp_header->max_packet_id < rtp_header->packet_id) {
+ return false;
+ }
+ data_callback_->OnReceivedPayloadData(data_ptr, data_length, rtp_header);
+ return true;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/rtp_receiver/rtp_parser/rtp_parser.gypi b/media/cast/rtp_receiver/rtp_parser/rtp_parser.gypi
new file mode 100644
index 0000000000..0814e55cf8
--- /dev/null
+++ b/media/cast/rtp_receiver/rtp_parser/rtp_parser.gypi
@@ -0,0 +1,25 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_rtp_parser',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ ],
+ 'sources': [
+ 'rtp_parser_config.h',
+ 'rtp_parser.cc',
+ 'rtp_parser.h',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/base/base.gyp:test_support_base',
+ ],
+ },
+ ],
+}
diff --git a/media/cast/rtp_receiver/rtp_parser/rtp_parser.h b/media/cast/rtp_receiver/rtp_parser/rtp_parser.h
new file mode 100644
index 0000000000..7f85609bf6
--- /dev/null
+++ b/media/cast/rtp_receiver/rtp_parser/rtp_parser.h
@@ -0,0 +1,53 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_RTP_PARSER_H_
+#define MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_RTP_PARSER_H_
+
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace media {
+namespace cast {
+
+class RtpData;
+
+struct RtpParserConfig {
+ RtpParserConfig() {
+ ssrc = 0;
+ payload_type = 0;
+ audio_channels = 0;
+ }
+
+ uint32 ssrc;
+ int payload_type;
+ AudioCodec audio_codec;
+ VideoCodec video_codec;
+ int audio_channels;
+};
+
+class RtpParser {
+ public:
+ RtpParser(RtpData* incoming_payload_callback,
+ const RtpParserConfig parser_config);
+
+ ~RtpParser();
+
+ bool ParsePacket(const uint8* packet, int length,
+ RtpCastHeader* rtp_header);
+
+ private:
+ bool ParseCommon(const uint8* packet, int length,
+ RtpCastHeader* rtp_header);
+
+ bool ParseCast(const uint8* packet, int length,
+ RtpCastHeader* rtp_header);
+
+ RtpData* data_callback_;
+ RtpParserConfig parser_config_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_RTP_PARSER_H_
diff --git a/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc b/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc
new file mode 100644
index 0000000000..71e6f501a5
--- /dev/null
+++ b/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc
@@ -0,0 +1,201 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <gtest/gtest.h>
+
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/rtp_parser/rtp_parser.h"
+#include "media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h"
+#include "media/cast/rtp_receiver/rtp_receiver.h"
+
+namespace media {
+namespace cast {
+
+static const int kPacketLength = 1500;
+static const int kCastRtpLength = 7;
+static const int kTestPayloadType = 127;
+static const uint32 kTestSsrc = 1234;
+static const uint32 kTestTimestamp = 111111;
+static const uint16 kTestSeqNum = 4321;
+static const uint8 kRefFrameId = 17;
+
+class RtpDataTest : public RtpData {
+ public:
+ RtpDataTest() {
+ expected_header_.reset(new RtpCastHeader());
+ }
+
+ ~RtpDataTest() {}
+
+ void SetExpectedHeader(const RtpCastHeader& cast_header) {
+ memcpy(expected_header_.get(), &cast_header, sizeof(RtpCastHeader));
+ }
+
+ void OnReceivedPayloadData(const uint8* payloadData,
+ int payloadSize,
+ const RtpCastHeader* rtpHeader) {
+ VerifyCommonHeader(*rtpHeader);
+ VerifyCastHeader(*rtpHeader);
+ // TODO(mikhal): Add data verification.
+ }
+
+ void VerifyCommonHeader(const RtpCastHeader& parsed_header) {
+ EXPECT_EQ(expected_header_->packet_id == expected_header_->max_packet_id,
+ parsed_header.webrtc.header.markerBit);
+ EXPECT_EQ(kTestPayloadType, parsed_header.webrtc.header.payloadType);
+ EXPECT_EQ(kTestSsrc, parsed_header.webrtc.header.ssrc);
+ EXPECT_EQ(0, parsed_header.webrtc.header.numCSRCs);
+ }
+
+ void VerifyCastHeader(const RtpCastHeader& parsed_header) {
+ EXPECT_EQ(expected_header_->is_key_frame, parsed_header.is_key_frame);
+ EXPECT_EQ(expected_header_->frame_id, parsed_header.frame_id);
+ EXPECT_EQ(expected_header_->packet_id, parsed_header.packet_id);
+ EXPECT_EQ(expected_header_->max_packet_id, parsed_header.max_packet_id);
+ EXPECT_EQ(expected_header_->is_reference, parsed_header.is_reference);
+ }
+
+ private:
+ scoped_ptr<RtpCastHeader> expected_header_;
+};
+
+class RtpParserTest : public ::testing::Test {
+ protected:
+ RtpParserTest() {
+ PopulateConfig();
+ rtp_data_.reset(new RtpDataTest());
+ rtp_parser_.reset(new RtpParser(rtp_data_.get(), config_));
+ }
+
+ ~RtpParserTest() {}
+
+ virtual void SetUp() {
+ cast_header_.InitRTPVideoHeaderCast();
+ cast_header_.is_reference = true;
+ cast_header_.reference_frame_id = kRefFrameId;
+ packet_builder_.SetSsrc(kTestSsrc);
+ packet_builder_.SetReferenceFrameId(kRefFrameId, true);
+ packet_builder_.SetSequenceNumber(kTestSeqNum);
+ packet_builder_.SetTimestamp(kTestTimestamp);
+ packet_builder_.SetPayloadType(kTestPayloadType);
+ packet_builder_.SetMarkerBit(true); // Only one packet.
+ }
+
+ void PopulateConfig() {
+ config_.payload_type = kTestPayloadType;
+ config_.ssrc = kTestSsrc;
+ }
+
+ scoped_ptr<RtpDataTest> rtp_data_;
+ RtpPacketBuilder packet_builder_;
+ scoped_ptr<RtpParser> rtp_parser_;
+ RtpParserConfig config_;
+ RtpCastHeader cast_header_;
+};
+
+TEST_F(RtpParserTest, ParseDefaultCastPacket) {
+ // Build generic data packet.
+ uint8 packet[kPacketLength];
+ packet_builder_.BuildHeader(packet, kPacketLength);
+ // Parse packet as is.
+ RtpCastHeader rtp_header;
+ rtp_data_->SetExpectedHeader(cast_header_);
+ EXPECT_TRUE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+}
+
+TEST_F(RtpParserTest, ParseNonDefaultCastPacket) {
+ // Build generic data packet.
+ uint8 packet[kPacketLength];
+ packet_builder_.SetKeyFrame(true);
+ packet_builder_.SetFrameId(10);
+ packet_builder_.SetPacketId(5);
+ packet_builder_.SetMaxPacketId(15);
+ packet_builder_.SetMarkerBit(false);
+ packet_builder_.BuildHeader(packet, kPacketLength);
+ cast_header_.is_key_frame = true;
+ cast_header_.frame_id = 10;
+ cast_header_.packet_id = 5;
+ cast_header_.max_packet_id = 15;
+ rtp_data_->SetExpectedHeader(cast_header_);
+ // Parse packet as is.
+ RtpCastHeader rtp_header;
+ EXPECT_TRUE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+}
+
+TEST_F(RtpParserTest, TooBigPacketId) {
+ // Build generic data packet.
+ uint8 packet[kPacketLength];
+ packet_builder_.SetKeyFrame(true);
+ packet_builder_.SetFrameId(10);
+ packet_builder_.SetPacketId(15);
+ packet_builder_.SetMaxPacketId(5);
+ packet_builder_.BuildHeader(packet, kPacketLength);
+ // Parse packet as is.
+ RtpCastHeader rtp_header;
+ EXPECT_FALSE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+}
+
+TEST_F(RtpParserTest, MaxPacketId) {
+ // Build generic data packet.
+ uint8 packet[kPacketLength];
+ packet_builder_.SetKeyFrame(true);
+ packet_builder_.SetFrameId(10);
+ packet_builder_.SetPacketId(65535);
+ packet_builder_.SetMaxPacketId(65535);
+ packet_builder_.BuildHeader(packet, kPacketLength);
+ cast_header_.is_key_frame = true;
+ cast_header_.frame_id = 10;
+ cast_header_.packet_id = 65535;
+ cast_header_.max_packet_id = 65535;
+ rtp_data_->SetExpectedHeader(cast_header_);
+ // Parse packet as is.
+ RtpCastHeader rtp_header;
+ EXPECT_TRUE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+}
+
+TEST_F(RtpParserTest, InvalidPayloadType) {
+ // Build generic data packet.
+ uint8 packet[kPacketLength];
+ packet_builder_.SetKeyFrame(true);
+ packet_builder_.SetFrameId(10);
+ packet_builder_.SetPacketId(65535);
+ packet_builder_.SetMaxPacketId(65535);
+ packet_builder_.SetPayloadType(kTestPayloadType - 1);
+ packet_builder_.BuildHeader(packet, kPacketLength);
+ // Parse packet as is.
+ RtpCastHeader rtp_header;
+ EXPECT_FALSE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+}
+
+TEST_F(RtpParserTest, InvalidSsrc) {
+ // Build generic data packet.
+ uint8 packet[kPacketLength];
+ packet_builder_.SetKeyFrame(true);
+ packet_builder_.SetFrameId(10);
+ packet_builder_.SetPacketId(65535);
+ packet_builder_.SetMaxPacketId(65535);
+ packet_builder_.SetSsrc(kTestSsrc - 1);
+ packet_builder_.BuildHeader(packet, kPacketLength);
+ // Parse packet as is.
+ RtpCastHeader rtp_header;
+ EXPECT_FALSE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+}
+
+TEST_F(RtpParserTest, ParseCastPacketWithoutReference) {
+ cast_header_.is_reference = false;
+ cast_header_.reference_frame_id = 0;
+ packet_builder_.SetReferenceFrameId(kRefFrameId, false);
+
+ // Build generic data packet.
+ uint8 packet[kPacketLength];
+ packet_builder_.BuildHeader(packet, kPacketLength);
+ // Parse packet as is.
+ RtpCastHeader rtp_header;
+ rtp_data_->SetExpectedHeader(cast_header_);
+ EXPECT_TRUE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.cc b/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.cc
new file mode 100644
index 0000000000..9f61d9bc0d
--- /dev/null
+++ b/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.cc
@@ -0,0 +1,104 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h"
+
+#include "base/logging.h"
+#include "net/base/big_endian.h"
+
+namespace media {
+namespace cast {
+
+const int kCastRtpHeaderLength = 7;
+const int kGenericRtpHeaderLength = 12;
+
+RtpPacketBuilder::RtpPacketBuilder()
+ : is_key_(false),
+ frame_id_(0),
+ packet_id_(0),
+ max_packet_id_(0),
+ reference_frame_id_(0),
+ is_reference_set_(false),
+ timestamp_(0),
+ sequence_number_(0),
+ marker_(false),
+ payload_type_(0),
+ ssrc_(0) {}
+
+void RtpPacketBuilder::SetKeyFrame(bool is_key) {
+ is_key_ = is_key;
+}
+
+void RtpPacketBuilder::SetFrameId(uint8 frame_id) {
+ frame_id_ = frame_id;
+}
+
+void RtpPacketBuilder::SetPacketId(uint16 packet_id) {
+ packet_id_ = packet_id;
+}
+
+void RtpPacketBuilder::SetMaxPacketId(uint16 max_packet_id) {
+ max_packet_id_ = max_packet_id;
+}
+
+void RtpPacketBuilder::SetReferenceFrameId(uint8 reference_frame_id,
+ bool is_set) {
+ is_reference_set_ = is_set;
+ if (is_set)
+ reference_frame_id_ = reference_frame_id;
+}
+void RtpPacketBuilder::SetTimestamp(uint32 timestamp) {
+ timestamp_ = timestamp;
+}
+
+void RtpPacketBuilder::SetSequenceNumber(uint16 sequence_number) {
+ sequence_number_ = sequence_number;
+}
+
+void RtpPacketBuilder::SetMarkerBit(bool marker) {
+ marker_ = marker;
+}
+
+void RtpPacketBuilder::SetPayloadType(int payload_type) {
+ payload_type_ = payload_type;
+}
+
+void RtpPacketBuilder::SetSsrc(uint32 ssrc) {
+ ssrc_ = ssrc;
+}
+
+void RtpPacketBuilder::BuildHeader(uint8* data, uint32 data_length) {
+ BuildCommonHeader(data, data_length);
+ BuildCastHeader(data + kGenericRtpHeaderLength,
+ data_length - kGenericRtpHeaderLength);
+}
+
+void RtpPacketBuilder::BuildCastHeader(uint8* data, uint32 data_length) {
+ // Build header.
+ DCHECK_LE(kCastRtpHeaderLength, data_length);
+ // Set the first 7 bytes to 0.
+ memset(data, 0, kCastRtpHeaderLength);
+ net::BigEndianWriter big_endian_writer(data, 56);
+ big_endian_writer.WriteU8(
+ (is_key_ ? 0x80 : 0) | (is_reference_set_ ? 0x40 : 0));
+ big_endian_writer.WriteU8(frame_id_);
+ big_endian_writer.WriteU16(packet_id_);
+ big_endian_writer.WriteU16(max_packet_id_);
+ if (is_reference_set_) {
+ big_endian_writer.WriteU8(reference_frame_id_);
+ }
+}
+
+void RtpPacketBuilder::BuildCommonHeader(uint8* data, uint32 data_length) {
+ DCHECK_LE(kGenericRtpHeaderLength, data_length);
+ net::BigEndianWriter big_endian_writer(data, 96);
+ big_endian_writer.WriteU8(0x80);
+ big_endian_writer.WriteU8(payload_type_ | (marker_ ? kRtpMarkerBitMask : 0));
+ big_endian_writer.WriteU16(sequence_number_);
+ big_endian_writer.WriteU32(timestamp_);
+ big_endian_writer.WriteU32(ssrc_);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h b/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h
new file mode 100644
index 0000000000..70f520e14d
--- /dev/null
+++ b/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h
@@ -0,0 +1,51 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test helper class that builds rtp packets.
+
+#ifndef MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_TEST_RTP_PACKET_BUILDER_H_
+#define MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_TEST_RTP_PACKET_BUILDER_H_
+
+
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace media {
+namespace cast {
+
+class RtpPacketBuilder {
+ public:
+ RtpPacketBuilder();
+ void SetKeyFrame(bool is_key);
+ void SetFrameId(uint8 frame_id);
+ void SetPacketId(uint16 packet_id);
+ void SetMaxPacketId(uint16 max_packet_id);
+ void SetReferenceFrameId(uint8 reference_frame_id, bool is_set);
+ void SetTimestamp(uint32 timestamp);
+ void SetSequenceNumber(uint16 sequence_number);
+ void SetMarkerBit(bool marker);
+ void SetPayloadType(int payload_type);
+ void SetSsrc(uint32 ssrc);
+ void BuildHeader(uint8* data, uint32 data_length);
+
+ private:
+ bool is_key_;
+ uint8 frame_id_;
+ uint16 packet_id_;
+ uint16 max_packet_id_;
+ uint8 reference_frame_id_;
+ bool is_reference_set_;
+ uint32 timestamp_;
+ uint16 sequence_number_;
+ bool marker_;
+ int payload_type_;
+ uint32 ssrc_;
+
+ void BuildCastHeader(uint8* data, uint32 data_length);
+ void BuildCommonHeader(uint8* data, uint32 data_length);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_TEST_RTP_PACKET_BUILDER_H_
diff --git a/media/cast/rtp_receiver/rtp_receiver.cc b/media/cast/rtp_receiver/rtp_receiver.cc
new file mode 100644
index 0000000000..97e9b03032
--- /dev/null
+++ b/media/cast/rtp_receiver/rtp_receiver.cc
@@ -0,0 +1,57 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_receiver/rtp_receiver.h"
+
+#include "base/logging.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/receiver_stats.h"
+#include "media/cast/rtp_receiver/rtp_parser/rtp_parser.h"
+
+namespace media {
+namespace cast {
+
+RtpReceiver::RtpReceiver(const AudioReceiverConfig* audio_config,
+ const VideoReceiverConfig* video_config,
+ RtpData* incoming_payload_callback) {
+ DCHECK(incoming_payload_callback) << "Invalid argument";
+ DCHECK(audio_config || video_config) << "Invalid argument";
+ // Configure parser.
+ RtpParserConfig config;
+ if (audio_config) {
+ config.ssrc = audio_config->incoming_ssrc;
+ config.payload_type = audio_config->rtp_payload_type;
+ config.audio_codec = audio_config->codec;
+ config.audio_channels = audio_config->channels;
+ } else {
+ config.ssrc = video_config->incoming_ssrc;
+ config.payload_type = video_config->rtp_payload_type;
+ config.video_codec = video_config->codec;
+ }
+ stats_.reset(new ReceiverStats(config.ssrc));
+ parser_.reset(new RtpParser(incoming_payload_callback, config));
+}
+
+RtpReceiver::~RtpReceiver() {}
+
+bool RtpReceiver::ReceivedPacket(const uint8* packet, int length) {
+ RtpCastHeader rtp_header;
+ if (!parser_->ParsePacket(packet, length, &rtp_header)) return false;
+
+ stats_->UpdateStatistics(rtp_header);
+ return true;
+}
+
+void RtpReceiver::GetStatistics(uint8* fraction_lost,
+ uint32* cumulative_lost,
+ uint32* extended_high_sequence_number,
+ uint32* jitter) {
+ stats_->GetStatistics(fraction_lost,
+ cumulative_lost,
+ extended_high_sequence_number,
+ jitter);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/rtp_receiver/rtp_receiver.gyp b/media/cast/rtp_receiver/rtp_receiver.gyp
new file mode 100644
index 0000000000..c1d4d5adf0
--- /dev/null
+++ b/media/cast/rtp_receiver/rtp_receiver.gyp
@@ -0,0 +1,27 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_rtp_receiver',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ ],
+ 'sources': [
+ 'receiver_stats.cc',
+ 'receiver_stats.h',
+ 'rtp_receiver.cc',
+ 'rtp_receiver.h',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/base/base.gyp:test_support_base',
+ 'rtp_parser/rtp_parser.gypi:*',
+ ],
+ },
+ ],
+}
diff --git a/media/cast/rtp_receiver/rtp_receiver.h b/media/cast/rtp_receiver/rtp_receiver.h
new file mode 100644
index 0000000000..6cac8cadd7
--- /dev/null
+++ b/media/cast/rtp_receiver/rtp_receiver.h
@@ -0,0 +1,53 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Interface to the rtp receiver.
+
+#ifndef MEDIA_CAST_RTP_RECEIVER_RTP_RECEIVER_H_
+#define MEDIA_CAST_RTP_RECEIVER_RTP_RECEIVER_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace media {
+namespace cast {
+
+class RtpData {
+ public:
+ virtual void OnReceivedPayloadData(const uint8* payload_data,
+ int payload_size,
+ const RtpCastHeader* rtp_header) = 0;
+
+ protected:
+ virtual ~RtpData() {}
+};
+
+class ReceiverStats;
+class RtpParser;
+
+class RtpReceiver {
+ public:
+ RtpReceiver(const AudioReceiverConfig* audio_config,
+ const VideoReceiverConfig* video_config,
+ RtpData* incoming_payload_callback);
+ ~RtpReceiver();
+
+ bool ReceivedPacket(const uint8* packet, int length);
+
+ void GetStatistics(uint8* fraction_lost,
+ uint32* cumulative_lost, // 24 bits valid.
+ uint32* extended_high_sequence_number,
+ uint32* jitter);
+
+ private:
+ scoped_ptr<ReceiverStats> stats_;
+ scoped_ptr<RtpParser> parser_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_RECEIVER_RTP_RECEIVER_H_
diff --git a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc
index 264701c40b..6900bc24b3 100644
--- a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc
+++ b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc
@@ -23,7 +23,6 @@ RtpPacketizer::RtpPacketizer(PacedPacketSender* transport,
: config_(rtp_packetizer_config),
transport_(transport),
packet_storage_(packet_storage),
- time_last_sent_rtp_timestamp_(0),
sequence_number_(config_.sequence_number),
rtp_timestamp_(config_.rtp_timestamp),
frame_id_(0),
@@ -36,30 +35,33 @@ RtpPacketizer::RtpPacketizer(PacedPacketSender* transport,
RtpPacketizer::~RtpPacketizer() {}
void RtpPacketizer::IncomingEncodedVideoFrame(
- const EncodedVideoFrame& video_frame,
- int64 capture_time_ms) {
+ const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time) {
DCHECK(!config_.audio) << "Invalid state";
if (config_.audio) return;
+ base::TimeTicks zero_time;
+ base::TimeDelta capture_delta = capture_time - zero_time;
+
// Timestamp is in 90 KHz for video.
- rtp_timestamp_ = static_cast<uint32>(capture_time_ms * 90);
- time_last_sent_rtp_timestamp_ = capture_time_ms;
+ rtp_timestamp_ = static_cast<uint32>(capture_delta.InMilliseconds() * 90);
+ time_last_sent_rtp_timestamp_ = capture_time;
- Cast(video_frame.key_frame,
- video_frame.last_referenced_frame_id,
+ Cast(video_frame->key_frame,
+ video_frame->last_referenced_frame_id,
rtp_timestamp_,
- video_frame.data);
+ video_frame->data);
}
void RtpPacketizer::IncomingEncodedAudioFrame(
- const EncodedAudioFrame& audio_frame,
- int64 recorded_time) {
+ const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time) {
DCHECK(config_.audio) << "Invalid state";
if (!config_.audio) return;
- rtp_timestamp_ += audio_frame.samples; // Timestamp is in samples for audio.
+ rtp_timestamp_ += audio_frame->samples; // Timestamp is in samples for audio.
time_last_sent_rtp_timestamp_ = recorded_time;
- Cast(true, 0, rtp_timestamp_, audio_frame.data);
+ Cast(true, 0, rtp_timestamp_, audio_frame->data);
}
uint16 RtpPacketizer::NextSequenceNumber() {
@@ -67,9 +69,9 @@ uint16 RtpPacketizer::NextSequenceNumber() {
return sequence_number_ - 1;
}
-bool RtpPacketizer::LastSentTimestamp(int64* time_sent,
+bool RtpPacketizer::LastSentTimestamp(base::TimeTicks* time_sent,
uint32* rtp_timestamp) const {
- if (time_last_sent_rtp_timestamp_ == 0) return false;
+ if (time_last_sent_rtp_timestamp_.is_null()) return false;
*time_sent = time_last_sent_rtp_timestamp_;
*rtp_timestamp = rtp_timestamp_;
diff --git a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h
index f1941cd02f..63035d098d 100644
--- a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h
+++ b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h
@@ -9,6 +9,7 @@
#include <list>
#include <map>
+#include "base/time/time.h"
#include "media/cast/rtp_common/rtp_defines.h"
#include "media/cast/rtp_sender/packet_storage/packet_storage.h"
#include "media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_config.h"
@@ -18,6 +19,9 @@ namespace cast {
class PacedPacketSender;
+// This object is only called from the main cast thread.
+// This class break encoded audio and video frames into packets and add an RTP
+// header to each packet.
class RtpPacketizer {
public:
RtpPacketizer(PacedPacketSender* transport,
@@ -25,38 +29,42 @@ class RtpPacketizer {
RtpPacketizerConfig rtp_packetizer_config);
~RtpPacketizer();
- void IncomingEncodedVideoFrame(const EncodedVideoFrame& video_frame,
- int64 capture_time);
+ // The video_frame objects ownership is handled by the main cast thread.
+ void IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time);
- void IncomingEncodedAudioFrame(const EncodedAudioFrame& audio_frame,
- int64 recorded_time);
+ // The audio_frame objects ownership is handled by the main cast thread.
+ void IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time);
- bool LastSentTimestamp(int64* time_sent, uint32* rtp_timestamp) const;
+ bool LastSentTimestamp(base::TimeTicks* time_sent,
+ uint32* rtp_timestamp) const;
// Return the next sequence number, and increment by one. Enables unique
// incremental sequence numbers for every packet (including retransmissions).
uint16 NextSequenceNumber();
- uint32 send_packets_count() {return send_packets_count_;}
- uint32 send_octet_count() {return send_octet_count_;}
+ int send_packets_count() { return send_packets_count_; }
+ int send_octet_count() { return send_octet_count_; }
private:
void Cast(bool is_key, uint8 reference_frame_id,
uint32 timestamp, std::vector<uint8> data);
void BuildCommonRTPheader(std::vector<uint8>* packet, bool marker_bit,
uint32 time_stamp);
+
RtpPacketizerConfig config_;
PacedPacketSender* transport_;
PacketStorage* packet_storage_;
- int64 time_last_sent_rtp_timestamp_;
+ base::TimeTicks time_last_sent_rtp_timestamp_;
uint16 sequence_number_;
uint32 rtp_timestamp_;
uint8 frame_id_;
uint16 packet_id_;
- uint32 send_packets_count_;
- uint32 send_octet_count_;
+ int send_packets_count_;
+ int send_octet_count_;
};
} // namespace cast
diff --git a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
index 7d99a46857..bed7cba2e8 100644
--- a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
+++ b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
@@ -17,7 +17,7 @@ namespace media {
namespace cast {
static const int kPayload = 127;
-static const uint32 kTimestamp = 10;
+static const uint32 kTimestampMs = 10;
static const uint16 kSeqNum = 33;
static const int kTimeOffset = 22222;
static const int kMaxPacketLength = 1500;
@@ -46,7 +46,7 @@ class TestRtpPacketTransport : public PacedPacketSender {
rtp_header.webrtc.header.markerBit);
EXPECT_EQ(kPayload, rtp_header.webrtc.header.payloadType);
EXPECT_EQ(sequence_number_, rtp_header.webrtc.header.sequenceNumber);
- EXPECT_EQ(kTimestamp * 90, rtp_header.webrtc.header.timestamp);
+ EXPECT_EQ(kTimestampMs * 90, rtp_header.webrtc.header.timestamp);
EXPECT_EQ(config_.ssrc, rtp_header.webrtc.header.ssrc);
EXPECT_EQ(0, rtp_header.webrtc.header.numCSRCs);
}
@@ -121,7 +121,10 @@ class RtpPacketizerTest : public ::testing::Test {
TEST_F(RtpPacketizerTest, SendStandardPackets) {
int expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
transport_->SetExpectedNumberOfPackets(expected_num_of_packets);
- rtp_packetizer_->IncomingEncodedVideoFrame(video_frame_, kTimestamp);
+
+ base::TimeTicks time;
+ time += base::TimeDelta::FromMilliseconds(kTimestampMs);
+ rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_,time);
}
TEST_F(RtpPacketizerTest, Stats) {
@@ -130,7 +133,10 @@ TEST_F(RtpPacketizerTest, Stats) {
// Insert packets at varying lengths.
unsigned int expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
transport_->SetExpectedNumberOfPackets(expected_num_of_packets);
- rtp_packetizer_->IncomingEncodedVideoFrame(video_frame_, kTimestamp);
+
+ base::TimeTicks time;
+ time += base::TimeDelta::FromMilliseconds(kTimestampMs);
+ rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_, time);
EXPECT_EQ(expected_num_of_packets, rtp_packetizer_->send_packets_count());
EXPECT_EQ(kFrameSize, rtp_packetizer_->send_octet_count());
}
diff --git a/media/cast/rtp_sender/rtp_sender.cc b/media/cast/rtp_sender/rtp_sender.cc
index c735dd0ea3..ecaae40dd7 100644
--- a/media/cast/rtp_sender/rtp_sender.cc
+++ b/media/cast/rtp_sender/rtp_sender.cc
@@ -12,25 +12,6 @@
namespace media {
namespace cast {
-namespace {
-
-// January 1970, in milliseconds.
-static const int64 kNtpJan1970 = 2208988800000LL;
-
-// Magic fractional unit.
-static const uint32 kMagicFractionalUnit = 4294967;
-
-void ConvertTimeToFractions(int64 time_ms, uint32* seconds,
- uint32* fractions) {
- *seconds = static_cast<uint32>(time_ms / 1000);
- *fractions = static_cast<uint32>((time_ms % 1000) * kMagicFractionalUnit);
-}
-
-void ConvertTimeToNtp(int64 time_ms, uint32* ntp_seconds,
- uint32* ntp_fractions) {
- ConvertTimeToFractions(time_ms + kNtpJan1970, ntp_seconds, ntp_fractions);
-}
-} // namespace
RtpSender::RtpSender(const AudioSenderConfig* audio_config,
const VideoSenderConfig* video_config,
@@ -65,13 +46,13 @@ RtpSender::RtpSender(const AudioSenderConfig* audio_config,
RtpSender::~RtpSender() {}
-void RtpSender::IncomingEncodedVideoFrame(const EncodedVideoFrame& video_frame,
- int64 capture_time) {
+void RtpSender::IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time) {
packetizer_->IncomingEncodedVideoFrame(video_frame, capture_time);
}
-void RtpSender::IncomingEncodedAudioFrame(const EncodedAudioFrame& audio_frame,
- int64 recorded_time) {
+void RtpSender::IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time) {
packetizer_->IncomingEncodedAudioFrame(audio_frame, recorded_time);
}
@@ -107,7 +88,6 @@ void RtpSender::ResendPackets(
}
} while (success);
-
} else {
for (std::set<uint16>::const_iterator set_it = packets.begin();
set_it != packets.end(); ++set_it) {
@@ -138,23 +118,24 @@ void RtpSender::UpdateSequenceNumber(std::vector<uint8>* packet) {
(*packet)[index + 1] =(static_cast<uint8>(new_sequence_number >> 8));
}
-void RtpSender::RtpStatistics(int64 now_ms, RtcpSenderInfo* sender_info) {
+void RtpSender::RtpStatistics(const base::TimeTicks& now,
+ RtcpSenderInfo* sender_info) {
// The timestamp of this Rtcp packet should be estimated as the timestamp of
// the frame being captured at this moment. We are calculating that
// timestamp as the last frame's timestamp + the time since the last frame
// was captured.
uint32 ntp_seconds = 0;
uint32 ntp_fraction = 0;
- ConvertTimeToNtp(now_ms, &ntp_seconds, &ntp_fraction);
+ ConvertTimeToNtp(now, &ntp_seconds, &ntp_fraction);
// sender_info->ntp_seconds = ntp_seconds;
sender_info->ntp_fraction = ntp_fraction;
- int64 time_sent_ms;
+ base::TimeTicks time_sent;
uint32 rtp_timestamp;
- if (packetizer_->LastSentTimestamp(&time_sent_ms, &rtp_timestamp)) {
- int64 time_since_last_send_ms = now_ms - time_sent_ms;
+ if (packetizer_->LastSentTimestamp(&time_sent, &rtp_timestamp)) {
+ base::TimeDelta time_since_last_send = now - time_sent;
sender_info->rtp_timestamp = rtp_timestamp +
- time_since_last_send_ms * (config_.frequency / 1000);
+ time_since_last_send.InMilliseconds() * (config_.frequency / 1000);
} else {
sender_info->rtp_timestamp = 0;
}
diff --git a/media/cast/rtp_sender/rtp_sender.h b/media/cast/rtp_sender/rtp_sender.h
index 9352fd3f6e..f6e59acba8 100644
--- a/media/cast/rtp_sender/rtp_sender.h
+++ b/media/cast/rtp_sender/rtp_sender.h
@@ -27,6 +27,10 @@ struct RtcpSenderInfo;
typedef std::map<uint8, std::set<uint16> > MissingFramesAndPackets;
+// This object is only called from the main cast thread.
+// This class handles splitting encoded audio and video frames into packets and
+// add an RTP header to each packet. The sent packets are stored until they are
+// acknowledged by the remote peer or timed out.
class RtpSender {
public:
RtpSender(const AudioSenderConfig* audio_config,
@@ -35,16 +39,23 @@ class RtpSender {
~RtpSender();
- void IncomingEncodedVideoFrame(const EncodedVideoFrame& video_frame,
- int64 capture_time);
+ // The video_frame objects ownership is handled by the main cast thread.
+ void IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time);
- void IncomingEncodedAudioFrame(const EncodedAudioFrame& audio_frame,
- int64 recorded_time);
+ // The audio_frame objects ownership is handled by the main cast thread.
+ void IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time);
- void ResendPackets(
- const MissingFramesAndPackets& missing_frames_and_packets);
+ void ResendPackets(const MissingFramesAndPackets& missing_packets);
- void RtpStatistics(int64 now_ms, RtcpSenderInfo* sender_info);
+ void RtpStatistics(const base::TimeTicks& now, RtcpSenderInfo* sender_info);
+
+ // Used for testing.
+ void set_clock(base::TickClock* clock) {
+ // TODO(pwestin): review how we pass in a clock for testing.
+ clock_ = clock;
+ }
private:
void UpdateSequenceNumber(std::vector<uint8>* packet);
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.cc b/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
new file mode 100644
index 0000000000..d24ef4be5a
--- /dev/null
+++ b/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
@@ -0,0 +1,352 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// TODO (pwestin): add a link to the design document describing the generic
+// protocol and the VP8 specific details.
+#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
+
+#include <vector>
+
+#include "base/logging.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+#include "third_party/libvpx/source/libvpx/vpx/vp8cx.h"
+
+namespace media {
+namespace cast {
+
+static const uint32 kMinIntra = 300;
+
+Vp8Encoder::Vp8Encoder(const VideoSenderConfig& video_config,
+ uint8 max_unacked_frames)
+ : cast_config_(video_config),
+ use_multiple_video_buffers_(
+ cast_config_.max_number_of_video_buffers_used ==
+ kNumberOfVp8VideoBuffers),
+ max_number_of_repeated_buffers_in_a_row_(
+ (max_unacked_frames > kNumberOfVp8VideoBuffers) ?
+ ((max_unacked_frames - 1) / kNumberOfVp8VideoBuffers) : 0),
+ config_(new vpx_codec_enc_cfg_t()),
+ encoder_(new vpx_codec_ctx_t()),
+ // Creating a wrapper to the image - setting image data to NULL. Actual
+ // pointer will be set during encode. Setting align to 1, as it is
+ // meaningless (actual memory is not allocated).
+ raw_image_(vpx_img_wrap(NULL, IMG_FMT_I420, video_config.width,
+ video_config.height, 1, NULL)),
+ key_frame_requested_(true),
+ timestamp_(0),
+ last_encoded_frame_id_(kStartFrameId),
+ number_of_repeated_buffers_(0) {
+  // VP8 has 3 buffers available for prediction; with
+  // max_number_of_video_buffers_used set to 1 we maximize coding efficiency,
+  // however in this mode we cannot skip frames in the receiver to catch up
+ // after a temporary network outage; with max_number_of_video_buffers_used
+ // set to 3 we allow 2 frames to be skipped by the receiver without error
+ // propagation.
+ DCHECK(cast_config_.max_number_of_video_buffers_used == 1 ||
+ cast_config_.max_number_of_video_buffers_used ==
+ kNumberOfVp8VideoBuffers) << "Invalid argument";
+
+ for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
+ acked_frame_buffers_[i] = true;
+ used_buffers_frame_id_[i] = kStartFrameId;
+ }
+ InitEncode(video_config.number_of_cores);
+}
+
+Vp8Encoder::~Vp8Encoder() {
+ vpx_codec_destroy(encoder_);
+ vpx_img_free(raw_image_);
+}
+
+void Vp8Encoder::InitEncode(int number_of_cores) {
+ // Populate encoder configuration with default values.
+ if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), config_.get(), 0)) {
+ DCHECK(false) << "Invalid return value";
+ }
+ config_->g_w = cast_config_.width;
+ config_->g_h = cast_config_.height;
+ config_->rc_target_bitrate = cast_config_.start_bitrate / 1000; // In kbit/s.
+
+ // Setting the codec time base.
+ config_->g_timebase.num = 1;
+ config_->g_timebase.den = kVideoFrequency;
+ config_->g_lag_in_frames = 0;
+ config_->kf_mode = VPX_KF_DISABLED;
+ if (use_multiple_video_buffers_) {
+ // We must enable error resilience when we use multiple buffers, due to
+ // codec requirements.
+ config_->g_error_resilient = 1;
+ }
+
+ if (cast_config_.width * cast_config_.height > 640 * 480
+ && number_of_cores >= 2) {
+ config_->g_threads = 2; // 2 threads for qHD/HD.
+ } else {
+ config_->g_threads = 1; // 1 thread for VGA or less.
+ }
+
+ // Rate control settings.
+ // TODO(pwestin): revisit these constants. Currently identical to webrtc.
+ config_->rc_dropframe_thresh = 30;
+ config_->rc_end_usage = VPX_CBR;
+ config_->g_pass = VPX_RC_ONE_PASS;
+ config_->rc_resize_allowed = 0;
+ config_->rc_min_quantizer = cast_config_.min_qp;
+ config_->rc_max_quantizer = cast_config_.max_qp;
+ config_->rc_undershoot_pct = 100;
+ config_->rc_overshoot_pct = 15;
+ config_->rc_buf_initial_sz = 500;
+ config_->rc_buf_optimal_sz = 600;
+ config_->rc_buf_sz = 1000;
+
+ // set the maximum target size of any key-frame.
+ uint32 rc_max_intra_target = MaxIntraTarget(config_->rc_buf_optimal_sz);
+ vpx_codec_flags_t flags = 0;
+ // TODO(mikhal): Tune settings.
+ if (vpx_codec_enc_init(encoder_, vpx_codec_vp8_cx(), config_.get(), flags)) {
+ DCHECK(false) << "Invalid return value";
+ }
+ vpx_codec_control(encoder_, VP8E_SET_STATIC_THRESHOLD, 1);
+ vpx_codec_control(encoder_, VP8E_SET_NOISE_SENSITIVITY, 0);
+ vpx_codec_control(encoder_, VP8E_SET_CPUUSED, -6);
+ vpx_codec_control(encoder_, VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ rc_max_intra_target);
+}
+
+bool Vp8Encoder::Encode(const I420VideoFrame& input_image,
+ EncodedVideoFrame* encoded_image) {
+ // Image in vpx_image_t format.
+ // Input image is const. VP8's raw image is not defined as const.
+ raw_image_->planes[PLANE_Y] = const_cast<uint8*>(input_image.y_plane.data);
+ raw_image_->planes[PLANE_U] = const_cast<uint8*>(input_image.u_plane.data);
+ raw_image_->planes[PLANE_V] = const_cast<uint8*>(input_image.v_plane.data);
+
+ raw_image_->stride[VPX_PLANE_Y] = input_image.y_plane.stride;
+ raw_image_->stride[VPX_PLANE_U] = input_image.u_plane.stride;
+ raw_image_->stride[VPX_PLANE_V] = input_image.v_plane.stride;
+
+ uint8 latest_frame_id_to_reference;
+ Vp8Buffers buffer_to_update;
+ vpx_codec_flags_t flags = 0;
+ if (key_frame_requested_) {
+ flags = VPX_EFLAG_FORCE_KF;
+ // Self reference.
+ latest_frame_id_to_reference =
+ static_cast<uint8>(last_encoded_frame_id_ + 1);
+ // We can pick any buffer as buffer_to_update since we update
+ // them all.
+ buffer_to_update = kLastBuffer;
+ } else {
+ // Reference all acked frames (buffers).
+ latest_frame_id_to_reference = GetLatestFrameIdToReference();
+ GetCodecReferenceFlags(&flags);
+ buffer_to_update = GetNextBufferToUpdate();
+ GetCodecUpdateFlags(buffer_to_update, &flags);
+ }
+
+ // Note: The duration does not reflect the real time between frames. This is
+ // done to keep the encoder happy.
+ uint32 duration = kVideoFrequency / cast_config_.max_frame_rate;
+ if (vpx_codec_encode(encoder_, raw_image_, timestamp_, duration, flags,
+ VPX_DL_REALTIME)) {
+ return false;
+ }
+ timestamp_ += duration;
+
+ // Get encoded frame.
+ const vpx_codec_cx_pkt_t *pkt = NULL;
+ vpx_codec_iter_t iter = NULL;
+ int total_size = 0;
+ while ((pkt = vpx_codec_get_cx_data(encoder_, &iter)) != NULL) {
+ if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
+ total_size += pkt->data.frame.sz;
+ encoded_image->data.reserve(total_size);
+ encoded_image->data.insert(
+ encoded_image->data.end(),
+ static_cast<const uint8*>(pkt->data.frame.buf),
+ static_cast<const uint8*>(pkt->data.frame.buf) +
+ pkt->data.frame.sz);
+ if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
+ encoded_image->key_frame = true;
+ } else {
+ encoded_image->key_frame = false;
+ }
+ }
+ }
+ // Don't update frame_id for zero size frames.
+ if (total_size == 0) return true;
+
+ // Populate the encoded frame.
+ encoded_image->codec = kVp8;
+ encoded_image->last_referenced_frame_id = latest_frame_id_to_reference;
+ encoded_image->frame_id = ++last_encoded_frame_id_;
+
+ if (encoded_image->key_frame) {
+ key_frame_requested_ = false;
+
+ for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
+ used_buffers_frame_id_[i] = encoded_image->frame_id;
+ }
+ // We can pick any buffer as last_used_vp8_buffer_ since we update
+ // them all.
+ last_used_vp8_buffer_ = buffer_to_update;
+ } else {
+ if (buffer_to_update != kNoBuffer) {
+ acked_frame_buffers_[buffer_to_update] = false;
+ used_buffers_frame_id_[buffer_to_update] = encoded_image->frame_id;
+ last_used_vp8_buffer_ = buffer_to_update;
+ }
+ }
+ return true;
+}
+
+void Vp8Encoder::GetCodecReferenceFlags(vpx_codec_flags_t* flags) {
+ if (!use_multiple_video_buffers_) return;
+
+ // We need to reference something.
+ DCHECK(acked_frame_buffers_[kAltRefBuffer] ||
+ acked_frame_buffers_[kGoldenBuffer] ||
+ acked_frame_buffers_[kLastBuffer]) << "Invalid state";
+
+ if (!acked_frame_buffers_[kAltRefBuffer]) {
+ *flags |= VP8_EFLAG_NO_REF_ARF;
+ }
+ if (!acked_frame_buffers_[kGoldenBuffer]) {
+ *flags |= VP8_EFLAG_NO_REF_GF;
+ }
+ if (!acked_frame_buffers_[kLastBuffer]) {
+ *flags |= VP8_EFLAG_NO_REF_LAST;
+ }
+}
+
+uint8 Vp8Encoder::GetLatestFrameIdToReference() {
+ if (!use_multiple_video_buffers_) return last_encoded_frame_id_;
+
+ int latest_frame_id_to_reference = -1;
+ if (acked_frame_buffers_[kAltRefBuffer]) {
+ latest_frame_id_to_reference = used_buffers_frame_id_[kAltRefBuffer];
+ }
+ if (acked_frame_buffers_[kGoldenBuffer]) {
+ if (latest_frame_id_to_reference == -1) {
+ latest_frame_id_to_reference = used_buffers_frame_id_[kGoldenBuffer];
+ } else {
+ if (IsNewerFrameId(used_buffers_frame_id_[kGoldenBuffer],
+ latest_frame_id_to_reference)) {
+ latest_frame_id_to_reference = used_buffers_frame_id_[kGoldenBuffer];
+ }
+ }
+ }
+ if (acked_frame_buffers_[kLastBuffer]) {
+ if (latest_frame_id_to_reference == -1) {
+ latest_frame_id_to_reference = used_buffers_frame_id_[kLastBuffer];
+ } else {
+ if (IsNewerFrameId(used_buffers_frame_id_[kLastBuffer],
+ latest_frame_id_to_reference)) {
+ latest_frame_id_to_reference = used_buffers_frame_id_[kLastBuffer];
+ }
+ }
+ }
+ DCHECK(latest_frame_id_to_reference != -1) << "Invalid state";
+ return static_cast<uint8>(latest_frame_id_to_reference);
+}
+
+Vp8Encoder::Vp8Buffers Vp8Encoder::GetNextBufferToUpdate() {
+ // Update at most one buffer, except for key-frames.
+
+ Vp8Buffers buffer_to_update;
+ if (number_of_repeated_buffers_ < max_number_of_repeated_buffers_in_a_row_) {
+ // TODO(pwestin): experiment with this. The issue with only this change is
+ // that we can end up with only 4 frames in flight when we expect 6.
+ // buffer_to_update = last_used_vp8_buffer_;
+ buffer_to_update = kNoBuffer;
+ ++number_of_repeated_buffers_;
+ } else {
+ number_of_repeated_buffers_ = 0;
+ switch (last_used_vp8_buffer_) {
+ case kAltRefBuffer:
+ buffer_to_update = kLastBuffer;
+ break;
+ case kLastBuffer:
+ buffer_to_update = kGoldenBuffer;
+ break;
+ case kGoldenBuffer:
+ buffer_to_update = kAltRefBuffer;
+ break;
+ case kNoBuffer:
+ DCHECK(false) << "Invalid state";
+ break;
+ }
+ }
+ return buffer_to_update;
+}
+
+void Vp8Encoder::GetCodecUpdateFlags(Vp8Buffers buffer_to_update,
+ vpx_codec_flags_t* flags) {
+ if (!use_multiple_video_buffers_) return;
+
+ // Update at most one buffer, except for key-frames.
+ switch (buffer_to_update) {
+ case kAltRefBuffer:
+ *flags |= VP8_EFLAG_NO_UPD_GF;
+ *flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kLastBuffer:
+ *flags |= VP8_EFLAG_NO_UPD_GF;
+ *flags |= VP8_EFLAG_NO_UPD_ARF;
+ break;
+ case kGoldenBuffer:
+ *flags |= VP8_EFLAG_NO_UPD_ARF;
+ *flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kNoBuffer:
+ *flags |= VP8_EFLAG_NO_UPD_ARF;
+ *flags |= VP8_EFLAG_NO_UPD_GF;
+ *flags |= VP8_EFLAG_NO_UPD_LAST;
+ *flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+ break;
+ }
+}
+
+void Vp8Encoder::UpdateRates(uint32 new_bitrate) {
+ config_->rc_target_bitrate = new_bitrate / 1000; // In kbit/s.
+ // Update encoder context.
+ if (vpx_codec_enc_config_set(encoder_, config_.get())) {
+ DCHECK(false) << "Invalid return value";
+ }
+}
+
+void Vp8Encoder::LatestFrameIdToReference(uint8 frame_id) {
+ if (!use_multiple_video_buffers_) return;
+
+ for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
+ if (frame_id == used_buffers_frame_id_[i]) {
+ acked_frame_buffers_[i] = true;
+ }
+ }
+}
+
+void Vp8Encoder::RequestKeyFrame() {
+ key_frame_requested_ = true;
+}
+
+// Calculate the max size of the key frame relative to a normal delta frame.
+uint32 Vp8Encoder::MaxIntraTarget(uint32 optimal_buffer_size_ms) const {
+ // Set max to the optimal buffer level (normalized by target BR),
+ // and scaled by a scale_parameter.
+ // Max target size = scalePar * optimalBufferSize * targetBR[Kbps].
+  // This value is presented as a percentage of perFrameBw:
+ // perFrameBw = targetBR[Kbps] * 1000 / frameRate.
+ // The target in % is as follows:
+
+ float scale_parameter = 0.5;
+ uint32 target_pct = optimal_buffer_size_ms * scale_parameter *
+ cast_config_.max_frame_rate / 10;
+
+ // Don't go below 3 times the per frame bandwidth.
+ return std::max(target_pct, kMinIntra);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi b/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi
new file mode 100644
index 0000000000..0b12789aa0
--- /dev/null
+++ b/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi
@@ -0,0 +1,19 @@
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_vp8_encoder',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ ],
+ 'sources': [
+ 'vp8_encoder.cc',
+ 'vp8_encoder.h',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
+ ],
+ },
+ ],
+}
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.h b/media/cast/video_sender/codecs/vp8/vp8_encoder.h
new file mode 100644
index 0000000000..354d529d2a
--- /dev/null
+++ b/media/cast/video_sender/codecs/vp8/vp8_encoder.h
@@ -0,0 +1,87 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_CODECS_VP8_VP8_ENCODER_H_
+#define MEDIA_CAST_VIDEO_SENDER_CODECS_VP8_VP8_ENCODER_H_
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_config.h"
+#include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h"
+
+// VPX forward declaration.
+typedef struct vpx_codec_ctx vpx_enc_ctx_t;
+
+namespace media {
+namespace cast {
+
+const int kNumberOfVp8VideoBuffers = 3;
+
+class Vp8Encoder {
+ public:
+ Vp8Encoder(const VideoSenderConfig& video_config,
+ uint8 max_unacked_frames);
+
+ ~Vp8Encoder();
+
+ // Encode a raw image (as a part of a video stream).
+ bool Encode(const I420VideoFrame& input_image,
+ EncodedVideoFrame* encoded_image);
+
+ // Update the encoder with a new target bit rate.
+ void UpdateRates(uint32 new_bitrate);
+
+ // Set the next frame to be a key frame.
+ void RequestKeyFrame();
+
+ void LatestFrameIdToReference(uint8 frame_id);
+
+ private:
+ enum Vp8Buffers {
+ kAltRefBuffer = 0,
+ kGoldenBuffer = 1,
+ kLastBuffer = 2,
+ kNoBuffer = 3 // Note: must be last.
+ };
+
+ void InitEncode(int number_of_cores);
+
+ // Calculate the max target in % for a keyframe.
+ uint32 MaxIntraTarget(uint32 optimal_buffer_size) const;
+
+ // Calculate which next Vp8 buffers to update with the next frame.
+ Vp8Buffers GetNextBufferToUpdate();
+
+ // Calculate which previous frame to reference.
+ uint8_t GetLatestFrameIdToReference();
+
+ // Get encoder flags for our referenced encoder buffers.
+ void GetCodecReferenceFlags(vpx_codec_flags_t* flags);
+
+ // Get encoder flags for our encoder buffers to update with next frame.
+ void GetCodecUpdateFlags(Vp8Buffers buffer_to_update,
+ vpx_codec_flags_t* flags);
+
+ const VideoSenderConfig cast_config_;
+ const bool use_multiple_video_buffers_;
+ const int max_number_of_repeated_buffers_in_a_row_;
+
+ // VP8 internal objects.
+ scoped_ptr<vpx_codec_enc_cfg_t> config_;
+ vpx_enc_ctx_t* encoder_;
+ vpx_image_t* raw_image_;
+
+ bool key_frame_requested_;
+ int64 timestamp_;
+ uint8 last_encoded_frame_id_;
+ uint8 used_buffers_frame_id_[kNumberOfVp8VideoBuffers];
+ bool acked_frame_buffers_[kNumberOfVp8VideoBuffers];
+ Vp8Buffers last_used_vp8_buffer_;
+ int number_of_repeated_buffers_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_CODECS_VP8_VP8_ENCODER_H_
diff --git a/media/cast/video_sender/mock_video_encoder_controller.h b/media/cast/video_sender/mock_video_encoder_controller.h
new file mode 100644
index 0000000000..90b2abdf3b
--- /dev/null
+++ b/media/cast/video_sender/mock_video_encoder_controller.h
@@ -0,0 +1,31 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_MOCK_VIDEO_ENCODER_CONTROLLER_H_
+#define MEDIA_CAST_VIDEO_SENDER_MOCK_VIDEO_ENCODER_CONTROLLER_H_
+
+#include "media/cast/cast_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+class MockVideoEncoderController : public VideoEncoderController {
+ public:
+ MOCK_METHOD1(SetBitRate, void(int new_bit_rate));
+
+ MOCK_METHOD1(SkipNextFrame, void(bool skip_next_frame));
+
+ MOCK_METHOD0(GenerateKeyFrame, void());
+
+ MOCK_METHOD1(LatestFrameIdToReference, void(uint8 frame_id));
+
+ MOCK_CONST_METHOD0(NumberOfSkippedFrames, int());
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_MOCK_VIDEO_ENCODER_CONTROLLER_H_
+
diff --git a/media/cast/video_sender/video_encoder.cc b/media/cast/video_sender/video_encoder.cc
new file mode 100644
index 0000000000..0b7202bbb1
--- /dev/null
+++ b/media/cast/video_sender/video_encoder.cc
@@ -0,0 +1,112 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/video_sender/video_encoder.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+
+namespace media {
+namespace cast {
+
+VideoEncoder::VideoEncoder(scoped_refptr<CastThread> cast_thread,
+ const VideoSenderConfig& video_config,
+ uint8 max_unacked_frames)
+ : video_config_(video_config),
+ cast_thread_(cast_thread),
+ skip_next_frame_(false),
+ skip_count_(0) {
+ if (video_config.codec == kVp8) {
+ vp8_encoder_.reset(new Vp8Encoder(video_config, max_unacked_frames));
+ } else {
+ DCHECK(false) << "Invalid config"; // Codec not supported.
+ }
+
+ dynamic_config_.key_frame_requested = false;
+ dynamic_config_.latest_frame_id_to_reference = kStartFrameId;
+ dynamic_config_.bit_rate = video_config.start_bitrate;
+}
+
+VideoEncoder::~VideoEncoder() {}
+
+bool VideoEncoder::EncodeVideoFrame(
+ const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure frame_release_callback) {
+ if (video_config_.codec != kVp8) return false;
+
+ if (skip_next_frame_) {
+ ++skip_count_;
+ VLOG(1) << "Skip encoding frame";
+ return false;
+ }
+
+ cast_thread_->PostTask(CastThread::VIDEO_ENCODER, FROM_HERE,
+ base::Bind(&VideoEncoder::EncodeVideoFrameEncoderThread, this,
+ video_frame, capture_time, dynamic_config_, frame_encoded_callback,
+ frame_release_callback));
+
+ dynamic_config_.key_frame_requested = false;
+ return true;
+}
+
+void VideoEncoder::EncodeVideoFrameEncoderThread(
+ const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const CodecDynamicConfig& dynamic_config,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure frame_release_callback) {
+ if (dynamic_config.key_frame_requested) {
+ vp8_encoder_->RequestKeyFrame();
+ }
+ vp8_encoder_->LatestFrameIdToReference(
+ dynamic_config.latest_frame_id_to_reference);
+ vp8_encoder_->UpdateRates(dynamic_config.bit_rate);
+
+ scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
+ bool retval = vp8_encoder_->Encode(*video_frame, encoded_frame.get());
+
+  // We are done with the video frame; release it.
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, frame_release_callback);
+
+ if (!retval) {
+ VLOG(1) << "Encoding failed";
+ return;
+ }
+ if (encoded_frame->data.size() <= 0) {
+ VLOG(1) << "Encoding resulted in an empty frame";
+ return;
+ }
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(frame_encoded_callback,
+ base::Passed(&encoded_frame), capture_time));
+}
+
+// Inform the encoder about the new target bit rate.
+void VideoEncoder::SetBitRate(int new_bit_rate) OVERRIDE {
+ dynamic_config_.bit_rate = new_bit_rate;
+}
+
+// Inform the encoder to not encode the next frame.
+void VideoEncoder::SkipNextFrame(bool skip_next_frame) OVERRIDE {
+ skip_next_frame_ = skip_next_frame;
+}
+
+// Inform the encoder to encode the next frame as a key frame.
+void VideoEncoder::GenerateKeyFrame() OVERRIDE {
+ dynamic_config_.key_frame_requested = true;
+}
+
+// Inform the encoder to only reference frames older than or equal to frame_id.
+void VideoEncoder::LatestFrameIdToReference(uint8 frame_id) OVERRIDE {
+ dynamic_config_.latest_frame_id_to_reference = frame_id;
+}
+
+int VideoEncoder::NumberOfSkippedFrames() const OVERRIDE {
+ return skip_count_;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/video_sender/video_encoder.h b/media/cast/video_sender/video_encoder.h
new file mode 100644
index 0000000000..d3b261e103
--- /dev/null
+++ b/media/cast/video_sender/video_encoder.h
@@ -0,0 +1,80 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
+#define MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
+
+namespace media {
+namespace cast {
+
+// This object is called externally from the main cast thread and internally
+// from the video encoder thread.
+class VideoEncoder : public VideoEncoderController,
+ public base::RefCountedThreadSafe<VideoEncoder> {
+ public:
+ typedef base::Callback<void(scoped_ptr<EncodedVideoFrame>,
+ const base::TimeTicks&)> FrameEncodedCallback;
+
+ VideoEncoder(scoped_refptr<CastThread> cast_thread,
+ const VideoSenderConfig& video_config,
+ uint8 max_unacked_frames);
+
+ virtual ~VideoEncoder();
+
+  // Called from the main cast thread. This function posts the encode task to
+  // the video encoder thread.
+ // The video_frame must be valid until the closure callback is called.
+ // The closure callback is called from the video encoder thread as soon as
+ // the encoder is done with the frame; it does not mean that the encoded frame
+ // has been sent out.
+ // Once the encoded frame is ready the frame_encoded_callback is called.
+ bool EncodeVideoFrame(const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure frame_release_callback);
+
+ protected:
+ struct CodecDynamicConfig {
+ bool key_frame_requested;
+ uint8 latest_frame_id_to_reference;
+ int bit_rate;
+ };
+
+ // The actual encode, called from the video encoder thread.
+ void EncodeVideoFrameEncoderThread(
+ const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const CodecDynamicConfig& dynamic_config,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure frame_release_callback);
+
+ // The following functions are called from the main cast thread.
+ virtual void SetBitRate(int new_bit_rate) OVERRIDE;
+ virtual void SkipNextFrame(bool skip_next_frame) OVERRIDE;
+ virtual void GenerateKeyFrame() OVERRIDE;
+ virtual void LatestFrameIdToReference(uint8 frame_id) OVERRIDE;
+ virtual int NumberOfSkippedFrames() const OVERRIDE;
+
+ private:
+ const VideoSenderConfig video_config_;
+ scoped_refptr<CastThread> cast_thread_;
+ scoped_ptr<Vp8Encoder> vp8_encoder_;
+ CodecDynamicConfig dynamic_config_;
+ bool skip_next_frame_;
+ int skip_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoEncoder);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
diff --git a/media/cast/video_sender/video_encoder_unittest.cc b/media/cast/video_sender/video_encoder_unittest.cc
new file mode 100644
index 0000000000..c3c682d3b1
--- /dev/null
+++ b/media/cast/video_sender/video_encoder_unittest.cc
@@ -0,0 +1,282 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/video_sender/video_encoder.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+using base::RunLoop;
+using base::MessageLoopProxy;
+using base::Thread;
+using testing::_;
+
+static void ReleaseFrame(const I420VideoFrame* frame) {
+ // Empty since we in this test send in the same frame.
+};
+
+class TestVideoEncoderCallback :
+ public base::RefCountedThreadSafe<TestVideoEncoderCallback> {
+ public:
+ TestVideoEncoderCallback() {}
+
+ void SetExpectedResult(bool expected_key_frame,
+ uint8 expected_frame_id,
+ uint8 expected_last_referenced_frame_id,
+ const base::TimeTicks& expected_capture_time) {
+ expected_key_frame_ = expected_key_frame;
+ expected_frame_id_ = expected_frame_id;
+ expected_last_referenced_frame_id_ = expected_last_referenced_frame_id;
+ expected_capture_time_ = expected_capture_time;
+ }
+
+ void DeliverEncodedVideoFrame(scoped_ptr<EncodedVideoFrame> encoded_frame,
+ const base::TimeTicks& capture_time) {
+ EXPECT_EQ(expected_key_frame_, encoded_frame->key_frame);
+ EXPECT_EQ(expected_frame_id_, encoded_frame->frame_id);
+ EXPECT_EQ(expected_last_referenced_frame_id_,
+ encoded_frame->last_referenced_frame_id);
+ EXPECT_EQ(expected_capture_time_, capture_time);
+ }
+
+ private:
+ bool expected_key_frame_;
+ uint8 expected_frame_id_;
+ uint8 expected_last_referenced_frame_id_;
+ base::TimeTicks expected_capture_time_;
+};
+
+class VideoEncoderTest : public ::testing::Test {
+ public:
+
+ protected:
+ VideoEncoderTest()
+ : pixels_(320 * 240, 123),
+ test_video_encoder_callback_(new TestVideoEncoderCallback()) {
+ video_config_.sender_ssrc = 1;
+ video_config_.incoming_feedback_ssrc = 2;
+ video_config_.rtp_payload_type = 127;
+ video_config_.use_external_encoder = false;
+ video_config_.width = 320;
+ video_config_.height = 240;
+ video_config_.max_bitrate = 5000000;
+ video_config_.min_bitrate = 1000000;
+ video_config_.start_bitrate = 2000000;
+ video_config_.max_qp = 56;
+ video_config_.min_qp = 0;
+ video_config_.max_frame_rate = 30;
+ video_config_.max_number_of_video_buffers_used = 3;
+ video_config_.codec = kVp8;
+ video_frame_.width = 320;
+ video_frame_.height = 240;
+ video_frame_.y_plane.stride = video_frame_.width;
+ video_frame_.y_plane.length = video_frame_.width;
+ video_frame_.y_plane.data = &(pixels_[0]);
+ video_frame_.u_plane.stride = video_frame_.width / 2;
+ video_frame_.u_plane.length = video_frame_.width / 2;
+ video_frame_.u_plane.data = &(pixels_[0]);
+ video_frame_.v_plane.stride = video_frame_.width / 2;
+ video_frame_.v_plane.length = video_frame_.width / 2;
+ video_frame_.v_plane.data = &(pixels_[0]);
+ }
+
+ ~VideoEncoderTest() {}
+
+ virtual void SetUp() {
+ cast_thread_ = new CastThread(MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current());
+ }
+
+ void Configure(uint8 max_unacked_frames) {
+ video_encoder_= new VideoEncoder(cast_thread_, video_config_,
+ max_unacked_frames);
+ video_encoder_controller_ = video_encoder_.get();
+ }
+
+ base::MessageLoop loop_;
+ std::vector<uint8> pixels_;
+ scoped_refptr<TestVideoEncoderCallback> test_video_encoder_callback_;
+ VideoSenderConfig video_config_;
+ scoped_refptr<VideoEncoder> video_encoder_;
+ VideoEncoderController* video_encoder_controller_;
+ I420VideoFrame video_frame_;
+
+ scoped_refptr<CastThread> cast_thread_;
+};
+
+TEST_F(VideoEncoderTest, EncodePattern30fpsRunningOutOfAck) {
+ Configure(3);
+
+ VideoEncoder::FrameEncodedCallback frame_encoded_callback =
+ base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
+ test_video_encoder_callback_.get());
+
+ base::TimeTicks capture_time;
+ {
+ RunLoop run_loop;
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ run_loop.RunUntilIdle();
+ }
+ {
+ RunLoop run_loop;
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ video_encoder_controller_->LatestFrameIdToReference(0);
+ test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ run_loop.RunUntilIdle();
+ }
+ {
+ RunLoop run_loop;
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ video_encoder_controller_->LatestFrameIdToReference(1);
+ test_video_encoder_callback_->SetExpectedResult(false, 2, 1, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ run_loop.RunUntilIdle();
+ }
+
+ video_encoder_controller_->LatestFrameIdToReference(2);
+
+ for (int i = 3; i < 6; ++i) {
+ RunLoop run_loop;
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ run_loop.RunUntilIdle();
+ }
+}
+
+TEST_F(VideoEncoderTest, EncodePattern60fpsRunningOutOfAck) {
+ Configure(6);
+
+ base::TimeTicks capture_time;
+ VideoEncoder::FrameEncodedCallback frame_encoded_callback =
+ base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
+ test_video_encoder_callback_.get());
+
+ {
+ RunLoop run_loop;
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ run_loop.RunUntilIdle();
+ }
+ {
+ RunLoop run_loop;
+ video_encoder_controller_->LatestFrameIdToReference(0);
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ run_loop.RunUntilIdle();
+ }
+ {
+ RunLoop run_loop;
+ video_encoder_controller_->LatestFrameIdToReference(1);
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ run_loop.RunUntilIdle();
+ }
+
+ video_encoder_controller_->LatestFrameIdToReference(2);
+
+ for (int i = 3; i < 9; ++i) {
+ RunLoop run_loop;
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame( &video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ run_loop.RunUntilIdle();
+ }
+}
+
+TEST_F(VideoEncoderTest, EncodePattern60fps200msDelayRunningOutOfAck) {
+ Configure(12);
+
+ base::TimeTicks capture_time;
+ VideoEncoder::FrameEncodedCallback frame_encoded_callback =
+ base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
+ test_video_encoder_callback_.get());
+
+ {
+ RunLoop run_loop;
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ run_loop.RunUntilIdle();
+ }
+ {
+ RunLoop run_loop;
+ video_encoder_controller_->LatestFrameIdToReference(0);
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ run_loop.RunUntilIdle();
+ }
+ {
+ RunLoop run_loop;
+ video_encoder_controller_->LatestFrameIdToReference(1);
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ run_loop.RunUntilIdle();
+ }
+ {
+ RunLoop run_loop;
+ video_encoder_controller_->LatestFrameIdToReference(2);
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(false, 3, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ run_loop.RunUntilIdle();
+ }
+ {
+ RunLoop run_loop;
+ video_encoder_controller_->LatestFrameIdToReference(3);
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(false, 4, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ run_loop.RunUntilIdle();
+ }
+
+ video_encoder_controller_->LatestFrameIdToReference(4);
+
+ for (int i = 5; i < 17; ++i) {
+ RunLoop run_loop;
+ test_video_encoder_callback_->SetExpectedResult(false, i, 4, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+ frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ run_loop.RunUntilIdle();
+ }
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/video_sender/video_sender.cc b/media/cast/video_sender/video_sender.cc
new file mode 100644
index 0000000000..1b42238832
--- /dev/null
+++ b/media/cast/video_sender/video_sender.cc
@@ -0,0 +1,346 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/video_sender/video_sender.h"
+
+#include <list>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/video_sender/video_encoder.h"
+
+namespace media {
+namespace cast {
+
+const int64 kMinSchedulingDelayMs = 1;
+
+class LocalRtcpVideoSenderFeedback : public RtcpSenderFeedback {
+ public:
+ explicit LocalRtcpVideoSenderFeedback(VideoSender* video_sender)
+ : video_sender_(video_sender) {
+ }
+
+ virtual void OnReceivedSendReportRequest() OVERRIDE {}
+
+ virtual void OnReceivedReportBlock(
+ const RtcpReportBlock& report_block) OVERRIDE {}
+
+ virtual void OnReceivedRpsi(uint8 payload_type,
+ uint64 picture_id) OVERRIDE {
+ NOTIMPLEMENTED();
+ }
+
+ virtual void OnReceivedRemb(uint32 bitrate) OVERRIDE {
+ NOTIMPLEMENTED();
+ }
+
+ virtual void OnReceivedNackRequest(
+ const std::list<uint16>& nack_sequence_numbers) OVERRIDE {
+ NOTIMPLEMENTED();
+ }
+
+ virtual void OnReceivedIntraFrameRequest() OVERRIDE {
+ video_sender_->OnReceivedIntraFrameRequest();
+ }
+
+ virtual void OnReceivedCastFeedback(
+ const RtcpCastMessage& cast_feedback) OVERRIDE {
+ video_sender_->OnReceivedCastFeedback(cast_feedback);
+ }
+
+ private:
+ VideoSender* video_sender_;
+};
+
+class LocalRtpVideoSenderStatistics : public RtpSenderStatistics {
+ public:
+ explicit LocalRtpVideoSenderStatistics(RtpSender* rtp_sender)
+ : rtp_sender_(rtp_sender) {
+ }
+
+ virtual void GetStatistics(const base::TimeTicks& now,
+ RtcpSenderInfo* sender_info) OVERRIDE {
+ rtp_sender_->RtpStatistics(now, sender_info);
+ }
+
+ private:
+ RtpSender* rtp_sender_;
+};
+
+VideoSender::VideoSender(
+ scoped_refptr<CastThread> cast_thread,
+ const VideoSenderConfig& video_config,
+ VideoEncoderController* const video_encoder_controller,
+ PacedPacketSender* const paced_packet_sender)
+ : incoming_feedback_ssrc_(video_config.incoming_feedback_ssrc),
+ rtp_max_delay_(
+ base::TimeDelta::FromMilliseconds(video_config.rtp_max_delay_ms)),
+ max_frame_rate_(video_config.max_frame_rate),
+ cast_thread_(cast_thread),
+ rtcp_feedback_(new LocalRtcpVideoSenderFeedback(this)),
+ rtp_sender_(new RtpSender(NULL, &video_config, paced_packet_sender)),
+ last_acked_frame_id_(-1),
+ last_sent_frame_id_(-1),
+ last_sent_key_frame_id_(-1),
+ duplicate_ack_(0),
+ last_skip_count_(0),
+ congestion_control_(video_config.congestion_control_back_off,
+ video_config.max_bitrate,
+ video_config.min_bitrate,
+ video_config.start_bitrate),
+ clock_(&default_tick_clock_),
+ weak_factory_(this) {
+ max_unacked_frames_ = static_cast<uint8>(video_config.rtp_max_delay_ms *
+ video_config.max_frame_rate / 1000);
+ DCHECK(max_unacked_frames_ > 0) << "Invalid argument";
+
+ rtp_video_sender_statistics_.reset(
+ new LocalRtpVideoSenderStatistics(rtp_sender_.get()));
+
+ if (video_config.use_external_encoder) {
+ DCHECK(video_encoder_controller) << "Invalid argument";
+ video_encoder_controller_ = video_encoder_controller;
+ } else {
+ video_encoder_ = new VideoEncoder(cast_thread, video_config,
+ max_unacked_frames_);
+ video_encoder_controller_ = video_encoder_.get();
+ }
+ rtcp_.reset(new Rtcp(
+ rtcp_feedback_.get(),
+ paced_packet_sender,
+ rtp_video_sender_statistics_.get(),
+ NULL,
+ video_config.rtcp_mode,
+ base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
+ true,
+ video_config.sender_ssrc,
+ video_config.rtcp_c_name));
+
+ rtcp_->SetRemoteSSRC(video_config.incoming_feedback_ssrc);
+ ScheduleNextRtcpReport();
+ ScheduleNextResendCheck();
+ ScheduleNextSkippedFramesCheck();
+}
+
+VideoSender::~VideoSender() {}
+
+void VideoSender::InsertRawVideoFrame(
+ const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback) {
+ DCHECK(video_encoder_.get()) << "Invalid state";
+
+ if (!video_encoder_->EncodeVideoFrame(video_frame, capture_time,
+ base::Bind(&VideoSender::SendEncodedVideoFrameMainThread,
+ weak_factory_.GetWeakPtr()), callback)) {
+ VLOG(1) << "Failed to InsertRawVideoFrame";
+ }
+}
+
+void VideoSender::InsertCodedVideoFrame(const EncodedVideoFrame* encoded_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback) {
+ DCHECK(!video_encoder_.get()) << "Invalid state";
+ DCHECK(encoded_frame) << "Invalid argument";
+
+ SendEncodedVideoFrame(encoded_frame, capture_time);
+ callback.Run();
+}
+
+void VideoSender::SendEncodedVideoFrameMainThread(
+ scoped_ptr<EncodedVideoFrame> video_frame,
+ const base::TimeTicks& capture_time) {
+ SendEncodedVideoFrame(video_frame.get(), capture_time);
+}
+
+void VideoSender::SendEncodedVideoFrame(const EncodedVideoFrame* encoded_frame,
+ const base::TimeTicks& capture_time) {
+ last_send_time_ = clock_->NowTicks();
+ rtp_sender_->IncomingEncodedVideoFrame(encoded_frame, capture_time);
+ if (encoded_frame->key_frame) {
+ last_sent_key_frame_id_ = encoded_frame->frame_id;
+ }
+ last_sent_frame_id_ = encoded_frame->frame_id;
+ UpdateFramesInFlight();
+}
+
+void VideoSender::OnReceivedIntraFrameRequest() {
+ if (last_sent_key_frame_id_ != -1) {
+ uint8 frames_in_flight = static_cast<uint8>(last_sent_frame_id_) -
+ static_cast<uint8>(last_sent_key_frame_id_);
+ if (frames_in_flight < (max_unacked_frames_ - 1)) return;
+ }
+ video_encoder_controller_->GenerateKeyFrame();
+ last_acked_frame_id_ = -1;
+ last_sent_frame_id_ = -1;
+}
+
+void VideoSender::IncomingRtcpPacket(const uint8* packet, int length,
+ const base::Closure callback) {
+ rtcp_->IncomingRtcpPacket(packet, length);
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, callback);
+}
+
+void VideoSender::ScheduleNextRtcpReport() {
+ base::TimeDelta time_to_next =
+ rtcp_->TimeToSendNextRtcpReport() - clock_->NowTicks();
+
+ time_to_next = std::max(time_to_next,
+ base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+
+ cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&VideoSender::SendRtcpReport, weak_factory_.GetWeakPtr()),
+ time_to_next);
+}
+
+void VideoSender::SendRtcpReport() {
+ rtcp_->SendRtcpReport(incoming_feedback_ssrc_);
+ ScheduleNextRtcpReport();
+}
+
+void VideoSender::ScheduleNextResendCheck() {
+ base::TimeDelta time_to_next;
+ if (last_send_time_.is_null()) {
+ time_to_next = rtp_max_delay_;
+ } else {
+ time_to_next = last_send_time_ - clock_->NowTicks() + rtp_max_delay_;
+ }
+ time_to_next = std::max(time_to_next,
+ base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+
+ cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&VideoSender::ResendCheck, weak_factory_.GetWeakPtr()),
+ time_to_next);
+}
+
+void VideoSender::ResendCheck() {
+ if (!last_send_time_.is_null() && last_sent_frame_id_ != -1) {
+ base::TimeDelta time_to_next =
+ last_send_time_ - clock_->NowTicks() + rtp_max_delay_;
+
+ if (last_acked_frame_id_ == -1) {
+ // We have not received any ack, send a key frame.
+ video_encoder_controller_->GenerateKeyFrame();
+ last_acked_frame_id_ = -1;
+ last_sent_frame_id_ = -1;
+ UpdateFramesInFlight();
+ } else {
+ ResendFrame(static_cast<uint8>(last_acked_frame_id_ + 1));
+ }
+ }
+ ScheduleNextResendCheck();
+}
+
+void VideoSender::ScheduleNextSkippedFramesCheck() {
+ base::TimeDelta time_to_next;
+ if (last_checked_skip_count_time_.is_null()) {
+ time_to_next =
+ base::TimeDelta::FromMilliseconds(kSkippedFramesCheckPeriodkMs);
+ } else {
+ time_to_next = last_checked_skip_count_time_ - clock_->NowTicks() +
+ base::TimeDelta::FromMilliseconds(kSkippedFramesCheckPeriodkMs);
+ }
+ time_to_next = std::max(time_to_next,
+ base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+
+ cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&VideoSender::SkippedFramesCheck, weak_factory_.GetWeakPtr()),
+ time_to_next);
+}
+
+void VideoSender::SkippedFramesCheck() {
+ int skip_count = video_encoder_controller_->NumberOfSkippedFrames();
+ if (skip_count - last_skip_count_ >
+ kSkippedFramesThreshold * max_frame_rate_) {
+ // TODO(pwestin): Propagate this up to the application.
+ }
+ last_skip_count_ = skip_count;
+ last_checked_skip_count_time_ = clock_->NowTicks();
+ ScheduleNextSkippedFramesCheck();
+}
+
+void VideoSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
+ base::TimeDelta rtt;
+ base::TimeDelta avg_rtt;
+ base::TimeDelta min_rtt;
+ base::TimeDelta max_rtt;
+
+ if (rtcp_->Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt)) {
+    // Don't use an RTT lower than our average.
+ rtt = std::max(rtt, avg_rtt);
+ } else {
+    // We have no measured value; use the default.
+ rtt = base::TimeDelta::FromMilliseconds(kStartRttMs);
+ }
+ if (cast_feedback.missing_frames_and_packets_.empty()) {
+ // No lost packets.
+ int resend_frame = -1;
+ if (last_sent_frame_id_ == -1) return;
+
+ video_encoder_controller_->LatestFrameIdToReference(
+ cast_feedback.ack_frame_id_);
+
+ if (static_cast<uint8>(last_acked_frame_id_ + 1) ==
+ cast_feedback.ack_frame_id_) {
+ uint32 new_bitrate = 0;
+ if (congestion_control_.OnAck(rtt, &new_bitrate)) {
+ video_encoder_controller_->SetBitRate(new_bitrate);
+ }
+ }
+ if (last_acked_frame_id_ == cast_feedback.ack_frame_id_ &&
+ // We only count duplicate ACKs when we have sent newer frames.
+ IsNewerFrameId(last_sent_frame_id_, last_acked_frame_id_)) {
+ duplicate_ack_++;
+ } else {
+ duplicate_ack_ = 0;
+ }
+ if (duplicate_ack_ >= 2 && duplicate_ack_ % 3 == 2) {
+ // Resend last ACK + 1 frame.
+ resend_frame = static_cast<uint8>(last_acked_frame_id_ + 1);
+ }
+ if (resend_frame != -1) {
+ ResendFrame(static_cast<uint8>(resend_frame));
+ }
+ } else {
+ rtp_sender_->ResendPackets(cast_feedback.missing_frames_and_packets_);
+ last_send_time_ = clock_->NowTicks();
+
+ uint32 new_bitrate = 0;
+ if (congestion_control_.OnNack(rtt, &new_bitrate)) {
+ video_encoder_controller_->SetBitRate(new_bitrate);
+ }
+ }
+ ReceivedAck(cast_feedback.ack_frame_id_);
+}
+
+void VideoSender::ReceivedAck(uint8 acked_frame_id) {
+ last_acked_frame_id_ = acked_frame_id;
+ UpdateFramesInFlight();
+}
+
+void VideoSender::UpdateFramesInFlight() {
+ if (last_sent_frame_id_ != -1) {
+ uint8 frames_in_flight = static_cast<uint8>(last_sent_frame_id_) -
+ static_cast<uint8>(last_acked_frame_id_);
+ if (frames_in_flight >= max_unacked_frames_) {
+ video_encoder_controller_->SkipNextFrame(true);
+ return;
+ }
+ }
+ video_encoder_controller_->SkipNextFrame(false);
+}
+
+void VideoSender::ResendFrame(uint8 resend_frame_id) {
+ MissingFramesAndPacketsMap missing_frames_and_packets;
+ PacketIdSet missing;
+ missing_frames_and_packets.insert(std::make_pair(resend_frame_id, missing));
+ rtp_sender_->ResendPackets(missing_frames_and_packets);
+ last_send_time_ = clock_->NowTicks();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/video_sender/video_sender.gypi b/media/cast/video_sender/video_sender.gypi
new file mode 100644
index 0000000000..9499066165
--- /dev/null
+++ b/media/cast/video_sender/video_sender.gypi
@@ -0,0 +1,31 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ 'codecs/vp8/vp8_encoder.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'video_sender',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ ],
+ 'sources': [
+ 'video_encoder.h',
+ 'video_encoder.cc',
+ 'video_sender.h',
+ 'video_sender.cc',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/media/cast/rtcp/rtcp.gyp:*',
+ '<(DEPTH)/media/cast/rtp_sender/rtp_sender.gyp:*',
+ 'congestion_control',
+ 'cast_vp8_encoder',
+ ],
+ },
+ ],
+}
diff --git a/media/cast/video_sender/video_sender.h b/media/cast/video_sender/video_sender.h
new file mode 100644
index 0000000000..9098e975c4
--- /dev/null
+++ b/media/cast/video_sender/video_sender.h
@@ -0,0 +1,145 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_VIDEO_SENDER_H_
+#define MEDIA_CAST_VIDEO_SENDER_VIDEO_SENDER_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/congestion_control/congestion_control.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_sender/rtp_sender.h"
+
+namespace media {
+namespace cast {
+
+class VideoEncoder;
+class LocalRtcpVideoSenderFeedback;
+class LocalRtpVideoSenderStatistics;
+class LocalVideoEncoderCallback;
+class PacedPacketSender;
+
+// Not thread safe. Only called from the main cast thread.
+// This class owns all objects related to sending video, objects that create RTP
+// packets, congestion control, video encoder, parsing and sending of
+// RTCP packets.
+// Additionally it posts a bunch of delayed tasks to the main thread for various
+// timeouts.
+class VideoSender : public base::NonThreadSafe,
+ public base::SupportsWeakPtr<VideoSender> {
+ public:
+ VideoSender(scoped_refptr<CastThread> cast_thread,
+ const VideoSenderConfig& video_config,
+ VideoEncoderController* const video_encoder_controller,
+ PacedPacketSender* const paced_packet_sender);
+
+ virtual ~VideoSender();
+
+ // The video_frame must be valid until the closure callback is called.
+ // The closure callback is called from the video encoder thread as soon as
+ // the encoder is done with the frame; it does not mean that the encoded frame
+ // has been sent out.
+ void InsertRawVideoFrame(
+ const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback);
+
+ // The video_frame must be valid until the closure callback is called.
+ // The closure callback is called from the main thread as soon as
+ // the cast sender is done with the frame; it does not mean that the encoded
+ // frame has been sent out.
+ void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback);
+
+ // Only called from the main cast thread.
+ void IncomingRtcpPacket(const uint8* packet, int length,
+ const base::Closure callback);
+
+ void set_clock(base::TickClock* clock) {
+ clock_ = clock;
+ congestion_control_.set_clock(clock);
+ rtcp_->set_clock(clock);
+ rtp_sender_->set_clock(clock);
+ }
+
+ protected:
+ // Protected for testability.
+ void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback);
+
+ private:
+ friend class LocalRtcpVideoSenderFeedback;
+
+  // Schedule when we should send the next RTCP report,
+ // via a PostDelayedTask to the main cast thread.
+ void ScheduleNextRtcpReport();
+ void SendRtcpReport();
+
+ // Schedule when we should check that we have received an acknowledgment, or a
+ // loss report from our remote peer. If we have not heard back from our remote
+ // peer we speculatively resend our oldest unacknowledged frame (the whole
+ // frame). Note for this to happen we need to lose all pending packets (in
+ // normal operation 3 full frames), hence this is the last resort to prevent
+ // us getting stuck after a long outage.
+ void ScheduleNextResendCheck();
+ void ResendCheck();
+
+  // Monitor how many frames are silently dropped by the video sender
+  // per time unit.
+ void ScheduleNextSkippedFramesCheck();
+ void SkippedFramesCheck();
+
+ void SendEncodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time);
+ void OnReceivedIntraFrameRequest();
+ void ResendFrame(uint8 resend_frame_id);
+ void ReceivedAck(uint8 acked_frame_id);
+ void UpdateFramesInFlight();
+
+ void SendEncodedVideoFrameMainThread(
+ scoped_ptr<EncodedVideoFrame> video_frame,
+ const base::TimeTicks& capture_time);
+
+ const uint32 incoming_feedback_ssrc_;
+ const base::TimeDelta rtp_max_delay_;
+ const int max_frame_rate_;
+
+ scoped_refptr<CastThread> cast_thread_;
+ scoped_ptr<LocalRtcpVideoSenderFeedback> rtcp_feedback_;
+ scoped_ptr<LocalRtpVideoSenderStatistics> rtp_video_sender_statistics_;
+ scoped_refptr<VideoEncoder> video_encoder_;
+ scoped_ptr<Rtcp> rtcp_;
+ scoped_ptr<RtpSender> rtp_sender_;
+ VideoEncoderController* video_encoder_controller_;
+ uint8 max_unacked_frames_;
+ int last_acked_frame_id_;
+ int last_sent_frame_id_;
+ int last_sent_key_frame_id_;
+ int duplicate_ack_;
+ base::TimeTicks last_send_time_;
+ base::TimeTicks last_checked_skip_count_time_;
+ int last_skip_count_;
+ CongestionControl congestion_control_;
+
+ base::DefaultTickClock default_tick_clock_;
+ base::TickClock* clock_;
+
+ base::WeakPtrFactory<VideoSender> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoSender);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_VIDEO_SENDER_H_
+
diff --git a/media/cast/video_sender/video_sender_unittest.cc b/media/cast/video_sender/video_sender_unittest.cc
new file mode 100644
index 0000000000..91e74f1353
--- /dev/null
+++ b/media/cast/video_sender/video_sender_unittest.cc
@@ -0,0 +1,226 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/pacing/mock_paced_packet_sender.h"
+#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/video_sender/mock_video_encoder_controller.h"
+#include "media/cast/video_sender/video_sender.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+static const int64 kStartMillisecond = 123456789;
+
+using base::RunLoop;
+using testing::_;
+
+class PeerVideoSender : public VideoSender {
+ public:
+ PeerVideoSender(scoped_refptr<CastThread> cast_thread,
+ const VideoSenderConfig& video_config,
+ VideoEncoderController* const video_encoder_controller,
+ PacedPacketSender* const paced_packet_sender)
+ : VideoSender(cast_thread, video_config, video_encoder_controller,
+ paced_packet_sender) {
+ }
+ using VideoSender::OnReceivedCastFeedback;
+};
+
+static void ReleaseVideoFrame(const I420VideoFrame* frame) {
+ delete [] frame->y_plane.data;
+ delete [] frame->u_plane.data;
+ delete [] frame->v_plane.data;
+ delete frame;
+};
+
+static void ReleaseEncodedFrame(const EncodedVideoFrame* frame) {
+ // Do nothing.
+};
+
+class VideoSenderTest : public ::testing::Test {
+ protected:
+ VideoSenderTest() {
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ }
+
+ ~VideoSenderTest() {}
+
+ void InitEncoder(bool external) {
+ VideoSenderConfig video_config;
+ video_config.sender_ssrc = 1;
+ video_config.incoming_feedback_ssrc = 2;
+ video_config.rtp_payload_type = 127;
+ video_config.use_external_encoder = external;
+ video_config.width = 320;
+ video_config.height = 240;
+ video_config.max_bitrate = 5000000;
+ video_config.min_bitrate = 1000000;
+ video_config.start_bitrate = 1000000;
+ video_config.max_qp = 56;
+ video_config.min_qp = 0;
+ video_config.max_frame_rate = 30;
+ video_config.max_number_of_video_buffers_used = 3;
+ video_config.codec = kVp8;
+
+ if (external) {
+ video_sender_.reset(new PeerVideoSender(cast_thread_, video_config,
+ &mock_video_encoder_controller_, &mock_transport_));
+ } else {
+ video_sender_.reset(new PeerVideoSender(cast_thread_, video_config, NULL,
+ &mock_transport_));
+ }
+ video_sender_->set_clock(&testing_clock_);
+ }
+
+ virtual void SetUp() {
+ cast_thread_ = new CastThread(MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current());
+ }
+
+ I420VideoFrame* AllocateNewVideoFrame() {
+ I420VideoFrame* video_frame = new I420VideoFrame();
+ video_frame->width = 320;
+ video_frame->height = 240;
+
+ video_frame->y_plane.stride = video_frame->width;
+ video_frame->y_plane.length = video_frame->width;
+ video_frame->y_plane.data =
+ new uint8[video_frame->width * video_frame->height];
+ memset(video_frame->y_plane.data, 123,
+ video_frame->width * video_frame->height);
+ video_frame->u_plane.stride = video_frame->width / 2;
+ video_frame->u_plane.length = video_frame->width / 2;
+ video_frame->u_plane.data =
+ new uint8[video_frame->width * video_frame->height / 4];
+ memset(video_frame->u_plane.data, 123,
+ video_frame->width * video_frame->height / 4);
+ video_frame->v_plane.stride = video_frame->width / 2;
+ video_frame->v_plane.length = video_frame->width / 2;
+ video_frame->v_plane.data =
+ new uint8[video_frame->width * video_frame->height / 4];
+ memset(video_frame->v_plane.data, 123,
+ video_frame->width * video_frame->height / 4);
+ return video_frame;
+ }
+
+ base::MessageLoop loop_;
+ MockVideoEncoderController mock_video_encoder_controller_;
+ base::SimpleTestTickClock testing_clock_;
+ MockPacedPacketSender mock_transport_;
+ scoped_ptr<PeerVideoSender> video_sender_;
+ scoped_refptr<CastThread> cast_thread_;
+};
+
+TEST_F(VideoSenderTest, BuiltInEncoder) {
+ EXPECT_CALL(mock_transport_, SendPacket(_, _)).Times(1);
+
+ RunLoop run_loop;
+ InitEncoder(false);
+ I420VideoFrame* video_frame = AllocateNewVideoFrame();
+
+ base::TimeTicks capture_time;
+ video_sender_->InsertRawVideoFrame(video_frame, capture_time,
+ base::Bind(&ReleaseVideoFrame, video_frame));
+
+ run_loop.RunUntilIdle();
+}
+
+TEST_F(VideoSenderTest, ExternalEncoder) {
+ EXPECT_CALL(mock_transport_, SendPacket(_, _)).Times(1);
+ EXPECT_CALL(mock_video_encoder_controller_, SkipNextFrame(false)).Times(1);
+ InitEncoder(true);
+
+ EncodedVideoFrame video_frame;
+ base::TimeTicks capture_time;
+
+ video_frame.codec = kVp8;
+ video_frame.key_frame = true;
+ video_frame.frame_id = 0;
+ video_frame.last_referenced_frame_id = 0;
+ video_frame.data.insert(video_frame.data.begin(), 123, 1000);
+
+ video_sender_->InsertCodedVideoFrame(&video_frame, capture_time,
+ base::Bind(&ReleaseEncodedFrame, &video_frame));
+}
+
+TEST_F(VideoSenderTest, RtcpTimer) {
+ EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).Times(1);
+ RunLoop run_loop;
+ InitEncoder(false);
+
+ // Make sure that we send at least one RTCP packet.
+ base::TimeDelta max_rtcp_timeout =
+ base::TimeDelta::FromMilliseconds(1 + kDefaultRtcpIntervalMs * 3 / 2);
+ testing_clock_.Advance(max_rtcp_timeout);
+
+  // TODO(pwestin): haven't found a way to make the posted delayed task run
+  // faster than real time.
+ base::PlatformThread::Sleep(max_rtcp_timeout);
+ run_loop.RunUntilIdle();
+}
+
+TEST_F(VideoSenderTest, ResendTimer) {
+ EXPECT_CALL(mock_transport_, SendPacket(_, _)).Times(2);
+ EXPECT_CALL(mock_transport_, ResendPacket(_, _)).Times(1);
+
+ RunLoop run_loop;
+ InitEncoder(false);
+
+ I420VideoFrame* video_frame = AllocateNewVideoFrame();
+
+ base::TimeTicks capture_time;
+ video_sender_->InsertRawVideoFrame(video_frame, capture_time,
+ base::Bind(&ReleaseVideoFrame, video_frame));
+
+ run_loop.RunUntilIdle();
+
+ // ACK the key frame.
+ RtcpCastMessage cast_feedback(1);
+ cast_feedback.media_ssrc_ = 2;
+ cast_feedback.ack_frame_id_ = 0;
+ video_sender_->OnReceivedCastFeedback(cast_feedback);
+
+ video_frame = AllocateNewVideoFrame();
+ video_sender_->InsertRawVideoFrame(video_frame, capture_time,
+ base::Bind(&ReleaseVideoFrame, video_frame));
+
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
+
+ base::TimeDelta max_resend_timeout =
+ base::TimeDelta::FromMilliseconds(1 + kDefaultRtpMaxDelayMs);
+
+ // Make sure that we do a re-send.
+ testing_clock_.Advance(max_resend_timeout);
+
+  // TODO(pwestin): haven't found a way to make the posted delayed task run
+  // faster than real time.
+ base::PlatformThread::Sleep(max_resend_timeout);
+ {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
+}
+
+} // namespace cast
+} // namespace media
+
diff --git a/media/cdm/aes_decryptor.cc b/media/cdm/aes_decryptor.cc
index da11442a37..b2dc48b9eb 100644
--- a/media/cdm/aes_decryptor.cc
+++ b/media/cdm/aes_decryptor.cc
@@ -6,9 +6,13 @@
#include <vector>
+#include "base/base64.h"
+#include "base/json/json_reader.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/values.h"
#include "crypto/encryptor.h"
#include "crypto/symmetric_key.h"
#include "media/base/audio_decoder_config.h"
@@ -26,6 +30,8 @@ enum ClearBytesBufferSel {
kDstContainsClearBytes
};
+typedef std::vector<std::pair<std::string, std::string> > JWKKeys;
+
static void CopySubsamples(const std::vector<SubsampleEntry>& subsamples,
const ClearBytesBufferSel sel,
const uint8* src,
@@ -43,6 +49,105 @@ static void CopySubsamples(const std::vector<SubsampleEntry>& subsamples,
}
}
+// Processes a JSON Web Key to extract the key id and key value. Adds the
+// id/value pair to |jwk_keys| and returns true on success.
+static bool ProcessSymmetricKeyJWK(const DictionaryValue& jwk,
+ JWKKeys* jwk_keys) {
+ // A symmetric keys JWK looks like the following in JSON:
+ // { "kty":"oct",
+ // "kid":"AAECAwQFBgcICQoLDA0ODxAREhM=",
+ // "k":"FBUWFxgZGhscHR4fICEiIw==" }
+ // There may be other properties specified, but they are ignored.
+ // Ref: http://tools.ietf.org/html/draft-ietf-jose-json-web-key-14
+ // and:
+ // http://tools.ietf.org/html/draft-jones-jose-json-private-and-symmetric-key-00
+
+ // Have found a JWK, start by checking that it is a symmetric key.
+ std::string type;
+ if (!jwk.GetString("kty", &type) || type != "oct") {
+ DVLOG(1) << "JWK is not a symmetric key";
+ return false;
+ }
+
+ // Get the key id and actual key parameters.
+ std::string encoded_key_id;
+ std::string encoded_key;
+ if (!jwk.GetString("kid", &encoded_key_id)) {
+ DVLOG(1) << "Missing 'kid' parameter";
+ return false;
+ }
+ if (!jwk.GetString("k", &encoded_key)) {
+ DVLOG(1) << "Missing 'k' parameter";
+ return false;
+ }
+
+ // Key ID and key are base64-encoded strings, so decode them.
+ // TODO(jrummell): The JWK spec and the EME spec don't say that 'kid' must be
+ // base64-encoded (they don't say anything at all). Verify with the EME spec.
+ std::string decoded_key_id;
+ std::string decoded_key;
+ if (!base::Base64Decode(encoded_key_id, &decoded_key_id) ||
+ decoded_key_id.empty()) {
+ DVLOG(1) << "Invalid 'kid' value";
+ return false;
+ }
+ if (!base::Base64Decode(encoded_key, &decoded_key) ||
+ decoded_key.length() !=
+ static_cast<size_t>(DecryptConfig::kDecryptionKeySize)) {
+ DVLOG(1) << "Invalid length of 'k' " << decoded_key.length();
+ return false;
+ }
+
+ // Add the decoded key ID and the decoded key to the list.
+ jwk_keys->push_back(std::make_pair(decoded_key_id, decoded_key));
+ return true;
+}
+
+// Extracts the JSON Web Keys from a JSON Web Key Set. If |input| looks like
+// a valid JWK Set, then true is returned and |jwk_keys| is updated to contain
+// the list of keys found. Otherwise return false.
+static bool ExtractJWKKeys(const std::string& input, JWKKeys* jwk_keys) {
+ // TODO(jrummell): The EME spec references a smaller set of allowed ASCII
+ // values. Verify with spec that the smaller character set is needed.
+ if (!IsStringASCII(input))
+ return false;
+
+ scoped_ptr<Value> root(base::JSONReader().ReadToValue(input));
+ if (!root.get() || root->GetType() != Value::TYPE_DICTIONARY)
+ return false;
+
+ // A JSON Web Key Set looks like the following in JSON:
+ // { "keys": [ JWK1, JWK2, ... ] }
+ // (See ProcessSymmetricKeyJWK() for description of JWK.)
+ // There may be other properties specified, but they are ignored.
+ // Locate the set from the dictionary.
+ DictionaryValue* dictionary = static_cast<DictionaryValue*>(root.get());
+ ListValue* list_val = NULL;
+ if (!dictionary->GetList("keys", &list_val)) {
+ DVLOG(1) << "Missing 'keys' parameter or not a list in JWK Set";
+ return false;
+ }
+
+ // Create a local list of keys, so that |jwk_keys| only gets updated on
+ // success.
+ JWKKeys local_keys;
+ for (size_t i = 0; i < list_val->GetSize(); ++i) {
+ DictionaryValue* jwk = NULL;
+ if (!list_val->GetDictionary(i, &jwk)) {
+ DVLOG(1) << "Unable to access 'keys'[" << i << "] in JWK Set";
+ return false;
+ }
+ if (!ProcessSymmetricKeyJWK(*jwk, &local_keys)) {
+ DVLOG(1) << "Error from 'keys'[" << i << "]";
+ return false;
+ }
+ }
+
+ // Successfully processed all JWKs in the set.
+ jwk_keys->swap(local_keys);
+ return true;
+}
+
// Decrypts |input| using |key|. Returns a DecoderBuffer with the decrypted
// data if decryption succeeded or NULL if decryption failed.
static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
@@ -170,41 +275,65 @@ void AesDecryptor::AddKey(const uint8* key,
CHECK(key);
CHECK_GT(key_length, 0);
+ // AddKey() is called from update(), where the key(s) are passed as a JSON
+ // Web Key (JWK) set. Each JWK needs to be a symmetric key ('kty' = "oct"),
+ // with 'kid' being the base64-encoded key id, and 'k' being the
+ // base64-encoded key.
+ //
+ // For backwards compatibility with v0.1b of the spec (where |key| is the raw
+ // key and |init_data| is the key id), if |key| is not valid JSON, then
+ // attempt to process it as a raw key.
+
// TODO(xhwang): Add |session_id| check after we figure out how:
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=16550
- if (key_length != DecryptConfig::kDecryptionKeySize) {
- DVLOG(1) << "Invalid key length: " << key_length;
- key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
- return;
- }
- // TODO(xhwang): Fix the decryptor to accept no |init_data|. See
- // http://crbug.com/123265. Until then, ensure a non-empty value is passed.
- static const uint8 kDummyInitData[1] = { 0 };
- if (!init_data) {
- init_data = kDummyInitData;
- init_data_length = arraysize(kDummyInitData);
- }
+ std::string key_string(reinterpret_cast<const char*>(key), key_length);
+ JWKKeys jwk_keys;
+ if (ExtractJWKKeys(key_string, &jwk_keys)) {
+ // Since |key| represents valid JSON, init_data must be empty.
+ DCHECK(!init_data);
+ DCHECK_EQ(init_data_length, 0);
- // TODO(xhwang): For now, use |init_data| for key ID. Make this more spec
- // compliant later (http://crbug.com/123262, http://crbug.com/123265).
- std::string key_id_string(reinterpret_cast<const char*>(init_data),
- init_data_length);
- std::string key_string(reinterpret_cast<const char*>(key) , key_length);
- scoped_ptr<DecryptionKey> decryption_key(new DecryptionKey(key_string));
- if (!decryption_key) {
- DVLOG(1) << "Could not create key.";
- key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
- return;
- }
+ // Make sure that at least one key was extracted.
+ if (jwk_keys.empty()) {
+ key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ return;
+ }
+ for (JWKKeys::iterator it = jwk_keys.begin() ; it != jwk_keys.end(); ++it) {
+ if (!AddDecryptionKey(it->first, it->second)) {
+ key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ return;
+ }
+ }
+ } else {
+ // v0.1b backwards compatibility support.
+ // TODO(jrummell): Remove this code once v0.1b no longer supported.
- if (!decryption_key->Init()) {
- DVLOG(1) << "Could not initialize decryption key.";
- key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
- return;
- }
+ if (key_string.length() !=
+ static_cast<size_t>(DecryptConfig::kDecryptionKeySize)) {
+ DVLOG(1) << "Invalid key length: " << key_string.length();
+ key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ return;
+ }
+
+ // TODO(xhwang): Fix the decryptor to accept no |init_data|. See
+ // http://crbug.com/123265. Until then, ensure a non-empty value is passed.
+ static const uint8 kDummyInitData[1] = {0};
+ if (!init_data) {
+ init_data = kDummyInitData;
+ init_data_length = arraysize(kDummyInitData);
+ }
- SetKey(key_id_string, decryption_key.Pass());
+ // TODO(xhwang): For now, use |init_data| for key ID. Make this more spec
+ // compliant later (http://crbug.com/123262, http://crbug.com/123265).
+ std::string key_id_string(reinterpret_cast<const char*>(init_data),
+ init_data_length);
+ if (!AddDecryptionKey(key_id_string, key_string)) {
+ // Error logged in AddDecryptionKey()
+ key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ return;
+ }
+ }
if (!new_audio_key_cb_.is_null())
new_audio_key_cb_.Run();
@@ -306,8 +435,19 @@ void AesDecryptor::DeinitializeDecoder(StreamType stream_type) {
NOTREACHED() << "AesDecryptor does not support audio/video decoding";
}
-void AesDecryptor::SetKey(const std::string& key_id,
- scoped_ptr<DecryptionKey> decryption_key) {
+bool AesDecryptor::AddDecryptionKey(const std::string& key_id,
+ const std::string& key_string) {
+ scoped_ptr<DecryptionKey> decryption_key(new DecryptionKey(key_string));
+ if (!decryption_key) {
+ DVLOG(1) << "Could not create key.";
+ return false;
+ }
+
+ if (!decryption_key->Init()) {
+ DVLOG(1) << "Could not initialize decryption key.";
+ return false;
+ }
+
base::AutoLock auto_lock(key_map_lock_);
KeyMap::iterator found = key_map_.find(key_id);
if (found != key_map_.end()) {
@@ -315,6 +455,7 @@ void AesDecryptor::SetKey(const std::string& key_id,
key_map_.erase(found);
}
key_map_[key_id] = decryption_key.release();
+ return true;
}
AesDecryptor::DecryptionKey* AesDecryptor::GetKey(
diff --git a/media/cdm/aes_decryptor.h b/media/cdm/aes_decryptor.h
index fda5a0faca..3ab4bc0f9f 100644
--- a/media/cdm/aes_decryptor.h
+++ b/media/cdm/aes_decryptor.h
@@ -86,8 +86,10 @@ class MEDIA_EXPORT AesDecryptor : public MediaKeys, public Decryptor {
DISALLOW_COPY_AND_ASSIGN(DecryptionKey);
};
- // Sets |key| for |key_id|. The AesDecryptor takes the ownership of the |key|.
- void SetKey(const std::string& key_id, scoped_ptr<DecryptionKey> key);
+ // Creates a DecryptionKey using |key_string| and associates it with |key_id|.
+ // Returns true if successful.
+ bool AddDecryptionKey(const std::string& key_id,
+ const std::string& key_string);
// Gets a DecryptionKey associated with |key_id|. The AesDecryptor still owns
// the key. Returns NULL if no key is associated with |key_id|.
diff --git a/media/cdm/aes_decryptor_unittest.cc b/media/cdm/aes_decryptor_unittest.cc
index 1edb8e8222..a4b865c469 100644
--- a/media/cdm/aes_decryptor_unittest.cc
+++ b/media/cdm/aes_decryptor_unittest.cc
@@ -16,124 +16,79 @@
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
-using ::testing::ElementsAreArray;
using ::testing::Gt;
using ::testing::IsNull;
using ::testing::NotNull;
using ::testing::SaveArg;
-using ::testing::StrEq;
using ::testing::StrNe;
MATCHER(IsEmpty, "") { return arg.empty(); }
namespace media {
-// |encrypted_data| is encrypted from |plain_text| using |key|. |key_id| is
-// used to distinguish |key|.
-struct WebmEncryptedData {
- uint8 plain_text[32];
- int plain_text_size;
- uint8 key_id[32];
- int key_id_size;
- uint8 key[32];
- int key_size;
- uint8 encrypted_data[64];
- int encrypted_data_size;
-};
-
static const char kClearKeySystem[] = "org.w3.clearkey";
-// Frames 0 & 1 are encrypted with the same key. Frame 2 is encrypted with a
-// different key. Frame 3 is unencrypted.
-const WebmEncryptedData kWebmEncryptedFrames[] = {
- {
- // plaintext
- "Original data.", 14,
- // key_id
- { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x10, 0x11, 0x12, 0x13
- }, 20,
- // key
- { 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
- 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23
- }, 16,
- // encrypted_data
- { 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xf0, 0xd1, 0x12, 0xd5, 0x24, 0x81, 0x96,
- 0x55, 0x1b, 0x68, 0x9f, 0x38, 0x91, 0x85
- }, 23
- }, {
- // plaintext
- "Changed Original data.", 22,
- // key_id
- { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x10, 0x11, 0x12, 0x13
- }, 20,
- // key
- { 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
- 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23
- }, 16,
- // encrypted_data
- { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x57, 0x66, 0xf4, 0x12, 0x1a, 0xed, 0xb5,
- 0x79, 0x1c, 0x8e, 0x25, 0xd7, 0x17, 0xe7, 0x5e,
- 0x16, 0xe3, 0x40, 0x08, 0x27, 0x11, 0xe9
- }, 31
- }, {
- // plaintext
- "Original data.", 14,
- // key_id
- { 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
- 0x2c, 0x2d, 0x2e, 0x2f, 0x30
- }, 13,
- // key
- { 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
- 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40
- }, 16,
- // encrypted_data
- { 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x9c, 0x71, 0x26, 0x57, 0x3e, 0x25, 0x37,
- 0xf7, 0x31, 0x81, 0x19, 0x64, 0xce, 0xbc
- }, 23
- }, {
- // plaintext
- "Changed Original data.", 22,
- // key_id
- { 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
- 0x2c, 0x2d, 0x2e, 0x2f, 0x30
- }, 13,
- // key
- { 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
- 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40
- }, 16,
- // encrypted_data
- { 0x00, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64,
- 0x20, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61,
- 0x6c, 0x20, 0x64, 0x61, 0x74, 0x61, 0x2e
- }, 23
- }
-};
-
-static const uint8 kWebmWrongSizedKey[] = { 0x20, 0x20 };
-
-static const uint8 kSubsampleOriginalData[] = "Original subsample data.";
-static const int kSubsampleOriginalDataSize = 24;
+static const uint8 kOriginalData[] = "Original subsample data.";
+static const int kOriginalDataSize = 24;
-static const uint8 kSubsampleKeyId[] = { 0x00, 0x01, 0x02, 0x03 };
+static const uint8 kKeyId[] = {
+ // base64 equivalent is AAECAw==
+ 0x00, 0x01, 0x02, 0x03
+};
-static const uint8 kSubsampleKey[] = {
- 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
- 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13
+static const uint8 kKey[] = {
+ // base64 equivalent is BAUGBwgJCgsMDQ4PEBESEw==
+ 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
+ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13
};
-static const uint8 kSubsampleIv[] = {
+static const char kKeyAsJWK[] =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAw==\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw==\""
+ " }"
+ " ]"
+ "}";
+
+static const char kWrongKeyAsJWK[] =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAw==\","
+ " \"k\": \"7u7u7u7u7u7u7u7u7u7u7g==\""
+ " }"
+ " ]"
+ "}";
+
+static const char kWrongSizedKeyAsJWK[] =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAw==\","
+ " \"k\": \"AAECAw==\""
+ " }"
+ " ]"
+ "}";
+
+static const uint8 kIv[] = {
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
-// kSubsampleOriginalData encrypted with kSubsampleKey and kSubsampleIv using
+// kOriginalData encrypted with kKey and kIv but without any subsamples (or
+// equivalently using kSubsampleEntriesCypherOnly).
+static const uint8 kEncryptedData[] = {
+ 0x2f, 0x03, 0x09, 0xef, 0x71, 0xaf, 0x31, 0x16,
+ 0xfa, 0x9d, 0x18, 0x43, 0x1e, 0x96, 0x71, 0xb5,
+ 0xbf, 0xf5, 0x30, 0x53, 0x9a, 0x20, 0xdf, 0x95
+};
+
+// kOriginalData encrypted with kSubsampleKey and kSubsampleIv using
// kSubsampleEntriesNormal.
static const uint8 kSubsampleEncryptedData[] = {
0x4f, 0x72, 0x09, 0x16, 0x09, 0xe6, 0x79, 0xad,
@@ -141,25 +96,42 @@ static const uint8 kSubsampleEncryptedData[] = {
0x4d, 0x08, 0xd7, 0x78, 0xa4, 0xa7, 0xf1, 0x2e
};
-// kSubsampleEncryptedData with 8 bytes padding at the beginning.
-static const uint8 kPaddedSubsampleEncryptedData[] = {
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x4f, 0x72, 0x09, 0x16, 0x09, 0xe6, 0x79, 0xad,
- 0x70, 0x73, 0x75, 0x62, 0x09, 0xbb, 0x83, 0x1d,
- 0x4d, 0x08, 0xd7, 0x78, 0xa4, 0xa7, 0xf1, 0x2e
+static const uint8 kOriginalData2[] = "Changed Original data.";
+
+static const uint8 kIv2[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
-// kSubsampleOriginalData encrypted with kSubsampleKey and kSubsampleIv but
-// without any subsamples (or equivalently using kSubsampleEntriesCypherOnly).
-static const uint8 kEncryptedData[] = {
- 0x2f, 0x03, 0x09, 0xef, 0x71, 0xaf, 0x31, 0x16,
- 0xfa, 0x9d, 0x18, 0x43, 0x1e, 0x96, 0x71, 0xb5,
- 0xbf, 0xf5, 0x30, 0x53, 0x9a, 0x20, 0xdf, 0x95
+static const uint8 kKeyId2[] = {
+ // base64 equivalent is AAECAwQFBgcICQoLDA0ODxAREhM=
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13
+};
+
+static const char kKey2AsJWK[] =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM=\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw==\""
+ " }"
+ " ]"
+ "}";
+
+// 'k' in bytes is x14x15x16x17x18x19x1ax1bx1cx1dx1ex1fx20x21x22x23
+
+static const uint8 kEncryptedData2[] = {
+ 0x57, 0x66, 0xf4, 0x12, 0x1a, 0xed, 0xb5, 0x79,
+ 0x1c, 0x8e, 0x25, 0xd7, 0x17, 0xe7, 0x5e, 0x16,
+ 0xe3, 0x40, 0x08, 0x27, 0x11, 0xe9
};
// Subsample entries for testing. The sum of |cypher_bytes| and |clear_bytes| of
-// all entries must be equal to kSubsampleOriginalDataSize to make the subsample
-// entries valid.
+// all entries must be equal to kOriginalDataSize to make the subsample entries
+// valid.
static const SubsampleEntry kSubsampleEntriesNormal[] = {
{ 2, 7 },
@@ -167,6 +139,18 @@ static const SubsampleEntry kSubsampleEntriesNormal[] = {
{ 1, 0 }
};
+static const SubsampleEntry kSubsampleEntriesWrongSize[] = {
+ { 3, 6 }, // This entry doesn't match the correct entry.
+ { 3, 11 },
+ { 1, 0 }
+};
+
+static const SubsampleEntry kSubsampleEntriesInvalidTotalSize[] = {
+ { 1, 1000 }, // This entry is too large.
+ { 3, 11 },
+ { 1, 0 }
+};
+
static const SubsampleEntry kSubsampleEntriesClearOnly[] = {
{ 7, 0 },
{ 8, 0 },
@@ -179,74 +163,24 @@ static const SubsampleEntry kSubsampleEntriesCypherOnly[] = {
{ 0, 10 }
};
-// Generates a 16 byte CTR counter block. The CTR counter block format is a
-// CTR IV appended with a CTR block counter. |iv| is an 8 byte CTR IV.
-// |iv_size| is the size of |iv| in bytes. Returns a string of
-// kDecryptionKeySize bytes.
-static std::string GenerateCounterBlock(const uint8* iv, int iv_size) {
- CHECK_GT(iv_size, 0);
- CHECK_LE(iv_size, DecryptConfig::kDecryptionKeySize);
-
- std::string counter_block(reinterpret_cast<const char*>(iv), iv_size);
- counter_block.append(DecryptConfig::kDecryptionKeySize - iv_size, 0);
- return counter_block;
-}
-
-// Creates a WebM encrypted buffer that the demuxer would pass to the
-// decryptor. |data| is the payload of a WebM encrypted Block. |key_id| is
-// initialization data from the WebM file. Every encrypted Block has
-// a signal byte prepended to a frame. If the frame is encrypted then an IV is
-// prepended to the Block. Current encrypted WebM request for comments
-// specification is here
-// http://wiki.webmproject.org/encryption/webm-encryption-rfc
-static scoped_refptr<DecoderBuffer> CreateWebMEncryptedBuffer(
- const uint8* data, int data_size,
- const uint8* key_id, int key_id_size) {
- scoped_refptr<DecoderBuffer> encrypted_buffer = DecoderBuffer::CopyFrom(
- data, data_size);
- CHECK(encrypted_buffer.get());
- DCHECK_EQ(kWebMSignalByteSize, 1);
-
- uint8 signal_byte = data[0];
- int data_offset = kWebMSignalByteSize;
-
- // Setting the DecryptConfig object of the buffer while leaving the
- // initialization vector empty will tell the decryptor that the frame is
- // unencrypted.
- std::string counter_block_str;
-
- if (signal_byte & kWebMFlagEncryptedFrame) {
- counter_block_str = GenerateCounterBlock(data + data_offset, kWebMIvSize);
- data_offset += kWebMIvSize;
- }
-
- encrypted_buffer->set_decrypt_config(
- scoped_ptr<DecryptConfig>(new DecryptConfig(
- std::string(reinterpret_cast<const char*>(key_id), key_id_size),
- counter_block_str,
- data_offset,
- std::vector<SubsampleEntry>())));
- return encrypted_buffer;
-}
-
-// TODO(xhwang): Refactor this function to encapsulate more details about
-// creating an encrypted DecoderBuffer with subsamples so we don't have so much
-// boilerplate code in each test before calling this function.
-static scoped_refptr<DecoderBuffer> CreateSubsampleEncryptedBuffer(
- const uint8* data, int data_size,
- const uint8* key_id, int key_id_size,
- const uint8* iv, int iv_size,
- int data_offset,
+static scoped_refptr<DecoderBuffer> CreateEncryptedBuffer(
+ const std::vector<uint8>& data,
+ const std::vector<uint8>& key_id,
+ const std::vector<uint8>& iv,
+ int offset,
const std::vector<SubsampleEntry>& subsample_entries) {
- scoped_refptr<DecoderBuffer> encrypted_buffer =
- DecoderBuffer::CopyFrom(data, data_size);
+ DCHECK(!data.empty());
+ int padded_size = offset + data.size();
+ scoped_refptr<DecoderBuffer> encrypted_buffer(new DecoderBuffer(padded_size));
+ memcpy(encrypted_buffer->writable_data() + offset, &data[0], data.size());
CHECK(encrypted_buffer.get());
- encrypted_buffer->set_decrypt_config(
- scoped_ptr<DecryptConfig>(new DecryptConfig(
- std::string(reinterpret_cast<const char*>(key_id), key_id_size),
- std::string(reinterpret_cast<const char*>(iv), iv_size),
- data_offset,
- subsample_entries)));
+ std::string key_id_string(
+ reinterpret_cast<const char*>(key_id.empty() ? NULL : &key_id[0]),
+ key_id.size());
+ std::string iv_string(
+ reinterpret_cast<const char*>(iv.empty() ? NULL : &iv[0]), iv.size());
+ encrypted_buffer->set_decrypt_config(scoped_ptr<DecryptConfig>(
+ new DecryptConfig(key_id_string, iv_string, offset, subsample_entries)));
return encrypted_buffer;
}
@@ -259,78 +193,116 @@ class AesDecryptorTest : public testing::Test {
base::Bind(&AesDecryptorTest::KeyMessage, base::Unretained(this))),
decrypt_cb_(base::Bind(&AesDecryptorTest::BufferDecrypted,
base::Unretained(this))),
- subsample_entries_normal_(
+ original_data_(kOriginalData, kOriginalData + kOriginalDataSize),
+ encrypted_data_(kEncryptedData,
+ kEncryptedData + arraysize(kEncryptedData)),
+ subsample_encrypted_data_(
+ kSubsampleEncryptedData,
+ kSubsampleEncryptedData + arraysize(kSubsampleEncryptedData)),
+ key_id_(kKeyId, kKeyId + arraysize(kKeyId)),
+ iv_(kIv, kIv + arraysize(kIv)),
+ normal_subsample_entries_(
kSubsampleEntriesNormal,
kSubsampleEntriesNormal + arraysize(kSubsampleEntriesNormal)) {
}
protected:
- void GenerateKeyRequest(const uint8* key_id, int key_id_size) {
- EXPECT_CALL(*this, KeyMessage(
- StrNe(std::string()), ElementsAreArray(key_id, key_id_size), ""))
+ void GenerateKeyRequest(const std::vector<uint8>& key_id) {
+ DCHECK(!key_id.empty());
+ EXPECT_CALL(*this, KeyMessage(StrNe(std::string()), key_id, ""))
.WillOnce(SaveArg<0>(&session_id_string_));
EXPECT_TRUE(decryptor_.GenerateKeyRequest(
- std::string(), key_id, key_id_size));
+ std::string(), &key_id[0], key_id.size()));
}
- void AddKeyAndExpectToSucceed(const uint8* key_id, int key_id_size,
- const uint8* key, int key_size) {
- EXPECT_CALL(*this, KeyAdded(session_id_string_));
- decryptor_.AddKey(key, key_size, key_id, key_id_size,
+ enum AddKeyExpectation {
+ KEY_ADDED,
+ KEY_ERROR
+ };
+
+ void AddRawKeyAndExpect(const std::vector<uint8>& key_id,
+ const std::vector<uint8>& key,
+ AddKeyExpectation result) {
+ // TODO(jrummell): Remove once raw keys no longer supported.
+ DCHECK(!key_id.empty());
+ DCHECK(!key.empty());
+
+ if (result == KEY_ADDED) {
+ EXPECT_CALL(*this, KeyAdded(session_id_string_));
+ } else if (result == KEY_ERROR) {
+ EXPECT_CALL(*this, KeyError(session_id_string_,
+ MediaKeys::kUnknownError, 0));
+ } else {
+ NOTREACHED();
+ }
+
+ decryptor_.AddKey(&key[0], key.size(), &key_id[0], key_id.size(),
session_id_string_);
}
- void AddKeyAndExpectToFail(const uint8* key_id, int key_id_size,
- const uint8* key, int key_size) {
- EXPECT_CALL(*this, KeyError(session_id_string_,
- MediaKeys::kUnknownError, 0));
- decryptor_.AddKey(key, key_size, key_id, key_id_size, session_id_string_);
+ void AddKeyAndExpect(const std::string& key, AddKeyExpectation result) {
+ DCHECK(!key.empty());
+
+ if (result == KEY_ADDED) {
+ EXPECT_CALL(*this, KeyAdded(session_id_string_));
+ } else if (result == KEY_ERROR) {
+ EXPECT_CALL(*this,
+ KeyError(session_id_string_, MediaKeys::kUnknownError, 0));
+ } else {
+ NOTREACHED();
+ }
+
+ decryptor_.AddKey(reinterpret_cast<const uint8*>(key.c_str()), key.length(),
+ NULL, 0,
+ session_id_string_);
}
MOCK_METHOD2(BufferDecrypted, void(Decryptor::Status,
const scoped_refptr<DecoderBuffer>&));
- void DecryptAndExpectToSucceed(const scoped_refptr<DecoderBuffer>& encrypted,
- const uint8* plain_text, int plain_text_size) {
- scoped_refptr<DecoderBuffer> decrypted;
- EXPECT_CALL(*this, BufferDecrypted(AesDecryptor::kSuccess, NotNull()))
- .WillOnce(SaveArg<1>(&decrypted));
-
- decryptor_.Decrypt(Decryptor::kVideo, encrypted, decrypt_cb_);
- ASSERT_TRUE(decrypted.get());
- ASSERT_EQ(plain_text_size, decrypted->data_size());
- EXPECT_EQ(0, memcmp(plain_text, decrypted->data(), plain_text_size));
- }
+ enum DecryptExpectation {
+ SUCCESS,
+ DATA_MISMATCH,
+ DATA_AND_SIZE_MISMATCH,
+ DECRYPT_ERROR
+ };
- void DecryptAndExpectDataMismatch(
- const scoped_refptr<DecoderBuffer>& encrypted,
- const uint8* plain_text, int plain_text_size) {
+ void DecryptAndExpect(const scoped_refptr<DecoderBuffer>& encrypted,
+ const std::vector<uint8>& plain_text,
+ DecryptExpectation result) {
scoped_refptr<DecoderBuffer> decrypted;
- EXPECT_CALL(*this, BufferDecrypted(AesDecryptor::kSuccess, NotNull()))
- .WillOnce(SaveArg<1>(&decrypted));
-
- decryptor_.Decrypt(Decryptor::kVideo, encrypted, decrypt_cb_);
- ASSERT_TRUE(decrypted.get());
- ASSERT_EQ(plain_text_size, decrypted->data_size());
- EXPECT_NE(0, memcmp(plain_text, decrypted->data(), plain_text_size));
- }
- void DecryptAndExpectSizeDataMismatch(
- const scoped_refptr<DecoderBuffer>& encrypted,
- const uint8* plain_text, int plain_text_size) {
- scoped_refptr<DecoderBuffer> decrypted;
- EXPECT_CALL(*this, BufferDecrypted(AesDecryptor::kSuccess, NotNull()))
- .WillOnce(SaveArg<1>(&decrypted));
+ if (result != DECRYPT_ERROR) {
+ EXPECT_CALL(*this, BufferDecrypted(Decryptor::kSuccess, NotNull()))
+ .WillOnce(SaveArg<1>(&decrypted));
+ } else {
+ EXPECT_CALL(*this, BufferDecrypted(Decryptor::kError, IsNull()))
+ .WillOnce(SaveArg<1>(&decrypted));
+ }
decryptor_.Decrypt(Decryptor::kVideo, encrypted, decrypt_cb_);
- ASSERT_TRUE(decrypted.get());
- EXPECT_NE(plain_text_size, decrypted->data_size());
- EXPECT_NE(0, memcmp(plain_text, decrypted->data(), plain_text_size));
- }
- void DecryptAndExpectToFail(const scoped_refptr<DecoderBuffer>& encrypted) {
- EXPECT_CALL(*this, BufferDecrypted(AesDecryptor::kError, IsNull()));
- decryptor_.Decrypt(Decryptor::kVideo, encrypted, decrypt_cb_);
+ std::vector<uint8> decrypted_text;
+ if (decrypted && decrypted->data_size()) {
+ decrypted_text.assign(
+ decrypted->data(), decrypted->data() + decrypted->data_size());
+ }
+
+ switch (result) {
+ case SUCCESS:
+ EXPECT_EQ(plain_text, decrypted_text);
+ break;
+ case DATA_MISMATCH:
+ EXPECT_EQ(plain_text.size(), decrypted_text.size());
+ EXPECT_NE(plain_text, decrypted_text);
+ break;
+ case DATA_AND_SIZE_MISMATCH:
+ EXPECT_NE(plain_text.size(), decrypted_text.size());
+ break;
+ case DECRYPT_ERROR:
+ EXPECT_TRUE(decrypted_text.empty());
+ break;
+ }
}
MOCK_METHOD1(KeyAdded, void(const std::string&));
@@ -343,7 +315,15 @@ class AesDecryptorTest : public testing::Test {
AesDecryptor decryptor_;
std::string session_id_string_;
AesDecryptor::DecryptCB decrypt_cb_;
- std::vector<SubsampleEntry> subsample_entries_normal_;
+
+ // Constants for testing.
+ const std::vector<uint8> original_data_;
+ const std::vector<uint8> encrypted_data_;
+ const std::vector<uint8> subsample_encrypted_data_;
+ const std::vector<uint8> key_id_;
+ const std::vector<uint8> iv_;
+ const std::vector<SubsampleEntry> normal_subsample_entries_;
+ const std::vector<SubsampleEntry> no_subsample_entries_;
};
TEST_F(AesDecryptorTest, GenerateKeyRequestWithNullInitData) {
@@ -351,314 +331,311 @@ TEST_F(AesDecryptorTest, GenerateKeyRequestWithNullInitData) {
EXPECT_TRUE(decryptor_.GenerateKeyRequest(std::string(), NULL, 0));
}
-TEST_F(AesDecryptorTest, NormalWebMDecryption) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(frame.encrypted_data,
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
+TEST_F(AesDecryptorTest, NormalDecryption) {
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
-TEST_F(AesDecryptorTest, UnencryptedFrameWebMDecryption) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[3];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(frame.encrypted_data,
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
+TEST_F(AesDecryptorTest, DecryptionWithOffset) {
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 23, no_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
+}
+
+TEST_F(AesDecryptorTest, UnencryptedFrame) {
+ // An empty iv string signals that the frame is unencrypted.
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ original_data_, key_id_, std::vector<uint8>(), 0, no_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
TEST_F(AesDecryptorTest, WrongKey) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
-
- // Change the first byte of the key.
- std::vector<uint8> wrong_key(frame.key, frame.key + frame.key_size);
- wrong_key[0]++;
-
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- &wrong_key[0], frame.key_size);
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(frame.encrypted_data,
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectDataMismatch(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kWrongKeyAsJWK, KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
TEST_F(AesDecryptorTest, NoKey) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
-
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(frame.encrypted_data, frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
EXPECT_CALL(*this, BufferDecrypted(AesDecryptor::kNoKey, IsNull()));
- decryptor_.Decrypt(Decryptor::kVideo, encrypted_data, decrypt_cb_);
+ decryptor_.Decrypt(Decryptor::kVideo, encrypted_buffer, decrypt_cb_);
}
TEST_F(AesDecryptorTest, KeyReplacement) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
-
- // Change the first byte of the key.
- std::vector<uint8> wrong_key(frame.key, frame.key + frame.key_size);
- wrong_key[0]++;
-
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- &wrong_key[0], frame.key_size);
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(frame.encrypted_data,
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectDataMismatch(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
+ GenerateKeyRequest(key_id_);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+
+ AddKeyAndExpect(kWrongKeyAsJWK, KEY_ADDED);
+ ASSERT_NO_FATAL_FAILURE(DecryptAndExpect(
+ encrypted_buffer, original_data_, DATA_MISMATCH));
+
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ ASSERT_NO_FATAL_FAILURE(
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
}
TEST_F(AesDecryptorTest, WrongSizedKey) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToFail(frame.key_id, frame.key_id_size,
- kWebmWrongSizedKey, arraysize(kWebmWrongSizedKey));
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kWrongSizedKeyAsJWK, KEY_ERROR);
+
+ // Repeat for a raw key. Use "-1" to create a wrong sized key.
+ std::vector<uint8> wrong_sized_key(kKey, kKey + arraysize(kKey) - 1);
+ AddRawKeyAndExpect(key_id_, wrong_sized_key, KEY_ERROR);
}
TEST_F(AesDecryptorTest, MultipleKeysAndFrames) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(frame.encrypted_data,
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
-
- const WebmEncryptedData& frame2 = kWebmEncryptedFrames[2];
- GenerateKeyRequest(frame2.key_id, frame2.key_id_size);
- AddKeyAndExpectToSucceed(frame2.key_id, frame2.key_id_size,
- frame2.key, frame2.key_size);
-
- const WebmEncryptedData& frame1 = kWebmEncryptedFrames[1];
- scoped_refptr<DecoderBuffer> encrypted_data1 =
- CreateWebMEncryptedBuffer(frame1.encrypted_data,
- frame1.encrypted_data_size,
- frame1.key_id, frame1.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data1,
- frame1.plain_text,
- frame1.plain_text_size));
-
- scoped_refptr<DecoderBuffer> encrypted_data2 =
- CreateWebMEncryptedBuffer(frame2.encrypted_data,
- frame2.encrypted_data_size,
- frame2.key_id, frame2.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data2,
- frame2.plain_text,
- frame2.plain_text_size));
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 10, no_subsample_entries_);
+ ASSERT_NO_FATAL_FAILURE(
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
+
+ AddKeyAndExpect(kKey2AsJWK, KEY_ADDED);
+
+ // The first key is still available after we added a second key.
+ ASSERT_NO_FATAL_FAILURE(
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
+
+ // The second key is also available.
+ encrypted_buffer = CreateEncryptedBuffer(
+ std::vector<uint8>(kEncryptedData2,
+ kEncryptedData2 + arraysize(kEncryptedData2)),
+ std::vector<uint8>(kKeyId2, kKeyId2 + arraysize(kKeyId2)),
+ std::vector<uint8>(kIv2, kIv2 + arraysize(kIv2)),
+ 30,
+ no_subsample_entries_);
+ ASSERT_NO_FATAL_FAILURE(DecryptAndExpect(
+ encrypted_buffer,
+ std::vector<uint8>(kOriginalData2,
+ kOriginalData2 + arraysize(kOriginalData2) - 1),
+ SUCCESS));
}
TEST_F(AesDecryptorTest, CorruptedIv) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
-
- // Change byte 13 to modify the IV. Bytes 13-20 of WebM encrypted data
- // contains the IV.
- std::vector<uint8> frame_with_bad_iv(
- frame.encrypted_data, frame.encrypted_data + frame.encrypted_data_size);
- frame_with_bad_iv[1]++;
-
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(&frame_with_bad_iv[0],
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectDataMismatch(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+
+ std::vector<uint8> bad_iv = iv_;
+ bad_iv[1]++;
+
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, bad_iv, 0, no_subsample_entries_);
+
+ DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
TEST_F(AesDecryptorTest, CorruptedData) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
-
- // Change last byte to modify the data. Bytes 21+ of WebM encrypted data
- // contains the encrypted frame.
- std::vector<uint8> frame_with_bad_vp8_data(
- frame.encrypted_data, frame.encrypted_data + frame.encrypted_data_size);
- frame_with_bad_vp8_data[frame.encrypted_data_size - 1]++;
-
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(&frame_with_bad_vp8_data[0],
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectDataMismatch(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
-}
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
-TEST_F(AesDecryptorTest, EncryptedAsUnencryptedFailure) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
-
- // Change signal byte from an encrypted frame to an unencrypted frame. Byte
- // 12 of WebM encrypted data contains the signal byte.
- std::vector<uint8> frame_with_wrong_signal_byte(
- frame.encrypted_data, frame.encrypted_data + frame.encrypted_data_size);
- frame_with_wrong_signal_byte[0] = 0;
-
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(&frame_with_wrong_signal_byte[0],
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(
- DecryptAndExpectSizeDataMismatch(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
+ std::vector<uint8> bad_data = encrypted_data_;
+ bad_data[1]++;
+
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ bad_data, key_id_, iv_, 0, no_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
-TEST_F(AesDecryptorTest, UnencryptedAsEncryptedFailure) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[3];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
-
- // Change signal byte from an unencrypted frame to an encrypted frame. Byte
- // 0 of WebM encrypted data contains the signal byte.
- std::vector<uint8> frame_with_wrong_signal_byte(
- frame.encrypted_data, frame.encrypted_data + frame.encrypted_data_size);
- frame_with_wrong_signal_byte[0] = kWebMFlagEncryptedFrame;
-
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(&frame_with_wrong_signal_byte[0],
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(
- DecryptAndExpectSizeDataMismatch(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
+TEST_F(AesDecryptorTest, EncryptedAsUnencryptedFailure) {
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, std::vector<uint8>(), 0, no_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
TEST_F(AesDecryptorTest, SubsampleDecryption) {
- GenerateKeyRequest(kSubsampleKeyId, arraysize(kSubsampleKeyId));
- AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleKey, arraysize(kSubsampleKey));
- scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kSubsampleEncryptedData, arraysize(kSubsampleEncryptedData),
- kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleIv, arraysize(kSubsampleIv),
- 0,
- subsample_entries_normal_);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(
- encrypted_data, kSubsampleOriginalData, kSubsampleOriginalDataSize));
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ subsample_encrypted_data_, key_id_, iv_, 0, normal_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
// Ensures noninterference of data offset and subsample mechanisms. We never
// expect to encounter this in the wild, but since the DecryptConfig doesn't
// disallow such a configuration, it should be covered.
TEST_F(AesDecryptorTest, SubsampleDecryptionWithOffset) {
- GenerateKeyRequest(kSubsampleKeyId, arraysize(kSubsampleKeyId));
- AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleKey, arraysize(kSubsampleKey));
- scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kPaddedSubsampleEncryptedData, arraysize(kPaddedSubsampleEncryptedData),
- kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleIv, arraysize(kSubsampleIv),
- arraysize(kPaddedSubsampleEncryptedData)
- - arraysize(kSubsampleEncryptedData),
- subsample_entries_normal_);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(
- encrypted_data, kSubsampleOriginalData, kSubsampleOriginalDataSize));
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ subsample_encrypted_data_, key_id_, iv_, 23, normal_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
-// No subsample or offset.
-TEST_F(AesDecryptorTest, NormalDecryption) {
- GenerateKeyRequest(kSubsampleKeyId, arraysize(kSubsampleKeyId));
- AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleKey, arraysize(kSubsampleKey));
- scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kEncryptedData, arraysize(kEncryptedData),
- kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleIv, arraysize(kSubsampleIv),
- 0,
- std::vector<SubsampleEntry>());
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(
- encrypted_data, kSubsampleOriginalData, kSubsampleOriginalDataSize));
+TEST_F(AesDecryptorTest, SubsampleWrongSize) {
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+
+ std::vector<SubsampleEntry> subsample_entries_wrong_size(
+ kSubsampleEntriesWrongSize,
+ kSubsampleEntriesWrongSize + arraysize(kSubsampleEntriesWrongSize));
+
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ subsample_encrypted_data_, key_id_, iv_, 0, subsample_entries_wrong_size);
+ DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
-TEST_F(AesDecryptorTest, IncorrectSubsampleSize) {
- GenerateKeyRequest(kSubsampleKeyId, arraysize(kSubsampleKeyId));
- AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleKey, arraysize(kSubsampleKey));
- std::vector<SubsampleEntry> entries = subsample_entries_normal_;
- entries[2].cypher_bytes += 1;
-
- scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kSubsampleEncryptedData, arraysize(kSubsampleEncryptedData),
- kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleIv, arraysize(kSubsampleIv),
- 0,
- entries);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToFail(encrypted_data));
+TEST_F(AesDecryptorTest, SubsampleInvalidTotalSize) {
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+
+ std::vector<SubsampleEntry> subsample_entries_invalid_total_size(
+ kSubsampleEntriesInvalidTotalSize,
+ kSubsampleEntriesInvalidTotalSize +
+ arraysize(kSubsampleEntriesInvalidTotalSize));
+
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ subsample_encrypted_data_, key_id_, iv_, 0,
+ subsample_entries_invalid_total_size);
+ DecryptAndExpect(encrypted_buffer, original_data_, DECRYPT_ERROR);
}
// No cypher bytes in any of the subsamples.
TEST_F(AesDecryptorTest, SubsampleClearBytesOnly) {
- GenerateKeyRequest(kSubsampleKeyId, arraysize(kSubsampleKeyId));
- AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleKey, arraysize(kSubsampleKey));
- std::vector<SubsampleEntry> subsample_entries_clear_only(
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+
+ std::vector<SubsampleEntry> clear_only_subsample_entries(
kSubsampleEntriesClearOnly,
kSubsampleEntriesClearOnly + arraysize(kSubsampleEntriesClearOnly));
- scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kSubsampleOriginalData, kSubsampleOriginalDataSize,
- kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleIv, arraysize(kSubsampleIv),
- 0,
- subsample_entries_clear_only);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data,
- kSubsampleOriginalData, kSubsampleOriginalDataSize));
+
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ original_data_, key_id_, iv_, 0, clear_only_subsample_entries);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
// No clear bytes in any of the subsamples.
TEST_F(AesDecryptorTest, SubsampleCypherBytesOnly) {
- GenerateKeyRequest(kSubsampleKeyId, arraysize(kSubsampleKeyId));
- AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleKey, arraysize(kSubsampleKey));
- std::vector<SubsampleEntry> subsample_entries_cypher_only(
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+
+ std::vector<SubsampleEntry> cypher_only_subsample_entries(
kSubsampleEntriesCypherOnly,
kSubsampleEntriesCypherOnly + arraysize(kSubsampleEntriesCypherOnly));
- scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kEncryptedData, arraysize(kEncryptedData),
- kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleIv, arraysize(kSubsampleIv),
- 0,
- subsample_entries_cypher_only);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data,
- kSubsampleOriginalData, kSubsampleOriginalDataSize));
+
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, cypher_only_subsample_entries);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
+}
+
+TEST_F(AesDecryptorTest, JWKKey) {
+ // Try a simple JWK key (i.e. not in a set)
+ const std::string key1 =
+ "{"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM=\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw==\""
+ "}";
+ AddKeyAndExpect(key1, KEY_ERROR);
+
+ // Try a key list with multiple entries.
+ const std::string key2 =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM=\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw==\""
+ " },"
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"JCUmJygpKissLS4vMA==\","
+ " \"k\":\"MTIzNDU2Nzg5Ojs8PT4/QA==\""
+ " }"
+ " ]"
+ "}";
+ AddKeyAndExpect(key2, KEY_ADDED);
+
+ // Try a key with no spaces and some \n plus additional fields.
+ const std::string key3 =
+ "\n\n{\"something\":1,\"keys\":[{\n\n\"kty\":\"oct\",\"alg\":\"A128KW\","
+ "\"kid\":\"AAECAwQFBgcICQoLDA0ODxAREhM=\",\"k\":\"GawgguFyGrWKav7AX4VKUg="
+ "=\",\"foo\":\"bar\"}]}\n\n";
+ AddKeyAndExpect(key3, KEY_ADDED);
+
+ // Try some non-ASCII characters.
+ AddKeyAndExpect("This is not ASCII due to \xff\xfe\xfd in it.", KEY_ERROR);
+
+ // Try a badly formatted key. Assume that the JSON parser is fully tested,
+ // so we won't try a lot of combinations. However, need a test to ensure
+ // that the code doesn't crash if invalid JSON received.
+ AddKeyAndExpect("This is not a JSON key.", KEY_ERROR);
+
+ // Try passing some valid JSON that is not a dictionary at the top level.
+ AddKeyAndExpect("40", KEY_ERROR);
+
+ // Try an empty dictionary.
+ AddKeyAndExpect("{ }", KEY_ERROR);
+
+ // Try an empty 'keys' dictionary.
+ AddKeyAndExpect("{ \"keys\": [] }", KEY_ERROR);
+
+ // Try with 'keys' not a dictionary.
+ AddKeyAndExpect("{ \"keys\":\"1\" }", KEY_ERROR);
+
+ // Try with 'keys' a list of integers.
+ AddKeyAndExpect("{ \"keys\": [ 1, 2, 3 ] }", KEY_ERROR);
+
+ // Try a key missing padding(=) at end of base64 string.
+ const std::string key4 =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAw==\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
+ " }"
+ " ]"
+ "}";
+ AddKeyAndExpect(key4, KEY_ERROR);
+
+ // Try a key ID missing padding(=) at end of base64 string.
+ const std::string key5 =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAw\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw==\""
+ " }"
+ " ]"
+ "}";
+ AddKeyAndExpect(key5, KEY_ERROR);
+
+ // Try a key with invalid base64 encoding.
+ const std::string key6 =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"!@#$%^&*()==\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw==\""
+ " }"
+ " ]"
+ "}";
+ AddKeyAndExpect(key6, KEY_ERROR);
+}
+
+TEST_F(AesDecryptorTest, RawKey) {
+ // Verify that v0.1b keys (raw key) is still supported. Raw keys are
+ // 16 bytes long. Use the undecoded value of |kKey|.
+ GenerateKeyRequest(key_id_);
+ AddRawKeyAndExpect(
+ key_id_, std::vector<uint8>(kKey, kKey + arraysize(kKey)), KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
} // namespace media
diff --git a/media/ffmpeg/ffmpeg_common.cc b/media/ffmpeg/ffmpeg_common.cc
index 9693bbb4de..72b31252f8 100644
--- a/media/ffmpeg/ffmpeg_common.cc
+++ b/media/ffmpeg/ffmpeg_common.cc
@@ -291,7 +291,9 @@ static void AVCodecContextToAudioDecoderConfig(
codec_context->extradata,
codec_context->extradata_size,
is_encrypted,
- record_stats);
+ record_stats,
+ base::TimeDelta(),
+ base::TimeDelta());
if (codec != kCodecOpus) {
DCHECK_EQ(av_get_bytes_per_sample(codec_context->sample_fmt) * 8,
config->bits_per_channel());
diff --git a/media/filters/decrypting_audio_decoder.cc b/media/filters/decrypting_audio_decoder.cc
index f516674a50..2c144b4fc7 100644
--- a/media/filters/decrypting_audio_decoder.cc
+++ b/media/filters/decrypting_audio_decoder.cc
@@ -191,7 +191,9 @@ void DecryptingAudioDecoder::SetDecryptor(Decryptor* decryptor) {
input_config.extra_data(),
input_config.extra_data_size(),
input_config.is_encrypted(),
- false);
+ false,
+ base::TimeDelta(),
+ base::TimeDelta());
state_ = kPendingDecoderInit;
decryptor_->InitializeAudioDecoder(
@@ -282,7 +284,9 @@ void DecryptingAudioDecoder::DecryptAndDecodeBuffer(
input_config.extra_data(),
input_config.extra_data_size(),
input_config.is_encrypted(),
- false);
+ false,
+ base::TimeDelta(),
+ base::TimeDelta());
state_ = kPendingConfigChange;
decryptor_->DeinitializeDecoder(Decryptor::kAudio);
diff --git a/media/filters/decrypting_audio_decoder_unittest.cc b/media/filters/decrypting_audio_decoder_unittest.cc
index fb97b91572..2f07e231c0 100644
--- a/media/filters/decrypting_audio_decoder_unittest.cc
+++ b/media/filters/decrypting_audio_decoder_unittest.cc
@@ -113,7 +113,8 @@ class DecryptingAudioDecoderTest : public testing::Test {
.WillOnce(SaveArg<1>(&key_added_cb_));
config_.Initialize(kCodecVorbis, kSampleFormatPlanarF32,
- CHANNEL_LAYOUT_STEREO, 44100, NULL, 0, true, true);
+ CHANNEL_LAYOUT_STEREO, 44100, NULL, 0, true, true,
+ base::TimeDelta(), base::TimeDelta());
InitializeAndExpectStatus(config_, PIPELINE_OK);
EXPECT_EQ(DecryptingAudioDecoder::kSupportedBitsPerChannel,
diff --git a/media/filters/decrypting_demuxer_stream.cc b/media/filters/decrypting_demuxer_stream.cc
index 1f183ceb28..39386e075e 100644
--- a/media/filters/decrypting_demuxer_stream.cc
+++ b/media/filters/decrypting_demuxer_stream.cc
@@ -302,7 +302,9 @@ void DecryptingDemuxerStream::InitializeDecoderConfig() {
input_audio_config.extra_data(),
input_audio_config.extra_data_size(),
false, // Output audio is not encrypted.
- false);
+ false,
+ base::TimeDelta(),
+ base::TimeDelta());
break;
}
diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc
index cce22b784a..f922e984b0 100644
--- a/media/filters/ffmpeg_audio_decoder.cc
+++ b/media/filters/ffmpeg_audio_decoder.cc
@@ -357,6 +357,7 @@ bool FFmpegAudioDecoder::ConfigureDecoder() {
codec_context_->opaque = this;
codec_context_->get_buffer2 = GetAudioBufferImpl;
+ codec_context_->refcounted_frames = 1;
AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
if (!codec || avcodec_open2(codec_context_, codec, NULL) < 0) {
@@ -396,10 +397,8 @@ void FFmpegAudioDecoder::ReleaseFFmpegResources() {
av_free(codec_context_);
}
- if (av_frame_) {
- av_free(av_frame_);
- av_frame_ = NULL;
- }
+ if (av_frame_)
+ av_frame_free(&av_frame_);
}
void FFmpegAudioDecoder::ResetTimestampState() {
@@ -426,9 +425,6 @@ void FFmpegAudioDecoder::RunDecodeLoop(
// want to hand it to the decoder at least once, otherwise we would end up
// skipping end of stream packets since they have a size of zero.
do {
- // Reset frame to default values.
- avcodec_get_frame_defaults(av_frame_);
-
int frame_decoded = 0;
int result = avcodec_decode_audio4(
codec_context_, av_frame_, &frame_decoded, &packet);
@@ -487,6 +483,7 @@ void FFmpegAudioDecoder::RunDecodeLoop(
// This is an unrecoverable error, so bail out.
QueuedAudioBuffer queue_entry = { kDecodeError, NULL };
queued_audio_.push_back(queue_entry);
+ av_frame_unref(av_frame_);
break;
}
@@ -509,8 +506,11 @@ void FFmpegAudioDecoder::RunDecodeLoop(
}
decoded_frames = output->frame_count();
+ av_frame_unref(av_frame_);
}
+ // WARNING: |av_frame_| no longer has valid data at this point.
+
if (decoded_frames > 0) {
// Set the timestamp/duration once all the extra frames have been
// discarded.
diff --git a/media/filters/ffmpeg_demuxer.cc b/media/filters/ffmpeg_demuxer.cc
index 30cb0c0029..723eb5f28d 100644
--- a/media/filters/ffmpeg_demuxer.cc
+++ b/media/filters/ffmpeg_demuxer.cc
@@ -595,8 +595,6 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
media_log_->SetStringProperty("audio_codec_name", codec->name);
}
- media_log_->SetIntegerProperty("audio_sample_rate",
- audio_codec->sample_rate);
media_log_->SetIntegerProperty("audio_channels_count",
audio_codec->channels);
media_log_->SetIntegerProperty("audio_samples_per_second",
@@ -637,8 +635,6 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
media_log_->SetDoubleProperty("max_duration", max_duration.InSecondsF());
media_log_->SetDoubleProperty("start_time", start_time_.InSecondsF());
- media_log_->SetDoubleProperty("filesize_in_bytes",
- static_cast<double>(filesize_in_bytes));
media_log_->SetIntegerProperty("bitrate", bitrate_);
status_cb.Run(PIPELINE_OK);
diff --git a/media/filters/opus_audio_decoder.cc b/media/filters/opus_audio_decoder.cc
index 115799ab71..b3e903b231 100644
--- a/media/filters/opus_audio_decoder.cc
+++ b/media/filters/opus_audio_decoder.cc
@@ -4,6 +4,8 @@
#include "media/filters/opus_audio_decoder.h"
+#include <cmath>
+
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/location.h"
@@ -250,7 +252,6 @@ OpusAudioDecoder::OpusAudioDecoder(
channel_layout_(CHANNEL_LAYOUT_NONE),
samples_per_second_(0),
last_input_timestamp_(kNoTimestamp()),
- output_bytes_to_drop_(0),
skip_samples_(0) {
}
@@ -457,10 +458,24 @@ bool OpusAudioDecoder::ConfigureDecoder() {
config,
&opus_header);
- skip_samples_ = opus_header.skip_samples;
-
- if (skip_samples_ > 0)
- output_bytes_to_drop_ = skip_samples_ * config.bytes_per_frame();
+ if (!config.codec_delay().InMicroseconds()) {
+ // TODO(vigneshv): Replace this with return false once ffmpeg demuxer code
+ // starts populating the config correctly.
+ skip_samples_ = opus_header.skip_samples;
+ } else {
+ // Convert from seconds to samples.
+ skip_samples_ = std::ceil(config.codec_delay().InMicroseconds() *
+ config.samples_per_second() / 1000000.0);
+ if (skip_samples_ < 0) {
+ DVLOG(1) << "Invalid file. Incorrect value for codec delay.";
+ return false;
+ }
+ if (skip_samples_ != opus_header.skip_samples) {
+ DVLOG(1) << "Invalid file. Codec Delay in container does not match the "
+ << "value in Opus header.";
+ return false;
+ }
+ }
uint8 channel_mapping[kMaxVorbisChannels];
memcpy(&channel_mapping,
@@ -487,9 +502,6 @@ bool OpusAudioDecoder::ConfigureDecoder() {
return false;
}
- // TODO(tomfinegan): Handle audio delay once the matroska spec is updated
- // to represent the value.
-
bits_per_channel_ = config.bits_per_channel();
channel_layout_ = config.channel_layout();
samples_per_second_ = config.samples_per_second();
@@ -508,7 +520,7 @@ void OpusAudioDecoder::CloseDecoder() {
void OpusAudioDecoder::ResetTimestampState() {
output_timestamp_helper_->SetBaseTimestamp(kNoTimestamp());
last_input_timestamp_ = kNoTimestamp();
- output_bytes_to_drop_ = 0;
+ skip_samples_ = 0;
}
bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input,
@@ -539,16 +551,6 @@ bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input,
output_timestamp_helper_->SetBaseTimestamp(input->timestamp());
}
- if (decoded_audio_size > 0 && output_bytes_to_drop_ > 0) {
- int dropped_size = std::min(decoded_audio_size, output_bytes_to_drop_);
- DCHECK_EQ(dropped_size % kBytesPerChannel, 0);
- decoded_audio_data += dropped_size;
- decoded_audio_size -= dropped_size;
- output_bytes_to_drop_ -= dropped_size;
- samples_decoded = decoded_audio_size /
- demuxer_stream_->audio_decoder_config().bytes_per_frame();
- }
-
if (decoded_audio_size > 0) {
// Copy the audio samples into an output buffer.
uint8* data[] = { decoded_audio_data };
@@ -560,8 +562,28 @@ bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input,
output_timestamp_helper_->GetTimestamp(),
output_timestamp_helper_->GetFrameDuration(samples_decoded));
output_timestamp_helper_->AddFrames(samples_decoded);
+ if (skip_samples_ > 0) {
+ int dropped_size = std::min(samples_decoded, skip_samples_);
+ output_buffer->get()->TrimStart(dropped_size);
+ skip_samples_ -= dropped_size;
+ samples_decoded -= dropped_size;
+ }
+ if (input->discard_padding().InMicroseconds() > 0) {
+ int discard_padding = std::ceil(
+ input->discard_padding().InMicroseconds() *
+ samples_per_second_ / 1000000.0);
+ if (discard_padding < 0 || discard_padding > samples_decoded) {
+ DVLOG(1) << "Invalid file. Incorrect discard padding value.";
+ return false;
+ }
+ output_buffer->get()->TrimEnd(std::min(samples_decoded, discard_padding));
+ samples_decoded -= discard_padding;
+ }
}
+ decoded_audio_size =
+ samples_decoded *
+ demuxer_stream_->audio_decoder_config().bytes_per_frame();
// Decoding finished successfully, update statistics.
PipelineStatistics statistics;
statistics.audio_bytes_decoded = decoded_audio_size;
diff --git a/media/filters/opus_audio_decoder.h b/media/filters/opus_audio_decoder.h
index a808ff3482..77e84344f0 100644
--- a/media/filters/opus_audio_decoder.h
+++ b/media/filters/opus_audio_decoder.h
@@ -70,10 +70,6 @@ class MEDIA_EXPORT OpusAudioDecoder : public AudioDecoder {
scoped_ptr<AudioTimestampHelper> output_timestamp_helper_;
base::TimeDelta last_input_timestamp_;
- // Number of output sample bytes to drop before generating
- // output buffers.
- int output_bytes_to_drop_;
-
ReadCB read_cb_;
int skip_samples_;
diff --git a/media/filters/pipeline_integration_test.cc b/media/filters/pipeline_integration_test.cc
index e79f631b72..0ce2fd1244 100644
--- a/media/filters/pipeline_integration_test.cc
+++ b/media/filters/pipeline_integration_test.cc
@@ -11,6 +11,7 @@
#include "build/build_config.h"
#include "media/base/decoder_buffer.h"
#include "media/base/media_keys.h"
+#include "media/base/media_switches.h"
#include "media/base/test_data_util.h"
#include "media/cdm/aes_decryptor.h"
#include "media/filters/chunk_demuxer.h"
@@ -27,12 +28,14 @@ static const uint8 kInitData[] = { 0x69, 0x6e, 0x69, 0x74 };
static const char kWebM[] = "video/webm; codecs=\"vp8,vorbis\"";
static const char kWebMVP9[] = "video/webm; codecs=\"vp9\"";
static const char kAudioOnlyWebM[] = "video/webm; codecs=\"vorbis\"";
+static const char kOpusAudioOnlyWebM[] = "video/webm; codecs=\"opus\"";
static const char kVideoOnlyWebM[] = "video/webm; codecs=\"vp8\"";
static const char kMP4[] = "video/mp4; codecs=\"avc1.4D4041,mp4a.40.2\"";
static const char kMP4Video[] = "video/mp4; codecs=\"avc1.4D4041\"";
static const char kMP4Audio[] = "audio/mp4; codecs=\"mp4a.40.2\"";
static const char kMP4AudioType[] = "audio/mp4";
static const char kMP4VideoType[] = "video/mp4";
+static const char kMP3[] = "audio/mpeg";
// Key used to encrypt test files.
static const uint8 kSecretKey[] = {
@@ -56,6 +59,8 @@ static const int k640WebMFileDurationMs = 2763;
static const int k640IsoFileDurationMs = 2737;
static const int k640IsoCencFileDurationMs = 2736;
static const int k1280IsoFileDurationMs = 2736;
+static const int kOpusEndTrimmingWebMFileDurationMs = 2771;
+static const uint32 kOpusEndTrimmingWebMFileAudioBytes = 528676;
static const int kVP9WebMFileDurationMs = 2735;
static const int kVP8AWebMFileDurationMs = 2700;
@@ -284,13 +289,29 @@ class MockMediaSource {
}
void DemuxerOpenedTask() {
+ // This code assumes that |mimetype_| is one of the following forms.
+ // 1. audio/mpeg
+ // 2. video/webm;codec="vorbis,vp8".
size_t semicolon = mimetype_.find(";");
- std::string type = mimetype_.substr(0, semicolon);
- size_t quote1 = mimetype_.find("\"");
- size_t quote2 = mimetype_.find("\"", quote1 + 1);
- std::string codecStr = mimetype_.substr(quote1 + 1, quote2 - quote1 - 1);
+ std::string type = mimetype_;
std::vector<std::string> codecs;
- Tokenize(codecStr, ",", &codecs);
+ if (semicolon != std::string::npos) {
+ type = mimetype_.substr(0, semicolon);
+ size_t codecs_param_start = mimetype_.find("codecs=\"", semicolon);
+
+ CHECK_NE(codecs_param_start, std::string::npos);
+
+ codecs_param_start += 8; // Skip over the codecs=".
+
+ size_t codecs_param_end = mimetype_.find("\"", codecs_param_start);
+
+ CHECK_NE(codecs_param_end, std::string::npos);
+
+ std::string codecs_param =
+ mimetype_.substr(codecs_param_start,
+ codecs_param_end - codecs_param_start);
+ Tokenize(codecs_param, ",", &codecs);
+ }
CHECK_EQ(chunk_demuxer_->AddId(kSourceId, type, codecs), ChunkDemuxer::kOk);
AppendData(initial_append_size_);
@@ -516,6 +537,26 @@ TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_VP8A_WebM) {
Stop();
}
+TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_Opus_WebM) {
+ EXPECT_CALL(*this, OnSetOpaque(false)).Times(AnyNumber());
+ MockMediaSource source("bear-opus-end-trimming.webm", kOpusAudioOnlyWebM,
+ kAppendWholeFile);
+ StartPipelineWithMediaSource(&source);
+ source.EndOfStream();
+
+ EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
+ EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
+ EXPECT_EQ(kOpusEndTrimmingWebMFileDurationMs,
+ pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
+ Play();
+
+ ASSERT_TRUE(WaitUntilOnEnded());
+ EXPECT_EQ(kOpusEndTrimmingWebMFileAudioBytes,
+ pipeline_->GetStatistics().audio_bytes_decoded);
+ source.Abort();
+ Stop();
+}
+
TEST_F(PipelineIntegrationTest, MediaSource_ConfigChange_WebM) {
MockMediaSource source("bear-320x240-16x9-aspect.webm", kWebM,
kAppendWholeFile);
@@ -627,6 +668,27 @@ TEST_F(PipelineIntegrationTest,
}
#if defined(USE_PROPRIETARY_CODECS)
+TEST_F(PipelineIntegrationTest, MediaSource_MP3) {
+ MockMediaSource source("sfx.mp3", kMP3, kAppendWholeFile);
+ StartPipelineWithMediaSource(&source);
+ source.EndOfStream();
+
+ Play();
+
+ EXPECT_TRUE(WaitUntilOnEnded());
+}
+
+
+TEST_F(PipelineIntegrationTest, MediaSource_MP3_Icecast) {
+ MockMediaSource source("icy_sfx.mp3", kMP3, kAppendWholeFile);
+ StartPipelineWithMediaSource(&source);
+ source.EndOfStream();
+
+ Play();
+
+ EXPECT_TRUE(WaitUntilOnEnded());
+}
+
TEST_F(PipelineIntegrationTest, MediaSource_ConfigChange_MP4) {
MockMediaSource source("bear-640x360-av_frag.mp4", kMP4, kAppendWholeFile);
StartPipelineWithMediaSource(&source);
diff --git a/media/filters/source_buffer_stream.cc b/media/filters/source_buffer_stream.cc
index b0038dd7bb..15f894c83e 100644
--- a/media/filters/source_buffer_stream.cc
+++ b/media/filters/source_buffer_stream.cc
@@ -471,6 +471,9 @@ bool SourceBufferStream::Append(
void SourceBufferStream::Remove(base::TimeDelta start, base::TimeDelta end,
base::TimeDelta duration) {
+ DVLOG(1) << __FUNCTION__ << "(" << start.InSecondsF()
+ << ", " << end.InSecondsF()
+ << ", " << duration.InSecondsF() << ")";
DCHECK(start >= base::TimeDelta()) << start.InSecondsF();
DCHECK(start < end) << "start " << start.InSecondsF()
<< " end " << end.InSecondsF();
@@ -497,17 +500,22 @@ void SourceBufferStream::Remove(base::TimeDelta start, base::TimeDelta end,
if (new_range) {
itr = ranges_.insert(++itr, new_range);
--itr;
+
+ // Update the selected range if the next buffer position was transferred
+ // to |new_range|.
+ if (new_range->HasNextBufferPosition())
+ SetSelectedRange(new_range);
}
// If the current range now is completely covered by the removal
// range then delete it and move on.
if (start <= range->GetStartTimestamp()) {
if (selected_range_ == range)
- SetSelectedRange(NULL);
+ SetSelectedRange(NULL);
- delete range;
- itr = ranges_.erase(itr);
- continue;
+ delete range;
+ itr = ranges_.erase(itr);
+ continue;
}
// Truncate the current range so that it only contains data before
@@ -518,6 +526,7 @@ void SourceBufferStream::Remove(base::TimeDelta start, base::TimeDelta end,
// Check to see if the current playback position was removed and
// update the selected range appropriately.
if (!saved_buffers.empty()) {
+ DCHECK(!range->HasNextBufferPosition());
SetSelectedRange(NULL);
SetSelectedRangeIfNeeded(saved_buffers.front()->GetDecodeTimestamp());
}
@@ -525,6 +534,9 @@ void SourceBufferStream::Remove(base::TimeDelta start, base::TimeDelta end,
// Move on to the next range.
++itr;
}
+
+ DCHECK(IsRangeListSorted(ranges_));
+ DCHECK(OnlySelectedRangeIsSeeked());
}
void SourceBufferStream::ResetSeekState() {
@@ -1253,6 +1265,8 @@ void SourceBufferStream::CompleteConfigChange() {
void SourceBufferStream::SetSelectedRangeIfNeeded(
const base::TimeDelta timestamp) {
+ DVLOG(1) << __FUNCTION__ << "(" << timestamp.InSecondsF() << ")";
+
if (selected_range_) {
DCHECK(track_buffer_.empty());
return;
diff --git a/media/filters/source_buffer_stream_unittest.cc b/media/filters/source_buffer_stream_unittest.cc
index 3c120745b4..0507175451 100644
--- a/media/filters/source_buffer_stream_unittest.cc
+++ b/media/filters/source_buffer_stream_unittest.cc
@@ -2940,7 +2940,7 @@ TEST_F(SourceBufferStreamTest, Remove_Partial4) {
CheckExpectedRangesByTimestamp("{ [10,40) [2060,2150) }");
}
-// Test behavior when the current positing is removed and new buffers
+// Test behavior when the current position is removed and new buffers
// are appended over the removal range.
TEST_F(SourceBufferStreamTest, Remove_CurrentPosition) {
Seek(0);
@@ -2964,6 +2964,21 @@ TEST_F(SourceBufferStreamTest, Remove_CurrentPosition) {
CheckExpectedBuffers("210K 240 270K 300 330");
}
+// Test behavior when buffers in the selected range before the current position
+// are removed.
+TEST_F(SourceBufferStreamTest, Remove_BeforeCurrentPosition) {
+ Seek(0);
+ NewSegmentAppend("0K 30 60 90K 120 150 180K 210 240 270K 300 330");
+ CheckExpectedRangesByTimestamp("{ [0,360) }");
+ CheckExpectedBuffers("0K 30 60 90K 120");
+
+ // Remove a range that is before the current playback position.
+ RemoveInMs(0, 90, 360);
+ CheckExpectedRangesByTimestamp("{ [90,360) }");
+
+ CheckExpectedBuffers("150 180K 210 240 270K 300 330");
+}
+
// TODO(vrk): Add unit tests where keyframes are unaligned between streams.
// (crbug.com/133557)
diff --git a/media/filters/stream_parser_factory.cc b/media/filters/stream_parser_factory.cc
index c1587699fe..4d2e524538 100644
--- a/media/filters/stream_parser_factory.cc
+++ b/media/filters/stream_parser_factory.cc
@@ -10,6 +10,7 @@
#include "base/strings/string_util.h"
#include "media/base/media_log.h"
#include "media/base/media_switches.h"
+#include "media/mp3/mp3_stream_parser.h"
#include "media/webm/webm_stream_parser.h"
#if defined(USE_PROPRIETARY_CODECS)
@@ -28,6 +29,8 @@ struct CodecInfo {
AUDIO,
VIDEO
};
+
+ // Update tools/metrics/histograms/histograms.xml if new values are added.
enum HistogramTag {
HISTOGRAM_UNKNOWN,
HISTOGRAM_VP8,
@@ -37,6 +40,8 @@ struct CodecInfo {
HISTOGRAM_MPEG2AAC,
HISTOGRAM_MPEG4AAC,
HISTOGRAM_EAC3,
+ HISTOGRAM_MP3,
+ HISTOGRAM_OPUS,
HISTOGRAM_MAX // Must be the last entry.
};
@@ -62,16 +67,24 @@ static const CodecInfo kVP9CodecInfo = { "vp9", CodecInfo::VIDEO, NULL,
CodecInfo::HISTOGRAM_VP9 };
static const CodecInfo kVorbisCodecInfo = { "vorbis", CodecInfo::AUDIO, NULL,
CodecInfo::HISTOGRAM_VORBIS };
+static const CodecInfo kOpusCodecInfo = { "opus", CodecInfo::AUDIO, NULL,
+ CodecInfo::HISTOGRAM_OPUS };
static const CodecInfo* kVideoWebMCodecs[] = {
&kVP8CodecInfo,
+#if !defined(OS_ANDROID)
+ // TODO(wonsik): crbug.com/285016 query Android platform for codec
+ // capabilities.
&kVP9CodecInfo,
+#endif
&kVorbisCodecInfo,
+ &kOpusCodecInfo,
NULL
};
static const CodecInfo* kAudioWebMCodecs[] = {
&kVorbisCodecInfo,
+ &kOpusCodecInfo,
NULL
};
@@ -147,6 +160,7 @@ static const CodecInfo* kAudioMP4Codecs[] = {
static StreamParser* BuildMP4Parser(
const std::vector<std::string>& codecs, const LogCB& log_cb) {
std::set<int> audio_object_types;
+
bool has_sbr = false;
#if defined(ENABLE_EAC3_PLAYBACK)
bool enable_eac3 = CommandLine::ForCurrentProcess()->HasSwitch(
@@ -175,12 +189,28 @@ static StreamParser* BuildMP4Parser(
return new mp4::MP4StreamParser(audio_object_types, has_sbr);
}
+
+static const CodecInfo kMP3CodecInfo = { NULL, CodecInfo::AUDIO, NULL,
+ CodecInfo::HISTOGRAM_MP3 };
+
+static const CodecInfo* kAudioMP3Codecs[] = {
+ &kMP3CodecInfo,
+ NULL
+};
+
+static StreamParser* BuildMP3Parser(
+ const std::vector<std::string>& codecs, const LogCB& log_cb) {
+ return new MP3StreamParser();
+}
+
#endif
+
static const SupportedTypeInfo kSupportedTypeInfo[] = {
{ "video/webm", &BuildWebMParser, kVideoWebMCodecs },
{ "audio/webm", &BuildWebMParser, kAudioWebMCodecs },
#if defined(USE_PROPRIETARY_CODECS)
+ { "audio/mpeg", &BuildMP3Parser, kAudioMP3Codecs },
{ "video/mp4", &BuildMP4Parser, kVideoMP4Codecs },
{ "audio/mp4", &BuildMP4Parser, kAudioMP4Codecs },
#endif
@@ -208,6 +238,11 @@ static bool VerifyCodec(
return false;
}
#endif
+ if (codec_info->tag == CodecInfo::HISTOGRAM_OPUS) {
+ const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ if (!cmd_line->HasSwitch(switches::kEnableOpusPlayback))
+ return false;
+ }
if (audio_codecs)
audio_codecs->push_back(codec_info->tag);
return true;
@@ -249,8 +284,26 @@ static bool CheckTypeAndCodecs(
for (size_t i = 0; i < arraysize(kSupportedTypeInfo); ++i) {
const SupportedTypeInfo& type_info = kSupportedTypeInfo[i];
if (type == type_info.type) {
+ if (codecs.empty()) {
+
+#if defined(USE_PROPRIETARY_CODECS)
+ if (type_info.codecs == kAudioMP3Codecs &&
+ !CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableMP3StreamParser)) {
+ DVLOG(1) << "MP3StreamParser is not enabled.";
+ return false;
+ }
+#endif
+
+ const CodecInfo* codec_info = type_info.codecs[0];
+ if (codec_info && !codec_info->pattern &&
+ VerifyCodec(codec_info, audio_codecs, video_codecs)) {
+
+ if (factory_function)
+ *factory_function = type_info.factory_function;
+ return true;
+ }
- if (codecs.size() == 0u) {
MEDIA_LOG(log_cb) << "A codecs parameter must be provided for '"
<< type << "'";
return false;
@@ -271,6 +324,7 @@ static bool CheckTypeAndCodecs(
break; // Since only 1 pattern will match, no need to check others.
}
}
+
if (!found_codec) {
MEDIA_LOG(log_cb) << "Codec '" << codec_id
<< "' is not supported for '" << type << "'";
diff --git a/media/filters/stream_parser_factory.h b/media/filters/stream_parser_factory.h
index ccf394150b..1f9ad347d1 100644
--- a/media/filters/stream_parser_factory.h
+++ b/media/filters/stream_parser_factory.h
@@ -32,7 +32,7 @@ class MEDIA_EXPORT StreamParserFactory {
// |has_video| is true if a video codec was specified.
// Returns NULL otherwise. The values of |has_audio| and |has_video| are
// undefined.
- static scoped_ptr<media::StreamParser> Create(
+ static scoped_ptr<StreamParser> Create(
const std::string& type, const std::vector<std::string>& codecs,
const LogCB& log_cb, bool* has_audio, bool* has_video);
};
diff --git a/media/filters/vpx_video_decoder.cc b/media/filters/vpx_video_decoder.cc
index 3f125943ef..3c02d15f90 100644
--- a/media/filters/vpx_video_decoder.cc
+++ b/media/filters/vpx_video_decoder.cc
@@ -121,7 +121,7 @@ bool VpxVideoDecoder::ConfigureDecoder(const VideoDecoderConfig& config) {
bool can_handle = false;
if (config.codec() == kCodecVP9)
can_handle = true;
- if (cmd_line->HasSwitch(switches::kEnableVp8AlphaPlayback) &&
+ if (!cmd_line->HasSwitch(switches::kDisableVp8AlphaPlayback) &&
config.codec() == kCodecVP8 && config.format() == VideoFrame::YV12A) {
can_handle = true;
}
diff --git a/media/media.gyp b/media/media.gyp
index ab2701881d..8f60528384 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -12,12 +12,12 @@
# detection of ABI mismatches and prevents silent errors.
'linux_link_pulseaudio%': 0,
'conditions': [
- ['OS=="android" or OS=="ios"', {
- # Android and iOS don't use ffmpeg.
+ ['OS=="android"', {
+ # Android doesn't use ffmpeg.
'media_use_ffmpeg%': 0,
- # Android and iOS don't use libvpx.
+ # Android doesn't use libvpx.
'media_use_libvpx%': 0,
- }, { # 'OS!="android" and OS!="ios"'
+ }, { # 'OS!="android"'
'media_use_ffmpeg%': 1,
'media_use_libvpx%': 1,
}],
@@ -27,24 +27,31 @@
}, {
'use_alsa%': 0,
}],
- ['os_posix==1 and OS!="mac" and OS!="ios" and OS!="android" and chromeos!=1', {
+ ['os_posix==1 and OS!="mac" and OS!="android" and chromeos!=1', {
'use_pulseaudio%': 1,
}, {
'use_pulseaudio%': 0,
}],
],
},
+ 'includes': [
+ 'media_cdm.gypi',
+ ],
'targets': [
{
'target_name': 'media',
'type': '<(component)',
'dependencies': [
'../base/base.gyp:base',
+ '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../crypto/crypto.gyp:crypto',
+ '../net/net.gyp:net',
+ '../gpu/gpu.gyp:command_buffer_common',
'../skia/skia.gyp:skia',
'../third_party/opus/opus.gyp:opus',
'../ui/ui.gyp:ui',
'../url/url.gyp:url_lib',
+ 'shared_memory_support',
],
'defines': [
'MEDIA_IMPLEMENTATION',
@@ -110,10 +117,6 @@
'audio/fake_audio_input_stream.h',
'audio/fake_audio_output_stream.cc',
'audio/fake_audio_output_stream.h',
- 'audio/ios/audio_manager_ios.h',
- 'audio/ios/audio_manager_ios.mm',
- 'audio/ios/audio_session_util_ios.h',
- 'audio/ios/audio_session_util_ios.mm',
'audio/linux/alsa_input.cc',
'audio/linux/alsa_input.h',
'audio/linux/alsa_output.cc',
@@ -186,6 +189,7 @@
'audio/win/wavein_input_win.h',
'audio/win/waveout_output_win.cc',
'audio/win/waveout_output_win.h',
+ 'base/android/demuxer_android.h',
'base/android/demuxer_stream_player_params.cc',
'base/android/demuxer_stream_player_params.h',
'base/android/media_player_manager.h',
@@ -382,6 +386,8 @@
'midi/midi_manager_mac.h',
'midi/midi_port_info.cc',
'midi/midi_port_info.h',
+ 'mp3/mp3_stream_parser.cc',
+ 'mp3/mp3_stream_parser.h',
'video/capture/android/video_capture_device_android.cc',
'video/capture/android/video_capture_device_android.h',
'video/capture/fake_video_capture_device.cc',
@@ -399,6 +405,7 @@
'video/capture/video_capture_device_dummy.h',
'video/capture/video_capture_proxy.cc',
'video/capture/video_capture_proxy.h',
+ 'video/capture/video_capture_types.cc',
'video/capture/video_capture_types.h',
'video/capture/win/capability_list_win.cc',
'video/capture/win/capability_list_win.h',
@@ -457,13 +464,6 @@
'USE_NEON'
],
}],
- ['OS!="ios"', {
- 'dependencies': [
- '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
- '../gpu/gpu.gyp:command_buffer_common',
- 'shared_memory_support',
- ],
- }],
['media_use_ffmpeg==1', {
'dependencies': [
'../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
@@ -510,53 +510,6 @@
'filters/vpx_video_decoder.h',
],
}],
- ['OS=="ios"', {
- 'includes': [
- # For shared_memory_support_sources variable.
- 'shared_memory_support.gypi',
- ],
- 'sources': [
- 'base/media_stub.cc',
- # These sources are normally built via a dependency on the
- # shared_memory_support target, but that target is not built on iOS.
- # Instead, directly build only the files that are needed for iOS.
- '<@(shared_memory_support_sources)',
- ],
- 'sources/': [
- # Exclude everything but iOS-specific files.
- ['exclude', '\\.(cc|mm)$'],
- ['include', '_ios\\.(cc|mm)$'],
- ['include', '(^|/)ios/'],
- # Re-include specific pieces.
- # iOS support is limited to audio input only.
- ['include', '^audio/audio_buffers_state\\.'],
- ['include', '^audio/audio_input_controller\\.'],
- ['include', '^audio/audio_manager\\.'],
- ['include', '^audio/audio_manager_base\\.'],
- ['include', '^audio/audio_parameters\\.'],
- ['include', '^audio/fake_audio_consumer\\.'],
- ['include', '^audio/fake_audio_input_stream\\.'],
- ['include', '^audio/fake_audio_output_stream\\.'],
- ['include', '^base/audio_bus\\.'],
- ['include', '^base/channel_layout\\.'],
- ['include', '^base/media\\.cc$'],
- ['include', '^base/media_stub\\.cc$'],
- ['include', '^base/media_switches\\.'],
- ['include', '^base/user_input_monitor\\.'],
- ['include', '^base/vector_math\\.'],
- ],
- 'link_settings': {
- 'libraries': [
- '$(SDKROOT)/System/Library/Frameworks/AudioToolbox.framework',
- '$(SDKROOT)/System/Library/Frameworks/AVFoundation.framework',
- '$(SDKROOT)/System/Library/Frameworks/CoreAudio.framework',
- '$(SDKROOT)/System/Library/Frameworks/CoreMIDI.framework',
- ],
- },
- 'defines': [
- 'DISABLE_USER_INPUT_MONITOR',
- ],
- }],
['OS=="android"', {
'link_settings': {
'libraries': [
@@ -865,8 +818,7 @@
'../build/linux/system.gyp:gtk',
],
}],
- # ios check is necessary due to http://crbug.com/172682.
- ['OS!="ios" and (target_arch=="ia32" or target_arch=="x64")', {
+ ['target_arch=="ia32" or target_arch=="x64"', {
'dependencies': [
'media_asm',
'media_mmx',
@@ -883,15 +835,6 @@
],
}],
],
- 'target_conditions': [
- ['OS=="ios"', {
- 'sources/': [
- # Pull in specific Mac files for iOS (which have been filtered out
- # by file name rules).
- ['include', '^audio/mac/audio_input_mac\\.'],
- ],
- }],
- ],
},
{
'target_name': 'media_unittests',
@@ -899,9 +842,11 @@
'dependencies': [
'media',
'media_test_support',
+ 'shared_memory_support',
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/base.gyp:test_support_base',
+ '../gpu/gpu.gyp:command_buffer_common',
'../skia/skia.gyp:skia',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
@@ -913,13 +858,13 @@
'audio/audio_input_unittest.cc',
'audio/audio_input_volume_unittest.cc',
'audio/audio_low_latency_input_output_unittest.cc',
+ 'audio/audio_manager_unittest.cc',
'audio/audio_output_controller_unittest.cc',
'audio/audio_output_device_unittest.cc',
'audio/audio_output_proxy_unittest.cc',
'audio/audio_parameters_unittest.cc',
'audio/audio_power_monitor_unittest.cc',
'audio/fake_audio_consumer_unittest.cc',
- 'audio/ios/audio_manager_ios_unittest.cc',
'audio/linux/alsa_output_unittest.cc',
'audio/mac/audio_auhal_mac_unittest.cc',
'audio/mac/audio_device_listener_mac_unittest.cc',
@@ -1023,12 +968,6 @@
'USE_NEON'
],
}],
- ['OS!="ios"', {
- 'dependencies': [
- '../gpu/gpu.gyp:command_buffer_common',
- 'shared_memory_support',
- ],
- }],
['media_use_ffmpeg==1', {
'dependencies': [
'../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
@@ -1038,7 +977,17 @@
'base/media_file_checker_unittest.cc',
],
}],
- ['os_posix==1 and OS!="mac" and OS!="ios"', {
+ ['use_alsa==1', {
+ 'defines': [
+ 'USE_ALSA',
+ ],
+ }],
+ ['use_pulseaudio==1', {
+ 'defines': [
+ 'USE_PULSEAUDIO',
+ ],
+ }],
+ ['os_posix==1 and OS!="mac"', {
'conditions': [
['linux_use_tcmalloc==1', {
'dependencies': [
@@ -1047,17 +996,6 @@
}],
],
}],
- ['OS=="ios"', {
- 'sources/': [
- ['exclude', '.*'],
- ['include', '^audio/audio_input_controller_unittest\\.cc$'],
- ['include', '^audio/audio_input_unittest\\.cc$'],
- ['include', '^audio/audio_parameters_unittest\\.cc$'],
- ['include', '^audio/ios/audio_manager_ios_unittest\\.cc$'],
- ['include', '^base/mock_reader\\.h$'],
- ['include', '^base/run_all_unittests\\.cc$'],
- ],
- }],
['OS=="android"', {
'sources!': [
'audio/audio_input_volume_unittest.cc',
@@ -1104,7 +1042,7 @@
'audio/audio_low_latency_input_output_unittest.cc',
],
}],
- ['OS!="ios" and (target_arch=="ia32" or target_arch=="x64")', {
+ ['target_arch=="ia32" or target_arch=="x64"', {
'sources': [
'base/simd/convert_rgb_to_yuv_unittest.cc',
],
@@ -1161,9 +1099,56 @@
'video/mock_video_decode_accelerator.h',
],
},
+ {
+ # Minimal target for NaCl and other renderer side media clients which
+ # only need to send audio data across the shared memory to the browser
+ # process.
+ 'target_name': 'shared_memory_support',
+ 'type': '<(component)',
+ 'dependencies': [
+ '../base/base.gyp:base',
+ ],
+ 'defines': [
+ 'MEDIA_IMPLEMENTATION',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'includes': [
+ 'shared_memory_support.gypi',
+ ],
+ 'sources': [
+ '<@(shared_memory_support_sources)',
+ ],
+ 'conditions': [
+ ['arm_neon==1', {
+ 'defines': [
+ 'USE_NEON'
+ ],
+ }],
+ ['target_arch=="ia32" or target_arch=="x64"', {
+ 'dependencies': [
+ 'shared_memory_support_sse'
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'demuxer_bench',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ '../base/base.gyp:base',
+ ],
+ 'sources': [
+ 'tools/demuxer_bench/demuxer_bench.cc',
+ ],
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ 'msvs_disabled_warnings': [ 4267, ],
+ },
],
'conditions': [
- ['OS!="ios" and target_arch!="arm"', {
+ ['target_arch!="arm"', {
'targets': [
{
'target_name': 'media_asm',
@@ -1318,60 +1303,6 @@
},
], # targets
}],
- ['OS!="ios"', {
- 'includes': [
- 'media_cdm.gypi',
- ],
- 'targets': [
- {
- # Minimal target for NaCl and other renderer side media clients which
- # only need to send audio data across the shared memory to the browser
- # process.
- 'target_name': 'shared_memory_support',
- 'type': '<(component)',
- 'dependencies': [
- '../base/base.gyp:base',
- ],
- 'defines': [
- 'MEDIA_IMPLEMENTATION',
- ],
- 'include_dirs': [
- '..',
- ],
- 'includes': [
- 'shared_memory_support.gypi',
- ],
- 'sources': [
- '<@(shared_memory_support_sources)',
- ],
- 'conditions': [
- ['arm_neon==1', {
- 'defines': [
- 'USE_NEON'
- ],
- }],
- ['target_arch=="ia32" or target_arch=="x64"', {
- 'dependencies': [
- 'shared_memory_support_sse'
- ],
- }],
- ],
- },
- {
- 'target_name': 'demuxer_bench',
- 'type': 'executable',
- 'dependencies': [
- 'media',
- '../base/base.gyp:base',
- ],
- 'sources': [
- 'tools/demuxer_bench/demuxer_bench.cc',
- ],
- # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
- 'msvs_disabled_warnings': [ 4267, ],
- },
- ],
- }],
['use_x11==1', {
'targets': [
{
diff --git a/media/media.target.darwin-arm.mk b/media/media.target.darwin-arm.mk
index 05d668aea3..dbae6fc5b7 100644
--- a/media/media.target.darwin-arm.mk
+++ b/media/media.target.darwin-arm.mk
@@ -136,10 +136,12 @@ LOCAL_SRC_FILES := \
media/filters/wsola_internals.cc \
media/midi/midi_manager.cc \
media/midi/midi_port_info.cc \
+ media/mp3/mp3_stream_parser.cc \
media/video/capture/android/video_capture_device_android.cc \
media/video/capture/fake_video_capture_device.cc \
media/video/capture/video_capture_device.cc \
media/video/capture/video_capture_proxy.cc \
+ media/video/capture/video_capture_types.cc \
media/video/picture.cc \
media/video/video_decode_accelerator.cc \
media/video/video_encode_accelerator.cc \
@@ -202,6 +204,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -214,8 +217,10 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DDISABLE_USER_INPUT_MONITOR' \
+ '-DPOSIX_AVOID_MMAP' \
'-DSK_ENABLE_INST_COUNT=0' \
'-DSK_SUPPORT_GPU=1' \
'-DGR_GL_CUSTOM_SETUP_HEADER="GrGLConfig_chrome.h"' \
@@ -223,7 +228,6 @@ MY_DEFS_Debug := \
'-DUSE_CHROMIUM_SKIA' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
- '-DPOSIX_AVOID_MMAP' \
'-DU_USING_ICU_NAMESPACE=0' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -314,6 +318,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -326,8 +331,10 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DDISABLE_USER_INPUT_MONITOR' \
+ '-DPOSIX_AVOID_MMAP' \
'-DSK_ENABLE_INST_COUNT=0' \
'-DSK_SUPPORT_GPU=1' \
'-DGR_GL_CUSTOM_SETUP_HEADER="GrGLConfig_chrome.h"' \
@@ -335,7 +342,6 @@ MY_DEFS_Release := \
'-DUSE_CHROMIUM_SKIA' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
- '-DPOSIX_AVOID_MMAP' \
'-DU_USING_ICU_NAMESPACE=0' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -409,7 +415,9 @@ LOCAL_LDFLAGS_Debug := \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
-Wl,--icf=safe \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -428,7 +436,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--icf=safe \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/media.target.darwin-mips.mk b/media/media.target.darwin-mips.mk
index 09f2346f28..189f4450c3 100644
--- a/media/media.target.darwin-mips.mk
+++ b/media/media.target.darwin-mips.mk
@@ -136,10 +136,12 @@ LOCAL_SRC_FILES := \
media/filters/wsola_internals.cc \
media/midi/midi_manager.cc \
media/midi/midi_port_info.cc \
+ media/mp3/mp3_stream_parser.cc \
media/video/capture/android/video_capture_device_android.cc \
media/video/capture/fake_video_capture_device.cc \
media/video/capture/video_capture_device.cc \
media/video/capture/video_capture_proxy.cc \
+ media/video/capture/video_capture_types.cc \
media/video/picture.cc \
media/video/video_decode_accelerator.cc \
media/video/video_encode_accelerator.cc \
@@ -201,6 +203,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -213,8 +216,10 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DDISABLE_USER_INPUT_MONITOR' \
+ '-DPOSIX_AVOID_MMAP' \
'-DSK_ENABLE_INST_COUNT=0' \
'-DSK_SUPPORT_GPU=1' \
'-DGR_GL_CUSTOM_SETUP_HEADER="GrGLConfig_chrome.h"' \
@@ -222,7 +227,6 @@ MY_DEFS_Debug := \
'-DUSE_CHROMIUM_SKIA' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
- '-DPOSIX_AVOID_MMAP' \
'-DU_USING_ICU_NAMESPACE=0' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -312,6 +316,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -324,8 +329,10 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DDISABLE_USER_INPUT_MONITOR' \
+ '-DPOSIX_AVOID_MMAP' \
'-DSK_ENABLE_INST_COUNT=0' \
'-DSK_SUPPORT_GPU=1' \
'-DGR_GL_CUSTOM_SETUP_HEADER="GrGLConfig_chrome.h"' \
@@ -333,7 +340,6 @@ MY_DEFS_Release := \
'-DUSE_CHROMIUM_SKIA' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
- '-DPOSIX_AVOID_MMAP' \
'-DU_USING_ICU_NAMESPACE=0' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -405,7 +411,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -422,7 +430,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/media.target.darwin-x86.mk b/media/media.target.darwin-x86.mk
index bf86bb2353..6be9cd7f43 100644
--- a/media/media.target.darwin-x86.mk
+++ b/media/media.target.darwin-x86.mk
@@ -136,10 +136,12 @@ LOCAL_SRC_FILES := \
media/filters/wsola_internals.cc \
media/midi/midi_manager.cc \
media/midi/midi_port_info.cc \
+ media/mp3/mp3_stream_parser.cc \
media/video/capture/android/video_capture_device_android.cc \
media/video/capture/fake_video_capture_device.cc \
media/video/capture/video_capture_device.cc \
media/video/capture/video_capture_proxy.cc \
+ media/video/capture/video_capture_types.cc \
media/video/picture.cc \
media/video/video_decode_accelerator.cc \
media/video/video_encode_accelerator.cc \
@@ -205,6 +207,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -217,6 +220,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DDISABLE_USER_INPUT_MONITOR' \
'-DSK_ENABLE_INST_COUNT=0' \
@@ -319,6 +323,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -331,6 +336,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DDISABLE_USER_INPUT_MONITOR' \
'-DSK_ENABLE_INST_COUNT=0' \
@@ -410,7 +416,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -427,7 +435,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/media.target.linux-arm.mk b/media/media.target.linux-arm.mk
index 05d668aea3..dbae6fc5b7 100644
--- a/media/media.target.linux-arm.mk
+++ b/media/media.target.linux-arm.mk
@@ -136,10 +136,12 @@ LOCAL_SRC_FILES := \
media/filters/wsola_internals.cc \
media/midi/midi_manager.cc \
media/midi/midi_port_info.cc \
+ media/mp3/mp3_stream_parser.cc \
media/video/capture/android/video_capture_device_android.cc \
media/video/capture/fake_video_capture_device.cc \
media/video/capture/video_capture_device.cc \
media/video/capture/video_capture_proxy.cc \
+ media/video/capture/video_capture_types.cc \
media/video/picture.cc \
media/video/video_decode_accelerator.cc \
media/video/video_encode_accelerator.cc \
@@ -202,6 +204,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -214,8 +217,10 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DDISABLE_USER_INPUT_MONITOR' \
+ '-DPOSIX_AVOID_MMAP' \
'-DSK_ENABLE_INST_COUNT=0' \
'-DSK_SUPPORT_GPU=1' \
'-DGR_GL_CUSTOM_SETUP_HEADER="GrGLConfig_chrome.h"' \
@@ -223,7 +228,6 @@ MY_DEFS_Debug := \
'-DUSE_CHROMIUM_SKIA' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
- '-DPOSIX_AVOID_MMAP' \
'-DU_USING_ICU_NAMESPACE=0' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -314,6 +318,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -326,8 +331,10 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DDISABLE_USER_INPUT_MONITOR' \
+ '-DPOSIX_AVOID_MMAP' \
'-DSK_ENABLE_INST_COUNT=0' \
'-DSK_SUPPORT_GPU=1' \
'-DGR_GL_CUSTOM_SETUP_HEADER="GrGLConfig_chrome.h"' \
@@ -335,7 +342,6 @@ MY_DEFS_Release := \
'-DUSE_CHROMIUM_SKIA' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
- '-DPOSIX_AVOID_MMAP' \
'-DU_USING_ICU_NAMESPACE=0' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -409,7 +415,9 @@ LOCAL_LDFLAGS_Debug := \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
-Wl,--icf=safe \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -428,7 +436,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--icf=safe \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/media.target.linux-mips.mk b/media/media.target.linux-mips.mk
index 09f2346f28..189f4450c3 100644
--- a/media/media.target.linux-mips.mk
+++ b/media/media.target.linux-mips.mk
@@ -136,10 +136,12 @@ LOCAL_SRC_FILES := \
media/filters/wsola_internals.cc \
media/midi/midi_manager.cc \
media/midi/midi_port_info.cc \
+ media/mp3/mp3_stream_parser.cc \
media/video/capture/android/video_capture_device_android.cc \
media/video/capture/fake_video_capture_device.cc \
media/video/capture/video_capture_device.cc \
media/video/capture/video_capture_proxy.cc \
+ media/video/capture/video_capture_types.cc \
media/video/picture.cc \
media/video/video_decode_accelerator.cc \
media/video/video_encode_accelerator.cc \
@@ -201,6 +203,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -213,8 +216,10 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DDISABLE_USER_INPUT_MONITOR' \
+ '-DPOSIX_AVOID_MMAP' \
'-DSK_ENABLE_INST_COUNT=0' \
'-DSK_SUPPORT_GPU=1' \
'-DGR_GL_CUSTOM_SETUP_HEADER="GrGLConfig_chrome.h"' \
@@ -222,7 +227,6 @@ MY_DEFS_Debug := \
'-DUSE_CHROMIUM_SKIA' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
- '-DPOSIX_AVOID_MMAP' \
'-DU_USING_ICU_NAMESPACE=0' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -312,6 +316,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -324,8 +329,10 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DDISABLE_USER_INPUT_MONITOR' \
+ '-DPOSIX_AVOID_MMAP' \
'-DSK_ENABLE_INST_COUNT=0' \
'-DSK_SUPPORT_GPU=1' \
'-DGR_GL_CUSTOM_SETUP_HEADER="GrGLConfig_chrome.h"' \
@@ -333,7 +340,6 @@ MY_DEFS_Release := \
'-DUSE_CHROMIUM_SKIA' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
- '-DPOSIX_AVOID_MMAP' \
'-DU_USING_ICU_NAMESPACE=0' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -405,7 +411,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -422,7 +430,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/media.target.linux-x86.mk b/media/media.target.linux-x86.mk
index bf86bb2353..6be9cd7f43 100644
--- a/media/media.target.linux-x86.mk
+++ b/media/media.target.linux-x86.mk
@@ -136,10 +136,12 @@ LOCAL_SRC_FILES := \
media/filters/wsola_internals.cc \
media/midi/midi_manager.cc \
media/midi/midi_port_info.cc \
+ media/mp3/mp3_stream_parser.cc \
media/video/capture/android/video_capture_device_android.cc \
media/video/capture/fake_video_capture_device.cc \
media/video/capture/video_capture_device.cc \
media/video/capture/video_capture_proxy.cc \
+ media/video/capture/video_capture_types.cc \
media/video/picture.cc \
media/video/video_decode_accelerator.cc \
media/video/video_encode_accelerator.cc \
@@ -205,6 +207,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -217,6 +220,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DDISABLE_USER_INPUT_MONITOR' \
'-DSK_ENABLE_INST_COUNT=0' \
@@ -319,6 +323,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -331,6 +336,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DDISABLE_USER_INPUT_MONITOR' \
'-DSK_ENABLE_INST_COUNT=0' \
@@ -410,7 +416,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -427,7 +435,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/media_android_jni_headers.target.darwin-arm.mk b/media/media_android_jni_headers.target.darwin-arm.mk
index 3c9132c340..b661bbe0aa 100644
--- a/media/media_android_jni_headers.target.darwin-arm.mk
+++ b/media/media_android_jni_headers.target.darwin-arm.mk
@@ -138,6 +138,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -150,6 +151,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -216,6 +218,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -228,6 +231,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
diff --git a/media/media_android_jni_headers.target.darwin-mips.mk b/media/media_android_jni_headers.target.darwin-mips.mk
index 8d8aa4bcac..2573b1ad8d 100644
--- a/media/media_android_jni_headers.target.darwin-mips.mk
+++ b/media/media_android_jni_headers.target.darwin-mips.mk
@@ -137,6 +137,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -149,6 +150,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -214,6 +216,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -226,6 +229,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
diff --git a/media/media_android_jni_headers.target.darwin-x86.mk b/media/media_android_jni_headers.target.darwin-x86.mk
index 5f14b4ba86..0f5933fff2 100644
--- a/media/media_android_jni_headers.target.darwin-x86.mk
+++ b/media/media_android_jni_headers.target.darwin-x86.mk
@@ -140,6 +140,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -152,6 +153,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -221,6 +223,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -233,6 +236,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
diff --git a/media/media_android_jni_headers.target.linux-arm.mk b/media/media_android_jni_headers.target.linux-arm.mk
index 3c9132c340..b661bbe0aa 100644
--- a/media/media_android_jni_headers.target.linux-arm.mk
+++ b/media/media_android_jni_headers.target.linux-arm.mk
@@ -138,6 +138,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -150,6 +151,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -216,6 +218,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -228,6 +231,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
diff --git a/media/media_android_jni_headers.target.linux-mips.mk b/media/media_android_jni_headers.target.linux-mips.mk
index 8d8aa4bcac..2573b1ad8d 100644
--- a/media/media_android_jni_headers.target.linux-mips.mk
+++ b/media/media_android_jni_headers.target.linux-mips.mk
@@ -137,6 +137,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -149,6 +150,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -214,6 +216,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -226,6 +229,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
diff --git a/media/media_android_jni_headers.target.linux-x86.mk b/media/media_android_jni_headers.target.linux-x86.mk
index 5f14b4ba86..0f5933fff2 100644
--- a/media/media_android_jni_headers.target.linux-x86.mk
+++ b/media/media_android_jni_headers.target.linux-x86.mk
@@ -140,6 +140,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -152,6 +153,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -221,6 +223,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -233,6 +236,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
diff --git a/media/media_asm.target.darwin-x86.mk b/media/media_asm.target.darwin-x86.mk
index 381f449a1f..8742e1510e 100644
--- a/media/media_asm.target.darwin-x86.mk
+++ b/media/media_asm.target.darwin-x86.mk
@@ -176,6 +176,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -188,6 +189,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -257,6 +259,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -269,6 +272,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -315,7 +319,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -332,7 +338,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/media_asm.target.linux-x86.mk b/media/media_asm.target.linux-x86.mk
index 381f449a1f..8742e1510e 100644
--- a/media/media_asm.target.linux-x86.mk
+++ b/media/media_asm.target.linux-x86.mk
@@ -176,6 +176,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -188,6 +189,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -257,6 +259,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -269,6 +272,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -315,7 +319,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -332,7 +338,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/media_cdm.gypi b/media/media_cdm.gypi
index a4a94f79db..2f15fac524 100644
--- a/media/media_cdm.gypi
+++ b/media/media_cdm.gypi
@@ -5,10 +5,10 @@
{
'variables': {
'conditions': [
- ['OS == "android" or OS == "ios"', {
- # Android and iOS don't use ffmpeg.
+ ['OS == "android"', {
+ # Android doesn't use ffmpeg.
'use_ffmpeg%': 0,
- }, { # 'OS != "android" and OS != "ios"'
+ }, { # 'OS != "android"'
'use_ffmpeg%': 1,
}],
],
diff --git a/media/media_mmx.target.darwin-x86.mk b/media/media_mmx.target.darwin-x86.mk
index f6679833e3..d41181e278 100644
--- a/media/media_mmx.target.darwin-x86.mk
+++ b/media/media_mmx.target.darwin-x86.mk
@@ -64,6 +64,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -76,6 +77,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -148,6 +150,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -160,6 +163,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -208,7 +212,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -225,7 +231,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/media_mmx.target.linux-x86.mk b/media/media_mmx.target.linux-x86.mk
index f6679833e3..d41181e278 100644
--- a/media/media_mmx.target.linux-x86.mk
+++ b/media/media_mmx.target.linux-x86.mk
@@ -64,6 +64,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -76,6 +77,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -148,6 +150,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -160,6 +163,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -208,7 +212,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -225,7 +231,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/media_sse.target.darwin-x86.mk b/media/media_sse.target.darwin-x86.mk
index 49b557d797..0df3310d53 100644
--- a/media/media_sse.target.darwin-x86.mk
+++ b/media/media_sse.target.darwin-x86.mk
@@ -64,6 +64,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -76,6 +77,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -148,6 +150,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -160,6 +163,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -208,7 +212,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -225,7 +231,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/media_sse.target.linux-x86.mk b/media/media_sse.target.linux-x86.mk
index 49b557d797..0df3310d53 100644
--- a/media/media_sse.target.linux-x86.mk
+++ b/media/media_sse.target.linux-x86.mk
@@ -64,6 +64,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -76,6 +77,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -148,6 +150,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -160,6 +163,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -208,7 +212,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -225,7 +231,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/media_sse2.target.darwin-x86.mk b/media/media_sse2.target.darwin-x86.mk
index 50ff2eb28a..9c233e22a2 100644
--- a/media/media_sse2.target.darwin-x86.mk
+++ b/media/media_sse2.target.darwin-x86.mk
@@ -66,6 +66,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -78,6 +79,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -150,6 +152,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -162,6 +165,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -210,7 +214,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -227,7 +233,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/media_sse2.target.linux-x86.mk b/media/media_sse2.target.linux-x86.mk
index 50ff2eb28a..9c233e22a2 100644
--- a/media/media_sse2.target.linux-x86.mk
+++ b/media/media_sse2.target.linux-x86.mk
@@ -66,6 +66,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -78,6 +79,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -150,6 +152,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -162,6 +165,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -210,7 +214,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -227,7 +233,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/mp3/mp3_stream_parser.cc b/media/mp3/mp3_stream_parser.cc
new file mode 100644
index 0000000000..319c868775
--- /dev/null
+++ b/media/mp3/mp3_stream_parser.cc
@@ -0,0 +1,566 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mp3/mp3_stream_parser.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/message_loop/message_loop.h"
+#include "media/base/bit_reader.h"
+#include "media/base/buffers.h"
+#include "media/base/stream_parser_buffer.h"
+#include "media/base/video_decoder_config.h"
+#include "net/http/http_util.h"
+
+namespace media {
+
+static const uint32 kMP3StartCodeMask = 0xffe00000;
+static const uint32 kICYStartCode = 0x49435920; // 'ICY '
+
+// Arbitrary upper bound on the size of an IceCast header before it
+// triggers an error.
+static const int kMaxIcecastHeaderSize = 4096;
+
+static const uint32 kID3StartCodeMask = 0xffffff00;
+static const uint32 kID3v1StartCode = 0x54414700; // 'TAG\0'
+static const int kID3v1Size = 128;
+static const int kID3v1ExtendedSize = 227;
+static const uint32 kID3v2StartCode = 0x49443300; // 'ID3\0'
+
+// Map that determines which bitrate_index & channel_mode combinations
+// are allowed.
+// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
+static const bool kIsAllowed[17][4] = {
+ { true, true, true, true }, // free
+ { true, false, false, false }, // 32
+ { true, false, false, false }, // 48
+ { true, false, false, false }, // 56
+ { true, true, true, true }, // 64
+ { true, false, false, false }, // 80
+ { true, true, true, true }, // 96
+ { true, true, true, true }, // 112
+ { true, true, true, true }, // 128
+ { true, true, true, true }, // 160
+ { true, true, true, true }, // 192
+ { false, true, true, true }, // 224
+ { false, true, true, true }, // 256
+ { false, true, true, true }, // 320
+ { false, true, true, true }, // 384
+ { false, false, false, false } // bad
+};
+
+// Maps version and layer information in the frame header
+// into an index for the |kBitrateMap|.
+// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
+static const int kVersionLayerMap[4][4] = {
+ // { reserved, L3, L2, L1 }
+ { 5, 4, 4, 3 }, // MPEG 2.5
+ { 5, 5, 5, 5 }, // reserved
+ { 5, 4, 4, 3 }, // MPEG 2
+ { 5, 2, 1, 0 } // MPEG 1
+};
+
+// Maps the bitrate index field in the header and an index
+// from |kVersionLayerMap| to a frame bitrate.
+// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
+static const int kBitrateMap[16][6] = {
+ // { V1L1, V1L2, V1L3, V2L1, V2L2 & V2L3, reserved }
+ { 0, 0, 0, 0, 0, 0 },
+ { 32, 32, 32, 32, 8, 0 },
+ { 64, 48, 40, 48, 16, 0 },
+ { 96, 56, 48, 56, 24, 0 },
+ { 128, 64, 56, 64, 32, 0 },
+ { 160, 80, 64, 80, 40, 0 },
+ { 192, 96, 80, 96, 48, 0 },
+ { 224, 112, 96, 112, 56, 0 },
+ { 256, 128, 112, 128, 64, 0 },
+ { 288, 160, 128, 144, 80, 0 },
+ { 320, 192, 160, 160, 96, 0 },
+ { 352, 224, 192, 176, 112, 0 },
+ { 384, 256, 224, 192, 128, 0 },
+ { 416, 320, 256, 224, 144, 0 },
+ { 448, 384, 320, 256, 160, 0 },
+ { 0, 0, 0, 0, 0}
+};
+
+// Maps the sample rate index and version fields from the frame header
+// to a sample rate.
+// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
+static const int kSampleRateMap[4][4] = {
+ // { V2.5, reserved, V2, V1 }
+ { 11025, 0, 22050, 44100 },
+ { 12000, 0, 24000, 48000 },
+ { 8000, 0, 16000, 32000 },
+ { 0, 0, 0, 0 }
+};
+
+// Frame header field constants.
+static const int kVersion1 = 3;
+static const int kVersion2 = 2;
+static const int kVersionReserved = 1;
+static const int kVersion2_5 = 0;
+static const int kLayerReserved = 0;
+static const int kLayer1 = 3;
+static const int kLayer2 = 2;
+static const int kLayer3 = 1;
+static const int kBitrateFree = 0;
+static const int kBitrateBad = 0xf;
+static const int kSampleRateReserved = 3;
+
+MP3StreamParser::MP3StreamParser()
+ : state_(UNINITIALIZED),
+ in_media_segment_(false) {
+}
+
+MP3StreamParser::~MP3StreamParser() {}
+
+void MP3StreamParser::Init(const InitCB& init_cb,
+ const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ const NewTextBuffersCB& text_cb,
+ const NeedKeyCB& need_key_cb,
+ const AddTextTrackCB& add_text_track_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const LogCB& log_cb) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK_EQ(state_, UNINITIALIZED);
+ init_cb_ = init_cb;
+ config_cb_ = config_cb;
+ new_buffers_cb_ = new_buffers_cb;
+ new_segment_cb_ = new_segment_cb;
+ end_of_segment_cb_ = end_of_segment_cb;
+ log_cb_ = log_cb;
+
+ ChangeState(INITIALIZED);
+}
+
+void MP3StreamParser::Flush() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK_NE(state_, UNINITIALIZED);
+ queue_.Reset();
+ timestamp_helper_->SetBaseTimestamp(base::TimeDelta());
+ in_media_segment_ = false;
+}
+
+bool MP3StreamParser::Parse(const uint8* buf, int size) {
+ DVLOG(1) << __FUNCTION__ << "(" << size << ")";
+ DCHECK(buf);
+ DCHECK_GT(size, 0);
+ DCHECK_NE(state_, UNINITIALIZED);
+
+ if (state_ == PARSE_ERROR)
+ return false;
+
+ DCHECK_EQ(state_, INITIALIZED);
+
+ queue_.Push(buf, size);
+
+ for (;;) {
+ const uint8* data;
+ int data_size;
+ queue_.Peek(&data, &data_size);
+
+ if (size < 4)
+ return true;
+
+ uint32 start_code = data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3];
+ int bytes_read = 0;
+ if ((start_code & kMP3StartCodeMask) == kMP3StartCodeMask) {
+ bytes_read = ParseMP3Frame(data, data_size);
+ } else if (start_code == kICYStartCode) {
+ bytes_read = ParseIcecastHeader(data, data_size);
+ } else if ((start_code & kID3StartCodeMask) == kID3v1StartCode) {
+ bytes_read = ParseID3v1(data, data_size);
+ } else if ((start_code & kID3StartCodeMask) == kID3v2StartCode) {
+ bytes_read = ParseID3v2(data, data_size);
+ } else {
+ bytes_read = FindNextValidStartCode(data, data_size);
+
+ if (bytes_read > 0) {
+ DVLOG(1) << "Unexpected start code 0x" << std::hex << start_code;
+ DVLOG(1) << "SKIPPING " << bytes_read << " bytes of garbage.";
+ }
+ }
+
+ CHECK_LE(bytes_read, data_size);
+
+ if (bytes_read < 0) {
+ ChangeState(PARSE_ERROR);
+ return false;
+ } else if (bytes_read == 0) {
+ // Need more data.
+ return true;
+ }
+
+ queue_.Pop(bytes_read);
+ }
+
+ return true;
+}
+
+void MP3StreamParser::ChangeState(State state) {
+ DVLOG(1) << __FUNCTION__ << "() : " << state_ << " -> " << state;
+ state_ = state;
+}
+
+int MP3StreamParser::ParseFrameHeader(const uint8* data, int size,
+ int* frame_size,
+ int* sample_rate,
+ ChannelLayout* channel_layout,
+ int* sample_count) const {
+ DCHECK(data);
+ DCHECK_GE(size, 0);
+ DCHECK(frame_size);
+
+ if (size < 4)
+ return 0;
+
+ BitReader reader(data, size);
+ int sync;
+ int version;
+ int layer;
+ int is_protected;
+ int bitrate_index;
+ int sample_rate_index;
+ int has_padding;
+ int is_private;
+ int channel_mode;
+ int other_flags;
+
+ if (!reader.ReadBits(11, &sync) ||
+ !reader.ReadBits(2, &version) ||
+ !reader.ReadBits(2, &layer) ||
+ !reader.ReadBits(1, &is_protected) ||
+ !reader.ReadBits(4, &bitrate_index) ||
+ !reader.ReadBits(2, &sample_rate_index) ||
+ !reader.ReadBits(1, &has_padding) ||
+ !reader.ReadBits(1, &is_private) ||
+ !reader.ReadBits(2, &channel_mode) ||
+ !reader.ReadBits(6, &other_flags)) {
+ return -1;
+ }
+
+ DVLOG(2) << "Header data :" << std::hex
+ << " sync 0x" << sync
+ << " version 0x" << version
+ << " layer 0x" << layer
+ << " bitrate_index 0x" << bitrate_index
+ << " sample_rate_index 0x" << sample_rate_index
+ << " channel_mode 0x" << channel_mode;
+
+ if (sync != 0x7ff ||
+ version == kVersionReserved ||
+ layer == kLayerReserved ||
+ bitrate_index == kBitrateFree || bitrate_index == kBitrateBad ||
+ sample_rate_index == kSampleRateReserved) {
+ MEDIA_LOG(log_cb_) << "Invalid header data :" << std::hex
+ << " sync 0x" << sync
+ << " version 0x" << version
+ << " layer 0x" << layer
+ << " bitrate_index 0x" << bitrate_index
+ << " sample_rate_index 0x" << sample_rate_index
+ << " channel_mode 0x" << channel_mode;
+ return -1;
+ }
+
+ if (layer == kLayer2 && kIsAllowed[bitrate_index][channel_mode]) {
+ MEDIA_LOG(log_cb_) << "Invalid (bitrate_index, channel_mode) combination :"
+ << std::hex
+ << " bitrate_index " << bitrate_index
+ << " channel_mode " << channel_mode;
+ return -1;
+ }
+
+ int bitrate = kBitrateMap[bitrate_index][kVersionLayerMap[version][layer]];
+
+ if (bitrate == 0) {
+ MEDIA_LOG(log_cb_) << "Invalid bitrate :" << std::hex
+ << " version " << version
+ << " layer " << layer
+ << " bitrate_index " << bitrate_index;
+ return -1;
+ }
+
+ DVLOG(2) << " bitrate " << bitrate;
+
+ int frame_sample_rate = kSampleRateMap[sample_rate_index][version];
+ if (frame_sample_rate == 0) {
+ MEDIA_LOG(log_cb_) << "Invalid sample rate :" << std::hex
+ << " version " << version
+ << " sample_rate_index " << sample_rate_index;
+ return -1;
+ }
+
+ if (sample_rate)
+ *sample_rate = frame_sample_rate;
+
+ // http://teslabs.com/openplayer/docs/docs/specs/mp3_structure2.pdf
+ // Table 2.1.5
+ int samples_per_frame;
+ switch (layer) {
+ case kLayer1:
+ samples_per_frame = 384;
+ break;
+
+ case kLayer2:
+ samples_per_frame = 1152;
+ break;
+
+ case kLayer3:
+ if (version == kVersion2 || version == kVersion2_5)
+ samples_per_frame = 576;
+ else
+ samples_per_frame = 1152;
+ break;
+
+ default:
+ return -1;
+ }
+
+ if (sample_count)
+ *sample_count = samples_per_frame;
+
+ // http://teslabs.com/openplayer/docs/docs/specs/mp3_structure2.pdf
+ // Text just below Table 2.1.5.
+ if (layer == kLayer1) {
+ // This formulation is a slight variation on the equation below,
+ // but has slightly different truncation characteristics to deal
+ // with the fact that Layer 1 has 4 byte "slots" instead of single
+ // byte ones.
+ *frame_size = 4 * (12 * bitrate * 1000 / frame_sample_rate);
+ } else {
+ *frame_size =
+ ((samples_per_frame / 8) * bitrate * 1000) / frame_sample_rate;
+ }
+
+ if (has_padding)
+ *frame_size += (layer == kLayer1) ? 4 : 1;
+
+ if (channel_layout) {
+ // Map Stereo(0), Joint Stereo(1), and Dual Channel (2) to
+ // CHANNEL_LAYOUT_STEREO and Single Channel (3) to CHANNEL_LAYOUT_MONO.
+ *channel_layout =
+ (channel_mode == 3) ? CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
+ }
+
+ return 4;
+}
+
+int MP3StreamParser::ParseMP3Frame(const uint8* data, int size) {
+ DVLOG(2) << __FUNCTION__ << "(" << size << ")";
+
+ int sample_rate;
+ ChannelLayout channel_layout;
+ int frame_size;
+ int sample_count;
+ int bytes_read = ParseFrameHeader(
+ data, size, &frame_size, &sample_rate, &channel_layout, &sample_count);
+
+ if (bytes_read <= 0)
+ return bytes_read;
+
+ // Make sure data contains the entire frame.
+ if (size < frame_size)
+ return 0;
+
+ DVLOG(2) << " sample_rate " << sample_rate
+ << " channel_layout " << channel_layout
+ << " frame_size " << frame_size;
+
+ if (config_.IsValidConfig() &&
+ (config_.samples_per_second() != sample_rate ||
+ config_.channel_layout() != channel_layout)) {
+ // Clear config data so that a config change is initiated.
+ config_ = AudioDecoderConfig();
+ }
+
+ if (!config_.IsValidConfig()) {
+ config_.Initialize(kCodecMP3, kSampleFormatF32, channel_layout,
+ sample_rate, NULL, 0, false, false,
+ base::TimeDelta(), base::TimeDelta());
+
+ base::TimeDelta base_timestamp;
+ if (timestamp_helper_)
+ base_timestamp = timestamp_helper_->GetTimestamp();
+
+ timestamp_helper_.reset(new AudioTimestampHelper(sample_rate));
+ timestamp_helper_->SetBaseTimestamp(base_timestamp);
+
+ VideoDecoderConfig video_config;
+ bool success = config_cb_.Run(config_, video_config);
+
+ if (!init_cb_.is_null())
+ base::ResetAndReturn(&init_cb_).Run(success, kInfiniteDuration());
+
+ if (!success)
+ return -1;
+ }
+
+ if (!in_media_segment_) {
+ in_media_segment_ = true;
+ new_segment_cb_.Run();
+ }
+
+ BufferQueue audio_buffers;
+ BufferQueue video_buffers;
+
+ // TODO(acolwell): Change this code to parse as many frames as
+ // possible before calling |new_buffers_cb_|.
+ scoped_refptr<StreamParserBuffer> buffer =
+ StreamParserBuffer::CopyFrom(data, frame_size, true);
+ audio_buffers.push_back(buffer);
+
+ if (!new_buffers_cb_.Run(audio_buffers, video_buffers))
+ return -1;
+
+ timestamp_helper_->AddFrames(sample_count);
+
+ return frame_size;
+}
+
+int MP3StreamParser::ParseIcecastHeader(const uint8* data, int size) {
+ DVLOG(1) << __FUNCTION__ << "(" << size << ")";
+
+ if (size < 4)
+ return 0;
+
+ if (memcmp("ICY ", data, 4))
+ return -1;
+
+ int locate_size = std::min(size, kMaxIcecastHeaderSize);
+ int offset = net::HttpUtil::LocateEndOfHeaders(
+ reinterpret_cast<const char*>(data), locate_size, 4);
+ if (offset < 0) {
+ if (locate_size == kMaxIcecastHeaderSize) {
+ MEDIA_LOG(log_cb_) << "Icecast header is too large.";
+ return -1;
+ }
+
+ return 0;
+ }
+
+ return offset;
+}
+
+int MP3StreamParser::ParseID3v1(const uint8* data, int size) {
+ DVLOG(1) << __FUNCTION__ << "(" << size << ")";
+
+ if (size < kID3v1Size)
+ return 0;
+
+ // TODO(acolwell): Add code to actually validate ID3v1 data and
+ // expose it as a metadata text track.
+ return !memcmp(data, "TAG+", 4) ? kID3v1ExtendedSize : kID3v1Size;
+}
+
+int MP3StreamParser::ParseID3v2(const uint8* data, int size) {
+ DVLOG(1) << __FUNCTION__ << "(" << size << ")";
+
+ if (size < 10)
+ return 0;
+
+ BitReader reader(data, size);
+ int32 id;
+ int version;
+ uint8 flags;
+ int32 id3_size;
+
+ if (!reader.ReadBits(24, &id) ||
+ !reader.ReadBits(16, &version) ||
+ !reader.ReadBits(8, &flags) ||
+ !ParseSyncSafeInt(&reader, &id3_size)) {
+ return -1;
+ }
+
+ int32 actual_tag_size = 10 + id3_size;
+
+ // Increment size if 'Footer present' flag is set.
+ if (flags & 0x10)
+ actual_tag_size += 10;
+
+ // Make sure we have the entire tag.
+ if (size < actual_tag_size)
+ return 0;
+
+ // TODO(acolwell): Add code to actually validate ID3v2 data and
+ // expose it as a metadata text track.
+ return actual_tag_size;
+}
+
+bool MP3StreamParser::ParseSyncSafeInt(BitReader* reader, int32* value) {
+ *value = 0;
+ for (int i = 0; i < 4; ++i) {
+ uint8 tmp;
+ if (!reader->ReadBits(1, &tmp) || tmp != 0) {
+ MEDIA_LOG(log_cb_) << "ID3 syncsafe integer byte MSb is not 0!";
+ return false;
+ }
+
+ if (!reader->ReadBits(7, &tmp))
+ return false;
+
+ *value <<= 7;
+ *value += tmp;
+ }
+
+ return true;
+}
+
+int MP3StreamParser::FindNextValidStartCode(const uint8* data, int size) const {
+ const uint8* start = data;
+ const uint8* end = data + size;
+
+ while (start < end) {
+ int bytes_left = end - start;
+ const uint8* candidate_start_code =
+ static_cast<const uint8*>(memchr(start, 0xff, bytes_left));
+
+ if (!candidate_start_code)
+ return 0;
+
+ bool parse_header_failed = false;
+ const uint8* sync = candidate_start_code;
+ // Try to find 3 valid frames in a row. 3 was selected to decrease
+ // the probability of false positives.
+ for (int i = 0; i < 3; ++i) {
+ int sync_size = end - sync;
+ int frame_size;
+ int sync_bytes = ParseFrameHeader(
+ sync, sync_size, &frame_size, NULL, NULL, NULL);
+
+ if (sync_bytes == 0)
+ return 0;
+
+ if (sync_bytes > 0) {
+ DCHECK_LT(sync_bytes, sync_size);
+
+ // Skip over this frame so we can check the next one.
+ sync += frame_size;
+
+ // Make sure the next frame starts inside the buffer.
+ if (sync >= end)
+ return 0;
+ } else {
+ DVLOG(1) << "ParseFrameHeader() " << i << " failed @" << (sync - data);
+ parse_header_failed = true;
+ break;
+ }
+ }
+
+ if (parse_header_failed) {
+ // One of the frame header parses failed so |candidate_start_code|
+ // did not point to the start of a real frame. Move |start| forward
+ // so we can find the next candidate.
+ start = candidate_start_code + 1;
+ continue;
+ }
+
+ return candidate_start_code - data;
+ }
+
+ return 0;
+}
+
+} // namespace media
diff --git a/media/mp3/mp3_stream_parser.h b/media/mp3/mp3_stream_parser.h
new file mode 100644
index 0000000000..a4b40576a7
--- /dev/null
+++ b/media/mp3/mp3_stream_parser.h
@@ -0,0 +1,119 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MP3_MP3_STREAM_PARSER_H_
+#define MEDIA_MP3_MP3_STREAM_PARSER_H_
+
+#include <set>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/byte_queue.h"
+#include "media/base/media_export.h"
+#include "media/base/stream_parser.h"
+
+namespace media {
+
+class BitReader;
+
+class MEDIA_EXPORT MP3StreamParser : public StreamParser {
+ public:
+ MP3StreamParser();
+ virtual ~MP3StreamParser();
+
+ // StreamParser implementation.
+ virtual void Init(const InitCB& init_cb, const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ const NewTextBuffersCB& text_cb,
+ const NeedKeyCB& need_key_cb,
+ const AddTextTrackCB& add_text_track_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const LogCB& log_cb) OVERRIDE;
+ virtual void Flush() OVERRIDE;
+ virtual bool Parse(const uint8* buf, int size) OVERRIDE;
+
+ private:
+ enum State {
+ UNINITIALIZED,
+ INITIALIZED,
+ PARSE_ERROR
+ };
+
+ State state_;
+
+ InitCB init_cb_;
+ NewConfigCB config_cb_;
+ NewBuffersCB new_buffers_cb_;
+ NewMediaSegmentCB new_segment_cb_;
+ base::Closure end_of_segment_cb_;
+ LogCB log_cb_;
+
+ ByteQueue queue_;
+
+ AudioDecoderConfig config_;
+ scoped_ptr<AudioTimestampHelper> timestamp_helper_;
+ bool in_media_segment_;
+
+ void ChangeState(State state);
+
+ // Parsing functions for various byte stream elements.
+ // |data| & |size| describe the data available for parsing.
+ // These functions are expected to consume an entire frame/header.
+ // It should only return a value greater than 0 when |data| has
+ // enough bytes to successfully parse & consume the entire element.
+ //
+ // |frame_size| - Required parameter that is set to the size of the frame, in
+ // bytes, including the frame header if the function returns a value > 0.
+ // |sample_rate| - Optional parameter that is set to the sample rate
+ // of the frame if this function returns a value > 0.
+ // |channel_layout| - Optional parameter that is set to the channel_layout
+ // of the frame if this function returns a value > 0.
+ // |sample_count| - Optional parameter that is set to the number of samples
+ // in the frame if this function returns a value > 0.
+ //
+ // |sample_rate|, |channel_layout|, |sample_count| may be NULL if the caller
+ // is not interested in receiving these values from the frame header.
+ //
+ // Returns:
+ // > 0 : The number of bytes parsed.
+ // 0 : If more data is needed to parse the entire element.
+ // < 0 : An error was encountered during parsing.
+ int ParseFrameHeader(const uint8* data, int size,
+ int* frame_size,
+ int* sample_rate,
+ ChannelLayout* channel_layout,
+ int* sample_count) const;
+ int ParseMP3Frame(const uint8* data, int size);
+ int ParseIcecastHeader(const uint8* data, int size);
+ int ParseID3v1(const uint8* data, int size);
+ int ParseID3v2(const uint8* data, int size);
+
+ // Parses an ID3v2 "sync safe" integer.
+ // |reader| - A BitReader to read from.
+ // |value| - Set to the integer value read, if true is returned.
+ //
+ // Returns true if the integer was successfully parsed and |value|
+ // was set.
+ // Returns false if an error was encountered. The state of |value| is
+ // undefined when false is returned.
+ bool ParseSyncSafeInt(BitReader* reader, int32* value);
+
+ // Scans |data| for the next valid start code.
+ // Returns:
+ // > 0 : The number of bytes that should be skipped to reach the
+ // next start code..
+ // 0 : If a valid start code was not found and more data is needed.
+ // < 0 : An error was encountered during parsing.
+ int FindNextValidStartCode(const uint8* data, int size) const;
+
+ DISALLOW_COPY_AND_ASSIGN(MP3StreamParser);
+};
+
+} // namespace media
+
+#endif // MEDIA_MP3_MP3_STREAM_PARSER_H_
diff --git a/media/mp4/mp4_stream_parser.cc b/media/mp4/mp4_stream_parser.cc
index 51ed7566a8..26cee44d14 100644
--- a/media/mp4/mp4_stream_parser.cc
+++ b/media/mp4/mp4_stream_parser.cc
@@ -257,7 +257,8 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
audio_config.Initialize(
codec, sample_format, channel_layout, sample_per_second,
extra_data.size() ? &extra_data[0] : NULL, extra_data.size(),
- is_audio_track_encrypted_, false);
+ is_audio_track_encrypted_, false, base::TimeDelta(),
+ base::TimeDelta());
has_audio_ = true;
audio_track_id_ = track->header.track_id;
}
diff --git a/media/player_android.target.darwin-arm.mk b/media/player_android.target.darwin-arm.mk
index 3f86418b7b..b3c2b39686 100644
--- a/media/player_android.target.darwin-arm.mk
+++ b/media/player_android.target.darwin-arm.mk
@@ -74,6 +74,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -86,6 +87,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DMESA_EGL_NO_X11_HEADERS' \
'-D__STDC_CONSTANT_MACROS' \
@@ -164,6 +166,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -176,6 +179,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DMESA_EGL_NO_X11_HEADERS' \
'-D__STDC_CONSTANT_MACROS' \
@@ -236,7 +240,9 @@ LOCAL_LDFLAGS_Debug := \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
-Wl,--icf=safe \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -255,7 +261,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--icf=safe \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/player_android.target.darwin-mips.mk b/media/player_android.target.darwin-mips.mk
index 472e70ff13..52b3b69a6f 100644
--- a/media/player_android.target.darwin-mips.mk
+++ b/media/player_android.target.darwin-mips.mk
@@ -73,6 +73,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -85,6 +86,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DMESA_EGL_NO_X11_HEADERS' \
'-D__STDC_CONSTANT_MACROS' \
@@ -162,6 +164,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -174,6 +177,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DMESA_EGL_NO_X11_HEADERS' \
'-D__STDC_CONSTANT_MACROS' \
@@ -232,7 +236,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -249,7 +255,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/player_android.target.darwin-x86.mk b/media/player_android.target.darwin-x86.mk
index b530e7c08c..ecd9f18cf1 100644
--- a/media/player_android.target.darwin-x86.mk
+++ b/media/player_android.target.darwin-x86.mk
@@ -76,6 +76,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -88,6 +89,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DMESA_EGL_NO_X11_HEADERS' \
'-D__STDC_CONSTANT_MACROS' \
@@ -169,6 +171,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -181,6 +184,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DMESA_EGL_NO_X11_HEADERS' \
'-D__STDC_CONSTANT_MACROS' \
@@ -238,7 +242,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -255,7 +261,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/player_android.target.linux-arm.mk b/media/player_android.target.linux-arm.mk
index 3f86418b7b..b3c2b39686 100644
--- a/media/player_android.target.linux-arm.mk
+++ b/media/player_android.target.linux-arm.mk
@@ -74,6 +74,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -86,6 +87,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DMESA_EGL_NO_X11_HEADERS' \
'-D__STDC_CONSTANT_MACROS' \
@@ -164,6 +166,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -176,6 +179,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DMESA_EGL_NO_X11_HEADERS' \
'-D__STDC_CONSTANT_MACROS' \
@@ -236,7 +240,9 @@ LOCAL_LDFLAGS_Debug := \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
-Wl,--icf=safe \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -255,7 +261,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--icf=safe \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/player_android.target.linux-mips.mk b/media/player_android.target.linux-mips.mk
index 472e70ff13..52b3b69a6f 100644
--- a/media/player_android.target.linux-mips.mk
+++ b/media/player_android.target.linux-mips.mk
@@ -73,6 +73,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -85,6 +86,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DMESA_EGL_NO_X11_HEADERS' \
'-D__STDC_CONSTANT_MACROS' \
@@ -162,6 +164,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -174,6 +177,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DMESA_EGL_NO_X11_HEADERS' \
'-D__STDC_CONSTANT_MACROS' \
@@ -232,7 +236,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -249,7 +255,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/player_android.target.linux-x86.mk b/media/player_android.target.linux-x86.mk
index b530e7c08c..ecd9f18cf1 100644
--- a/media/player_android.target.linux-x86.mk
+++ b/media/player_android.target.linux-x86.mk
@@ -76,6 +76,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -88,6 +89,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DMESA_EGL_NO_X11_HEADERS' \
'-D__STDC_CONSTANT_MACROS' \
@@ -169,6 +171,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -181,6 +184,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-DMESA_EGL_NO_X11_HEADERS' \
'-D__STDC_CONSTANT_MACROS' \
@@ -238,7 +242,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -255,7 +261,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/shared_memory_support.target.darwin-arm.mk b/media/shared_memory_support.target.darwin-arm.mk
index 4ee89e5f3e..e9b444c10a 100644
--- a/media/shared_memory_support.target.darwin-arm.mk
+++ b/media/shared_memory_support.target.darwin-arm.mk
@@ -65,6 +65,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -77,6 +78,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -146,6 +148,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -158,6 +161,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -210,7 +214,9 @@ LOCAL_LDFLAGS_Debug := \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
-Wl,--icf=safe \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -229,7 +235,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--icf=safe \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/shared_memory_support.target.darwin-mips.mk b/media/shared_memory_support.target.darwin-mips.mk
index fa39740d0b..66440b6362 100644
--- a/media/shared_memory_support.target.darwin-mips.mk
+++ b/media/shared_memory_support.target.darwin-mips.mk
@@ -64,6 +64,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -76,6 +77,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -144,6 +146,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -156,6 +159,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -206,7 +210,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -223,7 +229,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/shared_memory_support.target.darwin-x86.mk b/media/shared_memory_support.target.darwin-x86.mk
index 8f72081c9a..549b9015a7 100644
--- a/media/shared_memory_support.target.darwin-x86.mk
+++ b/media/shared_memory_support.target.darwin-x86.mk
@@ -67,6 +67,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -79,6 +80,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -151,6 +153,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -163,6 +166,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -212,7 +216,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -229,7 +235,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/shared_memory_support.target.linux-arm.mk b/media/shared_memory_support.target.linux-arm.mk
index 4ee89e5f3e..e9b444c10a 100644
--- a/media/shared_memory_support.target.linux-arm.mk
+++ b/media/shared_memory_support.target.linux-arm.mk
@@ -65,6 +65,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -77,6 +78,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -146,6 +148,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -158,6 +161,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -210,7 +214,9 @@ LOCAL_LDFLAGS_Debug := \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
-Wl,--icf=safe \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -229,7 +235,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--icf=safe \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/shared_memory_support.target.linux-mips.mk b/media/shared_memory_support.target.linux-mips.mk
index fa39740d0b..66440b6362 100644
--- a/media/shared_memory_support.target.linux-mips.mk
+++ b/media/shared_memory_support.target.linux-mips.mk
@@ -64,6 +64,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -76,6 +77,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -144,6 +146,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -156,6 +159,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -206,7 +210,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -223,7 +229,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/shared_memory_support.target.linux-x86.mk b/media/shared_memory_support.target.linux-x86.mk
index 8f72081c9a..549b9015a7 100644
--- a/media/shared_memory_support.target.linux-x86.mk
+++ b/media/shared_memory_support.target.linux-x86.mk
@@ -67,6 +67,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -79,6 +80,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -151,6 +153,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -163,6 +166,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -212,7 +216,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -229,7 +235,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/shared_memory_support_sse.target.darwin-x86.mk b/media/shared_memory_support_sse.target.darwin-x86.mk
index 72b2eff1e3..d0d7b79ffb 100644
--- a/media/shared_memory_support_sse.target.darwin-x86.mk
+++ b/media/shared_memory_support_sse.target.darwin-x86.mk
@@ -64,6 +64,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -76,6 +77,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -148,6 +150,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -160,6 +163,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -208,7 +212,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -225,7 +231,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/shared_memory_support_sse.target.linux-x86.mk b/media/shared_memory_support_sse.target.linux-x86.mk
index 72b2eff1e3..d0d7b79ffb 100644
--- a/media/shared_memory_support_sse.target.linux-x86.mk
+++ b/media/shared_memory_support_sse.target.linux-x86.mk
@@ -64,6 +64,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -76,6 +77,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -148,6 +150,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -160,6 +163,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-DMEDIA_IMPLEMENTATION' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
@@ -208,7 +212,9 @@ LOCAL_LDFLAGS_Debug := \
-nostdlib \
-Wl,--no-undefined \
-Wl,--exclude-libs=ALL \
+ -Wl,--fatal-warnings \
-Wl,--gc-sections \
+ -Wl,--warn-shared-textrel \
-Wl,-O1 \
-Wl,--as-needed
@@ -225,7 +231,9 @@ LOCAL_LDFLAGS_Release := \
-Wl,--exclude-libs=ALL \
-Wl,-O1 \
-Wl,--as-needed \
- -Wl,--gc-sections
+ -Wl,--gc-sections \
+ -Wl,--fatal-warnings \
+ -Wl,--warn-shared-textrel
LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
diff --git a/media/test/data/icy_sfx.mp3 b/media/test/data/icy_sfx.mp3
new file mode 100644
index 0000000000..52fc5a9821
--- /dev/null
+++ b/media/test/data/icy_sfx.mp3
Binary files differ
diff --git a/media/video/capture/android/video_capture_device_android.cc b/media/video/capture/android/video_capture_device_android.cc
index d4d7350779..152cfda087 100644
--- a/media/video/capture/android/video_capture_device_android.cc
+++ b/media/video/capture/android/video_capture_device_android.cc
@@ -151,7 +151,7 @@ void VideoCaptureDeviceAndroid::Allocate(
Java_VideoCapture_queryHeight(env, j_capture_.obj());
current_settings_.frame_rate =
Java_VideoCapture_queryFrameRate(env, j_capture_.obj());
- current_settings_.color = VideoCaptureCapability::kYV12;
+ current_settings_.color = PIXEL_FORMAT_YV12;
CHECK(current_settings_.width > 0 && !(current_settings_.width % 2));
CHECK(current_settings_.height > 0 && !(current_settings_.height % 2));
diff --git a/media/video/capture/fake_video_capture_device.cc b/media/video/capture/fake_video_capture_device.cc
index ae7c07bc04..8434bc3ebb 100644
--- a/media/video/capture/fake_video_capture_device.cc
+++ b/media/video/capture/fake_video_capture_device.cc
@@ -79,7 +79,7 @@ void FakeVideoCaptureDevice::Allocate(
}
observer_ = observer;
- capture_format_.color = VideoCaptureCapability::kI420;
+ capture_format_.color = PIXEL_FORMAT_I420;
capture_format_.expected_capture_delay = 0;
capture_format_.interlaced = false;
if (capture_format.width > 320) { // VGA
@@ -105,7 +105,7 @@ void FakeVideoCaptureDevice::Reallocate() {
DCHECK_EQ(state_, kCapturing);
capture_format_ = capabilities_roster_.at(++capabilities_roster_index_ %
capabilities_roster_.size());
- DCHECK_EQ(capture_format_.color, VideoCaptureCapability::kI420);
+ DCHECK_EQ(capture_format_.color, PIXEL_FORMAT_I420);
DVLOG(3) << "Reallocating FakeVideoCaptureDevice, new capture resolution ("
<< capture_format_.width << "x" << capture_format_.height << ")";
@@ -230,7 +230,7 @@ void FakeVideoCaptureDevice::PopulateCapabilitiesRoster() {
media::VideoCaptureCapability(320,
240,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
VariableResolutionVideoCaptureDevice));
@@ -238,7 +238,7 @@ void FakeVideoCaptureDevice::PopulateCapabilitiesRoster() {
media::VideoCaptureCapability(640,
480,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
VariableResolutionVideoCaptureDevice));
@@ -246,7 +246,7 @@ void FakeVideoCaptureDevice::PopulateCapabilitiesRoster() {
media::VideoCaptureCapability(800,
600,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
VariableResolutionVideoCaptureDevice));
diff --git a/media/video/capture/linux/video_capture_device_linux.cc b/media/video/capture/linux/video_capture_device_linux.cc
index dd431a8b54..6e685c381f 100644
--- a/media/video/capture/linux/video_capture_device_linux.cc
+++ b/media/video/capture/linux/video_capture_device_linux.cc
@@ -50,22 +50,22 @@ static const char kUsbSuffixStart[] = " (";
static const size_t kUsbModelSize = 9;
static const char kUsbSuffixEnd[] = ")";
-static VideoCaptureCapability::Format V4l2ColorToVideoCaptureColorFormat(
+static VideoPixelFormat V4l2ColorToVideoCaptureColorFormat(
int32 v4l2_fourcc) {
- VideoCaptureCapability::Format result = VideoCaptureCapability::kColorUnknown;
+ VideoPixelFormat result = PIXEL_FORMAT_UNKNOWN;
switch (v4l2_fourcc) {
case V4L2_PIX_FMT_YUV420:
- result = VideoCaptureCapability::kI420;
+ result = PIXEL_FORMAT_I420;
break;
case V4L2_PIX_FMT_YUYV:
- result = VideoCaptureCapability::kYUY2;
+ result = PIXEL_FORMAT_YUY2;
break;
case V4L2_PIX_FMT_MJPEG:
case V4L2_PIX_FMT_JPEG:
- result = VideoCaptureCapability::kMJPEG;
+ result = PIXEL_FORMAT_MJPEG;
break;
}
- DCHECK_NE(result, VideoCaptureCapability::kColorUnknown);
+ DCHECK_NE(result, PIXEL_FORMAT_UNKNOWN);
return result;
}
diff --git a/media/video/capture/mac/video_capture_device_mac.mm b/media/video/capture/mac/video_capture_device_mac.mm
index 957486e805..7614fd3f09 100644
--- a/media/video/capture/mac/video_capture_device_mac.mm
+++ b/media/video/capture/mac/video_capture_device_mac.mm
@@ -149,7 +149,7 @@ void VideoCaptureDeviceMac::Allocate(
state_ = kAllocated;
VideoCaptureCapability current_settings;
- current_settings.color = VideoCaptureCapability::kARGB;
+ current_settings.color = PIXEL_FORMAT_UYVY;
current_settings.width = width;
current_settings.height = height;
current_settings.frame_rate = frame_rate;
diff --git a/media/video/capture/mac/video_capture_device_qtkit_mac.mm b/media/video/capture/mac/video_capture_device_qtkit_mac.mm
index b7e407a6da..9f9ea1c529 100644
--- a/media/video/capture/mac/video_capture_device_qtkit_mac.mm
+++ b/media/video/capture/mac/video_capture_device_qtkit_mac.mm
@@ -171,7 +171,7 @@
(id)kCVPixelBufferWidthKey,
[NSNumber numberWithDouble:frameHeight_],
(id)kCVPixelBufferHeightKey,
- [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA],
+ [NSNumber numberWithUnsignedInt:kCVPixelFormatType_422YpCbCr8],
(id)kCVPixelBufferPixelFormatTypeKey,
nil];
[[[captureSession_ outputs] objectAtIndex:0]
@@ -266,7 +266,7 @@
captureCapability.width = frameWidth_;
captureCapability.height = frameHeight_;
captureCapability.frame_rate = frameRate_;
- captureCapability.color = media::VideoCaptureCapability::kARGB;
+ captureCapability.color = media::PIXEL_FORMAT_UYVY;
captureCapability.expected_capture_delay = 0;
captureCapability.interlaced = false;
diff --git a/media/video/capture/video_capture_device_unittest.cc b/media/video/capture/video_capture_device_unittest.cc
index e39c59b054..5a19d272c9 100644
--- a/media/video/capture/video_capture_device_unittest.cc
+++ b/media/video/capture/video_capture_device_unittest.cc
@@ -159,7 +159,7 @@ TEST_F(VideoCaptureDeviceTest, CaptureVGA) {
VideoCaptureCapability capture_format(640,
480,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
ConstantResolutionVideoCaptureDevice);
@@ -197,7 +197,7 @@ TEST_F(VideoCaptureDeviceTest, Capture720p) {
VideoCaptureCapability capture_format(1280,
720,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
ConstantResolutionVideoCaptureDevice);
@@ -231,7 +231,7 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_AllocateBadSize) {
VideoCaptureCapability capture_format(637,
472,
35,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
ConstantResolutionVideoCaptureDevice);
@@ -258,21 +258,21 @@ TEST_F(VideoCaptureDeviceTest, ReAllocateCamera) {
VideoCaptureCapability capture_format_1(640,
480,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
ConstantResolutionVideoCaptureDevice);
VideoCaptureCapability capture_format_2(1280,
1024,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
ConstantResolutionVideoCaptureDevice);
VideoCaptureCapability capture_format_3(320,
240,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
ConstantResolutionVideoCaptureDevice);
@@ -321,7 +321,7 @@ TEST_F(VideoCaptureDeviceTest, DeAllocateCameraWhileRunning) {
VideoCaptureCapability capture_format(640,
480,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
ConstantResolutionVideoCaptureDevice);
@@ -359,7 +359,7 @@ TEST_F(VideoCaptureDeviceTest, FakeCapture) {
VideoCaptureCapability capture_format(640,
480,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
ConstantResolutionVideoCaptureDevice);
@@ -396,7 +396,7 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_CaptureMjpeg) {
VideoCaptureCapability capture_format(1280,
720,
30,
- VideoCaptureCapability::kMJPEG,
+ PIXEL_FORMAT_MJPEG,
0,
false,
ConstantResolutionVideoCaptureDevice);
@@ -406,7 +406,7 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_CaptureMjpeg) {
// Get captured video frames.
PostQuitTask();
EXPECT_TRUE(wait_event_.TimedWait(TestTimeouts::action_max_timeout()));
- EXPECT_EQ(rx_capability.color, VideoCaptureCapability::kMJPEG);
+ EXPECT_EQ(rx_capability.color, PIXEL_FORMAT_MJPEG);
device->DeAllocate();
}
diff --git a/media/video/capture/video_capture_types.cc b/media/video/capture/video_capture_types.cc
new file mode 100644
index 0000000000..5b8e226536
--- /dev/null
+++ b/media/video/capture/video_capture_types.cc
@@ -0,0 +1,60 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/video_capture_types.h"
+
+#include "media/base/limits.h"
+
+namespace media {
+
+VideoCaptureFormat::VideoCaptureFormat()
+ : width(0),
+ height(0),
+ frame_rate(0),
+ frame_size_type(ConstantResolutionVideoCaptureDevice) {}
+
+VideoCaptureFormat::VideoCaptureFormat(
+ int width,
+ int height,
+ int frame_rate,
+ VideoCaptureResolutionType frame_size_type)
+ : width(width),
+ height(height),
+ frame_rate(frame_rate),
+ frame_size_type(frame_size_type) {}
+
+bool VideoCaptureFormat::IsValid() const {
+ return (width > 0) && (height > 0) && (frame_rate > 0) &&
+ (frame_rate < media::limits::kMaxFramesPerSecond) &&
+ (width < media::limits::kMaxDimension) &&
+ (height < media::limits::kMaxDimension) &&
+ (width * height < media::limits::kMaxCanvas) &&
+ (frame_size_type >= 0) &&
+ (frame_size_type < media::MaxVideoCaptureResolutionType);
+}
+
+VideoCaptureParams::VideoCaptureParams()
+ : session_id(0) {}
+
+VideoCaptureCapability::VideoCaptureCapability()
+ : color(PIXEL_FORMAT_UNKNOWN),
+ expected_capture_delay(0),
+ interlaced(false),
+ session_id(0) {}
+
+VideoCaptureCapability::VideoCaptureCapability(
+ int width,
+ int height,
+ int frame_rate,
+ VideoPixelFormat color,
+ int delay,
+ bool interlaced,
+ VideoCaptureResolutionType frame_size_type)
+ : VideoCaptureFormat(width, height, frame_rate, frame_size_type),
+ color(color),
+ expected_capture_delay(delay),
+ interlaced(interlaced),
+ session_id(0) {}
+
+} // namespace media
diff --git a/media/video/capture/video_capture_types.h b/media/video/capture/video_capture_types.h
index 57712727ef..1a170aaf5e 100644
--- a/media/video/capture/video_capture_types.h
+++ b/media/video/capture/video_capture_types.h
@@ -19,68 +19,61 @@ enum VideoCaptureResolutionType {
MaxVideoCaptureResolutionType, // Must be last.
};
-// Parameters for starting video capture and device information.
-struct VideoCaptureParams {
- VideoCaptureParams()
- : width(0),
- height(0),
- frame_per_second(0),
- session_id(0),
- frame_size_type(ConstantResolutionVideoCaptureDevice) {};
+// Color formats from camera.
+enum VideoPixelFormat {
+ PIXEL_FORMAT_UNKNOWN, // Color format not set.
+ PIXEL_FORMAT_I420,
+ PIXEL_FORMAT_YUY2,
+ PIXEL_FORMAT_UYVY,
+ PIXEL_FORMAT_RGB24,
+ PIXEL_FORMAT_ARGB,
+ PIXEL_FORMAT_MJPEG,
+ PIXEL_FORMAT_NV21,
+ PIXEL_FORMAT_YV12,
+};
+
+// Video capture format specification.
+class MEDIA_EXPORT VideoCaptureFormat {
+ public:
+ VideoCaptureFormat();
+ VideoCaptureFormat(int width,
+ int height,
+ int frame_rate,
+ VideoCaptureResolutionType frame_size_type);
+
+ // Checks that all values are in the expected range. All limits are specified
+ // in media::Limits.
+ bool IsValid() const;
+
int width;
int height;
- int frame_per_second;
- VideoCaptureSessionId session_id;
+ int frame_rate;
VideoCaptureResolutionType frame_size_type;
};
-// Capabilities describe the format a camera capture video in.
-struct VideoCaptureCapability {
- // Color formats from camera.
- enum Format {
- kColorUnknown, // Color format not set.
- kI420,
- kYUY2,
- kUYVY,
- kRGB24,
- kARGB,
- kMJPEG,
- kNV21,
- kYV12,
- };
+// Parameters for starting video capture and device information.
+class MEDIA_EXPORT VideoCaptureParams : public VideoCaptureFormat {
+ public:
+ VideoCaptureParams();
+
+ VideoCaptureSessionId session_id;
+};
- VideoCaptureCapability()
- : width(0),
- height(0),
- frame_rate(0),
- color(kColorUnknown),
- expected_capture_delay(0),
- interlaced(false),
- frame_size_type(ConstantResolutionVideoCaptureDevice),
- session_id(0) {};
+// Capabilities describe the format a camera capture video in.
+class MEDIA_EXPORT VideoCaptureCapability : public VideoCaptureFormat {
+ public:
+ VideoCaptureCapability();
VideoCaptureCapability(int width,
int height,
int frame_rate,
- Format color,
+ VideoPixelFormat color,
int delay,
bool interlaced,
- VideoCaptureResolutionType frame_size_type)
- : width(width),
- height(height),
- frame_rate(frame_rate),
- color(color),
- expected_capture_delay(delay),
- interlaced(interlaced),
- frame_size_type(frame_size_type),
- session_id(0) {};
+ VideoCaptureResolutionType frame_size_type);
- int width; // Desired width.
- int height; // Desired height.
- int frame_rate; // Desired frame rate.
- Format color; // Desired video type.
+ VideoPixelFormat color; // Desired video type.
int expected_capture_delay; // Expected delay in millisecond.
bool interlaced; // Need interlace format.
- VideoCaptureResolutionType frame_size_type;
VideoCaptureSessionId session_id;
};
diff --git a/media/video/capture/win/sink_input_pin_win.cc b/media/video/capture/win/sink_input_pin_win.cc
index 7e55531b89..1de1ea1671 100644
--- a/media/video/capture/win/sink_input_pin_win.cc
+++ b/media/video/capture/win/sink_input_pin_win.cc
@@ -114,17 +114,17 @@ bool SinkInputPin::IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) {
}
if (sub_type == kMediaSubTypeI420 &&
pvi->bmiHeader.biCompression == MAKEFOURCC('I', '4', '2', '0')) {
- resulting_capability_.color = VideoCaptureCapability::kI420;
+ resulting_capability_.color = PIXEL_FORMAT_I420;
return true; // This format is acceptable.
}
if (sub_type == MEDIASUBTYPE_YUY2 &&
pvi->bmiHeader.biCompression == MAKEFOURCC('Y', 'U', 'Y', '2')) {
- resulting_capability_.color = VideoCaptureCapability::kYUY2;
+ resulting_capability_.color = PIXEL_FORMAT_YUY2;
return true; // This format is acceptable.
}
if (sub_type == MEDIASUBTYPE_RGB24 &&
pvi->bmiHeader.biCompression == BI_RGB) {
- resulting_capability_.color = VideoCaptureCapability::kRGB24;
+ resulting_capability_.color = PIXEL_FORMAT_RGB24;
return true; // This format is acceptable.
}
return false;
@@ -146,7 +146,7 @@ void SinkInputPin::SetRequestedMediaCapability(
resulting_capability_.width = 0;
resulting_capability_.height = 0;
resulting_capability_.frame_rate = 0;
- resulting_capability_.color = VideoCaptureCapability::kColorUnknown;
+ resulting_capability_.color = PIXEL_FORMAT_UNKNOWN;
resulting_capability_.expected_capture_delay = 0;
resulting_capability_.interlaced = false;
}
diff --git a/media/video/capture/win/video_capture_device_mf_win.cc b/media/video/capture/win/video_capture_device_mf_win.cc
index dea97b7c26..874408fb2c 100644
--- a/media/video/capture/win/video_capture_device_mf_win.cc
+++ b/media/video/capture/win/video_capture_device_mf_win.cc
@@ -70,18 +70,18 @@ bool CreateVideoCaptureDevice(const char* sym_link, IMFMediaSource** source) {
return SUCCEEDED(MFCreateDeviceSource(attributes, source));
}
-bool FormatFromGuid(const GUID& guid, VideoCaptureCapability::Format* format) {
+bool FormatFromGuid(const GUID& guid, VideoPixelFormat* format) {
struct {
const GUID& guid;
- const VideoCaptureCapability::Format format;
+ const VideoPixelFormat format;
} static const kFormatMap[] = {
- { MFVideoFormat_I420, VideoCaptureCapability::kI420 },
- { MFVideoFormat_YUY2, VideoCaptureCapability::kYUY2 },
- { MFVideoFormat_UYVY, VideoCaptureCapability::kUYVY },
- { MFVideoFormat_RGB24, VideoCaptureCapability::kRGB24 },
- { MFVideoFormat_ARGB32, VideoCaptureCapability::kARGB },
- { MFVideoFormat_MJPG, VideoCaptureCapability::kMJPEG },
- { MFVideoFormat_YV12, VideoCaptureCapability::kYV12 },
+ { MFVideoFormat_I420, PIXEL_FORMAT_I420 },
+ { MFVideoFormat_YUY2, PIXEL_FORMAT_YUY2 },
+ { MFVideoFormat_UYVY, PIXEL_FORMAT_UYVY },
+ { MFVideoFormat_RGB24, PIXEL_FORMAT_RGB24 },
+ { MFVideoFormat_ARGB32, PIXEL_FORMAT_ARGB },
+ { MFVideoFormat_MJPG, PIXEL_FORMAT_MJPEG },
+ { MFVideoFormat_YV12, PIXEL_FORMAT_YV12 },
};
for (int i = 0; i < arraysize(kFormatMap); ++i) {
diff --git a/media/video/capture/win/video_capture_device_win.cc b/media/video/capture/win/video_capture_device_win.cc
index 3aaef0486b..307ab2967b 100644
--- a/media/video/capture/win/video_capture_device_win.cc
+++ b/media/video/capture/win/video_capture_device_win.cc
@@ -382,7 +382,7 @@ void VideoCaptureDeviceWin::Allocate(
if (FAILED(hr))
SetErrorState("Failed to set capture device output format");
- if (capability.color == VideoCaptureCapability::kMJPEG &&
+ if (capability.color == PIXEL_FORMAT_MJPEG &&
!mjpg_filter_.get()) {
// Create MJPG filter if we need it.
hr = mjpg_filter_.CreateInstance(CLSID_MjpegDec, NULL, CLSCTX_INPROC);
@@ -401,7 +401,7 @@ void VideoCaptureDeviceWin::Allocate(
}
}
- if (capability.color == VideoCaptureCapability::kMJPEG &&
+ if (capability.color == PIXEL_FORMAT_MJPEG &&
mjpg_filter_.get()) {
// Connect the camera to the MJPEG decoder.
hr = graph_builder_->ConnectDirect(output_capture_pin_, input_mjpg_pin_,
@@ -581,20 +581,20 @@ bool VideoCaptureDeviceWin::CreateCapabilityMap() {
// We can't switch MEDIATYPE :~(.
if (media_type->subtype == kMediaSubTypeI420) {
- capability.color = VideoCaptureCapability::kI420;
+ capability.color = PIXEL_FORMAT_I420;
} else if (media_type->subtype == MEDIASUBTYPE_IYUV) {
- // This is identical to kI420.
- capability.color = VideoCaptureCapability::kI420;
+ // This is identical to PIXEL_FORMAT_I420.
+ capability.color = PIXEL_FORMAT_I420;
} else if (media_type->subtype == MEDIASUBTYPE_RGB24) {
- capability.color = VideoCaptureCapability::kRGB24;
+ capability.color = PIXEL_FORMAT_RGB24;
} else if (media_type->subtype == MEDIASUBTYPE_YUY2) {
- capability.color = VideoCaptureCapability::kYUY2;
+ capability.color = PIXEL_FORMAT_YUY2;
} else if (media_type->subtype == MEDIASUBTYPE_MJPG) {
- capability.color = VideoCaptureCapability::kMJPEG;
+ capability.color = PIXEL_FORMAT_MJPEG;
} else if (media_type->subtype == MEDIASUBTYPE_UYVY) {
- capability.color = VideoCaptureCapability::kUYVY;
+ capability.color = PIXEL_FORMAT_UYVY;
} else if (media_type->subtype == MEDIASUBTYPE_ARGB32) {
- capability.color = VideoCaptureCapability::kARGB;
+ capability.color = PIXEL_FORMAT_ARGB;
} else {
WCHAR guid_str[128];
StringFromGUID2(media_type->subtype, guid_str, arraysize(guid_str));
diff --git a/media/video_capture_android_jni_headers.target.darwin-arm.mk b/media/video_capture_android_jni_headers.target.darwin-arm.mk
index 643c5ec372..b4d7d604e5 100644
--- a/media/video_capture_android_jni_headers.target.darwin-arm.mk
+++ b/media/video_capture_android_jni_headers.target.darwin-arm.mk
@@ -78,6 +78,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -90,6 +91,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -156,6 +158,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -168,6 +171,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
diff --git a/media/video_capture_android_jni_headers.target.darwin-mips.mk b/media/video_capture_android_jni_headers.target.darwin-mips.mk
index d02f39e194..3af7894a66 100644
--- a/media/video_capture_android_jni_headers.target.darwin-mips.mk
+++ b/media/video_capture_android_jni_headers.target.darwin-mips.mk
@@ -77,6 +77,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -89,6 +90,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -154,6 +156,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -166,6 +169,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
diff --git a/media/video_capture_android_jni_headers.target.darwin-x86.mk b/media/video_capture_android_jni_headers.target.darwin-x86.mk
index 481ac3e61d..1b76e94507 100644
--- a/media/video_capture_android_jni_headers.target.darwin-x86.mk
+++ b/media/video_capture_android_jni_headers.target.darwin-x86.mk
@@ -80,6 +80,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -92,6 +93,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -161,6 +163,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -173,6 +176,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
diff --git a/media/video_capture_android_jni_headers.target.linux-arm.mk b/media/video_capture_android_jni_headers.target.linux-arm.mk
index 643c5ec372..b4d7d604e5 100644
--- a/media/video_capture_android_jni_headers.target.linux-arm.mk
+++ b/media/video_capture_android_jni_headers.target.linux-arm.mk
@@ -78,6 +78,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -90,6 +91,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -156,6 +158,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -168,6 +171,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
diff --git a/media/video_capture_android_jni_headers.target.linux-mips.mk b/media/video_capture_android_jni_headers.target.linux-mips.mk
index d02f39e194..3af7894a66 100644
--- a/media/video_capture_android_jni_headers.target.linux-mips.mk
+++ b/media/video_capture_android_jni_headers.target.linux-mips.mk
@@ -77,6 +77,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -89,6 +90,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -154,6 +156,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -166,6 +169,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
diff --git a/media/video_capture_android_jni_headers.target.linux-x86.mk b/media/video_capture_android_jni_headers.target.linux-x86.mk
index 481ac3e61d..1b76e94507 100644
--- a/media/video_capture_android_jni_headers.target.linux-x86.mk
+++ b/media/video_capture_android_jni_headers.target.linux-x86.mk
@@ -80,6 +80,7 @@ MY_CFLAGS_Debug := \
MY_DEFS_Debug := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -92,6 +93,7 @@ MY_DEFS_Debug := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
@@ -161,6 +163,7 @@ MY_CFLAGS_Release := \
MY_DEFS_Release := \
'-DANGLE_DX11' \
+ '-DWTF_VECTOR_INITIAL_SIZE=16' \
'-D_FILE_OFFSET_BITS=64' \
'-DNO_TCMALLOC' \
'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
@@ -173,6 +176,7 @@ MY_DEFS_Release := \
'-DENABLE_GPU=1' \
'-DUSE_OPENSSL=1' \
'-DENABLE_EGLIMAGE=1' \
+ '-DCLD_VERSION=1' \
'-D__STDC_CONSTANT_MACROS' \
'-D__STDC_FORMAT_MACROS' \
'-DANDROID' \
diff --git a/media/webm/webm_audio_client.cc b/media/webm/webm_audio_client.cc
index e52f44b4a9..1ef640c0dc 100644
--- a/media/webm/webm_audio_client.cc
+++ b/media/webm/webm_audio_client.cc
@@ -26,12 +26,15 @@ void WebMAudioClient::Reset() {
bool WebMAudioClient::InitializeConfig(
const std::string& codec_id, const std::vector<uint8>& codec_private,
- bool is_encrypted, AudioDecoderConfig* config) {
+ int64 seek_preroll, int64 codec_delay, bool is_encrypted,
+ AudioDecoderConfig* config) {
DCHECK(config);
AudioCodec audio_codec = kUnknownAudioCodec;
if (codec_id == "A_VORBIS") {
audio_codec = kCodecVorbis;
+ } else if (codec_id == "A_OPUS") {
+ audio_codec = kCodecOpus;
} else {
MEDIA_LOG(log_cb_) << "Unsupported audio codec_id " << codec_id;
return false;
@@ -63,8 +66,14 @@ bool WebMAudioClient::InitializeConfig(
}
config->Initialize(
- audio_codec, kSampleFormatPlanarF32, channel_layout,
- samples_per_second, extra_data, extra_data_size, is_encrypted, true);
+ audio_codec,
+ (audio_codec == kCodecOpus) ? kSampleFormatS16 : kSampleFormatPlanarF32,
+ channel_layout,
+ samples_per_second, extra_data, extra_data_size, is_encrypted, true,
+ base::TimeDelta::FromMicroseconds(
+ (seek_preroll != -1 ? seek_preroll : 0) / 1000),
+ base::TimeDelta::FromMicroseconds(
+ (codec_delay != -1 ? codec_delay : 0) / 1000));
return config->IsValidConfig();
}
diff --git a/media/webm/webm_audio_client.h b/media/webm/webm_audio_client.h
index 1338f5cbd6..7874cec4be 100644
--- a/media/webm/webm_audio_client.h
+++ b/media/webm/webm_audio_client.h
@@ -31,6 +31,8 @@ class WebMAudioClient : public WebMParserClient {
// audio track element fields.
bool InitializeConfig(const std::string& codec_id,
const std::vector<uint8>& codec_private,
+ const int64 seek_preroll,
+ const int64 codec_delay,
bool is_encrypted,
AudioDecoderConfig* config);
diff --git a/media/webm/webm_cluster_parser.cc b/media/webm/webm_cluster_parser.cc
index 9991d6b4d1..87cccae4da 100644
--- a/media/webm/webm_cluster_parser.cc
+++ b/media/webm/webm_cluster_parser.cc
@@ -64,6 +64,7 @@ WebMClusterParser::WebMClusterParser(
block_duration_(-1),
block_add_id_(-1),
block_additional_data_size_(-1),
+ discard_padding_(-1),
cluster_timecode_(-1),
cluster_start_time_(kNoTimestamp()),
cluster_ended_(false),
@@ -137,6 +138,8 @@ WebMParserClient* WebMClusterParser::OnListStart(int id) {
block_data_.reset();
block_data_size_ = -1;
block_duration_ = -1;
+ discard_padding_ = -1;
+ discard_padding_set_ = false;
} else if (id == kWebMIdBlockAdditions) {
block_add_id_ = -1;
block_additional_data_.reset();
@@ -158,13 +161,16 @@ bool WebMClusterParser::OnListEnd(int id) {
bool result = ParseBlock(false, block_data_.get(), block_data_size_,
block_additional_data_.get(),
- block_additional_data_size_, block_duration_);
+ block_additional_data_size_, block_duration_,
+ discard_padding_set_ ? discard_padding_ : 0);
block_data_.reset();
block_data_size_ = -1;
block_duration_ = -1;
block_add_id_ = -1;
block_additional_data_.reset();
block_additional_data_size_ = -1;
+ discard_padding_ = -1;
+ discard_padding_set_ = false;
return result;
}
@@ -180,6 +186,12 @@ bool WebMClusterParser::OnUInt(int id, int64 val) {
case kWebMIdBlockAddID:
dst = &block_add_id_;
break;
+ case kWebMIdDiscardPadding:
+ if (discard_padding_set_)
+ return false;
+ discard_padding_set_ = true;
+ discard_padding_ = val;
+ return true;
default:
return true;
}
@@ -191,7 +203,8 @@ bool WebMClusterParser::OnUInt(int id, int64 val) {
bool WebMClusterParser::ParseBlock(bool is_simple_block, const uint8* buf,
int size, const uint8* additional,
- int additional_size, int duration) {
+ int additional_size, int duration,
+ int64 discard_padding) {
if (size < 4)
return false;
@@ -219,13 +232,14 @@ bool WebMClusterParser::ParseBlock(bool is_simple_block, const uint8* buf,
const uint8* frame_data = buf + 4;
int frame_size = size - (frame_data - buf);
return OnBlock(is_simple_block, track_num, timecode, duration, flags,
- frame_data, frame_size, additional, additional_size);
+ frame_data, frame_size, additional, additional_size,
+ discard_padding);
}
bool WebMClusterParser::OnBinary(int id, const uint8* data, int size) {
switch (id) {
case kWebMIdSimpleBlock:
- return ParseBlock(true, data, size, NULL, -1, -1);
+ return ParseBlock(true, data, size, NULL, -1, -1, 0);
case kWebMIdBlock:
if (block_data_) {
@@ -270,7 +284,8 @@ bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
int block_duration,
int flags,
const uint8* data, int size,
- const uint8* additional, int additional_size) {
+ const uint8* additional, int additional_size,
+ int64 discard_padding) {
DCHECK_GE(size, 0);
if (cluster_timecode_ == -1) {
MEDIA_LOG(log_cb_) << "Got a block before cluster timecode.";
@@ -350,6 +365,11 @@ bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
block_duration * timecode_multiplier_));
}
+ if (discard_padding != 0) {
+ buffer->set_discard_padding(base::TimeDelta::FromMicroseconds(
+ discard_padding / 1000));
+ }
+
return track->AddBuffer(buffer);
}
diff --git a/media/webm/webm_cluster_parser.h b/media/webm/webm_cluster_parser.h
index e156d47c23..5aa957cdee 100644
--- a/media/webm/webm_cluster_parser.h
+++ b/media/webm/webm_cluster_parser.h
@@ -110,10 +110,12 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
virtual bool OnBinary(int id, const uint8* data, int size) OVERRIDE;
bool ParseBlock(bool is_simple_block, const uint8* buf, int size,
- const uint8* additional, int additional_size, int duration);
+ const uint8* additional, int additional_size, int duration,
+ int64 discard_padding);
bool OnBlock(bool is_simple_block, int track_num, int timecode, int duration,
int flags, const uint8* data, int size,
- const uint8* additional, int additional_size);
+ const uint8* additional, int additional_size,
+ int64 discard_padding);
// Resets the Track objects associated with each text track.
void ResetTextTracks();
@@ -137,6 +139,8 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
int64 block_add_id_;
scoped_ptr<uint8[]> block_additional_data_;
int block_additional_data_size_;
+ int64 discard_padding_;
+ bool discard_padding_set_;
int64 cluster_timecode_;
base::TimeDelta cluster_start_time_;
diff --git a/media/webm/webm_constants.h b/media/webm/webm_constants.h
index cda45e00d4..3a35dbab37 100644
--- a/media/webm/webm_constants.h
+++ b/media/webm/webm_constants.h
@@ -57,6 +57,7 @@ const int kWebMIdChapterTranslateID = 0x69A5;
const int kWebMIdChapterUID = 0x73C4;
const int kWebMIdCluster = 0x1F43B675;
const int kWebMIdCodecDecodeAll = 0xAA;
+const int kWebMIdCodecDelay = 0x56AA;
const int kWebMIdCodecID = 0x86;
const int kWebMIdCodecName = 0x258688;
const int kWebMIdCodecPrivate = 0x63A2;
@@ -91,6 +92,7 @@ const int kWebMIdCueTrack = 0xF7;
const int kWebMIdCueTrackPositions = 0xB7;
const int kWebMIdDateUTC = 0x4461;
const int kWebMIdDefaultDuration = 0x23E383;
+const int kWebMIdDiscardPadding = 0x75A2;
const int kWebMIdDisplayHeight = 0x54BA;
const int kWebMIdDisplayUnit = 0x54B2;
const int kWebMIdDisplayWidth = 0x54B0;
@@ -118,6 +120,7 @@ const int kWebMIdFlagEnabled = 0xB9;
const int kWebMIdFlagForced = 0x55AA;
const int kWebMIdFlagInterlaced = 0x9A;
const int kWebMIdFlagLacing = 0x9C;
+const int kWebMIdFrameRate = 0x2383E3;
const int kWebMIdInfo = 0x1549A966;
const int kWebMIdJoinBlocks = 0xE9;
const int kWebMIdLaceNumber = 0xCC;
@@ -147,6 +150,7 @@ const int kWebMIdSeek = 0x4DBB;
const int kWebMIdSeekHead = 0x114D9B74;
const int kWebMIdSeekID = 0x53AB;
const int kWebMIdSeekPosition = 0x53AC;
+const int kWebMIdSeekPreRoll = 0x56BB;
const int kWebMIdSegment = 0x18538067;
const int kWebMIdSegmentFamily = 0x4444;
const int kWebMIdSegmentFilename = 0x7384;
diff --git a/media/webm/webm_parser.cc b/media/webm/webm_parser.cc
index 30e5c1b5e5..f1509abb83 100644
--- a/media/webm/webm_parser.cc
+++ b/media/webm/webm_parser.cc
@@ -118,6 +118,7 @@ static const ElementIdInfo kBlockGroupIds[] = {
{UINT, kWebMIdReferencePriority},
{BINARY, kWebMIdReferenceBlock},
{BINARY, kWebMIdCodecState},
+ {UINT, kWebMIdDiscardPadding},
{LIST, kWebMIdSlices},
};
@@ -163,6 +164,8 @@ static const ElementIdInfo kTrackEntryIds[] = {
{UINT, kWebMIdAttachmentLink},
{UINT, kWebMIdCodecDecodeAll},
{UINT, kWebMIdTrackOverlay},
+ {UINT, kWebMIdCodecDelay},
+ {UINT, kWebMIdSeekPreRoll},
{LIST, kWebMIdTrackTranslate},
{LIST, kWebMIdVideo},
{LIST, kWebMIdAudio},
@@ -191,6 +194,7 @@ static const ElementIdInfo kVideoIds[] = {
{UINT, kWebMIdDisplayUnit},
{UINT, kWebMIdAspectRatioType},
{BINARY, kWebMIdColorSpace},
+ {FLOAT, kWebMIdFrameRate},
};
static const ElementIdInfo kAudioIds[] = {
diff --git a/media/webm/webm_tracks_parser.cc b/media/webm/webm_tracks_parser.cc
index 67bac044e4..aa28d6feef 100644
--- a/media/webm/webm_tracks_parser.cc
+++ b/media/webm/webm_tracks_parser.cc
@@ -31,6 +31,8 @@ static TextKind CodecIdToTextKind(const std::string& codec_id) {
WebMTracksParser::WebMTracksParser(const LogCB& log_cb, bool ignore_text_tracks)
: track_type_(-1),
track_num_(-1),
+ seek_preroll_(-1),
+ codec_delay_(-1),
audio_track_num_(-1),
video_track_num_(-1),
ignore_text_tracks_(ignore_text_tracks),
@@ -161,8 +163,8 @@ bool WebMTracksParser::OnListEnd(int id) {
DCHECK(!audio_decoder_config_.IsValidConfig());
if (!audio_client_.InitializeConfig(
- codec_id_, codec_private_, !audio_encryption_key_id_.empty(),
- &audio_decoder_config_)) {
+ codec_id_, codec_private_, seek_preroll_, codec_delay_,
+ !audio_encryption_key_id_.empty(), &audio_decoder_config_)) {
return false;
}
} else {
@@ -226,6 +228,12 @@ bool WebMTracksParser::OnUInt(int id, int64 val) {
case kWebMIdTrackType:
dst = &track_type_;
break;
+ case kWebMIdSeekPreRoll:
+ dst = &seek_preroll_;
+ break;
+ case kWebMIdCodecDelay:
+ dst = &codec_delay_;
+ break;
default:
return true;
}
diff --git a/media/webm/webm_tracks_parser.h b/media/webm/webm_tracks_parser.h
index 81588e4b51..d3993207a1 100644
--- a/media/webm/webm_tracks_parser.h
+++ b/media/webm/webm_tracks_parser.h
@@ -83,6 +83,8 @@ class MEDIA_EXPORT WebMTracksParser : public WebMParserClient {
std::string track_language_;
std::string codec_id_;
std::vector<uint8> codec_private_;
+ int64 seek_preroll_;
+ int64 codec_delay_;
scoped_ptr<WebMContentEncodingsClient> track_content_encodings_client_;
int64 audio_track_num_;