author     Ben Murdoch <benm@google.com>   2014-04-16 11:17:03 +0100
committer  Ben Murdoch <benm@google.com>   2014-04-16 11:17:03 +0100
commit     a02191e04bc25c4935f804f2c080ae28663d096d (patch)
tree       3cf38961650b5734763e473336009287244306ac /media
parent     8bad47e0f7d0c250a0443923cceb52f4a4abcd40 (diff)
download   chromium_org-a02191e04bc25c4935f804f2c080ae28663d096d.tar.gz
Merge from Chromium at DEPS revision 263965

This commit was generated by merge_to_master.py.

Change-Id: Ia1121eddd985123f160afde6372525c3d25975bf
Diffstat (limited to 'media')
-rw-r--r--  media/audio/audio_input_controller.cc | 12
-rw-r--r--  media/audio/audio_manager.h | 5
-rw-r--r--  media/audio/audio_manager_base.cc | 28
-rw-r--r--  media/audio/audio_manager_base.h | 2
-rw-r--r--  media/audio/audio_output_controller.cc | 11
-rw-r--r--  media/audio/audio_output_dispatcher.h | 7
-rw-r--r--  media/audio/audio_output_dispatcher_impl.cc | 13
-rw-r--r--  media/audio/audio_output_dispatcher_impl.h | 3
-rw-r--r--  media/audio/audio_output_proxy_unittest.cc | 71
-rw-r--r--  media/audio/audio_output_resampler.cc | 31
-rw-r--r--  media/audio/audio_output_resampler.h | 2
-rw-r--r--  media/audio/mock_audio_manager.cc | 2
-rw-r--r--  media/audio/mock_audio_manager.h | 2
-rw-r--r--  media/audio/pulse/audio_manager_pulse.cc | 8
-rw-r--r--  media/base/android/media_source_player_unittest.cc | 9
-rw-r--r--  media/base/stream_parser_buffer.cc | 4
-rw-r--r--  media/base/video_decoder.cc | 4
-rw-r--r--  media/base/video_decoder.h | 5
-rw-r--r--  media/base/video_frame.cc | 4
-rw-r--r--  media/cast/audio_sender/audio_encoder.cc | 17
-rw-r--r--  media/cast/logging/encoding_event_subscriber.cc | 6
-rw-r--r--  media/cast/logging/encoding_event_subscriber_unittest.cc | 16
-rw-r--r--  media/cast/logging/logging_defines.cc | 3
-rw-r--r--  media/cast/logging/logging_defines.h | 6
-rw-r--r--  media/cast/logging/logging_impl.cc | 13
-rw-r--r--  media/cast/logging/logging_impl.h | 6
-rw-r--r--  media/cast/logging/logging_impl_unittest.cc | 12
-rw-r--r--  media/cast/logging/logging_raw.cc | 18
-rw-r--r--  media/cast/logging/logging_raw.h | 10
-rw-r--r--  media/cast/logging/logging_raw_unittest.cc | 8
-rw-r--r--  media/cast/logging/proto/raw_events.proto | 7
-rw-r--r--  media/cast/logging/simple_event_subscriber_unittest.cc | 4
-rw-r--r--  media/cast/logging/stats_event_subscriber_unittest.cc | 4
-rw-r--r--  media/cast/video_sender/video_sender.cc | 8
-rw-r--r--  media/cdm/ppapi/cdm_adapter.cc | 6
-rw-r--r--  media/cdm/ppapi/external_clear_key/cdm_video_decoder.cc | 3
-rw-r--r--  media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc | 8
-rw-r--r--  media/filters/decoder_stream_traits.cc | 6
-rw-r--r--  media/filters/decoder_stream_traits.h | 2
-rw-r--r--  media/filters/gpu_video_decoder.cc | 5
-rw-r--r--  media/filters/gpu_video_decoder.h | 1
-rw-r--r--  media/filters/pipeline_integration_test.cc | 5
-rw-r--r--  media/filters/pipeline_integration_test_base.cc | 3
-rw-r--r--  media/filters/pipeline_integration_test_base.h | 1
-rw-r--r--  media/filters/source_buffer_stream.cc | 5
-rw-r--r--  media/filters/source_buffer_stream_unittest.cc | 12
-rw-r--r--  media/filters/video_frame_stream_unittest.cc | 2
-rw-r--r--  media/filters/video_renderer_impl.cc | 8
-rw-r--r--  media/filters/video_renderer_impl.h | 10
-rw-r--r--  media/filters/video_renderer_impl_unittest.cc | 6
-rw-r--r--  media/filters/vpx_video_decoder.cc | 4
-rw-r--r--  media/filters/vpx_video_decoder.h | 1
-rw-r--r--  media/formats/mp4/mp4_stream_parser.cc | 6
-rw-r--r--  media/formats/mp4/mp4_stream_parser_unittest.cc | 7
-rw-r--r--  media/formats/webm/webm_cluster_parser.cc | 23
-rw-r--r--  media/formats/webm/webm_cluster_parser.h | 22
-rw-r--r--  media/media.target.darwin-arm.mk | 8
-rw-r--r--  media/media.target.darwin-mips.mk | 8
-rw-r--r--  media/media.target.darwin-x86.mk | 8
-rw-r--r--  media/media.target.darwin-x86_64.mk | 8
-rw-r--r--  media/media.target.linux-arm.mk | 8
-rw-r--r--  media/media.target.linux-mips.mk | 8
-rw-r--r--  media/media.target.linux-x86.mk | 8
-rw-r--r--  media/media.target.linux-x86_64.mk | 8
-rw-r--r--  media/midi/midi_manager.cc | 21
-rw-r--r--  media/midi/midi_manager.h | 17
-rw-r--r--  media/midi/midi_manager_alsa.cc | 6
-rw-r--r--  media/midi/midi_manager_alsa.h | 2
-rw-r--r--  media/midi/midi_manager_mac.cc | 10
-rw-r--r--  media/midi/midi_manager_mac.h | 2
-rw-r--r--  media/midi/midi_manager_usb.cc | 4
-rw-r--r--  media/midi/midi_manager_usb.h | 2
-rw-r--r--  media/midi/midi_manager_usb_unittest.cc | 5
-rw-r--r--  media/midi/midi_manager_win.cc | 4
-rw-r--r--  media/midi/midi_manager_win.h | 2
-rw-r--r--  media/midi/midi_result.h | 19
-rw-r--r--  media/test/data/README | 1
-rw-r--r--  media/test/data/bear-1280x720-a_frag-cenc_missing-saiz-saio.mp4 | bin 0 -> 1251 bytes
-rw-r--r--  media/tools/player_x11/player_x11.cc | 4
-rw-r--r--  media/video/capture/mac/video_capture_device_mac.mm | 89
-rw-r--r--  media/video/capture/video_capture_device.h | 31
-rw-r--r--  media/video/capture/video_capture_device_unittest.cc | 70
82 files changed, 422 insertions(+), 438 deletions(-)
diff --git a/media/audio/audio_input_controller.cc b/media/audio/audio_input_controller.cc
index 6a62b6d73a..70647cdfeb 100644
--- a/media/audio/audio_input_controller.cc
+++ b/media/audio/audio_input_controller.cc
@@ -202,15 +202,12 @@ void AudioInputController::DoCreateForStream(
DCHECK(!no_data_timer_.get());
- // This is a fix for crbug.com/357501. The timer can trigger when closing
- // the lid on Macs, which causes more problems than the timer fixes.
- // Also, in crbug.com/357569, the goal is to remove usage of this timer
- // since it was added to solve a crash on Windows that no longer can be
- // reproduced.
+ // The timer is enabled for logging purposes. The NO_DATA_ERROR triggered
+ // from the timer must be ignored by the EventHandler.
// TODO(henrika): remove usage of timer when it has been verified on Canary
// that we are safe doing so. Goal is to get rid of |no_data_timer_| and
- // everything that is tied to it.
- enable_nodata_timer = false;
+ // everything that is tied to it. crbug.com/357569.
+ enable_nodata_timer = true;
if (enable_nodata_timer) {
// Create the data timer which will call DoCheckForNoData(). The timer
@@ -324,7 +321,6 @@ void AudioInputController::DoCheckForNoData() {
// one second since a data packet was recorded. This can happen if a
// capture device has been removed or disabled.
handler_->OnError(this, NO_DATA_ERROR);
- return;
}
// Mark data as non-active. The flag will be re-enabled in OnData() each
diff --git a/media/audio/audio_manager.h b/media/audio/audio_manager.h
index a15dab5758..ca385706c0 100644
--- a/media/audio/audio_manager.h
+++ b/media/audio/audio_manager.h
@@ -176,11 +176,6 @@ class MEDIA_EXPORT AudioManager {
virtual scoped_ptr<AudioLog> CreateAudioLog(
AudioLogFactory::AudioComponent component) = 0;
- // Called when a component has detected a OS level audio wedge. Shuts down
- // all active audio streams and then restarts them transparently. See
- // http://crbug.com/160920
- virtual void FixWedgedAudio() = 0;
-
protected:
AudioManager();
diff --git a/media/audio/audio_manager_base.cc b/media/audio/audio_manager_base.cc
index eab950d588..60f6d238f9 100644
--- a/media/audio/audio_manager_base.cc
+++ b/media/audio/audio_manager_base.cc
@@ -400,32 +400,4 @@ scoped_ptr<AudioLog> AudioManagerBase::CreateAudioLog(
return audio_log_factory_->CreateAudioLog(component);
}
-void AudioManagerBase::FixWedgedAudio() {
- DCHECK(task_runner_->BelongsToCurrentThread());
-#if defined(OS_MACOSX)
- // Through trial and error, we've found that one way to restore audio after a
- // hang is to close all outstanding audio streams. Once all streams have been
- // closed, new streams appear to work correctly.
- //
- // In Chrome terms, this means we need to ask all AudioOutputDispatchers to
- // close all Open()'d streams. Once all streams across all dispatchers have
- // been closed, we ask for all previously Start()'d streams to be recreated
- // using the same AudioSourceCallback they had before.
- //
- // Since this operation takes place on the audio thread we can be sure that no
- // other state-changing stream operations will take place while the fix is in
- // progress.
- //
- // See http://crbug.com/160920 for additional details.
- for (AudioOutputDispatchers::iterator it = output_dispatchers_.begin();
- it != output_dispatchers_.end(); ++it) {
- (*it)->dispatcher->CloseStreamsForWedgeFix();
- }
- for (AudioOutputDispatchers::iterator it = output_dispatchers_.begin();
- it != output_dispatchers_.end(); ++it) {
- (*it)->dispatcher->RestartStreamsForWedgeFix();
- }
-#endif
-}
-
} // namespace media
diff --git a/media/audio/audio_manager_base.h b/media/audio/audio_manager_base.h
index 4c088fb54f..237eaeedd2 100644
--- a/media/audio/audio_manager_base.h
+++ b/media/audio/audio_manager_base.h
@@ -115,8 +115,6 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual scoped_ptr<AudioLog> CreateAudioLog(
AudioLogFactory::AudioComponent component) OVERRIDE;
- virtual void FixWedgedAudio() OVERRIDE;
-
protected:
AudioManagerBase(AudioLogFactory* audio_log_factory);
diff --git a/media/audio/audio_output_controller.cc b/media/audio/audio_output_controller.cc
index 6d30f4b1fb..4dafadca4e 100644
--- a/media/audio/audio_output_controller.cc
+++ b/media/audio/audio_output_controller.cc
@@ -435,15 +435,8 @@ void AudioOutputController::WedgeCheck() {
// If we should be playing and we haven't, that's a wedge.
if (state_ == kPlaying) {
- const bool playback_success =
- base::AtomicRefCountIsOne(&on_more_io_data_called_);
-
- UMA_HISTOGRAM_BOOLEAN(
- "Media.AudioOutputControllerPlaybackStartupSuccess", playback_success);
-
- // Let the AudioManager try and fix it.
- if (!playback_success)
- audio_manager_->FixWedgedAudio();
+ UMA_HISTOGRAM_BOOLEAN("Media.AudioOutputControllerPlaybackStartupSuccess",
+ base::AtomicRefCountIsOne(&on_more_io_data_called_));
}
}
diff --git a/media/audio/audio_output_dispatcher.h b/media/audio/audio_output_dispatcher.h
index d070b6b243..079cba0ed7 100644
--- a/media/audio/audio_output_dispatcher.h
+++ b/media/audio/audio_output_dispatcher.h
@@ -65,13 +65,6 @@ class MEDIA_EXPORT AudioOutputDispatcher
// Called on the audio thread when the AudioManager is shutting down.
virtual void Shutdown() = 0;
- // Called by the AudioManager to restart streams when a wedge is detected. A
- // wedge means the OS failed to request any audio after StartStream(). When a
- // wedge is detected all streams across all dispatchers must be closed. After
- // all streams are closed, streams are restarted. See http://crbug.com/160920
- virtual void CloseStreamsForWedgeFix() = 0;
- virtual void RestartStreamsForWedgeFix() = 0;
-
const std::string& device_id() const { return device_id_; }
protected:
diff --git a/media/audio/audio_output_dispatcher_impl.cc b/media/audio/audio_output_dispatcher_impl.cc
index 4989900ec2..0cb3db85ca 100644
--- a/media/audio/audio_output_dispatcher_impl.cc
+++ b/media/audio/audio_output_dispatcher_impl.cc
@@ -172,17 +172,4 @@ void AudioOutputDispatcherImpl::CloseIdleStreams(size_t keep_alive) {
idle_streams_.erase(idle_streams_.begin() + keep_alive, idle_streams_.end());
}
-void AudioOutputDispatcherImpl::CloseStreamsForWedgeFix() {
- DCHECK(task_runner_->BelongsToCurrentThread());
- CloseAllIdleStreams();
-}
-
-void AudioOutputDispatcherImpl::RestartStreamsForWedgeFix() {
- DCHECK(task_runner_->BelongsToCurrentThread());
-
- // Should only be called when the dispatcher is used with fake streams which
- // don't need to be shutdown or restarted.
- CHECK_EQ(params_.format(), AudioParameters::AUDIO_FAKE);
-}
-
} // namespace media
diff --git a/media/audio/audio_output_dispatcher_impl.h b/media/audio/audio_output_dispatcher_impl.h
index cb1ddb9d9c..52d647a3be 100644
--- a/media/audio/audio_output_dispatcher_impl.h
+++ b/media/audio/audio_output_dispatcher_impl.h
@@ -61,9 +61,6 @@ class MEDIA_EXPORT AudioOutputDispatcherImpl : public AudioOutputDispatcher {
virtual void Shutdown() OVERRIDE;
- virtual void CloseStreamsForWedgeFix() OVERRIDE;
- virtual void RestartStreamsForWedgeFix() OVERRIDE;
-
private:
friend class base::RefCountedThreadSafe<AudioOutputDispatcherImpl>;
virtual ~AudioOutputDispatcherImpl();
diff --git a/media/audio/audio_output_proxy_unittest.cc b/media/audio/audio_output_proxy_unittest.cc
index 534a6d9daa..b4afe5ae23 100644
--- a/media/audio/audio_output_proxy_unittest.cc
+++ b/media/audio/audio_output_proxy_unittest.cc
@@ -673,75 +673,4 @@ TEST_F(AudioOutputResamplerTest, LowLatencyOpenEventuallyFails) {
EXPECT_TRUE(stream2.start_called());
}
-// Ensures the methods used to fix audio output wedges are working correctly.
-TEST_F(AudioOutputResamplerTest, WedgeFix) {
- MockAudioOutputStream stream1(&manager_, params_);
- MockAudioOutputStream stream2(&manager_, params_);
- MockAudioOutputStream stream3(&manager_, params_);
-
- // Setup the mock such that all three streams are successfully created.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
- .WillOnce(Return(&stream1))
- .WillOnce(Return(&stream2))
- .WillOnce(Return(&stream3));
-
- // Stream1 should be able to successfully open and start.
- EXPECT_CALL(stream1, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream1, SetVolume(_));
- EXPECT_CALL(stream2, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream2, SetVolume(_));
-
- // Open and start the first proxy and stream.
- AudioOutputProxy* proxy1 = new AudioOutputProxy(resampler_.get());
- EXPECT_TRUE(proxy1->Open());
- proxy1->Start(&callback_);
- OnStart();
-
- // Open, but do not start the second proxy.
- AudioOutputProxy* proxy2 = new AudioOutputProxy(resampler_.get());
- EXPECT_TRUE(proxy2->Open());
-
- // Open, start and then stop the third proxy.
- AudioOutputProxy* proxy3 = new AudioOutputProxy(resampler_.get());
- EXPECT_TRUE(proxy3->Open());
- proxy3->Start(&callback_);
- OnStart();
- proxy3->Stop();
-
- // Wait for stream to timeout and shutdown.
- WaitForCloseTimer(&stream2);
-
- EXPECT_CALL(stream1, Close());
- resampler_->CloseStreamsForWedgeFix();
-
- // Don't pump the MessageLoop between CloseStreamsForWedgeFix() and
- // RestartStreamsForWedgeFix() to simulate intended usage. The OnStart() call
- // will take care of necessary work.
-
- // Stream3 should take Stream1's place after RestartStreamsForWedgeFix(). No
- // additional streams should be opened for proxy2 and proxy3.
- EXPECT_CALL(stream3, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream3, SetVolume(_));
-
- resampler_->RestartStreamsForWedgeFix();
- OnStart();
-
- // Perform the required Stop()/Close() shutdown dance for each proxy.
- proxy3->Close();
- proxy2->Close();
- proxy1->Stop();
- CloseAndWaitForCloseTimer(proxy1, &stream3);
-
- // Wait for all of the messages to fly and then verify stream behavior.
- EXPECT_TRUE(stream1.stop_called());
- EXPECT_TRUE(stream1.start_called());
- EXPECT_TRUE(stream2.stop_called());
- EXPECT_TRUE(stream2.start_called());
- EXPECT_TRUE(stream3.stop_called());
- EXPECT_TRUE(stream3.start_called());
-}
-
} // namespace media
diff --git a/media/audio/audio_output_resampler.cc b/media/audio/audio_output_resampler.cc
index 376b113f7f..4863351614 100644
--- a/media/audio/audio_output_resampler.cc
+++ b/media/audio/audio_output_resampler.cc
@@ -298,37 +298,6 @@ void AudioOutputResampler::Shutdown() {
DCHECK(callbacks_.empty());
}
-void AudioOutputResampler::CloseStreamsForWedgeFix() {
- DCHECK(task_runner_->BelongsToCurrentThread());
-
- // Stop and close all active streams. Once all streams across all dispatchers
- // have been closed the AudioManager will call RestartStreamsForWedgeFix().
- for (CallbackMap::iterator it = callbacks_.begin(); it != callbacks_.end();
- ++it) {
- if (it->second->started())
- dispatcher_->StopStream(it->first);
- dispatcher_->CloseStream(it->first);
- }
-
- // Close all idle streams as well.
- dispatcher_->CloseStreamsForWedgeFix();
-}
-
-void AudioOutputResampler::RestartStreamsForWedgeFix() {
- DCHECK(task_runner_->BelongsToCurrentThread());
- // By opening all streams first and then starting them one by one we ensure
- // the dispatcher only opens streams for those which will actually be used.
- for (CallbackMap::iterator it = callbacks_.begin(); it != callbacks_.end();
- ++it) {
- dispatcher_->OpenStream();
- }
- for (CallbackMap::iterator it = callbacks_.begin(); it != callbacks_.end();
- ++it) {
- if (it->second->started())
- dispatcher_->StartStream(it->second, it->first);
- }
-}
-
OnMoreDataConverter::OnMoreDataConverter(const AudioParameters& input_params,
const AudioParameters& output_params)
: io_ratio_(static_cast<double>(input_params.GetBytesPerSecond()) /
diff --git a/media/audio/audio_output_resampler.h b/media/audio/audio_output_resampler.h
index 80c9d77d16..fa488aa195 100644
--- a/media/audio/audio_output_resampler.h
+++ b/media/audio/audio_output_resampler.h
@@ -52,8 +52,6 @@ class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
double volume) OVERRIDE;
virtual void CloseStream(AudioOutputProxy* stream_proxy) OVERRIDE;
virtual void Shutdown() OVERRIDE;
- virtual void CloseStreamsForWedgeFix() OVERRIDE;
- virtual void RestartStreamsForWedgeFix() OVERRIDE;
private:
friend class base::RefCountedThreadSafe<AudioOutputResampler>;
diff --git a/media/audio/mock_audio_manager.cc b/media/audio/mock_audio_manager.cc
index 318bf09269..e774b8cf2f 100644
--- a/media/audio/mock_audio_manager.cc
+++ b/media/audio/mock_audio_manager.cc
@@ -107,6 +107,4 @@ scoped_ptr<AudioLog> MockAudioManager::CreateAudioLog(
return scoped_ptr<AudioLog>();
}
-void MockAudioManager::FixWedgedAudio() {}
-
} // namespace media.
diff --git a/media/audio/mock_audio_manager.h b/media/audio/mock_audio_manager.h
index 8ca400935e..520205d21b 100644
--- a/media/audio/mock_audio_manager.h
+++ b/media/audio/mock_audio_manager.h
@@ -70,8 +70,6 @@ class MockAudioManager : public media::AudioManager {
virtual scoped_ptr<AudioLog> CreateAudioLog(
AudioLogFactory::AudioComponent component) OVERRIDE;
- virtual void FixWedgedAudio() OVERRIDE;
-
protected:
virtual ~MockAudioManager();
diff --git a/media/audio/pulse/audio_manager_pulse.cc b/media/audio/pulse/audio_manager_pulse.cc
index d01c8acc4c..3369fd5a05 100644
--- a/media/audio/pulse/audio_manager_pulse.cc
+++ b/media/audio/pulse/audio_manager_pulse.cc
@@ -160,7 +160,7 @@ AudioParameters AudioManagerPulse::GetPreferredOutputStreamParameters(
const std::string& output_device_id,
const AudioParameters& input_params) {
// TODO(tommi): Support |output_device_id|.
- DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
+ VLOG_IF(0, !output_device_id.empty()) << "Not implemented!";
static const int kDefaultOutputBufferSize = 512;
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
@@ -220,7 +220,7 @@ bool AudioManagerPulse::Init() {
// Check if the pulse library is avialbale.
paths[kModulePulse].push_back(kPulseLib);
if (!InitializeStubs(paths)) {
- DLOG(WARNING) << "Failed on loading the Pulse library and symbols";
+ VLOG(1) << "Failed on loading the Pulse library and symbols";
return false;
}
#endif // defined(DLOPEN_PULSEAUDIO)
@@ -248,8 +248,8 @@ bool AudioManagerPulse::Init() {
pa_context_set_state_callback(input_context_, &pulse::ContextStateCallback,
input_mainloop_);
if (pa_context_connect(input_context_, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL)) {
- DLOG(ERROR) << "Failed to connect to the context. Error: "
- << pa_strerror(pa_context_errno(input_context_));
+ VLOG(0) << "Failed to connect to the context. Error: "
+ << pa_strerror(pa_context_errno(input_context_));
return false;
}
diff --git a/media/base/android/media_source_player_unittest.cc b/media/base/android/media_source_player_unittest.cc
index 9c6a0627d1..a3e69e5bd3 100644
--- a/media/base/android/media_source_player_unittest.cc
+++ b/media/base/android/media_source_player_unittest.cc
@@ -1861,8 +1861,7 @@ TEST_F(MediaSourcePlayerTest, SeekToThenReleaseThenDemuxerSeekAndDone) {
EXPECT_EQ(1, demuxer_->num_seek_requests());
}
-TEST_F(MediaSourcePlayerTest,
- DISABLED_SeekToThenReleaseThenDemuxerSeekThenStart) {
+TEST_F(MediaSourcePlayerTest, SeekToThenReleaseThenDemuxerSeekThenStart) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test if Release() occurs after SeekTo(), but the DemuxerSeek IPC request
@@ -1890,8 +1889,7 @@ TEST_F(MediaSourcePlayerTest,
EXPECT_EQ(1, demuxer_->num_seek_requests());
}
-TEST_F(MediaSourcePlayerTest,
- DISABLED_SeekToThenDemuxerSeekThenReleaseThenSeekDone) {
+TEST_F(MediaSourcePlayerTest, SeekToThenDemuxerSeekThenReleaseThenSeekDone) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test if Release() occurs after a SeekTo()'s subsequent DemuxerSeek IPC
@@ -1918,8 +1916,7 @@ TEST_F(MediaSourcePlayerTest,
EXPECT_EQ(1, demuxer_->num_seek_requests());
}
-// Flaky. See http://crbug.com/361359.
-TEST_F(MediaSourcePlayerTest, DISABLED_SeekToThenReleaseThenStart) {
+TEST_F(MediaSourcePlayerTest, SeekToThenReleaseThenStart) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test if Release() occurs after a SeekTo()'s subsequent DemuxerSeeK IPC
diff --git a/media/base/stream_parser_buffer.cc b/media/base/stream_parser_buffer.cc
index 8f46f5fd42..e2c25ae6fc 100644
--- a/media/base/stream_parser_buffer.cc
+++ b/media/base/stream_parser_buffer.cc
@@ -27,6 +27,7 @@ static scoped_refptr<StreamParserBuffer> CopyBuffer(
copied_buffer->set_timestamp(buffer.timestamp());
copied_buffer->set_duration(buffer.duration());
copied_buffer->set_discard_padding(buffer.discard_padding());
+ copied_buffer->set_splice_timestamp(buffer.splice_timestamp());
const DecryptConfig* decrypt_config = buffer.decrypt_config();
if (decrypt_config) {
copied_buffer->set_decrypt_config(
@@ -105,6 +106,7 @@ void StreamParserBuffer::ConvertToSpliceBuffer(
// Make a copy of this first, before making any changes.
scoped_refptr<StreamParserBuffer> overlapping_buffer = CopyBuffer(*this);
+ overlapping_buffer->set_splice_timestamp(kNoTimestamp());
const scoped_refptr<StreamParserBuffer>& first_splice_buffer =
pre_splice_buffers.front();
@@ -140,8 +142,8 @@ void StreamParserBuffer::ConvertToSpliceBuffer(
const scoped_refptr<StreamParserBuffer>& buffer = *it;
DCHECK(!buffer->end_of_stream());
DCHECK(buffer->get_splice_buffers().empty());
- buffer->set_splice_timestamp(splice_timestamp());
splice_buffers_.push_back(CopyBuffer(*buffer));
+ splice_buffers_.back()->set_splice_timestamp(splice_timestamp());
}
splice_buffers_.push_back(overlapping_buffer);
diff --git a/media/base/video_decoder.cc b/media/base/video_decoder.cc
index eedb70a364..ed875f48ea 100644
--- a/media/base/video_decoder.cc
+++ b/media/base/video_decoder.cc
@@ -16,10 +16,6 @@ scoped_refptr<VideoFrame> VideoDecoder::GetDecodeOutput() {
return NULL;
}
-bool VideoDecoder::HasAlpha() const {
- return false;
-}
-
bool VideoDecoder::NeedsBitstreamConversion() const {
return false;
}
diff --git a/media/base/video_decoder.h b/media/base/video_decoder.h
index 5b6664af4f..81b93f7778 100644
--- a/media/base/video_decoder.h
+++ b/media/base/video_decoder.h
@@ -79,11 +79,6 @@ class MEDIA_EXPORT VideoDecoder {
// must be called before destructing the decoder.
virtual void Stop() = 0;
- // Returns true if the output format has an alpha channel. Most formats do not
- // have alpha so the default is false. Override and return true for decoders
- // that return formats with an alpha channel.
- virtual bool HasAlpha() const;
-
// Returns true if the decoder needs bitstream conversion before decoding.
virtual bool NeedsBitstreamConversion() const;
diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc
index 1e33fbef24..87766651ce 100644
--- a/media/base/video_frame.cc
+++ b/media/base/video_frame.cc
@@ -231,6 +231,10 @@ scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
const base::Closure& no_longer_needed_cb) {
+ // NATIVE_TEXTURE frames need mailbox info propagated, and there's no support
+ // for that here yet, see http://crbug/362521.
+ CHECK(frame->format() != NATIVE_TEXTURE);
+
DCHECK(frame->visible_rect().Contains(visible_rect));
scoped_refptr<VideoFrame> wrapped_frame(new VideoFrame(
frame->format(), frame->coded_size(), visible_rect, natural_size,
diff --git a/media/cast/audio_sender/audio_encoder.cc b/media/cast/audio_sender/audio_encoder.cc
index ab39a6aa3b..6c336dcabd 100644
--- a/media/cast/audio_sender/audio_encoder.cc
+++ b/media/cast/audio_sender/audio_encoder.cc
@@ -30,6 +30,18 @@ void LogAudioFrameEvent(
event_time, event_type, rtp_timestamp, frame_id);
}
+void LogAudioFrameEncodedEvent(
+ const scoped_refptr<media::cast::CastEnvironment>& cast_environment,
+ base::TimeTicks event_time,
+ media::cast::CastLoggingEvent event_type,
+ media::cast::RtpTimestamp rtp_timestamp,
+ uint32 frame_id,
+ size_t frame_size) {
+ cast_environment->Logging()->InsertEncodedFrameEvent(
+ event_time, event_type, rtp_timestamp, frame_id,
+ static_cast<int>(frame_size), /* key_frame - unused */ false);
+}
+
} // namespace
namespace media {
@@ -109,12 +121,13 @@ class AudioEncoder::ImplBase
cast_environment_->PostTask(
CastEnvironment::MAIN,
FROM_HERE,
- base::Bind(&LogAudioFrameEvent,
+ base::Bind(&LogAudioFrameEncodedEvent,
cast_environment_,
cast_environment_->Clock()->NowTicks(),
kAudioFrameEncoded,
audio_frame->rtp_timestamp,
- audio_frame->frame_id));
+ audio_frame->frame_id,
+ audio_frame->data.size()));
// Compute an offset to determine the recorded time for the first
// audio sample in the buffer.
const base::TimeDelta buffer_time_offset =
diff --git a/media/cast/logging/encoding_event_subscriber.cc b/media/cast/logging/encoding_event_subscriber.cc
index 50aaf4fa95..e50a74c302 100644
--- a/media/cast/logging/encoding_event_subscriber.cc
+++ b/media/cast/logging/encoding_event_subscriber.cc
@@ -55,9 +55,11 @@ void EncodingEventSubscriber::OnReceiveFrameEvent(
event_proto->add_event_timestamp_ms(
(frame_event.timestamp - base::TimeTicks()).InMilliseconds());
- if (frame_event.type == kAudioFrameEncoded ||
- frame_event.type == kVideoFrameEncoded) {
+ if (frame_event.type == kAudioFrameEncoded) {
event_proto->set_encoded_frame_size(frame_event.size);
+ } else if (frame_event.type == kVideoFrameEncoded) {
+ event_proto->set_encoded_frame_size(frame_event.size);
+ event_proto->set_key_frame(frame_event.key_frame);
} else if (frame_event.type == kAudioPlayoutDelay ||
frame_event.type == kVideoRenderDelay) {
event_proto->set_delay_millis(frame_event.delay_delta.InMilliseconds());
diff --git a/media/cast/logging/encoding_event_subscriber_unittest.cc b/media/cast/logging/encoding_event_subscriber_unittest.cc
index 119ec07312..d00489ad42 100644
--- a/media/cast/logging/encoding_event_subscriber_unittest.cc
+++ b/media/cast/logging/encoding_event_subscriber_unittest.cc
@@ -209,6 +209,7 @@ TEST_F(EncodingEventSubscriberTest, FrameEventDelay) {
EXPECT_EQ(0, event->encoded_frame_size());
EXPECT_EQ(100, event->delay_millis());
+ EXPECT_FALSE(event->has_key_frame());
}
TEST_F(EncodingEventSubscriberTest, FrameEventSize) {
@@ -216,9 +217,10 @@ TEST_F(EncodingEventSubscriberTest, FrameEventSize) {
base::TimeTicks now(testing_clock_->NowTicks());
RtpTimestamp rtp_timestamp = 100;
int size = 123;
- cast_environment_->Logging()->InsertFrameEventWithSize(
+ bool key_frame = true;
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
now, kVideoFrameEncoded, rtp_timestamp,
- /*frame_id*/ 0, size);
+ /*frame_id*/ 0, size, key_frame);
GetEventsAndReset();
@@ -239,6 +241,8 @@ TEST_F(EncodingEventSubscriberTest, FrameEventSize) {
EXPECT_EQ(size, event->encoded_frame_size());
EXPECT_EQ(0, event->delay_millis());
+ EXPECT_TRUE(event->has_key_frame());
+ EXPECT_EQ(key_frame, event->key_frame());
}
TEST_F(EncodingEventSubscriberTest, MultipleFrameEvents) {
@@ -252,9 +256,9 @@ TEST_F(EncodingEventSubscriberTest, MultipleFrameEvents) {
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(20));
base::TimeTicks now2(testing_clock_->NowTicks());
- cast_environment_->Logging()->InsertFrameEventWithSize(
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
now2, kAudioFrameEncoded, rtp_timestamp2,
- /*frame_id*/ 0, /*size*/ 123);
+ /*frame_id*/ 0, /*size*/ 123, /* key_frame - unused */ false );
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(20));
base::TimeTicks now3(testing_clock_->NowTicks());
@@ -281,6 +285,8 @@ TEST_F(EncodingEventSubscriberTest, MultipleFrameEvents) {
EXPECT_EQ(InMilliseconds(now1), event->event_timestamp_ms(0));
EXPECT_EQ(InMilliseconds(now3), event->event_timestamp_ms(1));
+ EXPECT_FALSE(event->has_key_frame());
+
relative_rtp_timestamp = rtp_timestamp2 - first_rtp_timestamp_;
it = frame_events_.find(relative_rtp_timestamp);
ASSERT_TRUE(it != frame_events_.end());
@@ -294,6 +300,8 @@ TEST_F(EncodingEventSubscriberTest, MultipleFrameEvents) {
ASSERT_EQ(1, event->event_timestamp_ms_size());
EXPECT_EQ(InMilliseconds(now2), event->event_timestamp_ms(0));
+
+ EXPECT_FALSE(event->has_key_frame());
}
TEST_F(EncodingEventSubscriberTest, PacketEvent) {
diff --git a/media/cast/logging/logging_defines.cc b/media/cast/logging/logging_defines.cc
index 5dbf9d2818..c00c420031 100644
--- a/media/cast/logging/logging_defines.cc
+++ b/media/cast/logging/logging_defines.cc
@@ -91,7 +91,8 @@ EventMediaType GetEventMediaType(CastLoggingEvent event) {
}
FrameEvent::FrameEvent()
- : rtp_timestamp(0u), frame_id(kFrameIdUnknown), size(0u), type(kUnknown) {}
+ : rtp_timestamp(0u), frame_id(kFrameIdUnknown), size(0u), type(kUnknown),
+ key_frame(false) {}
FrameEvent::~FrameEvent() {}
PacketEvent::PacketEvent()
diff --git a/media/cast/logging/logging_defines.h b/media/cast/logging/logging_defines.h
index 348034558b..2b7d2d31da 100644
--- a/media/cast/logging/logging_defines.h
+++ b/media/cast/logging/logging_defines.h
@@ -73,7 +73,8 @@ struct FrameEvent {
RtpTimestamp rtp_timestamp;
uint32 frame_id;
- size_t size; // Encoded size only.
+ // Size of encoded frame. Only set for kVideoFrameEncoded event.
+ size_t size;
// Time of event logged.
base::TimeTicks timestamp;
@@ -82,6 +83,9 @@ struct FrameEvent {
// Render / playout delay. Only set for kAudioPlayoutDelay and
// kVideoRenderDelay events.
base::TimeDelta delay_delta;
+
+ // Whether the frame is a key frame. Only set for kVideoFrameEncoded event.
+ bool key_frame;
};
struct PacketEvent {
diff --git a/media/cast/logging/logging_impl.cc b/media/cast/logging/logging_impl.cc
index 3ab4b4d0b9..03d6c4781a 100644
--- a/media/cast/logging/logging_impl.cc
+++ b/media/cast/logging/logging_impl.cc
@@ -25,13 +25,14 @@ void LoggingImpl::InsertFrameEvent(const base::TimeTicks& time_of_event,
raw_.InsertFrameEvent(time_of_event, event, rtp_timestamp, frame_id);
}
-void LoggingImpl::InsertFrameEventWithSize(const base::TimeTicks& time_of_event,
- CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id, int frame_size) {
+void LoggingImpl::InsertEncodedFrameEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ uint32 rtp_timestamp,
+ uint32 frame_id, int frame_size,
+ bool key_frame) {
DCHECK(thread_checker_.CalledOnValidThread());
- raw_.InsertFrameEventWithSize(time_of_event, event, rtp_timestamp, frame_id,
- frame_size);
+ raw_.InsertEncodedFrameEvent(time_of_event, event, rtp_timestamp, frame_id,
+ frame_size, key_frame);
}
void LoggingImpl::InsertFrameEventWithDelay(
diff --git a/media/cast/logging/logging_impl.h b/media/cast/logging/logging_impl.h
index 5b35dffe47..fbbfa4e4a2 100644
--- a/media/cast/logging/logging_impl.h
+++ b/media/cast/logging/logging_impl.h
@@ -29,9 +29,9 @@ class LoggingImpl {
CastLoggingEvent event, uint32 rtp_timestamp,
uint32 frame_id);
- void InsertFrameEventWithSize(const base::TimeTicks& time_of_event,
- CastLoggingEvent event, uint32 rtp_timestamp,
- uint32 frame_id, int frame_size);
+ void InsertEncodedFrameEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event, uint32 rtp_timestamp,
+ uint32 frame_id, int frame_size, bool key_frame);
void InsertFrameEventWithDelay(const base::TimeTicks& time_of_event,
CastLoggingEvent event, uint32 rtp_timestamp,
diff --git a/media/cast/logging/logging_impl_unittest.cc b/media/cast/logging/logging_impl_unittest.cc
index 4aceecb226..44212e0de1 100644
--- a/media/cast/logging/logging_impl_unittest.cc
+++ b/media/cast/logging/logging_impl_unittest.cc
@@ -82,9 +82,9 @@ TEST_F(LoggingImplTest, FrameLoggingWithSize) {
int size = kBaseFrameSizeBytes +
base::RandInt(-kRandomSizeInterval, kRandomSizeInterval);
sum_size += static_cast<size_t>(size);
- logging_.InsertFrameEventWithSize(testing_clock_.NowTicks(),
- kAudioFrameCaptured, rtp_timestamp,
- frame_id, size);
+ logging_.InsertEncodedFrameEvent(testing_clock_.NowTicks(),
+ kVideoFrameEncoded, rtp_timestamp,
+ frame_id, size, true);
testing_clock_.Advance(base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
rtp_timestamp += kFrameIntervalMs * 90;
++frame_id;
@@ -135,9 +135,9 @@ TEST_F(LoggingImplTest, MultipleEventFrameLogging) {
rtp_timestamp, frame_id);
++num_events;
if (frame_id % 2) {
- logging_.InsertFrameEventWithSize(testing_clock_.NowTicks(),
- kAudioFrameEncoded, rtp_timestamp,
- frame_id, 1500);
+ logging_.InsertEncodedFrameEvent(testing_clock_.NowTicks(),
+ kAudioFrameEncoded, rtp_timestamp,
+ frame_id, 1500, true);
} else if (frame_id % 3) {
logging_.InsertFrameEvent(testing_clock_.NowTicks(), kVideoFrameDecoded,
rtp_timestamp, frame_id);
diff --git a/media/cast/logging/logging_raw.cc b/media/cast/logging/logging_raw.cc
index 1427b60701..8132bcd667 100644
--- a/media/cast/logging/logging_raw.cc
+++ b/media/cast/logging/logging_raw.cc
@@ -20,15 +20,15 @@ void LoggingRaw::InsertFrameEvent(const base::TimeTicks& time_of_event,
CastLoggingEvent event, uint32 rtp_timestamp,
uint32 frame_id) {
InsertBaseFrameEvent(time_of_event, event, frame_id, rtp_timestamp,
- base::TimeDelta(), 0);
+ base::TimeDelta(), 0, false);
}
-void LoggingRaw::InsertFrameEventWithSize(const base::TimeTicks& time_of_event,
- CastLoggingEvent event,
- uint32 rtp_timestamp, uint32 frame_id,
- int size) {
+void LoggingRaw::InsertEncodedFrameEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ uint32 rtp_timestamp, uint32 frame_id,
+ int size, bool key_frame) {
InsertBaseFrameEvent(time_of_event, event, frame_id, rtp_timestamp,
- base::TimeDelta(), size);
+ base::TimeDelta(), size, key_frame);
}
void LoggingRaw::InsertFrameEventWithDelay(const base::TimeTicks& time_of_event,
@@ -37,13 +37,14 @@ void LoggingRaw::InsertFrameEventWithDelay(const base::TimeTicks& time_of_event,
uint32 frame_id,
base::TimeDelta delay) {
InsertBaseFrameEvent(time_of_event, event, frame_id, rtp_timestamp, delay,
- 0);
+ 0, false);
}
void LoggingRaw::InsertBaseFrameEvent(const base::TimeTicks& time_of_event,
CastLoggingEvent event, uint32 frame_id,
uint32 rtp_timestamp,
- base::TimeDelta delay, int size) {
+ base::TimeDelta delay, int size,
+ bool key_frame) {
FrameEvent frame_event;
frame_event.rtp_timestamp = rtp_timestamp;
frame_event.frame_id = frame_id;
@@ -51,6 +52,7 @@ void LoggingRaw::InsertBaseFrameEvent(const base::TimeTicks& time_of_event,
frame_event.timestamp = time_of_event;
frame_event.type = event;
frame_event.delay_delta = delay;
+ frame_event.key_frame = key_frame;
for (std::vector<RawEventSubscriber*>::const_iterator it =
subscribers_.begin();
it != subscribers_.end(); ++it) {
diff --git a/media/cast/logging/logging_raw.h b/media/cast/logging/logging_raw.h
index 29f203b3e0..e4b8c9ecd6 100644
--- a/media/cast/logging/logging_raw.h
+++ b/media/cast/logging/logging_raw.h
@@ -30,12 +30,14 @@ class LoggingRaw : public base::NonThreadSafe {
CastLoggingEvent event, uint32 rtp_timestamp,
uint32 frame_id);
- // Size - Inserting the size implies that this is an encoded frame.
// This function is only applicable for the following frame events:
// kAudioFrameEncoded, kVideoFrameEncoded
- void InsertFrameEventWithSize(const base::TimeTicks& time_of_event,
+ // |size| - Size of encoded frame.
+ // |key_frame| - Whether the frame is a key frame. This field is only
+ // applicable for kVideoFrameEncoded event.
+ void InsertEncodedFrameEvent(const base::TimeTicks& time_of_event,
CastLoggingEvent event, uint32 rtp_timestamp,
- uint32 frame_id, int size);
+ uint32 frame_id, int size, bool key_frame);
// Render/playout delay
// This function is only applicable for the following frame events:
@@ -70,7 +72,7 @@ class LoggingRaw : public base::NonThreadSafe {
void InsertBaseFrameEvent(const base::TimeTicks& time_of_event,
CastLoggingEvent event, uint32 frame_id,
uint32 rtp_timestamp, base::TimeDelta delay,
- int size);
+ int size, bool key_frame);
// List of subscriber pointers. This class does not own the subscribers.
std::vector<RawEventSubscriber*> subscribers_;
diff --git a/media/cast/logging/logging_raw_unittest.cc b/media/cast/logging/logging_raw_unittest.cc
index 135aed5ea1..25b522a9c6 100644
--- a/media/cast/logging/logging_raw_unittest.cc
+++ b/media/cast/logging/logging_raw_unittest.cc
@@ -48,14 +48,15 @@ TEST_F(LoggingRawTest, FrameEvent) {
EXPECT_EQ(base::TimeDelta(), frame_events_[0].delay_delta);
}
-TEST_F(LoggingRawTest, FrameEventWithSize) {
+TEST_F(LoggingRawTest, EncodedFrameEvent) {
CastLoggingEvent event_type = kVideoFrameEncoded;
uint32 frame_id = 456u;
RtpTimestamp rtp_timestamp = 123u;
base::TimeTicks timestamp = base::TimeTicks();
int size = 1024;
- raw_.InsertFrameEventWithSize(timestamp, event_type, rtp_timestamp, frame_id,
- size);
+ bool key_frame = true;
+ raw_.InsertEncodedFrameEvent(timestamp, event_type, rtp_timestamp, frame_id,
+ size, key_frame);
event_subscriber_.GetPacketEventsAndReset(&packet_events_);
EXPECT_TRUE(packet_events_.empty());
@@ -71,6 +72,7 @@ TEST_F(LoggingRawTest, FrameEventWithSize) {
EXPECT_EQ(timestamp, frame_events_[0].timestamp);
EXPECT_EQ(event_type, frame_events_[0].type);
EXPECT_EQ(base::TimeDelta(), frame_events_[0].delay_delta);
+ EXPECT_EQ(key_frame, frame_events_[0].key_frame);
}
TEST_F(LoggingRawTest, FrameEventWithDelay) {
diff --git a/media/cast/logging/proto/raw_events.proto b/media/cast/logging/proto/raw_events.proto
index afca046437..7ead104eb4 100644
--- a/media/cast/logging/proto/raw_events.proto
+++ b/media/cast/logging/proto/raw_events.proto
@@ -77,11 +77,14 @@ message AggregatedFrameEvent {
repeated EventType event_type = 2 [packed = true];
repeated int64 event_timestamp_ms = 3 [packed = true];
- // Size is set only for kAudioFrameEncoded and kVideoFrameEncoded.
+ // Only set if there is a kAudioFrameEncoded and kVideoFrameEncoded event.
optional int32 encoded_frame_size = 4;
- // Delay is only set for kAudioPlayoutDelay and kVideoRenderDelay.
+ // Only set if there is a kAudioPlayoutDelay or kVideoRenderDelay event.
optional int32 delay_millis = 5;
+
+ // Only set if there is a kVideoFrameEncoded event.
+ optional bool key_frame = 6;
};
message BasePacketEvent {
diff --git a/media/cast/logging/simple_event_subscriber_unittest.cc b/media/cast/logging/simple_event_subscriber_unittest.cc
index e6905e1ebf..fec7e00ed4 100644
--- a/media/cast/logging/simple_event_subscriber_unittest.cc
+++ b/media/cast/logging/simple_event_subscriber_unittest.cc
@@ -40,9 +40,9 @@ class SimpleEventSubscriberTest : public ::testing::Test {
TEST_F(SimpleEventSubscriberTest, GetAndResetEvents) {
// Log some frame events.
- cast_environment_->Logging()->InsertFrameEventWithSize(
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
testing_clock_->NowTicks(), kAudioFrameEncoded, /*rtp_timestamp*/ 100u,
- /*frame_id*/ 0u, /*frame_size*/ 123);
+ /*frame_id*/ 0u, /*frame_size*/ 123, /*key_frame*/ false);
cast_environment_->Logging()->InsertFrameEventWithDelay(
testing_clock_->NowTicks(), kAudioPlayoutDelay, /*rtp_timestamp*/ 100u,
/*frame_id*/ 0u, /*delay*/ base::TimeDelta::FromMilliseconds(100));
diff --git a/media/cast/logging/stats_event_subscriber_unittest.cc b/media/cast/logging/stats_event_subscriber_unittest.cc
index 9af336974a..8f4d0c5271 100644
--- a/media/cast/logging/stats_event_subscriber_unittest.cc
+++ b/media/cast/logging/stats_event_subscriber_unittest.cc
@@ -57,8 +57,8 @@ TEST_F(StatsEventSubscriberTest, FrameStats) {
now, kVideoFrameReceived, rtp_timestamp, frame_id);
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(30));
- cast_environment_->Logging()->InsertFrameEventWithSize(
- now, kVideoFrameEncoded, rtp_timestamp, i, frame_size);
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
+ now, kVideoFrameEncoded, rtp_timestamp, i, frame_size, true);
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(30));
cast_environment_->Logging()->InsertFrameEventWithDelay(
diff --git a/media/cast/video_sender/video_sender.cc b/media/cast/video_sender/video_sender.cc
index 6bb56274a3..150891f018 100644
--- a/media/cast/video_sender/video_sender.cc
+++ b/media/cast/video_sender/video_sender.cc
@@ -168,10 +168,10 @@ void VideoSender::SendEncodedVideoFrameMainThread(
}
uint32 frame_id = encoded_frame->frame_id;
- cast_environment_->Logging()->InsertFrameEvent(last_send_time_,
- kVideoFrameEncoded,
- encoded_frame->rtp_timestamp,
- frame_id);
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
+ last_send_time_, kVideoFrameEncoded, encoded_frame->rtp_timestamp,
+ frame_id, static_cast<int>(encoded_frame->data.size()),
+ encoded_frame->key_frame);
// Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
TRACE_EVENT_INSTANT1(
diff --git a/media/cdm/ppapi/cdm_adapter.cc b/media/cdm/ppapi/cdm_adapter.cc
index bfa4795bef..7e58176d8a 100644
--- a/media/cdm/ppapi/cdm_adapter.cc
+++ b/media/cdm/ppapi/cdm_adapter.cc
@@ -154,6 +154,8 @@ cdm::VideoDecoderConfig::VideoCodec PpVideoCodecToCdmVideoCodec(
return cdm::VideoDecoderConfig::kCodecVp8;
case PP_VIDEOCODEC_H264:
return cdm::VideoDecoderConfig::kCodecH264;
+ case PP_VIDEOCODEC_VP9:
+ return cdm::VideoDecoderConfig::kCodecVp9;
default:
return cdm::VideoDecoderConfig::kUnknownVideoCodec;
}
@@ -162,8 +164,8 @@ cdm::VideoDecoderConfig::VideoCodec PpVideoCodecToCdmVideoCodec(
cdm::VideoDecoderConfig::VideoCodecProfile PpVCProfileToCdmVCProfile(
PP_VideoCodecProfile profile) {
switch (profile) {
- case PP_VIDEOCODECPROFILE_VP8_MAIN:
- return cdm::VideoDecoderConfig::kVp8ProfileMain;
+ case PP_VIDEOCODECPROFILE_NOT_NEEDED:
+ return cdm::VideoDecoderConfig::kProfileNotNeeded;
case PP_VIDEOCODECPROFILE_H264_BASELINE:
return cdm::VideoDecoderConfig::kH264ProfileBaseline;
case PP_VIDEOCODECPROFILE_H264_MAIN:
diff --git a/media/cdm/ppapi/external_clear_key/cdm_video_decoder.cc b/media/cdm/ppapi/external_clear_key/cdm_video_decoder.cc
index 74a1b28f68..297ea84491 100644
--- a/media/cdm/ppapi/external_clear_key/cdm_video_decoder.cc
+++ b/media/cdm/ppapi/external_clear_key/cdm_video_decoder.cc
@@ -31,7 +31,8 @@ scoped_ptr<CdmVideoDecoder> CreateVideoDecoder(
#else
#if defined(CLEAR_KEY_CDM_USE_LIBVPX_DECODER)
- if (config.codec == cdm::VideoDecoderConfig::kCodecVp8) {
+ if (config.codec == cdm::VideoDecoderConfig::kCodecVp8 ||
+ config.codec == cdm::VideoDecoderConfig::kCodecVp9) {
video_decoder.reset(new LibvpxCdmVideoDecoder(host));
if (!video_decoder->Initialize(config))
diff --git a/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc b/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc
index 83b16b1ea4..320362f547 100644
--- a/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc
+++ b/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc
@@ -51,6 +51,8 @@ static AVCodecID CdmVideoCodecToCodecID(
return AV_CODEC_ID_VP8;
case cdm::VideoDecoderConfig::kCodecH264:
return AV_CODEC_ID_H264;
+ case cdm::VideoDecoderConfig::kCodecVp9:
+ return AV_CODEC_ID_VP9;
case cdm::VideoDecoderConfig::kUnknownVideoCodec:
default:
NOTREACHED() << "Unsupported cdm::VideoCodec: " << video_codec;
@@ -61,8 +63,10 @@ static AVCodecID CdmVideoCodecToCodecID(
static int CdmVideoCodecProfileToProfileID(
cdm::VideoDecoderConfig::VideoCodecProfile profile) {
switch (profile) {
- case cdm::VideoDecoderConfig::kVp8ProfileMain:
- return FF_PROFILE_UNKNOWN; // VP8 does not define an FFmpeg profile.
+ case cdm::VideoDecoderConfig::kProfileNotNeeded:
+ // For codecs that do not need a profile (e.g. VP8/VP9), does not define
+ // an FFmpeg profile.
+ return FF_PROFILE_UNKNOWN;
case cdm::VideoDecoderConfig::kH264ProfileBaseline:
return FF_PROFILE_H264_BASELINE;
case cdm::VideoDecoderConfig::kH264ProfileMain:
diff --git a/media/filters/decoder_stream_traits.cc b/media/filters/decoder_stream_traits.cc
index 0eb4aa30b1..4eb2e48a28 100644
--- a/media/filters/decoder_stream_traits.cc
+++ b/media/filters/decoder_stream_traits.cc
@@ -53,14 +53,12 @@ bool DecoderStreamTraits<DemuxerStream::VIDEO>::FinishInitialization(
DemuxerStream* stream) {
DCHECK(stream);
if (!decoder) {
- init_cb.Run(false, false);
+ init_cb.Run(false);
return false;
}
if (decoder->NeedsBitstreamConversion())
stream->EnableBitstreamConverter();
- // TODO(xhwang): We assume |decoder_->HasAlpha()| does not change after
- // reinitialization. Check this condition.
- init_cb.Run(true, decoder->HasAlpha());
+ init_cb.Run(true);
return true;
}
diff --git a/media/filters/decoder_stream_traits.h b/media/filters/decoder_stream_traits.h
index 1036417011..602c4be946 100644
--- a/media/filters/decoder_stream_traits.h
+++ b/media/filters/decoder_stream_traits.h
@@ -44,7 +44,7 @@ struct DecoderStreamTraits<DemuxerStream::VIDEO> {
typedef VideoDecoder DecoderType;
typedef VideoDecoderConfig DecoderConfigType;
typedef DecryptingVideoDecoder DecryptingDecoderType;
- typedef base::Callback<void(bool success, bool has_alpha)> StreamInitCB;
+ typedef base::Callback<void(bool success)> StreamInitCB;
static std::string ToString();
static bool FinishInitialization(const StreamInitCB& init_cb,
diff --git a/media/filters/gpu_video_decoder.cc b/media/filters/gpu_video_decoder.cc
index d796a96a2d..392448d6a4 100644
--- a/media/filters/gpu_video_decoder.cc
+++ b/media/filters/gpu_video_decoder.cc
@@ -328,11 +328,6 @@ void GpuVideoDecoder::GetBufferData(int32 id, base::TimeDelta* timestamp,
NOTREACHED() << "Missing bitstreambuffer id: " << id;
}
-bool GpuVideoDecoder::HasAlpha() const {
- DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
- return true;
-}
-
bool GpuVideoDecoder::NeedsBitstreamConversion() const {
DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
return needs_bitstream_conversion_;
diff --git a/media/filters/gpu_video_decoder.h b/media/filters/gpu_video_decoder.h
index 166cd339d6..e8f5f756fa 100644
--- a/media/filters/gpu_video_decoder.h
+++ b/media/filters/gpu_video_decoder.h
@@ -48,7 +48,6 @@ class MEDIA_EXPORT GpuVideoDecoder
const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
virtual void Stop() OVERRIDE;
- virtual bool HasAlpha() const OVERRIDE;
virtual bool NeedsBitstreamConversion() const OVERRIDE;
virtual bool CanReadWithoutStalling() const OVERRIDE;
diff --git a/media/filters/pipeline_integration_test.cc b/media/filters/pipeline_integration_test.cc
index dfea318086..aa73531845 100644
--- a/media/filters/pipeline_integration_test.cc
+++ b/media/filters/pipeline_integration_test.cc
@@ -564,7 +564,6 @@ TEST_P(PipelineIntegrationTest, BasicPlayback_MediaSource_VP9_WebM) {
}
TEST_P(PipelineIntegrationTest, BasicPlayback_MediaSource_VP8A_WebM) {
- EXPECT_CALL(*this, OnSetOpaque(false)).Times(AnyNumber());
MockMediaSource source("bear-vp8a.webm", kVideoOnlyWebM, kAppendWholeFile,
GetParam());
StartPipelineWithMediaSource(&source);
@@ -583,7 +582,6 @@ TEST_P(PipelineIntegrationTest, BasicPlayback_MediaSource_VP8A_WebM) {
}
TEST_P(PipelineIntegrationTest, BasicPlayback_MediaSource_Opus_WebM) {
- EXPECT_CALL(*this, OnSetOpaque(false)).Times(AnyNumber());
MockMediaSource source("bear-opus-end-trimming.webm", kOpusAudioOnlyWebM,
kAppendWholeFile, GetParam());
StartPipelineWithMediaSource(&source);
@@ -606,7 +604,6 @@ TEST_P(PipelineIntegrationTest, BasicPlayback_MediaSource_Opus_WebM) {
// Flaky. http://crbug.com/304776
TEST_P(PipelineIntegrationTest, DISABLED_MediaSource_Opus_Seeking_WebM) {
- EXPECT_CALL(*this, OnSetOpaque(false)).Times(AnyNumber());
MockMediaSource source("bear-opus-end-trimming.webm", kOpusAudioOnlyWebM,
kAppendWholeFile, GetParam());
StartHashedPipelineWithMediaSource(&source);
@@ -1189,7 +1186,6 @@ TEST_F(PipelineIntegrationTest, BasicPlayback_VP9_Opus_WebM) {
// Verify that VP8 video with alpha channel can be played back.
TEST_F(PipelineIntegrationTest, BasicPlayback_VP8A_WebM) {
- EXPECT_CALL(*this, OnSetOpaque(false)).Times(AnyNumber());
ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp8a.webm"),
PIPELINE_OK));
Play();
@@ -1199,7 +1195,6 @@ TEST_F(PipelineIntegrationTest, BasicPlayback_VP8A_WebM) {
// Verify that VP8A video with odd width/height can be played back.
TEST_F(PipelineIntegrationTest, BasicPlayback_VP8A_Odd_WebM) {
- EXPECT_CALL(*this, OnSetOpaque(false)).Times(AnyNumber());
ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp8a-odd-dimensions.webm"),
PIPELINE_OK));
Play();
diff --git a/media/filters/pipeline_integration_test_base.cc b/media/filters/pipeline_integration_test_base.cc
index bf639ccaa5..b6ac6273f1 100644
--- a/media/filters/pipeline_integration_test_base.cc
+++ b/media/filters/pipeline_integration_test_base.cc
@@ -36,7 +36,6 @@ PipelineIntegrationTestBase::PipelineIntegrationTestBase()
last_video_frame_format_(VideoFrame::UNKNOWN),
hardware_config_(AudioParameters(), AudioParameters()) {
base::MD5Init(&md5_context_);
- EXPECT_CALL(*this, OnSetOpaque(true)).Times(AnyNumber());
}
PipelineIntegrationTestBase::~PipelineIntegrationTestBase() {
@@ -250,8 +249,6 @@ PipelineIntegrationTestBase::CreateFilterCollection(
decryptor),
base::Bind(&PipelineIntegrationTestBase::OnVideoRendererPaint,
base::Unretained(this)),
- base::Bind(&PipelineIntegrationTestBase::OnSetOpaque,
- base::Unretained(this)),
false));
collection->SetVideoRenderer(renderer.Pass());
diff --git a/media/filters/pipeline_integration_test_base.h b/media/filters/pipeline_integration_test_base.h
index 25a5924cc8..1294a6211d 100644
--- a/media/filters/pipeline_integration_test_base.h
+++ b/media/filters/pipeline_integration_test_base.h
@@ -132,7 +132,6 @@ class PipelineIntegrationTestBase {
const DecryptorReadyCB& decryptor_ready_cb);
void OnVideoRendererPaint(const scoped_refptr<VideoFrame>& frame);
- MOCK_METHOD1(OnSetOpaque, void(bool));
MOCK_METHOD1(OnMetadata, void(PipelineMetadata));
MOCK_METHOD0(OnPrerollCompleted, void());
};
diff --git a/media/filters/source_buffer_stream.cc b/media/filters/source_buffer_stream.cc
index 0ff0f3f0ef..4b892d78b6 100644
--- a/media/filters/source_buffer_stream.cc
+++ b/media/filters/source_buffer_stream.cc
@@ -1152,6 +1152,10 @@ SourceBufferStream::Status SourceBufferStream::GetNextBuffer(
return SourceBufferStream::kConfigChange;
}
+ // Every pre splice buffer must have the same splice_timestamp().
+ DCHECK(splice_buffer_->splice_timestamp() ==
+ splice_buffers[splice_buffers_index_]->splice_timestamp());
+
*out_buffer = splice_buffers[splice_buffers_index_++];
return SourceBufferStream::kSuccess;
}
@@ -1169,6 +1173,7 @@ SourceBufferStream::Status SourceBufferStream::GetNextBuffer(
// always issued prior to handing out this buffer, any changes in config id
// have been inherently handled.
DCHECK_GE(splice_buffers_index_, splice_buffers.size());
+ DCHECK(splice_buffers.back()->splice_timestamp() == kNoTimestamp());
*out_buffer = splice_buffers.back();
splice_buffer_ = NULL;
splice_buffers_index_ = 0;
diff --git a/media/filters/source_buffer_stream_unittest.cc b/media/filters/source_buffer_stream_unittest.cc
index 3a92732e5f..db54a41106 100644
--- a/media/filters/source_buffer_stream_unittest.cc
+++ b/media/filters/source_buffer_stream_unittest.cc
@@ -254,6 +254,7 @@ class SourceBufferStreamTest : public testing::Test {
base::SplitString(expected, ' ', &timestamps);
std::stringstream ss;
const SourceBufferStream::Type type = stream_->GetType();
+ base::TimeDelta active_splice_timestamp = kNoTimestamp();
for (size_t i = 0; i < timestamps.size(); i++) {
scoped_refptr<StreamParserBuffer> buffer;
SourceBufferStream::Status status = stream_->GetNextBuffer(&buffer);
@@ -288,6 +289,17 @@ class SourceBufferStreamTest : public testing::Test {
ss << buffer->GetDecodeTimestamp().InMilliseconds();
if (buffer->IsKeyframe())
ss << "K";
+
+ // Until the last splice frame is seen, indicated by a matching timestamp,
+ // all buffers must have the same splice_timestamp().
+ if (buffer->timestamp() == active_splice_timestamp) {
+ ASSERT_EQ(buffer->splice_timestamp(), kNoTimestamp());
+ } else {
+ ASSERT_TRUE(active_splice_timestamp == kNoTimestamp() ||
+ active_splice_timestamp == buffer->splice_timestamp());
+ }
+
+ active_splice_timestamp = buffer->splice_timestamp();
}
EXPECT_EQ(expected, ss.str());
}
diff --git a/media/filters/video_frame_stream_unittest.cc b/media/filters/video_frame_stream_unittest.cc
index dc45638a47..5225e98a54 100644
--- a/media/filters/video_frame_stream_unittest.cc
+++ b/media/filters/video_frame_stream_unittest.cc
@@ -87,7 +87,7 @@ class VideoFrameStreamTest
total_bytes_decoded_ += statistics.video_bytes_decoded;
}
- void OnInitialized(bool success, bool has_alpha) {
+ void OnInitialized(bool success) {
DCHECK(!pending_read_);
DCHECK(!pending_reset_);
DCHECK(pending_initialize_);
diff --git a/media/filters/video_renderer_impl.cc b/media/filters/video_renderer_impl.cc
index 93deb573de..27cd9f375f 100644
--- a/media/filters/video_renderer_impl.cc
+++ b/media/filters/video_renderer_impl.cc
@@ -27,7 +27,6 @@ VideoRendererImpl::VideoRendererImpl(
ScopedVector<VideoDecoder> decoders,
const SetDecryptorReadyCB& set_decryptor_ready_cb,
const PaintCB& paint_cb,
- const SetOpaqueCB& set_opaque_cb,
bool drop_frames)
: task_runner_(task_runner),
video_frame_stream_(task_runner, decoders.Pass(), set_decryptor_ready_cb),
@@ -39,7 +38,6 @@ VideoRendererImpl::VideoRendererImpl(
drop_frames_(drop_frames),
playback_rate_(0),
paint_cb_(paint_cb),
- set_opaque_cb_(set_opaque_cb),
last_timestamp_(kNoTimestamp()),
frames_decoded_(0),
frames_dropped_(0),
@@ -189,8 +187,7 @@ void VideoRendererImpl::Initialize(DemuxerStream* stream,
weak_factory_.GetWeakPtr()));
}
-void VideoRendererImpl::OnVideoFrameStreamInitialized(bool success,
- bool has_alpha) {
+void VideoRendererImpl::OnVideoFrameStreamInitialized(bool success) {
DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
@@ -211,9 +208,6 @@ void VideoRendererImpl::OnVideoFrameStreamInitialized(bool success,
// have not populated any buffers yet.
state_ = kFlushed;
- set_opaque_cb_.Run(!has_alpha);
- set_opaque_cb_.Reset();
-
// Create our video thread.
if (!base::PlatformThread::Create(0, this, &thread_)) {
NOTREACHED() << "Video thread creation failed";
diff --git a/media/filters/video_renderer_impl.h b/media/filters/video_renderer_impl.h
index b45f847259..e633f6be03 100644
--- a/media/filters/video_renderer_impl.h
+++ b/media/filters/video_renderer_impl.h
@@ -36,7 +36,6 @@ class MEDIA_EXPORT VideoRendererImpl
public base::PlatformThread::Delegate {
public:
typedef base::Callback<void(const scoped_refptr<VideoFrame>&)> PaintCB;
- typedef base::Callback<void(bool)> SetOpaqueCB;
// Maximum duration of the last frame.
static base::TimeDelta kMaxLastFrameDuration();
@@ -46,9 +45,6 @@ class MEDIA_EXPORT VideoRendererImpl
// |paint_cb| is executed on the video frame timing thread whenever a new
// frame is available for painting.
//
- // |set_opaque_cb| is executed when the renderer is initialized to inform
- // the player whether the decoded output will be opaque or not.
- //
// Implementors should avoid doing any sort of heavy work in this method and
// instead post a task to a common/worker thread to handle rendering. Slowing
// down the video thread may result in losing synchronization with audio.
@@ -59,7 +55,6 @@ class MEDIA_EXPORT VideoRendererImpl
ScopedVector<VideoDecoder> decoders,
const SetDecryptorReadyCB& set_decryptor_ready_cb,
const PaintCB& paint_cb,
- const SetOpaqueCB& set_opaque_cb,
bool drop_frames);
virtual ~VideoRendererImpl();
@@ -85,7 +80,7 @@ class MEDIA_EXPORT VideoRendererImpl
private:
// Callback for |video_frame_stream_| initialization.
- void OnVideoFrameStreamInitialized(bool success, bool has_alpha);
+ void OnVideoFrameStreamInitialized(bool success);
// Callback for |video_frame_stream_| to deliver decoded video frames and
// report video decoding status.
@@ -225,9 +220,6 @@ class MEDIA_EXPORT VideoRendererImpl
// Embedder callback for notifying a new frame is available for painting.
PaintCB paint_cb_;
- // Callback to execute to inform the player if the decoded output is opaque.
- SetOpaqueCB set_opaque_cb_;
-
// The timestamp of the last frame removed from the |ready_frames_| queue,
// either for calling |paint_cb_| or for dropping. Set to kNoTimestamp()
// during flushing.
diff --git a/media/filters/video_renderer_impl_unittest.cc b/media/filters/video_renderer_impl_unittest.cc
index f4cc46fd5e..b48d9071c2 100644
--- a/media/filters/video_renderer_impl_unittest.cc
+++ b/media/filters/video_renderer_impl_unittest.cc
@@ -49,7 +49,6 @@ class VideoRendererImplTest : public ::testing::Test {
decoders.Pass(),
media::SetDecryptorReadyCB(),
base::Bind(&VideoRendererImplTest::OnPaint, base::Unretained(this)),
- base::Bind(&VideoRendererImplTest::OnSetOpaque, base::Unretained(this)),
true));
demuxer_stream_.set_video_decoder_config(TestVideoConfig::Normal());
@@ -64,15 +63,10 @@ class VideoRendererImplTest : public ::testing::Test {
.Times(AnyNumber());
EXPECT_CALL(*this, OnTimeUpdate(_))
.Times(AnyNumber());
- EXPECT_CALL(*this, OnSetOpaque(_))
- .Times(AnyNumber());
}
virtual ~VideoRendererImplTest() {}
- // Callbacks passed into VideoRendererImpl().
- MOCK_CONST_METHOD1(OnSetOpaque, void(bool));
-
// Callbacks passed into Initialize().
MOCK_METHOD1(OnTimeUpdate, void(base::TimeDelta));
diff --git a/media/filters/vpx_video_decoder.cc b/media/filters/vpx_video_decoder.cc
index 286629d822..861dd0e405 100644
--- a/media/filters/vpx_video_decoder.cc
+++ b/media/filters/vpx_video_decoder.cc
@@ -340,10 +340,6 @@ void VpxVideoDecoder::Stop() {
state_ = kUninitialized;
}
-bool VpxVideoDecoder::HasAlpha() const {
- return vpx_codec_alpha_ != NULL;
-}
-
void VpxVideoDecoder::DecodeBuffer(const scoped_refptr<DecoderBuffer>& buffer) {
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_NE(state_, kUninitialized);
diff --git a/media/filters/vpx_video_decoder.h b/media/filters/vpx_video_decoder.h
index 91948944c7..3e35548022 100644
--- a/media/filters/vpx_video_decoder.h
+++ b/media/filters/vpx_video_decoder.h
@@ -38,7 +38,6 @@ class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
virtual void Stop() OVERRIDE;
- virtual bool HasAlpha() const OVERRIDE;
private:
enum DecoderState {
diff --git a/media/formats/mp4/mp4_stream_parser.cc b/media/formats/mp4/mp4_stream_parser.cc
index 2fac274a7d..b8ddf309f1 100644
--- a/media/formats/mp4/mp4_stream_parser.cc
+++ b/media/formats/mp4/mp4_stream_parser.cc
@@ -433,6 +433,12 @@ bool MP4StreamParser::EnqueueSample(BufferQueue* audio_buffers,
if (!audio && !video)
runs_->AdvanceRun();
+ // AuxInfo is required for encrypted samples.
+ // See ISO Common Encryption spec: ISO/IEC FDIS 23001-7:2011(E);
+ // Section 7: Common Encryption Sample Auxiliary Information.
+ if (runs_->is_encrypted() && !runs_->aux_info_size())
+ return false;
+
// Attempt to cache the auxiliary information first. Aux info is usually
// placed in a contiguous block before the sample data, rather than being
// interleaved. If we didn't cache it, this would require that we retain the
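
The new guard makes a missing saiz/saio pair a parse failure for encrypted runs instead of a later crash. Below is a minimal standalone sketch of that precondition; TrackRun is a made-up stand-in for the parser's track run iterator state, not the real MP4StreamParser API.

// Standalone sketch of the saiz/saio precondition added to EnqueueSample().
#include <cstdint>
#include <iostream>

struct TrackRun {
  bool is_encrypted;
  uint32_t aux_info_size;  // Total size of the CENC sample auxiliary info.
};

// Encrypted samples without auxiliary information are a parse error, so the
// parser fails cleanly instead of crashing later (see crbug.com/361347).
bool CanEnqueueSample(const TrackRun& run) {
  if (run.is_encrypted && run.aux_info_size == 0)
    return false;
  return true;
}

int main() {
  TrackRun clear_run = {false, 0};
  TrackRun broken_run = {true, 0};  // Encrypted, but saiz/saio are missing.
  std::cout << CanEnqueueSample(clear_run) << " "
            << CanEnqueueSample(broken_run) << "\n";  // Prints "1 0".
}
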
diff --git a/media/formats/mp4/mp4_stream_parser_unittest.cc b/media/formats/mp4/mp4_stream_parser_unittest.cc
index 226943e999..4418d6d3be 100644
--- a/media/formats/mp4/mp4_stream_parser_unittest.cc
+++ b/media/formats/mp4/mp4_stream_parser_unittest.cc
@@ -206,6 +206,13 @@ TEST_F(MP4StreamParserTest, NoMoovAfterFlush) {
512));
}
+// Test an invalid file where there are encrypted samples, but
+// SampleAuxiliaryInformation{Sizes|Offsets}Box (saiz|saio) are missing.
+// The parser should fail instead of crash. See http://crbug.com/361347
+TEST_F(MP4StreamParserTest, MissingSampleAuxInfo) {
+ ParseMP4File("bear-1280x720-a_frag-cenc_missing-saiz-saio.mp4", 512);
+}
+
// TODO(strobe): Create and test media which uses CENC auxiliary info stored
// inside a private box
diff --git a/media/formats/webm/webm_cluster_parser.cc b/media/formats/webm/webm_cluster_parser.cc
index 6e2424b101..5530815caa 100644
--- a/media/formats/webm/webm_cluster_parser.cc
+++ b/media/formats/webm/webm_cluster_parser.cc
@@ -113,13 +113,13 @@ int WebMClusterParser::Parse(const uint8* buf, int size) {
const WebMClusterParser::BufferQueue& WebMClusterParser::GetAudioBuffers() {
if (cluster_ended_)
- audio_.ApplyDurationDefaultOrEstimateIfNeeded();
+ audio_.ApplyDurationEstimateIfNeeded();
return audio_.buffers();
}
const WebMClusterParser::BufferQueue& WebMClusterParser::GetVideoBuffers() {
if (cluster_ended_)
- video_.ApplyDurationDefaultOrEstimateIfNeeded();
+ video_.ApplyDurationEstimateIfNeeded();
return video_.buffers();
}
@@ -133,7 +133,7 @@ WebMClusterParser::GetTextBuffers() {
++itr) {
// Per OnBlock(), all text buffers should already have valid durations, so
// there is no need to call
- // itr->second.ApplyDurationDefaultOrEstimateIfNeeded() here.
+ // itr->second.ApplyDurationEstimateIfNeeded() here.
const BufferQueue& text_buffers = itr->second.buffers();
if (!text_buffers.empty())
text_buffers_map_.insert(std::make_pair(itr->first, text_buffers));
@@ -407,6 +407,9 @@ bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
if (block_duration >= 0) {
buffer->set_duration(base::TimeDelta::FromMicroseconds(
block_duration * timecode_multiplier_));
+ } else {
+ DCHECK_NE(buffer_type, DemuxerStream::TEXT);
+ buffer->set_duration(track->default_duration());
}
if (discard_padding != 0) {
@@ -465,14 +468,13 @@ bool WebMClusterParser::Track::AddBuffer(
return QueueBuffer(buffer);
}
-void WebMClusterParser::Track::ApplyDurationDefaultOrEstimateIfNeeded() {
+void WebMClusterParser::Track::ApplyDurationEstimateIfNeeded() {
if (!last_added_buffer_missing_duration_)
return;
- last_added_buffer_missing_duration_->set_duration(
- GetDurationDefaultOrEstimate());
+ last_added_buffer_missing_duration_->set_duration(GetDurationEstimate());
- DVLOG(2) << "ApplyDurationDefaultOrEstimateIfNeeded() : new dur : "
+ DVLOG(2) << "ApplyDurationEstimateIfNeeded() : new dur : "
<< " ts "
<< last_added_buffer_missing_duration_->timestamp().InSecondsF()
<< " dur "
@@ -536,13 +538,10 @@ bool WebMClusterParser::Track::QueueBuffer(
return true;
}
-base::TimeDelta WebMClusterParser::Track::GetDurationDefaultOrEstimate() {
- base::TimeDelta duration = default_duration_;
+base::TimeDelta WebMClusterParser::Track::GetDurationEstimate() {
+ base::TimeDelta duration = estimated_next_frame_duration_;
if (duration != kNoTimestamp()) {
- DVLOG(3) << __FUNCTION__ << " : using TrackEntry DefaultDuration";
- } else if (estimated_next_frame_duration_ != kNoTimestamp()) {
DVLOG(3) << __FUNCTION__ << " : using estimated duration";
- duration = estimated_next_frame_duration_;
} else {
DVLOG(3) << __FUNCTION__ << " : using hardcoded default duration";
if (is_video_) {
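
Taken together, these hunks reorder the duration fallback: OnBlock() now applies the TrackEntry DefaultDuration immediately when a block carries no BlockDuration, so the cluster-end path only chooses between the running estimate and a hardcoded default. Below is a minimal standalone sketch of that ordering; kNoTimestamp is modeled as a sentinel and the millisecond defaults are illustrative placeholders, not the constants used in webm_cluster_parser.cc.

// Standalone sketch of the buffer duration fallback order after this change.
#include <chrono>
#include <iostream>

using Duration = std::chrono::microseconds;
constexpr Duration kNoTimestamp = Duration::min();  // Sentinel value.

Duration PickBufferDuration(Duration block_duration,      // Block/BlockGroup.
                            Duration default_duration,    // TrackEntry default.
                            Duration estimated_duration,  // Max seen so far.
                            bool is_video) {
  if (block_duration != kNoTimestamp)
    return block_duration;                     // Explicit BlockDuration.
  if (default_duration != kNoTimestamp)
    return default_duration;                   // Now applied in OnBlock().
  if (estimated_duration != kNoTimestamp)
    return estimated_duration;                 // "using estimated duration"
  return is_video ? Duration(std::chrono::milliseconds(33))  // placeholder
                  : Duration(std::chrono::milliseconds(23)); // placeholder
}

int main() {
  std::cout << PickBufferDuration(kNoTimestamp, kNoTimestamp,
                                  std::chrono::milliseconds(20), true).count()
            << "us\n";  // Prints 20000us: the estimate wins over the default.
}
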
diff --git a/media/formats/webm/webm_cluster_parser.h b/media/formats/webm/webm_cluster_parser.h
index 749b6bdb48..5657e90547 100644
--- a/media/formats/webm/webm_cluster_parser.h
+++ b/media/formats/webm/webm_cluster_parser.h
@@ -43,13 +43,12 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
// otherwise adds |buffer| to |buffers_|.
bool AddBuffer(const scoped_refptr<StreamParserBuffer>& buffer);
- // If |last_added_buffer_missing_duration_| is set, updates its duration
- // to be the first non-kNoTimestamp() value of |default_duration_|,
- // |estimated_next_frame_duration_|, or an arbitrary default, then adds it
- // to |buffers_| and unsets |last_added_buffer_missing_duration_|. (This
- // method helps stream parser emit all buffers in a media segment before
- // signaling end of segment.)
- void ApplyDurationDefaultOrEstimateIfNeeded();
+ // If |last_added_buffer_missing_duration_| is set, updates its duration to
+ // be the non-kNoTimestamp() value of |estimated_next_frame_duration_| or an
+ // arbitrary default, then adds it to |buffers_| and unsets
+ // |last_added_buffer_missing_duration_|. (This method helps the stream parser
+ // emit all buffers in a media segment before signaling end of segment.)
+ void ApplyDurationEstimateIfNeeded();
// Clears all buffer state, except a possibly held-aside buffer that is
// missing duration.
@@ -65,6 +64,8 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
// |size| indicates the number of bytes in |data|.
bool IsKeyframe(const uint8* data, int size) const;
+ base::TimeDelta default_duration() const { return default_duration_; }
+
private:
// Helper that sanity-checks |buffer| duration, updates
// |estimated_next_frame_duration_|, and adds |buffer| to |buffers_|.
@@ -73,8 +74,8 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
bool QueueBuffer(const scoped_refptr<StreamParserBuffer>& buffer);
// Helper that calculates the buffer duration to use in
- // ApplyDurationDefaultOrEstimateIfNeeded().
- base::TimeDelta GetDurationDefaultOrEstimate();
+ // ApplyDurationEstimateIfNeeded().
+ base::TimeDelta GetDurationEstimate();
int track_num_;
std::deque<scoped_refptr<StreamParserBuffer> > buffers_;
@@ -86,6 +87,9 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
// If kNoTimestamp(), then a default value will be used. This estimate is
// the maximum duration seen or derived so far for this track, and is valid
// only if |default_duration_| is kNoTimestamp().
+ //
+ // TODO(wolenetz): Add unittests for duration estimation and default
+ // duration handling. http://crbug.com/361786 .
base::TimeDelta estimated_next_frame_duration_;
};
diff --git a/media/media.target.darwin-arm.mk b/media/media.target.darwin-arm.mk
index e041db4e37..5457708b22 100644
--- a/media/media.target.darwin-arm.mk
+++ b/media/media.target.darwin-arm.mk
@@ -270,8 +270,10 @@ MY_DEFS_Debug := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -393,8 +395,10 @@ MY_DEFS_Release := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
diff --git a/media/media.target.darwin-mips.mk b/media/media.target.darwin-mips.mk
index 2f5c7d4d0f..9c7a4c0247 100644
--- a/media/media.target.darwin-mips.mk
+++ b/media/media.target.darwin-mips.mk
@@ -269,8 +269,10 @@ MY_DEFS_Debug := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -391,8 +393,10 @@ MY_DEFS_Release := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
diff --git a/media/media.target.darwin-x86.mk b/media/media.target.darwin-x86.mk
index 265ba6456c..a7f8537d11 100644
--- a/media/media.target.darwin-x86.mk
+++ b/media/media.target.darwin-x86.mk
@@ -272,8 +272,10 @@ MY_DEFS_Debug := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -395,8 +397,10 @@ MY_DEFS_Release := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
diff --git a/media/media.target.darwin-x86_64.mk b/media/media.target.darwin-x86_64.mk
index ca8a98b265..932314df3d 100644
--- a/media/media.target.darwin-x86_64.mk
+++ b/media/media.target.darwin-x86_64.mk
@@ -272,8 +272,10 @@ MY_DEFS_Debug := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -395,8 +397,10 @@ MY_DEFS_Release := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
diff --git a/media/media.target.linux-arm.mk b/media/media.target.linux-arm.mk
index e041db4e37..5457708b22 100644
--- a/media/media.target.linux-arm.mk
+++ b/media/media.target.linux-arm.mk
@@ -270,8 +270,10 @@ MY_DEFS_Debug := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -393,8 +395,10 @@ MY_DEFS_Release := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
diff --git a/media/media.target.linux-mips.mk b/media/media.target.linux-mips.mk
index 2f5c7d4d0f..9c7a4c0247 100644
--- a/media/media.target.linux-mips.mk
+++ b/media/media.target.linux-mips.mk
@@ -269,8 +269,10 @@ MY_DEFS_Debug := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -391,8 +393,10 @@ MY_DEFS_Release := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
diff --git a/media/media.target.linux-x86.mk b/media/media.target.linux-x86.mk
index 265ba6456c..a7f8537d11 100644
--- a/media/media.target.linux-x86.mk
+++ b/media/media.target.linux-x86.mk
@@ -272,8 +272,10 @@ MY_DEFS_Debug := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -395,8 +397,10 @@ MY_DEFS_Release := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
diff --git a/media/media.target.linux-x86_64.mk b/media/media.target.linux-x86_64.mk
index ca8a98b265..932314df3d 100644
--- a/media/media.target.linux-x86_64.mk
+++ b/media/media.target.linux-x86_64.mk
@@ -272,8 +272,10 @@ MY_DEFS_Debug := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -395,8 +397,10 @@ MY_DEFS_Release := \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
'-DSK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS=1' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CAN_RECORD' \
+ '-DSK_SUPPORT_LEGACY_N32_NAME' \
+ '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
diff --git a/media/midi/midi_manager.cc b/media/midi/midi_manager.cc
index 77b764f9d2..7384703b80 100644
--- a/media/midi/midi_manager.cc
+++ b/media/midi/midi_manager.cc
@@ -18,23 +18,26 @@ MidiManager* MidiManager::Create() {
#endif
MidiManager::MidiManager()
- : initialized_(false) {
+ : initialized_(false),
+ result_(MIDI_NOT_SUPPORTED) {
}
MidiManager::~MidiManager() {
}
-bool MidiManager::StartSession(MidiManagerClient* client) {
+void MidiManager::StartSession(MidiManagerClient* client, int client_id) {
// Lazily initialize the MIDI back-end.
- if (!initialized_)
- initialized_ = Initialize();
+ if (!initialized_) {
+ initialized_ = true;
+ result_ = Initialize();
+ }
- if (initialized_) {
+ if (result_ == MIDI_OK) {
base::AutoLock auto_lock(clients_lock_);
clients_.insert(client);
}
-
- return initialized_;
+ // TODO(toyoshim): Make Initialize() asynchronous.
+ client->CompleteStartSession(client_id, result_);
}
void MidiManager::EndSession(MidiManagerClient* client) {
@@ -51,9 +54,9 @@ void MidiManager::DispatchSendMidiData(MidiManagerClient* client,
NOTREACHED();
}
-bool MidiManager::Initialize() {
+MidiResult MidiManager::Initialize() {
TRACE_EVENT0("midi", "MidiManager::Initialize");
- return false;
+ return MIDI_NOT_SUPPORTED;
}
void MidiManager::AddInputPort(const MidiPortInfo& info) {
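
StartSession() now caches the MidiResult of the one-time Initialize() call and reports it through the new CompleteStartSession() client callback instead of returning a bool. Below is a minimal standalone model of that control flow; the simplified Client and Manager classes stand in for media::MidiManagerClient and media::MidiManager and are not the Chromium code.

// Standalone model of the reworked StartSession() flow.
#include <iostream>
#include <set>

enum MidiResult { MIDI_OK, MIDI_NOT_SUPPORTED, MIDI_INITIALIZATION_ERROR };

class Client {
 public:
  // Called exactly once per StartSession(), on success or failure.
  void CompleteStartSession(int client_id, MidiResult result) {
    std::cout << "session " << client_id << " -> result " << result << "\n";
  }
};

class Manager {
 public:
  void StartSession(Client* client, int client_id) {
    if (!initialized_) {        // Lazily initialize the back-end exactly once
      initialized_ = true;      // and remember the outcome for later callers.
      result_ = Initialize();
    }
    if (result_ == MIDI_OK)
      clients_.insert(client);  // Only successful sessions receive MIDI data.
    client->CompleteStartSession(client_id, result_);
  }

 protected:
  // Default implementation is for unsupported platforms, as in midi_manager.cc.
  virtual MidiResult Initialize() { return MIDI_NOT_SUPPORTED; }

 private:
  bool initialized_ = false;
  MidiResult result_ = MIDI_NOT_SUPPORTED;
  std::set<Client*> clients_;
};

int main() {
  Manager manager;
  Client client;
  manager.StartSession(&client, 0);  // Prints "session 0 -> result 1".
}
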
diff --git a/media/midi/midi_manager.h b/media/midi/midi_manager.h
index dd7828bdad..88c930bbc4 100644
--- a/media/midi/midi_manager.h
+++ b/media/midi/midi_manager.h
@@ -12,6 +12,7 @@
#include "base/synchronization/lock.h"
#include "media/base/media_export.h"
#include "media/midi/midi_port_info.h"
+#include "media/midi/midi_result.h"
namespace media {
@@ -20,7 +21,11 @@ namespace media {
// for details.
class MEDIA_EXPORT MidiManagerClient {
public:
- virtual ~MidiManagerClient() {}
+ virtual ~MidiManagerClient() {}
+
+ // CompleteStartSession() is called when platform dependent preparation is
+ // finished.
+ virtual void CompleteStartSession(int client_id, MidiResult result) = 0;
// ReceiveMidiData() is called when MIDI data has been received from the
// MIDI system.
@@ -51,10 +56,11 @@ class MEDIA_EXPORT MidiManager {
// A client calls StartSession() to receive and send MIDI data.
// If the session is ready to start, the MIDI system is lazily initialized
// and the client is registered to receive MIDI data.
- // Returns |true| if the session succeeds to start.
- bool StartSession(MidiManagerClient* client);
+ // CompleteStartSession() is called with MIDI_OK if the session is started.
+ // Otherwise, CompleteStartSession() is called with the appropriate MidiResult code.
+ void StartSession(MidiManagerClient* client, int client_id);
- // A client calls ReleaseSession() to stop receiving MIDI data.
+ // A client calls EndSession() to stop receiving MIDI data.
void EndSession(MidiManagerClient* client);
// DispatchSendMidiData() is called when MIDI data should be sent to the MIDI
@@ -84,7 +90,7 @@ class MEDIA_EXPORT MidiManager {
protected:
// Initializes the MIDI system, returning |true| on success.
// The default implementation is for unsupported platforms.
- virtual bool Initialize();
+ virtual MidiResult Initialize();
void AddInputPort(const MidiPortInfo& info);
void AddOutputPort(const MidiPortInfo& info);
@@ -96,6 +102,7 @@ class MEDIA_EXPORT MidiManager {
double timestamp);
bool initialized_;
+ MidiResult result_;
// Keeps track of all clients who wish to receive MIDI data.
typedef std::set<MidiManagerClient*> ClientList;
diff --git a/media/midi/midi_manager_alsa.cc b/media/midi/midi_manager_alsa.cc
index 8f580e1ba3..eb79540b82 100644
--- a/media/midi/midi_manager_alsa.cc
+++ b/media/midi/midi_manager_alsa.cc
@@ -127,7 +127,7 @@ MidiManagerAlsa::MidiManagerAlsa()
pipe_fd_[i] = -1;
}
-bool MidiManagerAlsa::Initialize() {
+MidiResult MidiManagerAlsa::Initialize() {
// TODO(toyoshim): Make Initialize() asynchronous.
// See http://crbug.com/339746.
TRACE_EVENT0("midi", "MidiManagerAlsa::Initialize");
@@ -188,14 +188,14 @@ bool MidiManagerAlsa::Initialize() {
if (pipe(pipe_fd_) < 0) {
VPLOG(1) << "pipe() failed";
- return false;
+ return MIDI_INITIALIZATION_ERROR;
}
event_thread_.Start();
event_thread_.message_loop()->PostTask(
FROM_HERE,
base::Bind(&MidiManagerAlsa::EventReset, base::Unretained(this)));
- return true;
+ return MIDI_OK;
}
MidiManagerAlsa::~MidiManagerAlsa() {
diff --git a/media/midi/midi_manager_alsa.h b/media/midi/midi_manager_alsa.h
index d97650c294..803496b787 100644
--- a/media/midi/midi_manager_alsa.h
+++ b/media/midi/midi_manager_alsa.h
@@ -21,7 +21,7 @@ class MidiManagerAlsa : public MidiManager {
virtual ~MidiManagerAlsa();
// MidiManager implementation.
- virtual bool Initialize() OVERRIDE;
+ virtual MidiResult Initialize() OVERRIDE;
virtual void DispatchSendMidiData(MidiManagerClient* client,
uint32 port_index,
const std::vector<uint8>& data,
diff --git a/media/midi/midi_manager_mac.cc b/media/midi/midi_manager_mac.cc
index b172f86e18..0c9c869d1b 100644
--- a/media/midi/midi_manager_mac.cc
+++ b/media/midi/midi_manager_mac.cc
@@ -36,7 +36,7 @@ MidiManagerMac::MidiManagerMac()
send_thread_("MidiSendThread") {
}
-bool MidiManagerMac::Initialize() {
+MidiResult MidiManagerMac::Initialize() {
TRACE_EVENT0("midi", "MidiManagerMac::Initialize");
// CoreMIDI registration.
@@ -48,7 +48,7 @@ bool MidiManagerMac::Initialize() {
&midi_client_);
if (result != noErr)
- return false;
+ return MIDI_INITIALIZATION_ERROR;
coremidi_input_ = 0;
@@ -60,14 +60,14 @@ bool MidiManagerMac::Initialize() {
this,
&coremidi_input_);
if (result != noErr)
- return false;
+ return MIDI_INITIALIZATION_ERROR;
result = MIDIOutputPortCreate(
midi_client_,
CFSTR("MIDI Output"),
&coremidi_output_);
if (result != noErr)
- return false;
+ return MIDI_INITIALIZATION_ERROR;
uint32 destination_count = MIDIGetNumberOfDestinations();
destinations_.resize(destination_count);
@@ -102,7 +102,7 @@ bool MidiManagerMac::Initialize() {
packet_list_ = reinterpret_cast<MIDIPacketList*>(midi_buffer_);
midi_packet_ = MIDIPacketListInit(packet_list_);
- return true;
+ return MIDI_OK;
}
void MidiManagerMac::DispatchSendMidiData(MidiManagerClient* client,
diff --git a/media/midi/midi_manager_mac.h b/media/midi/midi_manager_mac.h
index 5b278374a1..5bd072d45e 100644
--- a/media/midi/midi_manager_mac.h
+++ b/media/midi/midi_manager_mac.h
@@ -24,7 +24,7 @@ class MEDIA_EXPORT MidiManagerMac : public MidiManager {
virtual ~MidiManagerMac();
// MidiManager implementation.
- virtual bool Initialize() OVERRIDE;
+ virtual MidiResult Initialize() OVERRIDE;
virtual void DispatchSendMidiData(MidiManagerClient* client,
uint32 port_index,
const std::vector<uint8>& data,
diff --git a/media/midi/midi_manager_usb.cc b/media/midi/midi_manager_usb.cc
index 844c621020..0991f1d7d3 100644
--- a/media/midi/midi_manager_usb.cc
+++ b/media/midi/midi_manager_usb.cc
@@ -34,10 +34,10 @@ MidiManagerUsb::MidiManagerUsb(scoped_ptr<UsbMidiDevice::Factory> factory)
MidiManagerUsb::~MidiManagerUsb() {
}
-bool MidiManagerUsb::Initialize() {
+MidiResult MidiManagerUsb::Initialize() {
TRACE_EVENT0("midi", "MidiManagerUsb::Initialize");
Initialize(base::Bind(Noop));
- return true;
+ return MIDI_OK;
}
void MidiManagerUsb::Initialize(base::Callback<void(bool result)> callback) {
diff --git a/media/midi/midi_manager_usb.h b/media/midi/midi_manager_usb.h
index a08b986099..bccc1b8626 100644
--- a/media/midi/midi_manager_usb.h
+++ b/media/midi/midi_manager_usb.h
@@ -31,7 +31,7 @@ class MEDIA_EXPORT MidiManagerUsb : public MidiManager,
virtual ~MidiManagerUsb();
// MidiManager implementation.
- virtual bool Initialize() OVERRIDE;
+ virtual MidiResult Initialize() OVERRIDE;
virtual void DispatchSendMidiData(MidiManagerClient* client,
uint32 port_index,
const std::vector<uint8>& data,
diff --git a/media/midi/midi_manager_usb_unittest.cc b/media/midi/midi_manager_usb_unittest.cc
index 567b60c057..2bfb85e496 100644
--- a/media/midi/midi_manager_usb_unittest.cc
+++ b/media/midi/midi_manager_usb_unittest.cc
@@ -73,6 +73,9 @@ class FakeMidiManagerClient : public MidiManagerClient {
explicit FakeMidiManagerClient(Logger* logger) : logger_(logger) {}
virtual ~FakeMidiManagerClient() {}
+ virtual void CompleteStartSession(int client_id, MidiResult result) OVERRIDE {
+ }
+
virtual void ReceiveMidiData(uint32 port_index,
const uint8* data,
size_t size,
@@ -300,7 +303,7 @@ TEST_F(MidiManagerUsbTest, Receive) {
ASSERT_TRUE(initialize_callback_run_);
ASSERT_TRUE(initialize_result_);
- manager_->StartSession(&client);
+ manager_->StartSession(&client, 0);
manager_->ReceiveUsbMidiData(device_raw, 2, data, arraysize(data), 0);
manager_->EndSession(&client);
diff --git a/media/midi/midi_manager_win.cc b/media/midi/midi_manager_win.cc
index a2343f6c11..95d7379a65 100644
--- a/media/midi/midi_manager_win.cc
+++ b/media/midi/midi_manager_win.cc
@@ -501,7 +501,7 @@ MidiManagerWin::MidiManagerWin()
: send_thread_("MidiSendThread") {
}
-bool MidiManagerWin::Initialize() {
+MidiResult MidiManagerWin::Initialize() {
TRACE_EVENT0("midi", "MidiManagerWin::Initialize");
const UINT num_in_devices = midiInGetNumDevs();
in_devices_.reserve(num_in_devices);
@@ -548,7 +548,7 @@ bool MidiManagerWin::Initialize() {
out_devices_.push_back(out_port.Pass());
}
- return true;
+ return MIDI_OK;
}
MidiManagerWin::~MidiManagerWin() {
diff --git a/media/midi/midi_manager_win.h b/media/midi/midi_manager_win.h
index 8e1cc363ac..7b38ab4b4a 100644
--- a/media/midi/midi_manager_win.h
+++ b/media/midi/midi_manager_win.h
@@ -20,7 +20,7 @@ class MidiManagerWin : public MidiManager {
virtual ~MidiManagerWin();
// MidiManager implementation.
- virtual bool Initialize() OVERRIDE;
+ virtual MidiResult Initialize() OVERRIDE;
virtual void DispatchSendMidiData(MidiManagerClient* client,
uint32 port_index,
const std::vector<uint8>& data,
diff --git a/media/midi/midi_result.h b/media/midi/midi_result.h
new file mode 100644
index 0000000000..99233c4d86
--- /dev/null
+++ b/media/midi/midi_result.h
@@ -0,0 +1,19 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_MIDI_RESULT_H_
+#define MEDIA_MIDI_MIDI_RESULT_H_
+
+namespace media {
+
+// Result codes for MIDI.
+enum MidiResult {
+ MIDI_OK,
+ MIDI_NOT_SUPPORTED,
+ MIDI_INITIALIZATION_ERROR,
+};
+
+} // namespace media
+
+#endif // MEDIA_MIDI_MIDI_RESULT_H_
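
Each platform backend maps its native failures onto the new enum in the same way (compare the ALSA, Mac, and Windows Initialize() hunks above). Below is a minimal sketch of that pattern; OpenPlatformMidi() is a made-up placeholder for the real OS call.

// Standalone sketch of a backend Initialize() returning MidiResult.
#include <iostream>

enum MidiResult { MIDI_OK, MIDI_NOT_SUPPORTED, MIDI_INITIALIZATION_ERROR };

bool OpenPlatformMidi() { return false; }  // Pretend the OS call failed.

MidiResult Initialize() {
  if (!OpenPlatformMidi())
    return MIDI_INITIALIZATION_ERROR;  // Previously "return false".
  return MIDI_OK;                      // Previously "return true".
}

int main() {
  std::cout << Initialize() << "\n";  // Prints 2 (MIDI_INITIALIZATION_ERROR).
}
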
diff --git a/media/test/data/README b/media/test/data/README
index 9d8ddf398c..9a26660254 100644
--- a/media/test/data/README
+++ b/media/test/data/README
@@ -39,6 +39,7 @@ bear-1280x720-a_frag-cenc.mp4 - A fragmented MP4 version of the audio track of b
bear-1280x720-a_frag-cenc_clear-all.mp4 - Same as bear-1280x720-a_frag-cenc.mp4 but no fragments are encrypted.
bear-1280x720-v_frag-cenc.mp4 - A fragmented MP4 version of the video track of bear-1280x720.mp4 encrypted (ISO CENC) using key ID [1] and key [2].
bear-1280x720-v_frag-cenc_clear-all.mp4 - Same as bear-1280x720-v_frag-cenc.mp4 but no fragments are encrypted.
+bear-1280x720-a_frag-cenc_missing-saiz-saio.mp4 - An invalid file similar to bear-1280x720-a_frag-cenc.mp4 but has no saiz and saio boxes. To save space, it has only one encrypted sample.
bear-320x240-16x9-aspect-av_enc-av.webm - bear-320x240-16x9-aspect.webm with audio & video encrypted using key ID [1] and key [2]
bear-320x240-av_enc-av.webm - bear-320x240.webm with audio & video encrypted using key ID [1] and key [2].
bear-320x240-av_enc-av_clear-1s.webm - Same as bear-320x240-av_enc-av.webm but with no frames in the first second encrypted.
diff --git a/media/test/data/bear-1280x720-a_frag-cenc_missing-saiz-saio.mp4 b/media/test/data/bear-1280x720-a_frag-cenc_missing-saiz-saio.mp4
new file mode 100644
index 0000000000..fb38ace904
--- /dev/null
+++ b/media/test/data/bear-1280x720-a_frag-cenc_missing-saiz-saio.mp4
Binary files differ
diff --git a/media/tools/player_x11/player_x11.cc b/media/tools/player_x11/player_x11.cc
index 0cec7b825d..d71cd1a876 100644
--- a/media/tools/player_x11/player_x11.cc
+++ b/media/tools/player_x11/player_x11.cc
@@ -79,9 +79,6 @@ bool InitX11() {
return true;
}
-void SetOpaque(bool /*opaque*/) {
-}
-
typedef base::Callback<void(media::VideoFrame*)> PaintCB;
void Paint(base::MessageLoop* message_loop, const PaintCB& paint_cb,
const scoped_refptr<media::VideoFrame>& video_frame) {
@@ -132,7 +129,6 @@ void InitPipeline(
video_decoders.Pass(),
media::SetDecryptorReadyCB(),
base::Bind(&Paint, paint_message_loop, paint_cb),
- base::Bind(&SetOpaque),
true));
collection->SetVideoRenderer(video_renderer.Pass());
diff --git a/media/video/capture/mac/video_capture_device_mac.mm b/media/video/capture/mac/video_capture_device_mac.mm
index 75f3937e89..1a65e99091 100644
--- a/media/video/capture/mac/video_capture_device_mac.mm
+++ b/media/video/capture/mac/video_capture_device_mac.mm
@@ -19,27 +19,33 @@ namespace media {
const int kMinFrameRate = 1;
const int kMaxFrameRate = 30;
-// In QT device identifiers, the USB VID and PID are stored in 4 bytes each.
+// In device identifiers, the USB VID and PID are stored in 4 bytes each.
const size_t kVidPidSize = 4;
-struct Resolution {
- int width;
- int height;
-};
-
-const Resolution kQVGA = { 320, 240 },
- kVGA = { 640, 480 },
- kHD = { 1280, 720 };
-
-const Resolution* const kWellSupportedResolutions[] = {
- &kQVGA,
- &kVGA,
- &kHD,
+// Some devices are not correctly supported in AVFoundation, e.g. Blackmagic,
+// see http://crbug.com/347371. The devices are identified by USB Vendor ID and
+// by a characteristic substring of the name, usually the vendor's name.
+const struct NameAndVid {
+ const char* vid;
+ const char* name;
+} kBlacklistedCameras[] = { { "a82c", "Blackmagic" } };
+
+const struct Resolution {
+ const int width;
+ const int height;
+} kQVGA = { 320, 240 },
+ kVGA = { 640, 480 },
+ kHD = { 1280, 720 };
+
+const struct Resolution* const kWellSupportedResolutions[] = {
+ &kQVGA,
+ &kVGA,
+ &kHD,
};
// Rescaling the image to fix the pixel aspect ratio runs the risk of making
// the aspect ratio worse, if QTKit selects a new source mode with a different
-// shape. This constant ensures that we don't take this risk if the current
+// shape. This constant ensures that we don't take this risk if the current
// aspect ratio is tolerable.
const float kMaxPixelAspectRatio = 1.15;
@@ -63,29 +69,63 @@ void GetBestMatchSupportedResolution(int* width, int* height) {
*height = matched_height;
}
+// static
void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
// Loop through all available devices and add to |device_names|.
- device_names->clear();
-
NSDictionary* capture_devices;
if (AVFoundationGlue::IsAVFoundationSupported()) {
+ bool is_any_device_blacklisted = false;
DVLOG(1) << "Enumerating video capture devices using AVFoundation";
capture_devices = [VideoCaptureDeviceAVFoundation deviceNames];
+ std::string device_vid;
+ // Enumerate all devices found by AVFoundation, translate the info for each
+ // to class Name and add it to |device_names|.
+ for (NSString* key in capture_devices) {
+ Name name([[capture_devices valueForKey:key] UTF8String],
+ [key UTF8String], Name::AVFOUNDATION);
+ device_names->push_back(name);
+ // Extract the device's Vendor ID and compare to all blacklisted ones.
+ device_vid = name.GetModel().substr(0, kVidPidSize);
+ for (size_t i = 0; i < arraysize(kBlacklistedCameras); ++i) {
+ is_any_device_blacklisted |= (device_vid == kBlacklistedCameras[i].vid);
+ if (is_any_device_blacklisted)
+ break;
+ }
+ }
+ // If there is any device blacklisted in the system, walk the QTKit device
+ // list and add those devices with a blacklisted name to the |device_names|.
+ // AVFoundation and QTKit device lists partially overlap, so add a "QTKit"
+ // prefix to the latter ones to distinguish them from the AVFoundation ones.
+ if (is_any_device_blacklisted) {
+ capture_devices = [VideoCaptureDeviceQTKit deviceNames];
+ for (NSString* key in capture_devices) {
+ NSString* device_name = [capture_devices valueForKey:key];
+ for (size_t i = 0; i < arraysize(kBlacklistedCameras); ++i) {
+ if ([device_name rangeOfString:@(kBlacklistedCameras[i].name)
+ options:NSCaseInsensitiveSearch].length != 0) {
+ DVLOG(1) << "Enumerated blacklisted " << [device_name UTF8String];
+ Name name("QTKit " + std::string([device_name UTF8String]),
+ [key UTF8String], Name::QTKIT);
+ device_names->push_back(name);
+ }
+ }
+ }
+ }
} else {
DVLOG(1) << "Enumerating video capture devices using QTKit";
capture_devices = [VideoCaptureDeviceQTKit deviceNames];
- }
- for (NSString* key in capture_devices) {
- Name name([[capture_devices valueForKey:key] UTF8String],
- [key UTF8String]);
- device_names->push_back(name);
+ for (NSString* key in capture_devices) {
+ Name name([[capture_devices valueForKey:key] UTF8String],
+ [key UTF8String], Name::QTKIT);
+ device_names->push_back(name);
+ }
}
}
// static
void VideoCaptureDevice::GetDeviceSupportedFormats(const Name& device,
VideoCaptureFormats* formats) {
- if (AVFoundationGlue::IsAVFoundationSupported()) {
+ if (device.capture_api_type() == Name::AVFOUNDATION) {
DVLOG(1) << "Enumerating video capture capabilities, AVFoundation";
[VideoCaptureDeviceAVFoundation getDevice:device
supportedFormats:formats];
@@ -219,7 +259,8 @@ bool VideoCaptureDeviceMac::Init() {
if (it == device_names.end())
return false;
- if (AVFoundationGlue::IsAVFoundationSupported()) {
+ DCHECK_NE(it->capture_api_type(), Name::API_TYPE_UNKNOWN);
+ if (it->capture_api_type() == Name::AVFOUNDATION) {
capture_device_ =
[[VideoCaptureDeviceAVFoundation alloc] initWithFrameReceiver:this];
} else {
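
GetDeviceNames() now flags any AVFoundation device whose USB vendor ID is blacklisted and, if one is found, re-enumerates through QTKit, keeping only devices whose friendly name matches a blacklisted vendor string (prefixed with "QTKit "). Below is a minimal standalone sketch of the two matching rules; kBlacklistedCameras mirrors the table added above, but std::string replaces NSString and everything else is a simplified stand-in for the Objective-C++ code.

// Standalone sketch of the two blacklist checks in GetDeviceNames().
#include <algorithm>
#include <cctype>
#include <cstddef>
#include <iostream>
#include <string>

struct NameAndVid { const char* vid; const char* name; };
const NameAndVid kBlacklistedCameras[] = { { "a82c", "Blackmagic" } };
const std::size_t kVidPidSize = 4;  // USB VID and PID use 4 hex chars each.

// AVFoundation pass: compare the leading VID of "[vid]:[pid]" to the list.
bool IsBlacklistedVid(const std::string& model) {
  const std::string device_vid = model.substr(0, kVidPidSize);
  for (const NameAndVid& camera : kBlacklistedCameras) {
    if (device_vid == camera.vid)
      return true;
  }
  return false;
}

// QTKit pass: case-insensitive substring match on the friendly device name.
bool NameMatchesBlacklist(std::string device_name) {
  std::transform(device_name.begin(), device_name.end(), device_name.begin(),
                 [](unsigned char c) { return std::tolower(c); });
  for (const NameAndVid& camera : kBlacklistedCameras) {
    std::string needle = camera.name;
    std::transform(needle.begin(), needle.end(), needle.begin(),
                   [](unsigned char c) { return std::tolower(c); });
    if (device_name.find(needle) != std::string::npos)
      return true;
  }
  return false;
}

int main() {
  std::cout << IsBlacklistedVid("a82c:0001") << " "
            << NameMatchesBlacklist("Blackmagic Camera") << "\n";  // "1 1"
}
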
diff --git a/media/video/capture/video_capture_device.h b/media/video/capture/video_capture_device.h
index 4e060a1dcb..8229716022 100644
--- a/media/video/capture/video_capture_device.h
+++ b/media/video/capture/video_capture_device.h
@@ -48,12 +48,21 @@ class MEDIA_EXPORT VideoCaptureDevice {
DIRECT_SHOW,
API_TYPE_UNKNOWN
};
-
+#endif
+#if defined(OS_MACOSX)
+ // Mac targets Capture Api type: it can only be set on construction.
+ enum CaptureApiType {
+ AVFOUNDATION,
+ QTKIT,
+ API_TYPE_UNKNOWN
+ };
+#endif
+#if defined(OS_WIN) || defined(OS_MACOSX)
Name(const std::string& name,
const std::string& id,
const CaptureApiType api_type)
: device_name_(name), unique_id_(id), capture_api_class_(api_type) {}
-#endif // if defined(OS_WIN)
+#endif
~Name() {}
// Friendly name of a device
@@ -63,7 +72,7 @@ class MEDIA_EXPORT VideoCaptureDevice {
// friendly name connected to the computer this will be unique.
const std::string& id() const { return unique_id_; }
- // The unique hardware model identifier of the capture device. Returns
+ // The unique hardware model identifier of the capture device. Returns
// "[vid]:[pid]" when a USB device is detected, otherwise "".
// The implementation of this method is platform-dependent.
const std::string GetModel() const;
@@ -81,7 +90,7 @@ class MEDIA_EXPORT VideoCaptureDevice {
return unique_id_ < other.id();
}
-#if defined(OS_WIN)
+#if defined(OS_WIN) || defined(OS_MACOSX)
CaptureApiType capture_api_type() const {
return capture_api_class_.capture_api_type();
}
@@ -90,16 +99,16 @@ class MEDIA_EXPORT VideoCaptureDevice {
private:
std::string device_name_;
std::string unique_id_;
-#if defined(OS_WIN)
- // This class wraps the CaptureApiType, so it has a by default value if not
- // inititalized, and I (mcasas) do a DCHECK on reading its value.
+#if defined(OS_WIN) || defined(OS_MACOSX)
+ // This class wraps the CaptureApiType to give it a default value if not
+ // initialized.
class CaptureApiClass {
public:
- CaptureApiClass(): capture_api_type_(API_TYPE_UNKNOWN) {}
+ CaptureApiClass(): capture_api_type_(API_TYPE_UNKNOWN) {}
CaptureApiClass(const CaptureApiType api_type)
- : capture_api_type_(api_type) {}
+ : capture_api_type_(api_type) {}
CaptureApiType capture_api_type() const {
- DCHECK_NE(capture_api_type_, API_TYPE_UNKNOWN);
+ DCHECK_NE(capture_api_type_, API_TYPE_UNKNOWN);
return capture_api_type_;
}
private:
@@ -107,7 +116,7 @@ class MEDIA_EXPORT VideoCaptureDevice {
};
CaptureApiClass capture_api_class_;
-#endif // if defined(OS_WIN)
+#endif
// Allow generated copy constructor and assignment.
};
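
The CaptureApiClass wrapper and the three-argument Name constructor are now compiled on Mac as well as Windows, so every enumerated device carries its capture API type and reading an unset value trips the DCHECK. Below is a minimal standalone model of that behavior; assert() stands in for DCHECK_NE, the sample device name and id are made up, and this is not the actual Chromium class.

// Standalone model of VideoCaptureDevice::Name's per-API-type tagging.
#include <cassert>
#include <iostream>
#include <string>

enum CaptureApiType { AVFOUNDATION, QTKIT, API_TYPE_UNKNOWN };

class Name {
 public:
  Name(const std::string& name, const std::string& id, CaptureApiType api_type)
      : device_name_(name), unique_id_(id), capture_api_type_(api_type) {}

  CaptureApiType capture_api_type() const {
    // Reading an unset API type is a programming error, as in the DCHECK_NE.
    assert(capture_api_type_ != API_TYPE_UNKNOWN);
    return capture_api_type_;
  }

 private:
  std::string device_name_;
  std::string unique_id_;
  CaptureApiType capture_api_type_;
};

int main() {
  Name name("FaceTime HD Camera", "0x1a110000", AVFOUNDATION);  // Illustrative.
  std::cout << (name.capture_api_type() == AVFOUNDATION) << "\n";  // Prints 1.
}
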
diff --git a/media/video/capture/video_capture_device_unittest.cc b/media/video/capture/video_capture_device_unittest.cc
index 1742b9fb51..afeced3dbf 100644
--- a/media/video/capture/video_capture_device_unittest.cc
+++ b/media/video/capture/video_capture_device_unittest.cc
@@ -128,6 +128,32 @@ class VideoCaptureDeviceTest : public testing::Test {
const VideoCaptureFormat& last_format() const { return last_format_; }
+ scoped_ptr<VideoCaptureDevice::Name> GetFirstDeviceNameSupportingPixelFormat(
+ const VideoPixelFormat& pixel_format) {
+ VideoCaptureDevice::GetDeviceNames(&names_);
+ if (!names_.size()) {
+ DVLOG(1) << "No camera available.";
+ return scoped_ptr<VideoCaptureDevice::Name>();
+ }
+ VideoCaptureDevice::Names::iterator names_iterator;
+ for (names_iterator = names_.begin(); names_iterator != names_.end();
+ ++names_iterator) {
+ VideoCaptureFormats supported_formats;
+ VideoCaptureDevice::GetDeviceSupportedFormats(*names_iterator,
+ &supported_formats);
+ VideoCaptureFormats::iterator formats_iterator;
+ for (formats_iterator = supported_formats.begin();
+ formats_iterator != supported_formats.end(); ++formats_iterator) {
+ if (formats_iterator->pixel_format == pixel_format) {
+ return scoped_ptr<VideoCaptureDevice::Name>(
+ new VideoCaptureDevice::Name(*names_iterator));
+ }
+ }
+ }
+ DVLOG(1) << "No camera can capture the format: " << pixel_format;
+ return scoped_ptr<VideoCaptureDevice::Name>();
+ }
+
#if defined(OS_WIN)
base::win::ScopedCOMInitializer initialize_com_;
#endif
@@ -161,7 +187,7 @@ TEST_F(VideoCaptureDeviceTest, CaptureVGA) {
scoped_ptr<VideoCaptureDevice> device(
VideoCaptureDevice::Create(names_.front()));
- ASSERT_FALSE(device.get() == NULL);
+ ASSERT_TRUE(device);
DVLOG(1) << names_.front().id();
EXPECT_CALL(*client_, OnErr())
@@ -189,7 +215,7 @@ TEST_F(VideoCaptureDeviceTest, Capture720p) {
scoped_ptr<VideoCaptureDevice> device(
VideoCaptureDevice::Create(names_.front()));
- ASSERT_FALSE(device.get() == NULL);
+ ASSERT_TRUE(device);
EXPECT_CALL(*client_, OnErr())
.Times(0);
@@ -213,7 +239,7 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_AllocateBadSize) {
}
scoped_ptr<VideoCaptureDevice> device(
VideoCaptureDevice::Create(names_.front()));
- ASSERT_TRUE(device.get() != NULL);
+ ASSERT_TRUE(device);
EXPECT_CALL(*client_, OnErr())
.Times(0);
@@ -284,7 +310,7 @@ TEST_F(VideoCaptureDeviceTest, DeAllocateCameraWhileRunning) {
}
scoped_ptr<VideoCaptureDevice> device(
VideoCaptureDevice::Create(names_.front()));
- ASSERT_TRUE(device.get() != NULL);
+ ASSERT_TRUE(device);
EXPECT_CALL(*client_, OnErr())
.Times(0);
@@ -312,7 +338,7 @@ TEST_F(VideoCaptureDeviceTest, FakeCapture) {
scoped_ptr<VideoCaptureDevice> device(
FakeVideoCaptureDevice::Create(names.front()));
- ASSERT_TRUE(device.get() != NULL);
+ ASSERT_TRUE(device);
EXPECT_CALL(*client_, OnErr())
.Times(0);
@@ -332,14 +358,14 @@ TEST_F(VideoCaptureDeviceTest, FakeCapture) {
// Start the camera in 720p to capture MJPEG instead of a raw format.
TEST_F(VideoCaptureDeviceTest, MAYBE_CaptureMjpeg) {
- VideoCaptureDevice::GetDeviceNames(&names_);
- if (!names_.size()) {
- DVLOG(1) << "No camera available. Exiting test.";
+ scoped_ptr<VideoCaptureDevice::Name> name =
+ GetFirstDeviceNameSupportingPixelFormat(PIXEL_FORMAT_MJPEG);
+ if (!name) {
+ DVLOG(1) << "No camera supports MJPEG format. Exiting test.";
return;
}
- scoped_ptr<VideoCaptureDevice> device(
- VideoCaptureDevice::Create(names_.front()));
- ASSERT_TRUE(device.get() != NULL);
+ scoped_ptr<VideoCaptureDevice> device(VideoCaptureDevice::Create(*name));
+ ASSERT_TRUE(device);
EXPECT_CALL(*client_, OnErr())
.Times(0);
@@ -359,19 +385,13 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_CaptureMjpeg) {
}
TEST_F(VideoCaptureDeviceTest, GetDeviceSupportedFormats) {
- VideoCaptureDevice::GetDeviceNames(&names_);
- if (!names_.size()) {
- DVLOG(1) << "No camera available. Exiting test.";
- return;
- }
- VideoCaptureFormats supported_formats;
- VideoCaptureDevice::Names::iterator names_iterator;
- for (names_iterator = names_.begin(); names_iterator != names_.end();
- ++names_iterator) {
- VideoCaptureDevice::GetDeviceSupportedFormats(*names_iterator,
- &supported_formats);
- // Nothing to test here since we cannot forecast the hardware capabilities.
- }
+ // Use PIXEL_FORMAT_MAX to iterate all device names for testing
+ // GetDeviceSupportedFormats().
+ scoped_ptr<VideoCaptureDevice::Name> name =
+ GetFirstDeviceNameSupportingPixelFormat(PIXEL_FORMAT_MAX);
+ // Verify no camera returned for PIXEL_FORMAT_MAX. Nothing else to test here
+ // since we cannot forecast the hardware capabilities.
+ ASSERT_FALSE(name);
}
TEST_F(VideoCaptureDeviceTest, FakeCaptureVariableResolution) {
@@ -388,7 +408,7 @@ TEST_F(VideoCaptureDeviceTest, FakeCaptureVariableResolution) {
scoped_ptr<VideoCaptureDevice> device(
FakeVideoCaptureDevice::Create(names.front()));
- ASSERT_TRUE(device.get() != NULL);
+ ASSERT_TRUE(device);
EXPECT_CALL(*client_, OnErr())
.Times(0);