summaryrefslogtreecommitdiff
path: root/media
diff options
context:
space:
mode:
authorTorne (Richard Coles) <torne@google.com>2013-08-05 13:57:33 +0100
committerTorne (Richard Coles) <torne@google.com>2013-08-05 13:57:33 +0100
commita36e5920737c6adbddd3e43b760e5de8431db6e0 (patch)
tree347d048bb8c8828d50113bf94ace40bf0613f2cd /media
parent34378da0e9429d394aafdaa771301aff58447cb1 (diff)
downloadchromium_org-a36e5920737c6adbddd3e43b760e5de8431db6e0.tar.gz
Merge from Chromium at DEPS revision r215573
This commit was generated by merge_to_master.py. Change-Id: Ib95814f98e5765b459dd32425f9bf9138edf2bca
Diffstat (limited to 'media')
-rw-r--r--media/audio/agc_audio_stream.h34
-rw-r--r--media/audio/cras/audio_manager_cras.cc7
-rw-r--r--media/audio/cras/audio_manager_cras.h4
-rw-r--r--media/audio/cras/cras_input.cc10
-rw-r--r--media/audio/cras/cras_input.h10
-rw-r--r--media/audio/cras/cras_input_unittest.cc20
-rw-r--r--media/audio/pulse/audio_manager_pulse.cc41
-rw-r--r--media/audio/pulse/audio_manager_pulse.h9
-rw-r--r--media/audio/pulse/pulse.sigs3
-rw-r--r--media/audio/pulse/pulse_input.cc6
-rw-r--r--media/base/android/demuxer_stream_player_params.cc9
-rw-r--r--media/base/android/demuxer_stream_player_params.h28
-rw-r--r--media/base/android/java/src/org/chromium/media/MediaCodecBridge.java7
-rw-r--r--media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java4
-rw-r--r--media/base/android/java/src/org/chromium/media/VideoCapture.java55
-rw-r--r--media/base/android/media_codec_bridge.cc5
-rw-r--r--media/base/android/media_codec_bridge.h8
-rw-r--r--media/base/android/media_player_android.h2
-rw-r--r--media/base/android/media_player_bridge.cc4
-rw-r--r--media/base/android/media_player_bridge.h2
-rw-r--r--media/base/android/media_source_player.cc120
-rw-r--r--media/base/android/media_source_player.h44
-rw-r--r--media/base/android/media_source_player_unittest.cc14
-rw-r--r--media/base/audio_buffer.cc83
-rw-r--r--media/base/audio_buffer.h22
-rw-r--r--media/base/audio_buffer_unittest.cc28
-rw-r--r--media/base/container_names.cc4
-rw-r--r--media/base/container_names_unittest.cc8
-rw-r--r--media/base/simd/filter_yuv_mmx.cc3
-rw-r--r--media/crypto/aes_decryptor.cc11
-rw-r--r--media/crypto/aes_decryptor_unittest.cc95
-rw-r--r--media/ffmpeg/ffmpeg_common_unittest.cc2
-rw-r--r--media/filters/chunk_demuxer.cc5
-rw-r--r--media/filters/chunk_demuxer_unittest.cc536
-rw-r--r--media/filters/ffmpeg_audio_decoder.cc151
-rw-r--r--media/filters/ffmpeg_audio_decoder.h7
-rw-r--r--media/filters/ffmpeg_demuxer.cc5
-rw-r--r--media/filters/ffmpeg_demuxer.h6
-rw-r--r--media/filters/ffmpeg_demuxer_unittest.cc7
-rw-r--r--media/filters/gpu_video_decoder.cc29
-rw-r--r--media/filters/pipeline_integration_test_base.cc12
-rw-r--r--media/media_untrusted.gyp1
-rw-r--r--media/midi/midi_manager.cc25
-rw-r--r--media/midi/midi_manager.h35
-rw-r--r--media/midi/midi_manager_mac.cc8
-rw-r--r--media/midi/midi_manager_mac.h3
-rw-r--r--media/mp4/aac_unittest.cc4
-rw-r--r--media/mp4/box_reader_unittest.cc2
-rw-r--r--media/mp4/mp4_stream_parser_unittest.cc14
-rw-r--r--media/mp4/offset_byte_queue_unittest.cc6
-rw-r--r--media/tools/demuxer_bench/demuxer_bench.cc7
-rw-r--r--media/tools/player_x11/player_x11.cc2
-rw-r--r--media/tools/seek_tester/seek_tester.cc8
-rw-r--r--media/video/capture/android/video_capture_device_android.cc10
-rw-r--r--media/video/capture/android/video_capture_device_android.h6
-rw-r--r--media/video/capture/fake_video_capture_device.cc120
-rw-r--r--media/video/capture/fake_video_capture_device.h20
-rw-r--r--media/video/capture/linux/video_capture_device_linux.cc19
-rw-r--r--media/video/capture/linux/video_capture_device_linux.h6
-rw-r--r--media/video/capture/mac/video_capture_device_mac.h6
-rw-r--r--media/video/capture/mac/video_capture_device_mac.mm11
-rw-r--r--media/video/capture/video_capture_device.h4
-rw-r--r--media/video/capture/video_capture_device_dummy.cc5
-rw-r--r--media/video/capture/video_capture_device_dummy.h4
-rw-r--r--media/video/capture/video_capture_device_unittest.cc197
-rw-r--r--media/video/capture/video_capture_types.h50
-rw-r--r--media/video/capture/win/video_capture_device_mf_win.cc8
-rw-r--r--media/video/capture/win/video_capture_device_mf_win.h6
-rw-r--r--media/video/capture/win/video_capture_device_win.cc12
-rw-r--r--media/video/capture/win/video_capture_device_win.h6
-rw-r--r--media/webm/webm_cluster_parser_unittest.cc2
-rw-r--r--media/webm/webm_parser_unittest.cc2
-rw-r--r--media/webm/webm_tracks_parser_unittest.cc10
-rw-r--r--media/webm/webm_webvtt_parser_unittest.cc8
74 files changed, 1326 insertions, 761 deletions
diff --git a/media/audio/agc_audio_stream.h b/media/audio/agc_audio_stream.h
index beb2b4fe71..b289a0b15e 100644
--- a/media/audio/agc_audio_stream.h
+++ b/media/audio/agc_audio_stream.h
@@ -44,8 +44,8 @@
// a state.
//
// Calling SetAutomaticGainControl(true) enables the AGC and StartAgc() starts
-// a periodic timer which calls OnTimer() approximately once every second.
-// OnTimer() calls the QueryAndStoreNewMicrophoneVolume() method which asks
+// a periodic timer which calls QueryAndStoreNewMicrophoneVolume()
+// approximately once every second. QueryAndStoreNewMicrophoneVolume() asks
// the actual microphone about its current volume level. This value is
// normalized and stored so it can be read by GetAgcVolume() when the real-time
// audio thread needs the value. The main idea behind this scheme is to avoid
@@ -91,9 +91,15 @@ class MEDIA_EXPORT AgcAudioStream : public AudioInterface {
DCHECK(thread_checker_.CalledOnValidThread());
if (!agc_is_enabled_ || timer_.IsRunning())
return;
+
+ // Query and cache the volume to avoid sending 0 as volume to AGC at the
+ // beginning of the audio stream, otherwise AGC will try to raise the
+ // volume from 0.
+ QueryAndStoreNewMicrophoneVolume();
+
timer_.Start(FROM_HERE,
base::TimeDelta::FromMilliseconds(kIntervalBetweenVolumeUpdatesMs),
- this, &AgcAudioStream::OnTimer);
+ this, &AgcAudioStream::QueryAndStoreNewMicrophoneVolume);
}
// Stops the periodic timer which periodically checks and updates the
@@ -110,6 +116,9 @@ class MEDIA_EXPORT AgcAudioStream : public AudioInterface {
void UpdateAgcVolume() {
DCHECK(thread_checker_.CalledOnValidThread());
+ if (!timer_.IsRunning())
+ return;
+
// We take new volume samples once every second when the AGC is enabled.
// To ensure that a new setting has an immediate effect, the new volume
// setting is cached here. It will ensure that the next OnData() callback
@@ -147,13 +156,14 @@ class MEDIA_EXPORT AgcAudioStream : public AudioInterface {
// Takes a new microphone volume sample and stores it in |normalized_volume_|.
// Range is normalized to [0.0,1.0] or [0.0, 1.5] on Linux.
+ // This method is called periodically when AGC is enabled and always on the
+ // audio manager thread. We use it to read the current microphone level and
+ // to store it so it can be read by the main capture thread. By using this
+ // approach, we can avoid accessing audio hardware from a real-time audio
+ // thread and it leads to a more stable capture performance.
void QueryAndStoreNewMicrophoneVolume() {
DCHECK(thread_checker_.CalledOnValidThread());
- // Avoid updating the volume member if AGC is not running.
- if (!timer_.IsRunning())
- return;
-
// Cach the maximum volume if this is the first time we ask for it.
if (max_volume_ == 0.0)
max_volume_ = static_cast<AudioInterface*>(this)->GetMaxVolume();
@@ -168,16 +178,6 @@ class MEDIA_EXPORT AgcAudioStream : public AudioInterface {
}
}
- // This method is called periodically when AGC is enabled and always on the
- // audio manager thread. We use it to read the current microphone level and
- // to store it so it can be read by the main capture thread. By using this
- // approach, we can avoid accessing audio hardware from a real-time audio
- // thread and it leads to a more stable capture performance.
- void OnTimer() {
- DCHECK(thread_checker_.CalledOnValidThread());
- QueryAndStoreNewMicrophoneVolume();
- }
-
// Ensures that this class is created and destroyed on the same thread.
base::ThreadChecker thread_checker_;
diff --git a/media/audio/cras/audio_manager_cras.cc b/media/audio/cras/audio_manager_cras.cc
index 73d689a99d..165d642922 100644
--- a/media/audio/cras/audio_manager_cras.cc
+++ b/media/audio/cras/audio_manager_cras.cc
@@ -22,6 +22,8 @@ static const int kMaxOutputStreams = 50;
// Default sample rate for input and output streams.
static const int kDefaultSampleRate = 48000;
+const char AudioManagerCras::kLoopbackDeviceId[] = "loopback";
+
bool AudioManagerCras::HasAudioOutputDevices() {
return true;
}
@@ -52,7 +54,8 @@ void AudioManagerCras::GetAudioInputDeviceNames(
AudioParameters AudioManagerCras::GetInputStreamParameters(
const std::string& device_id) {
static const int kDefaultInputBufferSize = 1024;
-
+ // TODO(hshi): Fine-tune audio parameters based on |device_id|. The optimal
+ // parameters for the loopback stream may differ from the default.
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
kDefaultSampleRate, 16, kDefaultInputBufferSize);
@@ -124,7 +127,7 @@ AudioOutputStream* AudioManagerCras::MakeOutputStream(
AudioInputStream* AudioManagerCras::MakeInputStream(
const AudioParameters& params, const std::string& device_id) {
- return new CrasInputStream(params, this);
+ return new CrasInputStream(params, this, device_id);
}
} // namespace media
diff --git a/media/audio/cras/audio_manager_cras.h b/media/audio/cras/audio_manager_cras.h
index 4e69c8f10e..fdc5b02688 100644
--- a/media/audio/cras/audio_manager_cras.h
+++ b/media/audio/cras/audio_manager_cras.h
@@ -15,6 +15,10 @@ namespace media {
class MEDIA_EXPORT AudioManagerCras : public AudioManagerBase {
public:
+ // Unique ID of the "loopback" input device. This device captures post-mix,
+ // pre-DSP system audio.
+ static const char kLoopbackDeviceId[];
+
AudioManagerCras();
// AudioManager implementation.
diff --git a/media/audio/cras/cras_input.cc b/media/audio/cras/cras_input.cc
index dec96b8580..a82fe283f7 100644
--- a/media/audio/cras/cras_input.cc
+++ b/media/audio/cras/cras_input.cc
@@ -17,14 +17,18 @@
namespace media {
CrasInputStream::CrasInputStream(const AudioParameters& params,
- AudioManagerCras* manager)
+ AudioManagerCras* manager,
+ const std::string& device_id)
: audio_manager_(manager),
bytes_per_frame_(0),
callback_(NULL),
client_(NULL),
params_(params),
started_(false),
- stream_id_(0) {
+ stream_id_(0),
+ stream_direction_(device_id == AudioManagerCras::kLoopbackDeviceId
+ ? CRAS_STREAM_POST_MIX_PRE_DSP
+ : CRAS_STREAM_INPUT) {
DCHECK(audio_manager_);
}
@@ -127,7 +131,7 @@ void CrasInputStream::Start(AudioInputCallback* callback) {
unsigned int frames_per_packet = params_.frames_per_buffer();
cras_stream_params* stream_params = cras_client_stream_params_create(
- CRAS_STREAM_INPUT,
+ stream_direction_,
frames_per_packet, // Total latency.
frames_per_packet, // Call back when this many ready.
frames_per_packet, // Minimum Callback level ignored for capture streams.
diff --git a/media/audio/cras/cras_input.h b/media/audio/cras/cras_input.h
index 1b2a611681..dd2cb5474a 100644
--- a/media/audio/cras/cras_input.h
+++ b/media/audio/cras/cras_input.h
@@ -27,7 +27,8 @@ class CrasInputStream : public AgcAudioStream<AudioInputStream> {
public:
// The ctor takes all the usual parameters, plus |manager| which is the
// audio manager who is creating this object.
- CrasInputStream(const AudioParameters& params, AudioManagerCras* manager);
+ CrasInputStream(const AudioParameters& params, AudioManagerCras* manager,
+ const std::string& device_id);
// The dtor is typically called by the AudioManager only and it is usually
// triggered by calling AudioOutputStream::Close().
@@ -76,7 +77,7 @@ class CrasInputStream : public AgcAudioStream<AudioInputStream> {
// want circular references. Additionally, stream objects live on the audio
// thread, which is owned by the audio manager and we don't want to addref
// the manager from that thread.
- AudioManagerCras* audio_manager_;
+ AudioManagerCras* const audio_manager_;
// Size of frame in bytes.
uint32 bytes_per_frame_;
@@ -88,7 +89,7 @@ class CrasInputStream : public AgcAudioStream<AudioInputStream> {
cras_client* client_;
// PCM parameters for the stream.
- AudioParameters params_;
+ const AudioParameters params_;
// True if the stream has been started.
bool started_;
@@ -96,6 +97,9 @@ class CrasInputStream : public AgcAudioStream<AudioInputStream> {
// ID of the playing stream.
cras_stream_id_t stream_id_;
+ // Direction of the stream.
+ const CRAS_STREAM_DIRECTION stream_direction_;
+
DISALLOW_COPY_AND_ASSIGN(CrasInputStream);
};
diff --git a/media/audio/cras/cras_input_unittest.cc b/media/audio/cras/cras_input_unittest.cc
index aba89f3c46..27ea9858ba 100644
--- a/media/audio/cras/cras_input_unittest.cc
+++ b/media/audio/cras/cras_input_unittest.cc
@@ -62,13 +62,14 @@ class CrasInputStreamTest : public testing::Test {
kTestSampleRate,
kTestBitsPerSample,
samples_per_packet);
- return new CrasInputStream(params, mock_manager_.get());
+ return new CrasInputStream(params, mock_manager_.get(),
+ AudioManagerBase::kDefaultDeviceId);
}
void CaptureSomeFrames(const AudioParameters &params,
unsigned int duration_ms) {
- CrasInputStream* test_stream = new CrasInputStream(params,
- mock_manager_.get());
+ CrasInputStream* test_stream = new CrasInputStream(
+ params, mock_manager_.get(), AudioManagerBase::kDefaultDeviceId);
ASSERT_TRUE(test_stream->Open());
@@ -137,8 +138,8 @@ TEST_F(CrasInputStreamTest, BadBitsPerSample) {
kTestSampleRate,
kTestBitsPerSample - 1,
kTestFramesPerPacket);
- CrasInputStream* test_stream =
- new CrasInputStream(bad_bps_params, mock_manager_.get());
+ CrasInputStream* test_stream = new CrasInputStream(
+ bad_bps_params, mock_manager_.get(), AudioManagerBase::kDefaultDeviceId);
EXPECT_FALSE(test_stream->Open());
test_stream->Close();
}
@@ -149,8 +150,9 @@ TEST_F(CrasInputStreamTest, BadFormat) {
kTestSampleRate,
kTestBitsPerSample,
kTestFramesPerPacket);
- CrasInputStream* test_stream =
- new CrasInputStream(bad_format_params, mock_manager_.get());
+ CrasInputStream* test_stream = new CrasInputStream(
+ bad_format_params, mock_manager_.get(),
+ AudioManagerBase::kDefaultDeviceId);
EXPECT_FALSE(test_stream->Open());
test_stream->Close();
}
@@ -161,8 +163,8 @@ TEST_F(CrasInputStreamTest, BadSampleRate) {
0,
kTestBitsPerSample,
kTestFramesPerPacket);
- CrasInputStream* test_stream =
- new CrasInputStream(bad_rate_params, mock_manager_.get());
+ CrasInputStream* test_stream = new CrasInputStream(
+ bad_rate_params, mock_manager_.get(), AudioManagerBase::kDefaultDeviceId);
EXPECT_FALSE(test_stream->Open());
test_stream->Close();
}
diff --git a/media/audio/pulse/audio_manager_pulse.cc b/media/audio/pulse/audio_manager_pulse.cc
index 18effa6bef..dcdd328222 100644
--- a/media/audio/pulse/audio_manager_pulse.cc
+++ b/media/audio/pulse/audio_manager_pulse.cc
@@ -66,13 +66,21 @@ AudioManagerPulse::~AudioManagerPulse() {
// Implementation of AudioManager.
bool AudioManagerPulse::HasAudioOutputDevices() {
- // TODO(xians): implement this function.
- return true;
+ DCHECK(input_mainloop_);
+ DCHECK(input_context_);
+ media::AudioDeviceNames devices;
+ AutoPulseLock auto_lock(input_mainloop_);
+ devices_ = &devices;
+ pa_operation* operation = pa_context_get_sink_info_list(
+ input_context_, OutputDevicesInfoCallback, this);
+ WaitForOperationCompletion(input_mainloop_, operation);
+ return !devices.empty();
}
bool AudioManagerPulse::HasAudioInputDevices() {
- // TODO(xians): implement this function.
- return true;
+ media::AudioDeviceNames devices;
+ GetAudioInputDeviceNames(&devices);
+ return !devices.empty();
}
void AudioManagerPulse::ShowAudioInputSettings() {
@@ -84,10 +92,10 @@ void AudioManagerPulse::GetAudioInputDeviceNames(
DCHECK(device_names->empty());
DCHECK(input_mainloop_);
DCHECK(input_context_);
- devices_ = device_names;
AutoPulseLock auto_lock(input_mainloop_);
+ devices_ = device_names;
pa_operation* operation = pa_context_get_source_info_list(
- input_context_, DevicesInfoCallback, this);
+ input_context_, InputDevicesInfoCallback, this);
WaitForOperationCompletion(input_mainloop_, operation);
// Append the default device on the top of the list if the list is not empty.
@@ -265,9 +273,9 @@ void AudioManagerPulse::DestroyPulse() {
input_mainloop_ = NULL;
}
-void AudioManagerPulse::DevicesInfoCallback(pa_context* context,
- const pa_source_info* info,
- int error, void *user_data) {
+void AudioManagerPulse::InputDevicesInfoCallback(pa_context* context,
+ const pa_source_info* info,
+ int error, void *user_data) {
AudioManagerPulse* manager = reinterpret_cast<AudioManagerPulse*>(user_data);
if (error) {
@@ -283,6 +291,21 @@ void AudioManagerPulse::DevicesInfoCallback(pa_context* context,
}
}
+void AudioManagerPulse::OutputDevicesInfoCallback(pa_context* context,
+ const pa_sink_info* info,
+ int error, void *user_data) {
+ AudioManagerPulse* manager = reinterpret_cast<AudioManagerPulse*>(user_data);
+
+ if (error) {
+ // Signal the pulse object that it is done.
+ pa_threaded_mainloop_signal(manager->input_mainloop_, 0);
+ return;
+ }
+
+ manager->devices_->push_back(media::AudioDeviceName(info->description,
+ info->name));
+}
+
void AudioManagerPulse::SampleRateInfoCallback(pa_context* context,
const pa_server_info* info,
void* user_data) {
diff --git a/media/audio/pulse/audio_manager_pulse.h b/media/audio/pulse/audio_manager_pulse.h
index d5cb93e98d..6dfebaeff3 100644
--- a/media/audio/pulse/audio_manager_pulse.h
+++ b/media/audio/pulse/audio_manager_pulse.h
@@ -50,9 +50,12 @@ class MEDIA_EXPORT AudioManagerPulse : public AudioManagerBase {
void DestroyPulse();
// Callback to get the devices' info like names, used by GetInputDevices().
- static void DevicesInfoCallback(pa_context* context,
- const pa_source_info* info,
- int error, void* user_data);
+ static void InputDevicesInfoCallback(pa_context* context,
+ const pa_source_info* info,
+ int error, void* user_data);
+ static void OutputDevicesInfoCallback(pa_context* context,
+ const pa_sink_info* info,
+ int error, void* user_data);
// Callback to get the native sample rate of PulseAudio, used by
// GetNativeSampleRate().
diff --git a/media/audio/pulse/pulse.sigs b/media/audio/pulse/pulse.sigs
index 667e6be411..b5d927c754 100644
--- a/media/audio/pulse/pulse.sigs
+++ b/media/audio/pulse/pulse.sigs
@@ -21,6 +21,7 @@ void pa_context_disconnect(pa_context* c);
pa_operation* pa_context_get_server_info(pa_context* c, pa_server_info_cb_t cb, void* userdata);
pa_operation* pa_context_get_source_info_by_index(pa_context* c, uint32_t idx, pa_source_info_cb_t cb, void* userdata);
pa_operation* pa_context_get_source_info_list(pa_context* c, pa_source_info_cb_t cb, void* userdata);
+pa_operation* pa_context_get_sink_info_list(pa_context* c, pa_sink_info_cb_t cb, void* userdata);
pa_context_state_t pa_context_get_state(pa_context* c);
pa_context* pa_context_new(pa_mainloop_api* mainloop, const char* name);
pa_operation* pa_context_set_source_volume_by_index(pa_context* c, uint32_t idx, const pa_cvolume* volume, pa_context_success_cb_t cb, void* userdata);
@@ -48,4 +49,4 @@ void pa_stream_set_write_callback(pa_stream *p, pa_stream_request_cb_t cb, void
void pa_stream_unref(pa_stream* s);
int pa_context_errno(pa_context *c);
const char* pa_strerror(int error);
-pa_cvolume* pa_cvolume_set(pa_cvolume* a, unsigned channels, pa_volume_t v); \ No newline at end of file
+pa_cvolume* pa_cvolume_set(pa_cvolume* a, unsigned channels, pa_volume_t v);
diff --git a/media/audio/pulse/pulse_input.cc b/media/audio/pulse/pulse_input.cc
index cc6208527d..54dfc1e05a 100644
--- a/media/audio/pulse/pulse_input.cc
+++ b/media/audio/pulse/pulse_input.cc
@@ -61,13 +61,15 @@ void PulseAudioInputStream::Start(AudioInputCallback* callback) {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(callback);
DCHECK(handle_);
+
+ // AGC needs to be started out of the lock.
+ StartAgc();
+
AutoPulseLock auto_lock(pa_mainloop_);
if (stream_started_)
return;
- StartAgc();
-
// Clean up the old buffer.
pa_stream_drop(handle_);
buffer_->Clear();
diff --git a/media/base/android/demuxer_stream_player_params.cc b/media/base/android/demuxer_stream_player_params.cc
index 3ed1a8c446..827be11956 100644
--- a/media/base/android/demuxer_stream_player_params.cc
+++ b/media/base/android/demuxer_stream_player_params.cc
@@ -19,6 +19,10 @@ MediaPlayerHostMsg_DemuxerReady_Params::
MediaPlayerHostMsg_DemuxerReady_Params::
~MediaPlayerHostMsg_DemuxerReady_Params() {}
+AccessUnit::AccessUnit() : end_of_stream(false) {}
+
+AccessUnit::~AccessUnit() {}
+
MediaPlayerHostMsg_ReadFromDemuxerAck_Params::
MediaPlayerHostMsg_ReadFromDemuxerAck_Params()
: type(DemuxerStream::UNKNOWN) {}
@@ -26,9 +30,4 @@ MediaPlayerHostMsg_ReadFromDemuxerAck_Params::
MediaPlayerHostMsg_ReadFromDemuxerAck_Params::
~MediaPlayerHostMsg_ReadFromDemuxerAck_Params() {}
-MediaPlayerHostMsg_ReadFromDemuxerAck_Params::AccessUnit::AccessUnit()
- : end_of_stream(false) {}
-
-MediaPlayerHostMsg_ReadFromDemuxerAck_Params::AccessUnit::~AccessUnit() {}
-
} // namespace media
diff --git a/media/base/android/demuxer_stream_player_params.h b/media/base/android/demuxer_stream_player_params.h
index 585af2b4c7..a9fb0520ae 100644
--- a/media/base/android/demuxer_stream_player_params.h
+++ b/media/base/android/demuxer_stream_player_params.h
@@ -36,21 +36,21 @@ struct MEDIA_EXPORT MediaPlayerHostMsg_DemuxerReady_Params {
std::string key_system;
};
-struct MEDIA_EXPORT MediaPlayerHostMsg_ReadFromDemuxerAck_Params {
- struct MEDIA_EXPORT AccessUnit {
- AccessUnit();
- ~AccessUnit();
-
- DemuxerStream::Status status;
- bool end_of_stream;
- // TODO(ycheo): Use the shared memory to transfer the block data.
- std::vector<uint8> data;
- base::TimeDelta timestamp;
- std::vector<char> key_id;
- std::vector<char> iv;
- std::vector<media::SubsampleEntry> subsamples;
- };
+struct MEDIA_EXPORT AccessUnit {
+ AccessUnit();
+ ~AccessUnit();
+
+ DemuxerStream::Status status;
+ bool end_of_stream;
+ // TODO(ycheo): Use the shared memory to transfer the block data.
+ std::vector<uint8> data;
+ base::TimeDelta timestamp;
+ std::vector<char> key_id;
+ std::vector<char> iv;
+ std::vector<media::SubsampleEntry> subsamples;
+};
+struct MEDIA_EXPORT MediaPlayerHostMsg_ReadFromDemuxerAck_Params {
MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
~MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
diff --git a/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java b/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
index 26c0731ce2..ed5d9478c5 100644
--- a/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
+++ b/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
@@ -285,6 +285,13 @@ class MediaCodecBridge {
}
}
+ @CalledByNative
+ private void setVolume(double volume) {
+ if (mAudioTrack != null) {
+ mAudioTrack.setStereoVolume((float) volume, (float) volume);
+ }
+ }
+
private void resetLastPresentationTimeIfNeeded(long presentationTimeUs) {
if (mFlushed) {
mLastPresentationTimeUs =
diff --git a/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java b/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java
index e1b0e09475..4b0a1aa6d1 100644
--- a/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java
+++ b/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java
@@ -85,8 +85,8 @@ public class MediaPlayerBridge {
}
@CalledByNative
- protected void setVolume(float leftVolume, float rightVolume) {
- getLocalPlayer().setVolume(leftVolume, rightVolume);
+ protected void setVolume(double volume) {
+ getLocalPlayer().setVolume((float) volume, (float) volume);
}
@CalledByNative
diff --git a/media/base/android/java/src/org/chromium/media/VideoCapture.java b/media/base/android/java/src/org/chromium/media/VideoCapture.java
index 8d67f5ed9f..f055f35ed6 100644
--- a/media/base/android/java/src/org/chromium/media/VideoCapture.java
+++ b/media/base/android/java/src/org/chromium/media/VideoCapture.java
@@ -31,9 +31,35 @@ public class VideoCapture implements PreviewCallback, OnFrameAvailableListener {
public int mDesiredFps = 0;
}
+ // Some devices with OS older than JELLY_BEAN don't support YV12 format correctly.
+ // Some devices don't support YV12 format correctly even with JELLY_BEAN or newer OS.
+ // To work around the issues on those devices, we'd have to request NV21.
+ // This is a temporary hack till device manufacturers fix the problem or
+ // we don't need to support those devices any more.
+ private static class DeviceImageFormatHack {
+ private static final String[] sBUGGY_DEVICE_LIST = {
+ "SAMSUNG-SGH-I747",
+ };
+
+ static int getImageFormat() {
+ if (android.os.Build.VERSION.SDK_INT < android.os.Build.VERSION_CODES.JELLY_BEAN) {
+ return ImageFormat.NV21;
+ }
+
+ for (String buggyDevice : sBUGGY_DEVICE_LIST) {
+ if (buggyDevice.contentEquals(android.os.Build.MODEL)) {
+ return ImageFormat.NV21;
+ }
+ }
+
+ return ImageFormat.YV12;
+ }
+ }
+
private Camera mCamera;
public ReentrantLock mPreviewBufferLock = new ReentrantLock();
- private int mPixelFormat = ImageFormat.YV12;
+ private int mImageFormat = ImageFormat.YV12;
+ private byte[] mColorPlane = null;
private Context mContext = null;
// True when native code has started capture.
private boolean mIsRunning = false;
@@ -147,8 +173,10 @@ public class VideoCapture implements PreviewCallback, OnFrameAvailableListener {
Log.d(TAG, "allocate: matched width=" + matchedWidth +
", height=" + matchedHeight);
+ calculateImageFormat(matchedWidth, matchedHeight);
+
parameters.setPreviewSize(matchedWidth, matchedHeight);
- parameters.setPreviewFormat(mPixelFormat);
+ parameters.setPreviewFormat(mImageFormat);
parameters.setPreviewFpsRange(fpsMin, fpsMax);
mCamera.setParameters(parameters);
@@ -174,7 +202,7 @@ public class VideoCapture implements PreviewCallback, OnFrameAvailableListener {
mCamera.setPreviewTexture(mSurfaceTexture);
int bufSize = matchedWidth * matchedHeight *
- ImageFormat.getBitsPerPixel(mPixelFormat) / 8;
+ ImageFormat.getBitsPerPixel(mImageFormat) / 8;
for (int i = 0; i < NUM_CAPTURE_BUFFERS; i++) {
byte[] buffer = new byte[bufSize];
mCamera.addCallbackBuffer(buffer);
@@ -291,6 +319,9 @@ public class VideoCapture implements PreviewCallback, OnFrameAvailableListener {
} else {
rotation = (mCameraOrientation - rotation + 360) % 360;
}
+ if (mImageFormat == ImageFormat.NV21) {
+ convertNV21ToYV12(data);
+ }
nativeOnFrameAvailable(mNativeVideoCaptureDeviceAndroid,
data, mExpectedFrameSize,
rotation, flipVertical, flipHorizontal);
@@ -377,4 +408,22 @@ public class VideoCapture implements PreviewCallback, OnFrameAvailableListener {
}
return orientation;
}
+
+ private void calculateImageFormat(int width, int height) {
+ mImageFormat = DeviceImageFormatHack.getImageFormat();
+ if (mImageFormat == ImageFormat.NV21) {
+ mColorPlane = new byte[width * height / 4];
+ }
+ }
+
+ private void convertNV21ToYV12(byte[] data) {
+ final int ySize = mCurrentCapability.mWidth * mCurrentCapability.mHeight;
+ final int uvSize = ySize / 4;
+ for (int i = 0; i < uvSize; i++) {
+ final int index = ySize + i * 2;
+ data[ySize + i] = data[index];
+ mColorPlane[i] = data[index + 1];
+ }
+ System.arraycopy(mColorPlane, 0, data, ySize + uvSize, uvSize);
+ }
}
diff --git a/media/base/android/media_codec_bridge.cc b/media/base/android/media_codec_bridge.cc
index a3c15c0b89..ab54936780 100644
--- a/media/base/android/media_codec_bridge.cc
+++ b/media/base/android/media_codec_bridge.cc
@@ -373,6 +373,11 @@ void AudioCodecBridge::PlayOutputBuffer(int index, size_t size) {
env, media_codec(), byte_array.obj());
}
+void AudioCodecBridge::SetVolume(double volume) {
+ JNIEnv* env = AttachCurrentThread();
+ Java_MediaCodecBridge_setVolume(env, media_codec(), volume);
+}
+
VideoCodecBridge::VideoCodecBridge(const char* mime)
: MediaCodecBridge(mime) {
}
diff --git a/media/base/android/media_codec_bridge.h b/media/base/android/media_codec_bridge.h
index d28d39df12..3469b1804e 100644
--- a/media/base/android/media_codec_bridge.h
+++ b/media/base/android/media_codec_bridge.h
@@ -75,8 +75,9 @@ class MEDIA_EXPORT MediaCodecBridge {
// Submits an empty buffer with a EOS (END OF STREAM) flag.
void QueueEOS(int input_buffer_index);
- // Returns the index of an input buffer to be filled with valid data or
- // INFO_TRY_AGAIN_LATER if no such buffer is currently available.
+ // Returns an index (>=0) of an input buffer to be filled with valid data,
+ // INFO_TRY_AGAIN_LATER if no such buffer is currently available, or
+ // INFO_MEDIA_CODEC_ERROR if unexpected error happens.
// Use kTimeOutInfinity for infinite timeout.
int DequeueInputBuffer(base::TimeDelta timeout);
@@ -132,6 +133,9 @@ class AudioCodecBridge : public MediaCodecBridge {
// DequeueOutputBuffer() and before ReleaseOutputBuffer.
void PlayOutputBuffer(int index, size_t size);
+ // Set the volume of the audio output.
+ void SetVolume(double volume);
+
private:
explicit AudioCodecBridge(const char* mime);
diff --git a/media/base/android/media_player_android.h b/media/base/android/media_player_android.h
index 18831b668d..f1c9c37ee0 100644
--- a/media/base/android/media_player_android.h
+++ b/media/base/android/media_player_android.h
@@ -71,7 +71,7 @@ class MEDIA_EXPORT MediaPlayerAndroid {
virtual void Release() = 0;
// Set the player volume.
- virtual void SetVolume(float leftVolume, float rightVolume) = 0;
+ virtual void SetVolume(double volume) = 0;
// Get the media information from the player.
virtual int GetVideoWidth() = 0;
diff --git a/media/base/android/media_player_bridge.cc b/media/base/android/media_player_bridge.cc
index 15a26ece3f..e0d9868827 100644
--- a/media/base/android/media_player_bridge.cc
+++ b/media/base/android/media_player_bridge.cc
@@ -300,14 +300,14 @@ void MediaPlayerBridge::Release() {
listener_.ReleaseMediaPlayerListenerResources();
}
-void MediaPlayerBridge::SetVolume(float left_volume, float right_volume) {
+void MediaPlayerBridge::SetVolume(double volume) {
if (j_media_player_bridge_.is_null())
return;
JNIEnv* env = base::android::AttachCurrentThread();
CHECK(env);
Java_MediaPlayerBridge_setVolume(
- env, j_media_player_bridge_.obj(), left_volume, right_volume);
+ env, j_media_player_bridge_.obj(), volume);
}
void MediaPlayerBridge::OnVideoSizeChanged(int width, int height) {
diff --git a/media/base/android/media_player_bridge.h b/media/base/android/media_player_bridge.h
index 421bcb3e01..85a2960405 100644
--- a/media/base/android/media_player_bridge.h
+++ b/media/base/android/media_player_bridge.h
@@ -58,7 +58,7 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
virtual void Pause() OVERRIDE;
virtual void SeekTo(base::TimeDelta time) OVERRIDE;
virtual void Release() OVERRIDE;
- virtual void SetVolume(float leftVolume, float rightVolume) OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
virtual int GetVideoWidth() OVERRIDE;
virtual int GetVideoHeight() OVERRIDE;
virtual base::TimeDelta GetCurrentTime() OVERRIDE;
diff --git a/media/base/android/media_source_player.cc b/media/base/android/media_source_player.cc
index 5673b926a0..5d4aa98b66 100644
--- a/media/base/android/media_source_player.cc
+++ b/media/base/android/media_source_player.cc
@@ -22,7 +22,7 @@ namespace {
// Timeout value for media codec operations. Because the first
// DequeInputBuffer() can take about 150 milliseconds, use 250 milliseconds
// here. See b/9357571.
-const int kMediaCodecTimeoutInMicroseconds = 250000;
+const int kMediaCodecTimeoutInMilliseconds = 250;
// Use 16bit PCM for audio output. Keep this value in sync with the output
// format we passed to AudioTrack in MediaCodecBridge.
@@ -82,6 +82,8 @@ class AudioDecoderJob : public MediaDecoderJob {
const AudioCodec audio_codec, int sample_rate, int channel_count,
const uint8* extra_data, size_t extra_data_size, jobject media_crypto);
+ void SetVolume(double volume);
+
private:
AudioDecoderJob(MediaCodecBridge* media_codec_bridge);
};
@@ -100,7 +102,7 @@ class VideoDecoderJob : public MediaDecoderJob {
};
void MediaDecoderJob::Decode(
- const MediaPlayerHostMsg_ReadFromDemuxerAck_Params::AccessUnit& unit,
+ const AccessUnit& unit,
const base::TimeTicks& start_time_ticks,
const base::TimeDelta& start_presentation_timestamp,
const MediaDecoderJob::DecoderCallback& callback) {
@@ -114,43 +116,55 @@ void MediaDecoderJob::Decode(
needs_flush_ = false;
}
+MediaDecoderJob::DecodeStatus MediaDecoderJob::QueueInputBuffer(
+ const AccessUnit& unit) {
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(
+ kMediaCodecTimeoutInMilliseconds);
+ int input_buf_index = media_codec_bridge_->DequeueInputBuffer(timeout);
+ if (input_buf_index == MediaCodecBridge::INFO_MEDIA_CODEC_ERROR)
+ return DECODE_FAILED;
+ if (input_buf_index == MediaCodecBridge::INFO_TRY_AGAIN_LATER)
+ return DECODE_TRY_ENQUEUE_INPUT_AGAIN_LATER;
+
+ // TODO(qinmin): skip frames if video is falling far behind.
+ DCHECK(input_buf_index >= 0);
+ if (unit.end_of_stream || unit.data.empty()) {
+ media_codec_bridge_->QueueEOS(input_buf_index);
+ } else if (unit.key_id.empty()) {
+ media_codec_bridge_->QueueInputBuffer(
+ input_buf_index, &unit.data[0], unit.data.size(), unit.timestamp);
+ } else {
+ if (unit.iv.empty() || unit.subsamples.empty()) {
+ LOG(ERROR) << "The access unit doesn't have iv or subsamples while it "
+ << "has key IDs!";
+ return DECODE_FAILED;
+ }
+ media_codec_bridge_->QueueSecureInputBuffer(
+ input_buf_index, &unit.data[0], unit.data.size(),
+ reinterpret_cast<const uint8*>(&unit.key_id[0]), unit.key_id.size(),
+ reinterpret_cast<const uint8*>(&unit.iv[0]), unit.iv.size(),
+ &unit.subsamples[0], unit.subsamples.size(), unit.timestamp);
+ }
+
+ return DECODE_SUCCEEDED;
+}
+
void MediaDecoderJob::DecodeInternal(
- const MediaPlayerHostMsg_ReadFromDemuxerAck_Params::AccessUnit& unit,
+ const AccessUnit& unit,
const base::TimeTicks& start_time_ticks,
const base::TimeDelta& start_presentation_timestamp,
bool needs_flush,
const MediaDecoderJob::DecoderCallback& callback) {
- if (needs_flush)
+ if (needs_flush) {
+ DVLOG(1) << "DecodeInternal needs flush.";
media_codec_bridge_->Reset();
- base::TimeDelta timeout = base::TimeDelta::FromMicroseconds(
- kMediaCodecTimeoutInMicroseconds);
- int input_buf_index = media_codec_bridge_->DequeueInputBuffer(timeout);
- if (input_buf_index == MediaCodecBridge::INFO_MEDIA_CODEC_ERROR) {
- ui_loop_->PostTask(FROM_HERE, base::Bind(
- callback, DECODE_FAILED, start_presentation_timestamp, 0));
- return;
}
- // TODO(qinmin): skip frames if video is falling far behind.
- if (input_buf_index >= 0) {
- if (unit.end_of_stream || unit.data.empty()) {
- media_codec_bridge_->QueueEOS(input_buf_index);
- } else if (unit.key_id.empty()){
- media_codec_bridge_->QueueInputBuffer(
- input_buf_index, &unit.data[0], unit.data.size(), unit.timestamp);
- } else {
- if (unit.iv.empty() || unit.subsamples.empty()) {
- LOG(ERROR) << "The access unit doesn't have iv or subsamples while it "
- << "has key IDs!";
- ui_loop_->PostTask(FROM_HERE, base::Bind(
- callback, DECODE_FAILED, start_presentation_timestamp, 0));
- return;
- }
- media_codec_bridge_->QueueSecureInputBuffer(
- input_buf_index, &unit.data[0], unit.data.size(),
- reinterpret_cast<const uint8*>(&unit.key_id[0]), unit.key_id.size(),
- reinterpret_cast<const uint8*>(&unit.iv[0]), unit.iv.size(),
- &unit.subsamples[0], unit.subsamples.size(), unit.timestamp);
- }
+
+ DecodeStatus decode_status = QueueInputBuffer(unit);
+ if (decode_status != DECODE_SUCCEEDED) {
+ ui_loop_->PostTask(FROM_HERE,
+ base::Bind(callback, decode_status, start_presentation_timestamp, 0));
+ return;
}
size_t offset = 0;
@@ -158,9 +172,11 @@ void MediaDecoderJob::DecodeInternal(
base::TimeDelta presentation_timestamp;
bool end_of_stream = false;
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(
+ kMediaCodecTimeoutInMilliseconds);
int outputBufferIndex = media_codec_bridge_->DequeueOutputBuffer(
timeout, &offset, &size, &presentation_timestamp, &end_of_stream);
- DecodeStatus decode_status = DECODE_SUCCEEDED;
+
if (end_of_stream)
decode_status = DECODE_END_OF_STREAM;
switch (outputBufferIndex) {
@@ -172,7 +188,7 @@ void MediaDecoderJob::DecodeInternal(
decode_status = DECODE_FORMAT_CHANGED;
break;
case MediaCodecBridge::INFO_TRY_AGAIN_LATER:
- decode_status = DECODE_TRY_AGAIN_LATER;
+ decode_status = DECODE_TRY_DEQUEUE_OUTPUT_AGAIN_LATER;
break;
case MediaCodecBridge::INFO_MEDIA_CODEC_ERROR:
decode_status = DECODE_FAILED;
@@ -257,7 +273,7 @@ VideoDecoderJob* VideoDecoderJob::Create(
const VideoCodec video_codec, const gfx::Size& size, jobject surface,
jobject media_crypto) {
scoped_ptr<VideoCodecBridge> codec(VideoCodecBridge::Create(video_codec));
- if (codec->Start(video_codec, size, surface, media_crypto))
+ if (codec && codec->Start(video_codec, size, surface, media_crypto))
return new VideoDecoderJob(codec.release());
return NULL;
}
@@ -275,8 +291,8 @@ AudioDecoderJob* AudioDecoderJob::Create(
size_t extra_data_size,
jobject media_crypto) {
scoped_ptr<AudioCodecBridge> codec(AudioCodecBridge::Create(audio_codec));
- if (codec->Start(audio_codec, sample_rate, channel_count, extra_data,
- extra_data_size, true, media_crypto)) {
+ if (codec && codec->Start(audio_codec, sample_rate, channel_count, extra_data,
+ extra_data_size, true, media_crypto)) {
return new AudioDecoderJob(codec.release());
}
return NULL;
@@ -287,6 +303,10 @@ AudioDecoderJob::AudioDecoderJob(MediaCodecBridge* media_codec_bridge)
media_codec_bridge,
true) {}
+void AudioDecoderJob::SetVolume(double volume) {
+ static_cast<AudioCodecBridge*>(media_codec_bridge_.get())->SetVolume(volume);
+}
+
MediaSourcePlayer::MediaSourcePlayer(
int player_id,
MediaPlayerManager* manager)
@@ -304,6 +324,7 @@ MediaSourcePlayer::MediaSourcePlayer(
playing_(false),
is_audio_encrypted_(false),
is_video_encrypted_(false),
+ volume_(-1.0),
clock_(&default_tick_clock_),
reconfig_audio_decoder_(false),
reconfig_video_decoder_(false),
@@ -407,7 +428,9 @@ void MediaSourcePlayer::Release() {
ReleaseMediaResourcesFromManager();
}
-void MediaSourcePlayer::SetVolume(float leftVolume, float rightVolume) {
+void MediaSourcePlayer::SetVolume(double volume) {
+ volume_ = volume;
+ SetVolumeInternal();
}
bool MediaSourcePlayer::CanPause() {
@@ -546,6 +569,7 @@ void MediaSourcePlayer::SetDrmBridge(MediaDrmBridge* drm_bridge) {
}
void MediaSourcePlayer::OnSeekRequestAck(unsigned seek_request_id) {
+ DVLOG(1) << "OnSeekRequestAck(" << seek_request_id << ")";
// Do nothing until the most recent seek request is processed.
if (seek_request_id_ != seek_request_id)
return;
@@ -613,7 +637,9 @@ void MediaSourcePlayer::MediaDecoderCallback(
Release();
OnMediaError(MEDIA_ERROR_DECODE);
return;
- } else if (decode_status == MediaDecoderJob::DECODE_SUCCEEDED) {
+ }
+
+ if (decode_status != MediaDecoderJob::DECODE_TRY_ENQUEUE_INPUT_AGAIN_LATER) {
if (is_audio)
audio_access_unit_index_++;
else
@@ -698,6 +724,7 @@ void MediaSourcePlayer::DecodeMoreAudio() {
}
void MediaSourcePlayer::DecodeMoreVideo() {
+ DVLOG(1) << "DecodeMoreVideo()";
DCHECK(!video_decoder_job_->is_decoding());
DCHECK(HasVideoData());
@@ -712,6 +739,9 @@ void MediaSourcePlayer::DecodeMoreVideo() {
return;
}
+ DVLOG(3) << "VideoDecoderJob::Decode(" << video_access_unit_index_ << ", "
+ << start_time_ticks_.ToInternalValue() << ", "
+ << start_presentation_timestamp_.InMilliseconds() << ")";
video_decoder_job_->Decode(
received_video_.access_units[video_access_unit_index_],
start_time_ticks_, start_presentation_timestamp_,
@@ -734,6 +764,7 @@ void MediaSourcePlayer::PlaybackCompleted(bool is_audio) {
}
void MediaSourcePlayer::ClearDecodingData() {
+ DVLOG(1) << "ClearDecodingData()";
if (audio_decoder_job_)
audio_decoder_job_->Flush();
if (video_decoder_job_)
@@ -786,8 +817,10 @@ void MediaSourcePlayer::ConfigureAudioDecoderJob() {
audio_codec_, sampling_rate_, num_channels_, &audio_extra_data_[0],
audio_extra_data_.size(), media_codec.obj()));
- if (audio_decoder_job_)
+ if (audio_decoder_job_) {
+ SetVolumeInternal();
reconfig_audio_decoder_ = false;
+ }
}
void MediaSourcePlayer::ConfigureVideoDecoderJob() {
@@ -866,6 +899,7 @@ void MediaSourcePlayer::SyncAndStartDecoderJobs() {
}
void MediaSourcePlayer::RequestAudioData() {
+ DVLOG(2) << "RequestAudioData()";
DCHECK(HasAudio());
if (waiting_for_audio_data_)
@@ -878,6 +912,7 @@ void MediaSourcePlayer::RequestAudioData() {
}
void MediaSourcePlayer::RequestVideoData() {
+ DVLOG(2) << "RequestVideoData()";
DCHECK(HasVideo());
if (waiting_for_video_data_)
return;
@@ -896,4 +931,9 @@ bool MediaSourcePlayer::HasVideoData() const {
return video_access_unit_index_ < received_video_.access_units.size();
}
+void MediaSourcePlayer::SetVolumeInternal() {
+ if (audio_decoder_job_ && volume_ >= 0)
+ audio_decoder_job_.get()->SetVolume(volume_);
+}
+
} // namespace media
diff --git a/media/base/android/media_source_player.h b/media/base/android/media_source_player.h
index efab0a2f6c..7253d565e3 100644
--- a/media/base/android/media_source_player.h
+++ b/media/base/android/media_source_player.h
@@ -40,7 +40,8 @@ class MediaDecoderJob {
public:
enum DecodeStatus {
DECODE_SUCCEEDED,
- DECODE_TRY_AGAIN_LATER,
+ DECODE_TRY_ENQUEUE_INPUT_AGAIN_LATER,
+ DECODE_TRY_DEQUEUE_OUTPUT_AGAIN_LATER,
DECODE_FORMAT_CHANGED,
DECODE_END_OF_STREAM,
DECODE_FAILED,
@@ -54,19 +55,14 @@ class MediaDecoderJob {
size_t)> DecoderCallback;
// Called by MediaSourcePlayer to decode some data.
- void Decode(
- const MediaPlayerHostMsg_ReadFromDemuxerAck_Params::AccessUnit& unit,
- const base::TimeTicks& start_time_ticks,
- const base::TimeDelta& start_presentation_timestamp,
- const MediaDecoderJob::DecoderCallback& callback);
+ void Decode(const AccessUnit& unit,
+ const base::TimeTicks& start_time_ticks,
+ const base::TimeDelta& start_presentation_timestamp,
+ const MediaDecoderJob::DecoderCallback& callback);
// Flush the decoder.
void Flush();
- struct Deleter {
- inline void operator()(MediaDecoderJob* ptr) const { ptr->Release(); }
- };
-
// Causes this instance to be deleted on the thread it is bound to.
void Release();
@@ -86,18 +82,19 @@ class MediaDecoderJob {
const base::TimeDelta& presentation_timestamp,
const MediaDecoderJob::DecoderCallback& callback, DecodeStatus status);
+ DecodeStatus QueueInputBuffer(const AccessUnit& unit);
+
// Helper function to decoder data on |thread_|. |unit| contains all the data
// to be decoded. |start_time_ticks| and |start_presentation_timestamp|
// represent the system time and the presentation timestamp when the first
// frame is rendered. We use these information to estimate when the current
// frame should be rendered. If |needs_flush| is true, codec needs to be
// flushed at the beginning of this call.
- void DecodeInternal(
- const MediaPlayerHostMsg_ReadFromDemuxerAck_Params::AccessUnit& unit,
- const base::TimeTicks& start_time_ticks,
- const base::TimeDelta& start_presentation_timestamp,
- bool needs_flush,
- const MediaDecoderJob::DecoderCallback& callback);
+ void DecodeInternal(const AccessUnit& unit,
+ const base::TimeTicks& start_time_ticks,
+ const base::TimeDelta& start_presentation_timestamp,
+ bool needs_flush,
+ const MediaDecoderJob::DecoderCallback& callback);
// The UI message loop where callbacks should be dispatched.
scoped_refptr<base::MessageLoopProxy> ui_loop_;
@@ -122,8 +119,9 @@ class MediaDecoderJob {
bool is_decoding_;
};
-typedef scoped_ptr<MediaDecoderJob, MediaDecoderJob::Deleter>
- ScopedMediaDecoderJob;
+struct DecoderJobDeleter {
+ inline void operator()(MediaDecoderJob* ptr) const { ptr->Release(); }
+};
// This class handles media source extensions on Android. It uses Android
// MediaCodec to decode audio and video streams in two separate threads.
@@ -142,7 +140,7 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
virtual void Pause() OVERRIDE;
virtual void SeekTo(base::TimeDelta timestamp) OVERRIDE;
virtual void Release() OVERRIDE;
- virtual void SetVolume(float leftVolume, float rightVolume) OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
virtual int GetVideoWidth() OVERRIDE;
virtual int GetVideoHeight() OVERRIDE;
virtual base::TimeDelta GetCurrentTime() OVERRIDE;
@@ -219,6 +217,9 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
bool HasAudioData() const;
bool HasVideoData() const;
+ // Helper function to set the volume.
+ void SetVolumeInternal();
+
enum PendingEventFlags {
NO_EVENT_PENDING = 0,
SEEK_EVENT_PENDING = 1 << 0,
@@ -246,6 +247,7 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
bool playing_;
bool is_audio_encrypted_;
bool is_video_encrypted_;
+ double volume_;
// base::TickClock used by |clock_|.
base::DefaultTickClock default_tick_clock_;
@@ -266,8 +268,8 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
gfx::ScopedJavaSurface surface_;
// Decoder jobs
- ScopedMediaDecoderJob audio_decoder_job_;
- ScopedMediaDecoderJob video_decoder_job_;
+ scoped_ptr<AudioDecoderJob, DecoderJobDeleter> audio_decoder_job_;
+ scoped_ptr<VideoDecoderJob, DecoderJobDeleter> video_decoder_job_;
bool reconfig_audio_decoder_;
bool reconfig_video_decoder_;
diff --git a/media/base/android/media_source_player_unittest.cc b/media/base/android/media_source_player_unittest.cc
index 31466400c9..cc99cf7cde 100644
--- a/media/base/android/media_source_player_unittest.cc
+++ b/media/base/android/media_source_player_unittest.cc
@@ -94,9 +94,12 @@ class MediaSourcePlayerTest : public testing::Test {
protected:
// Get the decoder job from the MediaSourcePlayer.
MediaDecoderJob* GetMediaDecoderJob(bool is_audio) {
- if (is_audio)
- return player_->audio_decoder_job_.get();
- return player_->video_decoder_job_.get();
+ if (is_audio) {
+ return reinterpret_cast<MediaDecoderJob*>(
+ player_->audio_decoder_job_.get());
+ }
+ return reinterpret_cast<MediaDecoderJob*>(
+ player_->video_decoder_job_.get());
}
// Starts an audio decoder job.
@@ -393,7 +396,10 @@ TEST_F(MediaSourcePlayerTest, DecoderJobsCannotStartWithoutAudio) {
EXPECT_TRUE(video_decoder_job->is_decoding());
}
-TEST_F(MediaSourcePlayerTest, StartTimeTicksResetAfterDecoderUnderruns) {
+// Disabled due to http://crbug.com/266041.
+// TODO(xhwang/qinmin): Fix this test and reenable it.
+TEST_F(MediaSourcePlayerTest,
+ DISABLED_StartTimeTicksResetAfterDecoderUnderruns) {
if (!MediaCodecBridge::IsAvailable())
return;
diff --git a/media/base/audio_buffer.cc b/media/base/audio_buffer.cc
index 61296dad87..b2cdd8c41a 100644
--- a/media/base/audio_buffer.cc
+++ b/media/base/audio_buffer.cc
@@ -11,22 +11,23 @@
namespace media {
-// Alignment of each channel's data; use 8-byte alignment as that is bigger
-// than maximum size of a sample, and the minimum alignment.
-enum { kChannelAlignment = 8 };
+// Alignment of each channel's data; this must match what ffmpeg expects
+// (which may be 0, 16, or 32, depending on the processor). Selecting 32 in
+// order to work on all processors.
+enum { kChannelAlignment = 32 };
AudioBuffer::AudioBuffer(SampleFormat sample_format,
int channel_count,
int frame_count,
+ bool create_buffer,
const uint8* const* data,
const base::TimeDelta timestamp,
const base::TimeDelta duration)
: sample_format_(sample_format),
channel_count_(channel_count),
- frame_count_(frame_count),
adjusted_frame_count_(frame_count),
trim_start_(0),
- end_of_stream_(data == NULL && frame_count_ == 0),
+ end_of_stream_(!create_buffer && data == NULL && frame_count == 0),
timestamp_(timestamp),
duration_(duration) {
CHECK_GE(channel_count, 0);
@@ -37,7 +38,7 @@ AudioBuffer::AudioBuffer(SampleFormat sample_format,
int data_size = frame_count * bytes_per_channel;
// Empty buffer?
- if (!data)
+ if (!create_buffer)
return;
if (sample_format == kSampleFormatPlanarF32 ||
@@ -56,7 +57,8 @@ AudioBuffer::AudioBuffer(SampleFormat sample_format,
// Copy each channel's data into the appropriate spot.
for (int i = 0; i < channel_count; ++i) {
channel_data_.push_back(data_.get() + i * block_size_per_channel);
- memcpy(channel_data_[i], data[i], data_size);
+ if (data)
+ memcpy(channel_data_[i], data[i], data_size);
}
return;
}
@@ -71,7 +73,8 @@ AudioBuffer::AudioBuffer(SampleFormat sample_format,
data_size *= channel_count;
data_.reset(
static_cast<uint8*>(base::AlignedAlloc(data_size, kChannelAlignment)));
- memcpy(data_.get(), data[0], data_size);
+ if (data)
+ memcpy(data_.get(), data[0], data_size);
}
AudioBuffer::~AudioBuffer() {}
@@ -85,9 +88,29 @@ scoped_refptr<AudioBuffer> AudioBuffer::CopyFrom(
const base::TimeDelta timestamp,
const base::TimeDelta duration) {
// If you hit this CHECK you likely have a bug in a demuxer. Go fix it.
+ CHECK_GT(frame_count, 0); // Otherwise looks like an EOF buffer.
CHECK(data[0]);
- return make_scoped_refptr(new AudioBuffer(
- sample_format, channel_count, frame_count, data, timestamp, duration));
+ return make_scoped_refptr(new AudioBuffer(sample_format,
+ channel_count,
+ frame_count,
+ true,
+ data,
+ timestamp,
+ duration));
+}
+
+// static
+scoped_refptr<AudioBuffer> AudioBuffer::CreateBuffer(SampleFormat sample_format,
+ int channel_count,
+ int frame_count) {
+ CHECK_GT(frame_count, 0); // Otherwise looks like an EOF buffer.
+ return make_scoped_refptr(new AudioBuffer(sample_format,
+ channel_count,
+ frame_count,
+ true,
+ NULL,
+ kNoTimestamp(),
+ kNoTimestamp()));
}
// static
@@ -98,14 +121,19 @@ scoped_refptr<AudioBuffer> AudioBuffer::CreateEmptyBuffer(
const base::TimeDelta duration) {
CHECK_GT(frame_count, 0); // Otherwise looks like an EOF buffer.
// Since data == NULL, format doesn't matter.
- return make_scoped_refptr(new AudioBuffer(
- kSampleFormatF32, channel_count, frame_count, NULL, timestamp, duration));
+ return make_scoped_refptr(new AudioBuffer(kSampleFormatF32,
+ channel_count,
+ frame_count,
+ false,
+ NULL,
+ timestamp,
+ duration));
}
// static
scoped_refptr<AudioBuffer> AudioBuffer::CreateEOSBuffer() {
return make_scoped_refptr(new AudioBuffer(
- kUnknownSampleFormat, 1, 0, NULL, kNoTimestamp(), kNoTimestamp()));
+ kUnknownSampleFormat, 1, 0, false, NULL, kNoTimestamp(), kNoTimestamp()));
}
// Convert int16 values in the range [kint16min, kint16max] to [-1.0, 1.0].
@@ -124,10 +152,12 @@ void AudioBuffer::ReadFrames(int frames_to_copy,
// specified must be in range.
DCHECK(!end_of_stream());
DCHECK_EQ(dest->channels(), channel_count_);
- source_frame_offset += trim_start_;
- DCHECK_LE(source_frame_offset + frames_to_copy, frame_count_);
+ DCHECK_LE(source_frame_offset + frames_to_copy, adjusted_frame_count_);
DCHECK_LE(dest_frame_offset + frames_to_copy, dest->frames());
+ // Move the start past any frames that have been trimmed.
+ source_frame_offset += trim_start_;
+
if (!data_) {
// Special case for an empty buffer.
dest->ZeroFramesPartial(dest_frame_offset, frames_to_copy);
@@ -189,8 +219,8 @@ void AudioBuffer::ReadFrames(int frames_to_copy,
}
void AudioBuffer::TrimStart(int frames_to_trim) {
- CHECK_LT(frames_to_trim, adjusted_frame_count_);
- trim_start_ += frames_to_trim;
+ CHECK_GE(frames_to_trim, 0);
+ CHECK_LE(frames_to_trim, adjusted_frame_count_);
// Adjust timestamp_ and duration_ to reflect the smaller number of frames.
double offset = static_cast<double>(duration_.InMicroseconds()) *
@@ -200,8 +230,25 @@ void AudioBuffer::TrimStart(int frames_to_trim) {
timestamp_ += offset_as_time;
duration_ -= offset_as_time;
+ // Finally adjust the number of frames in this buffer and where the start
+ // really is.
+ adjusted_frame_count_ -= frames_to_trim;
+ trim_start_ += frames_to_trim;
+}
+
+void AudioBuffer::TrimEnd(int frames_to_trim) {
+ CHECK_GE(frames_to_trim, 0);
+ CHECK_LE(frames_to_trim, adjusted_frame_count_);
+
+ // Adjust duration_ only to reflect the smaller number of frames.
+ double offset = static_cast<double>(duration_.InMicroseconds()) *
+ frames_to_trim / adjusted_frame_count_;
+ base::TimeDelta offset_as_time =
+ base::TimeDelta::FromMicroseconds(static_cast<int64>(offset));
+ duration_ -= offset_as_time;
+
// Finally adjust the number of frames in this buffer.
- adjusted_frame_count_ = frame_count_ - trim_start_;
+ adjusted_frame_count_ -= frames_to_trim;
}
} // namespace media
diff --git a/media/base/audio_buffer.h b/media/base/audio_buffer.h
index 9200666cb5..e52355ac4c 100644
--- a/media/base/audio_buffer.h
+++ b/media/base/audio_buffer.h
@@ -37,6 +37,12 @@ class MEDIA_EXPORT AudioBuffer
const base::TimeDelta timestamp,
const base::TimeDelta duration);
+ // Create an AudioBuffer with |frame_count| frames. Buffer is allocated, but
+ // not initialized. Timestamp and duration are set to kNoTimestamp().
+ static scoped_refptr<AudioBuffer> CreateBuffer(SampleFormat sample_format,
+ int channel_count,
+ int frame_count);
+
// Create an empty AudioBuffer with |frame_count| frames.
static scoped_refptr<AudioBuffer> CreateEmptyBuffer(
int channel_count,
@@ -60,10 +66,15 @@ class MEDIA_EXPORT AudioBuffer
AudioBus* dest);
// Trim an AudioBuffer by removing |frames_to_trim| frames from the start.
+ // Timestamp and duration are adjusted to reflect the fewer frames.
// Note that repeated calls to TrimStart() may result in timestamp() and
// duration() being off by a few microseconds due to rounding issues.
void TrimStart(int frames_to_trim);
+ // Trim an AudioBuffer by removing |frames_to_trim| frames from the end.
+ // Duration is adjusted to reflect the fewer frames.
+ void TrimEnd(int frames_to_trim);
+
// Return the number of channels.
int channel_count() const { return channel_count_; }
@@ -83,16 +94,22 @@ class MEDIA_EXPORT AudioBuffer
// If there's no data in this buffer, it represents end of stream.
bool end_of_stream() const { return end_of_stream_; }
+ // Access to the raw buffer for ffmpeg to write directly to. Data for planar
+ // data is grouped by channel.
+ uint8* writable_data() { return data_.get(); }
+
private:
friend class base::RefCountedThreadSafe<AudioBuffer>;
// Allocates aligned contiguous buffer to hold all channel data (1 block for
// interleaved data, |channel_count| blocks for planar data), copies
- // [data,data+data_size) to the allocated buffer(s). If |data| is null an end
- // of stream buffer is created.
+ // [data,data+data_size) to the allocated buffer(s). If |data| is null, no
+ // data is copied. If |create_buffer| is false, no data buffer is created (or
+ // copied to).
AudioBuffer(SampleFormat sample_format,
int channel_count,
int frame_count,
+ bool create_buffer,
const uint8* const* data,
const base::TimeDelta timestamp,
const base::TimeDelta duration);
@@ -101,7 +118,6 @@ class MEDIA_EXPORT AudioBuffer
const SampleFormat sample_format_;
const int channel_count_;
- const int frame_count_;
int adjusted_frame_count_;
int trim_start_;
const bool end_of_stream_;
diff --git a/media/base/audio_buffer_unittest.cc b/media/base/audio_buffer_unittest.cc
index f6384e880f..473778a6b5 100644
--- a/media/base/audio_buffer_unittest.cc
+++ b/media/base/audio_buffer_unittest.cc
@@ -256,7 +256,7 @@ TEST(AudioBufferTest, Trim) {
buffer->ReadFrames(20, 0, 0, bus.get());
VerifyResult(bus->channel(0), 20, 1.0f, 1.0f);
- // Trim off 10 frames.
+ // Trim off 10 frames from the start.
buffer->TrimStart(10);
EXPECT_EQ(buffer->frame_count(), frames - 10);
EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(10));
@@ -264,13 +264,27 @@ TEST(AudioBufferTest, Trim) {
buffer->ReadFrames(20, 0, 0, bus.get());
VerifyResult(bus->channel(0), 20, 11.0f, 1.0f);
- // Trim off 80 more.
- buffer->TrimStart(80);
- EXPECT_EQ(buffer->frame_count(), frames - 90);
- EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(90));
- EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(10));
+ // Trim off 10 frames from the end.
+ buffer->TrimEnd(10);
+ EXPECT_EQ(buffer->frame_count(), frames - 20);
+ EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(10));
+ EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(80));
+ buffer->ReadFrames(20, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), 20, 11.0f, 1.0f);
+
+ // Trim off 50 more from the start.
+ buffer->TrimStart(50);
+ EXPECT_EQ(buffer->frame_count(), frames - 70);
+ EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(60));
+ EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(30));
buffer->ReadFrames(10, 0, 0, bus.get());
- VerifyResult(bus->channel(0), 10, 91.0f, 1.0f);
+ VerifyResult(bus->channel(0), 10, 61.0f, 1.0f);
+
+ // Trim off the last 30 frames.
+ buffer->TrimEnd(30);
+ EXPECT_EQ(buffer->frame_count(), 0);
+ EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(60));
+ EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(0));
}
} // namespace media
diff --git a/media/base/container_names.cc b/media/base/container_names.cc
index 8b68e31e24..f062929d54 100644
--- a/media/base/container_names.cc
+++ b/media/base/container_names.cc
@@ -975,8 +975,6 @@ static bool CheckMov(const uint8* buffer, int buffer_size) {
default:
return false;
}
- if (atomsize <= 0)
- break; // Indicates the last atom or length too big.
if (atomsize == 1) {
// Indicates that the length is the next 64bits.
if (offset + 16 > buffer_size)
@@ -985,6 +983,8 @@ static bool CheckMov(const uint8* buffer, int buffer_size) {
break; // Offset is way past buffer size.
atomsize = Read32(buffer + offset + 12);
}
+ if (atomsize <= 0)
+ break; // Indicates the last atom or length too big.
offset += atomsize;
}
return true;
diff --git a/media/base/container_names_unittest.cc b/media/base/container_names_unittest.cc
index 4aa20638ba..21f80af6d9 100644
--- a/media/base/container_names_unittest.cc
+++ b/media/base/container_names_unittest.cc
@@ -74,6 +74,13 @@ const char kRm1Buffer[12] = ".RMF\0\0";
const char kRm2Buffer[12] = ".ra\xfd";
uint8 kWtvBuffer[] = { 0xb7, 0xd8, 0x00, 0x20, 0x37, 0x49, 0xda, 0x11, 0xa6,
0x4e, 0x00, 0x07, 0xe9, 0x5e, 0xad, 0x8d };
+uint8 kBug263073Buffer[] = {
+ 0x00, 0x00, 0x00, 0x18, 0x66, 0x74, 0x79, 0x70, 0x6d, 0x70, 0x34, 0x32,
+ 0x00, 0x00, 0x00, 0x00, 0x69, 0x73, 0x6f, 0x6d, 0x6d, 0x70, 0x34, 0x32,
+ 0x00, 0x00, 0x00, 0x01, 0x6d, 0x64, 0x61, 0x74, 0x00, 0x00, 0x00, 0x00,
+ 0xaa, 0x2e, 0x22, 0xcf, 0x00, 0x00, 0x00, 0x37, 0x67, 0x64, 0x00, 0x28,
+ 0xac, 0x2c, 0xa4, 0x01, 0xe0, 0x08, 0x9f, 0x97, 0x01, 0x52, 0x02, 0x02,
+ 0x02, 0x80, 0x00, 0x01};
// Test that containers that start with fixed strings are handled correctly.
// This is to verify that the TAG matches the first 4 characters of the string.
@@ -91,6 +98,7 @@ TEST(ContainerNamesTest, CheckFixedStrings) {
VERIFY(kRm1Buffer, CONTAINER_RM);
VERIFY(kRm2Buffer, CONTAINER_RM);
VERIFY(kWtvBuffer, CONTAINER_WTV);
+ VERIFY(kBug263073Buffer, CONTAINER_MOV);
}
// Determine the container type of a specified file.
diff --git a/media/base/simd/filter_yuv_mmx.cc b/media/base/simd/filter_yuv_mmx.cc
index c69d9de651..3991fe72fe 100644
--- a/media/base/simd/filter_yuv_mmx.cc
+++ b/media/base/simd/filter_yuv_mmx.cc
@@ -16,6 +16,7 @@ namespace media {
#if defined(COMPILER_MSVC)
// Warning 4799 is about calling emms before the function exits.
// We calls emms in a frame level so suppress this warning.
+#pragma warning(push)
#pragma warning(disable: 4799)
#endif
@@ -72,7 +73,7 @@ void FilterYUVRows_MMX(uint8* dest,
}
#if defined(COMPILER_MSVC)
-#pragma warning(default: 4799)
+#pragma warning(pop)
#endif
} // namespace media
diff --git a/media/crypto/aes_decryptor.cc b/media/crypto/aes_decryptor.cc
index a45b6875ad..fd7c628066 100644
--- a/media/crypto/aes_decryptor.cc
+++ b/media/crypto/aes_decryptor.cc
@@ -69,6 +69,10 @@ static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
reinterpret_cast<const char*>(input.data() + data_offset);
int sample_size = input.data_size() - data_offset;
+ DCHECK_GT(sample_size, 0) << "No sample data to be decrypted.";
+ if (sample_size <= 0)
+ return NULL;
+
if (input.decrypt_config()->subsamples().empty()) {
std::string decrypted_text;
base::StringPiece encrypted_text(sample, sample_size);
@@ -97,6 +101,12 @@ static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
return NULL;
}
+ // No need to decrypt if there is no encrypted data.
+ if (total_encrypted_size <= 0) {
+ return DecoderBuffer::CopyFrom(reinterpret_cast<const uint8*>(sample),
+ sample_size);
+ }
+
// The encrypted portions of all subsamples must form a contiguous block,
// such that an encrypted subsample that ends away from a block boundary is
// immediately followed by the start of the next encrypted subsample. We
@@ -115,6 +125,7 @@ static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
DVLOG(1) << "Could not decrypt data.";
return NULL;
}
+ DCHECK_EQ(decrypted_text.size(), encrypted_text.size());
scoped_refptr<DecoderBuffer> output = DecoderBuffer::CopyFrom(
reinterpret_cast<const uint8*>(sample), sample_size);
diff --git a/media/crypto/aes_decryptor_unittest.cc b/media/crypto/aes_decryptor_unittest.cc
index 85f864ac1b..8feaec4b1a 100644
--- a/media/crypto/aes_decryptor_unittest.cc
+++ b/media/crypto/aes_decryptor_unittest.cc
@@ -133,35 +133,55 @@ static const uint8 kSubsampleIv[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
-static const uint8 kSubsampleData[] = {
+// kSubsampleOriginalData encrypted with kSubsampleKey and kSubsampleIv using
+// kSubsampleEntriesNormal.
+static const uint8 kSubsampleEncryptedData[] = {
0x4f, 0x72, 0x09, 0x16, 0x09, 0xe6, 0x79, 0xad,
0x70, 0x73, 0x75, 0x62, 0x09, 0xbb, 0x83, 0x1d,
0x4d, 0x08, 0xd7, 0x78, 0xa4, 0xa7, 0xf1, 0x2e
};
-static const uint8 kPaddedSubsampleData[] = {
+// kSubsampleEncryptedData with 8 bytes padding at the beginning.
+static const uint8 kPaddedSubsampleEncryptedData[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x4f, 0x72, 0x09, 0x16, 0x09, 0xe6, 0x79, 0xad,
0x70, 0x73, 0x75, 0x62, 0x09, 0xbb, 0x83, 0x1d,
0x4d, 0x08, 0xd7, 0x78, 0xa4, 0xa7, 0xf1, 0x2e
};
-// Encrypted with kSubsampleKey and kSubsampleIv but without subsamples.
-static const uint8 kNoSubsampleData[] = {
+// kSubsampleOriginalData encrypted with kSubsampleKey and kSubsampleIv but
+// without any subsamples (or equivalently using kSubsampleEntriesCypherOnly).
+static const uint8 kEncryptedData[] = {
0x2f, 0x03, 0x09, 0xef, 0x71, 0xaf, 0x31, 0x16,
0xfa, 0x9d, 0x18, 0x43, 0x1e, 0x96, 0x71, 0xb5,
0xbf, 0xf5, 0x30, 0x53, 0x9a, 0x20, 0xdf, 0x95
};
-static const SubsampleEntry kSubsampleEntries[] = {
+// Subsample entries for testing. The sum of |cypher_bytes| and |clear_bytes| of
+// all entries must be equal to kSubsampleOriginalDataSize to make the subsample
+// entries valid.
+
+static const SubsampleEntry kSubsampleEntriesNormal[] = {
{ 2, 7 },
{ 3, 11 },
{ 1, 0 }
};
+static const SubsampleEntry kSubsampleEntriesClearOnly[] = {
+ { 7, 0 },
+ { 8, 0 },
+ { 9, 0 }
+};
+
+static const SubsampleEntry kSubsampleEntriesCypherOnly[] = {
+ { 0, 6 },
+ { 0, 8 },
+ { 0, 10 }
+};
+
// Generates a 16 byte CTR counter block. The CTR counter block format is a
// CTR IV appended with a CTR block counter. |iv| is an 8 byte CTR IV.
-// |iv_size| is the size of |iv| in btyes. Returns a string of
+// |iv_size| is the size of |iv| in bytes. Returns a string of
// kDecryptionKeySize bytes.
static std::string GenerateCounterBlock(const uint8* iv, int iv_size) {
CHECK_GT(iv_size, 0);
@@ -209,6 +229,9 @@ static scoped_refptr<DecoderBuffer> CreateWebMEncryptedBuffer(
return encrypted_buffer;
}
+// TODO(xhwang): Refactor this function to encapsulate more details about
+// creating an encrypted DecoderBuffer with subsamples so we don't have so much
+// boilerplate code in each test before calling this function.
static scoped_refptr<DecoderBuffer> CreateSubsampleEncryptedBuffer(
const uint8* data, int data_size,
const uint8* key_id, int key_id_size,
@@ -236,8 +259,9 @@ class AesDecryptorTest : public testing::Test {
base::Bind(&AesDecryptorTest::KeyMessage, base::Unretained(this))),
decrypt_cb_(base::Bind(&AesDecryptorTest::BufferDecrypted,
base::Unretained(this))),
- subsample_entries_(kSubsampleEntries,
- kSubsampleEntries + arraysize(kSubsampleEntries)) {
+ subsample_entries_normal_(
+ kSubsampleEntriesNormal,
+ kSubsampleEntriesNormal + arraysize(kSubsampleEntriesNormal)) {
}
protected:
@@ -319,7 +343,7 @@ class AesDecryptorTest : public testing::Test {
AesDecryptor decryptor_;
std::string session_id_string_;
AesDecryptor::DecryptCB decrypt_cb_;
- std::vector<SubsampleEntry> subsample_entries_;
+ std::vector<SubsampleEntry> subsample_entries_normal_;
};
TEST_F(AesDecryptorTest, GenerateKeyRequestWithNullInitData) {
@@ -543,11 +567,11 @@ TEST_F(AesDecryptorTest, SubsampleDecryption) {
AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
kSubsampleKey, arraysize(kSubsampleKey));
scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kSubsampleData, arraysize(kSubsampleData),
+ kSubsampleEncryptedData, arraysize(kSubsampleEncryptedData),
kSubsampleKeyId, arraysize(kSubsampleKeyId),
kSubsampleIv, arraysize(kSubsampleIv),
0,
- subsample_entries_);
+ subsample_entries_normal_);
ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(
encrypted_data, kSubsampleOriginalData, kSubsampleOriginalDataSize));
}
@@ -560,11 +584,12 @@ TEST_F(AesDecryptorTest, SubsampleDecryptionWithOffset) {
AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
kSubsampleKey, arraysize(kSubsampleKey));
scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kPaddedSubsampleData, arraysize(kPaddedSubsampleData),
+ kPaddedSubsampleEncryptedData, arraysize(kPaddedSubsampleEncryptedData),
kSubsampleKeyId, arraysize(kSubsampleKeyId),
kSubsampleIv, arraysize(kSubsampleIv),
- arraysize(kPaddedSubsampleData) - arraysize(kSubsampleData),
- subsample_entries_);
+ arraysize(kPaddedSubsampleEncryptedData)
+ - arraysize(kSubsampleEncryptedData),
+ subsample_entries_normal_);
ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(
encrypted_data, kSubsampleOriginalData, kSubsampleOriginalDataSize));
}
@@ -575,7 +600,7 @@ TEST_F(AesDecryptorTest, NormalDecryption) {
AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
kSubsampleKey, arraysize(kSubsampleKey));
scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kNoSubsampleData, arraysize(kNoSubsampleData),
+ kEncryptedData, arraysize(kEncryptedData),
kSubsampleKeyId, arraysize(kSubsampleKeyId),
kSubsampleIv, arraysize(kSubsampleIv),
0,
@@ -588,11 +613,11 @@ TEST_F(AesDecryptorTest, IncorrectSubsampleSize) {
GenerateKeyRequest(kSubsampleKeyId, arraysize(kSubsampleKeyId));
AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
kSubsampleKey, arraysize(kSubsampleKey));
- std::vector<SubsampleEntry> entries = subsample_entries_;
+ std::vector<SubsampleEntry> entries = subsample_entries_normal_;
entries[2].cypher_bytes += 1;
scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kSubsampleData, arraysize(kSubsampleData),
+ kSubsampleEncryptedData, arraysize(kSubsampleEncryptedData),
kSubsampleKeyId, arraysize(kSubsampleKeyId),
kSubsampleIv, arraysize(kSubsampleIv),
0,
@@ -600,4 +625,40 @@ TEST_F(AesDecryptorTest, IncorrectSubsampleSize) {
ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToFail(encrypted_data));
}
+// No cypher bytes in any of the subsamples.
+TEST_F(AesDecryptorTest, SubsampleClearBytesOnly) {
+ GenerateKeyRequest(kSubsampleKeyId, arraysize(kSubsampleKeyId));
+ AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
+ kSubsampleKey, arraysize(kSubsampleKey));
+ std::vector<SubsampleEntry> subsample_entries_clear_only(
+ kSubsampleEntriesClearOnly,
+ kSubsampleEntriesClearOnly + arraysize(kSubsampleEntriesClearOnly));
+ scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
+ kSubsampleOriginalData, kSubsampleOriginalDataSize,
+ kSubsampleKeyId, arraysize(kSubsampleKeyId),
+ kSubsampleIv, arraysize(kSubsampleIv),
+ 0,
+ subsample_entries_clear_only);
+ ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data,
+ kSubsampleOriginalData, kSubsampleOriginalDataSize));
+}
+
+// No clear bytes in any of the subsamples.
+TEST_F(AesDecryptorTest, SubsampleCypherBytesOnly) {
+ GenerateKeyRequest(kSubsampleKeyId, arraysize(kSubsampleKeyId));
+ AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
+ kSubsampleKey, arraysize(kSubsampleKey));
+ std::vector<SubsampleEntry> subsample_entries_cypher_only(
+ kSubsampleEntriesCypherOnly,
+ kSubsampleEntriesCypherOnly + arraysize(kSubsampleEntriesCypherOnly));
+ scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
+ kEncryptedData, arraysize(kEncryptedData),
+ kSubsampleKeyId, arraysize(kSubsampleKeyId),
+ kSubsampleIv, arraysize(kSubsampleIv),
+ 0,
+ subsample_entries_cypher_only);
+ ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data,
+ kSubsampleOriginalData, kSubsampleOriginalDataSize));
+}
+
} // namespace media
diff --git a/media/ffmpeg/ffmpeg_common_unittest.cc b/media/ffmpeg/ffmpeg_common_unittest.cc
index 30c065de28..33ad46ed10 100644
--- a/media/ffmpeg/ffmpeg_common_unittest.cc
+++ b/media/ffmpeg/ffmpeg_common_unittest.cc
@@ -58,7 +58,7 @@ FFmpegCommonTest::FFmpegCommonTest() {
FFmpegCommonTest::~FFmpegCommonTest() {}
-TEST_F(FFmpegCommonTest, TestTimeBaseConversions) {
+TEST_F(FFmpegCommonTest, TimeBaseConversions) {
int64 test_data[][5] = {
{1, 2, 1, 500000, 1 },
{1, 3, 1, 333333, 1 },
diff --git a/media/filters/chunk_demuxer.cc b/media/filters/chunk_demuxer.cc
index 6e360bb1cd..6f45a1f9dd 100644
--- a/media/filters/chunk_demuxer.cc
+++ b/media/filters/chunk_demuxer.cc
@@ -837,10 +837,11 @@ TimeDelta ChunkDemuxer::GetStartTime() const {
void ChunkDemuxer::StartWaitingForSeek(TimeDelta seek_time) {
DVLOG(1) << "StartWaitingForSeek()";
base::AutoLock auto_lock(lock_);
- DCHECK(state_ == INITIALIZED || state_ == ENDED || state_ == SHUTDOWN);
+ DCHECK(state_ == INITIALIZED || state_ == ENDED || state_ == SHUTDOWN ||
+ state_ == PARSE_ERROR) << state_;
DCHECK(seek_cb_.is_null());
- if (state_ == SHUTDOWN)
+ if (state_ == SHUTDOWN || state_ == PARSE_ERROR)
return;
AbortPendingReads();
diff --git a/media/filters/chunk_demuxer_unittest.cc b/media/filters/chunk_demuxer_unittest.cc
index 07e7c6cf72..e3a84e66d3 100644
--- a/media/filters/chunk_demuxer_unittest.cc
+++ b/media/filters/chunk_demuxer_unittest.cc
@@ -285,9 +285,17 @@ class ChunkDemuxerTest : public testing::Test {
AppendData(kSourceId, data, length);
}
+ void AppendCluster(const std::string& source_id,
+ scoped_ptr<Cluster> cluster) {
+ AppendData(source_id, cluster->data(), cluster->size());
+ }
+
+ void AppendCluster(scoped_ptr<Cluster> cluster) {
+ AppendCluster(kSourceId, cluster.Pass());
+ }
+
void AppendCluster(int timecode, int block_count) {
- scoped_ptr<Cluster> cluster(GenerateCluster(timecode, block_count));
- AppendData(kSourceId, cluster->data(), cluster->size());
+ AppendCluster(GenerateCluster(timecode, block_count));
}
void AppendSingleStreamCluster(const std::string& source_id, int track_number,
@@ -303,9 +311,9 @@ class ChunkDemuxerTest : public testing::Test {
}
ASSERT_NE(block_duration, 0);
int end_timecode = timecode + block_count * block_duration;
- scoped_ptr<Cluster> cluster(GenerateSingleStreamCluster(
- timecode, end_timecode, track_number, block_duration));
- AppendData(source_id, cluster->data(), cluster->size());
+ AppendCluster(source_id,
+ GenerateSingleStreamCluster(
+ timecode, end_timecode, track_number, block_duration));
}
void AppendSingleStreamCluster(const std::string& source_id, int track_number,
@@ -332,8 +340,7 @@ class ChunkDemuxerTest : public testing::Test {
cb.AddSimpleBlock(track_number, timestamp_in_ms, block_flags,
&data[0], data.size());
}
- scoped_ptr<Cluster> cluster(cb.Finish());
- AppendData(source_id, cluster->data(), cluster->size());
+ AppendCluster(source_id, cb.Finish());
}
void AppendData(const std::string& source_id,
@@ -394,8 +401,7 @@ class ChunkDemuxerTest : public testing::Test {
}
void AppendEmptyCluster(int timecode) {
- scoped_ptr<Cluster> empty_cluster = GenerateEmptyCluster(timecode);
- AppendData(empty_cluster->data(), empty_cluster->size());
+ AppendCluster(GenerateEmptyCluster(timecode));
}
PipelineStatusCB CreateInitDoneCB(const base::TimeDelta& expected_duration,
@@ -880,7 +886,7 @@ class ChunkDemuxerTest : public testing::Test {
DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
};
-TEST_F(ChunkDemuxerTest, TestInit) {
+TEST_F(ChunkDemuxerTest, Init) {
// Test no streams, audio-only, video-only, and audio & video scenarios.
// Audio and video streams can be encrypted or not encrypted.
for (int i = 0; i < 16; i++) {
@@ -942,7 +948,7 @@ TEST_F(ChunkDemuxerTest, TestInit) {
// Make sure that the demuxer reports an error if Shutdown()
// is called before all the initialization segments are appended.
-TEST_F(ChunkDemuxerTest, TestShutdownBeforeAllInitSegmentsAppended) {
+TEST_F(ChunkDemuxerTest, ShutdownBeforeAllInitSegmentsAppended) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(
@@ -956,10 +962,9 @@ TEST_F(ChunkDemuxerTest, TestShutdownBeforeAllInitSegmentsAppended) {
// Test that Seek() completes successfully when the first cluster
// arrives.
-TEST_F(ChunkDemuxerTest, TestAppendDataAfterSeek) {
+TEST_F(ChunkDemuxerTest, AppendDataAfterSeek) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> first_cluster(kDefaultFirstCluster());
- AppendData(first_cluster->data(), first_cluster->size());
+ AppendCluster(kDefaultFirstCluster());
InSequence s;
@@ -969,11 +974,9 @@ TEST_F(ChunkDemuxerTest, TestAppendDataAfterSeek) {
EXPECT_CALL(*this, Checkpoint(2));
- scoped_ptr<Cluster> cluster(kDefaultSecondCluster());
-
Checkpoint(1);
- AppendData(cluster->data(), cluster->size());
+ AppendCluster(kDefaultSecondCluster());
message_loop_.RunUntilIdle();
@@ -981,10 +984,9 @@ TEST_F(ChunkDemuxerTest, TestAppendDataAfterSeek) {
}
// Test that parsing errors are handled for clusters appended after init.
-TEST_F(ChunkDemuxerTest, TestErrorWhileParsingClusterAfterInit) {
+TEST_F(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> first_cluster(kDefaultFirstCluster());
- AppendData(first_cluster->data(), first_cluster->size());
+ AppendCluster(kDefaultFirstCluster());
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
AppendGarbage();
@@ -993,7 +995,7 @@ TEST_F(ChunkDemuxerTest, TestErrorWhileParsingClusterAfterInit) {
// Test the case where a Seek() is requested while the parser
// is in the middle of cluster. This is to verify that the parser
// does not reset itself on a seek.
-TEST_F(ChunkDemuxerTest, TestSeekWhileParsingCluster) {
+TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
ASSERT_TRUE(InitDemuxer(true, true));
InSequence s;
@@ -1022,13 +1024,12 @@ TEST_F(ChunkDemuxerTest, TestSeekWhileParsingCluster) {
// Append the new cluster and verify that only the blocks
// in the new cluster are returned.
- scoped_ptr<Cluster> cluster_b(GenerateCluster(5000, 6));
- AppendData(cluster_b->data(), cluster_b->size());
+ AppendCluster(GenerateCluster(5000, 6));
GenerateExpectedReads(5000, 6);
}
// Test the case where AppendData() is called before Init().
-TEST_F(ChunkDemuxerTest, TestAppendDataBeforeInit) {
+TEST_F(ChunkDemuxerTest, AppendDataBeforeInit) {
scoped_ptr<uint8[]> info_tracks;
int info_tracks_size = 0;
CreateInitSegment(true, true, false, false, &info_tracks, &info_tracks_size);
@@ -1037,11 +1038,10 @@ TEST_F(ChunkDemuxerTest, TestAppendDataBeforeInit) {
}
// Make sure Read() callbacks are dispatched with the proper data.
-TEST_F(ChunkDemuxerTest, TestRead) {
+TEST_F(ChunkDemuxerTest, Read) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> cluster(kDefaultFirstCluster());
- AppendData(cluster->data(), cluster->size());
+ AppendCluster(kDefaultFirstCluster());
bool audio_read_done = false;
bool video_read_done = false;
@@ -1056,30 +1056,23 @@ TEST_F(ChunkDemuxerTest, TestRead) {
EXPECT_TRUE(video_read_done);
}
-TEST_F(ChunkDemuxerTest, TestOutOfOrderClusters) {
+TEST_F(ChunkDemuxerTest, OutOfOrderClusters) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> cluster(kDefaultFirstCluster());
- AppendData(cluster->data(), cluster->size());
+ AppendCluster(kDefaultFirstCluster());
+ AppendCluster(GenerateCluster(10, 4));
- scoped_ptr<Cluster> cluster_a(GenerateCluster(10, 4));
- AppendData(cluster_a->data(), cluster_a->size());
-
- // Cluster B starts before cluster_a and has data
- // that overlaps.
- scoped_ptr<Cluster> cluster_b(GenerateCluster(5, 4));
-
- // Make sure that AppendData() does not fail.
- AppendData(cluster_b->data(), cluster_b->size());
+ // Make sure that AppendCluster() does not fail with a cluster that
+ // overlaps with the previously appended cluster.
+ AppendCluster(GenerateCluster(5, 4));
// Verify that AppendData() can still accept more data.
scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size());
}
-TEST_F(ChunkDemuxerTest, TestNonMonotonicButAboveClusterTimecode) {
+TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> first_cluster(kDefaultFirstCluster());
- AppendData(first_cluster->data(), first_cluster->size());
+ AppendCluster(kDefaultFirstCluster());
ClusterBuilder cb;
@@ -1090,20 +1083,18 @@ TEST_F(ChunkDemuxerTest, TestNonMonotonicButAboveClusterTimecode) {
AddSimpleBlock(&cb, kVideoTrackNum, 10);
AddSimpleBlock(&cb, kAudioTrackNum, 7);
AddSimpleBlock(&cb, kVideoTrackNum, 15);
- scoped_ptr<Cluster> cluster_a(cb.Finish());
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
- AppendData(cluster_a->data(), cluster_a->size());
+ AppendCluster(cb.Finish());
// Verify that AppendData() ignores data after the error.
scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size());
}
-TEST_F(ChunkDemuxerTest, TestBackwardsAndBeforeClusterTimecode) {
+TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> first_cluster(kDefaultFirstCluster());
- AppendData(first_cluster->data(), first_cluster->size());
+ AppendCluster(kDefaultFirstCluster());
ClusterBuilder cb;
@@ -1114,10 +1105,9 @@ TEST_F(ChunkDemuxerTest, TestBackwardsAndBeforeClusterTimecode) {
AddSimpleBlock(&cb, kVideoTrackNum, 5);
AddSimpleBlock(&cb, kAudioTrackNum, 3);
AddSimpleBlock(&cb, kVideoTrackNum, 3);
- scoped_ptr<Cluster> cluster_a(cb.Finish());
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
- AppendData(cluster_a->data(), cluster_a->size());
+ AppendCluster(cb.Finish());
// Verify that AppendData() ignores data after the error.
scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
@@ -1125,10 +1115,9 @@ TEST_F(ChunkDemuxerTest, TestBackwardsAndBeforeClusterTimecode) {
}
-TEST_F(ChunkDemuxerTest, TestPerStreamMonotonicallyIncreasingTimestamps) {
+TEST_F(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> first_cluster(kDefaultFirstCluster());
- AppendData(first_cluster->data(), first_cluster->size());
+ AppendCluster(kDefaultFirstCluster());
ClusterBuilder cb;
@@ -1139,35 +1128,32 @@ TEST_F(ChunkDemuxerTest, TestPerStreamMonotonicallyIncreasingTimestamps) {
AddSimpleBlock(&cb, kVideoTrackNum, 5);
AddSimpleBlock(&cb, kAudioTrackNum, 4);
AddSimpleBlock(&cb, kVideoTrackNum, 7);
- scoped_ptr<Cluster> cluster(cb.Finish());
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
- AppendData(cluster->data(), cluster->size());
+ AppendCluster(cb.Finish());
}
-// Test the case where a cluster is passed to AppendData() before
+// Test the case where a cluster is passed to AppendCluster() before
// INFO & TRACKS data.
-TEST_F(ChunkDemuxerTest, TestClusterBeforeInitSegment) {
+TEST_F(ChunkDemuxerTest, ClusterBeforeInitSegment) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN));
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
- scoped_ptr<Cluster> cluster(GenerateCluster(0, 1));
-
- AppendData(cluster->data(), cluster->size());
+ AppendCluster(GenerateCluster(0, 1));
}
// Test cases where we get an MarkEndOfStream() call during initialization.
-TEST_F(ChunkDemuxerTest, TestEOSDuringInit) {
+TEST_F(ChunkDemuxerTest, EOSDuringInit) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN));
MarkEndOfStream(PIPELINE_OK);
}
-TEST_F(ChunkDemuxerTest, TestEndOfStreamWithNoAppend) {
+TEST_F(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN));
@@ -1182,7 +1168,7 @@ TEST_F(ChunkDemuxerTest, TestEndOfStreamWithNoAppend) {
demuxer_.reset();
}
-TEST_F(ChunkDemuxerTest, TestEndOfStreamWithNoMediaAppend) {
+TEST_F(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
ASSERT_TRUE(InitDemuxer(true, true));
CheckExpectedRanges("{ }");
@@ -1190,11 +1176,10 @@ TEST_F(ChunkDemuxerTest, TestEndOfStreamWithNoMediaAppend) {
CheckExpectedRanges("{ }");
}
-TEST_F(ChunkDemuxerTest, TestDecodeErrorEndOfStream) {
+TEST_F(ChunkDemuxerTest, DecodeErrorEndOfStream) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> cluster(kDefaultFirstCluster());
- AppendData(cluster->data(), cluster->size());
+ AppendCluster(kDefaultFirstCluster());
CheckExpectedRanges(kDefaultFirstClusterRange);
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
@@ -1202,11 +1187,10 @@ TEST_F(ChunkDemuxerTest, TestDecodeErrorEndOfStream) {
CheckExpectedRanges(kDefaultFirstClusterRange);
}
-TEST_F(ChunkDemuxerTest, TestNetworkErrorEndOfStream) {
+TEST_F(ChunkDemuxerTest, NetworkErrorEndOfStream) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> cluster(kDefaultFirstCluster());
- AppendData(cluster->data(), cluster->size());
+ AppendCluster(kDefaultFirstCluster());
CheckExpectedRanges(kDefaultFirstClusterRange);
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_NETWORK));
@@ -1263,11 +1247,10 @@ class EndOfStreamHelper {
// Make sure that all pending reads that we don't have media data for get an
// "end of stream" buffer when MarkEndOfStream() is called.
-TEST_F(ChunkDemuxerTest, TestEndOfStreamWithPendingReads) {
+TEST_F(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> cluster(GenerateCluster(0, 2));
- AppendData(cluster->data(), cluster->size());
+ AppendCluster(GenerateCluster(0, 2));
bool audio_read_done_1 = false;
bool video_read_done_1 = false;
@@ -1299,11 +1282,10 @@ TEST_F(ChunkDemuxerTest, TestEndOfStreamWithPendingReads) {
// Make sure that all Read() calls after we get an MarkEndOfStream()
// call return an "end of stream" buffer.
-TEST_F(ChunkDemuxerTest, TestReadsAfterEndOfStream) {
+TEST_F(ChunkDemuxerTest, ReadsAfterEndOfStream) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> cluster(GenerateCluster(0, 2));
- AppendData(cluster->data(), cluster->size());
+ AppendCluster(GenerateCluster(0, 2));
bool audio_read_done_1 = false;
bool video_read_done_1 = false;
@@ -1339,7 +1321,7 @@ TEST_F(ChunkDemuxerTest, TestReadsAfterEndOfStream) {
end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
}
-TEST_F(ChunkDemuxerTest, TestEndOfStreamDuringCanceledSeek) {
+TEST_F(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
ASSERT_TRUE(InitDemuxer(true, true));
AppendCluster(0, 10);
@@ -1370,7 +1352,7 @@ TEST_F(ChunkDemuxerTest, TestEndOfStreamDuringCanceledSeek) {
}
// Make sure AppendData() will accept elements that span multiple calls.
-TEST_F(ChunkDemuxerTest, TestAppendingInPieces) {
+TEST_F(ChunkDemuxerTest, AppendingInPieces) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK));
@@ -1401,7 +1383,7 @@ TEST_F(ChunkDemuxerTest, TestAppendingInPieces) {
GenerateExpectedReads(0, 9);
}
-TEST_F(ChunkDemuxerTest, TestWebMFile_AudioAndVideo) {
+TEST_F(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
struct BufferTimestamps buffer_timestamps[] = {
{0, 0},
{33, 3},
@@ -1415,7 +1397,7 @@ TEST_F(ChunkDemuxerTest, TestWebMFile_AudioAndVideo) {
base::TimeDelta::FromMilliseconds(2744)));
}
-TEST_F(ChunkDemuxerTest, TestWebMFile_LiveAudioAndVideo) {
+TEST_F(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
struct BufferTimestamps buffer_timestamps[] = {
{0, 0},
{33, 3},
@@ -1429,7 +1411,7 @@ TEST_F(ChunkDemuxerTest, TestWebMFile_LiveAudioAndVideo) {
kInfiniteDuration()));
}
-TEST_F(ChunkDemuxerTest, TestWebMFile_AudioOnly) {
+TEST_F(ChunkDemuxerTest, WebMFile_AudioOnly) {
struct BufferTimestamps buffer_timestamps[] = {
{kSkip, 0},
{kSkip, 3},
@@ -1444,7 +1426,7 @@ TEST_F(ChunkDemuxerTest, TestWebMFile_AudioOnly) {
true, false));
}
-TEST_F(ChunkDemuxerTest, TestWebMFile_VideoOnly) {
+TEST_F(ChunkDemuxerTest, WebMFile_VideoOnly) {
struct BufferTimestamps buffer_timestamps[] = {
{0, kSkip},
{33, kSkip},
@@ -1459,7 +1441,7 @@ TEST_F(ChunkDemuxerTest, TestWebMFile_VideoOnly) {
false, true));
}
-TEST_F(ChunkDemuxerTest, TestWebMFile_AltRefFrames) {
+TEST_F(ChunkDemuxerTest, WebMFile_AltRefFrames) {
struct BufferTimestamps buffer_timestamps[] = {
{0, 0},
{33, 3},
@@ -1474,7 +1456,7 @@ TEST_F(ChunkDemuxerTest, TestWebMFile_AltRefFrames) {
}
// Verify that we output buffers before the entire cluster has been parsed.
-TEST_F(ChunkDemuxerTest, TestIncrementalClusterParsing) {
+TEST_F(ChunkDemuxerTest, IncrementalClusterParsing) {
ASSERT_TRUE(InitDemuxer(true, true));
AppendEmptyCluster(0);
@@ -1537,7 +1519,7 @@ TEST_F(ChunkDemuxerTest, TestIncrementalClusterParsing) {
EXPECT_TRUE(video_read_done);
}
-TEST_F(ChunkDemuxerTest, TestParseErrorDuringInit) {
+TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(
@@ -1549,7 +1531,7 @@ TEST_F(ChunkDemuxerTest, TestParseErrorDuringInit) {
demuxer_->AppendData(kSourceId, &tmp, 1);
}
-TEST_F(ChunkDemuxerTest, TestAVHeadersWithAudioOnlyType) {
+TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kNoTimestamp(),
@@ -1563,7 +1545,7 @@ TEST_F(ChunkDemuxerTest, TestAVHeadersWithAudioOnlyType) {
AppendInitSegment(true, true);
}
-TEST_F(ChunkDemuxerTest, TestAVHeadersWithVideoOnlyType) {
+TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kNoTimestamp(),
@@ -1577,40 +1559,34 @@ TEST_F(ChunkDemuxerTest, TestAVHeadersWithVideoOnlyType) {
AppendInitSegment(true, true);
}
-TEST_F(ChunkDemuxerTest, TestMultipleHeaders) {
+TEST_F(ChunkDemuxerTest, MultipleHeaders) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
- AppendData(cluster_a->data(), cluster_a->size());
+ AppendCluster(kDefaultFirstCluster());
// Append another identical initialization segment.
AppendInitSegment(true, true);
- scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
- AppendData(cluster_b->data(), cluster_b->size());
+ AppendCluster(kDefaultSecondCluster());
GenerateExpectedReads(0, 9);
}
-TEST_F(ChunkDemuxerTest, TestAddSeparateSourcesForAudioAndVideo) {
+TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
- scoped_ptr<Cluster> cluster_a(
- GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
-
- scoped_ptr<Cluster> cluster_v(
- GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
-
// Append audio and video data into separate source ids.
- AppendData(audio_id, cluster_a->data(), cluster_a->size());
+ AppendCluster(audio_id,
+ GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
GenerateAudioStreamExpectedReads(0, 4);
- AppendData(video_id, cluster_v->data(), cluster_v->size());
+ AppendCluster(video_id,
+ GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
GenerateVideoStreamExpectedReads(0, 4);
}
-TEST_F(ChunkDemuxerTest, TestAddIdFailures) {
+TEST_F(ChunkDemuxerTest, AddIdFailures) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK));
@@ -1630,21 +1606,17 @@ TEST_F(ChunkDemuxerTest, TestAddIdFailures) {
}
// Test that Read() calls after a RemoveId() return "end of stream" buffers.
-TEST_F(ChunkDemuxerTest, TestRemoveId) {
+TEST_F(ChunkDemuxerTest, RemoveId) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
- scoped_ptr<Cluster> cluster_a(
+ // Append audio and video data into separate source ids.
+ AppendCluster(audio_id,
GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
-
- scoped_ptr<Cluster> cluster_v(
+ AppendCluster(video_id,
GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
- // Append audio and video data into separate source ids.
- AppendData(audio_id, cluster_a->data(), cluster_a->size());
- AppendData(video_id, cluster_v->data(), cluster_v->size());
-
// Read() from audio should return normal buffers.
GenerateAudioStreamExpectedReads(0, 4);
@@ -1663,7 +1635,7 @@ TEST_F(ChunkDemuxerTest, TestRemoveId) {
// Test that removing an ID immediately after adding it does not interfere with
// quota for new IDs in the future.
-TEST_F(ChunkDemuxerTest, TestRemoveAndAddId) {
+TEST_F(ChunkDemuxerTest, RemoveAndAddId) {
std::string audio_id_1 = "audio1";
ASSERT_TRUE(AddId(audio_id_1, true, false) == ChunkDemuxer::kOk);
demuxer_->RemoveId(audio_id_1);
@@ -1672,12 +1644,11 @@ TEST_F(ChunkDemuxerTest, TestRemoveAndAddId) {
ASSERT_TRUE(AddId(audio_id_2, true, false) == ChunkDemuxer::kOk);
}
-TEST_F(ChunkDemuxerTest, TestSeekCanceled) {
+TEST_F(ChunkDemuxerTest, SeekCanceled) {
ASSERT_TRUE(InitDemuxer(true, true));
// Append cluster at the beginning of the stream.
- scoped_ptr<Cluster> start_cluster(GenerateCluster(0, 4));
- AppendData(start_cluster->data(), start_cluster->size());
+ AppendCluster(GenerateCluster(0, 4));
// Seek to an unbuffered region.
Seek(base::TimeDelta::FromSeconds(50));
@@ -1703,12 +1674,11 @@ TEST_F(ChunkDemuxerTest, TestSeekCanceled) {
GenerateExpectedReads(0, 4);
}
-TEST_F(ChunkDemuxerTest, TestSeekCanceledWhileWaitingForSeek) {
+TEST_F(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
ASSERT_TRUE(InitDemuxer(true, true));
// Append cluster at the beginning of the stream.
- scoped_ptr<Cluster> start_cluster(GenerateCluster(0, 4));
- AppendData(start_cluster->data(), start_cluster->size());
+ AppendCluster(GenerateCluster(0, 4));
// Start waiting for a seek.
base::TimeDelta seek_time1 = base::TimeDelta::FromSeconds(50);
@@ -1733,20 +1703,18 @@ TEST_F(ChunkDemuxerTest, TestSeekCanceledWhileWaitingForSeek) {
}
// Test that Seek() successfully seeks to all source IDs.
-TEST_F(ChunkDemuxerTest, TestSeekAudioAndVideoSources) {
+TEST_F(ChunkDemuxerTest, SeekAudioAndVideoSources) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
- scoped_ptr<Cluster> cluster_a1(
+ AppendCluster(
+ audio_id,
GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
-
- scoped_ptr<Cluster> cluster_v1(
+ AppendCluster(
+ video_id,
GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
- AppendData(audio_id, cluster_a1->data(), cluster_a1->size());
- AppendData(video_id, cluster_v1->data(), cluster_v1->size());
-
// Read() should return buffers at 0.
bool audio_read_done = false;
bool video_read_done = false;
@@ -1774,16 +1742,12 @@ TEST_F(ChunkDemuxerTest, TestSeekAudioAndVideoSources) {
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
- scoped_ptr<Cluster> cluster_a2(
- GenerateSingleStreamCluster(3000, 3092, kAudioTrackNum,
- kAudioBlockDuration));
-
- scoped_ptr<Cluster> cluster_v2(
- GenerateSingleStreamCluster(3000, 3132, kVideoTrackNum,
- kVideoBlockDuration));
-
- AppendData(audio_id, cluster_a2->data(), cluster_a2->size());
- AppendData(video_id, cluster_v2->data(), cluster_v2->size());
+ AppendCluster(audio_id,
+ GenerateSingleStreamCluster(
+ 3000, 3092, kAudioTrackNum, kAudioBlockDuration));
+ AppendCluster(video_id,
+ GenerateSingleStreamCluster(
+ 3000, 3132, kVideoTrackNum, kVideoBlockDuration));
message_loop_.RunUntilIdle();
@@ -1796,16 +1760,11 @@ TEST_F(ChunkDemuxerTest, TestSeekAudioAndVideoSources) {
// is called before data is available for that seek point.
// This scenario might be useful if seeking past the end of stream
// of either audio or video (or both).
-TEST_F(ChunkDemuxerTest, TestEndOfStreamAfterPastEosSeek) {
+TEST_F(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> cluster_a1(
- GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
- scoped_ptr<Cluster> cluster_v1(
- GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
-
- AppendData(cluster_a1->data(), cluster_a1->size());
- AppendData(cluster_v1->data(), cluster_v1->size());
+ AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
+ AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
// Seeking past the end of video.
// Note: audio data is available for that seek point.
@@ -1830,23 +1789,13 @@ TEST_F(ChunkDemuxerTest, TestEndOfStreamAfterPastEosSeek) {
// Test that EndOfStream is ignored if coming during a pending seek
// whose seek time is before some existing ranges.
-TEST_F(ChunkDemuxerTest, TestEndOfStreamDuringPendingSeek) {
+TEST_F(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> cluster_a1(
- GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
- scoped_ptr<Cluster> cluster_v1(
- GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
-
- scoped_ptr<Cluster> cluster_a2(
- GenerateSingleStreamCluster(200, 300, kAudioTrackNum, 10));
- scoped_ptr<Cluster> cluster_v2(
- GenerateSingleStreamCluster(200, 300, kVideoTrackNum, 5));
-
- AppendData(cluster_a1->data(), cluster_a1->size());
- AppendData(cluster_v1->data(), cluster_v1->size());
- AppendData(cluster_a2->data(), cluster_a2->size());
- AppendData(cluster_v2->data(), cluster_v2->size());
+ AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
+ AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
+ AppendCluster(GenerateSingleStreamCluster(200, 300, kAudioTrackNum, 10));
+ AppendCluster(GenerateSingleStreamCluster(200, 300, kVideoTrackNum, 5));
bool seek_cb_was_called = false;
base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(160);
@@ -1857,8 +1806,7 @@ TEST_F(ChunkDemuxerTest, TestEndOfStreamDuringPendingSeek) {
EXPECT_FALSE(seek_cb_was_called);
- EXPECT_CALL(host_, SetDuration(
- base::TimeDelta::FromMilliseconds(300)));
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(300)));
MarkEndOfStream(PIPELINE_OK);
message_loop_.RunUntilIdle();
@@ -1866,12 +1814,8 @@ TEST_F(ChunkDemuxerTest, TestEndOfStreamDuringPendingSeek) {
demuxer_->UnmarkEndOfStream();
- scoped_ptr<Cluster> cluster_a3(
- GenerateSingleStreamCluster(140, 180, kAudioTrackNum, 10));
- scoped_ptr<Cluster> cluster_v3(
- GenerateSingleStreamCluster(140, 180, kVideoTrackNum, 5));
- AppendData(cluster_a3->data(), cluster_a3->size());
- AppendData(cluster_v3->data(), cluster_v3->size());
+ AppendCluster(GenerateSingleStreamCluster(140, 180, kAudioTrackNum, 10));
+ AppendCluster(GenerateSingleStreamCluster(140, 180, kVideoTrackNum, 5));
message_loop_.RunUntilIdle();
@@ -1890,17 +1834,14 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
AppendInitSegment(true, false);
// Test a simple cluster.
- scoped_ptr<Cluster> cluster_1(GenerateSingleStreamCluster(0, 92,
- kAudioTrackNum, kAudioBlockDuration));
- AppendData(cluster_1->data(), cluster_1->size());
+ AppendCluster(
+ GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
CheckExpectedRanges("{ [0,92) }");
// Append a disjoint cluster to check for two separate ranges.
- scoped_ptr<Cluster> cluster_2(GenerateSingleStreamCluster(150, 219,
- kAudioTrackNum, kAudioBlockDuration));
-
- AppendData(cluster_2->data(), cluster_2->size());
+ AppendCluster(GenerateSingleStreamCluster(
+ 150, 219, kAudioTrackNum, kAudioBlockDuration));
CheckExpectedRanges("{ [0,92) [150,219) }");
}
@@ -1915,18 +1856,14 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
AppendInitSegment(false, true);
// Test a simple cluster.
- scoped_ptr<Cluster> cluster_1(GenerateSingleStreamCluster(0, 132,
- kVideoTrackNum, kVideoBlockDuration));
-
- AppendData(cluster_1->data(), cluster_1->size());
+ AppendCluster(
+ GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
CheckExpectedRanges("{ [0,132) }");
// Append a disjoint cluster to check for two separate ranges.
- scoped_ptr<Cluster> cluster_2(GenerateSingleStreamCluster(200, 299,
- kVideoTrackNum, kVideoBlockDuration));
-
- AppendData(cluster_2->data(), cluster_2->size());
+ AppendCluster(GenerateSingleStreamCluster(
+ 200, 299, kVideoTrackNum, kVideoBlockDuration));
CheckExpectedRanges("{ [0,132) [200,299) }");
}
@@ -1939,86 +1876,52 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
// Buffered Range: 0 -> 23
// Audio block duration is smaller than video block duration,
// so the buffered ranges should correspond to the audio blocks.
- scoped_ptr<Cluster> cluster_a0(
- GenerateSingleStreamCluster(0, kAudioBlockDuration, kAudioTrackNum,
- kAudioBlockDuration));
-
- scoped_ptr<Cluster> cluster_v0(
- GenerateSingleStreamCluster(0, kVideoBlockDuration, kVideoTrackNum,
- kVideoBlockDuration));
-
- AppendData(cluster_a0->data(), cluster_a0->size());
- AppendData(cluster_v0->data(), cluster_v0->size());
+ AppendCluster(GenerateSingleStreamCluster(
+ 0, kAudioBlockDuration, kAudioTrackNum, kAudioBlockDuration));
+ AppendCluster(GenerateSingleStreamCluster(
+ 0, kVideoBlockDuration, kVideoTrackNum, kVideoBlockDuration));
CheckExpectedRanges("{ [0,23) }");
// Audio: 300 -> 400
// Video: 320 -> 420
// Buffered Range: 320 -> 400 (end overlap)
- scoped_ptr<Cluster> cluster_a1(
- GenerateSingleStreamCluster(300, 400, kAudioTrackNum, 50));
-
- scoped_ptr<Cluster> cluster_v1(
- GenerateSingleStreamCluster(320, 420, kVideoTrackNum, 50));
-
- AppendData(cluster_a1->data(), cluster_a1->size());
- AppendData(cluster_v1->data(), cluster_v1->size());
+ AppendCluster(GenerateSingleStreamCluster(300, 400, kAudioTrackNum, 50));
+ AppendCluster(GenerateSingleStreamCluster(320, 420, kVideoTrackNum, 50));
CheckExpectedRanges("{ [0,23) [320,400) }");
// Audio: 520 -> 590
// Video: 500 -> 570
// Buffered Range: 520 -> 570 (front overlap)
- scoped_ptr<Cluster> cluster_a2(
- GenerateSingleStreamCluster(520, 590, kAudioTrackNum, 70));
-
- scoped_ptr<Cluster> cluster_v2(
- GenerateSingleStreamCluster(500, 570, kVideoTrackNum, 70));
-
- AppendData(cluster_a2->data(), cluster_a2->size());
- AppendData(cluster_v2->data(), cluster_v2->size());
+ AppendCluster(GenerateSingleStreamCluster(520, 590, kAudioTrackNum, 70));
+ AppendCluster(GenerateSingleStreamCluster(500, 570, kVideoTrackNum, 70));
CheckExpectedRanges("{ [0,23) [320,400) [520,570) }");
// Audio: 720 -> 750
// Video: 700 -> 770
// Buffered Range: 720 -> 750 (complete overlap, audio)
- scoped_ptr<Cluster> cluster_a3(
- GenerateSingleStreamCluster(720, 750, kAudioTrackNum, 30));
-
- scoped_ptr<Cluster> cluster_v3(
- GenerateSingleStreamCluster(700, 770, kVideoTrackNum, 70));
-
- AppendData(cluster_a3->data(), cluster_a3->size());
- AppendData(cluster_v3->data(), cluster_v3->size());
+ AppendCluster(GenerateSingleStreamCluster(720, 750, kAudioTrackNum, 30));
+ AppendCluster(GenerateSingleStreamCluster(700, 770, kVideoTrackNum, 70));
CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) }");
// Audio: 900 -> 970
// Video: 920 -> 950
// Buffered Range: 920 -> 950 (complete overlap, video)
- scoped_ptr<Cluster> cluster_a4(
- GenerateSingleStreamCluster(900, 970, kAudioTrackNum, 70));
-
- scoped_ptr<Cluster> cluster_v4(
- GenerateSingleStreamCluster(920, 950, kVideoTrackNum, 30));
-
- AppendData(cluster_a4->data(), cluster_a4->size());
- AppendData(cluster_v4->data(), cluster_v4->size());
+ AppendCluster(GenerateSingleStreamCluster(900, 970, kAudioTrackNum, 70));
+ AppendCluster(GenerateSingleStreamCluster(920, 950, kVideoTrackNum, 30));
CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
// Appending within buffered range should not affect buffered ranges.
- scoped_ptr<Cluster> cluster_a5(
- GenerateSingleStreamCluster(930, 950, kAudioTrackNum, 20));
- AppendData(cluster_a5->data(), cluster_a5->size());
+ AppendCluster(GenerateSingleStreamCluster(930, 950, kAudioTrackNum, 20));
CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
// Appending to single stream outside buffered ranges should not affect
// buffered ranges.
- scoped_ptr<Cluster> cluster_v5(
- GenerateSingleStreamCluster(1230, 1240, kVideoTrackNum, 10));
- AppendData(cluster_v5->data(), cluster_v5->size());
+ AppendCluster(GenerateSingleStreamCluster(1230, 1240, kVideoTrackNum, 10));
CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
}
@@ -2028,13 +1931,8 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> cluster_a(
- GenerateSingleStreamCluster(0, 90, kAudioTrackNum, 90));
- scoped_ptr<Cluster> cluster_v(
- GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 100));
-
- AppendData(cluster_a->data(), cluster_a->size());
- AppendData(cluster_v->data(), cluster_v->size());
+ AppendCluster(GenerateSingleStreamCluster(0, 90, kAudioTrackNum, 90));
+ AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 100));
CheckExpectedRanges("{ [0,90) }");
@@ -2044,12 +1942,11 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
CheckExpectedRanges("{ [0,100) }");
}
-TEST_F(ChunkDemuxerTest, TestDifferentStreamTimecodes) {
+TEST_F(ChunkDemuxerTest, DifferentStreamTimecodes) {
ASSERT_TRUE(InitDemuxer(true, true));
// Create a cluster where the video timecode begins 25ms after the audio.
- scoped_ptr<Cluster> start_cluster(GenerateCluster(0, 25, 8));
- AppendData(start_cluster->data(), start_cluster->size());
+ AppendCluster(GenerateCluster(0, 25, 8));
Seek(base::TimeDelta::FromSeconds(0));
GenerateExpectedReads(0, 25, 8);
@@ -2059,26 +1956,21 @@ TEST_F(ChunkDemuxerTest, TestDifferentStreamTimecodes) {
// Generate a cluster to fulfill this seek, where audio timecode begins 25ms
// after the video.
- scoped_ptr<Cluster> middle_cluster(GenerateCluster(5025, 5000, 8));
- AppendData(middle_cluster->data(), middle_cluster->size());
+ AppendCluster(GenerateCluster(5025, 5000, 8));
GenerateExpectedReads(5025, 5000, 8);
}
-TEST_F(ChunkDemuxerTest, TestDifferentStreamTimecodesSeparateSources) {
+TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
// Generate two streams where the video stream starts 5ms after the audio
// stream and append them.
- scoped_ptr<Cluster> cluster_v(
- GenerateSingleStreamCluster(30, 4 * kVideoBlockDuration + 30,
- kVideoTrackNum, kVideoBlockDuration));
- scoped_ptr<Cluster> cluster_a(
- GenerateSingleStreamCluster(25, 4 * kAudioBlockDuration + 25,
- kAudioTrackNum, kAudioBlockDuration));
- AppendData(audio_id, cluster_a->data(), cluster_a->size());
- AppendData(video_id, cluster_v->data(), cluster_v->size());
+ AppendCluster(audio_id, GenerateSingleStreamCluster(
+ 25, 4 * kAudioBlockDuration + 25, kAudioTrackNum, kAudioBlockDuration));
+ AppendCluster(video_id, GenerateSingleStreamCluster(
+ 30, 4 * kVideoBlockDuration + 30, kVideoTrackNum, kVideoBlockDuration));
// Both streams should be able to fulfill a seek to 25.
Seek(base::TimeDelta::FromMilliseconds(25));
@@ -2086,21 +1978,17 @@ TEST_F(ChunkDemuxerTest, TestDifferentStreamTimecodesSeparateSources) {
GenerateVideoStreamExpectedReads(30, 4);
}
-TEST_F(ChunkDemuxerTest, TestDifferentStreamTimecodesOutOfRange) {
+TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
// Generate two streams where the video stream starts 10s after the audio
// stream and append them.
- scoped_ptr<Cluster> cluster_v(
- GenerateSingleStreamCluster(10000, 4 * kVideoBlockDuration + 10000,
- kVideoTrackNum, kVideoBlockDuration));
- scoped_ptr<Cluster> cluster_a(
- GenerateSingleStreamCluster(0, 4 * kAudioBlockDuration + 0,
- kAudioTrackNum, kAudioBlockDuration));
- AppendData(audio_id, cluster_a->data(), cluster_a->size());
- AppendData(video_id, cluster_v->data(), cluster_v->size());
+ AppendCluster(audio_id, GenerateSingleStreamCluster(0,
+ 4 * kAudioBlockDuration + 0, kAudioTrackNum, kAudioBlockDuration));
+ AppendCluster(video_id, GenerateSingleStreamCluster(10000,
+ 4 * kVideoBlockDuration + 10000, kVideoTrackNum, kVideoBlockDuration));
// Should not be able to fulfill a seek to 0.
base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(0);
@@ -2111,20 +1999,19 @@ TEST_F(ChunkDemuxerTest, TestDifferentStreamTimecodesOutOfRange) {
ExpectEndOfStream(DemuxerStream::VIDEO);
}
-TEST_F(ChunkDemuxerTest, TestClusterWithNoBuffers) {
+TEST_F(ChunkDemuxerTest, ClusterWithNoBuffers) {
ASSERT_TRUE(InitDemuxer(true, true));
// Generate and append an empty cluster beginning at 0.
AppendEmptyCluster(0);
// Sanity check that data can be appended after this cluster correctly.
- scoped_ptr<Cluster> media_data(GenerateCluster(0, 2));
- AppendData(media_data->data(), media_data->size());
+ AppendCluster(GenerateCluster(0, 2));
ExpectRead(DemuxerStream::AUDIO, 0);
ExpectRead(DemuxerStream::VIDEO, 0);
}
-TEST_F(ChunkDemuxerTest, TestCodecPrefixMatching) {
+TEST_F(ChunkDemuxerTest, CodecPrefixMatching) {
ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
#if defined(GOOGLE_CHROME_BUILD) || defined(USE_PROPRIETARY_CODECS)
@@ -2139,7 +2026,7 @@ TEST_F(ChunkDemuxerTest, TestCodecPrefixMatching) {
// Test codec ID's that are not compliant with RFC6381, but have been
// seen in the wild.
-TEST_F(ChunkDemuxerTest, TestCodecIDsThatAreNotRFC6381Compliant) {
+TEST_F(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
#if defined(GOOGLE_CHROME_BUILD) || defined(USE_PROPRIETARY_CODECS)
@@ -2166,19 +2053,17 @@ TEST_F(ChunkDemuxerTest, TestCodecIDsThatAreNotRFC6381Compliant) {
}
}
-TEST_F(ChunkDemuxerTest, TestEndOfStreamStillSetAfterSeek) {
+TEST_F(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
ASSERT_TRUE(InitDemuxer(true, true));
EXPECT_CALL(host_, SetDuration(_))
.Times(AnyNumber());
- scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
- scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
base::TimeDelta kLastAudioTimestamp = base::TimeDelta::FromMilliseconds(92);
base::TimeDelta kLastVideoTimestamp = base::TimeDelta::FromMilliseconds(99);
- AppendData(cluster_a->data(), cluster_a->size());
- AppendData(cluster_b->data(), cluster_b->size());
+ AppendCluster(kDefaultFirstCluster());
+ AppendCluster(kDefaultSecondCluster());
MarkEndOfStream(PIPELINE_OK);
DemuxerStream::Status status;
@@ -2205,7 +2090,7 @@ TEST_F(ChunkDemuxerTest, TestEndOfStreamStillSetAfterSeek) {
EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
}
-TEST_F(ChunkDemuxerTest, TestGetBufferedRangesBeforeInitSegment) {
+TEST_F(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK));
ASSERT_EQ(AddId("audio", true, false), ChunkDemuxer::kOk);
@@ -2217,19 +2102,17 @@ TEST_F(ChunkDemuxerTest, TestGetBufferedRangesBeforeInitSegment) {
// Test that Seek() completes successfully when the first cluster
// arrives.
-TEST_F(ChunkDemuxerTest, TestEndOfStreamDuringSeek) {
+TEST_F(ChunkDemuxerTest, EndOfStreamDuringSeek) {
InSequence s;
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
- scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
- AppendData(cluster_a->data(), cluster_a->size());
+ AppendCluster(kDefaultFirstCluster());
base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
demuxer_->StartWaitingForSeek(seek_time);
- AppendData(cluster_b->data(), cluster_b->size());
+ AppendCluster(kDefaultSecondCluster());
EXPECT_CALL(host_, SetDuration(
base::TimeDelta::FromMilliseconds(kDefaultSecondClusterEndTimestamp)));
MarkEndOfStream(PIPELINE_OK);
@@ -2244,7 +2127,7 @@ TEST_F(ChunkDemuxerTest, TestEndOfStreamDuringSeek) {
end_of_stream_helper.CheckIfReadDonesWereCalled(true);
}
-TEST_F(ChunkDemuxerTest, TestConfigChange_Video) {
+TEST_F(ChunkDemuxerTest, ConfigChange_Video) {
InSequence s;
ASSERT_TRUE(InitDemuxerWithConfigChangeData());
@@ -2291,7 +2174,7 @@ TEST_F(ChunkDemuxerTest, TestConfigChange_Video) {
ASSERT_EQ(status, DemuxerStream::kOk);
}
-TEST_F(ChunkDemuxerTest, TestConfigChange_Audio) {
+TEST_F(ChunkDemuxerTest, ConfigChange_Audio) {
InSequence s;
ASSERT_TRUE(InitDemuxerWithConfigChangeData());
@@ -2338,7 +2221,7 @@ TEST_F(ChunkDemuxerTest, TestConfigChange_Audio) {
ASSERT_EQ(status, DemuxerStream::kOk);
}
-TEST_F(ChunkDemuxerTest, TestConfigChange_Seek) {
+TEST_F(ChunkDemuxerTest, ConfigChange_Seek) {
InSequence s;
ASSERT_TRUE(InitDemuxerWithConfigChangeData());
@@ -2385,59 +2268,41 @@ TEST_F(ChunkDemuxerTest, TestConfigChange_Seek) {
ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
}
-TEST_F(ChunkDemuxerTest, TestTimestampPositiveOffset) {
+TEST_F(ChunkDemuxerTest, TimestampPositiveOffset) {
ASSERT_TRUE(InitDemuxer(true, true));
ASSERT_TRUE(demuxer_->SetTimestampOffset(
kSourceId, base::TimeDelta::FromSeconds(30)));
- scoped_ptr<Cluster> cluster(GenerateCluster(0, 2));
- AppendData(cluster->data(), cluster->size());
+ AppendCluster(GenerateCluster(0, 2));
Seek(base::TimeDelta::FromMilliseconds(30000));
GenerateExpectedReads(30000, 2);
}
-TEST_F(ChunkDemuxerTest, TestTimestampNegativeOffset) {
+TEST_F(ChunkDemuxerTest, TimestampNegativeOffset) {
ASSERT_TRUE(InitDemuxer(true, true));
ASSERT_TRUE(demuxer_->SetTimestampOffset(
kSourceId, base::TimeDelta::FromSeconds(-1)));
- scoped_ptr<Cluster> cluster = GenerateCluster(1000, 2);
- AppendData(cluster->data(), cluster->size());
+ AppendCluster(GenerateCluster(1000, 2));
GenerateExpectedReads(0, 2);
}
-TEST_F(ChunkDemuxerTest, TestTimestampOffsetSeparateStreams) {
+TEST_F(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
- scoped_ptr<Cluster> cluster_a1(
- GenerateSingleStreamCluster(
- 2500, 2500 + kAudioBlockDuration * 4, kAudioTrackNum,
- kAudioBlockDuration));
-
- scoped_ptr<Cluster> cluster_v1(
- GenerateSingleStreamCluster(
- 2500, 2500 + kVideoBlockDuration * 4, kVideoTrackNum,
- kVideoBlockDuration));
-
- scoped_ptr<Cluster> cluster_a2(
- GenerateSingleStreamCluster(
- 0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
-
- scoped_ptr<Cluster> cluster_v2(
- GenerateSingleStreamCluster(
- 0, kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
-
ASSERT_TRUE(demuxer_->SetTimestampOffset(
audio_id, base::TimeDelta::FromMilliseconds(-2500)));
ASSERT_TRUE(demuxer_->SetTimestampOffset(
video_id, base::TimeDelta::FromMilliseconds(-2500)));
- AppendData(audio_id, cluster_a1->data(), cluster_a1->size());
- AppendData(video_id, cluster_v1->data(), cluster_v1->size());
+ AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
+ 2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
+ AppendCluster(video_id, GenerateSingleStreamCluster(2500,
+ 2500 + kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
GenerateAudioStreamExpectedReads(0, 4);
GenerateVideoStreamExpectedReads(0, 4);
@@ -2447,13 +2312,15 @@ TEST_F(ChunkDemuxerTest, TestTimestampOffsetSeparateStreams) {
audio_id, base::TimeDelta::FromMilliseconds(27300)));
ASSERT_TRUE(demuxer_->SetTimestampOffset(
video_id, base::TimeDelta::FromMilliseconds(27300)));
- AppendData(audio_id, cluster_a2->data(), cluster_a2->size());
- AppendData(video_id, cluster_v2->data(), cluster_v2->size());
+ AppendCluster(audio_id, GenerateSingleStreamCluster(
+ 0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
+ AppendCluster(video_id, GenerateSingleStreamCluster(
+ 0, kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
GenerateVideoStreamExpectedReads(27300, 4);
GenerateAudioStreamExpectedReads(27300, 4);
}
-TEST_F(ChunkDemuxerTest, TestTimestampOffsetMidParse) {
+TEST_F(ChunkDemuxerTest, TimestampOffsetMidParse) {
ASSERT_TRUE(InitDemuxer(true, true));
scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
@@ -2471,22 +2338,19 @@ TEST_F(ChunkDemuxerTest, TestTimestampOffsetMidParse) {
kSourceId, base::TimeDelta::FromSeconds(25)));
}
-TEST_F(ChunkDemuxerTest, TestDurationChange) {
+TEST_F(ChunkDemuxerTest, DurationChange) {
ASSERT_TRUE(InitDemuxer(true, true));
static const int kStreamDuration = kDefaultDuration().InMilliseconds();
// Add data leading up to the currently set duration.
- scoped_ptr<Cluster> first_cluster = GenerateCluster(
- kStreamDuration - kAudioBlockDuration,
- kStreamDuration - kVideoBlockDuration, 2);
- AppendData(first_cluster->data(), first_cluster->size());
+ AppendCluster(GenerateCluster(kStreamDuration - kAudioBlockDuration,
+ kStreamDuration - kVideoBlockDuration,
+ 2));
CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
// Add data at the currently set duration. The duration should not increase.
- scoped_ptr<Cluster> second_cluster = GenerateCluster(
- kDefaultDuration().InMilliseconds(), 2);
- AppendData(second_cluster->data(), second_cluster->size());
+ AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
// Range should not be affected.
CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
@@ -2494,34 +2358,31 @@ TEST_F(ChunkDemuxerTest, TestDurationChange) {
// Now add data past the duration and expect a new duration to be signalled.
static const int kNewStreamDuration =
kStreamDuration + kAudioBlockDuration * 2;
- scoped_ptr<Cluster> third_cluster = GenerateCluster(
- kStreamDuration + kAudioBlockDuration,
- kStreamDuration + kVideoBlockDuration, 2);
EXPECT_CALL(host_, SetDuration(
base::TimeDelta::FromMilliseconds(kNewStreamDuration)));
- AppendData(third_cluster->data(), third_cluster->size());
+ AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
+ kStreamDuration + kVideoBlockDuration,
+ 2));
// See that the range has increased appropriately.
CheckExpectedRanges(kSourceId, "{ [201191,201270) }");
}
-TEST_F(ChunkDemuxerTest, TestDurationChangeTimestampOffset) {
+TEST_F(ChunkDemuxerTest, DurationChangeTimestampOffset) {
ASSERT_TRUE(InitDemuxer(true, true));
ASSERT_TRUE(demuxer_->SetTimestampOffset(kSourceId, kDefaultDuration()));
- scoped_ptr<Cluster> cluster = GenerateCluster(0, 4);
EXPECT_CALL(host_, SetDuration(
kDefaultDuration() + base::TimeDelta::FromMilliseconds(
kAudioBlockDuration * 2)));
- AppendData(cluster->data(), cluster->size());
+ AppendCluster(GenerateCluster(0, 4));
}
-TEST_F(ChunkDemuxerTest, TestEndOfStreamTruncateDuration) {
+TEST_F(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
- AppendData(cluster_a->data(), cluster_a->size());
+ AppendCluster(kDefaultFirstCluster());
EXPECT_CALL(host_, SetDuration(
base::TimeDelta::FromMilliseconds(kDefaultFirstClusterEndTimestamp)));
@@ -2529,32 +2390,30 @@ TEST_F(ChunkDemuxerTest, TestEndOfStreamTruncateDuration) {
}
-TEST_F(ChunkDemuxerTest, TestZeroLengthAppend) {
+TEST_F(ChunkDemuxerTest, ZeroLengthAppend) {
ASSERT_TRUE(InitDemuxer(true, true));
AppendData(NULL, 0);
}
-TEST_F(ChunkDemuxerTest, TestAppendAfterEndOfStream) {
+TEST_F(ChunkDemuxerTest, AppendAfterEndOfStream) {
ASSERT_TRUE(InitDemuxer(true, true));
EXPECT_CALL(host_, SetDuration(_))
.Times(AnyNumber());
- scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
- AppendData(cluster_a->data(), cluster_a->size());
+ AppendCluster(kDefaultFirstCluster());
MarkEndOfStream(PIPELINE_OK);
demuxer_->UnmarkEndOfStream();
- scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
- AppendData(cluster_b->data(), cluster_b->size());
+ AppendCluster(kDefaultSecondCluster());
MarkEndOfStream(PIPELINE_OK);
}
// Test receiving a Shutdown() call before we get an Initialize()
// call. This can happen if video element gets destroyed before
// the pipeline has a chance to initialize the demuxer.
-TEST_F(ChunkDemuxerTest, TestShutdownBeforeInitialize) {
+TEST_F(ChunkDemuxerTest, ShutdownBeforeInitialize) {
demuxer_->Shutdown();
demuxer_->Initialize(
&host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN));
@@ -2563,8 +2422,7 @@ TEST_F(ChunkDemuxerTest, TestShutdownBeforeInitialize) {
TEST_F(ChunkDemuxerTest, ReadAfterAudioDisabled) {
ASSERT_TRUE(InitDemuxer(true, true));
- scoped_ptr<Cluster> cluster(kDefaultFirstCluster());
- AppendData(cluster->data(), cluster->size());
+ AppendCluster(kDefaultFirstCluster());
DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
ASSERT_TRUE(stream);
@@ -2584,7 +2442,7 @@ TEST_F(ChunkDemuxerTest, ReadAfterAudioDisabled) {
// Verifies that signalling end of stream while stalled at a gap
// boundary does not trigger end of stream buffers to be returned.
-TEST_F(ChunkDemuxerTest, TestEndOfStreamWhileWaitingForGapToBeFilled) {
+TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
ASSERT_TRUE(InitDemuxer(true, true));
AppendCluster(0, 10);
@@ -2647,7 +2505,7 @@ TEST_F(ChunkDemuxerTest, TestEndOfStreamWhileWaitingForGapToBeFilled) {
EXPECT_TRUE(video_read_done);
}
-TEST_F(ChunkDemuxerTest, TestCanceledSeekDuringInitialPreroll) {
+TEST_F(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
ASSERT_TRUE(InitDemuxer(true, true));
// Cancel preroll.
@@ -2661,7 +2519,7 @@ TEST_F(ChunkDemuxerTest, TestCanceledSeekDuringInitialPreroll) {
AppendCluster(seek_time.InMilliseconds(), 10);
}
-TEST_F(ChunkDemuxerTest, TestGCDuringSeek) {
+TEST_F(ChunkDemuxerTest, GCDuringSeek) {
ASSERT_TRUE(InitDemuxer(true, false));
demuxer_->SetMemoryLimitsForTesting(5 * kBlockSize);
@@ -2746,4 +2604,12 @@ TEST_F(ChunkDemuxerTest, AppendWindow) {
CheckExpectedRanges(kSourceId, "{ [120,300) [420,660) }");
}
+TEST_F(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
+ ASSERT_TRUE(InitDemuxer(true, true));
+ EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
+ AppendGarbage();
+ base::TimeDelta seek_time = base::TimeDelta::FromSeconds(50);
+ demuxer_->StartWaitingForSeek(seek_time);
+}
+
} // namespace media
diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc
index 3d68b79010..34fc7936fc 100644
--- a/media/filters/ffmpeg_audio_decoder.cc
+++ b/media/filters/ffmpeg_audio_decoder.cc
@@ -39,13 +39,42 @@ static inline bool IsEndOfStream(int result,
return result == 0 && decoded_size == 0 && input->end_of_stream();
}
+// Return the number of channels from the data in |frame|.
+static inline int DetermineChannels(AVFrame* frame) {
+#if defined(CHROMIUM_NO_AVFRAME_CHANNELS)
+ // When use_system_ffmpeg==1, libav's AVFrame doesn't have channels field.
+ return av_get_channel_layout_nb_channels(frame->channel_layout);
+#else
+ return frame->channels;
+#endif
+}
+
+// Called by FFmpeg's allocation routine to allocate a buffer. Uses
+// AVCodecContext.opaque to get the object reference in order to call
+// GetAudioBuffer() to do the actual allocation.
+static int GetAudioBufferImpl(struct AVCodecContext* s,
+ AVFrame* frame,
+ int flags) {
+ DCHECK(s->codec->capabilities & CODEC_CAP_DR1);
+ DCHECK_EQ(s->codec_type, AVMEDIA_TYPE_AUDIO);
+ FFmpegAudioDecoder* decoder = static_cast<FFmpegAudioDecoder*>(s->opaque);
+ return decoder->GetAudioBuffer(s, frame, flags);
+}
+
+// Called by FFmpeg's allocation routine to free a buffer. |opaque| is the
+// AudioBuffer allocated, so unref it.
+static void ReleaseAudioBufferImpl(void* opaque, uint8* data) {
+ scoped_refptr<AudioBuffer> buffer;
+ buffer.swap(reinterpret_cast<AudioBuffer**>(&opaque));
+}
+
FFmpegAudioDecoder::FFmpegAudioDecoder(
const scoped_refptr<base::MessageLoopProxy>& message_loop)
: message_loop_(message_loop),
weak_factory_(this),
demuxer_stream_(NULL),
codec_context_(NULL),
- bits_per_channel_(0),
+ bytes_per_channel_(0),
channel_layout_(CHANNEL_LAYOUT_NONE),
channels_(0),
samples_per_second_(0),
@@ -104,7 +133,7 @@ void FFmpegAudioDecoder::Read(const ReadCB& read_cb) {
int FFmpegAudioDecoder::bits_per_channel() {
DCHECK(message_loop_->BelongsToCurrentThread());
- return bits_per_channel_;
+ return bytes_per_channel_ * 8;
}
ChannelLayout FFmpegAudioDecoder::channel_layout() {
@@ -133,6 +162,48 @@ FFmpegAudioDecoder::~FFmpegAudioDecoder() {
ReleaseFFmpegResources();
}
+int FFmpegAudioDecoder::GetAudioBuffer(AVCodecContext* codec,
+ AVFrame* frame,
+ int flags) {
+ // Since this routine is called by FFmpeg when a buffer is required for audio
+ // data, use the values supplied by FFmpeg (ignoring the current settings).
+ // RunDecodeLoop() gets to determine if the buffer is useable or not.
+ AVSampleFormat format = static_cast<AVSampleFormat>(frame->format);
+ SampleFormat sample_format = AVSampleFormatToSampleFormat(format);
+ int channels = DetermineChannels(frame);
+ int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format);
+ if (frame->nb_samples <= 0)
+ return AVERROR(EINVAL);
+
+ // Determine how big the buffer should be and allocate it. FFmpeg may adjust
+ // how big each channel data is in order to meet it's alignment policy, so
+ // we need to take this into consideration.
+ int buffer_size_in_bytes =
+ av_samples_get_buffer_size(NULL, channels, frame->nb_samples, format, 1);
+ int frames_required = buffer_size_in_bytes / bytes_per_channel / channels;
+ DCHECK_GE(frames_required, frame->nb_samples);
+ scoped_refptr<AudioBuffer> buffer =
+ AudioBuffer::CreateBuffer(sample_format, channels, frames_required);
+
+ // Initialize the data[], linesize[], and extended_data[] fields.
+ int ret = avcodec_fill_audio_frame(frame,
+ channels,
+ format,
+ buffer->writable_data(),
+ buffer_size_in_bytes,
+ 1);
+ if (ret < 0)
+ return ret;
+
+ // Now create an AVBufferRef for the data just allocated. It will own the
+ // reference to the AudioBuffer object.
+ void* opaque = NULL;
+ buffer.swap(reinterpret_cast<AudioBuffer**>(&opaque));
+ frame->buf[0] = av_buffer_create(
+ frame->data[0], buffer_size_in_bytes, ReleaseAudioBufferImpl, opaque, 0);
+ return 0;
+}
+
void FFmpegAudioDecoder::ReadFromDemuxerStream() {
DCHECK(!read_cb_.is_null());
demuxer_stream_->Read(base::Bind(
@@ -250,12 +321,12 @@ bool FFmpegAudioDecoder::ConfigureDecoder() {
}
if (codec_context_ &&
- (bits_per_channel_ != config.bits_per_channel() ||
+ (bytes_per_channel_ != config.bytes_per_channel() ||
channel_layout_ != config.channel_layout() ||
samples_per_second_ != config.samples_per_second())) {
DVLOG(1) << "Unsupported config change :";
- DVLOG(1) << "\tbits_per_channel : " << bits_per_channel_
- << " -> " << config.bits_per_channel();
+ DVLOG(1) << "\tbytes_per_channel : " << bytes_per_channel_
+ << " -> " << config.bytes_per_channel();
DVLOG(1) << "\tchannel_layout : " << channel_layout_
<< " -> " << config.channel_layout();
DVLOG(1) << "\tsample_rate : " << samples_per_second_
@@ -270,6 +341,9 @@ bool FFmpegAudioDecoder::ConfigureDecoder() {
codec_context_ = avcodec_alloc_context3(NULL);
AudioDecoderConfigToAVCodecContext(config, codec_context_);
+ codec_context_->opaque = this;
+ codec_context_->get_buffer2 = GetAudioBufferImpl;
+
AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
if (!codec || avcodec_open2(codec_context_, codec, NULL) < 0) {
DLOG(ERROR) << "Could not initialize audio decoder: "
@@ -289,7 +363,7 @@ bool FFmpegAudioDecoder::ConfigureDecoder() {
av_sample_format_ = codec_context_->sample_fmt;
sample_format_ = AVSampleFormatToSampleFormat(
static_cast<AVSampleFormat>(av_sample_format_));
- bits_per_channel_ = SampleFormatToBytesPerChannel(sample_format_) * 8;
+ bytes_per_channel_ = SampleFormatToBytesPerChannel(sample_format_);
return true;
}
@@ -373,13 +447,10 @@ void FFmpegAudioDecoder::RunDecodeLoop(
}
}
+ scoped_refptr<AudioBuffer> output;
int decoded_frames = 0;
-#ifdef CHROMIUM_NO_AVFRAME_CHANNELS
- int channels = av_get_channel_layout_nb_channels(
- av_frame_->channel_layout);
-#else
- int channels = av_frame_->channels;
-#endif
+ int original_frames = 0;
+ int channels = DetermineChannels(av_frame_);
if (frame_decoded) {
if (av_frame_->sample_rate != samples_per_second_ ||
channels != channels_ ||
@@ -397,40 +468,42 @@ void FFmpegAudioDecoder::RunDecodeLoop(
queued_audio_.push_back(queue_entry);
break;
}
- decoded_frames = av_frame_->nb_samples;
- }
- int frames_to_skip = 0;
- if (decoded_frames > 0 && output_frames_to_drop_ > 0) {
- frames_to_skip = std::min(decoded_frames, output_frames_to_drop_);
- output_frames_to_drop_ -= frames_to_skip;
+ // Get the AudioBuffer that the data was decoded into. Adjust the number
+ // of frames, in case fewer than requested were actually decoded.
+ output = reinterpret_cast<AudioBuffer*>(
+ av_buffer_get_opaque(av_frame_->buf[0]));
+ DCHECK_EQ(channels_, output->channel_count());
+ original_frames = av_frame_->nb_samples;
+ int unread_frames = output->frame_count() - original_frames;
+ DCHECK_GE(unread_frames, 0);
+ if (unread_frames > 0)
+ output->TrimEnd(unread_frames);
+
+ // If there are frames to drop, get rid of as many as we can.
+ if (output_frames_to_drop_ > 0) {
+ int drop = std::min(output->frame_count(), output_frames_to_drop_);
+ output->TrimStart(drop);
+ output_frames_to_drop_ -= drop;
+ }
+
+ decoded_frames = output->frame_count();
}
- scoped_refptr<AudioBuffer> output;
- if (frames_to_skip < decoded_frames) {
- DCHECK_EQ(sample_format_,
- AVSampleFormatToSampleFormat(
- static_cast<AVSampleFormat>(av_frame_->format)));
- base::TimeDelta start_time = output_timestamp_helper_->GetTimestamp();
- output = AudioBuffer::CopyFrom(
- sample_format_,
- channels_,
- decoded_frames,
- av_frame_->extended_data,
- start_time,
+ if (decoded_frames > 0) {
+ // Set the timestamp/duration once all the extra frames have been
+ // discarded.
+ output->set_timestamp(output_timestamp_helper_->GetTimestamp());
+ output->set_duration(
output_timestamp_helper_->GetFrameDuration(decoded_frames));
- if (frames_to_skip > 0) {
- output->TrimStart(frames_to_skip);
- // Reset the timestamp to the correct value since the previous frames
- // are to be ignored, not skipped. Duration will have been adjusted
- // correctly.
- output->set_timestamp(start_time);
- }
- output_timestamp_helper_->AddFrames(decoded_frames - frames_to_skip);
- } else if (IsEndOfStream(result, decoded_frames, input) &&
+ output_timestamp_helper_->AddFrames(decoded_frames);
+ } else if (IsEndOfStream(result, original_frames, input) &&
!skip_eos_append) {
DCHECK_EQ(packet.size, 0);
output = AudioBuffer::CreateEOSBuffer();
+ } else {
+ // In case all the frames in the buffer were dropped.
+ output = NULL;
}
if (output.get()) {
diff --git a/media/filters/ffmpeg_audio_decoder.h b/media/filters/ffmpeg_audio_decoder.h
index 51817963af..7ea8615c2e 100644
--- a/media/filters/ffmpeg_audio_decoder.h
+++ b/media/filters/ffmpeg_audio_decoder.h
@@ -43,6 +43,11 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
virtual int samples_per_second() OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
+ // Callback called from within FFmpeg to allocate a buffer based on
+ // the dimensions of |codec_context|. See AVCodecContext.get_buffer2
+ // documentation inside FFmpeg.
+ int GetAudioBuffer(AVCodecContext* codec, AVFrame* frame, int flags);
+
private:
// Reads from the demuxer stream with corresponding callback method.
void ReadFromDemuxerStream();
@@ -64,7 +69,7 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
AVCodecContext* codec_context_;
// Decoded audio format.
- int bits_per_channel_;
+ int bytes_per_channel_;
ChannelLayout channel_layout_;
int channels_;
int samples_per_second_;
diff --git a/media/filters/ffmpeg_demuxer.cc b/media/filters/ffmpeg_demuxer.cc
index 451ca65ee6..70c55963f1 100644
--- a/media/filters/ffmpeg_demuxer.cc
+++ b/media/filters/ffmpeg_demuxer.cc
@@ -24,6 +24,7 @@
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/limits.h"
+#include "media/base/media_log.h"
#include "media/base/media_switches.h"
#include "media/base/video_decoder_config.h"
#include "media/ffmpeg/ffmpeg_common.h"
@@ -285,7 +286,8 @@ base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp(
FFmpegDemuxer::FFmpegDemuxer(
const scoped_refptr<base::MessageLoopProxy>& message_loop,
DataSource* data_source,
- const FFmpegNeedKeyCB& need_key_cb)
+ const FFmpegNeedKeyCB& need_key_cb,
+ const scoped_refptr<MediaLog>& media_log)
: host_(NULL),
message_loop_(message_loop),
weak_factory_(this),
@@ -293,6 +295,7 @@ FFmpegDemuxer::FFmpegDemuxer(
pending_read_(false),
pending_seek_(false),
data_source_(data_source),
+ media_log_(media_log),
bitrate_(0),
start_time_(kNoTimestamp()),
audio_disabled_(false),
diff --git a/media/filters/ffmpeg_demuxer.h b/media/filters/ffmpeg_demuxer.h
index b1c2a37770..92b3eab831 100644
--- a/media/filters/ffmpeg_demuxer.h
+++ b/media/filters/ffmpeg_demuxer.h
@@ -52,6 +52,7 @@ typedef base::Callback<void(const std::string& type,
scoped_ptr<uint8[]> init_data,
int init_data_size)> FFmpegNeedKeyCB;
+class MediaLog;
class FFmpegDemuxer;
class FFmpegGlue;
class FFmpegH264ToAnnexBBitstreamConverter;
@@ -137,7 +138,8 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
public:
FFmpegDemuxer(const scoped_refptr<base::MessageLoopProxy>& message_loop,
DataSource* data_source,
- const FFmpegNeedKeyCB& need_key_cb);
+ const FFmpegNeedKeyCB& need_key_cb,
+ const scoped_refptr<MediaLog>& media_log);
virtual ~FFmpegDemuxer();
// Demuxer implementation.
@@ -226,6 +228,8 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
// integrate with libavformat.
DataSource* data_source_;
+ scoped_refptr<MediaLog> media_log_;
+
// Derived bitrate after initialization has completed.
int bitrate_;
diff --git a/media/filters/ffmpeg_demuxer_unittest.cc b/media/filters/ffmpeg_demuxer_unittest.cc
index 1860c90e96..c1da0cc1b4 100644
--- a/media/filters/ffmpeg_demuxer_unittest.cc
+++ b/media/filters/ffmpeg_demuxer_unittest.cc
@@ -11,6 +11,7 @@
#include "base/path_service.h"
#include "base/threading/thread.h"
#include "media/base/decrypt_config.h"
+#include "media/base/media_log.h"
#include "media/base/mock_demuxer_host.h"
#include "media/base/test_helpers.h"
#include "media/ffmpeg/ffmpeg_common.h"
@@ -82,8 +83,10 @@ class FFmpegDemuxerTest : public testing::Test {
media::FFmpegNeedKeyCB need_key_cb =
base::Bind(&FFmpegDemuxerTest::NeedKeyCB, base::Unretained(this));
- demuxer_.reset(new FFmpegDemuxer(
- message_loop_.message_loop_proxy(), data_source_.get(), need_key_cb));
+ demuxer_.reset(new FFmpegDemuxer(message_loop_.message_loop_proxy(),
+ data_source_.get(),
+ need_key_cb,
+ new MediaLog()));
}
MOCK_METHOD1(CheckPoint, void(int v));
diff --git a/media/filters/gpu_video_decoder.cc b/media/filters/gpu_video_decoder.cc
index 00d2fd50df..a106ab51db 100644
--- a/media/filters/gpu_video_decoder.cc
+++ b/media/filters/gpu_video_decoder.cc
@@ -136,21 +136,28 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
"Media.GpuVideoDecoderInitializeStatus",
BindToCurrentLoop(orig_status_cb));
- if (config_.IsValidConfig()) {
+ bool previously_initialized = config_.IsValidConfig();
+#if !defined(OS_CHROMEOS) || !defined(ARCH_CPU_X86_FAMILY)
+ if (previously_initialized) {
// TODO(xhwang): Make GpuVideoDecoder reinitializable.
// See http://crbug.com/233608
DVLOG(1) << "GpuVideoDecoder reinitialization not supported.";
status_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
return;
}
+#endif
+ DVLOG(1) << "(Re)initializing GVD with config: "
+ << config.AsHumanReadableString();
- if (!IsCodedSizeSupported(config.coded_size())) {
+ // TODO(posciak): destroy and create a new VDA on codec/profile change
+ // (http://crbug.com/260224).
+ if (previously_initialized && (config_.profile() != config.profile())) {
+ DVLOG(1) << "Codec or profile changed, cannot reinitialize.";
status_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
return;
}
- vda_.reset(factories_->CreateVideoDecodeAccelerator(config.profile(), this));
- if (!vda_) {
+ if (!IsCodedSizeSupported(config.coded_size())) {
status_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
return;
}
@@ -158,6 +165,20 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
config_ = config;
needs_bitstream_conversion_ = (config.codec() == kCodecH264);
+ if (previously_initialized) {
+ // Reinitialization with a different config (but same codec and profile).
+ // VDA should handle it by detecting this in-stream by itself,
+ // no need to notify it.
+ status_cb.Run(PIPELINE_OK);
+ return;
+ }
+
+ vda_.reset(factories_->CreateVideoDecodeAccelerator(config.profile(), this));
+ if (!vda_) {
+ status_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
+ return;
+ }
+
DVLOG(3) << "GpuVideoDecoder::Initialize() succeeded.";
status_cb.Run(PIPELINE_OK);
}
diff --git a/media/filters/pipeline_integration_test_base.cc b/media/filters/pipeline_integration_test_base.cc
index 8ba90196c0..e2567adfbf 100644
--- a/media/filters/pipeline_integration_test_base.cc
+++ b/media/filters/pipeline_integration_test_base.cc
@@ -210,11 +210,13 @@ PipelineIntegrationTestBase::CreateFilterCollection(
CHECK(file_data_source->Initialize(file_path));
data_source_.reset(file_data_source);
- media::FFmpegNeedKeyCB need_key_cb =
- base::Bind(&PipelineIntegrationTestBase::DemuxerNeedKeyCB,
- base::Unretained(this));
- scoped_ptr<Demuxer> demuxer(new FFmpegDemuxer(
- message_loop_.message_loop_proxy(), data_source_.get(), need_key_cb));
+ media::FFmpegNeedKeyCB need_key_cb = base::Bind(
+ &PipelineIntegrationTestBase::DemuxerNeedKeyCB, base::Unretained(this));
+ scoped_ptr<Demuxer> demuxer(
+ new FFmpegDemuxer(message_loop_.message_loop_proxy(),
+ data_source_.get(),
+ need_key_cb,
+ new MediaLog()));
return CreateFilterCollection(demuxer.Pass(), decryptor);
}
diff --git a/media/media_untrusted.gyp b/media/media_untrusted.gyp
index 4abbe56666..dade625c33 100644
--- a/media/media_untrusted.gyp
+++ b/media/media_untrusted.gyp
@@ -20,6 +20,7 @@
'nlib_target': 'libshared_memory_support_untrusted.a',
'build_glibc': 0,
'build_newlib': 1,
+ 'build_irt': 1,
},
'dependencies': [
'../native_client/tools.gyp:prep_toolchain',
diff --git a/media/midi/midi_manager.cc b/media/midi/midi_manager.cc
index 3f74477186..05fcfa45a9 100644
--- a/media/midi/midi_manager.cc
+++ b/media/midi/midi_manager.cc
@@ -4,6 +4,10 @@
#include "media/midi/midi_manager.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/threading/thread.h"
+
namespace media {
#if !defined(OS_MACOSX)
@@ -54,9 +58,26 @@ void MIDIManager::ReceiveMIDIData(
double timestamp) {
base::AutoLock auto_lock(clients_lock_);
- // TODO(crogers): Filter out sysex.
for (ClientList::iterator i = clients_.begin(); i != clients_.end(); ++i)
(*i)->ReceiveMIDIData(port_index, data, length, timestamp);
-};
+}
+
+void MIDIManager::DispatchSendMIDIData(MIDIManagerClient* client,
+ int port_index,
+ const uint8* data,
+ size_t length,
+ double timestamp) {
+ // Lazily create the thread when first needed.
+ if (!send_thread_) {
+ send_thread_.reset(new base::Thread("MIDISendThread"));
+ send_thread_->Start();
+ send_message_loop_ = send_thread_->message_loop_proxy();
+ }
+
+ send_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&MIDIManager::SendMIDIData, base::Unretained(this),
+ client, port_index, data, length, timestamp));
+}
} // namespace media
diff --git a/media/midi/midi_manager.h b/media/midi/midi_manager.h
index e13b2c3be0..c2b26ab1b1 100644
--- a/media/midi/midi_manager.h
+++ b/media/midi/midi_manager.h
@@ -8,10 +8,16 @@
#include <set>
#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop_proxy.h"
#include "base/synchronization/lock.h"
#include "media/base/media_export.h"
#include "media/midi/midi_port_info.h"
+namespace base {
+class Thread;
+}
+
namespace media {
// A MIDIManagerClient registers with the MIDIManager to receive MIDI data.
@@ -31,6 +37,12 @@ class MEDIA_EXPORT MIDIManagerClient {
const uint8* data,
size_t length,
double timestamp) = 0;
+
+ // AccumulateMIDIBytesSent() is called to acknowledge when bytes have
+ // successfully been sent to the hardware.
+ // This happens as a result of the client having previously called
+ // MIDIManager::DispatchSendMIDIData().
+ virtual void AccumulateMIDIBytesSent(size_t n) = 0;
};
// Manages access to all MIDI hardware.
@@ -50,15 +62,18 @@ class MEDIA_EXPORT MIDIManager {
// A client calls ReleaseSession() to stop receiving MIDI data.
void EndSession(MIDIManagerClient* client);
- // SendMIDIData() sends one or more messages at the given time.
+ // DispatchSendMIDIData() schedules one or more messages to be sent
+ // at the given time on a dedicated thread.
// |port_index| represents the specific output port from output_ports().
// |data| represents a series of bytes encoding one or more MIDI messages.
// |length| is the number of bytes in |data|.
- // |timestamp| is the time to send the data, in seconds.
- virtual void SendMIDIData(int port_index,
+ // |timestamp| is the time to send the data, in seconds. A value of 0
+ // means send "now" or as soon as possible.
+ void DispatchSendMIDIData(MIDIManagerClient* client,
+ int port_index,
const uint8* data,
size_t length,
- double timestamp) = 0;
+ double timestamp);
// input_ports() is a list of MIDI ports for receiving MIDI data.
// Each individual port in this list can be identified by its
@@ -74,6 +89,13 @@ class MEDIA_EXPORT MIDIManager {
// Initializes the MIDI system, returning |true| on success.
virtual bool Initialize() = 0;
+ // Implements the platform-specific details of sending MIDI data.
+ virtual void SendMIDIData(MIDIManagerClient* client,
+ int port_index,
+ const uint8* data,
+ size_t length,
+ double timestamp) = 0;
+
void AddInputPort(const MIDIPortInfo& info);
void AddOutputPort(const MIDIPortInfo& info);
@@ -96,6 +118,11 @@ class MEDIA_EXPORT MIDIManager {
MIDIPortInfoList input_ports_;
MIDIPortInfoList output_ports_;
+ // |send_thread_| is used to send MIDI data by calling the platform-specific
+ // API.
+ scoped_ptr<base::Thread> send_thread_;
+ scoped_refptr<base::MessageLoopProxy> send_message_loop_;
+
DISALLOW_COPY_AND_ASSIGN(MIDIManager);
};
diff --git a/media/midi/midi_manager_mac.cc b/media/midi/midi_manager_mac.cc
index fa4b3fd7e1..d766bdb6c5 100644
--- a/media/midi/midi_manager_mac.cc
+++ b/media/midi/midi_manager_mac.cc
@@ -146,12 +146,12 @@ void MIDIManagerMac::ReadMidi(MIDIEndpointRef source,
}
}
-void MIDIManagerMac::SendMIDIData(int port_index,
+void MIDIManagerMac::SendMIDIData(MIDIManagerClient* client,
+ int port_index,
const uint8* data,
size_t length,
double timestamp) {
- // TODO(crogers): Filter out sysex.
-
+ // System Exclusive has already been filtered.
MIDITimeStamp coremidi_timestamp = SecondsToMIDITimeStamp(timestamp);
midi_packet_ = MIDIPacketListAdd(
@@ -175,6 +175,8 @@ void MIDIManagerMac::SendMIDIData(int port_index,
// Re-initialize for next time.
midi_packet_ = MIDIPacketListInit(packet_list_);
+
+ client->AccumulateMIDIBytesSent(length);
}
MIDIPortInfo MIDIManagerMac::GetPortInfoFromEndpoint(
diff --git a/media/midi/midi_manager_mac.h b/media/midi/midi_manager_mac.h
index f513e119c0..ed7b524f5c 100644
--- a/media/midi/midi_manager_mac.h
+++ b/media/midi/midi_manager_mac.h
@@ -23,7 +23,8 @@ class MEDIA_EXPORT MIDIManagerMac : public MIDIManager {
// MIDIManager implementation.
virtual bool Initialize() OVERRIDE;
- virtual void SendMIDIData(int port_index,
+ virtual void SendMIDIData(MIDIManagerClient* client,
+ int port_index,
const uint8* data,
size_t length,
double timestamp) OVERRIDE;
diff --git a/media/mp4/aac_unittest.cc b/media/mp4/aac_unittest.cc
index c5b23d1161..d9ce22db3f 100644
--- a/media/mp4/aac_unittest.cc
+++ b/media/mp4/aac_unittest.cc
@@ -39,7 +39,7 @@ TEST(AACTest, ExtensionTest) {
// Mono channel layout should only be reported if SBR is not
// specified. Otherwise stereo should be reported.
// See ISO-14496-3 Section 1.6.6.1.2 for details about this special casing.
-TEST(AACTest, TestImplicitSBR_ChannelConfig0) {
+TEST(AACTest, ImplicitSBR_ChannelConfig0) {
AAC aac;
uint8 buffer[] = {0x13, 0x08};
std::vector<uint8> data;
@@ -58,7 +58,7 @@ TEST(AACTest, TestImplicitSBR_ChannelConfig0) {
}
// Tests implicit SBR with a stereo channel config.
-TEST(AACTest, TestImplicitSBR_ChannelConfig1) {
+TEST(AACTest, ImplicitSBR_ChannelConfig1) {
AAC aac;
uint8 buffer[] = {0x13, 0x10};
std::vector<uint8> data;
diff --git a/media/mp4/box_reader_unittest.cc b/media/mp4/box_reader_unittest.cc
index 95b09ec8da..99d9975fd2 100644
--- a/media/mp4/box_reader_unittest.cc
+++ b/media/mp4/box_reader_unittest.cc
@@ -181,7 +181,7 @@ TEST_F(BoxReaderTest, ReadAllChildrenTest) {
EXPECT_EQ(kids[0].val, 0xdeadbeef); // Ensure order is preserved
}
-TEST_F(BoxReaderTest, TestSkippingBloc) {
+TEST_F(BoxReaderTest, SkippingBloc) {
static const uint8 kData[] = {
0x00, 0x00, 0x00, 0x09, 'b', 'l', 'o', 'c', 0x00
};
diff --git a/media/mp4/mp4_stream_parser_unittest.cc b/media/mp4/mp4_stream_parser_unittest.cc
index 199dab6218..fa880ac38c 100644
--- a/media/mp4/mp4_stream_parser_unittest.cc
+++ b/media/mp4/mp4_stream_parser_unittest.cc
@@ -142,24 +142,24 @@ class MP4StreamParserTest : public testing::Test {
}
};
-TEST_F(MP4StreamParserTest, TestUnalignedAppend) {
+TEST_F(MP4StreamParserTest, UnalignedAppend) {
// Test small, non-segment-aligned appends (small enough to exercise
// incremental append system)
ParseMP4File("bear-1280x720-av_frag.mp4", 512);
}
-TEST_F(MP4StreamParserTest, TestBytewiseAppend) {
+TEST_F(MP4StreamParserTest, BytewiseAppend) {
// Ensure no incremental errors occur when parsing
ParseMP4File("bear-1280x720-av_frag.mp4", 1);
}
-TEST_F(MP4StreamParserTest, TestMultiFragmentAppend) {
+TEST_F(MP4StreamParserTest, MultiFragmentAppend) {
// Large size ensures multiple fragments are appended in one call (size is
// larger than this particular test file)
ParseMP4File("bear-1280x720-av_frag.mp4", 768432);
}
-TEST_F(MP4StreamParserTest, TestFlush) {
+TEST_F(MP4StreamParserTest, Flush) {
// Flush while reading sample data, then start a new stream.
InitializeParser();
@@ -172,7 +172,7 @@ TEST_F(MP4StreamParserTest, TestFlush) {
512));
}
-TEST_F(MP4StreamParserTest, TestReinitialization) {
+TEST_F(MP4StreamParserTest, Reinitialization) {
InitializeParser();
scoped_refptr<DecoderBuffer> buffer =
@@ -185,7 +185,7 @@ TEST_F(MP4StreamParserTest, TestReinitialization) {
512));
}
-TEST_F(MP4StreamParserTest, TestMPEG2_AAC_LC) {
+TEST_F(MP4StreamParserTest, MPEG2_AAC_LC) {
std::set<int> audio_object_types;
audio_object_types.insert(kISO_13818_7_AAC_LC);
parser_.reset(new MP4StreamParser(audio_object_types, false));
@@ -193,7 +193,7 @@ TEST_F(MP4StreamParserTest, TestMPEG2_AAC_LC) {
}
// Test that a moov box is not always required after Flush() is called.
-TEST_F(MP4StreamParserTest, TestNoMoovAfterFlush) {
+TEST_F(MP4StreamParserTest, NoMoovAfterFlush) {
InitializeParser();
scoped_refptr<DecoderBuffer> buffer =
diff --git a/media/mp4/offset_byte_queue_unittest.cc b/media/mp4/offset_byte_queue_unittest.cc
index e2d40dc67c..b9afbc8e1b 100644
--- a/media/mp4/offset_byte_queue_unittest.cc
+++ b/media/mp4/offset_byte_queue_unittest.cc
@@ -31,7 +31,7 @@ class OffsetByteQueueTest : public testing::Test {
scoped_ptr<OffsetByteQueue> queue_;
};
-TEST_F(OffsetByteQueueTest, TestSetUp) {
+TEST_F(OffsetByteQueueTest, SetUp) {
EXPECT_EQ(384, queue_->head());
EXPECT_EQ(512, queue_->tail());
@@ -44,7 +44,7 @@ TEST_F(OffsetByteQueueTest, TestSetUp) {
EXPECT_EQ(255, buf[size-1]);
}
-TEST_F(OffsetByteQueueTest, TestPeekAt) {
+TEST_F(OffsetByteQueueTest, PeekAt) {
const uint8* buf;
int size;
@@ -57,7 +57,7 @@ TEST_F(OffsetByteQueueTest, TestPeekAt) {
EXPECT_EQ(0, size);
}
-TEST_F(OffsetByteQueueTest, TestTrim) {
+TEST_F(OffsetByteQueueTest, Trim) {
EXPECT_TRUE(queue_->Trim(128));
EXPECT_TRUE(queue_->Trim(384));
EXPECT_EQ(384, queue_->head());
diff --git a/media/tools/demuxer_bench/demuxer_bench.cc b/media/tools/demuxer_bench/demuxer_bench.cc
index b828fd8cb6..d38e587743 100644
--- a/media/tools/demuxer_bench/demuxer_bench.cc
+++ b/media/tools/demuxer_bench/demuxer_bench.cc
@@ -15,6 +15,7 @@
#include "base/message_loop/message_loop.h"
#include "base/strings/string_number_conversions.h"
#include "media/base/media.h"
+#include "media/base/media_log.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/file_data_source.h"
@@ -194,8 +195,10 @@ int main(int argc, char** argv) {
CHECK(data_source.Initialize(file_path));
media::FFmpegNeedKeyCB need_key_cb = base::Bind(&NeedKey);
- media::FFmpegDemuxer demuxer(
- message_loop.message_loop_proxy(), &data_source, need_key_cb);
+ media::FFmpegDemuxer demuxer(message_loop.message_loop_proxy(),
+ &data_source,
+ need_key_cb,
+ new media::MediaLog());
demuxer.Initialize(&demuxer_host, base::Bind(
&QuitLoopWithStatus, &message_loop));
diff --git a/media/tools/player_x11/player_x11.cc b/media/tools/player_x11/player_x11.cc
index cb2b4d7dec..cef891247a 100644
--- a/media/tools/player_x11/player_x11.cc
+++ b/media/tools/player_x11/player_x11.cc
@@ -279,7 +279,7 @@ int main(int argc, char** argv) {
CreateDataSource(filename), command_line->HasSwitch("streaming")));
scoped_ptr<media::Demuxer> demuxer(new media::FFmpegDemuxer(
media_thread.message_loop_proxy(), data_source.get(),
- base::Bind(&NeedKey)));
+ base::Bind(&NeedKey), new media::MediaLog()));
media::Pipeline pipeline(media_thread.message_loop_proxy(),
new media::MediaLog());
diff --git a/media/tools/seek_tester/seek_tester.cc b/media/tools/seek_tester/seek_tester.cc
index 4826a51a5f..d3f6a35044 100644
--- a/media/tools/seek_tester/seek_tester.cc
+++ b/media/tools/seek_tester/seek_tester.cc
@@ -20,6 +20,7 @@
#include "base/message_loop/message_loop.h"
#include "base/strings/string_number_conversions.h"
#include "media/base/media.h"
+#include "media/base/media_log.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/file_data_source.h"
@@ -73,8 +74,11 @@ int main(int argc, char** argv) {
base::MessageLoop loop;
media::PipelineStatusCB quitter = base::Bind(&QuitMessageLoop, &loop);
media::FFmpegNeedKeyCB need_key_cb = base::Bind(&NeedKey);
- scoped_ptr<media::FFmpegDemuxer> demuxer(new media::FFmpegDemuxer(
- loop.message_loop_proxy(), file_data_source.get(), need_key_cb));
+ scoped_ptr<media::FFmpegDemuxer> demuxer(
+ new media::FFmpegDemuxer(loop.message_loop_proxy(),
+ file_data_source.get(),
+ need_key_cb,
+ new media::MediaLog()));
demuxer->Initialize(&host, quitter);
loop.Run();
diff --git a/media/video/capture/android/video_capture_device_android.cc b/media/video/capture/android/video_capture_device_android.cc
index f7a3d08656..d4d7350779 100644
--- a/media/video/capture/android/video_capture_device_android.cc
+++ b/media/video/capture/android/video_capture_device_android.cc
@@ -122,9 +122,7 @@ const VideoCaptureDevice::Name& VideoCaptureDeviceAndroid::device_name() {
}
void VideoCaptureDeviceAndroid::Allocate(
- int width,
- int height,
- int frame_rate,
+ const VideoCaptureCapability& capture_format,
EventHandler* observer) {
{
base::AutoLock lock(lock_);
@@ -138,9 +136,9 @@ void VideoCaptureDeviceAndroid::Allocate(
jboolean ret = Java_VideoCapture_allocate(env,
j_capture_.obj(),
- width,
- height,
- frame_rate);
+ capture_format.width,
+ capture_format.height,
+ capture_format.frame_rate);
if (!ret) {
SetErrorState("failed to allocate");
return;
diff --git a/media/video/capture/android/video_capture_device_android.h b/media/video/capture/android/video_capture_device_android.h
index 2034f1fe4c..29a5fc7cf5 100644
--- a/media/video/capture/android/video_capture_device_android.h
+++ b/media/video/capture/android/video_capture_device_android.h
@@ -28,10 +28,8 @@ class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice {
static bool RegisterVideoCaptureDevice(JNIEnv* env);
// VideoCaptureDevice implementation.
- virtual void Allocate(int width,
- int height,
- int frame_rate,
- EventHandler* observer) OVERRIDE;
+ virtual void Allocate(const VideoCaptureCapability& capture_format,
+ EventHandler* observer) OVERRIDE;
virtual void Start() OVERRIDE;
virtual void Stop() OVERRIDE;
virtual void DeAllocate() OVERRIDE;
diff --git a/media/video/capture/fake_video_capture_device.cc b/media/video/capture/fake_video_capture_device.cc
index 0201b098af..665f728b3c 100644
--- a/media/video/capture/fake_video_capture_device.cc
+++ b/media/video/capture/fake_video_capture_device.cc
@@ -18,6 +18,7 @@ namespace media {
static const int kFakeCaptureTimeoutMs = 50;
static const int kFakeCaptureBeepCycle = 20; // Visual beep every 1s.
+static const int kFakeCaptureCapabilityChangePeriod = 30;
enum { kNumberOfFakeDevices = 2 };
bool FakeVideoCaptureDevice::fail_next_create_ = false;
@@ -56,10 +57,7 @@ FakeVideoCaptureDevice::FakeVideoCaptureDevice(const Name& device_name)
observer_(NULL),
state_(kIdle),
capture_thread_("CaptureThread"),
- frame_size_(0),
- frame_count_(0),
- frame_width_(0),
- frame_height_(0) {
+ frame_count_(0) {
}
FakeVideoCaptureDevice::~FakeVideoCaptureDevice() {
@@ -68,39 +66,52 @@ FakeVideoCaptureDevice::~FakeVideoCaptureDevice() {
DCHECK(!capture_thread_.IsRunning());
}
-void FakeVideoCaptureDevice::Allocate(int width,
- int height,
- int frame_rate,
- EventHandler* observer) {
+void FakeVideoCaptureDevice::Allocate(
+ const VideoCaptureCapability& capture_format,
+ EventHandler* observer) {
+ capture_format_.frame_size_type = capture_format.frame_size_type;
+ if (capture_format.frame_size_type == VariableResolutionVideoCaptureDevice)
+ PopulateCapabilitiesRoster();
+
if (state_ != kIdle) {
return; // Wrong state.
}
observer_ = observer;
- VideoCaptureCapability current_settings;
- current_settings.color = VideoCaptureCapability::kI420;
- current_settings.expected_capture_delay = 0;
- current_settings.interlaced = false;
- if (width > 320) { // VGA
- current_settings.width = 640;
- current_settings.height = 480;
- current_settings.frame_rate = 30;
+ capture_format_.color = VideoCaptureCapability::kI420;
+ capture_format_.expected_capture_delay = 0;
+ capture_format_.interlaced = false;
+ if (capture_format.width > 320) { // VGA
+ capture_format_.width = 640;
+ capture_format_.height = 480;
+ capture_format_.frame_rate = 30;
} else { // QVGA
- current_settings.width = 320;
- current_settings.height = 240;
- current_settings.frame_rate = 30;
+ capture_format_.width = 320;
+ capture_format_.height = 240;
+ capture_format_.frame_rate = 30;
}
size_t fake_frame_size =
- current_settings.width * current_settings.height * 3 / 2;
+ capture_format_.width * capture_format_.height * 3 / 2;
fake_frame_.reset(new uint8[fake_frame_size]);
- memset(fake_frame_.get(), 0, fake_frame_size);
- frame_size_ = fake_frame_size;
- frame_width_ = current_settings.width;
- frame_height_ = current_settings.height;
state_ = kAllocated;
- observer_->OnFrameInfo(current_settings);
+ observer_->OnFrameInfo(capture_format_);
+}
+
+void FakeVideoCaptureDevice::Reallocate() {
+ DCHECK_EQ(state_, kCapturing);
+ capture_format_ = capabilities_roster_.at(++capabilities_roster_index_ %
+ capabilities_roster_.size());
+ DCHECK_EQ(capture_format_.color, VideoCaptureCapability::kI420);
+ DVLOG(3) << "Reallocating FakeVideoCaptureDevice, new capture resolution ("
+ << capture_format_.width << "x" << capture_format_.height << ")";
+
+ size_t fake_frame_size =
+ capture_format_.width * capture_format_.height * 3 / 2;
+ fake_frame_.reset(new uint8[fake_frame_size]);
+
+ observer_->OnFrameInfoChanged(capture_format_);
}
void FakeVideoCaptureDevice::Start() {
@@ -140,19 +151,22 @@ void FakeVideoCaptureDevice::OnCaptureTask() {
return;
}
- memset(fake_frame_.get(), 0, frame_size_);
+ int frame_size = capture_format_.width * capture_format_.height * 3 / 2;
+ memset(fake_frame_.get(), 0, frame_size);
SkBitmap bitmap;
- bitmap.setConfig(SkBitmap::kA8_Config, frame_width_, frame_height_,
- frame_width_);
+ bitmap.setConfig(SkBitmap::kA8_Config,
+ capture_format_.width,
+ capture_format_.height,
+ capture_format_.width);
bitmap.setPixels(fake_frame_.get());
SkCanvas canvas(bitmap);
// Draw a sweeping circle to show an animation.
- int radius = std::min(frame_width_, frame_height_) / 4;
+ int radius = std::min(capture_format_.width, capture_format_.height) / 4;
SkRect rect = SkRect::MakeXYWH(
- frame_width_ / 2 - radius, frame_height_ / 2 - radius,
+ capture_format_.width / 2 - radius, capture_format_.height / 2 - radius,
2 * radius, 2 * radius);
SkPaint paint;
@@ -186,19 +200,53 @@ void FakeVideoCaptureDevice::OnCaptureTask() {
// Generate a synchronized beep sound if there is one audio input
// stream created.
FakeAudioInputStream::BeepOnce();
- }
+ }
frame_count_++;
// Give the captured frame to the observer.
observer_->OnIncomingCapturedFrame(
- fake_frame_.get(), frame_size_, base::Time::Now(), 0, false, false);
+ fake_frame_.get(), frame_size, base::Time::Now(), 0, false, false);
+ if (!(frame_count_ % kFakeCaptureCapabilityChangePeriod) &&
+ (capture_format_.frame_size_type ==
+ VariableResolutionVideoCaptureDevice)) {
+ Reallocate();
+ }
// Reschedule next CaptureTask.
capture_thread_.message_loop()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&FakeVideoCaptureDevice::OnCaptureTask,
- base::Unretained(this)),
- base::TimeDelta::FromMilliseconds(kFakeCaptureTimeoutMs));
+ FROM_HERE,
+ base::Bind(&FakeVideoCaptureDevice::OnCaptureTask,
+ base::Unretained(this)),
+ base::TimeDelta::FromMilliseconds(kFakeCaptureTimeoutMs));
+}
+
+void FakeVideoCaptureDevice::PopulateCapabilitiesRoster() {
+ capabilities_roster_.push_back(
+ media::VideoCaptureCapability(320,
+ 240,
+ 30,
+ VideoCaptureCapability::kI420,
+ 0,
+ false,
+ VariableResolutionVideoCaptureDevice));
+ capabilities_roster_.push_back(
+ media::VideoCaptureCapability(640,
+ 480,
+ 30,
+ VideoCaptureCapability::kI420,
+ 0,
+ false,
+ VariableResolutionVideoCaptureDevice));
+ capabilities_roster_.push_back(
+ media::VideoCaptureCapability(800,
+ 600,
+ 30,
+ VideoCaptureCapability::kI420,
+ 0,
+ false,
+ VariableResolutionVideoCaptureDevice));
+
+ capabilities_roster_index_ = 0;
}
} // namespace media
diff --git a/media/video/capture/fake_video_capture_device.h b/media/video/capture/fake_video_capture_device.h
index d674bc8b34..4804c2885a 100644
--- a/media/video/capture/fake_video_capture_device.h
+++ b/media/video/capture/fake_video_capture_device.h
@@ -27,10 +27,8 @@ class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice {
static void GetDeviceNames(Names* device_names);
// VideoCaptureDevice implementation.
- virtual void Allocate(int width,
- int height,
- int frame_rate,
- VideoCaptureDevice::EventHandler* observer) OVERRIDE;
+ virtual void Allocate(const VideoCaptureCapability& capture_format,
+ VideoCaptureDevice::EventHandler* observer) OVERRIDE;
virtual void Start() OVERRIDE;
virtual void Stop() OVERRIDE;
virtual void DeAllocate() OVERRIDE;
@@ -49,15 +47,23 @@ class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice {
// Called on the capture_thread_.
void OnCaptureTask();
+ // EXPERIMENTAL, similar to allocate, but changes resolution and calls
+ // observer->OnFrameInfoChanged(VideoCaptureCapability&)
+ void Reallocate();
+ void PopulateCapabilitiesRoster();
+
Name device_name_;
VideoCaptureDevice::EventHandler* observer_;
InternalState state_;
base::Thread capture_thread_;
- int frame_size_;
scoped_ptr<uint8[]> fake_frame_;
int frame_count_;
- int frame_width_;
- int frame_height_;
+ VideoCaptureCapability capture_format_;
+
+ // When the device is configured as mutating video captures, this vector
+ // holds the available ones which are used in sequence, restarting at the end.
+ std::vector<VideoCaptureCapability> capabilities_roster_;
+ int capabilities_roster_index_;
static bool fail_next_create_;
diff --git a/media/video/capture/linux/video_capture_device_linux.cc b/media/video/capture/linux/video_capture_device_linux.cc
index f8755e41fc..dd431a8b54 100644
--- a/media/video/capture/linux/video_capture_device_linux.cc
+++ b/media/video/capture/linux/video_capture_device_linux.cc
@@ -197,18 +197,21 @@ VideoCaptureDeviceLinux::~VideoCaptureDeviceLinux() {
}
}
-void VideoCaptureDeviceLinux::Allocate(int width,
- int height,
- int frame_rate,
- EventHandler* observer) {
+void VideoCaptureDeviceLinux::Allocate(
+ const VideoCaptureCapability& capture_format,
+ EventHandler* observer) {
if (v4l2_thread_.IsRunning()) {
return; // Wrong state.
}
v4l2_thread_.Start();
- v4l2_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&VideoCaptureDeviceLinux::OnAllocate, base::Unretained(this),
- width, height, frame_rate, observer));
+ v4l2_thread_.message_loop()
+ ->PostTask(FROM_HERE,
+ base::Bind(&VideoCaptureDeviceLinux::OnAllocate,
+ base::Unretained(this),
+ capture_format.width,
+ capture_format.height,
+ capture_format.frame_rate,
+ observer));
}
void VideoCaptureDeviceLinux::Start() {
diff --git a/media/video/capture/linux/video_capture_device_linux.h b/media/video/capture/linux/video_capture_device_linux.h
index 39b12edfb6..dc35fd452c 100644
--- a/media/video/capture/linux/video_capture_device_linux.h
+++ b/media/video/capture/linux/video_capture_device_linux.h
@@ -24,10 +24,8 @@ class VideoCaptureDeviceLinux : public VideoCaptureDevice {
virtual ~VideoCaptureDeviceLinux();
// VideoCaptureDevice implementation.
- virtual void Allocate(int width,
- int height,
- int frame_rate,
- EventHandler* observer) OVERRIDE;
+ virtual void Allocate(const VideoCaptureCapability& capture_format,
+ EventHandler* observer) OVERRIDE;
virtual void Start() OVERRIDE;
virtual void Stop() OVERRIDE;
virtual void DeAllocate() OVERRIDE;
diff --git a/media/video/capture/mac/video_capture_device_mac.h b/media/video/capture/mac/video_capture_device_mac.h
index dca6020506..6ca24f3246 100644
--- a/media/video/capture/mac/video_capture_device_mac.h
+++ b/media/video/capture/mac/video_capture_device_mac.h
@@ -25,10 +25,8 @@ class VideoCaptureDeviceMac : public VideoCaptureDevice {
virtual ~VideoCaptureDeviceMac();
// VideoCaptureDevice implementation.
- virtual void Allocate(int width,
- int height,
- int frame_rate,
- VideoCaptureDevice::EventHandler* observer) OVERRIDE;
+ virtual void Allocate(const VideoCaptureCapability& capture_format,
+ VideoCaptureDevice::EventHandler* observer) OVERRIDE;
virtual void Start() OVERRIDE;
virtual void Stop() OVERRIDE;
virtual void DeAllocate() OVERRIDE;
diff --git a/media/video/capture/mac/video_capture_device_mac.mm b/media/video/capture/mac/video_capture_device_mac.mm
index c47ff5627c..18912170a1 100644
--- a/media/video/capture/mac/video_capture_device_mac.mm
+++ b/media/video/capture/mac/video_capture_device_mac.mm
@@ -102,16 +102,21 @@ VideoCaptureDeviceMac::~VideoCaptureDeviceMac() {
[capture_device_ release];
}
-void VideoCaptureDeviceMac::Allocate(int width, int height, int frame_rate,
- EventHandler* observer) {
+void VideoCaptureDeviceMac::Allocate(
+ const VideoCaptureCapability& capture_format,
+ EventHandler* observer) {
if (state_ != kIdle) {
return;
}
+ int width = capture_format.width;
+ int height = capture_format.height;
+ int frame_rate = capture_format.frame_rate;
// QTKit can scale captured frame to any size requested, which would lead to
// undesired aspect ratio change. Tries to open the camera with a natively
// supported format and let the client to crop/pad the captured frames.
- GetBestMatchSupportedResolution(&width, &height);
+ GetBestMatchSupportedResolution(&width,
+ &height);
observer_ = observer;
NSString* deviceId =
diff --git a/media/video/capture/video_capture_device.h b/media/video/capture/video_capture_device.h
index cbddce7484..4480116751 100644
--- a/media/video/capture/video_capture_device.h
+++ b/media/video/capture/video_capture_device.h
@@ -204,9 +204,7 @@ class MEDIA_EXPORT VideoCaptureDevice {
// is called informing of the resulting resolution and frame rate.
// DeAllocate() must be called before this function can be called again and
// before the object is deleted.
- virtual void Allocate(int width,
- int height,
- int frame_rate,
+ virtual void Allocate(const VideoCaptureCapability& capture_format,
EventHandler* observer) = 0;
// Start capturing video frames. Allocate must be called before this function.
diff --git a/media/video/capture/video_capture_device_dummy.cc b/media/video/capture/video_capture_device_dummy.cc
index 8d0355d122..02752edc54 100644
--- a/media/video/capture/video_capture_device_dummy.cc
+++ b/media/video/capture/video_capture_device_dummy.cc
@@ -17,8 +17,9 @@ VideoCaptureDeviceDummy::VideoCaptureDeviceDummy() {}
VideoCaptureDeviceDummy::~VideoCaptureDeviceDummy() {}
void VideoCaptureDeviceDummy::Allocate(
- int width, int height, int frame_rate,
- VideoCaptureDevice::EventHandler* observer) {}
+ const VideoCaptureCapability& capture_format,
+ VideoCaptureDevice::EventHandler* observer) {
+}
void VideoCaptureDeviceDummy::Start() {}
diff --git a/media/video/capture/video_capture_device_dummy.h b/media/video/capture/video_capture_device_dummy.h
index 395a46b154..c4a95cb0ce 100644
--- a/media/video/capture/video_capture_device_dummy.h
+++ b/media/video/capture/video_capture_device_dummy.h
@@ -19,8 +19,8 @@ namespace media {
class VideoCaptureDeviceDummy : public VideoCaptureDevice {
public:
- virtual void Allocate(int width, int height, int frame_rate,
- VideoCaptureDevice::EventHandler* observer) OVERRIDE;
+ virtual void Allocate(const VideoCaptureCapability& capture_format,
+ VideoCaptureDevice::EventHandler* observer) OVERRIDE;
virtual void Start() OVERRIDE;
virtual void Stop() OVERRIDE;
virtual void DeAllocate() OVERRIDE;
diff --git a/media/video/capture/video_capture_device_unittest.cc b/media/video/capture/video_capture_device_unittest.cc
index 93638195b4..e39c59b054 100644
--- a/media/video/capture/video_capture_device_unittest.cc
+++ b/media/video/capture/video_capture_device_unittest.cc
@@ -54,6 +54,7 @@ using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Return;
using ::testing::AtLeast;
+using ::testing::SaveArg;
namespace media {
@@ -61,8 +62,8 @@ class MockFrameObserver : public media::VideoCaptureDevice::EventHandler {
public:
MOCK_METHOD0(ReserveOutputBuffer, scoped_refptr<media::VideoFrame>());
MOCK_METHOD0(OnErr, void());
- MOCK_METHOD4(OnFrameInfo, void(int width, int height, int frame_rate,
- VideoCaptureCapability::Format format));
+ MOCK_METHOD1(OnFrameInfo, void(const VideoCaptureCapability&));
+ MOCK_METHOD1(OnFrameInfoChanged, void(const VideoCaptureCapability&));
explicit MockFrameObserver(base::WaitableEvent* wait_event)
: wait_event_(wait_event) {}
@@ -71,11 +72,6 @@ class MockFrameObserver : public media::VideoCaptureDevice::EventHandler {
OnErr();
}
- virtual void OnFrameInfo(
- const VideoCaptureCapability& info) OVERRIDE {
- OnFrameInfo(info.width, info.height, info.frame_rate, info.color);
- }
-
virtual void OnIncomingCapturedFrame(
const uint8* data,
int length,
@@ -151,19 +147,29 @@ TEST_F(VideoCaptureDeviceTest, CaptureVGA) {
scoped_ptr<VideoCaptureDevice> device(
VideoCaptureDevice::Create(names_.front()));
ASSERT_FALSE(device.get() == NULL);
-
+ DVLOG(1) << names_.front().id();
// Get info about the new resolution.
- EXPECT_CALL(*frame_observer_, OnFrameInfo(640, 480, _, _))
- .Times(1);
+ VideoCaptureCapability rx_capability;
+ EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
+ .Times(1).WillOnce(SaveArg<0>(&rx_capability));
EXPECT_CALL(*frame_observer_, OnErr())
.Times(0);
- device->Allocate(640, 480, 30, frame_observer_.get());
+ VideoCaptureCapability capture_format(640,
+ 480,
+ 30,
+ VideoCaptureCapability::kI420,
+ 0,
+ false,
+ ConstantResolutionVideoCaptureDevice);
+ device->Allocate(capture_format, frame_observer_.get());
device->Start();
// Get captured video frames.
PostQuitTask();
EXPECT_TRUE(wait_event_.TimedWait(TestTimeouts::action_max_timeout()));
+ EXPECT_EQ(rx_capability.width, 640);
+ EXPECT_EQ(rx_capability.height, 480);
device->Stop();
device->DeAllocate();
}
@@ -182,13 +188,20 @@ TEST_F(VideoCaptureDeviceTest, Capture720p) {
// Get info about the new resolution.
// We don't care about the resulting resolution or frame rate as it might
// be different from one machine to the next.
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_, _, _, _))
+ EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
.Times(1);
EXPECT_CALL(*frame_observer_, OnErr())
.Times(0);
- device->Allocate(1280, 720, 30, frame_observer_.get());
+ VideoCaptureCapability capture_format(1280,
+ 720,
+ 30,
+ VideoCaptureCapability::kI420,
+ 0,
+ false,
+ ConstantResolutionVideoCaptureDevice);
+ device->Allocate(capture_format, frame_observer_.get());
device->Start();
// Get captured video frames.
PostQuitTask();
@@ -210,12 +223,22 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_AllocateBadSize) {
EXPECT_CALL(*frame_observer_, OnErr())
.Times(0);
- // get info about the new resolution
- EXPECT_CALL(*frame_observer_, OnFrameInfo(640, 480 , _, _))
- .Times(AtLeast(1));
-
- device->Allocate(637, 472, 35, frame_observer_.get());
+ // Get info about the new resolution.
+ VideoCaptureCapability rx_capability;
+ EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
+ .Times(AtLeast(1)).WillOnce(SaveArg<0>(&rx_capability));
+
+ VideoCaptureCapability capture_format(637,
+ 472,
+ 35,
+ VideoCaptureCapability::kI420,
+ 0,
+ false,
+ ConstantResolutionVideoCaptureDevice);
+ device->Allocate(capture_format, frame_observer_.get());
device->DeAllocate();
+ EXPECT_EQ(rx_capability.width, 640);
+ EXPECT_EQ(rx_capability.height, 480);
}
TEST_F(VideoCaptureDeviceTest, ReAllocateCamera) {
@@ -229,23 +252,51 @@ TEST_F(VideoCaptureDeviceTest, ReAllocateCamera) {
ASSERT_TRUE(device.get() != NULL);
EXPECT_CALL(*frame_observer_, OnErr())
.Times(0);
- // get info about the new resolution
- EXPECT_CALL(*frame_observer_, OnFrameInfo(640, 480, _, _));
-
- EXPECT_CALL(*frame_observer_, OnFrameInfo(320, 240, _, _));
-
- device->Allocate(640, 480, 30, frame_observer_.get());
+ // Get info about the new resolution.
+ VideoCaptureCapability rx_capability_1;
+ VideoCaptureCapability rx_capability_2;
+ VideoCaptureCapability capture_format_1(640,
+ 480,
+ 30,
+ VideoCaptureCapability::kI420,
+ 0,
+ false,
+ ConstantResolutionVideoCaptureDevice);
+ VideoCaptureCapability capture_format_2(1280,
+ 1024,
+ 30,
+ VideoCaptureCapability::kI420,
+ 0,
+ false,
+ ConstantResolutionVideoCaptureDevice);
+ VideoCaptureCapability capture_format_3(320,
+ 240,
+ 30,
+ VideoCaptureCapability::kI420,
+ 0,
+ false,
+ ConstantResolutionVideoCaptureDevice);
+
+ EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
+ .WillOnce(SaveArg<0>(&rx_capability_1));
+ device->Allocate(capture_format_1, frame_observer_.get());
device->Start();
// Nothing shall happen.
- device->Allocate(1280, 1024, 30, frame_observer_.get());
+ device->Allocate(capture_format_2, frame_observer_.get());
device->DeAllocate();
// Allocate new size 320, 240
- device->Allocate(320, 240, 30, frame_observer_.get());
+ EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
+ .WillOnce(SaveArg<0>(&rx_capability_2));
+ device->Allocate(capture_format_3, frame_observer_.get());
device->Start();
// Get captured video frames.
PostQuitTask();
EXPECT_TRUE(wait_event_.TimedWait(TestTimeouts::action_max_timeout()));
+ EXPECT_EQ(rx_capability_1.width, 640);
+ EXPECT_EQ(rx_capability_1.height, 480);
+ EXPECT_EQ(rx_capability_2.width, 320);
+ EXPECT_EQ(rx_capability_2.height, 240);
device->Stop();
device->DeAllocate();
}
@@ -263,18 +314,30 @@ TEST_F(VideoCaptureDeviceTest, DeAllocateCameraWhileRunning) {
EXPECT_CALL(*frame_observer_, OnErr())
.Times(0);
// Get info about the new resolution.
- EXPECT_CALL(*frame_observer_, OnFrameInfo(640, 480, 30, _));
-
- device->Allocate(640, 480, 30, frame_observer_.get());
+ VideoCaptureCapability rx_capability;
+ EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
+ .WillOnce(SaveArg<0>(&rx_capability));
+
+ VideoCaptureCapability capture_format(640,
+ 480,
+ 30,
+ VideoCaptureCapability::kI420,
+ 0,
+ false,
+ ConstantResolutionVideoCaptureDevice);
+ device->Allocate(capture_format, frame_observer_.get());
device->Start();
// Get captured video frames.
PostQuitTask();
EXPECT_TRUE(wait_event_.TimedWait(TestTimeouts::action_max_timeout()));
+ EXPECT_EQ(rx_capability.width, 640);
+ EXPECT_EQ(rx_capability.height, 480);
+ EXPECT_EQ(rx_capability.frame_rate, 30);
device->DeAllocate();
}
-TEST_F(VideoCaptureDeviceTest, TestFakeCapture) {
+TEST_F(VideoCaptureDeviceTest, FakeCapture) {
VideoCaptureDevice::Names names;
FakeVideoCaptureDevice::GetDeviceNames(&names);
@@ -286,16 +349,27 @@ TEST_F(VideoCaptureDeviceTest, TestFakeCapture) {
ASSERT_TRUE(device.get() != NULL);
// Get info about the new resolution.
- EXPECT_CALL(*frame_observer_, OnFrameInfo(640, 480, 30, _))
- .Times(1);
+ VideoCaptureCapability rx_capability;
+ EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
+ .Times(1).WillOnce(SaveArg<0>(&rx_capability));
EXPECT_CALL(*frame_observer_, OnErr())
.Times(0);
- device->Allocate(640, 480, 30, frame_observer_.get());
+ VideoCaptureCapability capture_format(640,
+ 480,
+ 30,
+ VideoCaptureCapability::kI420,
+ 0,
+ false,
+ ConstantResolutionVideoCaptureDevice);
+ device->Allocate(capture_format, frame_observer_.get());
device->Start();
EXPECT_TRUE(wait_event_.TimedWait(TestTimeouts::action_max_timeout()));
+ EXPECT_EQ(rx_capability.width, 640);
+ EXPECT_EQ(rx_capability.height, 480);
+ EXPECT_EQ(rx_capability.frame_rate, 30);
device->Stop();
device->DeAllocate();
}
@@ -315,15 +389,64 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_CaptureMjpeg) {
.Times(0);
// Verify we get MJPEG from the device. Not all devices can capture 1280x720
// @ 30 fps, so we don't care about the exact resolution we get.
- EXPECT_CALL(*frame_observer_,
- OnFrameInfo(_, _, _, VideoCaptureCapability::kMJPEG));
-
- device->Allocate(1280, 720, 30, frame_observer_.get());
+ VideoCaptureCapability rx_capability;
+ EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
+ .WillOnce(SaveArg<0>(&rx_capability));
+
+ VideoCaptureCapability capture_format(1280,
+ 720,
+ 30,
+ VideoCaptureCapability::kMJPEG,
+ 0,
+ false,
+ ConstantResolutionVideoCaptureDevice);
+ device->Allocate(capture_format, frame_observer_.get());
device->Start();
// Get captured video frames.
PostQuitTask();
EXPECT_TRUE(wait_event_.TimedWait(TestTimeouts::action_max_timeout()));
+ EXPECT_EQ(rx_capability.color, VideoCaptureCapability::kMJPEG);
+ device->DeAllocate();
+}
+
+TEST_F(VideoCaptureDeviceTest, FakeCaptureVariableResolution) {
+ VideoCaptureDevice::Names names;
+
+ FakeVideoCaptureDevice::GetDeviceNames(&names);
+ media::VideoCaptureCapability capture_format;
+ capture_format.width = 640;
+ capture_format.height = 480;
+ capture_format.frame_rate = 30;
+ capture_format.frame_size_type = media::VariableResolutionVideoCaptureDevice;
+
+ ASSERT_GT(static_cast<int>(names.size()), 0);
+
+ scoped_ptr<VideoCaptureDevice> device(
+ FakeVideoCaptureDevice::Create(names.front()));
+ ASSERT_TRUE(device.get() != NULL);
+
+ // Get info about the new resolution.
+ EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
+ .Times(1);
+
+ EXPECT_CALL(*frame_observer_, OnErr())
+ .Times(0);
+
+ device->Allocate(capture_format, frame_observer_.get());
+
+ // The amount of times the OnFrameInfoChanged gets called depends on how often
+ // FakeDevice is supposed to change and what is its actual frame rate.
+ // We set TimeWait to 200 action timeouts and this should be enough for at
+ // least action_count/kFakeCaptureCapabilityChangePeriod calls.
+ int action_count = 200;
+ EXPECT_CALL(*frame_observer_, OnFrameInfoChanged(_))
+ .Times(AtLeast(action_count / 30));
+ device->Start();
+ for (int i = 0; i < action_count; ++i) {
+ EXPECT_TRUE(wait_event_.TimedWait(TestTimeouts::action_timeout()));
+ }
+ device->Stop();
device->DeAllocate();
}
diff --git a/media/video/capture/video_capture_types.h b/media/video/capture/video_capture_types.h
index 072e256427..57712727ef 100644
--- a/media/video/capture/video_capture_types.h
+++ b/media/video/capture/video_capture_types.h
@@ -13,12 +13,25 @@ namespace media {
// shared with device manager.
typedef int VideoCaptureSessionId;
+enum VideoCaptureResolutionType {
+ ConstantResolutionVideoCaptureDevice = 0,
+ VariableResolutionVideoCaptureDevice,
+ MaxVideoCaptureResolutionType, // Must be last.
+};
+
// Parameters for starting video capture and device information.
struct VideoCaptureParams {
+ VideoCaptureParams()
+ : width(0),
+ height(0),
+ frame_per_second(0),
+ session_id(0),
+ frame_size_type(ConstantResolutionVideoCaptureDevice) {};
int width;
int height;
int frame_per_second;
VideoCaptureSessionId session_id;
+ VideoCaptureResolutionType frame_size_type;
};
// Capabilities describe the format a camera capture video in.
@@ -36,12 +49,39 @@ struct VideoCaptureCapability {
kYV12,
};
- int width; // Desired width.
- int height; // Desired height.
- int frame_rate; // Desired frame rate.
- Format color; // Desired video type.
+ VideoCaptureCapability()
+ : width(0),
+ height(0),
+ frame_rate(0),
+ color(kColorUnknown),
+ expected_capture_delay(0),
+ interlaced(false),
+ frame_size_type(ConstantResolutionVideoCaptureDevice),
+ session_id(0) {};
+ VideoCaptureCapability(int width,
+ int height,
+ int frame_rate,
+ Format color,
+ int delay,
+ bool interlaced,
+ VideoCaptureResolutionType frame_size_type)
+ : width(width),
+ height(height),
+ frame_rate(frame_rate),
+ color(color),
+ expected_capture_delay(delay),
+ interlaced(interlaced),
+ frame_size_type(frame_size_type),
+ session_id(0) {};
+
+ int width; // Desired width.
+ int height; // Desired height.
+ int frame_rate; // Desired frame rate.
+ Format color; // Desired video type.
int expected_capture_delay; // Expected delay in millisecond.
- bool interlaced; // Need interlace format.
+ bool interlaced; // Need interlace format.
+ VideoCaptureResolutionType frame_size_type;
+ VideoCaptureSessionId session_id;
};
} // namespace media
diff --git a/media/video/capture/win/video_capture_device_mf_win.cc b/media/video/capture/win/video_capture_device_mf_win.cc
index b07b2d67ef..dea97b7c26 100644
--- a/media/video/capture/win/video_capture_device_mf_win.cc
+++ b/media/video/capture/win/video_capture_device_mf_win.cc
@@ -340,9 +340,7 @@ bool VideoCaptureDeviceMFWin::Init() {
}
void VideoCaptureDeviceMFWin::Allocate(
- int width,
- int height,
- int frame_rate,
+ const VideoCaptureCapability& capture_format,
VideoCaptureDevice::EventHandler* observer) {
DCHECK(CalledOnValidThread());
@@ -364,7 +362,9 @@ void VideoCaptureDeviceMFWin::Allocate(
}
const VideoCaptureCapabilityWin& found_capability =
- capabilities.GetBestMatchedCapability(width, height, frame_rate);
+ capabilities.GetBestMatchedCapability(capture_format.width,
+ capture_format.height,
+ capture_format.frame_rate);
DLOG(INFO) << "Chosen capture format= (" << found_capability.width << "x"
<< found_capability.height << ")@("
<< found_capability.frame_rate_numerator << "/"
diff --git a/media/video/capture/win/video_capture_device_mf_win.h b/media/video/capture/win/video_capture_device_mf_win.h
index e3a87e4cda..b4ef6fcb09 100644
--- a/media/video/capture/win/video_capture_device_mf_win.h
+++ b/media/video/capture/win/video_capture_device_mf_win.h
@@ -38,10 +38,8 @@ class MEDIA_EXPORT VideoCaptureDeviceMFWin
bool Init();
// VideoCaptureDevice implementation.
- virtual void Allocate(int width,
- int height,
- int frame_rate,
- VideoCaptureDevice::EventHandler* observer) OVERRIDE;
+ virtual void Allocate(const VideoCaptureCapability& capture_format,
+ VideoCaptureDevice::EventHandler* observer) OVERRIDE;
virtual void Start() OVERRIDE;
virtual void Stop() OVERRIDE;
virtual void DeAllocate() OVERRIDE;
diff --git a/media/video/capture/win/video_capture_device_win.cc b/media/video/capture/win/video_capture_device_win.cc
index e364aa2140..81f057ad98 100644
--- a/media/video/capture/win/video_capture_device_win.cc
+++ b/media/video/capture/win/video_capture_device_win.cc
@@ -336,9 +336,7 @@ bool VideoCaptureDeviceWin::Init() {
}
void VideoCaptureDeviceWin::Allocate(
- int width,
- int height,
- int frame_rate,
+ const VideoCaptureCapability& capture_format,
VideoCaptureDevice::EventHandler* observer) {
DCHECK(CalledOnValidThread());
if (state_ != kIdle)
@@ -348,13 +346,15 @@ void VideoCaptureDeviceWin::Allocate(
// Get the camera capability that best match the requested resolution.
const VideoCaptureCapabilityWin& found_capability =
- capabilities_.GetBestMatchedCapability(width, height, frame_rate);
+ capabilities_.GetBestMatchedCapability(capture_format.width,
+ capture_format.height,
+ capture_format.frame_rate);
VideoCaptureCapability capability = found_capability;
// Reduce the frame rate if the requested frame rate is lower
// than the capability.
- if (capability.frame_rate > frame_rate)
- capability.frame_rate = frame_rate;
+ if (capability.frame_rate > capture_format.frame_rate)
+ capability.frame_rate = capture_format.frame_rate;
AM_MEDIA_TYPE* pmt = NULL;
VIDEO_STREAM_CONFIG_CAPS caps;
diff --git a/media/video/capture/win/video_capture_device_win.h b/media/video/capture/win/video_capture_device_win.h
index 9e1f005e77..f6c1a9b042 100644
--- a/media/video/capture/win/video_capture_device_win.h
+++ b/media/video/capture/win/video_capture_device_win.h
@@ -40,10 +40,8 @@ class VideoCaptureDeviceWin
bool Init();
// VideoCaptureDevice implementation.
- virtual void Allocate(int width,
- int height,
- int frame_rate,
- VideoCaptureDevice::EventHandler* observer) OVERRIDE;
+ virtual void Allocate(const VideoCaptureCapability& capture_format,
+ VideoCaptureDevice::EventHandler* observer) OVERRIDE;
virtual void Start() OVERRIDE;
virtual void Stop() OVERRIDE;
virtual void DeAllocate() OVERRIDE;
diff --git a/media/webm/webm_cluster_parser_unittest.cc b/media/webm/webm_cluster_parser_unittest.cc
index 0e8b7b2714..5c5837fa86 100644
--- a/media/webm/webm_cluster_parser_unittest.cc
+++ b/media/webm/webm_cluster_parser_unittest.cc
@@ -216,7 +216,7 @@ class WebMClusterParserTest : public testing::Test {
scoped_ptr<WebMClusterParser> parser_;
};
-TEST_F(WebMClusterParserTest, TestReset) {
+TEST_F(WebMClusterParserTest, Reset) {
InSequence s;
int block_count = arraysize(kDefaultBlockInfo);
diff --git a/media/webm/webm_parser_unittest.cc b/media/webm/webm_parser_unittest.cc
index 43ad3c2b80..cb71fe98bd 100644
--- a/media/webm/webm_parser_unittest.cc
+++ b/media/webm/webm_parser_unittest.cc
@@ -233,7 +233,7 @@ TEST_F(WebMParserTest, ParseListElementWithMultipleCalls) {
EXPECT_TRUE(parser.IsParsingComplete());
}
-TEST_F(WebMParserTest, TestReset) {
+TEST_F(WebMParserTest, Reset) {
InSequence s;
scoped_ptr<Cluster> cluster(CreateCluster(kBlockCount));
diff --git a/media/webm/webm_tracks_parser_unittest.cc b/media/webm/webm_tracks_parser_unittest.cc
index a57d039cc0..1ba3111789 100644
--- a/media/webm/webm_tracks_parser_unittest.cc
+++ b/media/webm/webm_tracks_parser_unittest.cc
@@ -46,7 +46,7 @@ static void VerifyTextTrackInfo(const uint8* buffer,
EXPECT_TRUE(info.language == language);
}
-TEST_F(WebMTracksParserTest, TestSubtitleNoNameNoLang) {
+TEST_F(WebMTracksParserTest, SubtitleNoNameNoLang) {
InSequence s;
TracksBuilder tb;
@@ -57,7 +57,7 @@ TEST_F(WebMTracksParserTest, TestSubtitleNoNameNoLang) {
VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "", "");
}
-TEST_F(WebMTracksParserTest, TestSubtitleYesNameNoLang) {
+TEST_F(WebMTracksParserTest, SubtitleYesNameNoLang) {
InSequence s;
TracksBuilder tb;
@@ -68,7 +68,7 @@ TEST_F(WebMTracksParserTest, TestSubtitleYesNameNoLang) {
VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "Spock", "");
}
-TEST_F(WebMTracksParserTest, TestSubtitleNoNameYesLang) {
+TEST_F(WebMTracksParserTest, SubtitleNoNameYesLang) {
InSequence s;
TracksBuilder tb;
@@ -79,7 +79,7 @@ TEST_F(WebMTracksParserTest, TestSubtitleNoNameYesLang) {
VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "", "eng");
}
-TEST_F(WebMTracksParserTest, TestSubtitleYesNameYesLang) {
+TEST_F(WebMTracksParserTest, SubtitleYesNameYesLang) {
InSequence s;
TracksBuilder tb;
@@ -90,7 +90,7 @@ TEST_F(WebMTracksParserTest, TestSubtitleYesNameYesLang) {
VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "Picard", "fre");
}
-TEST_F(WebMTracksParserTest, TestIgnoringTextTracks) {
+TEST_F(WebMTracksParserTest, IgnoringTextTracks) {
InSequence s;
TracksBuilder tb;
diff --git a/media/webm/webm_webvtt_parser_unittest.cc b/media/webm/webm_webvtt_parser_unittest.cc
index 6ef744eba5..db514a1247 100644
--- a/media/webm/webm_webvtt_parser_unittest.cc
+++ b/media/webm/webm_webvtt_parser_unittest.cc
@@ -33,7 +33,7 @@ class WebMWebVTTParserTest : public testing::Test {
WebMWebVTTParserTest() {}
};
-TEST_F(WebMWebVTTParserTest, TestBlank) {
+TEST_F(WebMWebVTTParserTest, Blank) {
InSequence s;
const Cue cue = EncodeCue("", "", "Subtitle");
@@ -45,7 +45,7 @@ TEST_F(WebMWebVTTParserTest, TestBlank) {
EXPECT_EQ(content, "Subtitle");
}
-TEST_F(WebMWebVTTParserTest, TestId) {
+TEST_F(WebMWebVTTParserTest, Id) {
InSequence s;
for (int i = 1; i <= 9; ++i) {
@@ -60,7 +60,7 @@ TEST_F(WebMWebVTTParserTest, TestId) {
}
}
-TEST_F(WebMWebVTTParserTest, TestSettings) {
+TEST_F(WebMWebVTTParserTest, Settings) {
InSequence s;
enum { kSettingsCount = 4 };
@@ -81,7 +81,7 @@ TEST_F(WebMWebVTTParserTest, TestSettings) {
}
}
-TEST_F(WebMWebVTTParserTest, TestContent) {
+TEST_F(WebMWebVTTParserTest, Content) {
InSequence s;
enum { kContentCount = 4 };