summaryrefslogtreecommitdiff
path: root/media/base
diff options
context:
space:
mode:
authorTorne (Richard Coles) <torne@google.com>2013-06-19 11:58:07 +0100
committerTorne (Richard Coles) <torne@google.com>2013-06-19 11:58:07 +0100
commit7d4cd473f85ac64c3747c96c277f9e506a0d2246 (patch)
treef5fecd524f5ac22cd38bcc6713b81f666730d5a1 /media/base
parent84f2b2352908c30e40ae12ffe850dd8470f6c048 (diff)
downloadchromium_org-7d4cd473f85ac64c3747c96c277f9e506a0d2246.tar.gz
Merge from Chromium at DEPS revision r207203
This commit was generated by merge_to_master.py. Change-Id: I5fbb6854d092096c4d39edc2865a48be1b53c418
Diffstat (limited to 'media/base')
-rw-r--r--media/base/android/java/src/org/chromium/media/MediaCodecBridge.java40
-rw-r--r--media/base/android/media_codec_bridge.cc12
-rw-r--r--media/base/android/media_drm_bridge.cc63
-rw-r--r--media/base/android/media_drm_bridge.h62
-rw-r--r--media/base/android/media_player_android.cc8
-rw-r--r--media/base/android/media_player_android.h5
-rw-r--r--media/base/android/media_player_listener.cc2
-rw-r--r--media/base/android/media_player_manager.h23
-rw-r--r--media/base/android/media_source_player.cc205
-rw-r--r--media/base/android/media_source_player.h51
-rw-r--r--media/base/android/media_source_player_unittest.cc328
-rw-r--r--media/base/audio_buffer.cc171
-rw-r--r--media/base/audio_buffer.h98
-rw-r--r--media/base/audio_buffer_unittest.cc272
-rw-r--r--media/base/audio_decoder_config.cc34
-rw-r--r--media/base/audio_decoder_config.h22
-rw-r--r--media/base/audio_renderer_mixer.cc6
-rw-r--r--media/base/audio_renderer_mixer.h2
-rw-r--r--media/base/audio_renderer_mixer_unittest.cc8
-rw-r--r--media/base/clock.cc13
-rw-r--r--media/base/clock.h27
-rw-r--r--media/base/clock_unittest.cc8
-rw-r--r--media/base/limits.h5
-rw-r--r--media/base/media_keys.h35
-rw-r--r--media/base/media_switches.cc3
-rw-r--r--media/base/media_switches.h2
-rw-r--r--media/base/pipeline.cc10
-rw-r--r--media/base/pipeline.h8
-rw-r--r--media/base/pipeline_unittest.cc10
-rw-r--r--media/base/run_all_unittests.cc5
-rw-r--r--media/base/sample_format.cc32
-rw-r--r--media/base/sample_format.h34
32 files changed, 1349 insertions, 255 deletions
diff --git a/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java b/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
index 03483993b7..86464b71a2 100644
--- a/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
+++ b/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
@@ -161,7 +161,7 @@ class MediaCodecBridge {
int index = MEDIA_CODEC_ERROR;
try {
index = mMediaCodec.dequeueOutputBuffer(info, timeoutUs);
- } catch(IllegalStateException e) {
+ } catch (IllegalStateException e) {
Log.e(TAG, "Cannot dequeue output buffer " + e.toString());
}
return new DequeueOutputResult(
@@ -169,9 +169,15 @@ class MediaCodecBridge {
}
@CalledByNative
- private void configureVideo(MediaFormat format, Surface surface, MediaCrypto crypto,
+ private boolean configureVideo(MediaFormat format, Surface surface, MediaCrypto crypto,
int flags) {
- mMediaCodec.configure(format, surface, crypto, flags);
+ try {
+ mMediaCodec.configure(format, surface, crypto, flags);
+ return true;
+ } catch (IllegalStateException e) {
+ Log.e(TAG, "Cannot configure the video codec " + e.toString());
+ }
+ return false;
}
@CalledByNative
@@ -203,19 +209,25 @@ class MediaCodecBridge {
}
@CalledByNative
- private void configureAudio(MediaFormat format, MediaCrypto crypto, int flags,
+ private boolean configureAudio(MediaFormat format, MediaCrypto crypto, int flags,
boolean playAudio) {
- mMediaCodec.configure(format, null, crypto, flags);
- if (playAudio) {
- int sampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE);
- int channelCount = format.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
- int channelConfig = (channelCount == 1) ? AudioFormat.CHANNEL_OUT_MONO :
- AudioFormat.CHANNEL_OUT_STEREO;
- int minBufferSize = AudioTrack.getMinBufferSize(sampleRate, channelConfig,
- AudioFormat.ENCODING_PCM_16BIT);
- mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, channelConfig,
- AudioFormat.ENCODING_PCM_16BIT, minBufferSize, AudioTrack.MODE_STREAM);
+ try {
+ mMediaCodec.configure(format, null, crypto, flags);
+ if (playAudio) {
+ int sampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE);
+ int channelCount = format.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
+ int channelConfig = (channelCount == 1) ? AudioFormat.CHANNEL_OUT_MONO :
+ AudioFormat.CHANNEL_OUT_STEREO;
+ int minBufferSize = AudioTrack.getMinBufferSize(sampleRate, channelConfig,
+ AudioFormat.ENCODING_PCM_16BIT);
+ mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, channelConfig,
+ AudioFormat.ENCODING_PCM_16BIT, minBufferSize, AudioTrack.MODE_STREAM);
+ }
+ return true;
+ } catch (IllegalStateException e) {
+ Log.e(TAG, "Cannot configure the audio codec " + e.toString());
}
+ return false;
}
@CalledByNative
diff --git a/media/base/android/media_codec_bridge.cc b/media/base/android/media_codec_bridge.cc
index 12841a6d64..9dd0cea86f 100644
--- a/media/base/android/media_codec_bridge.cc
+++ b/media/base/android/media_codec_bridge.cc
@@ -197,8 +197,10 @@ bool AudioCodecBridge::Start(
if (!ConfigureMediaFormat(j_format.obj(), codec, extra_data, extra_data_size))
return false;
- Java_MediaCodecBridge_configureAudio(
- env, media_codec(), j_format.obj(), NULL, 0, play_audio);
+ if (!Java_MediaCodecBridge_configureAudio(
+ env, media_codec(), j_format.obj(), NULL, 0, play_audio)) {
+ return false;
+ }
StartInternal();
return true;
}
@@ -336,8 +338,10 @@ bool VideoCodecBridge::Start(
Java_MediaCodecBridge_createVideoFormat(
env, j_mime.obj(), size.width(), size.height()));
DCHECK(!j_format.is_null());
- Java_MediaCodecBridge_configureVideo(
- env, media_codec(), j_format.obj(), surface, NULL, 0);
+ if (!Java_MediaCodecBridge_configureVideo(
+ env, media_codec(), j_format.obj(), surface, NULL, 0)) {
+ return false;
+ }
StartInternal();
return true;
}
diff --git a/media/base/android/media_drm_bridge.cc b/media/base/android/media_drm_bridge.cc
new file mode 100644
index 0000000000..8760900a88
--- /dev/null
+++ b/media/base/android/media_drm_bridge.cc
@@ -0,0 +1,63 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/media_drm_bridge.h"
+
+#include "base/logging.h"
+
+using base::android::ScopedJavaLocalRef;
+
+namespace media {
+
+// static
+bool MediaDrmBridge::IsAvailable() {
+ return false;
+}
+
+MediaDrmBridge::MediaDrmBridge(
+ int media_keys_id, const std::vector<uint8>& uuid)
+ : media_keys_id_(media_keys_id) {}
+
+MediaDrmBridge::~MediaDrmBridge() {}
+
+bool MediaDrmBridge::GenerateKeyRequest(const std::string& type,
+ const uint8* init_data,
+ int init_data_length) {
+ NOTIMPLEMENTED();
+ return false;
+}
+
+void MediaDrmBridge::CancelKeyRequest(const std::string& session_id) {
+ NOTIMPLEMENTED();
+}
+
+void MediaDrmBridge::AddKey(const uint8* key, int key_length,
+ const uint8* init_data, int init_data_length,
+ const std::string& session_id) {
+ NOTIMPLEMENTED();
+}
+
+ScopedJavaLocalRef<jobject> MediaDrmBridge::CreateMediaCrypto(
+ const std::string& session_id) {
+ NOTIMPLEMENTED();
+ return ScopedJavaLocalRef<jobject>();
+}
+
+void MediaDrmBridge::ReleaseMediaCrypto(const std::string& session_id) {
+ NOTIMPLEMENTED();
+}
+
+void MediaDrmBridge::OnKeyMessage(
+ JNIEnv* env, jobject j_media_drm, jstring session_id, jbyteArray message,
+ jstring destination_url) {
+ NOTIMPLEMENTED();
+}
+
+void MediaDrmBridge::OnDrmEvent(
+ JNIEnv* env, jobject j_media_drm, jstring session_id, jint event,
+ jint extra, jstring data) {
+ NOTIMPLEMENTED();
+}
+
+} // namespace media
diff --git a/media/base/android/media_drm_bridge.h b/media/base/android/media_drm_bridge.h
new file mode 100644
index 0000000000..60ecfa14a9
--- /dev/null
+++ b/media/base/android/media_drm_bridge.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_DRM_BRIDGE_H_
+#define MEDIA_BASE_ANDROID_MEDIA_DRM_BRIDGE_H_
+
+#include <jni.h>
+#include <string>
+#include <vector>
+
+#include "base/android/scoped_java_ref.h"
+#include "media/base/media_export.h"
+#include "media/base/media_keys.h"
+
+namespace media {
+
+// This class provides DRM services for android EME implementation.
+// TODO(qinmin): implement all the functions in this class.
+class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
+ public:
+ // TODO(xhwang): Pass in |key_system|.
+ MediaDrmBridge(int media_keys_id, const std::vector<uint8>& uuid);
+ virtual ~MediaDrmBridge();
+
+ // Checks whether DRM is available.
+ static bool IsAvailable();
+
+ // MediaKeys implementations.
+ virtual bool GenerateKeyRequest(const std::string& type,
+ const uint8* init_data,
+ int init_data_length) OVERRIDE;
+ virtual void AddKey(const uint8* key, int key_length,
+ const uint8* init_data, int init_data_length,
+ const std::string& session_id) OVERRIDE;
+ virtual void CancelKeyRequest(const std::string& session_id) OVERRIDE;
+
+ // Drm related message was received.
+ void OnDrmEvent(JNIEnv* env, jobject, jstring session_id,
+ jint event, jint extra, jstring data);
+
+ // Called after we got the response for GenerateKeyRequest().
+ void OnKeyMessage(JNIEnv* env, jobject, jstring session_id,
+ jbyteArray message, jstring destination_url);
+
+ // Methods to create and release a MediaCrypto object.
+ base::android::ScopedJavaLocalRef<jobject> CreateMediaCrypto(
+ const std::string& session_id);
+ void ReleaseMediaCrypto(const std::string& session_id);
+
+ int media_keys_id() const { return media_keys_id_; }
+
+ private:
+ // Id of the MediaKeys object.
+ int media_keys_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaDrmBridge);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_DRM_BRIDGE_H_
diff --git a/media/base/android/media_player_android.cc b/media/base/android/media_player_android.cc
index 2de48870e4..a435d5dd7a 100644
--- a/media/base/android/media_player_android.cc
+++ b/media/base/android/media_player_android.cc
@@ -54,12 +54,12 @@ void MediaPlayerAndroid::OnMediaMetadataChanged(
void MediaPlayerAndroid::RequestMediaResourcesFromManager() {
if (manager_)
- manager_->RequestMediaResources(this);
+ manager_->RequestMediaResources(player_id_);
}
void MediaPlayerAndroid::ReleaseMediaResourcesFromManager() {
if (manager_)
- manager_->ReleaseMediaResources(this);
+ manager_->ReleaseMediaResources(player_id_);
}
void MediaPlayerAndroid::DemuxerReady(
@@ -76,6 +76,10 @@ void MediaPlayerAndroid::OnSeekRequestAck(unsigned seek_request_id) {
NOTREACHED() << "Unexpected ipc received";
}
+void MediaPlayerAndroid::DurationChanged(const base::TimeDelta& duration) {
+ NOTREACHED() << "Unexpected ipc received";
+}
+
GURL MediaPlayerAndroid::GetUrl() {
return GURL();
}
diff --git a/media/base/android/media_player_android.h b/media/base/android/media_player_android.h
index c5abbb9ae1..1310fbdea2 100644
--- a/media/base/android/media_player_android.h
+++ b/media/base/android/media_player_android.h
@@ -85,7 +85,7 @@ class MEDIA_EXPORT MediaPlayerAndroid {
virtual GURL GetUrl();
virtual GURL GetFirstPartyForCookies();
- // Methods for DeumxerStreamPlayer.
+ // Methods for DemuxerStreamPlayer.
// Informs DemuxerStreamPlayer that the demuxer is ready.
virtual void DemuxerReady(
const MediaPlayerHostMsg_DemuxerReady_Params& params);
@@ -96,6 +96,9 @@ class MEDIA_EXPORT MediaPlayerAndroid {
// Called when a seek request is acked by the render process.
virtual void OnSeekRequestAck(unsigned seek_request_id);
+ // Called when the demuxer has changed the duration.
+ virtual void DurationChanged(const base::TimeDelta& duration);
+
int player_id() { return player_id_; }
protected:
diff --git a/media/base/android/media_player_listener.cc b/media/base/android/media_player_listener.cc
index 94cbfd14db..730759eb30 100644
--- a/media/base/android/media_player_listener.cc
+++ b/media/base/android/media_player_listener.cc
@@ -25,7 +25,7 @@ MediaPlayerListener::MediaPlayerListener(
base::WeakPtr<MediaPlayerBridge> media_player)
: message_loop_(message_loop),
media_player_(media_player) {
- DCHECK(message_loop_);
+ DCHECK(message_loop_.get());
DCHECK(media_player_);
}
diff --git a/media/base/android/media_player_manager.h b/media/base/android/media_player_manager.h
index a7ce2203bb..69d6792745 100644
--- a/media/base/android/media_player_manager.h
+++ b/media/base/android/media_player_manager.h
@@ -16,6 +16,7 @@ class RenderViewHost;
namespace media {
+class MediaDrmBridge;
class MediaPlayerAndroid;
class MediaResourceGetter;
@@ -45,11 +46,11 @@ class MEDIA_EXPORT MediaPlayerManager {
// media streams. This helps the manager object maintain an array
// of active MediaPlayerAndroid objects and release the resources
// when needed.
- virtual void RequestMediaResources(MediaPlayerAndroid* player) = 0;
+ virtual void RequestMediaResources(int player_id) = 0;
// Called when a MediaPlayerAndroid object releases all its decoding
// resources.
- virtual void ReleaseMediaResources(MediaPlayerAndroid* player) = 0;
+ virtual void ReleaseMediaResources(int player_id) = 0;
// Return a pointer to the MediaResourceGetter object.
virtual MediaResourceGetter* GetMediaResourceGetter() = 0;
@@ -106,25 +107,25 @@ class MEDIA_EXPORT MediaPlayerManager {
// Called when player wants to read the config data from the demuxer.
virtual void OnMediaConfigRequest(int player_id) = 0;
+ // Get the MediaDrmBridge object for the given media key Id.
+ virtual media::MediaDrmBridge* GetDrmBridge(int media_keys_id) = 0;
+
// TODO(xhwang): The following three methods needs to be decoupled from
// MediaPlayerManager to support the W3C Working Draft version of the EME
// spec.
- // Called when the player wants to send a KeyAdded.
- virtual void OnKeyAdded(int player_id,
- const std::string& key_system,
+ // Called when the DRM engine wants to send a KeyAdded.
+ virtual void OnKeyAdded(int key_id,
const std::string& session_id) = 0;
- // Called when the player wants to send a KeyError.
- virtual void OnKeyError(int player_id,
- const std::string& key_system,
+ // Called when the DRM engine wants to send a KeyError.
+ virtual void OnKeyError(int key_id,
const std::string& session_id,
media::MediaKeys::KeyError error_code,
int system_code) = 0;
- // Called when the player wants to send a KeyMessage.
- virtual void OnKeyMessage(int player_id,
- const std::string& key_system,
+ // Called when the DRM engine wants to send a KeyMessage.
+ virtual void OnKeyMessage(int key_id,
const std::string& session_id,
const std::string& message,
const std::string& destination_url) = 0;
diff --git a/media/base/android/media_source_player.cc b/media/base/android/media_source_player.cc
index f3aabe5f7f..9495ed082f 100644
--- a/media/base/android/media_source_player.cc
+++ b/media/base/android/media_source_player.cc
@@ -52,13 +52,17 @@ base::LazyInstance<VideoDecoderThread>::Leaky
namespace media {
-MediaDecoderJob::MediaDecoderJob(base::Thread* thread, bool is_audio)
- : message_loop_(base::MessageLoopProxy::current()),
- thread_(thread),
+MediaDecoderJob::MediaDecoderJob(
+ const scoped_refptr<base::MessageLoopProxy>& decoder_loop,
+ MediaCodecBridge* media_codec_bridge,
+ bool is_audio)
+ : ui_loop_(base::MessageLoopProxy::current()),
+ decoder_loop_(decoder_loop),
+ media_codec_bridge_(media_codec_bridge),
needs_flush_(false),
is_audio_(is_audio),
weak_this_(this),
- decoding_(false) {
+ is_decoding_(false) {
}
MediaDecoderJob::~MediaDecoderJob() {}
@@ -66,31 +70,37 @@ MediaDecoderJob::~MediaDecoderJob() {}
// Class for managing audio decoding jobs.
class AudioDecoderJob : public MediaDecoderJob {
public:
- AudioDecoderJob(
+ virtual ~AudioDecoderJob() {}
+
+ static AudioDecoderJob* Create(
const AudioCodec audio_codec, int sample_rate,
int channel_count, const uint8* extra_data, size_t extra_data_size);
- virtual ~AudioDecoderJob() {}
+
+ private:
+ AudioDecoderJob(MediaCodecBridge* media_codec_bridge);
};
// Class for managing video decoding jobs.
class VideoDecoderJob : public MediaDecoderJob {
public:
- VideoDecoderJob(
- const VideoCodec video_codec, const gfx::Size& size, jobject surface);
virtual ~VideoDecoderJob() {}
- void Configure(
- const VideoCodec codec, const gfx::Size& size, jobject surface);
+ static VideoDecoderJob* Create(
+ const VideoCodec video_codec, const gfx::Size& size, jobject surface);
+
+ private:
+ VideoDecoderJob(MediaCodecBridge* media_codec_bridge);
};
void MediaDecoderJob::Decode(
const MediaPlayerHostMsg_ReadFromDemuxerAck_Params::AccessUnit& unit,
- const base::Time& start_wallclock_time,
+ const base::TimeTicks& start_wallclock_time,
const base::TimeDelta& start_presentation_timestamp,
const MediaDecoderJob::DecoderCallback& callback) {
- DCHECK(!decoding_);
- decoding_ = true;
- thread_->message_loop()->PostTask(FROM_HERE, base::Bind(
+ DCHECK(!is_decoding_);
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+ is_decoding_ = true;
+ decoder_loop_->PostTask(FROM_HERE, base::Bind(
&MediaDecoderJob::DecodeInternal, base::Unretained(this), unit,
start_wallclock_time, start_presentation_timestamp, needs_flush_,
callback));
@@ -99,7 +109,7 @@ void MediaDecoderJob::Decode(
void MediaDecoderJob::DecodeInternal(
const MediaPlayerHostMsg_ReadFromDemuxerAck_Params::AccessUnit& unit,
- const base::Time& start_wallclock_time,
+ const base::TimeTicks& start_wallclock_time,
const base::TimeDelta& start_presentation_timestamp,
bool needs_flush,
const MediaDecoderJob::DecoderCallback& callback) {
@@ -109,7 +119,7 @@ void MediaDecoderJob::DecodeInternal(
kMediaCodecTimeoutInMicroseconds);
int input_buf_index = media_codec_bridge_->DequeueInputBuffer(timeout);
if (input_buf_index == MediaCodecBridge::INFO_MEDIA_CODEC_ERROR) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ ui_loop_->PostTask(FROM_HERE, base::Bind(
callback, DECODE_FAILED, start_presentation_timestamp,
start_wallclock_time, false));
return;
@@ -151,7 +161,7 @@ void MediaDecoderJob::DecodeInternal(
break;
base::TimeDelta time_to_render;
if (!start_wallclock_time.is_null()) {
- time_to_render = presentation_timestamp - (base::Time::Now() -
+ time_to_render = presentation_timestamp - (base::TimeTicks::Now() -
start_wallclock_time + start_presentation_timestamp);
}
if (time_to_render >= base::TimeDelta()) {
@@ -171,7 +181,7 @@ void MediaDecoderJob::DecodeInternal(
}
return;
}
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ ui_loop_->PostTask(FROM_HERE, base::Bind(
callback, decode_status, start_presentation_timestamp,
start_wallclock_time, end_of_stream));
}
@@ -187,13 +197,14 @@ void MediaDecoderJob::ReleaseOutputBuffer(
outputBufferIndex, size);
}
media_codec_bridge_->ReleaseOutputBuffer(outputBufferIndex, !is_audio_);
- message_loop_->PostTask(FROM_HERE, base::Bind(
- callback, DECODE_SUCCEEDED, presentation_timestamp, base::Time::Now(),
- end_of_stream));
+ ui_loop_->PostTask(FROM_HERE, base::Bind(
+ callback, DECODE_SUCCEEDED, presentation_timestamp,
+ base::TimeTicks::Now(), end_of_stream));
}
void MediaDecoderJob::OnDecodeCompleted() {
- decoding_ = false;
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+ is_decoding_ = false;
}
void MediaDecoderJob::Flush() {
@@ -214,39 +225,51 @@ void MediaDecoderJob::Release() {
// TODO(qinmin): Figure out the logic to passing the surface to a new
// MediaDecoderJob instance after the previous one gets deleted on the decoder
// thread.
- if (decoding_ && thread_->message_loop() != base::MessageLoop::current())
- thread_->message_loop()->DeleteSoon(FROM_HERE, this);
- else
+ if (is_decoding_ && !decoder_loop_->BelongsToCurrentThread()) {
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+ decoder_loop_->DeleteSoon(FROM_HERE, this);
+ } else {
delete this;
+ }
}
-VideoDecoderJob::VideoDecoderJob(
- const VideoCodec video_codec, const gfx::Size& size, jobject surface)
- : MediaDecoderJob(g_video_decoder_thread.Pointer(), false) {
+VideoDecoderJob* VideoDecoderJob::Create(
+ const VideoCodec video_codec, const gfx::Size& size, jobject surface) {
scoped_ptr<VideoCodecBridge> codec(VideoCodecBridge::Create(video_codec));
- codec->Start(video_codec, size, surface);
- media_codec_bridge_.reset(codec.release());
+ if (codec->Start(video_codec, size, surface))
+ return new VideoDecoderJob(codec.release());
+ return NULL;
}
-AudioDecoderJob::AudioDecoderJob(
+VideoDecoderJob::VideoDecoderJob(MediaCodecBridge* media_codec_bridge)
+ : MediaDecoderJob(g_video_decoder_thread.Pointer()->message_loop_proxy(),
+ media_codec_bridge,
+ false) {}
+
+AudioDecoderJob* AudioDecoderJob::Create(
const AudioCodec audio_codec,
int sample_rate,
int channel_count,
const uint8* extra_data,
- size_t extra_data_size)
- : MediaDecoderJob(g_audio_decoder_thread.Pointer(), true) {
+ size_t extra_data_size) {
scoped_ptr<AudioCodecBridge> codec(AudioCodecBridge::Create(audio_codec));
- codec->Start(audio_codec, sample_rate, channel_count, extra_data,
- extra_data_size, true);
- media_codec_bridge_.reset(codec.release());
+ if (codec->Start(audio_codec, sample_rate, channel_count, extra_data,
+ extra_data_size, true)) {
+ return new AudioDecoderJob(codec.release());
+ }
+ return NULL;
}
+AudioDecoderJob::AudioDecoderJob(MediaCodecBridge* media_codec_bridge)
+ : MediaDecoderJob(g_audio_decoder_thread.Pointer()->message_loop_proxy(),
+ media_codec_bridge,
+ true) {}
+
MediaSourcePlayer::MediaSourcePlayer(
int player_id,
MediaPlayerManager* manager)
: MediaPlayerAndroid(player_id, manager),
pending_event_(NO_EVENT_PENDING),
- active_decoding_tasks_(0),
seek_request_id_(0),
width_(0),
height_(0),
@@ -254,7 +277,6 @@ MediaSourcePlayer::MediaSourcePlayer(
video_codec_(kUnknownVideoCodec),
num_channels_(0),
sampling_rate_(0),
- seekable_(true),
audio_finished_(true),
video_finished_(true),
playing_(false),
@@ -274,6 +296,10 @@ MediaSourcePlayer::~MediaSourcePlayer() {
void MediaSourcePlayer::SetVideoSurface(gfx::ScopedJavaSurface surface) {
surface_ = surface.Pass();
pending_event_ |= SURFACE_CHANGE_EVENT_PENDING;
+ if (pending_event_ & SEEK_EVENT_PENDING) {
+ // Waiting for the seek to finish.
+ return;
+ }
// Setting a new surface will require a new MediaCodec to be created.
// Request a seek so that the new decoder will decode an I-frame first.
// Or otherwise, the new MediaCodec might crash. See b/8950387.
@@ -281,18 +307,29 @@ void MediaSourcePlayer::SetVideoSurface(gfx::ScopedJavaSurface surface) {
ProcessPendingEvents();
}
+bool MediaSourcePlayer::Seekable() {
+ // If the duration TimeDelta, converted to milliseconds from microseconds,
+ // is >= 2^31, then the media is assumed to be unbounded and unseekable.
+ // 2^31 is the bound due to java player using 32-bit integer for time
+ // values at millisecond resolution.
+ return duration_ <
+ base::TimeDelta::FromMilliseconds(std::numeric_limits<int32>::max());
+}
+
void MediaSourcePlayer::Start() {
playing_ = true;
- CreateAudioDecoderJob();
- CreateVideoDecoderJob();
-
StartInternal();
}
void MediaSourcePlayer::Pause() {
+ // Since decoder jobs have their own thread, decoding is not fully paused
+ // until all the decoder jobs call MediaDecoderCallback(). It is possible
+ // that Start() is called while the player is waiting for
+ // MediaDecoderCallback(). In that case, decoding will continue when
+ // MediaDecoderCallback() is called.
playing_ = false;
- start_wallclock_time_ = base::Time();
+ start_wallclock_time_ = base::TimeTicks();
}
bool MediaSourcePlayer::IsPlaying() {
@@ -327,7 +364,6 @@ void MediaSourcePlayer::Release() {
video_decoder_job_.reset();
reconfig_audio_decoder_ = false;
reconfig_video_decoder_ = false;
- active_decoding_tasks_ = 0;
playing_ = false;
pending_event_ = NO_EVENT_PENDING;
surface_ = gfx::ScopedJavaSurface();
@@ -338,15 +374,15 @@ void MediaSourcePlayer::SetVolume(float leftVolume, float rightVolume) {
}
bool MediaSourcePlayer::CanPause() {
- return seekable_;
+ return Seekable();
}
bool MediaSourcePlayer::CanSeekForward() {
- return seekable_;
+ return Seekable();
}
bool MediaSourcePlayer::CanSeekBackward() {
- return seekable_;
+ return Seekable();
}
bool MediaSourcePlayer::IsPlayerReady() {
@@ -354,21 +390,26 @@ bool MediaSourcePlayer::IsPlayerReady() {
}
void MediaSourcePlayer::StartInternal() {
- // Do nothing if the decoders are already running.
- if (active_decoding_tasks_ > 0 || pending_event_ != NO_EVENT_PENDING)
+ // If there are pending events, wait for them finish.
+ if (pending_event_ != NO_EVENT_PENDING)
return;
+ // Create decoder jobs if they are not created
+ ConfigureAudioDecoderJob();
+ ConfigureVideoDecoderJob();
+
+
// If one of the decoder job is not ready, do nothing.
if ((HasAudio() && !audio_decoder_job_) ||
(HasVideo() && !video_decoder_job_)) {
return;
}
- if (HasAudio()) {
+ if (HasAudio() && !audio_decoder_job_->is_decoding()) {
audio_finished_ = false;
DecodeMoreAudio();
}
- if (HasVideo()) {
+ if (HasVideo() && !video_decoder_job_->is_decoding()) {
video_finished_ = false;
DecodeMoreVideo();
}
@@ -376,8 +417,6 @@ void MediaSourcePlayer::StartInternal() {
void MediaSourcePlayer::DemuxerReady(
const MediaPlayerHostMsg_DemuxerReady_Params& params) {
- if (params.duration_ms == std::numeric_limits<int>::max())
- seekable_ = false;
duration_ = base::TimeDelta::FromMilliseconds(params.duration_ms);
width_ = params.video_size.width();
height_ = params.video_size.height();
@@ -388,17 +427,17 @@ void MediaSourcePlayer::DemuxerReady(
audio_extra_data_ = params.audio_extra_data;
OnMediaMetadataChanged(duration_, width_, height_, true);
if (pending_event_ & CONFIG_CHANGE_EVENT_PENDING) {
- if (reconfig_audio_decoder_) {
- CreateAudioDecoderJob();
- }
+ if (reconfig_audio_decoder_)
+ ConfigureAudioDecoderJob();
+
// If there is a pending surface change, we can merge it with the config
// change.
if (reconfig_video_decoder_) {
pending_event_ &= ~SURFACE_CHANGE_EVENT_PENDING;
- CreateVideoDecoderJob();
+ ConfigureVideoDecoderJob();
}
pending_event_ &= ~CONFIG_CHANGE_EVENT_PENDING;
- if (playing_ && pending_event_ == NO_EVENT_PENDING)
+ if (playing_)
StartInternal();
}
}
@@ -428,6 +467,10 @@ void MediaSourcePlayer::ReadFromDemuxerAck(
}
}
+void MediaSourcePlayer::DurationChanged(const base::TimeDelta& duration) {
+ duration_ = duration;
+}
+
void MediaSourcePlayer::OnSeekRequestAck(unsigned seek_request_id) {
// Do nothing until the most recent seek request is processed.
if (seek_request_id_ != seek_request_id)
@@ -439,7 +482,7 @@ void MediaSourcePlayer::OnSeekRequestAck(unsigned seek_request_id) {
void MediaSourcePlayer::UpdateTimestamps(
const base::TimeDelta& presentation_timestamp,
- const base::Time& wallclock_time) {
+ const base::TimeTicks& wallclock_time) {
last_presentation_timestamp_ = presentation_timestamp;
OnTimeUpdated();
if (start_wallclock_time_.is_null() && playing_) {
@@ -450,8 +493,10 @@ void MediaSourcePlayer::UpdateTimestamps(
void MediaSourcePlayer::ProcessPendingEvents() {
// Wait for all the decoding jobs to finish before processing pending tasks.
- if (active_decoding_tasks_ > 0)
+ if ((audio_decoder_job_ && audio_decoder_job_->is_decoding()) ||
+ (video_decoder_job_ && video_decoder_job_->is_decoding())) {
return;
+ }
if (pending_event_ & SEEK_EVENT_PENDING) {
ClearDecodingData();
@@ -460,7 +505,7 @@ void MediaSourcePlayer::ProcessPendingEvents() {
return;
}
- start_wallclock_time_ = base::Time();
+ start_wallclock_time_ = base::TimeTicks();
if (pending_event_ & CONFIG_CHANGE_EVENT_PENDING) {
DCHECK(reconfig_audio_decoder_ || reconfig_video_decoder_);
manager()->OnMediaConfigRequest(player_id());
@@ -469,21 +514,18 @@ void MediaSourcePlayer::ProcessPendingEvents() {
if (pending_event_ & SURFACE_CHANGE_EVENT_PENDING) {
video_decoder_job_.reset();
- CreateVideoDecoderJob();
+ ConfigureVideoDecoderJob();
pending_event_ &= ~SURFACE_CHANGE_EVENT_PENDING;
}
- if (playing_ && pending_event_ == NO_EVENT_PENDING)
+ if (playing_)
StartInternal();
}
void MediaSourcePlayer::MediaDecoderCallback(
bool is_audio, MediaDecoderJob::DecodeStatus decode_status,
const base::TimeDelta& presentation_timestamp,
- const base::Time& wallclock_time, bool end_of_stream) {
- if (active_decoding_tasks_ > 0)
- active_decoding_tasks_--;
-
+ const base::TimeTicks& wallclock_time, bool end_of_stream) {
if (is_audio && audio_decoder_job_)
audio_decoder_job_->OnDecodeCompleted();
if (!is_audio && video_decoder_job_)
@@ -549,7 +591,6 @@ void MediaSourcePlayer::DecodeMoreAudio() {
start_wallclock_time_, start_presentation_timestamp_,
base::Bind(&MediaSourcePlayer::MediaDecoderCallback,
weak_this_.GetWeakPtr(), true));
- active_decoding_tasks_++;
}
void MediaSourcePlayer::DecodeMoreVideo() {
@@ -579,7 +620,6 @@ void MediaSourcePlayer::DecodeMoreVideo() {
start_wallclock_time_, start_presentation_timestamp_,
base::Bind(&MediaSourcePlayer::MediaDecoderCallback,
weak_this_.GetWeakPtr(), false));
- active_decoding_tasks_++;
}
@@ -591,7 +631,7 @@ void MediaSourcePlayer::PlaybackCompleted(bool is_audio) {
if ((!HasAudio() || audio_finished_) && (!HasVideo() || video_finished_)) {
playing_ = false;
- start_wallclock_time_ = base::Time();
+ start_wallclock_time_ = base::TimeTicks();
OnPlaybackComplete();
}
}
@@ -601,11 +641,13 @@ void MediaSourcePlayer::ClearDecodingData() {
audio_decoder_job_->Flush();
if (video_decoder_job_)
video_decoder_job_->Flush();
- start_wallclock_time_ = base::Time();
+ start_wallclock_time_ = base::TimeTicks();
received_audio_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
received_video_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
audio_access_unit_index_ = 0;
video_access_unit_index_ = 0;
+ waiting_for_audio_data_ = false;
+ waiting_for_video_data_ = false;
}
bool MediaSourcePlayer::HasVideo() {
@@ -616,31 +658,36 @@ bool MediaSourcePlayer::HasAudio() {
return kUnknownAudioCodec != audio_codec_;
}
-void MediaSourcePlayer::CreateAudioDecoderJob() {
- DCHECK_EQ(0, active_decoding_tasks_);
+void MediaSourcePlayer::ConfigureAudioDecoderJob() {
+ if (!HasAudio()) {
+ audio_decoder_job_.reset();
+ return;
+ }
// Create audio decoder job only if config changes.
if (HasAudio() && (reconfig_audio_decoder_ || !audio_decoder_job_)) {
- audio_decoder_job_.reset(new AudioDecoderJob(
+ audio_decoder_job_.reset(AudioDecoderJob::Create(
audio_codec_, sampling_rate_, num_channels_,
&audio_extra_data_[0], audio_extra_data_.size()));
- reconfig_audio_decoder_ = false;
+ if (audio_decoder_job_)
+ reconfig_audio_decoder_ = false;
}
}
-void MediaSourcePlayer::CreateVideoDecoderJob() {
- DCHECK_EQ(0, active_decoding_tasks_);
-
+void MediaSourcePlayer::ConfigureVideoDecoderJob() {
if (!HasVideo() || surface_.IsSurfaceEmpty()) {
video_decoder_job_.reset();
return;
}
if (reconfig_video_decoder_ || !video_decoder_job_) {
+ // Release the old VideoDecoderJob first so the surface can get released.
video_decoder_job_.reset();
- video_decoder_job_.reset(new VideoDecoderJob(
+ // Create the new VideoDecoderJob.
+ video_decoder_job_.reset(VideoDecoderJob::Create(
video_codec_, gfx::Size(width_, height_), surface_.j_surface().obj()));
- reconfig_video_decoder_ = false;
+ if (video_decoder_job_)
+ reconfig_video_decoder_ = false;
}
// Inform the fullscreen view the player is ready.
diff --git a/media/base/android/media_source_player.h b/media/base/android/media_source_player.h
index b2d5a72688..51796529a3 100644
--- a/media/base/android/media_source_player.h
+++ b/media/base/android/media_source_player.h
@@ -46,12 +46,12 @@ class MediaDecoderJob {
// finished successfully, presentation time, timestamp when the data is
// rendered, whether decoder is reaching EOS.
typedef base::Callback<void(DecodeStatus, const base::TimeDelta&,
- const base::Time&, bool)> DecoderCallback;
+ const base::TimeTicks&, bool)> DecoderCallback;
// Called by MediaSourcePlayer to decode some data.
void Decode(
const MediaPlayerHostMsg_ReadFromDemuxerAck_Params::AccessUnit& unit,
- const base::Time& start_wallclock_time,
+ const base::TimeTicks& start_wallclock_time,
const base::TimeDelta& start_presentation_timestamp,
const MediaDecoderJob::DecoderCallback& callback);
@@ -59,7 +59,7 @@ class MediaDecoderJob {
void Flush();
struct Deleter {
- inline void operator()(MediaDecoderJob* ptr) const { ptr->Release(); }
+ inline void operator()(MediaDecoderJob* ptr) const { ptr->Release(); }
};
// Causes this instance to be deleted on the thread it is bound to.
@@ -68,8 +68,12 @@ class MediaDecoderJob {
// Called on the UI thread to indicate that one decode cycle has completed.
void OnDecodeCompleted();
+ bool is_decoding() const { return is_decoding_; }
+
protected:
- MediaDecoderJob(base::Thread* thread, bool is_audio);
+ MediaDecoderJob(const scoped_refptr<base::MessageLoopProxy>& decoder_loop,
+ MediaCodecBridge* media_codec_bridge,
+ bool is_audio);
// Release the output buffer and render it.
void ReleaseOutputBuffer(
@@ -85,19 +89,19 @@ class MediaDecoderJob {
// flushed at the beginning of this call.
void DecodeInternal(
const MediaPlayerHostMsg_ReadFromDemuxerAck_Params::AccessUnit& unit,
- const base::Time& start_wallclock_time,
+ const base::TimeTicks& start_wallclock_time,
const base::TimeDelta& start_presentation_timestamp,
bool needs_flush,
const MediaDecoderJob::DecoderCallback& callback);
- // The media codec bridge used for decoding.
- scoped_ptr<MediaCodecBridge> media_codec_bridge_;
+ // The UI message loop where callbacks should be dispatched.
+ scoped_refptr<base::MessageLoopProxy> ui_loop_;
- // The message loop where callbacks should be dispatched.
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ // The message loop that decoder job runs on.
+ scoped_refptr<base::MessageLoopProxy> decoder_loop_;
- // Thread the decode task runs on.
- base::Thread* thread_;
+ // The media codec bridge used for decoding.
+ scoped_ptr<MediaCodecBridge> media_codec_bridge_;
// Whether the decoder needs to be flushed.
bool needs_flush_;
@@ -110,7 +114,7 @@ class MediaDecoderJob {
base::WeakPtrFactory<MediaDecoderJob> weak_this_;
// Whether the decoder is actively decoding data.
- bool decoding_;
+ bool is_decoding_;
};
typedef scoped_ptr<MediaDecoderJob, MediaDecoderJob::Deleter>
@@ -154,11 +158,14 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
virtual void ReadFromDemuxerAck(
const MediaPlayerHostMsg_ReadFromDemuxerAck_Params& params) OVERRIDE;
+ // Called when the demuxer has changed the duration.
+ virtual void DurationChanged(const base::TimeDelta& duration) OVERRIDE;
+
private:
// Update the timestamps for A/V sync scheduling.
void UpdateTimestamps(
const base::TimeDelta& presentation_timestamp,
- const base::Time& wallclock_time);
+ const base::TimeTicks& wallclock_time);
// Helper function for starting media playback.
void StartInternal();
@@ -170,14 +177,14 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
void MediaDecoderCallback(
bool is_audio, MediaDecoderJob::DecodeStatus decode_status,
const base::TimeDelta& presentation_timestamp,
- const base::Time& wallclock_time, bool end_of_stream);
+ const base::TimeTicks& wallclock_time, bool end_of_stream);
// Handle pending events when all the decoder jobs finished.
void ProcessPendingEvents();
- // Helper method to create the decoder jobs.
- void CreateVideoDecoderJob();
- void CreateAudioDecoderJob();
+ // Helper method to configure the decoder jobs.
+ void ConfigureVideoDecoderJob();
+ void ConfigureAudioDecoderJob();
// Flush the decoders and clean up all the data needs to be decoded.
void ClearDecodingData();
@@ -190,6 +197,9 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
bool HasVideo();
bool HasAudio();
+ // Determine seekability based on duration.
+ bool Seekable();
+
enum PendingEventFlags {
NO_EVENT_PENDING = 0,
SEEK_EVENT_PENDING = 1 << 0,
@@ -199,9 +209,6 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
// Pending event that the player needs to do.
unsigned pending_event_;
- // Number of active decoding tasks.
- int active_decoding_tasks_;
-
// ID to keep track of whether all the seek requests are acked.
unsigned seek_request_id_;
@@ -213,7 +220,6 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
VideoCodec video_codec_;
int num_channels_;
int sampling_rate_;
- bool seekable_;
base::TimeDelta last_presentation_timestamp_;
std::vector<uint8> audio_extra_data_;
bool audio_finished_;
@@ -226,7 +232,7 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
// should be rendered.
// TODO(qinmin): Need to fix the problem if audio/video lagged too far behind
// due to network or decoding problem.
- base::Time start_wallclock_time_;
+ base::TimeTicks start_wallclock_time_;
base::TimeDelta start_presentation_timestamp_;
// The surface object currently owned by the player.
@@ -252,6 +258,7 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
// Weak pointer passed to media decoder jobs for callbacks.
base::WeakPtrFactory<MediaSourcePlayer> weak_this_;
+ friend class MediaSourcePlayerTest;
DISALLOW_COPY_AND_ASSIGN(MediaSourcePlayer);
};
diff --git a/media/base/android/media_source_player_unittest.cc b/media/base/android/media_source_player_unittest.cc
new file mode 100644
index 0000000000..d139be66e8
--- /dev/null
+++ b/media/base/android/media_source_player_unittest.cc
@@ -0,0 +1,328 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/android/media_codec_bridge.h"
+#include "media/base/android/media_player_manager.h"
+#include "media/base/android/media_source_player.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/test_data_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "ui/gl/android/surface_texture_bridge.h"
+
+namespace media {
+
+// Mock of MediaPlayerManager for testing purposes.
+class MockMediaPlayerManager : public MediaPlayerManager {
+ public:
+ MockMediaPlayerManager() : num_requests_(0), last_seek_request_id_(0) {}
+ virtual ~MockMediaPlayerManager() {};
+
+ // MediaPlayerManager implementation.
+ virtual void RequestMediaResources(int player_id) OVERRIDE {}
+ virtual void ReleaseMediaResources(int player_id) OVERRIDE {}
+ virtual MediaResourceGetter* GetMediaResourceGetter() OVERRIDE {
+ return NULL;
+ }
+ virtual void OnTimeUpdate(int player_id,
+ base::TimeDelta current_time) OVERRIDE {}
+ virtual void OnMediaMetadataChanged(
+ int player_id, base::TimeDelta duration, int width, int height,
+ bool success) OVERRIDE {}
+ virtual void OnPlaybackComplete(int player_id) OVERRIDE {}
+ virtual void OnMediaInterrupted(int player_id) OVERRIDE {}
+ virtual void OnBufferingUpdate(int player_id, int percentage) OVERRIDE {}
+ virtual void OnSeekComplete(int player_id,
+ base::TimeDelta current_time) OVERRIDE {}
+ virtual void OnError(int player_id, int error) OVERRIDE {}
+ virtual void OnVideoSizeChanged(int player_id, int width,
+ int height) OVERRIDE {}
+ virtual MediaPlayerAndroid* GetFullscreenPlayer() OVERRIDE { return NULL; }
+ virtual MediaPlayerAndroid* GetPlayer(int player_id) OVERRIDE { return NULL; }
+ virtual void DestroyAllMediaPlayers() OVERRIDE {}
+ virtual void OnReadFromDemuxer(int player_id, media::DemuxerStream::Type type,
+ bool seek_done) OVERRIDE {
+ num_requests_++;
+ if (message_loop_.is_running())
+ message_loop_.Quit();
+ }
+ virtual void OnMediaSeekRequest(int player_id, base::TimeDelta time_to_seek,
+ unsigned seek_request_id) OVERRIDE {
+ last_seek_request_id_ = seek_request_id;
+ }
+ virtual void OnMediaConfigRequest(int player_id) OVERRIDE {}
+ virtual media::MediaDrmBridge* GetDrmBridge(int media_keys_id) OVERRIDE {
+ return NULL;
+ }
+ virtual void OnKeyAdded(int key_id,
+ const std::string& session_id) OVERRIDE {}
+ virtual void OnKeyError(int key_id,
+ const std::string& session_id,
+ media::MediaKeys::KeyError error_code,
+ int system_code) OVERRIDE {}
+ virtual void OnKeyMessage(int key_id,
+ const std::string& session_id,
+ const std::string& message,
+ const std::string& destination_url) OVERRIDE {}
+
+ int num_requests() const { return num_requests_; }
+ unsigned last_seek_request_id() const { return last_seek_request_id_; }
+ base::MessageLoop* message_loop() { return &message_loop_; }
+
+ private:
+ // The number of requests this object sends for decoding data.
+ int num_requests_;
+ unsigned last_seek_request_id_;
+ base::MessageLoop message_loop_;
+};
+
+class MediaSourcePlayerTest : public testing::Test {
+ public:
+ MediaSourcePlayerTest() {
+ manager_.reset(new MockMediaPlayerManager());
+ player_.reset(new MediaSourcePlayer(0, manager_.get()));
+ }
+ virtual ~MediaSourcePlayerTest() {}
+
+ protected:
+ // Get the decoder job from the MediaSourcePlayer.
+ MediaDecoderJob* GetMediaDecoderJob(bool is_audio) {
+ if (is_audio)
+ return player_->audio_decoder_job_.get();
+ return player_->video_decoder_job_.get();
+ }
+
+ // Starts an audio decoder job.
+ void StartAudioDecoderJob() {
+ MediaPlayerHostMsg_DemuxerReady_Params params;
+ params.audio_codec = kCodecVorbis;
+ params.audio_channels = 2;
+ params.audio_sampling_rate = 44100;
+ params.is_audio_encrypted = false;
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("vorbis-extradata");
+ params.audio_extra_data = std::vector<uint8>(
+ buffer->GetData(),
+ buffer->GetData() + buffer->GetDataSize());
+ Start(params);
+ }
+
+ void StartVideoDecoderJob() {
+ MediaPlayerHostMsg_DemuxerReady_Params params;
+ params.video_codec = kCodecVP8;
+ params.video_size = gfx::Size(320, 240);
+ params.is_video_encrypted = false;
+ Start(params);
+ }
+
+ // Starts decoding the data.
+ void Start(const MediaPlayerHostMsg_DemuxerReady_Params& params) {
+ player_->DemuxerReady(params);
+ player_->Start();
+ }
+
+ MediaPlayerHostMsg_ReadFromDemuxerAck_Params
+ CreateReadFromDemuxerAckForAudio() {
+ MediaPlayerHostMsg_ReadFromDemuxerAck_Params ack_params;
+ ack_params.type = DemuxerStream::AUDIO;
+ ack_params.access_units.resize(1);
+ ack_params.access_units[0].status = DemuxerStream::kOk;
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("vorbis-packet-0");
+ ack_params.access_units[0].data = std::vector<uint8>(
+ buffer->GetData(), buffer->GetData() + buffer->GetDataSize());
+ // Vorbis needs 4 extra bytes padding on Android to decode properly. Check
+ // NuMediaExtractor.cpp in Android source code.
+ uint8 padding[4] = { 0xff , 0xff , 0xff , 0xff };
+ ack_params.access_units[0].data.insert(
+ ack_params.access_units[0].data.end(), padding, padding + 4);
+ return ack_params;
+ }
+
+ protected:
+ scoped_ptr<MockMediaPlayerManager> manager_;
+ scoped_ptr<MediaSourcePlayer> player_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaSourcePlayerTest);
+};
+
+TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithValidConfig) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+ // Test audio decoder job will be created when codec is successfully started.
+ StartAudioDecoderJob();
+ EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
+ EXPECT_EQ(1, manager_->num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithInvalidConfig) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+ // Test audio decoder job will not be created when the codec fails to start.
+ MediaPlayerHostMsg_DemuxerReady_Params params;
+ params.audio_codec = kCodecVorbis;
+ params.audio_channels = 2;
+ params.audio_sampling_rate = 44100;
+ params.is_audio_encrypted = false;
+ uint8 invalid_codec_data[] = { 0x00, 0xff, 0xff, 0xff, 0xff };
+ params.audio_extra_data.insert(params.audio_extra_data.begin(),
+ invalid_codec_data, invalid_codec_data + 4);
+ Start(params);
+ EXPECT_EQ(NULL, GetMediaDecoderJob(true));
+ EXPECT_EQ(0, manager_->num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, StartVideoCodecWithValidSurface) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+ // Test video decoder job will be created when surface is valid.
+ scoped_refptr<gfx::SurfaceTextureBridge> surface_texture(
+ new gfx::SurfaceTextureBridge(0));
+ gfx::ScopedJavaSurface surface(surface_texture.get());
+ StartVideoDecoderJob();
+ // Video decoder job will not be created until surface is available.
+ EXPECT_EQ(NULL, GetMediaDecoderJob(false));
+ EXPECT_EQ(0, manager_->num_requests());
+
+ player_->SetVideoSurface(surface.Pass());
+ EXPECT_EQ(1u, manager_->last_seek_request_id());
+ player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ // The decoder job should be ready now.
+ EXPECT_TRUE(NULL != GetMediaDecoderJob(false));
+ EXPECT_EQ(1, manager_->num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, StartVideoCodecWithInvalidSurface) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+ // Test video decoder job will not be created when the surface is invalid.
+ scoped_refptr<gfx::SurfaceTextureBridge> surface_texture(
+ new gfx::SurfaceTextureBridge(0));
+ gfx::ScopedJavaSurface surface(surface_texture.get());
+ StartVideoDecoderJob();
+ // Video decoder job will not be created until surface is available.
+ EXPECT_EQ(NULL, GetMediaDecoderJob(false));
+ EXPECT_EQ(0, manager_->num_requests());
+
+ // Release the surface texture.
+ surface_texture = NULL;
+ player_->SetVideoSurface(surface.Pass());
+ EXPECT_EQ(1u, manager_->last_seek_request_id());
+ player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ EXPECT_EQ(NULL, GetMediaDecoderJob(false));
+ EXPECT_EQ(0, manager_->num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, ReadFromDemuxerAfterSeek) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+ // Test decoder job will resend a ReadFromDemuxer request after seek.
+ StartAudioDecoderJob();
+ EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
+ EXPECT_EQ(1, manager_->num_requests());
+
+ // Initiate a seek
+ player_->SeekTo(base::TimeDelta());
+ EXPECT_EQ(1u, manager_->last_seek_request_id());
+ // Sending back the seek ACK, this should trigger the player to call
+ // OnReadFromDemuxer() again.
+ player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ EXPECT_EQ(2, manager_->num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, SetSurfaceWhileSeeking) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+ // Test SetVideoSurface() will not cause an extra seek while the player is
+ // waiting for a seek ACK.
+ scoped_refptr<gfx::SurfaceTextureBridge> surface_texture(
+ new gfx::SurfaceTextureBridge(0));
+ gfx::ScopedJavaSurface surface(surface_texture.get());
+ StartVideoDecoderJob();
+ // Player is still waiting for SetVideoSurface(), so no request is sent.
+ EXPECT_EQ(0, manager_->num_requests());
+ player_->SeekTo(base::TimeDelta());
+ EXPECT_EQ(1u, manager_->last_seek_request_id());
+
+ player_->SetVideoSurface(surface.Pass());
+ EXPECT_TRUE(NULL == GetMediaDecoderJob(false));
+ EXPECT_EQ(1u, manager_->last_seek_request_id());
+
+ // Send the seek ack, player should start requesting data afterwards.
+ player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ EXPECT_TRUE(NULL != GetMediaDecoderJob(false));
+ EXPECT_EQ(1, manager_->num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, StartAfterSeekFinish) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+ // Test decoder job will not start until all pending seek events are handled.
+
+ MediaPlayerHostMsg_DemuxerReady_Params params;
+ params.audio_codec = kCodecVorbis;
+ params.audio_channels = 2;
+ params.audio_sampling_rate = 44100;
+ params.is_audio_encrypted = false;
+ player_->DemuxerReady(params);
+ EXPECT_EQ(NULL, GetMediaDecoderJob(true));
+ EXPECT_EQ(0, manager_->num_requests());
+
+ // Initiate a seek
+ player_->SeekTo(base::TimeDelta());
+ EXPECT_EQ(1u, manager_->last_seek_request_id());
+
+ player_->Start();
+ EXPECT_EQ(NULL, GetMediaDecoderJob(true));
+ EXPECT_EQ(0, manager_->num_requests());
+
+ // Sending back the seek ACK.
+ player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
+ EXPECT_EQ(1, manager_->num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, StartImmediatelyAfterPause) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+ // Test that if the decoding job is not fully stopped after Pause(),
+ // calling Start() will be a noop.
+ StartAudioDecoderJob();
+
+ MediaDecoderJob* decoder_job = GetMediaDecoderJob(true);
+ EXPECT_TRUE(NULL != decoder_job);
+ EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
+
+ // Sending data to player.
+ player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForAudio());
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+
+ // Decoder job will not immediately stop after Pause() since it is
+ // running on another thread.
+ player_->Pause();
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+
+ // Nothing happens when calling Start() again.
+ player_->Start();
+ // Verify that Start() will not destroy and recreate the decoder job.
+ EXPECT_EQ(decoder_job, GetMediaDecoderJob(true));
+ EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+ manager_->message_loop()->Run();
+ // The decoder job should finish and a new request will be sent.
+ EXPECT_EQ(2, manager_->num_requests());
+ EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
+}
+
+} // namespace media
diff --git a/media/base/audio_buffer.cc b/media/base/audio_buffer.cc
new file mode 100644
index 0000000000..a612a57746
--- /dev/null
+++ b/media/base/audio_buffer.cc
@@ -0,0 +1,171 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_buffer.h"
+
+#include "base/logging.h"
+#include "media/base/audio_bus.h"
+#include "media/base/buffers.h"
+#include "media/base/limits.h"
+
+namespace media {
+
+// Alignment of each channel's data; use 8-byte alignment as that is bigger
+// than maximum size of a sample, and the minimum alignment.
+enum { kChannelAlignment = 8 };
+
+AudioBuffer::AudioBuffer(SampleFormat sample_format,
+ int channel_count,
+ int frame_count,
+ const uint8* const* data,
+ const base::TimeDelta timestamp,
+ const base::TimeDelta duration)
+ : sample_format_(sample_format),
+ channel_count_(channel_count),
+ frame_count_(frame_count),
+ timestamp_(timestamp),
+ duration_(duration) {
+ CHECK_GE(channel_count, 0);
+ CHECK_LE(channel_count, limits::kMaxChannels);
+ CHECK_GE(frame_count, 0);
+ int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format);
+ DCHECK_LE(bytes_per_channel, kChannelAlignment);
+ int data_size = frame_count * bytes_per_channel;
+
+ // Empty buffer?
+ if (!data) {
+ CHECK_EQ(frame_count, 0);
+ return;
+ }
+
+ if (sample_format == kSampleFormatPlanarF32 ||
+ sample_format == kSampleFormatPlanarS16) {
+ // Planar data, so need to allocate buffer for each channel.
+ // Determine per channel data size, taking into account alignment.
+ int block_size_per_channel =
+ (data_size + kChannelAlignment - 1) & ~(kChannelAlignment - 1);
+ DCHECK_GE(block_size_per_channel, data_size);
+
+ // Allocate a contiguous buffer for all the channel data.
+ data_.reset(static_cast<uint8*>(base::AlignedAlloc(
+ channel_count * block_size_per_channel, kChannelAlignment)));
+ channel_data_.reserve(channel_count);
+
+ // Copy each channel's data into the appropriate spot.
+ for (int i = 0; i < channel_count; ++i) {
+ channel_data_.push_back(data_.get() + i * block_size_per_channel);
+ memcpy(channel_data_[i], data[i], data_size);
+ }
+ return;
+ }
+
+ // Remaining formats are interleaved data.
+ DCHECK(sample_format_ == kSampleFormatU8 ||
+ sample_format_ == kSampleFormatS16 ||
+ sample_format_ == kSampleFormatS32 ||
+ sample_format_ == kSampleFormatF32) << sample_format_;
+ // Allocate our own buffer and copy the supplied data into it. Buffer must
+ // contain the data for all channels.
+ data_size *= channel_count;
+ data_.reset(
+ static_cast<uint8*>(base::AlignedAlloc(data_size, kChannelAlignment)));
+ memcpy(data_.get(), data[0], data_size);
+}
+
+AudioBuffer::~AudioBuffer() {}
+
+// static
+scoped_refptr<AudioBuffer> AudioBuffer::CopyFrom(
+ SampleFormat sample_format,
+ int channel_count,
+ int frame_count,
+ const uint8* const* data,
+ const base::TimeDelta timestamp,
+ const base::TimeDelta duration) {
+ // If you hit this CHECK you likely have a bug in a demuxer. Go fix it.
+ CHECK(data[0]);
+ return make_scoped_refptr(new AudioBuffer(
+ sample_format, channel_count, frame_count, data, timestamp, duration));
+}
+
+// static
+scoped_refptr<AudioBuffer> AudioBuffer::CreateEOSBuffer() {
+ return make_scoped_refptr(new AudioBuffer(
+ kUnknownSampleFormat, 1, 0, NULL, kNoTimestamp(), kNoTimestamp()));
+}
+
+// Convert int16 values in the range [kint16min, kint16max] to [-1.0, 1.0].
+static inline float ConvertS16ToFloat(int16 value) {
+ return value * (value < 0 ? -1.0f / kint16min : 1.0f / kint16max);
+}
+
+void AudioBuffer::ReadFrames(int frames_to_copy,
+ int source_frame_offset,
+ int dest_frame_offset,
+ AudioBus* dest) {
+ // Deinterleave each channel (if necessary) and convert to 32bit
+ // floating-point with nominal range -1.0 -> +1.0 (if necessary).
+
+ // |dest| must have the same number of channels, and the number of frames
+ // specified must be in range.
+ DCHECK(!end_of_stream());
+ DCHECK_EQ(dest->channels(), channel_count_);
+ DCHECK_LE(source_frame_offset + frames_to_copy, frame_count_);
+ DCHECK_LE(dest_frame_offset + frames_to_copy, dest->frames());
+
+ if (sample_format_ == kSampleFormatPlanarF32) {
+ // Format is planar float32. Copy the data from each channel as a block.
+ for (int ch = 0; ch < channel_count_; ++ch) {
+ const float* source_data =
+ reinterpret_cast<const float*>(channel_data_[ch]) +
+ source_frame_offset;
+ memcpy(dest->channel(ch) + dest_frame_offset,
+ source_data,
+ sizeof(float) * frames_to_copy);
+ }
+ return;
+ }
+
+ if (sample_format_ == kSampleFormatPlanarS16) {
+ // Format is planar signed16. Convert each value into float and insert into
+ // output channel data.
+ for (int ch = 0; ch < channel_count_; ++ch) {
+ const int16* source_data =
+ reinterpret_cast<const int16*>(channel_data_[ch]) +
+ source_frame_offset;
+ float* dest_data = dest->channel(ch) + dest_frame_offset;
+ for (int i = 0; i < frames_to_copy; ++i) {
+ dest_data[i] = ConvertS16ToFloat(source_data[i]);
+ }
+ }
+ return;
+ }
+
+ if (sample_format_ == kSampleFormatF32) {
+ // Format is interleaved float32. Copy the data into each channel.
+ const float* source_data = reinterpret_cast<const float*>(data_.get()) +
+ source_frame_offset * channel_count_;
+ for (int ch = 0; ch < channel_count_; ++ch) {
+ float* dest_data = dest->channel(ch) + dest_frame_offset;
+ for (int i = 0, offset = ch; i < frames_to_copy;
+ ++i, offset += channel_count_) {
+ dest_data[i] = source_data[offset];
+ }
+ }
+ return;
+ }
+
+ // Remaining formats are integer interleaved data. Use the deinterleaving code
+ // in AudioBus to copy the data.
+ DCHECK(sample_format_ == kSampleFormatU8 ||
+ sample_format_ == kSampleFormatS16 ||
+ sample_format_ == kSampleFormatS32);
+ int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format_);
+ int frame_size = channel_count_ * bytes_per_channel;
+ const uint8* source_data = data_.get() + source_frame_offset * frame_size;
+ dest->FromInterleavedPartial(
+ source_data, dest_frame_offset, frames_to_copy, bytes_per_channel);
+}
+
+} // namespace media
diff --git a/media/base/audio_buffer.h b/media/base/audio_buffer.h
new file mode 100644
index 0000000000..f0ca772e0c
--- /dev/null
+++ b/media/base/audio_buffer.h
@@ -0,0 +1,98 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_BUFFER_H_
+#define MEDIA_BASE_AUDIO_BUFFER_H_
+
+#include <vector>
+
+#include "base/memory/aligned_memory.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time.h"
+#include "media/base/media_export.h"
+#include "media/base/sample_format.h"
+
+namespace media {
+class AudioBus;
+
+// An audio buffer that takes a copy of the data passed to it, holds it, and
+// copies it into an AudioBus when needed. Also supports an end of stream
+// marker.
+class MEDIA_EXPORT AudioBuffer
+ : public base::RefCountedThreadSafe<AudioBuffer> {
+ public:
+ // Create an AudioBuffer whose channel data is copied from |data|. For
+ // interleaved data, only the first buffer is used. For planar data, the
+ // number of buffers must be equal to |channel_count|. |frame_count| is the
+ // number of frames in each buffer. |data| must not be null and |frame_count|
+ // must be >= 0.
+ //
+ // TODO(jrummell): Compute duration rather than pass it in.
+ static scoped_refptr<AudioBuffer> CopyFrom(SampleFormat sample_format,
+ int channel_count,
+ int frame_count,
+ const uint8* const* data,
+ const base::TimeDelta timestamp,
+ const base::TimeDelta duration);
+
+ // Create an AudioBuffer indicating we've reached end of stream.
+ // Calling any method other than end_of_stream() on the resulting buffer
+ // is disallowed.
+ static scoped_refptr<AudioBuffer> CreateEOSBuffer();
+
+ // Copy frames into |dest|. |frames_to_copy| is the number of frames to copy.
+ // |source_frame_offset| specified how many frames in the buffer to skip
+ // first. |dest_frame_offset| is the frame offset in |dest|. The frames are
+ // converted from their source format into planar float32 data (which is all
+ // that AudioBus handles).
+ void ReadFrames(int frames_to_copy,
+ int source_frame_offset,
+ int dest_frame_offset,
+ AudioBus* dest);
+
+ // Return the number of frames held.
+ int frame_count() const { return frame_count_; }
+
+ // Access to constructor parameters.
+ base::TimeDelta timestamp() const { return timestamp_; }
+ base::TimeDelta duration() const { return duration_; }
+
+ // If there's no data in this buffer, it represents end of stream.
+ bool end_of_stream() const { return data_ == NULL; }
+
+ private:
+ friend class base::RefCountedThreadSafe<AudioBuffer>;
+
+ // Allocates aligned contiguous buffer to hold all channel data (1 block for
+ // interleaved data, |channel_count| blocks for planar data), copies
+ // [data,data+data_size) to the allocated buffer(s). If |data| is null an end
+ // of stream buffer is created.
+ AudioBuffer(SampleFormat sample_format,
+ int channel_count,
+ int frame_count,
+ const uint8* const* data,
+ const base::TimeDelta timestamp,
+ const base::TimeDelta duration);
+
+ virtual ~AudioBuffer();
+
+ SampleFormat sample_format_;
+ int channel_count_;
+ int frame_count_;
+ base::TimeDelta timestamp_;
+ base::TimeDelta duration_;
+
+ // Contiguous block of channel data.
+ scoped_ptr_malloc<uint8, base::ScopedPtrAlignedFree> data_;
+
+ // For planar data, points to each channels data.
+ std::vector<uint8*> channel_data_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AudioBuffer);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_BUFFER_H_
diff --git a/media/base/audio_buffer_unittest.cc b/media/base/audio_buffer_unittest.cc
new file mode 100644
index 0000000000..f4f9ebf9db
--- /dev/null
+++ b/media/base/audio_buffer_unittest.cc
@@ -0,0 +1,272 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_bus.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+template <class T>
+static scoped_refptr<AudioBuffer> MakeInterleavedBuffer(
+ SampleFormat format,
+ int channels,
+ T start,
+ T increment,
+ int frames,
+ const base::TimeDelta start_time) {
+ DCHECK(format == kSampleFormatU8 || format == kSampleFormatS16 ||
+ format == kSampleFormatS32 || format == kSampleFormatF32);
+
+ // Create a block of memory with values:
+ // start
+ // start + increment
+ // start + 2 * increment, ...
+ // Since this is interleaved data, channel 0 data will be:
+ // start
+ // start + channels * increment
+ // start + 2 * channels * increment, ...
+ int buffer_size = frames * channels * sizeof(T);
+ uint8* memory = new uint8[buffer_size];
+ uint8* data[] = { memory };
+ T* buffer = reinterpret_cast<T*>(memory);
+ for (int i = 0; i < frames * channels; ++i) {
+ buffer[i] = start;
+ start += increment;
+ }
+ // Duration is 1 second per frame (for simplicity).
+ base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ return AudioBuffer::CopyFrom(
+ format, channels, frames, data, start_time, duration);
+}
+
+template <class T>
+static scoped_refptr<AudioBuffer> MakePlanarBuffer(
+ SampleFormat format,
+ int channels,
+ T start,
+ T increment,
+ int frames,
+ const base::TimeDelta start_time) {
+ DCHECK(format == kSampleFormatPlanarF32 || format == kSampleFormatPlanarS16);
+
+ // Create multiple blocks of data, once for each channel.
+ // Values in channel 0 will be:
+ // start
+ // start + increment
+ // start + 2 * increment, ...
+ // Values in channel 1 will be:
+ // start + frames * increment
+ // start + (frames + 1) * increment
+ // start + (frames + 2) * increment, ...
+ uint8** data = new uint8*[channels];
+ int buffer_size = frames * sizeof(T);
+ for (int i = 0; i < channels; ++i) {
+ uint8* memory = new uint8[buffer_size];
+ data[i] = memory;
+ T* buffer = reinterpret_cast<T*>(memory);
+ for (int j = 0; j < frames; ++j) {
+ buffer[j] = start;
+ start += increment;
+ }
+ }
+ // Duration is 1 second per frame (for simplicity).
+ base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ return AudioBuffer::CopyFrom(
+ format, channels, frames, data, start_time, duration);
+}
+
+static void VerifyResult(float* channel_data,
+ int frames,
+ float start,
+ float increment) {
+ for (int i = 0; i < frames; ++i) {
+ SCOPED_TRACE(base::StringPrintf(
+ "i=%d/%d start=%f, increment=%f", i, frames, start, increment));
+ ASSERT_EQ(channel_data[i], start);
+ start += increment;
+ }
+}
+
+TEST(AudioBufferTest, CopyFrom) {
+ const int channels = 1;
+ const int frames = 8;
+ const base::TimeDelta start_time;
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedBuffer<uint8>(
+ kSampleFormatU8, channels, 1, 1, frames, start_time);
+ EXPECT_EQ(frames, buffer->frame_count());
+ EXPECT_EQ(buffer->timestamp(), start_time);
+ EXPECT_EQ(buffer->duration().InSeconds(), frames);
+ EXPECT_FALSE(buffer->end_of_stream());
+}
+
+TEST(AudioBufferTest, CreateEOSBuffer) {
+ scoped_refptr<AudioBuffer> buffer = AudioBuffer::CreateEOSBuffer();
+ EXPECT_TRUE(buffer->end_of_stream());
+}
+
+TEST(AudioBufferTest, FrameSize) {
+ const uint8 kTestData[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31 };
+ const base::TimeDelta kTimestampA = base::TimeDelta::FromMicroseconds(1337);
+ const base::TimeDelta kTimestampB = base::TimeDelta::FromMicroseconds(1234);
+
+ const uint8* const data[] = { kTestData };
+ scoped_refptr<AudioBuffer> buffer = AudioBuffer::CopyFrom(
+ kSampleFormatU8, 2, 16, data, kTimestampA, kTimestampB);
+ EXPECT_EQ(16, buffer->frame_count()); // 2 channels of 8-bit data
+
+ buffer = AudioBuffer::CopyFrom(
+ kSampleFormatF32, 4, 2, data, kTimestampA, kTimestampB);
+ EXPECT_EQ(2, buffer->frame_count()); // now 4 channels of 32-bit data
+}
+
+TEST(AudioBufferTest, ReadU8) {
+ const int channels = 4;
+ const int frames = 4;
+ const base::TimeDelta start_time;
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedBuffer<uint8>(
+ kSampleFormatU8, channels, 128, 1, frames, start_time);
+
+ // Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
+ // 128, 132, 136, 140, other channels similar. However, values are converted
+ // from [0, 255] to [-1.0, 1.0] with a bias of 128. Thus the first buffer
+ // value should be 0.0, then 1/127, 2/127, etc.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(frames, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), frames, 0.0f, 4.0f / 127.0f);
+ VerifyResult(bus->channel(1), frames, 1.0f / 127.0f, 4.0f / 127.0f);
+ VerifyResult(bus->channel(2), frames, 2.0f / 127.0f, 4.0f / 127.0f);
+ VerifyResult(bus->channel(3), frames, 3.0f / 127.0f, 4.0f / 127.0f);
+}
+
+TEST(AudioBufferTest, ReadS16) {
+ const int channels = 2;
+ const int frames = 10;
+ const base::TimeDelta start_time;
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedBuffer<int16>(
+ kSampleFormatS16, channels, 1, 1, frames, start_time);
+
+ // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
+ // 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12. Data is converted
+ // to float from -1.0 to 1.0 based on int16 range.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(6, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), 6, 1.0f / kint16max, 2.0f / kint16max);
+ VerifyResult(bus->channel(1), 6, 2.0f / kint16max, 2.0f / kint16max);
+
+ // Now read the same data one frame at a time.
+ bus = AudioBus::Create(channels, 100);
+ for (int i = 0; i < frames; ++i) {
+ buffer->ReadFrames(1, i, i, bus.get());
+ }
+ VerifyResult(bus->channel(0), frames, 1.0f / kint16max, 2.0f / kint16max);
+ VerifyResult(bus->channel(1), frames, 2.0f / kint16max, 2.0f / kint16max);
+}
+
+TEST(AudioBufferTest, ReadS32) {
+ const int channels = 2;
+ const int frames = 6;
+ const base::TimeDelta start_time;
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedBuffer<int32>(
+ kSampleFormatS32, channels, 1, 1, frames, start_time);
+
+ // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
+ // 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12. Data is converted
+ // to float from -1.0 to 1.0 based on int32 range.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(frames, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), frames, 1.0f / kint32max, 2.0f / kint32max);
+ VerifyResult(bus->channel(1), frames, 2.0f / kint32max, 2.0f / kint32max);
+
+ // Now read 2 frames starting at frame offset 3. ch[0] should be 7, 9, and
+ // ch[1] should be 8, 10.
+ buffer->ReadFrames(2, 3, 0, bus.get());
+ VerifyResult(bus->channel(0), 2, 7.0f / kint32max, 2.0f / kint32max);
+ VerifyResult(bus->channel(1), 2, 8.0f / kint32max, 2.0f / kint32max);
+}
+
+TEST(AudioBufferTest, ReadF32) {
+ const int channels = 2;
+ const int frames = 20;
+ const base::TimeDelta start_time;
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedBuffer<float>(
+ kSampleFormatF32, channels, 1.0f, 1.0f, frames, start_time);
+
+ // Read first 10 frames from the buffer. F32 is interleaved, so ch[0] should
+ // be 1, 3, 5, ... and ch[1] should be 2, 4, 6, ...
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(10, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), 10, 1.0f, 2.0f);
+ VerifyResult(bus->channel(1), 10, 2.0f, 2.0f);
+
+ // Read second 10 frames.
+ bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(10, 10, 0, bus.get());
+ VerifyResult(bus->channel(0), 10, 21.0f, 2.0f);
+ VerifyResult(bus->channel(1), 10, 22.0f, 2.0f);
+}
+
+TEST(AudioBufferTest, ReadS16Planar) {
+ const int channels = 2;
+ const int frames = 20;
+ const base::TimeDelta start_time;
+ scoped_refptr<AudioBuffer> buffer = MakePlanarBuffer<int16>(
+ kSampleFormatPlanarS16, channels, 1, 1, frames, start_time);
+
+ // Read 6 frames from the buffer. Data is planar, so ch[0] should be 1, 2, 3,
+ // 4, 5, 6, and ch[1] should be 21, 22, 23, 24, 25, 26. Data is converted to
+ // float from -1.0 to 1.0 based on int16 range.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(6, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), 6, 1.0f / kint16max, 1.0f / kint16max);
+ VerifyResult(bus->channel(1), 6, 21.0f / kint16max, 1.0f / kint16max);
+
+ // Read all the frames backwards, one by one. ch[0] should be 20, 19, ...
+ bus = AudioBus::Create(channels, 100);
+ for (int i = 0; i < frames; ++i) {
+ buffer->ReadFrames(1, frames - i - 1, i, bus.get());
+ }
+ VerifyResult(bus->channel(0), frames, 20.0f / kint16max, -1.0f / kint16max);
+ VerifyResult(bus->channel(1), frames, 40.0f / kint16max, -1.0f / kint16max);
+
+ // Read 0 frames with different offsets. Existing data in AudioBus should be
+ // unchanged.
+ buffer->ReadFrames(0, 0, 0, bus.get());
+ buffer->ReadFrames(0, 0, 10, bus.get());
+ buffer->ReadFrames(0, 10, 0, bus.get());
+ VerifyResult(bus->channel(0), frames, 20.0f / kint16max, -1.0f / kint16max);
+ VerifyResult(bus->channel(1), frames, 40.0f / kint16max, -1.0f / kint16max);
+}
+
+TEST(AudioBufferTest, ReadF32Planar) {
+ const int channels = 4;
+ const int frames = 100;
+ const base::TimeDelta start_time;
+ scoped_refptr<AudioBuffer> buffer = MakePlanarBuffer<float>(
+ kSampleFormatPlanarF32, channels, 1.0f, 1.0f, frames, start_time);
+
+ // Read all 100 frames from the buffer. F32 is planar, so ch[0] should be 1,
+ // 2, 3, 4, ..., ch[1] should be 101, 102, 103, ..., and so on for all 4
+ // channels.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(frames, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), frames, 1.0f, 1.0f);
+ VerifyResult(bus->channel(1), frames, 101.0f, 1.0f);
+ VerifyResult(bus->channel(2), frames, 201.0f, 1.0f);
+ VerifyResult(bus->channel(3), frames, 301.0f, 1.0f);
+
+ // Now read 20 frames from the middle of the buffer.
+ bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(20, 50, 0, bus.get());
+ VerifyResult(bus->channel(0), 20, 51.0f, 1.0f);
+ VerifyResult(bus->channel(1), 20, 151.0f, 1.0f);
+ VerifyResult(bus->channel(2), 20, 251.0f, 1.0f);
+ VerifyResult(bus->channel(3), 20, 351.0f, 1.0f);
+}
+
+} // namespace media
diff --git a/media/base/audio_decoder_config.cc b/media/base/audio_decoder_config.cc
index a4e57c2df4..38db05d3a5 100644
--- a/media/base/audio_decoder_config.cc
+++ b/media/base/audio_decoder_config.cc
@@ -8,34 +8,14 @@
#include "base/metrics/histogram.h"
#include "media/audio/sample_rates.h"
#include "media/base/limits.h"
+#include "media/base/sample_format.h"
namespace media {
-static int SampleFormatToBitsPerChannel(SampleFormat sample_format) {
- switch (sample_format) {
- case kUnknownSampleFormat:
- return 0;
- case kSampleFormatU8:
- return 8;
- case kSampleFormatS16:
- case kSampleFormatPlanarS16:
- return 16;
- case kSampleFormatS32:
- case kSampleFormatF32:
- case kSampleFormatPlanarF32:
- return 32;
- case kSampleFormatMax:
- break;
- }
-
- NOTREACHED() << "Invalid sample format provided: " << sample_format;
- return 0;
-}
-
AudioDecoderConfig::AudioDecoderConfig()
: codec_(kUnknownAudioCodec),
sample_format_(kUnknownSampleFormat),
- bits_per_channel_(0),
+ bytes_per_channel_(0),
channel_layout_(CHANNEL_LAYOUT_UNSUPPORTED),
samples_per_second_(0),
bytes_per_frame_(0),
@@ -83,12 +63,12 @@ void AudioDecoderConfig::Initialize(AudioCodec codec,
channel_layout_ = channel_layout;
samples_per_second_ = samples_per_second;
sample_format_ = sample_format;
- bits_per_channel_ = SampleFormatToBitsPerChannel(sample_format);
+ bytes_per_channel_ = SampleFormatToBytesPerChannel(sample_format);
extra_data_.assign(extra_data, extra_data + extra_data_size);
is_encrypted_ = is_encrypted;
int channels = ChannelLayoutToChannelCount(channel_layout_);
- bytes_per_frame_ = channels * bits_per_channel_ / 8;
+ bytes_per_frame_ = channels * bytes_per_channel_;
}
AudioDecoderConfig::~AudioDecoderConfig() {}
@@ -96,8 +76,8 @@ AudioDecoderConfig::~AudioDecoderConfig() {}
bool AudioDecoderConfig::IsValidConfig() const {
return codec_ != kUnknownAudioCodec &&
channel_layout_ != CHANNEL_LAYOUT_UNSUPPORTED &&
- bits_per_channel_ > 0 &&
- bits_per_channel_ <= limits::kMaxBitsPerSample &&
+ bytes_per_channel_ > 0 &&
+ bytes_per_channel_ <= limits::kMaxBytesPerSample &&
samples_per_second_ > 0 &&
samples_per_second_ <= limits::kMaxSampleRate &&
sample_format_ != kUnknownSampleFormat;
@@ -105,7 +85,7 @@ bool AudioDecoderConfig::IsValidConfig() const {
bool AudioDecoderConfig::Matches(const AudioDecoderConfig& config) const {
return ((codec() == config.codec()) &&
- (bits_per_channel() == config.bits_per_channel()) &&
+ (bytes_per_channel() == config.bytes_per_channel()) &&
(channel_layout() == config.channel_layout()) &&
(samples_per_second() == config.samples_per_second()) &&
(extra_data_size() == config.extra_data_size()) &&
diff --git a/media/base/audio_decoder_config.h b/media/base/audio_decoder_config.h
index 5b886e0da4..1c61e70c3a 100644
--- a/media/base/audio_decoder_config.h
+++ b/media/base/audio_decoder_config.h
@@ -10,6 +10,7 @@
#include "base/basictypes.h"
#include "media/base/channel_layout.h"
#include "media/base/media_export.h"
+#include "media/base/sample_format.h"
namespace media {
@@ -40,22 +41,6 @@ enum AudioCodec {
kAudioCodecMax
};
-enum SampleFormat {
- // These values are histogrammed over time; do not change their ordinal
- // values. When deleting a sample format replace it with a dummy value; when
- // adding a sample format, do so at the bottom before kSampleFormatMax.
- kUnknownSampleFormat = 0,
- kSampleFormatU8, // Unsigned 8-bit w/ bias of 128.
- kSampleFormatS16, // Signed 16-bit.
- kSampleFormatS32, // Signed 32-bit.
- kSampleFormatF32, // Float 32-bit.
- kSampleFormatPlanarS16, // Signed 16-bit planar.
- kSampleFormatPlanarF32, // Float 32-bit planar.
-
- // Must always be last!
- kSampleFormatMax
-};
-
// TODO(dalecurtis): FFmpeg API uses |bytes_per_channel| instead of
// |bits_per_channel|, we should switch over since bits are generally confusing
// to work with.
@@ -89,7 +74,8 @@ class MEDIA_EXPORT AudioDecoderConfig {
bool Matches(const AudioDecoderConfig& config) const;
AudioCodec codec() const { return codec_; }
- int bits_per_channel() const { return bits_per_channel_; }
+ int bits_per_channel() const { return bytes_per_channel_ * 8; }
+ int bytes_per_channel() const { return bytes_per_channel_; }
ChannelLayout channel_layout() const { return channel_layout_; }
int samples_per_second() const { return samples_per_second_; }
SampleFormat sample_format() const { return sample_format_; }
@@ -110,7 +96,7 @@ class MEDIA_EXPORT AudioDecoderConfig {
private:
AudioCodec codec_;
SampleFormat sample_format_;
- int bits_per_channel_;
+ int bytes_per_channel_;
ChannelLayout channel_layout_;
int samples_per_second_;
int bytes_per_frame_;
diff --git a/media/base/audio_renderer_mixer.cc b/media/base/audio_renderer_mixer.cc
index 7db9b3f64c..11b1211026 100644
--- a/media/base/audio_renderer_mixer.cc
+++ b/media/base/audio_renderer_mixer.cc
@@ -18,7 +18,7 @@ AudioRendererMixer::AudioRendererMixer(
: audio_sink_(sink),
audio_converter_(input_params, output_params, true),
pause_delay_(base::TimeDelta::FromSeconds(kPauseDelaySeconds)),
- last_play_time_(base::Time::Now()),
+ last_play_time_(base::TimeTicks::Now()),
// Initialize |playing_| to true since Start() results in an auto-play.
playing_(true) {
audio_sink_->Initialize(output_params, this);
@@ -40,7 +40,7 @@ void AudioRendererMixer::AddMixerInput(AudioConverter::InputCallback* input,
if (!playing_) {
playing_ = true;
- last_play_time_ = base::Time::Now();
+ last_play_time_ = base::TimeTicks::Now();
audio_sink_->Play();
}
@@ -65,7 +65,7 @@ int AudioRendererMixer::Render(AudioBus* audio_bus,
// If there are no mixer inputs and we haven't seen one for a while, pause the
// sink to avoid wasting resources when media elements are present but remain
// in the pause state.
- base::Time now = base::Time::Now();
+ const base::TimeTicks now = base::TimeTicks::Now();
if (!mixer_inputs_.empty()) {
last_play_time_ = now;
} else if (now - last_play_time_ >= pause_delay_ && playing_) {
diff --git a/media/base/audio_renderer_mixer.h b/media/base/audio_renderer_mixer.h
index 6168064e3c..943d77969e 100644
--- a/media/base/audio_renderer_mixer.h
+++ b/media/base/audio_renderer_mixer.h
@@ -56,7 +56,7 @@ class MEDIA_EXPORT AudioRendererMixer
// Handles physical stream pause when no inputs are playing. For latency
// reasons we don't want to immediately pause the physical stream.
base::TimeDelta pause_delay_;
- base::Time last_play_time_;
+ base::TimeTicks last_play_time_;
bool playing_;
DISALLOW_COPY_AND_ASSIGN(AudioRendererMixer);
diff --git a/media/base/audio_renderer_mixer_unittest.cc b/media/base/audio_renderer_mixer_unittest.cc
index a0a34f3f47..8853068335 100644
--- a/media/base/audio_renderer_mixer_unittest.cc
+++ b/media/base/audio_renderer_mixer_unittest.cc
@@ -421,11 +421,11 @@ TEST_P(AudioRendererMixerBehavioralTest, MixerPausesStream) {
// Ensure never playing the input results in a sink pause.
const base::TimeDelta kSleepTime = base::TimeDelta::FromMilliseconds(100);
- base::Time start_time = base::Time::Now();
+ base::TimeTicks start_time = base::TimeTicks::Now();
while (!pause_event.IsSignaled()) {
mixer_callback_->Render(audio_bus_.get(), 0);
base::PlatformThread::Sleep(kSleepTime);
- ASSERT_TRUE(base::Time::Now() - start_time < kTestTimeout);
+ ASSERT_TRUE(base::TimeTicks::Now() - start_time < kTestTimeout);
}
pause_event.Reset();
@@ -436,11 +436,11 @@ TEST_P(AudioRendererMixerBehavioralTest, MixerPausesStream) {
mixer_inputs_[0]->Pause();
// Ensure once the input is paused the sink eventually pauses.
- start_time = base::Time::Now();
+ start_time = base::TimeTicks::Now();
while (!pause_event.IsSignaled()) {
mixer_callback_->Render(audio_bus_.get(), 0);
base::PlatformThread::Sleep(kSleepTime);
- ASSERT_TRUE(base::Time::Now() - start_time < kTestTimeout);
+ ASSERT_TRUE(base::TimeTicks::Now() - start_time < kTestTimeout);
}
mixer_inputs_[0]->Stop();
diff --git a/media/base/clock.cc b/media/base/clock.cc
index 733c5fe28f..ea954834e9 100644
--- a/media/base/clock.cc
+++ b/media/base/clock.cc
@@ -7,12 +7,12 @@
#include <algorithm>
#include "base/logging.h"
-#include "base/time/clock.h"
+#include "base/time/tick_clock.h"
#include "media/base/buffers.h"
namespace media {
-Clock::Clock(base::Clock* clock) : clock_(clock) {
+Clock::Clock(base::TickClock* clock) : clock_(clock) {
DCHECK(clock_);
Reset();
}
@@ -89,7 +89,8 @@ void Clock::SetDuration(base::TimeDelta duration) {
max_time_ = ClampToValidTimeRange(max_time_);
}
-base::TimeDelta Clock::ElapsedViaProvidedTime(const base::Time& time) const {
+base::TimeDelta Clock::ElapsedViaProvidedTime(
+ const base::TimeTicks& time) const {
// TODO(scherkus): floating point badness scaling time by playback rate.
int64 now_us = (time - reference_).InMicroseconds();
now_us = static_cast<int64>(now_us * playback_rate_);
@@ -119,11 +120,11 @@ void Clock::UpdateReferencePoints() {
void Clock::UpdateReferencePoints(base::TimeDelta current_time) {
media_time_ = ClampToValidTimeRange(current_time);
- reference_ = clock_->Now();
+ reference_ = clock_->NowTicks();
}
base::TimeDelta Clock::EstimatedElapsedTime() {
- return ClampToValidTimeRange(ElapsedViaProvidedTime(clock_->Now()));
+ return ClampToValidTimeRange(ElapsedViaProvidedTime(clock_->NowTicks()));
}
void Clock::Reset() {
@@ -132,7 +133,7 @@ void Clock::Reset() {
max_time_ = kNoTimestamp();
duration_ = kNoTimestamp();
media_time_ = base::TimeDelta();
- reference_ = base::Time();
+ reference_ = base::TimeTicks();
underflow_ = false;
}
diff --git a/media/base/clock.h b/media/base/clock.h
index 267666f668..6a86106c17 100644
--- a/media/base/clock.h
+++ b/media/base/clock.h
@@ -10,16 +10,16 @@
#include "media/base/media_export.h"
namespace base {
-class Clock;
+class TickClock;
} // namespace base
namespace media {
// A clock represents a single source of time to allow audio and video streams
// to synchronize with each other. Clock essentially tracks the media time with
-// respect to some other source of time, whether that may be the system clock or
-// updates via SetTime(). Clock uses linear interpolation to calculate the
-// current media time since the last time SetTime() was called.
+// respect to some other source of time, whether that may be the monotonic
+// system clock or updates via SetTime(). Clock uses linear interpolation to
+// calculate the current media time since the last time SetTime() was called.
//
// Clocks start off paused with a playback rate of 1.0f and a media time of 0.
//
@@ -28,9 +28,12 @@ namespace media {
// TODO(scherkus): Clock will some day be responsible for executing callbacks
// given a media time. This will be used primarily by video renderers. For now
// we'll keep using a poll-and-sleep solution.
+//
+// TODO(miu): Rename media::Clock to avoid confusion (and tripping up the media
+// PRESUBMIT script on future changes).
class MEDIA_EXPORT Clock {
public:
- explicit Clock(base::Clock* clock);
+ explicit Clock(base::TickClock* clock);
~Clock();
// Returns true if the clock is running.
@@ -88,13 +91,13 @@ class MEDIA_EXPORT Clock {
// the |max_time_| cap.
base::TimeDelta EstimatedElapsedTime();
- // Returns the current media time treating the given time as the latest
- // value as returned by |time_provider_|.
- base::TimeDelta ElapsedViaProvidedTime(const base::Time& time) const;
+ // Translates |time| into the current media time, based on the perspective of
+ // the monotonically-increasing system clock.
+ base::TimeDelta ElapsedViaProvidedTime(const base::TimeTicks& time) const;
base::TimeDelta ClampToValidTimeRange(base::TimeDelta time) const;
- base::Clock* const clock_;
+ base::TickClock* const clock_;
// Whether the clock is running.
bool playing_;
@@ -103,9 +106,9 @@ class MEDIA_EXPORT Clock {
// allowed.
bool underflow_;
- // The system clock time when this clock last starting playing or had its
- // time set via SetTime().
- base::Time reference_;
+ // The monotonic system clock time when this Clock last started playing or had
+ // its time set via SetTime().
+ base::TimeTicks reference_;
// Current accumulated amount of media time. The remaining portion must be
// calculated by comparing the system time to the reference time.
diff --git a/media/base/clock_unittest.cc b/media/base/clock_unittest.cc
index 919c7e5cb3..3bf05996c6 100644
--- a/media/base/clock_unittest.cc
+++ b/media/base/clock_unittest.cc
@@ -4,7 +4,7 @@
#include "base/compiler_specific.h"
#include "base/logging.h"
-#include "base/test/simple_test_clock.h"
+#include "base/test/simple_test_tick_clock.h"
#include "base/time/clock.h"
#include "media/base/clock.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -30,7 +30,7 @@ static const int kDurationInSeconds = 120;
class ClockTest : public ::testing::Test {
public:
- ClockTest() : clock_(&test_clock_) {
+ ClockTest() : clock_(&test_tick_clock_) {
SetDuration();
}
@@ -43,10 +43,10 @@ class ClockTest : public ::testing::Test {
}
void AdvanceSystemTime(base::TimeDelta delta) {
- test_clock_.Advance(delta);
+ test_tick_clock_.Advance(delta);
}
- base::SimpleTestClock test_clock_;
+ base::SimpleTestTickClock test_tick_clock_;
Clock clock_;
base::TimeDelta time_elapsed_;
};
diff --git a/media/base/limits.h b/media/base/limits.h
index 309635fc5e..ed7ac513c7 100644
--- a/media/base/limits.h
+++ b/media/base/limits.h
@@ -34,10 +34,11 @@ enum {
kMaxSampleRate = 192000,
kMinSampleRate = 3000,
kMaxChannels = 32,
- kMaxBitsPerSample = 32,
+ kMaxBytesPerSample = 4,
+ kMaxBitsPerSample = kMaxBytesPerSample * 8,
kMaxSamplesPerPacket = kMaxSampleRate,
kMaxPacketSizeInBytes =
- (kMaxBitsPerSample / 8) * kMaxChannels * kMaxSamplesPerPacket,
+ kMaxBytesPerSample * kMaxChannels * kMaxSamplesPerPacket,
// This limit is used by ParamTraits<VideoCaptureParams>.
kMaxFramesPerSecond = 1000,
diff --git a/media/base/media_keys.h b/media/base/media_keys.h
index 14789e23c1..5140554542 100644
--- a/media/base/media_keys.h
+++ b/media/base/media_keys.h
@@ -26,38 +26,35 @@ class MEDIA_EXPORT MediaKeys {
enum KeyError {
kUnknownError = 1,
kClientError,
- kServiceError,
- kOutputError,
- kHardwareChangeError,
- kDomainError,
+ // The following v0.1b values have never been used.
+ // kServiceError,
+ // kOutputError,
+ // kHardwareChangeError,
+ // kDomainError,
kMaxKeyError // Must be last and greater than any legit value.
};
MediaKeys();
virtual ~MediaKeys();
- // Generates a key request for the |key_system| with |type| and
- // |init_data| provided.
+ // Generates a key request with the |type| and |init_data| provided.
// Returns true if generating key request succeeded, false otherwise.
// Note: AddKey() and CancelKeyRequest() should only be called after
// GenerateKeyRequest() returns true.
- virtual bool GenerateKeyRequest(const std::string& key_system,
- const std::string& type,
+ virtual bool GenerateKeyRequest(const std::string& type,
const uint8* init_data,
int init_data_length) = 0;
- // Adds a |key| to the |key_system|. The |key| is not limited to a decryption
+ // Adds a |key| to the session. The |key| is not limited to a decryption
// key. It can be any data that the key system accepts, such as a license.
// If multiple calls of this function set different keys for the same
// key ID, the older key will be replaced by the newer key.
- virtual void AddKey(const std::string& key_system,
- const uint8* key, int key_length,
+ virtual void AddKey(const uint8* key, int key_length,
const uint8* init_data, int init_data_length,
const std::string& session_id) = 0;
// Cancels the key request specified by |session_id|.
- virtual void CancelKeyRequest(const std::string& key_system,
- const std::string& session_id) = 0;
+ virtual void CancelKeyRequest(const std::string& session_id) = 0;
private:
DISALLOW_COPY_AND_ASSIGN(MediaKeys);
@@ -65,21 +62,17 @@ class MEDIA_EXPORT MediaKeys {
// Key event callbacks. See the spec for details:
// http://dvcs.w3.org/hg/html-media/raw-file/eme-v0.1b/encrypted-media/encrypted-media.html#event-summary
-typedef base::Callback<void(const std::string& key_system,
- const std::string& session_id)> KeyAddedCB;
+typedef base::Callback<void(const std::string& session_id)> KeyAddedCB;
-typedef base::Callback<void(const std::string& key_system,
- const std::string& session_id,
+typedef base::Callback<void(const std::string& session_id,
media::MediaKeys::KeyError error_code,
int system_code)> KeyErrorCB;
-typedef base::Callback<void(const std::string& key_system,
- const std::string& session_id,
+typedef base::Callback<void(const std::string& session_id,
const std::string& message,
const std::string& default_url)> KeyMessageCB;
-typedef base::Callback<void(const std::string& key_system,
- const std::string& session_id,
+typedef base::Callback<void(const std::string& session_id,
const std::string& type,
scoped_ptr<uint8[]> init_data,
int init_data_size)> NeedKeyCB;
diff --git a/media/base/media_switches.cc b/media/base/media_switches.cc
index 98c02795db..b1f6279262 100644
--- a/media/base/media_switches.cc
+++ b/media/base/media_switches.cc
@@ -18,9 +18,6 @@ const char kEnableOpusPlayback[] = "enable-opus-playback";
// Enables VP8 Alpha playback in media elements.
const char kEnableVp8AlphaPlayback[] = "enable-vp8-alpha-playback";
-// Enables VP9 playback in media elements.
-const char kEnableVp9Playback[] = "enable-vp9-playback";
-
// Set number of threads to use for video decoding.
const char kVideoThreads[] = "video-threads";
diff --git a/media/base/media_switches.h b/media/base/media_switches.h
index 3e547b29e9..08fdd37ce4 100644
--- a/media/base/media_switches.h
+++ b/media/base/media_switches.h
@@ -20,8 +20,6 @@ MEDIA_EXPORT extern const char kEnableOpusPlayback[];
MEDIA_EXPORT extern const char kEnableVp8AlphaPlayback[];
-MEDIA_EXPORT extern const char kEnableVp9Playback[];
-
MEDIA_EXPORT extern const char kVideoThreads[];
#if defined(GOOGLE_TV)
diff --git a/media/base/pipeline.cc b/media/base/pipeline.cc
index 68483fc223..419ae31629 100644
--- a/media/base/pipeline.cc
+++ b/media/base/pipeline.cc
@@ -39,7 +39,7 @@ Pipeline::Pipeline(const scoped_refptr<base::MessageLoopProxy>& message_loop,
natural_size_(0, 0),
volume_(1.0f),
playback_rate_(0.0f),
- clock_(new Clock(&default_clock_)),
+ clock_(new Clock(&default_tick_clock_)),
waiting_for_clock_update_(false),
status_(PIPELINE_OK),
has_audio_(false),
@@ -49,7 +49,7 @@ Pipeline::Pipeline(const scoped_refptr<base::MessageLoopProxy>& message_loop,
video_ended_(false),
audio_disabled_(false),
demuxer_(NULL),
- creation_time_(base::Time::Now()) {
+ creation_time_(default_tick_clock_.NowTicks()) {
media_log_->AddEvent(media_log_->CreatePipelineStateChangedEvent(kCreated));
media_log_->AddEvent(
media_log_->CreateEvent(MediaLogEvent::PIPELINE_CREATED));
@@ -213,9 +213,9 @@ void Pipeline::SetErrorForTesting(PipelineStatus status) {
void Pipeline::SetState(State next_state) {
if (state_ != kStarted && next_state == kStarted &&
!creation_time_.is_null()) {
- UMA_HISTOGRAM_TIMES(
- "Media.TimeToPipelineStarted", base::Time::Now() - creation_time_);
- creation_time_ = base::Time();
+ UMA_HISTOGRAM_TIMES("Media.TimeToPipelineStarted",
+ default_tick_clock_.NowTicks() - creation_time_);
+ creation_time_ = base::TimeTicks();
}
DVLOG(2) << GetStateString(state_) << " -> " << GetStateString(next_state);
diff --git a/media/base/pipeline.h b/media/base/pipeline.h
index 0ca7d62ec5..09ff904163 100644
--- a/media/base/pipeline.h
+++ b/media/base/pipeline.h
@@ -11,7 +11,7 @@
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
-#include "base/time/default_clock.h"
+#include "base/time/default_tick_clock.h"
#include "media/base/audio_renderer.h"
#include "media/base/demuxer.h"
#include "media/base/media_export.h"
@@ -373,8 +373,8 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// the filters.
float playback_rate_;
- // base::Clock used by |clock_|.
- base::DefaultClock default_clock_;
+ // base::TickClock used by |clock_|.
+ base::DefaultTickClock default_tick_clock_;
// Reference clock. Keeps track of current playback time. Uses system
// clock and linear interpolation, but can have its time manually set
@@ -439,7 +439,7 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Time of pipeline creation; is non-zero only until the pipeline first
// reaches "kStarted", at which point it is used & zeroed out.
- base::Time creation_time_;
+ base::TimeTicks creation_time_;
scoped_ptr<SerialRunner> pending_callbacks_;
diff --git a/media/base/pipeline_unittest.cc b/media/base/pipeline_unittest.cc
index 778a036daa..a121ee4769 100644
--- a/media/base/pipeline_unittest.cc
+++ b/media/base/pipeline_unittest.cc
@@ -7,7 +7,7 @@
#include "base/bind.h"
#include "base/message_loop.h"
#include "base/stl_util.h"
-#include "base/test/simple_test_clock.h"
+#include "base/test/simple_test_tick_clock.h"
#include "base/threading/simple_thread.h"
#include "base/time/clock.h"
#include "media/base/clock.h"
@@ -286,7 +286,7 @@ class PipelineTest : public ::testing::Test {
// Fixture members.
StrictMock<CallbackHelper> callbacks_;
- base::SimpleTestClock test_clock_;
+ base::SimpleTestTickClock test_tick_clock_;
base::MessageLoop message_loop_;
scoped_ptr<Pipeline> pipeline_;
@@ -606,7 +606,7 @@ TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
// Replace the clock so we can simulate wallclock time advancing w/o using
// Sleep().
- pipeline_->SetClockForTesting(new Clock(&test_clock_));
+ pipeline_->SetClockForTesting(new Clock(&test_tick_clock_));
InitializeDemuxer(&streams, duration);
InitializeAudioRenderer(audio_stream(), false);
@@ -627,7 +627,7 @@ TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
// Verify that the clock doesn't advance since it hasn't been started by
// a time update from the audio stream.
int64 start_time = pipeline_->GetMediaTime().ToInternalValue();
- test_clock_.Advance(base::TimeDelta::FromMilliseconds(100));
+ test_tick_clock_.Advance(base::TimeDelta::FromMilliseconds(100));
EXPECT_EQ(pipeline_->GetMediaTime().ToInternalValue(), start_time);
// Signal end of audio stream.
@@ -636,7 +636,7 @@ TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
// Verify that the clock advances.
start_time = pipeline_->GetMediaTime().ToInternalValue();
- test_clock_.Advance(base::TimeDelta::FromMilliseconds(100));
+ test_tick_clock_.Advance(base::TimeDelta::FromMilliseconds(100));
EXPECT_GT(pipeline_->GetMediaTime().ToInternalValue(), start_time);
// Signal end of video stream and make sure OnEnded() callback occurs.
diff --git a/media/base/run_all_unittests.cc b/media/base/run_all_unittests.cc
index 4a6d272b9a..4274634d0b 100644
--- a/media/base/run_all_unittests.cc
+++ b/media/base/run_all_unittests.cc
@@ -34,13 +34,10 @@ void TestSuiteNoAtExit::Initialize() {
// Run this here instead of main() to ensure an AtExitManager is already
// present.
media::InitializeMediaLibraryForTesting();
- // Enable VP9 video codec support for all media tests.
- // TODO(tomfinegan): Remove this once the VP9 flag is removed or negated.
- CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- cmd_line->AppendSwitch(switches::kEnableVp9Playback);
// Enable VP8 alpha support for all media tests.
// TODO(tomfinegan): Remove this once the VP8 alpha flag is removed or
// negated.
+ CommandLine* cmd_line = CommandLine::ForCurrentProcess();
cmd_line->AppendSwitch(switches::kEnableVp8AlphaPlayback);
}
diff --git a/media/base/sample_format.cc b/media/base/sample_format.cc
new file mode 100644
index 0000000000..3fdcf1018e
--- /dev/null
+++ b/media/base/sample_format.cc
@@ -0,0 +1,32 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/sample_format.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+int SampleFormatToBytesPerChannel(SampleFormat sample_format) {
+ switch (sample_format) {
+ case kUnknownSampleFormat:
+ return 0;
+ case kSampleFormatU8:
+ return 1;
+ case kSampleFormatS16:
+ case kSampleFormatPlanarS16:
+ return 2;
+ case kSampleFormatS32:
+ case kSampleFormatF32:
+ case kSampleFormatPlanarF32:
+ return 4;
+ case kSampleFormatMax:
+ break;
+ }
+
+ NOTREACHED() << "Invalid sample format provided: " << sample_format;
+ return 0;
+}
+
+} // namespace media
diff --git a/media/base/sample_format.h b/media/base/sample_format.h
new file mode 100644
index 0000000000..bcaa5b2785
--- /dev/null
+++ b/media/base/sample_format.h
@@ -0,0 +1,34 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_SAMPLE_FORMAT_H
+#define MEDIA_BASE_SAMPLE_FORMAT_H
+
+#include "media/base/media_export.h"
+
+namespace media {
+
+enum SampleFormat {
+ // These values are histogrammed over time; do not change their ordinal
+ // values. When deleting a sample format replace it with a dummy value; when
+ // adding a sample format, do so at the bottom before kSampleFormatMax.
+ kUnknownSampleFormat = 0,
+ kSampleFormatU8, // Unsigned 8-bit w/ bias of 128.
+ kSampleFormatS16, // Signed 16-bit.
+ kSampleFormatS32, // Signed 32-bit.
+ kSampleFormatF32, // Float 32-bit.
+ kSampleFormatPlanarS16, // Signed 16-bit planar.
+ kSampleFormatPlanarF32, // Float 32-bit planar.
+
+ // Must always be last!
+ kSampleFormatMax
+};
+
+// Returns the number of bytes used per channel for the specified
+// |sample_format|.
+MEDIA_EXPORT int SampleFormatToBytesPerChannel(SampleFormat sample_format);
+
+} // namespace media
+
+#endif // MEDIA_BASE_SAMPLE_FORMAT_H