summaryrefslogtreecommitdiff
path: root/media/base
diff options
context:
space:
mode:
authorTorne (Richard Coles) <torne@google.com>2013-11-28 11:55:43 +0000
committerTorne (Richard Coles) <torne@google.com>2013-11-28 11:55:43 +0000
commitf2477e01787aa58f445919b809d89e252beef54f (patch)
tree2db962b4af39f0db3a5f83b314373d0530c484b8 /media/base
parent7daea1dd5ff7e419322de831b642d81af3247912 (diff)
downloadchromium_org-f2477e01787aa58f445919b809d89e252beef54f.tar.gz
Merge from Chromium at DEPS revision 237746
This commit was generated by merge_to_master.py. Change-Id: I8997af4cddfeb09a7c26f7e8e672c712cab461ea
Diffstat (limited to 'media/base')
-rw-r--r--media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java3
-rw-r--r--media/base/android/java/src/org/chromium/media/MediaCodecBridge.java36
-rw-r--r--media/base/android/java/src/org/chromium/media/MediaDrmBridge.java596
-rw-r--r--media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java10
-rw-r--r--media/base/android/java/src/org/chromium/media/MediaPlayerListener.java18
-rw-r--r--media/base/android/java/src/org/chromium/media/VideoCapture.java8
-rw-r--r--media/base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java6
-rw-r--r--media/base/android/media_codec_bridge.cc12
-rw-r--r--media/base/android/media_codec_bridge.h4
-rw-r--r--media/base/android/media_codec_bridge_unittest.cc14
-rw-r--r--media/base/android/media_decoder_job.cc14
-rw-r--r--media/base/android/media_decoder_job.h7
-rw-r--r--media/base/android/media_drm_bridge.cc61
-rw-r--r--media/base/android/media_drm_bridge.h31
-rw-r--r--media/base/android/media_player_bridge.cc15
-rw-r--r--media/base/android/media_player_manager.h15
-rw-r--r--media/base/android/media_source_player.cc69
-rw-r--r--media/base/android/media_source_player.h8
-rw-r--r--media/base/android/media_source_player_unittest.cc177
-rw-r--r--media/base/audio_bus.h1
-rw-r--r--media/base/audio_bus_perftest.cc13
-rw-r--r--media/base/audio_converter_perftest.cc2
-rw-r--r--media/base/audio_decoder_config.h1
-rw-r--r--media/base/bit_reader.cc2
-rw-r--r--media/base/demuxer.h15
-rw-r--r--media/base/demuxer_perftest.cc28
-rw-r--r--media/base/demuxer_stream.h1
-rw-r--r--media/base/fake_text_track_stream.cc83
-rw-r--r--media/base/fake_text_track_stream.h47
-rw-r--r--media/base/filter_collection.cc10
-rw-r--r--media/base/filter_collection.h5
-rw-r--r--media/base/media_keys.h31
-rw-r--r--media/base/media_log.cc2
-rw-r--r--media/base/media_log_event.h3
-rw-r--r--media/base/media_switches.cc11
-rw-r--r--media/base/media_switches.h3
-rw-r--r--media/base/mock_demuxer_host.h6
-rw-r--r--media/base/mock_filters.cc4
-rw-r--r--media/base/mock_filters.h19
-rw-r--r--media/base/pipeline.cc80
-rw-r--r--media/base/pipeline.h22
-rw-r--r--media/base/pipeline_status.cc24
-rw-r--r--media/base/pipeline_status.h5
-rw-r--r--media/base/pipeline_unittest.cc108
-rw-r--r--media/base/run_all_unittests.cc4
-rw-r--r--media/base/simd/vector_math_sse.cc79
-rw-r--r--media/base/sinc_resampler_perftest.cc8
-rw-r--r--media/base/stream_parser.h15
-rw-r--r--media/base/text_cue.cc23
-rw-r--r--media/base/text_cue.h48
-rw-r--r--media/base/text_renderer.cc369
-rw-r--r--media/base/text_renderer.h145
-rw-r--r--media/base/text_renderer_unittest.cc1382
-rw-r--r--media/base/text_track.h19
-rw-r--r--media/base/text_track_config.cc30
-rw-r--r--media/base/text_track_config.h48
-rw-r--r--media/base/vector_math.cc97
-rw-r--r--media/base/vector_math.h12
-rw-r--r--media/base/vector_math_perftest.cc67
-rw-r--r--media/base/vector_math_testing.h8
-rw-r--r--media/base/vector_math_unittest.cc244
-rw-r--r--media/base/video_decoder_config.cc2
-rw-r--r--media/base/video_frame.cc130
-rw-r--r--media/base/video_frame.h51
-rw-r--r--media/base/video_frame_unittest.cc50
65 files changed, 3965 insertions, 496 deletions
diff --git a/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java b/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java
index 3bb7f9e658..0f0cfb61e0 100644
--- a/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java
+++ b/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java
@@ -64,6 +64,9 @@ class AudioManagerAndroid {
}
mOriginalSpeakerStatus = mAudioManager.isSpeakerphoneOn();
+ if (!mOriginalSpeakerStatus) {
+ mAudioManager.setSpeakerphoneOn(true);
+ }
IntentFilter filter = new IntentFilter(Intent.ACTION_HEADSET_PLUG);
mReceiver = new BroadcastReceiver() {
diff --git a/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java b/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
index 74bec56745..ad98a5060c 100644
--- a/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
+++ b/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
@@ -12,8 +12,9 @@ import android.media.MediaCodecInfo;
import android.media.MediaCodecList;
import android.media.MediaCrypto;
import android.media.MediaFormat;
-import android.view.Surface;
+import android.os.Build;
import android.util.Log;
+import android.view.Surface;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -83,12 +84,10 @@ class MediaCodecBridge {
private static class CodecInfo {
private final String mCodecType; // e.g. "video/x-vnd.on2.vp8".
private final String mCodecName; // e.g. "OMX.google.vp8.decoder".
- private final boolean mIsSecureDecoderSupported;
- private CodecInfo(String codecType, String codecName, boolean isSecureDecoderSupported) {
+ private CodecInfo(String codecType, String codecName) {
mCodecType = codecType;
mCodecName = codecName;
- mIsSecureDecoderSupported = isSecureDecoderSupported;
}
@CalledByNative("CodecInfo")
@@ -96,9 +95,6 @@ class MediaCodecBridge {
@CalledByNative("CodecInfo")
private String codecName() { return mCodecName; }
-
- @CalledByNative("CodecInfo")
- private boolean isSecureDecoderSupported() { return mIsSecureDecoderSupported; }
}
private static class DequeueOutputResult {
@@ -151,21 +147,12 @@ class MediaCodecBridge {
continue;
}
- String[] supportedTypes = info.getSupportedTypes();
String codecString = info.getName();
- String secureCodecName = codecString + ".secure";
- boolean secureDecoderSupported = false;
- try {
- MediaCodec secureCodec = MediaCodec.createByCodecName(secureCodecName);
- secureDecoderSupported = true;
- secureCodec.release();
- } catch (Exception e) {
- Log.e(TAG, "Failed to create " + secureCodecName);
- }
+ String[] supportedTypes = info.getSupportedTypes();
for (int j = 0; j < supportedTypes.length; ++j) {
- if (!CodecInfoMap.containsKey(supportedTypes[j]) || secureDecoderSupported) {
+ if (!CodecInfoMap.containsKey(supportedTypes[j])) {
CodecInfoMap.put(supportedTypes[j], new CodecInfo(
- supportedTypes[j], codecString, secureDecoderSupported));
+ supportedTypes[j], codecString));
}
}
}
@@ -201,6 +188,11 @@ class MediaCodecBridge {
@CalledByNative
private static MediaCodecBridge create(String mime, boolean isSecure) {
+ // Creation of ".secure" codecs sometimes crash instead of throwing exceptions
+ // on pre-JBMR2 devices.
+ if (isSecure && Build.VERSION.SDK_INT < Build.VERSION_CODES.JELLY_BEAN_MR2) {
+ return null;
+ }
MediaCodec mediaCodec = null;
try {
// |isSecure| only applies to video decoders.
@@ -330,11 +322,11 @@ class MediaCodecBridge {
mMediaCodec.queueSecureInputBuffer(index, offset, cryptoInfo, presentationTimeUs, 0);
} catch (MediaCodec.CryptoException e) {
Log.e(TAG, "Failed to queue secure input buffer: " + e.toString());
- // TODO(xhwang): Replace hard coded value with constant/enum.
- if (e.getErrorCode() == 1) {
- Log.e(TAG, "No key available.");
+ if (e.getErrorCode() == MediaCodec.CryptoException.ERROR_NO_KEY) {
+ Log.e(TAG, "MediaCodec.CryptoException.ERROR_NO_KEY");
return MEDIA_CODEC_NO_KEY;
}
+ Log.e(TAG, "MediaCodec.CryptoException with error code " + e.getErrorCode());
return MEDIA_CODEC_ERROR;
} catch(IllegalStateException e) {
Log.e(TAG, "Failed to queue secure input buffer: " + e.toString());
diff --git a/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java b/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java
index 21a8a21bf1..c34bbf0e7c 100644
--- a/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java
+++ b/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java
@@ -7,6 +7,7 @@ package org.chromium.media;
import android.media.MediaCrypto;
import android.media.MediaDrm;
import android.os.AsyncTask;
+import android.os.Build;
import android.os.Handler;
import android.util.Log;
@@ -20,6 +21,8 @@ import org.chromium.base.CalledByNative;
import org.chromium.base.JNINamespace;
import java.io.IOException;
+import java.lang.RuntimeException;
+import java.util.ArrayDeque;
import java.util.HashMap;
import java.util.UUID;
@@ -29,22 +32,88 @@ import java.util.UUID;
*/
@JNINamespace("media")
class MediaDrmBridge {
+ // Implementation Notes:
+ // - A media crypto session (mMediaCryptoSessionId) is opened after MediaDrm
+ // is created. This session will be added to mSessionIds.
+ // a) In multiple session mode, this session will only be used to create
+ // the MediaCrypto object and it's associated mime type is always null.
+ // b) In single session mode, this session will be used to create the
+ // MediaCrypto object and will be used to call getKeyRequest() and
+ // manage all keys.
+ // - Each generateKeyRequest() call creates a new session. All sessions are
+ // managed in mSessionIds.
+ // - Whenever NotProvisionedException is thrown, we will clean up the
+ // current state and start the provisioning process.
+ // - When provisioning is finished, we will try to resume suspended
+ // operations:
+ // a) Create the media crypto session if it's not created.
+ // b) Finish generateKeyRequest() if previous generateKeyRequest() was
+ // interrupted by a NotProvisionedException.
+ // - Whenever an unexpected error occurred, we'll call release() to release
+ // all resources and clear all states. In that case all calls to this
+ // object will be no-op. All public APIs and callbacks should check
+ // mMediaBridge to make sure release() hasn't been called. Also, we call
+ // release() immediately after the error happens (e.g. after mMediaDrm)
+ // calls. Indirect calls should not call release() again to avoid
+ // duplication (even though it doesn't hurt to call release() twice).
private static final String TAG = "MediaDrmBridge";
private static final String SECURITY_LEVEL = "securityLevel";
private static final String PRIVACY_MODE = "privacyMode";
+ private static final String SESSION_SHARING = "sessionSharing";
+ private static final String ENABLE = "enable";
+
private MediaDrm mMediaDrm;
+ private long mNativeMediaDrmBridge;
private UUID mSchemeUUID;
- private int mNativeMediaDrmBridge;
- // TODO(qinmin): we currently only support one session per DRM bridge.
- // Change this to a HashMap if we start to support multiple sessions.
- private String mSessionId;
- private MediaCrypto mMediaCrypto;
- private String mMimeType;
private Handler mHandler;
- private byte[] mPendingInitData;
+
+ // In this mode, we only open one session, i.e. mMediaCryptoSessionId, and
+ // mSessionIds is always empty.
+ private boolean mSingleSessionMode;
+
+ // A session only for the purpose of creating a MediaCrypto object.
+ // This session is opened when generateKeyRequest is called for the first
+ // time.
+ // - In multiple session mode, all following generateKeyRequest() calls
+ // should create a new session and use it to call getKeyRequest(). No
+ // getKeyRequest() should ever be called on this media crypto session.
+ // - In single session mode, all generateKeyRequest() calls use the same
+ // media crypto session. When generateKeyRequest() is called with a new
+ // initData, previously added keys may not be available anymore.
+ private String mMediaCryptoSessionId;
+ private MediaCrypto mMediaCrypto;
+
+ // The map of all opened sessions to their mime types.
+ private HashMap<String, String> mSessionIds;
+
+ private ArrayDeque<PendingGkrData> mPendingGkrDataQueue;
+
private boolean mResetDeviceCredentialsPending;
+ // MediaDrmBridge is waiting for provisioning response from the server.
+ //
+ // Notes about NotProvisionedException: This exception can be thrown in a
+ // lot of cases. To streamline implementation, we do not catch it in private
+ // non-native methods and only catch it in public APIs.
+ private boolean mProvisioningPending;
+
+ /*
+ * This class contains data needed to call generateKeyRequest().
+ */
+ private static class PendingGkrData {
+ private final byte[] mInitData;
+ private final String mMimeType;
+
+ private PendingGkrData(byte[] initData, String mimeType) {
+ mInitData = initData;
+ mMimeType = mimeType;
+ }
+
+ private byte[] initData() { return mInitData; }
+ private String mimeType() { return mMimeType; }
+ }
+
private static UUID getUUIDFromBytes(byte[] data) {
if (data.length != 16) {
return null;
@@ -60,73 +129,115 @@ class MediaDrmBridge {
return new UUID(mostSigBits, leastSigBits);
}
- private MediaDrmBridge(UUID schemeUUID, String securityLevel, int nativeMediaDrmBridge)
- throws android.media.UnsupportedSchemeException {
+ private MediaDrmBridge(UUID schemeUUID, String securityLevel, long nativeMediaDrmBridge,
+ boolean singleSessionMode) throws android.media.UnsupportedSchemeException {
mSchemeUUID = schemeUUID;
mMediaDrm = new MediaDrm(schemeUUID);
- mHandler = new Handler();
mNativeMediaDrmBridge = nativeMediaDrmBridge;
+ mHandler = new Handler();
+ mSingleSessionMode = singleSessionMode;
+ mSessionIds = new HashMap<String, String>();
+ mPendingGkrDataQueue = new ArrayDeque<PendingGkrData>();
mResetDeviceCredentialsPending = false;
+ mProvisioningPending = false;
+
mMediaDrm.setOnEventListener(new MediaDrmListener());
- mMediaDrm.setPropertyString(PRIVACY_MODE, "enable");
+ mMediaDrm.setPropertyString(PRIVACY_MODE, ENABLE);
+ if (!mSingleSessionMode) {
+ mMediaDrm.setPropertyString(SESSION_SHARING, ENABLE);
+ }
String currentSecurityLevel = mMediaDrm.getPropertyString(SECURITY_LEVEL);
Log.e(TAG, "Security level: current " + currentSecurityLevel + ", new " + securityLevel);
- if (!securityLevel.equals(currentSecurityLevel))
+ if (!securityLevel.equals(currentSecurityLevel)) {
mMediaDrm.setPropertyString(SECURITY_LEVEL, securityLevel);
+ }
+
+ // We could open a MediaCrypto session here to support faster start of
+ // clear lead (no need to wait for generateKeyRequest()). But on
+ // Android, memory and battery resources are precious and we should
+ // only create a session when we are sure we'll use it.
+ // TODO(xhwang): Investigate other options to support fast start.
}
/**
* Create a MediaCrypto object.
*
- * @return if a MediaCrypto object is successfully created.
+ * @return whether a MediaCrypto object is successfully created.
*/
- private boolean createMediaCrypto() {
- assert(mSessionId != null);
+ private boolean createMediaCrypto() throws android.media.NotProvisionedException {
+ if (mMediaDrm == null) {
+ return false;
+ }
+ assert(!mProvisioningPending);
+ assert(mMediaCryptoSessionId == null);
assert(mMediaCrypto == null);
+
+ // Open media crypto session.
+ mMediaCryptoSessionId = openSession();
+ if (mMediaCryptoSessionId == null) {
+ Log.e(TAG, "Cannot create MediaCrypto Session.");
+ return false;
+ }
+ Log.d(TAG, "MediaCrypto Session created: " + mMediaCryptoSessionId);
+
+ // Create MediaCrypto object.
try {
- final byte[] session = mSessionId.getBytes("UTF-8");
if (MediaCrypto.isCryptoSchemeSupported(mSchemeUUID)) {
- mMediaCrypto = new MediaCrypto(mSchemeUUID, session);
+ final byte[] mediaCryptoSession = mMediaCryptoSessionId.getBytes("UTF-8");
+ mMediaCrypto = new MediaCrypto(mSchemeUUID, mediaCryptoSession);
+ assert(mMediaCrypto != null);
+ Log.d(TAG, "MediaCrypto successfully created!");
+ mSessionIds.put(mMediaCryptoSessionId, null);
+ // Notify the native code that MediaCrypto is ready.
+ nativeOnMediaCryptoReady(mNativeMediaDrmBridge);
+ return true;
+ } else {
+ Log.e(TAG, "Cannot create MediaCrypto for unsupported scheme.");
}
- } catch (android.media.MediaCryptoException e) {
- Log.e(TAG, "Cannot create MediaCrypto " + e.toString());
- return false;
} catch (java.io.UnsupportedEncodingException e) {
- Log.e(TAG, "Cannot create MediaCrypto " + e.toString());
- return false;
+ Log.e(TAG, "Cannot create MediaCrypto", e);
+ } catch (android.media.MediaCryptoException e) {
+ Log.e(TAG, "Cannot create MediaCrypto", e);
}
- assert(mMediaCrypto != null);
- nativeOnMediaCryptoReady(mNativeMediaDrmBridge);
- return true;
+ release();
+ return false;
}
/**
- * Open a new session and return the sessionId.
+ * Open a new session and return the session ID string.
*
- * @return false if unexpected error happens. Return true if a new session
- * is successfully opened, or if provisioning is required to open a session.
+ * @return null if unexpected error happened.
*/
- private boolean openSession() {
- assert(mSessionId == null);
-
- if (mMediaDrm == null) {
- return false;
+ private String openSession() throws android.media.NotProvisionedException {
+ assert(mMediaDrm != null);
+ String sessionId = null;
+ try {
+ final byte[] session = mMediaDrm.openSession();
+ sessionId = new String(session, "UTF-8");
+ } catch (java.lang.RuntimeException e) { // TODO(xhwang): Drop this?
+ Log.e(TAG, "Cannot open a new session", e);
+ release();
+ } catch (java.io.UnsupportedEncodingException e) {
+ Log.e(TAG, "Cannot open a new session", e);
+ release();
}
+ return sessionId;
+ }
+ /**
+ * Close a session.
+ *
+ * @param sesstionIdString ID of the session to be closed.
+ */
+ private void closeSession(String sesstionIdString) {
+ assert(mMediaDrm != null);
try {
- final byte[] sessionId = mMediaDrm.openSession();
- mSessionId = new String(sessionId, "UTF-8");
- } catch (android.media.NotProvisionedException e) {
- Log.e(TAG, "Cannot open a new session: " + e.toString());
- return true;
- } catch (Exception e) {
- Log.e(TAG, "Cannot open a new session: " + e.toString());
- return false;
+ final byte[] session = sesstionIdString.getBytes("UTF-8");
+ mMediaDrm.closeSession(session);
+ } catch (java.io.UnsupportedEncodingException e) {
+ Log.e(TAG, "Failed to close session", e);
}
-
- assert(mSessionId != null);
- return createMediaCrypto();
}
/**
@@ -159,16 +270,23 @@ class MediaDrmBridge {
return null;
}
+ boolean singleSessionMode = false;
+ if (Build.VERSION.RELEASE.equals("4.4")) {
+ singleSessionMode = true;
+ }
+ Log.d(TAG, "MediaDrmBridge uses " +
+ (singleSessionMode ? "single" : "multiple") + "-session mode.");
+
MediaDrmBridge media_drm_bridge = null;
try {
media_drm_bridge = new MediaDrmBridge(
- cryptoScheme, securityLevel, nativeMediaDrmBridge);
+ cryptoScheme, securityLevel, nativeMediaDrmBridge, singleSessionMode);
} catch (android.media.UnsupportedSchemeException e) {
- Log.e(TAG, "Unsupported DRM scheme: " + e.toString());
+ Log.e(TAG, "Unsupported DRM scheme", e);
} catch (java.lang.IllegalArgumentException e) {
- Log.e(TAG, "Failed to create MediaDrmBridge: " + e.toString());
+ Log.e(TAG, "Failed to create MediaDrmBridge", e);
} catch (java.lang.IllegalStateException e) {
- Log.e(TAG, "Failed to create MediaDrmBridge: " + e.toString());
+ Log.e(TAG, "Failed to create MediaDrmBridge", e);
}
return media_drm_bridge;
@@ -198,19 +316,26 @@ class MediaDrmBridge {
*/
@CalledByNative
private void release() {
+ // Do not reset mHandler and mNativeMediaDrmBridge so that we can still
+ // post KeyError back to native code.
+
+ mPendingGkrDataQueue.clear();
+ mPendingGkrDataQueue = null;
+
+ for (String sessionId : mSessionIds.keySet()) {
+ closeSession(sessionId);
+ }
+ mSessionIds.clear();
+ mSessionIds = null;
+
+ // This session was closed in the "for" loop above.
+ mMediaCryptoSessionId = null;
+
if (mMediaCrypto != null) {
mMediaCrypto.release();
mMediaCrypto = null;
}
- if (mSessionId != null) {
- try {
- final byte[] session = mSessionId.getBytes("UTF-8");
- mMediaDrm.closeSession(session);
- } catch (java.io.UnsupportedEncodingException e) {
- Log.e(TAG, "Failed to close session: " + e.toString());
- }
- mSessionId = null;
- }
+
if (mMediaDrm != null) {
mMediaDrm.release();
mMediaDrm = null;
@@ -218,68 +343,173 @@ class MediaDrmBridge {
}
/**
- * Generate a key request and post an asynchronous task to the native side
- * with the response message.
+ * Get a key request and post an asynchronous task to the native side
+ * with the response message upon success, or with the key error if
+ * unexpected error happened.
+ *
+ * @param sessionId ID of the session on which we need to get the key request.
+ * @param data Data needed to get the key request.
+ * @param mime Mime type to get the key request.
+ *
+ * @return whether a key request is successfully obtained.
+ */
+ private boolean getKeyRequest(String sessionId, byte[] data, String mime)
+ throws android.media.NotProvisionedException {
+ assert(mMediaDrm != null);
+ assert(mMediaCrypto != null);
+ assert(!mProvisioningPending);
+ assert(sessionExists(sessionId));
+
+ try {
+ final byte[] session = sessionId.getBytes("UTF-8");
+ HashMap<String, String> optionalParameters = new HashMap<String, String>();
+ final MediaDrm.KeyRequest request = mMediaDrm.getKeyRequest(
+ session, data, mime, MediaDrm.KEY_TYPE_STREAMING, optionalParameters);
+ Log.e(TAG, "Got key request successfully.");
+ onKeyMessage(sessionId, request.getData(), request.getDefaultUrl());
+ return true;
+ } catch (java.io.UnsupportedEncodingException e) {
+ Log.e(TAG, "Cannot get key request", e);
+ }
+ onKeyError(sessionId);
+ release();
+ return false;
+ }
+
+ /**
+ * Save |initData| and |mime| to |mPendingGkrDataQueue| so that we can
+ * resume the generateKeyRequest() call later.
+ */
+ private void savePendingGkrData(byte[] initData, String mime) {
+ Log.d(TAG, "savePendingGkrData()");
+ mPendingGkrDataQueue.offer(new PendingGkrData(initData, mime));
+ }
+
+ /**
+ * Process all pending generateKeyRequest() calls synchronously.
+ */
+ private void processPendingGkrData() {
+ Log.d(TAG, "processPendingGkrData()");
+ assert(mMediaDrm != null);
+
+ // Check mMediaDrm != null because error may happen in generateKeyRequest().
+ // Check !mProvisioningPending because NotProvisionedException may be
+ // thrown in generateKeyRequest().
+ while (mMediaDrm != null && !mProvisioningPending && !mPendingGkrDataQueue.isEmpty()) {
+ PendingGkrData pendingGkrData = mPendingGkrDataQueue.poll();
+ byte[] initData = pendingGkrData.initData();
+ String mime = pendingGkrData.mimeType();
+ generateKeyRequest(initData, mime);
+ }
+ }
+
+ /**
+ * Process pending operations asynchrnously.
+ */
+ private void resumePendingOperations() {
+ mHandler.post(new Runnable(){
+ public void run() {
+ processPendingGkrData();
+ }
+ });
+ }
+
+ /**
+ * Generate a key request with |initData| and |mime|, and post an
+ * asynchronous task to the native side with the key request or a key error.
+ * In multiple session mode, a new session will be open. In single session
+ * mode, the mMediaCryptoSessionId will be used.
*
* @param initData Data needed to generate the key request.
* @param mime Mime type.
*/
@CalledByNative
private void generateKeyRequest(byte[] initData, String mime) {
- Log.d(TAG, "generateKeyRequest().");
+ Log.d(TAG, "generateKeyRequest()");
+ if (mMediaDrm == null) {
+ Log.e(TAG, "generateKeyRequest() called when MediaDrm is null.");
+ return;
+ }
- if (mMimeType == null) {
- mMimeType = mime;
- } else if (!mMimeType.equals(mime)) {
- onKeyError();
+ if (mProvisioningPending) {
+ assert(mMediaCrypto == null);
+ savePendingGkrData(initData, mime);
return;
}
- if (mSessionId == null) {
- if (!openSession()) {
- onKeyError();
- return;
+ boolean newSessionOpened = false;
+ String sessionId = null;
+ try {
+ // Create MediaCrypto if necessary.
+ if (mMediaCrypto == null && !createMediaCrypto()) {
+ onKeyError(null);
+ return;
}
-
- // NotProvisionedException happened during openSession().
- if (mSessionId == null) {
- if (mPendingInitData != null) {
- Log.e(TAG, "generateKeyRequest called when another call is pending.");
- onKeyError();
+ assert(mMediaCrypto != null);
+ assert(mSessionIds.containsKey(mMediaCryptoSessionId));
+
+ if (mSingleSessionMode) {
+ sessionId = mMediaCryptoSessionId;
+ if (mSessionIds.get(sessionId) == null) {
+ // Set |mime| when we call generateKeyRequest() for the first time.
+ mSessionIds.put(sessionId, mime);
+ } else if (!mSessionIds.get(sessionId).equals(mime)) {
+ Log.e(TAG, "Only one mime type is supported in single session mode.");
+ onKeyError(sessionId);
+ return;
+ }
+ } else {
+ sessionId = openSession();
+ if (sessionId == null) {
+ Log.e(TAG, "Cannot open session in generateKeyRequest().");
+ onKeyError(null);
return;
}
+ newSessionOpened = true;
+ assert(!mSessionIds.containsKey(sessionId));
+ }
- // We assume MediaDrm.EVENT_PROVISION_REQUIRED is always fired if
- // NotProvisionedException is throwed in openSession().
- // generateKeyRequest() will be resumed after provisioning is finished.
- // TODO(xhwang): Double check if this assumption is true. Otherwise we need
- // to handle the exception in openSession more carefully.
- mPendingInitData = initData;
+ // KeyMessage or KeyError is fired in getKeyRequest().
+ assert(sessionExists(sessionId));
+ if (!getKeyRequest(sessionId, initData, mime)) {
+ Log.e(TAG, "Cannot get key request in generateKeyRequest().");
return;
}
- }
- try {
- final byte[] session = mSessionId.getBytes("UTF-8");
- HashMap<String, String> optionalParameters = new HashMap<String, String>();
- final MediaDrm.KeyRequest request = mMediaDrm.getKeyRequest(
- session, initData, mime, MediaDrm.KEY_TYPE_STREAMING, optionalParameters);
- mHandler.post(new Runnable(){
- public void run() {
- nativeOnKeyMessage(mNativeMediaDrmBridge, mSessionId,
- request.getData(), request.getDefaultUrl());
- }
- });
- return;
+ if (newSessionOpened) {
+ Log.e(TAG, "generateKeyRequest(): Session " + sessionId + " created.");
+ mSessionIds.put(sessionId, mime);
+ return;
+ }
} catch (android.media.NotProvisionedException e) {
- // MediaDrm.EVENT_PROVISION_REQUIRED is also fired in this case.
- // Provisioning is handled in the handler of that event.
- Log.e(TAG, "Cannot get key request: " + e.toString());
- return;
- } catch (java.io.UnsupportedEncodingException e) {
- Log.e(TAG, "Cannot get key request: " + e.toString());
+ Log.e(TAG, "Device not provisioned", e);
+ if (newSessionOpened) {
+ closeSession(sessionId);
+ }
+ savePendingGkrData(initData, mime);
+ startProvisioning();
+ }
+ }
+
+ /**
+ * Returns whether |sessionId| is a valid key session, excluding the media
+ * crypto session in multi-session mode.
+ *
+ * @param sessionId Crypto session Id.
+ */
+ private boolean sessionExists(String sessionId) {
+ if (mMediaCryptoSessionId == null) {
+ assert(mSessionIds.isEmpty());
+ Log.e(TAG, "Session doesn't exist because media crypto session is not created.");
+ return false;
}
- onKeyError();
+ assert(mSessionIds.containsKey(mMediaCryptoSessionId));
+
+ if (mSingleSessionMode) {
+ return mMediaCryptoSessionId.equals(sessionId);
+ }
+
+ return !mMediaCryptoSessionId.equals(sessionId) && mSessionIds.containsKey(sessionId);
}
/**
@@ -289,14 +519,30 @@ class MediaDrmBridge {
*/
@CalledByNative
private void cancelKeyRequest(String sessionId) {
- if (mSessionId == null || !mSessionId.equals(sessionId)) {
+ Log.d(TAG, "cancelKeyRequest(): " + sessionId);
+ if (mMediaDrm == null) {
+ Log.e(TAG, "cancelKeyRequest() called when MediaDrm is null.");
+ return;
+ }
+
+ if (!sessionExists(sessionId)) {
+ Log.e(TAG, "Invalid session in cancelKeyRequest.");
+ onKeyError(sessionId);
return;
}
+
try {
final byte[] session = sessionId.getBytes("UTF-8");
mMediaDrm.removeKeys(session);
} catch (java.io.UnsupportedEncodingException e) {
- Log.e(TAG, "Cannot cancel key request: " + e.toString());
+ Log.e(TAG, "Cannot cancel key request", e);
+ }
+
+ // We don't close the media crypto session in single session mode.
+ if (!mSingleSessionMode) {
+ closeSession(sessionId);
+ mSessionIds.remove(sessionId);
+ Log.d(TAG, "Session " + sessionId + "closed.");
}
}
@@ -308,9 +554,19 @@ class MediaDrmBridge {
*/
@CalledByNative
private void addKey(String sessionId, byte[] key) {
- if (mSessionId == null || !mSessionId.equals(sessionId)) {
+ Log.d(TAG, "addKey(): " + sessionId);
+ if (mMediaDrm == null) {
+ Log.e(TAG, "addKey() called when MediaDrm is null.");
return;
}
+
+ // TODO(xhwang): We should be able to DCHECK this when WD EME is implemented.
+ if (!sessionExists(sessionId)) {
+ Log.e(TAG, "Invalid session in addKey.");
+ onKeyError(sessionId);
+ return;
+ }
+
try {
final byte[] session = sessionId.getBytes("UTF-8");
try {
@@ -319,23 +575,21 @@ class MediaDrmBridge {
// This is not really an exception. Some error code are incorrectly
// reported as an exception.
// TODO(qinmin): remove this exception catch when b/10495563 is fixed.
- Log.e(TAG, "Exception intentionally caught when calling provideKeyResponse() "
- + e.toString());
+ Log.e(TAG, "Exception intentionally caught when calling provideKeyResponse()", e);
}
- mHandler.post(new Runnable() {
- public void run() {
- nativeOnKeyAdded(mNativeMediaDrmBridge, mSessionId);
- }
- });
+ onKeyAdded(sessionId);
+ Log.d(TAG, "Key successfully added for session " + sessionId);
return;
} catch (android.media.NotProvisionedException e) {
- Log.e(TAG, "failed to provide key response: " + e.toString());
+ // TODO (xhwang): Should we handle this?
+ Log.e(TAG, "failed to provide key response", e);
} catch (android.media.DeniedByServerException e) {
- Log.e(TAG, "failed to provide key response: " + e.toString());
+ Log.e(TAG, "failed to provide key response", e);
} catch (java.io.UnsupportedEncodingException e) {
- Log.e(TAG, "failed to provide key response: " + e.toString());
+ Log.e(TAG, "failed to provide key response", e);
}
- onKeyError();
+ onKeyError(sessionId);
+ release();
}
/**
@@ -343,9 +597,23 @@ class MediaDrmBridge {
*/
@CalledByNative
private String getSecurityLevel() {
+ if (mMediaDrm == null) {
+ Log.e(TAG, "getSecurityLevel() called when MediaDrm is null.");
+ return null;
+ }
return mMediaDrm.getPropertyString("securityLevel");
}
+ private void startProvisioning() {
+ Log.d(TAG, "startProvisioning");
+ assert(mMediaDrm != null);
+ assert(!mProvisioningPending);
+ mProvisioningPending = true;
+ MediaDrm.ProvisionRequest request = mMediaDrm.getProvisionRequest();
+ PostRequestTask postTask = new PostRequestTask(request.getData());
+ postTask.execute(request.getDefaultUrl());
+ }
+
/**
* Called when the provision response is received.
*
@@ -353,6 +621,8 @@ class MediaDrmBridge {
*/
private void onProvisionResponse(byte[] response) {
Log.d(TAG, "onProvisionResponse()");
+ assert(mProvisioningPending);
+ mProvisioningPending = false;
// If |mMediaDrm| is released, there is no need to callback native.
if (mMediaDrm == null) {
@@ -360,14 +630,14 @@ class MediaDrmBridge {
}
boolean success = provideProvisionResponse(response);
+
if (mResetDeviceCredentialsPending) {
nativeOnResetDeviceCredentialsCompleted(mNativeMediaDrmBridge, success);
mResetDeviceCredentialsPending = false;
- return;
}
- if (!success) {
- onKeyError();
+ if (success) {
+ resumePendingOperations();
}
}
@@ -383,54 +653,86 @@ class MediaDrmBridge {
try {
mMediaDrm.provideProvisionResponse(response);
+ return true;
} catch (android.media.DeniedByServerException e) {
- Log.e(TAG, "failed to provide provision response: " + e.toString());
- return false;
+ Log.e(TAG, "failed to provide provision response", e);
} catch (java.lang.IllegalStateException e) {
- Log.e(TAG, "failed to provide provision response: " + e.toString());
- return false;
+ Log.e(TAG, "failed to provide provision response", e);
}
+ return false;
+ }
- if (mPendingInitData != null) {
- assert(!mResetDeviceCredentialsPending);
- byte[] initData = mPendingInitData;
- mPendingInitData = null;
- generateKeyRequest(initData, mMimeType);
- }
- return true;
+ private void onKeyMessage(
+ final String sessionId, final byte[] message, final String destinationUrl) {
+ mHandler.post(new Runnable(){
+ public void run() {
+ nativeOnKeyMessage(mNativeMediaDrmBridge, sessionId, message, destinationUrl);
+ }
+ });
+ }
+
+ private void onKeyAdded(final String sessionId) {
+ mHandler.post(new Runnable() {
+ public void run() {
+ nativeOnKeyAdded(mNativeMediaDrmBridge, sessionId);
+ }
+ });
}
- private void onKeyError() {
+ private void onKeyError(final String sessionId) {
// TODO(qinmin): pass the error code to native.
mHandler.post(new Runnable() {
public void run() {
- nativeOnKeyError(mNativeMediaDrmBridge, mSessionId);
+ nativeOnKeyError(mNativeMediaDrmBridge, sessionId);
}
});
}
+ private String GetSessionId(byte[] session) {
+ String sessionId = null;
+ try {
+ sessionId = new String(session, "UTF-8");
+ } catch (java.io.UnsupportedEncodingException e) {
+ Log.e(TAG, "GetSessionId failed", e);
+ } catch (java.lang.NullPointerException e) {
+ Log.e(TAG, "GetSessionId failed", e);
+ }
+ return sessionId;
+ }
+
private class MediaDrmListener implements MediaDrm.OnEventListener {
@Override
- public void onEvent(MediaDrm mediaDrm, byte[] sessionId, int event, int extra,
- byte[] data) {
+ public void onEvent(MediaDrm mediaDrm, byte[] session, int event, int extra, byte[] data) {
+ String sessionId = null;
switch(event) {
case MediaDrm.EVENT_PROVISION_REQUIRED:
- Log.d(TAG, "MediaDrm.EVENT_PROVISION_REQUIRED.");
- MediaDrm.ProvisionRequest request = mMediaDrm.getProvisionRequest();
- PostRequestTask postTask = new PostRequestTask(request.getData());
- postTask.execute(request.getDefaultUrl());
+ // This is handled by the handler of NotProvisionedException.
+ Log.d(TAG, "MediaDrm.EVENT_PROVISION_REQUIRED");
break;
case MediaDrm.EVENT_KEY_REQUIRED:
- Log.d(TAG, "MediaDrm.EVENT_KEY_REQUIRED.");
- generateKeyRequest(data, mMimeType);
+ Log.d(TAG, "MediaDrm.EVENT_KEY_REQUIRED");
+ sessionId = GetSessionId(session);
+ if (sessionId != null && !mProvisioningPending) {
+ assert(sessionExists(sessionId));
+ String mime = mSessionIds.get(sessionId);
+ try {
+ getKeyRequest(sessionId, data, mime);
+ } catch (android.media.NotProvisionedException e) {
+ Log.e(TAG, "Device not provisioned", e);
+ startProvisioning();
+ }
+ }
break;
case MediaDrm.EVENT_KEY_EXPIRED:
- Log.d(TAG, "MediaDrm.EVENT_KEY_EXPIRED.");
- onKeyError();
+ Log.d(TAG, "MediaDrm.EVENT_KEY_EXPIRED");
+ sessionId = GetSessionId(session);
+ if (sessionId != null) {
+ onKeyError(sessionId);
+ }
break;
case MediaDrm.EVENT_VENDOR_DEFINED:
- Log.d(TAG, "MediaDrm.EVENT_VENDOR_DEFINED.");
- assert(false);
+ Log.d(TAG, "MediaDrm.EVENT_VENDOR_DEFINED");
+ assert(false); // Should never happen.
break;
default:
Log.e(TAG, "Invalid DRM event " + (int)event);
@@ -495,15 +797,15 @@ class MediaDrmBridge {
}
}
- private native void nativeOnMediaCryptoReady(int nativeMediaDrmBridge);
+ private native void nativeOnMediaCryptoReady(long nativeMediaDrmBridge);
- private native void nativeOnKeyMessage(int nativeMediaDrmBridge, String sessionId,
+ private native void nativeOnKeyMessage(long nativeMediaDrmBridge, String sessionId,
byte[] message, String destinationUrl);
- private native void nativeOnKeyAdded(int nativeMediaDrmBridge, String sessionId);
+ private native void nativeOnKeyAdded(long nativeMediaDrmBridge, String sessionId);
- private native void nativeOnKeyError(int nativeMediaDrmBridge, String sessionId);
+ private native void nativeOnKeyError(long nativeMediaDrmBridge, String sessionId);
private native void nativeOnResetDeviceCredentialsCompleted(
- int nativeMediaDrmBridge, boolean success);
+ long nativeMediaDrmBridge, boolean success);
}
diff --git a/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java b/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java
index ecf445853e..fe9407d94c 100644
--- a/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java
+++ b/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java
@@ -50,8 +50,14 @@ public class MediaPlayerBridge {
}
@CalledByNative
- protected void prepareAsync() throws IllegalStateException {
- getLocalPlayer().prepareAsync();
+ protected boolean prepareAsync() {
+ try {
+ getLocalPlayer().prepareAsync();
+ } catch (IllegalStateException e) {
+ Log.e(TAG, "Unable to prepare MediaPlayer.", e);
+ return false;
+ }
+ return true;
}
@CalledByNative
diff --git a/media/base/android/java/src/org/chromium/media/MediaPlayerListener.java b/media/base/android/java/src/org/chromium/media/MediaPlayerListener.java
index 3c68589844..c30258518c 100644
--- a/media/base/android/java/src/org/chromium/media/MediaPlayerListener.java
+++ b/media/base/android/java/src/org/chromium/media/MediaPlayerListener.java
@@ -35,10 +35,10 @@ class MediaPlayerListener implements MediaPlayer.OnPreparedListener,
public static final int MEDIA_ERROR_TIMED_OUT = -110;
// Used to determine the class instance to dispatch the native call to.
- private int mNativeMediaPlayerListener = 0;
+ private long mNativeMediaPlayerListener = 0;
private final Context mContext;
- private MediaPlayerListener(int nativeMediaPlayerListener, Context context) {
+ private MediaPlayerListener(long nativeMediaPlayerListener, Context context) {
mNativeMediaPlayerListener = nativeMediaPlayerListener;
mContext = context;
}
@@ -148,22 +148,22 @@ class MediaPlayerListener implements MediaPlayer.OnPreparedListener,
* See media/base/android/media_player_listener.cc for all the following functions.
*/
private native void nativeOnMediaError(
- int nativeMediaPlayerListener,
+ long nativeMediaPlayerListener,
int errorType);
private native void nativeOnVideoSizeChanged(
- int nativeMediaPlayerListener,
+ long nativeMediaPlayerListener,
int width, int height);
private native void nativeOnBufferingUpdate(
- int nativeMediaPlayerListener,
+ long nativeMediaPlayerListener,
int percent);
- private native void nativeOnMediaPrepared(int nativeMediaPlayerListener);
+ private native void nativeOnMediaPrepared(long nativeMediaPlayerListener);
- private native void nativeOnPlaybackComplete(int nativeMediaPlayerListener);
+ private native void nativeOnPlaybackComplete(long nativeMediaPlayerListener);
- private native void nativeOnSeekComplete(int nativeMediaPlayerListener);
+ private native void nativeOnSeekComplete(long nativeMediaPlayerListener);
- private native void nativeOnMediaInterrupted(int nativeMediaPlayerListener);
+ private native void nativeOnMediaInterrupted(long nativeMediaPlayerListener);
}
diff --git a/media/base/android/java/src/org/chromium/media/VideoCapture.java b/media/base/android/java/src/org/chromium/media/VideoCapture.java
index 446d50ed43..b17cfa18b5 100644
--- a/media/base/android/java/src/org/chromium/media/VideoCapture.java
+++ b/media/base/android/java/src/org/chromium/media/VideoCapture.java
@@ -68,7 +68,7 @@ public class VideoCapture implements PreviewCallback, OnFrameAvailableListener {
private int mExpectedFrameSize = 0;
private int mId = 0;
// Native callback context variable.
- private int mNativeVideoCaptureDeviceAndroid = 0;
+ private long mNativeVideoCaptureDeviceAndroid = 0;
private int[] mGlTextures = null;
private SurfaceTexture mSurfaceTexture = null;
private static final int GL_TEXTURE_EXTERNAL_OES = 0x8D65;
@@ -82,12 +82,12 @@ public class VideoCapture implements PreviewCallback, OnFrameAvailableListener {
@CalledByNative
public static VideoCapture createVideoCapture(
- Context context, int id, int nativeVideoCaptureDeviceAndroid) {
+ Context context, int id, long nativeVideoCaptureDeviceAndroid) {
return new VideoCapture(context, id, nativeVideoCaptureDeviceAndroid);
}
public VideoCapture(
- Context context, int id, int nativeVideoCaptureDeviceAndroid) {
+ Context context, int id, long nativeVideoCaptureDeviceAndroid) {
mContext = context;
mId = id;
mNativeVideoCaptureDeviceAndroid = nativeVideoCaptureDeviceAndroid;
@@ -410,7 +410,7 @@ public class VideoCapture implements PreviewCallback, OnFrameAvailableListener {
}
private native void nativeOnFrameAvailable(
- int nativeVideoCaptureDeviceAndroid,
+ long nativeVideoCaptureDeviceAndroid,
byte[] data,
int length,
int rotation,
diff --git a/media/base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java b/media/base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java
index 17720e6671..005b2b143f 100644
--- a/media/base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java
+++ b/media/base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java
@@ -34,7 +34,7 @@ class WebAudioMediaCodecBridge {
@CalledByNative
private static boolean decodeAudioFile(Context ctx,
- int nativeMediaCodecBridge,
+ long nativeMediaCodecBridge,
int inputFD,
long dataSize) {
@@ -183,11 +183,11 @@ class WebAudioMediaCodecBridge {
}
private static native void nativeOnChunkDecoded(
- int nativeWebAudioMediaCodecBridge, ByteBuffer buf, int size,
+ long nativeWebAudioMediaCodecBridge, ByteBuffer buf, int size,
int inputChannelCount, int outputChannelCount);
private static native void nativeInitializeDestination(
- int nativeWebAudioMediaCodecBridge,
+ long nativeWebAudioMediaCodecBridge,
int inputChannelCount,
int sampleRate,
long durationMicroseconds);
diff --git a/media/base/android/media_codec_bridge.cc b/media/base/android/media_codec_bridge.cc
index 1c4928aad9..1acd23afa7 100644
--- a/media/base/android/media_codec_bridge.cc
+++ b/media/base/android/media_codec_bridge.cc
@@ -49,6 +49,8 @@ static const std::string VideoCodecToAndroidMimeType(const VideoCodec& codec) {
return "video/avc";
case kCodecVP8:
return "video/x-vnd.on2.vp8";
+ case kCodecVP9:
+ return "video/x-vnd.on2.vp9";
default:
return std::string();
}
@@ -62,6 +64,8 @@ static const std::string CodecTypeToAndroidMimeType(const std::string& codec) {
return "audio/mp4a-latm";
if (codec == "vp8" || codec == "vp8.0")
return "video/x-vnd.on2.vp8";
+ if (codec == "vp9" || codec == "vp9.0")
+ return "video/x-vnd.on2.vp9";
if (codec == "vorbis")
return "audio/vorbis";
return std::string();
@@ -122,8 +126,6 @@ void MediaCodecBridge::GetCodecsInfo(
CodecsInfo info;
info.codecs = AndroidMimeTypeToCodecType(mime_type);
ConvertJavaStringToUTF8(env, j_codec_name.obj(), &info.name);
- info.secure_decoder_supported =
- Java_CodecInfo_isSecureDecoderSupported(env, j_info.obj());
codecs_info->push_back(info);
}
}
@@ -242,6 +244,12 @@ MediaCodecStatus MediaCodecBridge::QueueSecureInputBuffer(
DCHECK_GT(subsamples_size, 0);
DCHECK(subsamples);
for (int i = 0; i < subsamples_size; ++i) {
+ DCHECK(subsamples[i].clear_bytes <= std::numeric_limits<uint16>::max());
+ if (subsamples[i].cypher_bytes >
+ static_cast<uint32>(std::numeric_limits<jint>::max())) {
+ return MEDIA_CODEC_ERROR;
+ }
+
native_clear_array[i] = subsamples[i].clear_bytes;
native_cypher_array[i] = subsamples[i].cypher_bytes;
}
diff --git a/media/base/android/media_codec_bridge.h b/media/base/android/media_codec_bridge.h
index 64c73571c6..cdd883f031 100644
--- a/media/base/android/media_codec_bridge.h
+++ b/media/base/android/media_codec_bridge.h
@@ -48,14 +48,12 @@ class MEDIA_EXPORT MediaCodecBridge {
// decode |codec| type.
static bool CanDecode(const std::string& codec, bool is_secure);
- // Represents supported codecs on android. |secure_decoder_supported| is true
- // if secure decoder is available for the codec type.
+ // Represents supported codecs on android.
// TODO(qinmin): Curretly the codecs string only contains one codec, do we
// need more specific codecs separated by comma. (e.g. "vp8" -> "vp8, vp8.0")
struct CodecsInfo {
std::string codecs;
std::string name;
- bool secure_decoder_supported;
};
// Get a list of supported codecs.
diff --git a/media/base/android/media_codec_bridge_unittest.cc b/media/base/android/media_codec_bridge_unittest.cc
index 0a08cadd60..5a35579d1d 100644
--- a/media/base/android/media_codec_bridge_unittest.cc
+++ b/media/base/android/media_codec_bridge_unittest.cc
@@ -94,13 +94,13 @@ unsigned char test_mp3[] = {
namespace media {
// Helper macro to skip the test if MediaCodecBridge isn't available.
-#define SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE() \
- do { \
- if (!MediaCodecBridge::IsAvailable()) { \
- LOG(INFO) << "Could not run test - not supported on device."; \
- return; \
- } \
- } while (0) \
+#define SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE() \
+ do { \
+ if (!MediaCodecBridge::IsAvailable()) { \
+ VLOG(0) << "Could not run test - not supported on device."; \
+ return; \
+ } \
+ } while (0)
static const int kPresentationTimeBase = 100;
diff --git a/media/base/android/media_decoder_job.cc b/media/base/android/media_decoder_job.cc
index 4558845f32..74aeecdc54 100644
--- a/media/base/android/media_decoder_job.cc
+++ b/media/base/android/media_decoder_job.cc
@@ -28,6 +28,7 @@ MediaDecoderJob::MediaDecoderJob(
media_codec_bridge_(media_codec_bridge),
needs_flush_(false),
input_eos_encountered_(false),
+ skip_eos_enqueue_(true),
prerolling_(true),
weak_this_(this),
request_data_cb_(request_data_cb),
@@ -288,6 +289,17 @@ void MediaDecoderJob::DecodeInternal(
return;
}
+ if (skip_eos_enqueue_) {
+ if (unit.end_of_stream || unit.data.empty()) {
+ input_eos_encountered_ = true;
+ callback.Run(MEDIA_CODEC_OUTPUT_END_OF_STREAM, kNoTimestamp(), 0);
+ return;
+ }
+
+ skip_eos_enqueue_ = false;
+ }
+
+
MediaCodecStatus input_status = MEDIA_CODEC_INPUT_END_OF_STREAM;
if (!input_eos_encountered_) {
input_status = QueueInputBuffer(unit);
@@ -333,7 +345,7 @@ void MediaDecoderJob::DecodeInternal(
// input queueing without immediate dequeue when |input_status| !=
// |MEDIA_CODEC_OK|. Need to use the |presentation_timestamp| for video, and
// use |size| to calculate the timestamp for audio. See
- // http://crbug.com/310823 and b/11356652.
+ // http://crbug.com/310823 and http://b/11356652.
bool render_output = unit.timestamp >= preroll_timestamp_ &&
(status != MEDIA_CODEC_OUTPUT_END_OF_STREAM || size != 0u);
base::TimeDelta time_to_render;
diff --git a/media/base/android/media_decoder_job.h b/media/base/android/media_decoder_job.h
index e9530b39e0..bb921bc4d8 100644
--- a/media/base/android/media_decoder_job.h
+++ b/media/base/android/media_decoder_job.h
@@ -144,8 +144,15 @@ class MediaDecoderJob {
bool needs_flush_;
// Whether input EOS is encountered.
+ // TODO(wolenetz/qinmin): Protect with a lock. See http://crbug.com/320043.
bool input_eos_encountered_;
+ // Tracks whether DecodeInternal() should skip decoding if the first access
+ // unit is EOS or empty, and report |MEDIA_CODEC_OUTPUT_END_OF_STREAM|. This
+ // is to work around some decoders that could crash otherwise. See
+ // http://b/11696552.
+ bool skip_eos_enqueue_;
+
// The timestamp the decoder needs to preroll to. If an access unit's
// timestamp is smaller than |preroll_timestamp_|, don't render it.
// TODO(qinmin): Comparing access unit's timestamp with |preroll_timestamp_|
diff --git a/media/base/android/media_drm_bridge.cc b/media/base/android/media_drm_bridge.cc
index d1a6a052b5..8b5d59e413 100644
--- a/media/base/android/media_drm_bridge.cc
+++ b/media/base/android/media_drm_bridge.cc
@@ -11,6 +11,7 @@
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
+#include "base/strings/string_util.h"
#include "jni/MediaDrmBridge_jni.h"
#include "media/base/android/media_player_manager.h"
@@ -235,7 +236,8 @@ MediaDrmBridge::~MediaDrmBridge() {
Java_MediaDrmBridge_release(env, j_media_drm_.obj());
}
-bool MediaDrmBridge::GenerateKeyRequest(const std::string& type,
+bool MediaDrmBridge::GenerateKeyRequest(uint32 reference_id,
+ const std::string& type,
const uint8* init_data,
int init_data_length) {
std::vector<uint8> pssh_data;
@@ -246,28 +248,29 @@ bool MediaDrmBridge::GenerateKeyRequest(const std::string& type,
ScopedJavaLocalRef<jbyteArray> j_pssh_data =
base::android::ToJavaByteArray(env, &pssh_data[0], pssh_data.size());
ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, type);
+ pending_key_request_reference_ids_.push(reference_id);
Java_MediaDrmBridge_generateKeyRequest(
env, j_media_drm_.obj(), j_pssh_data.obj(), j_mime.obj());
return true;
}
-void MediaDrmBridge::AddKey(const uint8* key, int key_length,
- const uint8* init_data, int init_data_length,
- const std::string& session_id) {
+void MediaDrmBridge::AddKey(uint32 reference_id,
+ const uint8* key, int key_length,
+ const uint8* init_data, int init_data_length) {
DVLOG(1) << __FUNCTION__;
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jbyteArray> j_key_data =
base::android::ToJavaByteArray(env, key, key_length);
ScopedJavaLocalRef<jstring> j_session_id =
- ConvertUTF8ToJavaString(env, session_id);
+ ConvertUTF8ToJavaString(env, LookupSessionId(reference_id));
Java_MediaDrmBridge_addKey(
env, j_media_drm_.obj(), j_session_id.obj(), j_key_data.obj());
}
-void MediaDrmBridge::CancelKeyRequest(const std::string& session_id) {
+void MediaDrmBridge::CancelKeyRequest(uint32 reference_id) {
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jstring> j_session_id =
- ConvertUTF8ToJavaString(env, session_id);
+ ConvertUTF8ToJavaString(env, LookupSessionId(reference_id));
Java_MediaDrmBridge_cancelKeyRequest(
env, j_media_drm_.obj(), j_session_id.obj());
}
@@ -300,22 +303,28 @@ void MediaDrmBridge::OnKeyMessage(JNIEnv* env,
jbyteArray j_message,
jstring j_destination_url) {
std::string session_id = ConvertJavaStringToUTF8(env, j_session_id);
+ uint32_t reference_id = DetermineReferenceId(session_id);
std::vector<uint8> message;
JavaByteArrayToByteVector(env, j_message, &message);
std::string destination_url = ConvertJavaStringToUTF8(env, j_destination_url);
- manager_->OnKeyMessage(media_keys_id_, session_id, message, destination_url);
+ manager_->OnSetSessionId(media_keys_id_, reference_id, session_id);
+ manager_->OnKeyMessage(
+ media_keys_id_, reference_id, message, destination_url);
}
void MediaDrmBridge::OnKeyAdded(JNIEnv* env, jobject, jstring j_session_id) {
std::string session_id = ConvertJavaStringToUTF8(env, j_session_id);
- manager_->OnKeyAdded(media_keys_id_, session_id);
+ uint32_t reference_id = DetermineReferenceId(session_id);
+ manager_->OnKeyAdded(media_keys_id_, reference_id);
}
void MediaDrmBridge::OnKeyError(JNIEnv* env, jobject, jstring j_session_id) {
// |j_session_id| can be NULL, in which case we'll return an empty string.
std::string session_id = ConvertJavaStringToUTF8(env, j_session_id);
- manager_->OnKeyError(media_keys_id_, session_id, MediaKeys::kUnknownError, 0);
+ uint32 reference_id = DetermineReferenceId(session_id);
+ manager_->OnKeyError(
+ media_keys_id_, reference_id, MediaKeys::kUnknownError, 0);
}
ScopedJavaLocalRef<jobject> MediaDrmBridge::GetMediaCrypto() {
@@ -354,4 +363,36 @@ void MediaDrmBridge::OnResetDeviceCredentialsCompleted(
base::ResetAndReturn(&reset_credentials_cb_).Run(success);
}
+uint32_t MediaDrmBridge::DetermineReferenceId(const std::string& session_id) {
+ for (SessionMap::iterator it = session_map_.begin();
+ it != session_map_.end();
+ ++it) {
+ if (it->second == session_id)
+ return it->first;
+ }
+
+ // There is no entry in the map; assume it came from the oldest
+ // GenerateKeyRequest() call.
+ DCHECK(!pending_key_request_reference_ids_.empty());
+ uint32 reference_id = pending_key_request_reference_ids_.front();
+ pending_key_request_reference_ids_.pop();
+
+ // If this is a valid |session_id|, add it to the list. Otherwise, avoid
+ // adding empty string as a mapping to prevent future calls with an empty
+ // string from using the wrong reference_id.
+ if (!session_id.empty()) {
+ DCHECK(session_map_.find(reference_id) == session_map_.end());
+ DCHECK(!session_id.empty());
+ session_map_[reference_id] = session_id;
+ }
+
+ return reference_id;
+}
+
+const std::string& MediaDrmBridge::LookupSessionId(uint32 reference_id) {
+ // Session may not exist if an error happens during GenerateKeyRequest().
+ SessionMap::iterator it = session_map_.find(reference_id);
+ return (it != session_map_.end()) ? it->second : EmptyString();
+}
+
} // namespace media
diff --git a/media/base/android/media_drm_bridge.h b/media/base/android/media_drm_bridge.h
index 520b54d13c..2443f65ecb 100644
--- a/media/base/android/media_drm_bridge.h
+++ b/media/base/android/media_drm_bridge.h
@@ -6,6 +6,8 @@
#define MEDIA_BASE_ANDROID_MEDIA_DRM_BRIDGE_H_
#include <jni.h>
+#include <map>
+#include <queue>
#include <string>
#include <vector>
@@ -59,13 +61,14 @@ class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
static bool RegisterMediaDrmBridge(JNIEnv* env);
// MediaKeys implementations.
- virtual bool GenerateKeyRequest(const std::string& type,
+ virtual bool GenerateKeyRequest(uint32 reference_id,
+ const std::string& type,
const uint8* init_data,
int init_data_length) OVERRIDE;
- virtual void AddKey(const uint8* key, int key_length,
- const uint8* init_data, int init_data_length,
- const std::string& session_id) OVERRIDE;
- virtual void CancelKeyRequest(const std::string& session_id) OVERRIDE;
+ virtual void AddKey(uint32 reference_id,
+ const uint8* key, int key_length,
+ const uint8* init_data, int init_data_length) OVERRIDE;
+ virtual void CancelKeyRequest(uint32 reference_id) OVERRIDE;
// Returns a MediaCrypto object if it's already created. Returns a null object
// otherwise.
@@ -103,6 +106,9 @@ class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
GURL frame_url() const { return frame_url_; }
private:
+ // Map from reference_id to session_id.
+ typedef std::map<uint32_t, std::string> SessionMap;
+
static bool IsSecureDecoderRequired(SecurityLevel security_level);
MediaDrmBridge(int media_keys_id,
@@ -114,6 +120,13 @@ class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
// Get the security level of the media.
SecurityLevel GetSecurityLevel();
+ // Determine the corresponding reference_id for |session_id|.
+ uint32_t DetermineReferenceId(const std::string& session_id);
+
+ // Determine the corresponding session_id for |reference_id|. The returned
+ // value is only valid on the main thread, and should be stored by copy.
+ const std::string& LookupSessionId(uint32_t reference_id);
+
// ID of the MediaKeys object.
int media_keys_id_;
@@ -133,6 +146,14 @@ class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
ResetCredentialsCB reset_credentials_cb_;
+ SessionMap session_map_;
+
+ // As the response from GenerateKeyRequest() will be asynchronous, add this
+ // request to a queue and assume that the subsequent responses come back in
+ // the order issued.
+ // TODO(jrummell): Remove once the Java interface supports reference_id.
+ std::queue<uint32_t> pending_key_request_reference_ids_;
+
DISALLOW_COPY_AND_ASSIGN(MediaDrmBridge);
};
diff --git a/media/base/android/media_player_bridge.cc b/media/base/android/media_player_bridge.cc
index 435363d9d7..64e2db849e 100644
--- a/media/base/android/media_player_bridge.cc
+++ b/media/base/android/media_player_bridge.cc
@@ -118,8 +118,8 @@ void MediaPlayerBridge::SetVideoSurface(gfx::ScopedJavaSurface surface) {
}
void MediaPlayerBridge::Prepare() {
- if (j_media_player_bridge_.is_null())
- CreateJavaMediaPlayerBridge();
+ DCHECK(j_media_player_bridge_.is_null());
+ CreateJavaMediaPlayerBridge();
if (url_.SchemeIsFileSystem()) {
manager()->GetMediaResourceGetter()->GetPlatformPathFromFileSystemURL(
url_, base::Bind(&MediaPlayerBridge::SetDataSource,
@@ -144,15 +144,16 @@ void MediaPlayerBridge::SetDataSource(const std::string& url) {
jobject j_context = base::android::GetApplicationContext();
DCHECK(j_context);
- if (Java_MediaPlayerBridge_setDataSource(
+ if (!Java_MediaPlayerBridge_setDataSource(
env, j_media_player_bridge_.obj(), j_context, j_url_string.obj(),
j_cookies.obj(), hide_url_log_)) {
- manager()->RequestMediaResources(player_id());
- Java_MediaPlayerBridge_prepareAsync(
- env, j_media_player_bridge_.obj());
- } else {
OnMediaError(MEDIA_ERROR_FORMAT);
+ return;
}
+
+ manager()->RequestMediaResources(player_id());
+ if (!Java_MediaPlayerBridge_prepareAsync(env, j_media_player_bridge_.obj()))
+ OnMediaError(MEDIA_ERROR_FORMAT);
}
void MediaPlayerBridge::OnCookiesRetrieved(const std::string& cookies) {
diff --git a/media/base/android/media_player_manager.h b/media/base/android/media_player_manager.h
index 668613bf5a..ec15b35cee 100644
--- a/media/base/android/media_player_manager.h
+++ b/media/base/android/media_player_manager.h
@@ -87,25 +87,30 @@ class MEDIA_EXPORT MediaPlayerManager {
// Called by the player to get a hardware protected surface.
virtual void OnProtectedSurfaceRequested(int player_id) = 0;
- // TODO(xhwang): The following three methods needs to be decoupled from
+ // TODO(xhwang): The following four methods need to be decoupled from
// MediaPlayerManager to support the W3C Working Draft version of the EME
- // spec.
+ // spec. http://crbug.com/315312
// Called when MediaDrmBridge wants to send a KeyAdded.
virtual void OnKeyAdded(int media_keys_id,
- const std::string& session_id) = 0;
+ uint32 reference_id) = 0;
// Called when MediaDrmBridge wants to send a KeyError.
virtual void OnKeyError(int media_keys_id,
- const std::string& session_id,
+ uint32 reference_id,
media::MediaKeys::KeyError error_code,
int system_code) = 0;
// Called when MediaDrmBridge wants to send a KeyMessage.
virtual void OnKeyMessage(int media_keys_id,
- const std::string& session_id,
+ uint32 reference_id,
const std::vector<uint8>& message,
const std::string& destination_url) = 0;
+
+ // Called when MediaDrmBridge determines a SessionId.
+ virtual void OnSetSessionId(int media_keys_id,
+ uint32 reference_id,
+ const std::string& session_id) = 0;
};
} // namespace media
diff --git a/media/base/android/media_source_player.cc b/media/base/android/media_source_player.cc
index da4b55f47e..4c6cb9c121 100644
--- a/media/base/android/media_source_player.cc
+++ b/media/base/android/media_source_player.cc
@@ -248,7 +248,7 @@ void MediaSourcePlayer::Release() {
pending_event_ &= (SEEK_EVENT_PENDING | CONFIG_CHANGE_EVENT_PENDING);
audio_decoder_job_.reset();
- video_decoder_job_.reset();
+ ResetVideoDecoderJob();
// Prevent job re-creation attempts in OnDemuxerConfigsAvailable()
reconfig_audio_decoder_ = false;
@@ -404,8 +404,8 @@ void MediaSourcePlayer::SetDrmBridge(MediaDrmBridge* drm_bridge) {
// TODO(qinmin): support DRM change after playback has started.
// http://crbug.com/253792.
if (GetCurrentTime() > base::TimeDelta()) {
- LOG(INFO) << "Setting DRM bridge after playback has started. "
- << "This is not well supported!";
+ VLOG(0) << "Setting DRM bridge after playback has started. "
+ << "This is not well supported!";
}
drm_bridge_ = drm_bridge;
@@ -518,13 +518,13 @@ void MediaSourcePlayer::ProcessPendingEvents() {
if (IsEventPending(SURFACE_CHANGE_EVENT_PENDING)) {
DVLOG(1) << __FUNCTION__ << " : Handling SURFACE_CHANGE_EVENT.";
// Setting a new surface will require a new MediaCodec to be created.
- video_decoder_job_.reset();
+ ResetVideoDecoderJob();
ConfigureVideoDecoderJob();
// Return early if we can't successfully configure a new video decoder job
// yet, except continue processing other pending events if |surface_| is
// empty.
- if (!video_decoder_job_ && !surface_.IsEmpty())
+ if (HasVideo() && !video_decoder_job_ && !surface_.IsEmpty())
return;
}
@@ -655,7 +655,16 @@ void MediaSourcePlayer::DecodeMoreAudio() {
// Failed to start the next decode.
// Wait for demuxer ready message.
+ DCHECK(!reconfig_audio_decoder_);
reconfig_audio_decoder_ = true;
+
+ // Config change may have just been detected on the other stream. If so,
+ // don't send a duplicate demuxer config request.
+ if (IsEventPending(CONFIG_CHANGE_EVENT_PENDING)) {
+ DCHECK(reconfig_video_decoder_);
+ return;
+ }
+
SetPendingEvent(CONFIG_CHANGE_EVENT_PENDING);
ProcessPendingEvents();
}
@@ -675,12 +684,21 @@ void MediaSourcePlayer::DecodeMoreVideo() {
// Failed to start the next decode.
// Wait for demuxer ready message.
- reconfig_video_decoder_ = true;
// After this detection of video config change, next video data received
// will begin with I-frame.
next_video_data_is_iframe_ = true;
+ DCHECK(!reconfig_video_decoder_);
+ reconfig_video_decoder_ = true;
+
+ // Config change may have just been detected on the other stream. If so,
+ // don't send a duplicate demuxer config request.
+ if (IsEventPending(CONFIG_CHANGE_EVENT_PENDING)) {
+ DCHECK(reconfig_audio_decoder_);
+ return;
+ }
+
SetPendingEvent(CONFIG_CHANGE_EVENT_PENDING);
ProcessPendingEvents();
}
@@ -748,17 +766,27 @@ void MediaSourcePlayer::ConfigureAudioDecoderJob() {
}
}
+void MediaSourcePlayer::ResetVideoDecoderJob() {
+ video_decoder_job_.reset();
+
+ // Any eventual video decoder job re-creation will use the current |surface_|.
+ if (IsEventPending(SURFACE_CHANGE_EVENT_PENDING))
+ ClearPendingEvent(SURFACE_CHANGE_EVENT_PENDING);
+}
+
void MediaSourcePlayer::ConfigureVideoDecoderJob() {
if (!HasVideo() || surface_.IsEmpty()) {
- video_decoder_job_.reset();
- if (IsEventPending(SURFACE_CHANGE_EVENT_PENDING))
- ClearPendingEvent(SURFACE_CHANGE_EVENT_PENDING);
+ ResetVideoDecoderJob();
return;
}
// Create video decoder job only if config changes or we don't have a job.
- if (video_decoder_job_ && !reconfig_video_decoder_)
+ if (video_decoder_job_ && !reconfig_video_decoder_) {
+ DCHECK(!IsEventPending(SURFACE_CHANGE_EVENT_PENDING));
return;
+ }
+
+ DCHECK(!video_decoder_job_ || !video_decoder_job_->is_decoding());
if (reconfig_video_decoder_) {
// No hack browser seek should be required. I-Frame must be next.
@@ -769,6 +797,8 @@ void MediaSourcePlayer::ConfigureVideoDecoderJob() {
// If uncertain that video I-frame data is next and there is no seek already
// in process, request browser demuxer seek so the new decoder will decode
// an I-frame first. Otherwise, the new MediaCodec might crash. See b/8950387.
+ // Eventual OnDemuxerSeekDone() will trigger ProcessPendingEvents() and
+ // continue from here.
// TODO(wolenetz): Instead of doing hack browser seek, replay cached data
// since last keyframe. See http://crbug.com/304234.
if (!next_video_data_is_iframe_ && !IsEventPending(SEEK_EVENT_PENDING)) {
@@ -776,17 +806,16 @@ void MediaSourcePlayer::ConfigureVideoDecoderJob() {
return;
}
+ // Release the old VideoDecoderJob first so the surface can get released.
+ // Android does not allow 2 MediaCodec instances to use the same surface.
+ ResetVideoDecoderJob();
+
base::android::ScopedJavaLocalRef<jobject> media_crypto = GetMediaCrypto();
if (is_video_encrypted_ && media_crypto.is_null())
return;
- DCHECK(!video_decoder_job_ || !video_decoder_job_->is_decoding());
-
DVLOG(1) << __FUNCTION__ << " : creating new video decoder job";
- // Release the old VideoDecoderJob first so the surface can get released.
- // Android does not allow 2 MediaCodec instances use the same surface.
- video_decoder_job_.reset();
// Create the new VideoDecoderJob.
bool is_secure = IsProtectedSurfaceRequired();
video_decoder_job_.reset(
@@ -798,13 +827,11 @@ void MediaSourcePlayer::ConfigureVideoDecoderJob() {
base::Bind(&DemuxerAndroid::RequestDemuxerData,
base::Unretained(demuxer_.get()),
DemuxerStream::VIDEO)));
- if (video_decoder_job_) {
- video_decoder_job_->BeginPrerolling(preroll_timestamp_);
- reconfig_video_decoder_ = false;
- }
+ if (!video_decoder_job_)
+ return;
- if (IsEventPending(SURFACE_CHANGE_EVENT_PENDING))
- ClearPendingEvent(SURFACE_CHANGE_EVENT_PENDING);
+ video_decoder_job_->BeginPrerolling(preroll_timestamp_);
+ reconfig_video_decoder_ = false;
// Inform the fullscreen view the player is ready.
// TODO(qinmin): refactor MediaPlayerBridge so that we have a better way
diff --git a/media/base/android/media_source_player.h b/media/base/android/media_source_player.h
index c718464408..f798eed4f9 100644
--- a/media/base/android/media_source_player.h
+++ b/media/base/android/media_source_player.h
@@ -100,14 +100,18 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// Handle pending events when all the decoder jobs finished.
void ProcessPendingEvents();
- // Helper method to configure the decoder jobs.
+ // Helper method to clear any pending |SURFACE_CHANGE_EVENT_PENDING|
+ // and reset |video_decoder_job_| to null.
+ void ResetVideoDecoderJob();
+
+ // Helper methods to configure the decoder jobs.
void ConfigureVideoDecoderJob();
void ConfigureAudioDecoderJob();
// Flush the decoders and clean up all the data needs to be decoded.
void ClearDecodingData();
- // Called to decoder more data.
+ // Called to decode more data.
void DecodeMoreAudio();
void DecodeMoreVideo();
diff --git a/media/base/android/media_source_player_unittest.cc b/media/base/android/media_source_player_unittest.cc
index 8b33f13899..b7572f94dc 100644
--- a/media/base/android/media_source_player_unittest.cc
+++ b/media/base/android/media_source_player_unittest.cc
@@ -20,13 +20,13 @@
namespace media {
// Helper macro to skip the test if MediaCodecBridge isn't available.
-#define SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE() \
- do { \
- if (!MediaCodecBridge::IsAvailable()) { \
- LOG(INFO) << "Could not run test - not supported on device."; \
- return; \
- } \
- } while (0) \
+#define SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE() \
+ do { \
+ if (!MediaCodecBridge::IsAvailable()) { \
+ VLOG(0) << "Could not run test - not supported on device."; \
+ return; \
+ } \
+ } while (0)
static const int kDefaultDurationInMs = 10000;
@@ -39,7 +39,8 @@ static const char kVideoWebM[] = "video/webm";
class MockMediaPlayerManager : public MediaPlayerManager {
public:
explicit MockMediaPlayerManager(base::MessageLoop* message_loop)
- : message_loop_(message_loop) {}
+ : message_loop_(message_loop),
+ playback_completed_(false) {}
virtual ~MockMediaPlayerManager() {}
// MediaPlayerManager implementation.
@@ -54,6 +55,7 @@ class MockMediaPlayerManager : public MediaPlayerManager {
int player_id, base::TimeDelta duration, int width, int height,
bool success) OVERRIDE {}
virtual void OnPlaybackComplete(int player_id) OVERRIDE {
+ playback_completed_ = true;
if (message_loop_->is_running())
message_loop_->Quit();
}
@@ -72,18 +74,26 @@ class MockMediaPlayerManager : public MediaPlayerManager {
}
virtual void OnProtectedSurfaceRequested(int player_id) OVERRIDE {}
virtual void OnKeyAdded(int key_id,
- const std::string& session_id) OVERRIDE {}
+ uint32 reference_id) OVERRIDE {}
virtual void OnKeyError(int key_id,
- const std::string& session_id,
+ uint32 reference_id,
MediaKeys::KeyError error_code,
int system_code) OVERRIDE {}
virtual void OnKeyMessage(int key_id,
- const std::string& session_id,
+ uint32 reference_id,
const std::vector<uint8>& message,
const std::string& destination_url) OVERRIDE {}
+ virtual void OnSetSessionId(int media_keys_id,
+ uint32 reference_id,
+ const std::string& session_id) OVERRIDE {}
+
+ bool playback_completed() const {
+ return playback_completed_;
+ }
private:
base::MessageLoop* message_loop_;
+ bool playback_completed_;
DISALLOW_COPY_AND_ASSIGN(MockMediaPlayerManager);
};
@@ -208,23 +218,43 @@ class MediaSourcePlayerTest : public testing::Test {
decoder_callback_hook_executed_ = true;
}
- DemuxerConfigs CreateAudioDemuxerConfigs() {
+ // Inspect internal pending_event_ state of |player_|. This is for infrequent
+ // use by tests, only where required.
+ bool IsPendingSurfaceChange() {
+ return player_.IsEventPending(player_.SURFACE_CHANGE_EVENT_PENDING);
+ }
+
+ DemuxerConfigs CreateAudioDemuxerConfigs(AudioCodec audio_codec) {
DemuxerConfigs configs;
- configs.audio_codec = kCodecVorbis;
+ configs.audio_codec = audio_codec;
configs.audio_channels = 2;
- configs.audio_sampling_rate = 44100;
configs.is_audio_encrypted = false;
configs.duration_ms = kDefaultDurationInMs;
- scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("vorbis-extradata");
+
+ if (audio_codec == kCodecVorbis) {
+ configs.audio_sampling_rate = 44100;
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(
+ "vorbis-extradata");
+ configs.audio_extra_data = std::vector<uint8>(
+ buffer->data(),
+ buffer->data() + buffer->data_size());
+ return configs;
+ }
+
+ // Other codecs are not yet supported by this helper.
+ EXPECT_EQ(audio_codec, kCodecAAC);
+
+ configs.audio_sampling_rate = 48000;
+ uint8 aac_extra_data[] = { 0x13, 0x10 };
configs.audio_extra_data = std::vector<uint8>(
- buffer->data(),
- buffer->data() + buffer->data_size());
+ aac_extra_data,
+ aac_extra_data + 2);
return configs;
}
// Starts an audio decoder job.
void StartAudioDecoderJob() {
- Start(CreateAudioDemuxerConfigs());
+ Start(CreateAudioDemuxerConfigs(kCodecVorbis));
}
DemuxerConfigs CreateVideoDemuxerConfigs() {
@@ -237,7 +267,7 @@ class MediaSourcePlayerTest : public testing::Test {
}
DemuxerConfigs CreateAudioVideoDemuxerConfigs() {
- DemuxerConfigs configs = CreateAudioDemuxerConfigs();
+ DemuxerConfigs configs = CreateAudioDemuxerConfigs(kCodecVorbis);
configs.video_codec = kCodecVP8;
configs.video_size = gfx::Size(320, 240);
configs.is_video_encrypted = false;
@@ -570,7 +600,7 @@ TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithInvalidConfig) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test audio decoder job will not be created when failed to start the codec.
- DemuxerConfigs configs = CreateAudioDemuxerConfigs();
+ DemuxerConfigs configs = CreateAudioDemuxerConfigs(kCodecVorbis);
// Replace with invalid |audio_extra_data|
configs.audio_extra_data.clear();
uint8 invalid_codec_data[] = { 0x00, 0xff, 0xff, 0xff, 0xff };
@@ -722,7 +752,7 @@ TEST_F(MediaSourcePlayerTest, AudioOnlyStartAfterSeekFinish) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test audio decoder job will not start until pending seek event is handled.
- DemuxerConfigs configs = CreateAudioDemuxerConfigs();
+ DemuxerConfigs configs = CreateAudioDemuxerConfigs(kCodecVorbis);
player_.OnDemuxerConfigsAvailable(configs);
EXPECT_FALSE(GetMediaDecoderJob(true));
EXPECT_EQ(0, demuxer_->num_data_requests());
@@ -947,6 +977,42 @@ TEST_F(MediaSourcePlayerTest, ReplayAfterInputEOS) {
EXPECT_EQ(1, demuxer_->num_seek_requests());
}
+TEST_F(MediaSourcePlayerTest, FirstDataIsEOS) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test decode of EOS buffer without any prior decode. See also
+ // http://b/11696552.
+ Start(CreateAudioDemuxerConfigs(kCodecAAC));
+ EXPECT_TRUE(GetMediaDecoderJob(true));
+
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+ player_.OnDemuxerDataAvailable(CreateEOSAck(true));
+ EXPECT_FALSE(manager_.playback_completed());
+
+ message_loop_.Run();
+ EXPECT_TRUE(manager_.playback_completed());
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, FirstDataAfterSeekIsEOS) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test decode of EOS buffer, just after seeking, without any prior decode
+ // (other than the simulated |kAborted| resulting from the seek process.)
+ // See also http://b/11696552.
+ Start(CreateAudioDemuxerConfigs(kCodecAAC));
+ EXPECT_TRUE(GetMediaDecoderJob(true));
+
+ SeekPlayer(true, base::TimeDelta());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+ player_.OnDemuxerDataAvailable(CreateEOSAck(true));
+ EXPECT_FALSE(manager_.playback_completed());
+
+ message_loop_.Run();
+ EXPECT_TRUE(manager_.playback_completed());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+}
+
TEST_F(MediaSourcePlayerTest, NoRequestForDataAfterAbort) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
@@ -1252,7 +1318,7 @@ TEST_F(MediaSourcePlayerTest, PrerollContinuesAcrossConfigChange) {
EXPECT_EQ(1, demuxer_->num_config_requests());
// Simulate arrival of new configs.
- player_.OnDemuxerConfigsAvailable(CreateAudioDemuxerConfigs());
+ player_.OnDemuxerConfigsAvailable(CreateAudioDemuxerConfigs(kCodecVorbis));
// Send some data before the seek position.
for (int i = 1; i < 4; ++i) {
@@ -1272,6 +1338,45 @@ TEST_F(MediaSourcePlayerTest, PrerollContinuesAcrossConfigChange) {
EXPECT_FALSE(IsPrerolling(true));
}
+TEST_F(MediaSourcePlayerTest, SimultaneousAudioVideoConfigChange) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that the player allows simultaneous audio and video config change,
+ // such as might occur during OnPrefetchDone() if next access unit for both
+ // audio and video jobs is |kConfigChanged|.
+ Start(CreateAudioVideoDemuxerConfigs());
+ CreateNextTextureAndSetVideoSurface();
+ MediaDecoderJob* first_audio_job = GetMediaDecoderJob(true);
+ MediaDecoderJob* first_video_job = GetMediaDecoderJob(false);
+ EXPECT_TRUE(first_audio_job && first_video_job);
+
+ // Simulate audio |kConfigChanged| prefetched as standalone access unit.
+ player_.OnDemuxerDataAvailable(
+ CreateReadFromDemuxerAckWithConfigChanged(true, 0));
+ EXPECT_EQ(0, demuxer_->num_config_requests()); // No OnPrefetchDone() yet.
+
+ // Simulate video |kConfigChanged| prefetched as standalone access unit.
+ player_.OnDemuxerDataAvailable(
+ CreateReadFromDemuxerAckWithConfigChanged(false, 0));
+ EXPECT_EQ(1, demuxer_->num_config_requests()); // OnPrefetchDone() occurred.
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+
+ // No job re-creation should occur until the requested configs arrive.
+ EXPECT_EQ(first_audio_job, GetMediaDecoderJob(true));
+ EXPECT_EQ(first_video_job, GetMediaDecoderJob(false));
+
+ player_.OnDemuxerConfigsAvailable(CreateAudioVideoDemuxerConfigs());
+ EXPECT_EQ(4, demuxer_->num_data_requests());
+ MediaDecoderJob* second_audio_job = GetMediaDecoderJob(true);
+ MediaDecoderJob* second_video_job = GetMediaDecoderJob(false);
+ EXPECT_NE(first_audio_job, second_audio_job);
+ EXPECT_NE(first_video_job, second_video_job);
+ EXPECT_TRUE(second_audio_job && second_video_job);
+
+ // Confirm no further demuxer configs requested.
+ EXPECT_EQ(1, demuxer_->num_config_requests());
+}
+
TEST_F(MediaSourcePlayerTest, DemuxerConfigRequestedIfInPrefetchUnit0) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
@@ -1613,7 +1718,7 @@ TEST_F(MediaSourcePlayerTest, ConfigChangedThenReleaseThenConfigsAvailable) {
StartConfigChange(true, true, 0);
ReleasePlayer();
- player_.OnDemuxerConfigsAvailable(CreateAudioDemuxerConfigs());
+ player_.OnDemuxerConfigsAvailable(CreateAudioDemuxerConfigs(kCodecVorbis));
EXPECT_FALSE(GetMediaDecoderJob(true));
EXPECT_FALSE(player_.IsPlaying());
EXPECT_EQ(1, demuxer_->num_data_requests());
@@ -1642,7 +1747,7 @@ TEST_F(MediaSourcePlayerTest, ConfigChangedThenReleaseThenStart) {
EXPECT_FALSE(GetMediaDecoderJob(true));
EXPECT_EQ(1, demuxer_->num_data_requests());
- player_.OnDemuxerConfigsAvailable(CreateAudioDemuxerConfigs());
+ player_.OnDemuxerConfigsAvailable(CreateAudioDemuxerConfigs(kCodecVorbis));
EXPECT_TRUE(GetMediaDecoderJob(true));
EXPECT_EQ(2, demuxer_->num_data_requests());
@@ -1708,10 +1813,30 @@ TEST_F(MediaSourcePlayerTest, BrowserSeek_ThenReleaseThenStart) {
EXPECT_EQ(1, demuxer_->num_seek_requests());
}
+// TODO(xhwang): Once we add tests to cover DrmBridge, update this test to
+// also verify that the job is successfully created if SetDrmBridge(), Start()
+// and eventually OnMediaCrypto() occur. This would increase test coverage of
+// http://crbug.com/313470 and allow us to remove inspection of internal player
+// pending event state. See http://crbug.com/313860.
+TEST_F(MediaSourcePlayerTest, SurfaceChangeClearedEvenIfMediaCryptoAbsent) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that |SURFACE_CHANGE_EVENT_PENDING| is not pending after
+ // SetVideoSurface() for a player configured for encrypted video, when the
+ // player has not yet received media crypto.
+ DemuxerConfigs configs = CreateVideoDemuxerConfigs();
+ configs.is_video_encrypted = true;
+
+ player_.OnDemuxerConfigsAvailable(configs);
+ CreateNextTextureAndSetVideoSurface();
+ EXPECT_FALSE(IsPendingSurfaceChange());
+ EXPECT_FALSE(GetMediaDecoderJob(false));
+}
+
// TODO(xhwang): Enable this test when the test devices are updated.
TEST_F(MediaSourcePlayerTest, DISABLED_IsTypeSupported_Widevine) {
if (!MediaCodecBridge::IsAvailable() || !MediaDrmBridge::IsAvailable()) {
- LOG(INFO) << "Could not run test - not supported on device.";
+ VLOG(0) << "Could not run test - not supported on device.";
return;
}
@@ -1763,7 +1888,7 @@ TEST_F(MediaSourcePlayerTest, DISABLED_IsTypeSupported_Widevine) {
TEST_F(MediaSourcePlayerTest, IsTypeSupported_InvalidUUID) {
if (!MediaCodecBridge::IsAvailable() || !MediaDrmBridge::IsAvailable()) {
- LOG(INFO) << "Could not run test - not supported on device.";
+ VLOG(0) << "Could not run test - not supported on device.";
return;
}
diff --git a/media/base/audio_bus.h b/media/base/audio_bus.h
index dbb49ca57f..d1106f558e 100644
--- a/media/base/audio_bus.h
+++ b/media/base/audio_bus.h
@@ -47,7 +47,6 @@ class MEDIA_EXPORT AudioBus {
static scoped_ptr<AudioBus> WrapMemory(int channels, int frames, void* data);
static scoped_ptr<AudioBus> WrapMemory(const AudioParameters& params,
void* data);
- // Returns the required memory size to use the WrapMemory() method.
static int CalculateMemorySize(const AudioParameters& params);
// Calculates the required size for an AudioBus given the number of channels
diff --git a/media/base/audio_bus_perftest.cc b/media/base/audio_bus_perftest.cc
index e4152fdfc6..ae60531074 100644
--- a/media/base/audio_bus_perftest.cc
+++ b/media/base/audio_bus_perftest.cc
@@ -10,7 +10,7 @@
namespace media {
-static const int kBenchmarkIterations = 100;
+static const int kBenchmarkIterations = 20;
template <typename T>
void RunInterleaveBench(AudioBus* bus, const std::string& trace_name) {
@@ -22,20 +22,21 @@ void RunInterleaveBench(AudioBus* bus, const std::string& trace_name) {
for (int i = 0; i < kBenchmarkIterations; ++i) {
bus->ToInterleaved(bus->frames(), byte_size, interleaved.get());
}
- double total_time_seconds =
- (base::TimeTicks::HighResNow() - start).InSecondsF();
+ double total_time_milliseconds =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
perf_test::PrintResult(
"audio_bus_to_interleaved", "", trace_name,
- kBenchmarkIterations / total_time_seconds, "runs/s", true);
+ total_time_milliseconds / kBenchmarkIterations, "ms", true);
start = base::TimeTicks::HighResNow();
for (int i = 0; i < kBenchmarkIterations; ++i) {
bus->FromInterleaved(interleaved.get(), bus->frames(), byte_size);
}
- total_time_seconds = (base::TimeTicks::HighResNow() - start).InSecondsF();
+ total_time_milliseconds =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
perf_test::PrintResult(
"audio_bus_from_interleaved", "", trace_name,
- kBenchmarkIterations / total_time_seconds, "runs/s", true);
+ total_time_milliseconds / kBenchmarkIterations, "ms", true);
}
// Benchmark the FromInterleaved() and ToInterleaved() methods.
diff --git a/media/base/audio_converter_perftest.cc b/media/base/audio_converter_perftest.cc
index f8570e1ec3..83f715e110 100644
--- a/media/base/audio_converter_perftest.cc
+++ b/media/base/audio_converter_perftest.cc
@@ -10,7 +10,7 @@
namespace media {
-static const int kBenchmarkIterations = 500000;
+static const int kBenchmarkIterations = 200000;
// InputCallback that zero's out the provided AudioBus.
class NullInputProvider : public AudioConverter::InputCallback {
diff --git a/media/base/audio_decoder_config.h b/media/base/audio_decoder_config.h
index a17d2215b9..53705ccda7 100644
--- a/media/base/audio_decoder_config.h
+++ b/media/base/audio_decoder_config.h
@@ -33,6 +33,7 @@ enum AudioCodec {
kCodecPCM_S24BE,
kCodecOpus,
kCodecEAC3,
+ kCodecPCM_ALAW,
// DO NOT ADD RANDOM AUDIO CODECS!
//
// The only acceptable time to add a new codec is if there is production code
diff --git a/media/base/bit_reader.cc b/media/base/bit_reader.cc
index ea74350390..e4d83af741 100644
--- a/media/base/bit_reader.cc
+++ b/media/base/bit_reader.cc
@@ -19,7 +19,7 @@ BitReader::~BitReader() {}
bool BitReader::SkipBits(int num_bits) {
DCHECK_GE(num_bits, 0);
- DLOG_IF(INFO, num_bits > 100)
+ DVLOG_IF(0, num_bits > 100)
<< "BitReader::SkipBits inefficient for large skips";
// Skip any bits in the current byte waiting to be processed, then
diff --git a/media/base/demuxer.h b/media/base/demuxer.h
index a2dad22d67..9b671f007c 100644
--- a/media/base/demuxer.h
+++ b/media/base/demuxer.h
@@ -15,6 +15,8 @@
namespace media {
+class TextTrackConfig;
+
class MEDIA_EXPORT DemuxerHost : public DataSourceHost {
public:
// Sets the duration of the media in microseconds.
@@ -25,6 +27,13 @@ class MEDIA_EXPORT DemuxerHost : public DataSourceHost {
// method with PIPELINE_OK.
virtual void OnDemuxerError(PipelineStatus error) = 0;
+ // Add |text_stream| to the collection managed by the text renderer.
+ virtual void AddTextStream(DemuxerStream* text_stream,
+ const TextTrackConfig& config) = 0;
+
+ // Remove |text_stream| from the presentation.
+ virtual void RemoveTextStream(DemuxerStream* text_stream) = 0;
+
protected:
virtual ~DemuxerHost();
};
@@ -45,7 +54,8 @@ class MEDIA_EXPORT Demuxer {
// The demuxer does not own |host| as it is guaranteed to outlive the
// lifetime of the demuxer. Don't delete it!
virtual void Initialize(DemuxerHost* host,
- const PipelineStatusCB& status_cb) = 0;
+ const PipelineStatusCB& status_cb,
+ bool enable_text_tracks) = 0;
// Carry out any actions required to seek to the given time, executing the
// callback upon completion.
@@ -66,7 +76,8 @@ class MEDIA_EXPORT Demuxer {
// TODO(scherkus): this might not be needed http://crbug.com/234708
virtual void OnAudioRendererDisabled() = 0;
- // Returns the given stream type, or NULL if that type is not present.
+ // Returns the first stream of the given stream type (which is not allowed
+ // to be DemuxerStream::TEXT), or NULL if that type of stream is not present.
virtual DemuxerStream* GetStream(DemuxerStream::Type type) = 0;
// Returns the starting time for the media file.
diff --git a/media/base/demuxer_perftest.cc b/media/base/demuxer_perftest.cc
index e86b0b81ee..f63e6e4b3e 100644
--- a/media/base/demuxer_perftest.cc
+++ b/media/base/demuxer_perftest.cc
@@ -17,7 +17,7 @@
namespace media {
-static const int kBenchmarkIterations = 5000;
+static const int kBenchmarkIterations = 500;
class DemuxerHostImpl : public media::DemuxerHost {
public:
@@ -33,6 +33,9 @@ class DemuxerHostImpl : public media::DemuxerHost {
// DemuxerHost implementation.
virtual void SetDuration(base::TimeDelta duration) OVERRIDE {}
virtual void OnDemuxerError(media::PipelineStatus error) OVERRIDE {}
+ virtual void AddTextStream(media::DemuxerStream* text_stream,
+ const media::TextTrackConfig& config) OVERRIDE {}
+ virtual void RemoveTextStream(media::DemuxerStream* text_stream) OVERRIDE {}
private:
DISALLOW_COPY_AND_ASSIGN(DemuxerHostImpl);
@@ -46,7 +49,7 @@ static void QuitLoopWithStatus(base::MessageLoop* message_loop,
static void NeedKey(const std::string& type,
const std::vector<uint8>& init_data) {
- LOG(INFO) << "File is encrypted.";
+ VLOG(0) << "File is encrypted.";
}
typedef std::vector<media::DemuxerStream* > Streams;
@@ -64,7 +67,7 @@ class StreamReader {
// Returns true when all streams have reached end of stream.
bool IsDone();
- int number_of_streams() { return streams_.size(); }
+ int number_of_streams() { return static_cast<int>(streams_.size()); }
const Streams& streams() { return streams_; }
const std::vector<int>& counts() { return counts_; }
@@ -182,8 +185,9 @@ static void RunDemuxerBenchmark(const std::string& filename) {
need_key_cb,
new MediaLog());
- demuxer.Initialize(&demuxer_host, base::Bind(
- &QuitLoopWithStatus, &message_loop));
+ demuxer.Initialize(&demuxer_host,
+ base::Bind(&QuitLoopWithStatus, &message_loop),
+ false);
message_loop.Run();
StreamReader stream_reader(&demuxer, false);
@@ -208,18 +212,18 @@ static void RunDemuxerBenchmark(const std::string& filename) {
}
TEST(DemuxerPerfTest, Demuxer) {
- RunDemuxerBenchmark("media/test/data/bear.ogv");
- RunDemuxerBenchmark("media/test/data/bear-640x360.webm");
- RunDemuxerBenchmark("media/test/data/sfx_s16le.wav");
+ RunDemuxerBenchmark("bear.ogv");
+ RunDemuxerBenchmark("bear-640x360.webm");
+ RunDemuxerBenchmark("sfx_s16le.wav");
#if defined(USE_PROPRIETARY_CODECS)
- RunDemuxerBenchmark("media/test/data/bear-1280x720.mp4");
- RunDemuxerBenchmark("media/test/data/sfx.mp3");
+ RunDemuxerBenchmark("bear-1280x720.mp4");
+ RunDemuxerBenchmark("sfx.mp3");
#endif
#if defined(OS_CHROMEOS)
- RunDemuxerBenchmark("media/test/data/bear.flac");
+ RunDemuxerBenchmark("bear.flac");
#endif
#if defined(USE_PROPRIETARY_CODECS) && defined(OS_CHROMEOS)
- RunDemuxerBenchmark("media/test/data/bear.avi");
+ RunDemuxerBenchmark("bear.avi");
#endif
}
diff --git a/media/base/demuxer_stream.h b/media/base/demuxer_stream.h
index bb4534475e..4e07c66d8f 100644
--- a/media/base/demuxer_stream.h
+++ b/media/base/demuxer_stream.h
@@ -21,6 +21,7 @@ class MEDIA_EXPORT DemuxerStream {
UNKNOWN,
AUDIO,
VIDEO,
+ TEXT,
NUM_TYPES, // Always keep this entry as the last one!
};
diff --git a/media/base/fake_text_track_stream.cc b/media/base/fake_text_track_stream.cc
new file mode 100644
index 0000000000..3136c475a7
--- /dev/null
+++ b/media/base/fake_text_track_stream.cc
@@ -0,0 +1,83 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/fake_text_track_stream.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "media/base/decoder_buffer.h"
+#include "media/filters/webvtt_util.h"
+
+namespace media {
+
+FakeTextTrackStream::FakeTextTrackStream()
+ : message_loop_(base::MessageLoopProxy::current()),
+ stopping_(false) {
+}
+
+FakeTextTrackStream::~FakeTextTrackStream() {
+ DCHECK(read_cb_.is_null());
+}
+
+void FakeTextTrackStream::Read(const ReadCB& read_cb) {
+ DCHECK(!read_cb.is_null());
+ DCHECK(read_cb_.is_null());
+ OnRead();
+ read_cb_ = read_cb;
+
+ if (stopping_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &FakeTextTrackStream::AbortPendingRead, base::Unretained(this)));
+ }
+}
+
+DemuxerStream::Type FakeTextTrackStream::type() {
+ return DemuxerStream::TEXT;
+}
+
+void FakeTextTrackStream::SatisfyPendingRead(
+ const base::TimeDelta& start,
+ const base::TimeDelta& duration,
+ const std::string& id,
+ const std::string& content,
+ const std::string& settings) {
+ DCHECK(!read_cb_.is_null());
+
+ const uint8* const data_buf = reinterpret_cast<const uint8*>(content.data());
+ const int data_len = static_cast<int>(content.size());
+
+ std::vector<uint8> side_data;
+ MakeSideData(id.begin(), id.end(),
+ settings.begin(), settings.end(),
+ &side_data);
+
+ const uint8* const sd_buf = &side_data[0];
+ const int sd_len = static_cast<int>(side_data.size());
+
+ scoped_refptr<DecoderBuffer> buffer;
+ buffer = DecoderBuffer::CopyFrom(data_buf, data_len, sd_buf, sd_len);
+
+ buffer->set_timestamp(start);
+ buffer->set_duration(duration);
+
+ base::ResetAndReturn(&read_cb_).Run(kOk, buffer);
+}
+
+void FakeTextTrackStream::AbortPendingRead() {
+ DCHECK(!read_cb_.is_null());
+ base::ResetAndReturn(&read_cb_).Run(kAborted, NULL);
+}
+
+void FakeTextTrackStream::SendEosNotification() {
+ DCHECK(!read_cb_.is_null());
+ base::ResetAndReturn(&read_cb_).Run(kOk, DecoderBuffer::CreateEOSBuffer());
+}
+
+void FakeTextTrackStream::Stop() {
+ stopping_ = true;
+ if (!read_cb_.is_null())
+ AbortPendingRead();
+}
+
+} // namespace media
diff --git a/media/base/fake_text_track_stream.h b/media/base/fake_text_track_stream.h
new file mode 100644
index 0000000000..33c74ef4f3
--- /dev/null
+++ b/media/base/fake_text_track_stream.h
@@ -0,0 +1,47 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/video_decoder_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+
+// Fake implementation of the DemuxerStream. These are the stream objects
+// we pass to the text renderer object when streams are added and removed.
+class FakeTextTrackStream : public DemuxerStream {
+ public:
+ FakeTextTrackStream();
+ virtual ~FakeTextTrackStream();
+
+ // DemuxerStream implementation.
+ virtual void Read(const ReadCB&) OVERRIDE;
+ MOCK_METHOD0(audio_decoder_config, AudioDecoderConfig());
+ MOCK_METHOD0(video_decoder_config, VideoDecoderConfig());
+ virtual Type type() OVERRIDE;
+ MOCK_METHOD0(EnableBitstreamConverter, void());
+
+ void SatisfyPendingRead(const base::TimeDelta& start,
+ const base::TimeDelta& duration,
+ const std::string& id,
+ const std::string& content,
+ const std::string& settings);
+ void AbortPendingRead();
+ void SendEosNotification();
+
+ void Stop();
+
+ MOCK_METHOD0(OnRead, void());
+
+ private:
+ scoped_refptr<base::MessageLoopProxy> message_loop_;
+ ReadCB read_cb_;
+ bool stopping_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeTextTrackStream);
+};
+
+} // namespace media
diff --git a/media/base/filter_collection.cc b/media/base/filter_collection.cc
index 730835f191..da5042f327 100644
--- a/media/base/filter_collection.cc
+++ b/media/base/filter_collection.cc
@@ -6,6 +6,7 @@
#include "media/base/audio_renderer.h"
#include "media/base/demuxer.h"
+#include "media/base/text_renderer.h"
#include "media/base/video_renderer.h"
namespace media {
@@ -40,4 +41,13 @@ scoped_ptr<VideoRenderer> FilterCollection::GetVideoRenderer() {
return video_renderer_.Pass();
}
+void FilterCollection::SetTextRenderer(
+ scoped_ptr<TextRenderer> text_renderer) {
+ text_renderer_ = text_renderer.Pass();
+}
+
+scoped_ptr<TextRenderer> FilterCollection::GetTextRenderer() {
+ return text_renderer_.Pass();
+}
+
} // namespace media
diff --git a/media/base/filter_collection.h b/media/base/filter_collection.h
index 90ea066944..a0aee76f0b 100644
--- a/media/base/filter_collection.h
+++ b/media/base/filter_collection.h
@@ -12,6 +12,7 @@ namespace media {
class AudioRenderer;
class Demuxer;
+class TextRenderer;
class VideoRenderer;
// Represents a set of uninitialized demuxer and audio/video decoders and
@@ -33,10 +34,14 @@ class MEDIA_EXPORT FilterCollection {
void SetVideoRenderer(scoped_ptr<VideoRenderer> video_renderer);
scoped_ptr<VideoRenderer> GetVideoRenderer();
+ void SetTextRenderer(scoped_ptr<TextRenderer> text_renderer);
+ scoped_ptr<TextRenderer> GetTextRenderer();
+
private:
Demuxer* demuxer_;
scoped_ptr<AudioRenderer> audio_renderer_;
scoped_ptr<VideoRenderer> video_renderer_;
+ scoped_ptr<TextRenderer> text_renderer_;
DISALLOW_COPY_AND_ASSIGN(FilterCollection);
};
diff --git a/media/base/media_keys.h b/media/base/media_keys.h
index c0fc56af34..743d71459b 100644
--- a/media/base/media_keys.h
+++ b/media/base/media_keys.h
@@ -24,7 +24,7 @@ class Decryptor;
class MEDIA_EXPORT MediaKeys {
public:
// Reported to UMA, so never reuse a value!
- // Must be kept in sync with WebKit::WebMediaPlayerClient::MediaKeyErrorCode
+ // Must be kept in sync with blink::WebMediaPlayerClient::MediaKeyErrorCode
// (enforced in webmediaplayer_impl.cc).
enum KeyError {
kUnknownError = 1,
@@ -37,6 +37,8 @@ class MEDIA_EXPORT MediaKeys {
kMaxKeyError // Must be last and greater than any legit value.
};
+ const static uint32 kInvalidReferenceId = 0;
+
MediaKeys();
virtual ~MediaKeys();
@@ -44,7 +46,8 @@ class MEDIA_EXPORT MediaKeys {
// Returns true if generating key request succeeded, false otherwise.
// Note: AddKey() and CancelKeyRequest() should only be called after
// GenerateKeyRequest() returns true.
- virtual bool GenerateKeyRequest(const std::string& type,
+ virtual bool GenerateKeyRequest(uint32 reference_id,
+ const std::string& type,
const uint8* init_data,
int init_data_length) = 0;
@@ -52,12 +55,14 @@ class MEDIA_EXPORT MediaKeys {
// key. It can be any data that the key system accepts, such as a license.
// If multiple calls of this function set different keys for the same
// key ID, the older key will be replaced by the newer key.
- virtual void AddKey(const uint8* key, int key_length,
- const uint8* init_data, int init_data_length,
- const std::string& session_id) = 0;
+ virtual void AddKey(uint32 reference_id,
+ const uint8* key,
+ int key_length,
+ const uint8* init_data,
+ int init_data_length) = 0;
- // Cancels the key request specified by |session_id|.
- virtual void CancelKeyRequest(const std::string& session_id) = 0;
+ // Cancels the key request specified by |reference_id|.
+ virtual void CancelKeyRequest(uint32 reference_id) = 0;
// Gets the Decryptor object associated with the MediaKeys. Returns NULL if
// no Decryptor object is associated. The returned object is only guaranteed
@@ -70,15 +75,21 @@ class MEDIA_EXPORT MediaKeys {
// Key event callbacks. See the spec for details:
// http://dvcs.w3.org/hg/html-media/raw-file/eme-v0.1b/encrypted-media/encrypted-media.html#event-summary
-typedef base::Callback<void(const std::string& session_id)> KeyAddedCB;
+typedef base::Callback<void(uint32 reference_id)> KeyAddedCB;
-typedef base::Callback<void(const std::string& session_id,
+typedef base::Callback<void(uint32 reference_id,
media::MediaKeys::KeyError error_code,
int system_code)> KeyErrorCB;
-typedef base::Callback<void(const std::string& session_id,
+typedef base::Callback<void(uint32 reference_id,
const std::vector<uint8>& message,
const std::string& default_url)> KeyMessageCB;
+
+// Called by the CDM when it generates the |session_id| as a result of a
+// GenerateKeyRequest() call. Must be called before KeyMessageCB or KeyAddedCB
+// events are fired.
+typedef base::Callback<void(uint32 reference_id,
+ const std::string& session_id)> SetSessionIdCB;
} // namespace media
#endif // MEDIA_BASE_MEDIA_KEYS_H_
diff --git a/media/base/media_log.cc b/media/base/media_log.cc
index 8a07b020c7..e791b441f4 100644
--- a/media/base/media_log.cc
+++ b/media/base/media_log.cc
@@ -50,6 +50,8 @@ const char* MediaLog::EventTypeToString(MediaLogEvent::Type type) {
return "AUDIO_ENDED";
case MediaLogEvent::VIDEO_ENDED:
return "VIDEO_ENDED";
+ case MediaLogEvent::TEXT_ENDED:
+ return "TEXT_ENDED";
case MediaLogEvent::AUDIO_RENDERER_DISABLED:
return "AUDIO_RENDERER_DISABLED";
case MediaLogEvent::BUFFERED_EXTENTS_CHANGED:
diff --git a/media/base/media_log_event.h b/media/base/media_log_event.h
index 811d1131a7..3052d415c1 100644
--- a/media/base/media_log_event.h
+++ b/media/base/media_log_event.h
@@ -70,9 +70,10 @@ struct MediaLogEvent {
TOTAL_BYTES_SET,
NETWORK_ACTIVITY_SET,
- // Audio/Video stream playback has ended.
+ // Audio/Video/Text stream playback has ended.
AUDIO_ENDED,
VIDEO_ENDED,
+ TEXT_ENDED,
// The audio renderer has been disabled.
// params: none.
diff --git a/media/base/media_switches.cc b/media/base/media_switches.cc
index 435b03169c..3718d96ce1 100644
--- a/media/base/media_switches.cc
+++ b/media/base/media_switches.cc
@@ -12,8 +12,8 @@ const char kAudioBufferSize[] = "audio-buffer-size";
// Enable EAC3 playback in MSE.
const char kEnableEac3Playback[] = "enable-eac3-playback";
-// Enables Opus playback in media elements.
-const char kEnableOpusPlayback[] = "enable-opus-playback";
+// Disables Opus playback in media elements.
+const char kDisableOpusPlayback[] = "disable-opus-playback";
// Disables VP8 Alpha playback in media elements.
const char kDisableVp8AlphaPlayback[] = "disable-vp8-alpha-playback";
@@ -56,6 +56,13 @@ const char kAlsaOutputDevice[] = "alsa-output-device";
// tested. See http://crbug.com/158170.
// TODO(dalecurtis): Remove this once we're sure nothing has exploded.
const char kDisableMainThreadAudio[] = "disable-main-thread-audio";
+// AVFoundation is available in versions 10.7 and onwards, and is to be used
+// http://crbug.com/288562 for both audio and video device monitoring and for
+// video capture. Being a dynamically loaded NSBundle and library, it hits the
+// Chrome startup time (http://crbug.com/311325 and http://crbug.com/311437);
+// until development is finished and the library load time issue is solved, the
+// usage of this library is hidden behind this flag.
+const char kEnableAVFoundation[] = "enable-avfoundation";
#endif
#if defined(OS_WIN)
diff --git a/media/base/media_switches.h b/media/base/media_switches.h
index 3618cb94ae..43b62dfb89 100644
--- a/media/base/media_switches.h
+++ b/media/base/media_switches.h
@@ -16,7 +16,7 @@ MEDIA_EXPORT extern const char kAudioBufferSize[];
MEDIA_EXPORT extern const char kEnableEac3Playback[];
-MEDIA_EXPORT extern const char kEnableOpusPlayback[];
+MEDIA_EXPORT extern const char kDisableOpusPlayback[];
MEDIA_EXPORT extern const char kDisableVp8AlphaPlayback[];
@@ -40,6 +40,7 @@ MEDIA_EXPORT extern const char kAlsaOutputDevice[];
#if defined(OS_MACOSX)
MEDIA_EXPORT extern const char kDisableMainThreadAudio[];
+MEDIA_EXPORT extern const char kEnableAVFoundation[];
#endif
#if defined(OS_WIN)
diff --git a/media/base/mock_demuxer_host.h b/media/base/mock_demuxer_host.h
index 597c13298c..61761a84b9 100644
--- a/media/base/mock_demuxer_host.h
+++ b/media/base/mock_demuxer_host.h
@@ -5,9 +5,8 @@
#ifndef MEDIA_BASE_MOCK_DEMUXER_HOST_H_
#define MEDIA_BASE_MOCK_DEMUXER_HOST_H_
-#include <string>
-
#include "media/base/demuxer.h"
+#include "media/base/text_track_config.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -26,6 +25,9 @@ class MockDemuxerHost : public DemuxerHost {
// DemuxerHost implementation.
MOCK_METHOD1(OnDemuxerError, void(PipelineStatus error));
MOCK_METHOD1(SetDuration, void(base::TimeDelta duration));
+ MOCK_METHOD2(AddTextStream, void(DemuxerStream*,
+ const TextTrackConfig&));
+ MOCK_METHOD1(RemoveTextStream, void(DemuxerStream*));
private:
DISALLOW_COPY_AND_ASSIGN(MockDemuxerHost);
diff --git a/media/base/mock_filters.cc b/media/base/mock_filters.cc
index eaf52013cd..e4faf70b3e 100644
--- a/media/base/mock_filters.cc
+++ b/media/base/mock_filters.cc
@@ -66,6 +66,10 @@ MockAudioRenderer::MockAudioRenderer() {}
MockAudioRenderer::~MockAudioRenderer() {}
+MockTextTrack::MockTextTrack() {}
+
+MockTextTrack::~MockTextTrack() {}
+
MockDecryptor::MockDecryptor() {}
MockDecryptor::~MockDecryptor() {}
diff --git a/media/base/mock_filters.h b/media/base/mock_filters.h
index fb5e8a0dfd..c71590da1d 100644
--- a/media/base/mock_filters.h
+++ b/media/base/mock_filters.h
@@ -16,6 +16,7 @@
#include "media/base/demuxer.h"
#include "media/base/filter_collection.h"
#include "media/base/pipeline_status.h"
+#include "media/base/text_track.h"
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"
@@ -30,7 +31,8 @@ class MockDemuxer : public Demuxer {
virtual ~MockDemuxer();
// Demuxer implementation.
- MOCK_METHOD2(Initialize, void(DemuxerHost* host, const PipelineStatusCB& cb));
+ MOCK_METHOD3(Initialize,
+ void(DemuxerHost* host, const PipelineStatusCB& cb, bool));
MOCK_METHOD1(SetPlaybackRate, void(float playback_rate));
MOCK_METHOD2(Seek, void(base::TimeDelta time, const PipelineStatusCB& cb));
MOCK_METHOD1(Stop, void(const base::Closure& callback));
@@ -155,6 +157,21 @@ class MockAudioRenderer : public AudioRenderer {
DISALLOW_COPY_AND_ASSIGN(MockAudioRenderer);
};
+class MockTextTrack : public TextTrack {
+ public:
+ MockTextTrack();
+ virtual ~MockTextTrack();
+
+ MOCK_METHOD5(addWebVTTCue, void(const base::TimeDelta& start,
+ const base::TimeDelta& end,
+ const std::string& id,
+ const std::string& content,
+ const std::string& settings));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockTextTrack);
+};
+
class MockDecryptor : public Decryptor {
public:
MockDecryptor();
diff --git a/media/base/pipeline.cc b/media/base/pipeline.cc
index 5454fa7c11..e82f88dd6e 100644
--- a/media/base/pipeline.cc
+++ b/media/base/pipeline.cc
@@ -21,6 +21,8 @@
#include "media/base/clock.h"
#include "media/base/filter_collection.h"
#include "media/base/media_log.h"
+#include "media/base/text_renderer.h"
+#include "media/base/text_track_config.h"
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_renderer.h"
@@ -47,6 +49,7 @@ Pipeline::Pipeline(const scoped_refptr<base::MessageLoopProxy>& message_loop,
state_(kCreated),
audio_ended_(false),
video_ended_(false),
+ text_ended_(false),
audio_disabled_(false),
demuxer_(NULL),
creation_time_(default_tick_clock_.NowTicks()) {
@@ -293,6 +296,19 @@ void Pipeline::OnDemuxerError(PipelineStatus error) {
SetError(error);
}
+void Pipeline::AddTextStream(DemuxerStream* text_stream,
+ const TextTrackConfig& config) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::AddTextStreamTask, base::Unretained(this),
+ text_stream, config));
+}
+
+void Pipeline::RemoveTextStream(DemuxerStream* text_stream) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::RemoveTextStreamTask, base::Unretained(this),
+ text_stream));
+}
+
void Pipeline::SetError(PipelineStatus error) {
DCHECK(IsRunning());
DCHECK_NE(PIPELINE_OK, error);
@@ -537,6 +553,10 @@ void Pipeline::DoSeek(
bound_fns.Push(base::Bind(
&VideoRenderer::Pause, base::Unretained(video_renderer_.get())));
}
+ if (text_renderer_) {
+ bound_fns.Push(base::Bind(
+ &TextRenderer::Pause, base::Unretained(text_renderer_.get())));
+ }
// Flush.
if (audio_renderer_) {
@@ -547,6 +567,10 @@ void Pipeline::DoSeek(
bound_fns.Push(base::Bind(
&VideoRenderer::Flush, base::Unretained(video_renderer_.get())));
}
+ if (text_renderer_) {
+ bound_fns.Push(base::Bind(
+ &TextRenderer::Flush, base::Unretained(text_renderer_.get())));
+ }
// Seek demuxer.
bound_fns.Push(base::Bind(
@@ -586,6 +610,11 @@ void Pipeline::DoPlay(const PipelineStatusCB& done_cb) {
&VideoRenderer::Play, base::Unretained(video_renderer_.get())));
}
+ if (text_renderer_) {
+ bound_fns.Push(base::Bind(
+ &TextRenderer::Play, base::Unretained(text_renderer_.get())));
+ }
+
pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
}
@@ -609,6 +638,11 @@ void Pipeline::DoStop(const PipelineStatusCB& done_cb) {
&VideoRenderer::Stop, base::Unretained(video_renderer_.get())));
}
+ if (text_renderer_) {
+ bound_fns.Push(base::Bind(
+ &TextRenderer::Stop, base::Unretained(text_renderer_.get())));
+ }
+
pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
}
@@ -625,6 +659,7 @@ void Pipeline::OnStopCompleted(PipelineStatus status) {
filter_collection_.reset();
audio_renderer_.reset();
video_renderer_.reset();
+ text_renderer_.reset();
demuxer_ = NULL;
// If we stop during initialization/seeking we want to run |seek_cb_|
@@ -685,6 +720,13 @@ void Pipeline::OnVideoRendererEnded() {
media_log_->AddEvent(media_log_->CreateEvent(MediaLogEvent::VIDEO_ENDED));
}
+void Pipeline::OnTextRendererEnded() {
+ // Force post to process ended messages after current execution frame.
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::DoTextRendererEnded, base::Unretained(this)));
+ media_log_->AddEvent(media_log_->CreateEvent(MediaLogEvent::TEXT_ENDED));
+}
+
// Called from any thread.
void Pipeline::OnUpdateStatistics(const PipelineStatistics& stats) {
base::AutoLock auto_lock(lock_);
@@ -711,6 +753,13 @@ void Pipeline::StartTask(scoped_ptr<FilterCollection> filter_collection,
buffering_state_cb_ = buffering_state_cb;
duration_change_cb_ = duration_change_cb;
+ text_renderer_ = filter_collection_->GetTextRenderer();
+
+ if (text_renderer_) {
+ text_renderer_->Initialize(
+ base::Bind(&Pipeline::OnTextRendererEnded, base::Unretained(this)));
+ }
+
StateTransitionTask(PIPELINE_OK);
}
@@ -800,6 +849,7 @@ void Pipeline::SeekTask(TimeDelta time, const PipelineStatusCB& seek_cb) {
seek_cb_ = seek_cb;
audio_ended_ = false;
video_ended_ = false;
+ text_ended_ = false;
// Kick off seeking!
{
@@ -843,6 +893,18 @@ void Pipeline::DoVideoRendererEnded() {
RunEndedCallbackIfNeeded();
}
+void Pipeline::DoTextRendererEnded() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ if (state_ != kStarted)
+ return;
+
+ DCHECK(!text_ended_);
+ text_ended_ = true;
+
+ RunEndedCallbackIfNeeded();
+}
+
void Pipeline::RunEndedCallbackIfNeeded() {
DCHECK(message_loop_->BelongsToCurrentThread());
@@ -852,6 +914,9 @@ void Pipeline::RunEndedCallbackIfNeeded() {
if (video_renderer_ && !video_ended_)
return;
+ if (text_renderer_ && text_renderer_->HasTracks() && !text_ended_)
+ return;
+
{
base::AutoLock auto_lock(lock_);
clock_->EndOfStream();
@@ -876,11 +941,24 @@ void Pipeline::AudioDisabledTask() {
StartClockIfWaitingForTimeUpdate_Locked();
}
+void Pipeline::AddTextStreamTask(DemuxerStream* text_stream,
+ const TextTrackConfig& config) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ // TODO(matthewjheaney): fix up text_ended_ when text stream
+ // is added (http://crbug.com/321446).
+ text_renderer_->AddTextStream(text_stream, config);
+}
+
+void Pipeline::RemoveTextStreamTask(DemuxerStream* text_stream) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ text_renderer_->RemoveTextStream(text_stream);
+}
+
void Pipeline::InitializeDemuxer(const PipelineStatusCB& done_cb) {
DCHECK(message_loop_->BelongsToCurrentThread());
demuxer_ = filter_collection_->GetDemuxer();
- demuxer_->Initialize(this, done_cb);
+ demuxer_->Initialize(this, done_cb, text_renderer_);
}
void Pipeline::InitializeAudioRenderer(const PipelineStatusCB& done_cb) {
diff --git a/media/base/pipeline.h b/media/base/pipeline.h
index 09ff904163..222091fcdb 100644
--- a/media/base/pipeline.h
+++ b/media/base/pipeline.h
@@ -30,6 +30,8 @@ namespace media {
class Clock;
class FilterCollection;
class MediaLog;
+class TextRenderer;
+class TextTrackConfig;
class VideoRenderer;
// Pipeline runs the media pipeline. Filters are created and called on the
@@ -232,6 +234,9 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// DemuxerHost implementaion.
virtual void SetDuration(base::TimeDelta duration) OVERRIDE;
virtual void OnDemuxerError(PipelineStatus error) OVERRIDE;
+ virtual void AddTextStream(DemuxerStream* text_stream,
+ const TextTrackConfig& config) OVERRIDE;
+ virtual void RemoveTextStream(DemuxerStream* text_stream) OVERRIDE;
// Initiates teardown sequence in response to a runtime error.
//
@@ -244,6 +249,7 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Callbacks executed when a renderer has ended.
void OnAudioRendererEnded();
void OnVideoRendererEnded();
+ void OnTextRendererEnded();
// Callback executed by filters to update statistics.
void OnUpdateStatistics(const PipelineStatistics& stats);
@@ -283,14 +289,22 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Carries out notifying filters that we are seeking to a new timestamp.
void SeekTask(base::TimeDelta time, const PipelineStatusCB& seek_cb);
- // Handles audio/video ended logic and running |ended_cb_|.
+ // Handles audio/video/text ended logic and running |ended_cb_|.
void DoAudioRendererEnded();
void DoVideoRendererEnded();
+ void DoTextRendererEnded();
void RunEndedCallbackIfNeeded();
// Carries out disabling the audio renderer.
void AudioDisabledTask();
+ // Carries out adding a new text stream to the text renderer.
+ void AddTextStreamTask(DemuxerStream* text_stream,
+ const TextTrackConfig& config);
+
+ // Carries out removing a text stream from the text renderer.
+ void RemoveTextStreamTask(DemuxerStream* text_stream);
+
// Kicks off initialization for each media object, executing |done_cb| with
// the result when completed.
void InitializeDemuxer(const PipelineStatusCB& done_cb);
@@ -392,7 +406,7 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// reset the pipeline state, and restore this to PIPELINE_OK.
PipelineStatus status_;
- // Whether the media contains rendered audio and video streams.
+ // Whether the media contains rendered audio or video streams.
// TODO(fischman,scherkus): replace these with checks for
// {audio,video}_decoder_ once extraction of {Audio,Video}Decoder from the
// Filter heirarchy is done.
@@ -405,9 +419,10 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Member that tracks the current state.
State state_;
- // Whether we've received the audio/video ended events.
+ // Whether we've received the audio/video/text ended events.
bool audio_ended_;
bool video_ended_;
+ bool text_ended_;
// Set to true in DisableAudioRendererTask().
bool audio_disabled_;
@@ -434,6 +449,7 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// playback rate, and determining when playback has finished.
scoped_ptr<AudioRenderer> audio_renderer_;
scoped_ptr<VideoRenderer> video_renderer_;
+ scoped_ptr<TextRenderer> text_renderer_;
PipelineStatistics statistics_;
diff --git a/media/base/pipeline_status.cc b/media/base/pipeline_status.cc
deleted file mode 100644
index 6c08383cdc..0000000000
--- a/media/base/pipeline_status.cc
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/pipeline_status.h"
-
-#include "base/bind.h"
-#include "base/metrics/histogram.h"
-
-namespace media {
-
-static void ReportAndRun(const std::string& name,
- const PipelineStatusCB& cb,
- PipelineStatus status) {
- UMA_HISTOGRAM_ENUMERATION(name, status, PIPELINE_STATUS_MAX);
- cb.Run(status);
-}
-
-PipelineStatusCB CreateUMAReportingPipelineCB(const std::string& name,
- const PipelineStatusCB& cb) {
- return base::Bind(&ReportAndRun, name, cb);
-}
-
-} // namespace media
diff --git a/media/base/pipeline_status.h b/media/base/pipeline_status.h
index c208d01d58..a9f8585f57 100644
--- a/media/base/pipeline_status.h
+++ b/media/base/pipeline_status.h
@@ -37,11 +37,6 @@ enum PipelineStatus {
typedef base::Callback<void(PipelineStatus)> PipelineStatusCB;
-// Wrap & return a callback around |cb| which reports its argument to UMA under
-// the requested |name|.
-PipelineStatusCB CreateUMAReportingPipelineCB(const std::string& name,
- const PipelineStatusCB& cb);
-
// TODO(scherkus): this should be moved alongside host interface definitions.
struct PipelineStatistics {
PipelineStatistics()
diff --git a/media/base/pipeline_unittest.cc b/media/base/pipeline_unittest.cc
index 1506c2194f..a7a8cae316 100644
--- a/media/base/pipeline_unittest.cc
+++ b/media/base/pipeline_unittest.cc
@@ -11,11 +11,14 @@
#include "base/threading/simple_thread.h"
#include "base/time/clock.h"
#include "media/base/clock.h"
+#include "media/base/fake_text_track_stream.h"
#include "media/base/gmock_callback_support.h"
#include "media/base/media_log.h"
#include "media/base/mock_filters.h"
#include "media/base/pipeline.h"
#include "media/base/test_helpers.h"
+#include "media/base/text_renderer.h"
+#include "media/base/text_track_config.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/size.h"
@@ -93,6 +96,13 @@ class PipelineTest : public ::testing::Test {
scoped_ptr<AudioRenderer> audio_renderer(audio_renderer_);
filter_collection_->SetAudioRenderer(audio_renderer.Pass());
+ text_renderer_ = new TextRenderer(
+ message_loop_.message_loop_proxy(),
+ base::Bind(&PipelineTest::OnAddTextTrack,
+ base::Unretained(this)));
+ scoped_ptr<TextRenderer> text_renderer(text_renderer_);
+ filter_collection_->SetTextRenderer(text_renderer.Pass());
+
// InitializeDemuxer() adds overriding expectations for expected non-NULL
// streams.
DemuxerStream* null_pointer = NULL;
@@ -109,6 +119,13 @@ class PipelineTest : public ::testing::Test {
ExpectStop();
+ // The mock demuxer doesn't stop the fake text track stream,
+ // so just stop it manually.
+ if (text_stream_) {
+ text_stream_->Stop();
+ message_loop_.RunUntilIdle();
+ }
+
// Expect a stop callback if we were started.
EXPECT_CALL(callbacks_, OnStop());
pipeline_->Stop(base::Bind(&CallbackHelper::OnStop,
@@ -122,7 +139,7 @@ class PipelineTest : public ::testing::Test {
void InitializeDemuxer(MockDemuxerStreamVector* streams,
const base::TimeDelta& duration) {
EXPECT_CALL(callbacks_, OnDurationChange());
- EXPECT_CALL(*demuxer_, Initialize(_, _))
+ EXPECT_CALL(*demuxer_, Initialize(_, _, _))
.WillOnce(DoAll(SetDemuxerProperties(duration),
RunCallback<1>(PIPELINE_OK)));
@@ -173,6 +190,13 @@ class PipelineTest : public ::testing::Test {
}
}
+ void AddTextStream() {
+ EXPECT_CALL(*this, OnAddTextTrack(_,_))
+ .WillOnce(Invoke(this, &PipelineTest::DoOnAddTextTrack));
+ static_cast<DemuxerHost*>(pipeline_.get())->AddTextStream(text_stream(),
+ TextTrackConfig(kTextSubtitles, "", "", ""));
+ }
+
// Sets up expectations on the callback and initializes the pipeline. Called
// after tests have set expectations any filters they wish to use.
void InitializePipeline(PipelineStatus start_status) {
@@ -215,6 +239,11 @@ class PipelineTest : public ::testing::Test {
video_stream_->set_video_decoder_config(video_decoder_config_);
}
+ void CreateTextStream() {
+ scoped_ptr<FakeTextTrackStream> text_stream(new FakeTextTrackStream);
+ text_stream_ = text_stream.Pass();
+ }
+
MockDemuxerStream* audio_stream() {
return audio_stream_.get();
}
@@ -223,6 +252,10 @@ class PipelineTest : public ::testing::Test {
return video_stream_.get();
}
+ FakeTextTrackStream* text_stream() {
+ return text_stream_.get();
+ }
+
void ExpectSeek(const base::TimeDelta& seek_time) {
// Every filter should receive a call to Seek().
EXPECT_CALL(*demuxer_, Seek(seek_time, _))
@@ -281,6 +314,15 @@ class PipelineTest : public ::testing::Test {
EXPECT_CALL(*video_renderer_, Stop(_)).WillOnce(RunClosure<0>());
}
+ MOCK_METHOD2(OnAddTextTrack, void(const TextTrackConfig&,
+ const AddTextTrackDoneCB&));
+
+ void DoOnAddTextTrack(const TextTrackConfig& config,
+ const AddTextTrackDoneCB& done_cb) {
+ scoped_ptr<TextTrack> text_track(new MockTextTrack);
+ done_cb.Run(text_track.Pass());
+ }
+
// Fixture members.
StrictMock<CallbackHelper> callbacks_;
base::SimpleTestTickClock test_tick_clock_;
@@ -291,8 +333,11 @@ class PipelineTest : public ::testing::Test {
scoped_ptr<MockDemuxer> demuxer_;
MockVideoRenderer* video_renderer_;
MockAudioRenderer* audio_renderer_;
+ StrictMock<CallbackHelper> text_renderer_callbacks_;
+ TextRenderer* text_renderer_;
scoped_ptr<StrictMock<MockDemuxerStream> > audio_stream_;
scoped_ptr<StrictMock<MockDemuxerStream> > video_stream_;
+ scoped_ptr<FakeTextTrackStream> text_stream_;
AudioRenderer::TimeCB audio_time_cb_;
VideoDecoderConfig video_decoder_config_;
@@ -338,7 +383,7 @@ TEST_F(PipelineTest, NotStarted) {
TEST_F(PipelineTest, NeverInitializes) {
// Don't execute the callback passed into Initialize().
- EXPECT_CALL(*demuxer_, Initialize(_, _));
+ EXPECT_CALL(*demuxer_, Initialize(_, _, _));
// This test hangs during initialization by never calling
// InitializationComplete(). StrictMock<> will ensure that the callback is
@@ -363,7 +408,7 @@ TEST_F(PipelineTest, NeverInitializes) {
}
TEST_F(PipelineTest, URLNotFound) {
- EXPECT_CALL(*demuxer_, Initialize(_, _))
+ EXPECT_CALL(*demuxer_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_ERROR_URL_NOT_FOUND));
EXPECT_CALL(*demuxer_, Stop(_))
.WillOnce(RunClosure<0>());
@@ -372,7 +417,7 @@ TEST_F(PipelineTest, URLNotFound) {
}
TEST_F(PipelineTest, NoStreams) {
- EXPECT_CALL(*demuxer_, Initialize(_, _))
+ EXPECT_CALL(*demuxer_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
EXPECT_CALL(*demuxer_, Stop(_))
.WillOnce(RunClosure<0>());
@@ -422,9 +467,47 @@ TEST_F(PipelineTest, AudioVideoStream) {
EXPECT_TRUE(pipeline_->HasVideo());
}
+TEST_F(PipelineTest, VideoTextStream) {
+ CreateVideoStream();
+ CreateTextStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(video_stream());
+
+ InitializeDemuxer(&streams);
+ InitializeVideoRenderer(video_stream());
+
+ InitializePipeline(PIPELINE_OK);
+ EXPECT_FALSE(pipeline_->HasAudio());
+ EXPECT_TRUE(pipeline_->HasVideo());
+
+ AddTextStream();
+ message_loop_.RunUntilIdle();
+}
+
+TEST_F(PipelineTest, VideoAudioTextStream) {
+ CreateVideoStream();
+ CreateAudioStream();
+ CreateTextStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(video_stream());
+ streams.push_back(audio_stream());
+
+ InitializeDemuxer(&streams);
+ InitializeVideoRenderer(video_stream());
+ InitializeAudioRenderer(audio_stream(), false);
+
+ InitializePipeline(PIPELINE_OK);
+ EXPECT_TRUE(pipeline_->HasAudio());
+ EXPECT_TRUE(pipeline_->HasVideo());
+
+ AddTextStream();
+ message_loop_.RunUntilIdle();
+}
+
TEST_F(PipelineTest, Seek) {
CreateAudioStream();
CreateVideoStream();
+ CreateTextStream();
MockDemuxerStreamVector streams;
streams.push_back(audio_stream());
streams.push_back(video_stream());
@@ -436,6 +519,9 @@ TEST_F(PipelineTest, Seek) {
// Initialize then seek!
InitializePipeline(PIPELINE_OK);
+ AddTextStream();
+ message_loop_.RunUntilIdle();
+
// Every filter should receive a call to Seek().
base::TimeDelta expected = base::TimeDelta::FromSeconds(2000);
ExpectSeek(expected);
@@ -574,6 +660,7 @@ TEST_F(PipelineTest, DisableAudioRendererDuringInit) {
TEST_F(PipelineTest, EndedCallback) {
CreateAudioStream();
CreateVideoStream();
+ CreateTextStream();
MockDemuxerStreamVector streams;
streams.push_back(audio_stream());
streams.push_back(video_stream());
@@ -583,13 +670,18 @@ TEST_F(PipelineTest, EndedCallback) {
InitializeVideoRenderer(video_stream());
InitializePipeline(PIPELINE_OK);
- // The ended callback shouldn't run until both renderers have ended.
+ AddTextStream();
+
+ // The ended callback shouldn't run until all renderers have ended.
pipeline_->OnAudioRendererEnded();
message_loop_.RunUntilIdle();
- EXPECT_CALL(callbacks_, OnEnded());
pipeline_->OnVideoRendererEnded();
message_loop_.RunUntilIdle();
+
+ EXPECT_CALL(callbacks_, OnEnded());
+ text_stream()->SendEosNotification();
+ message_loop_.RunUntilIdle();
}
TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
@@ -923,13 +1015,13 @@ class PipelineTeardownTest : public PipelineTest {
if (state == kInitDemuxer) {
if (stop_or_error == kStop) {
- EXPECT_CALL(*demuxer_, Initialize(_, _))
+ EXPECT_CALL(*demuxer_, Initialize(_, _, _))
.WillOnce(DoAll(Stop(pipeline_.get(), stop_cb),
RunCallback<1>(PIPELINE_OK)));
EXPECT_CALL(callbacks_, OnStop());
} else {
status = DEMUXER_ERROR_COULD_NOT_OPEN;
- EXPECT_CALL(*demuxer_, Initialize(_, _))
+ EXPECT_CALL(*demuxer_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(status));
}
diff --git a/media/base/run_all_unittests.cc b/media/base/run_all_unittests.cc
index fe097f11a7..f1a0092814 100644
--- a/media/base/run_all_unittests.cc
+++ b/media/base/run_all_unittests.cc
@@ -41,10 +41,6 @@ void TestSuiteNoAtExit::Initialize() {
media::InitializeMediaLibraryForTesting();
CommandLine* cmd_line = CommandLine::ForCurrentProcess();
cmd_line->AppendSwitch(switches::kEnableMP3StreamParser);
-
- // Enable Opus support for all media tests.
- // TODO(vigneshv): Remove this once the Opus flag is removed or negated.
- cmd_line->AppendSwitch(switches::kEnableOpusPlayback);
}
int main(int argc, char** argv) {
diff --git a/media/base/simd/vector_math_sse.cc b/media/base/simd/vector_math_sse.cc
index 39bcaa0c19..c2121225cd 100644
--- a/media/base/simd/vector_math_sse.cc
+++ b/media/base/simd/vector_math_sse.cc
@@ -4,6 +4,8 @@
#include "media/base/vector_math_testing.h"
+#include <algorithm>
+
#include <xmmintrin.h> // NOLINT
namespace media {
@@ -35,5 +37,82 @@ void FMAC_SSE(const float src[], float scale, int len, float dest[]) {
dest[i] += src[i] * scale;
}
+// Convenience macro to extract float lane 0 through 3 from the vector |a|.
+// This is needed because compilers other than clang don't support access via
+// operator[]().
+#define EXTRACT_FLOAT(a, i) \
+ (i == 0 ? \
+ _mm_cvtss_f32(a) : \
+ _mm_cvtss_f32(_mm_shuffle_ps(a, a, i)))
+
+std::pair<float, float> EWMAAndMaxPower_SSE(
+ float initial_value, const float src[], int len, float smoothing_factor) {
+ // When the recurrence is unrolled, we see that we can split it into 4
+ // separate lanes of evaluation:
+ //
+ // y[n] = a(S[n]^2) + (1-a)(y[n-1])
+ // = a(S[n]^2) + (1-a)^1(aS[n-1]^2) + (1-a)^2(aS[n-2]^2) + ...
+ // = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
+ //
+ // where z[n] = a(S[n]^2) + (1-a)^4(z[n-4]) + (1-a)^8(z[n-8]) + ...
+ //
+ // Thus, the strategy here is to compute z[n], z[n-1], z[n-2], and z[n-3] in
+ // each of the 4 lanes, and then combine them to give y[n].
+
+ const int rem = len % 4;
+ const int last_index = len - rem;
+
+ const __m128 smoothing_factor_x4 = _mm_set_ps1(smoothing_factor);
+ const float weight_prev = 1.0f - smoothing_factor;
+ const __m128 weight_prev_x4 = _mm_set_ps1(weight_prev);
+ const __m128 weight_prev_squared_x4 =
+ _mm_mul_ps(weight_prev_x4, weight_prev_x4);
+ const __m128 weight_prev_4th_x4 =
+ _mm_mul_ps(weight_prev_squared_x4, weight_prev_squared_x4);
+
+ // Compute z[n], z[n-1], z[n-2], and z[n-3] in parallel in lanes 3, 2, 1 and
+ // 0, respectively.
+ __m128 max_x4 = _mm_setzero_ps();
+ __m128 ewma_x4 = _mm_setr_ps(0.0f, 0.0f, 0.0f, initial_value);
+ int i;
+ for (i = 0; i < last_index; i += 4) {
+ ewma_x4 = _mm_mul_ps(ewma_x4, weight_prev_4th_x4);
+ const __m128 sample_x4 = _mm_load_ps(src + i);
+ const __m128 sample_squared_x4 = _mm_mul_ps(sample_x4, sample_x4);
+ max_x4 = _mm_max_ps(max_x4, sample_squared_x4);
+ // Note: The compiler optimizes this to a single multiply-and-accumulate
+ // instruction:
+ ewma_x4 = _mm_add_ps(ewma_x4,
+ _mm_mul_ps(sample_squared_x4, smoothing_factor_x4));
+ }
+
+ // y[n] = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
+ float ewma = EXTRACT_FLOAT(ewma_x4, 3);
+ ewma_x4 = _mm_mul_ps(ewma_x4, weight_prev_x4);
+ ewma += EXTRACT_FLOAT(ewma_x4, 2);
+ ewma_x4 = _mm_mul_ps(ewma_x4, weight_prev_x4);
+ ewma += EXTRACT_FLOAT(ewma_x4, 1);
+ ewma_x4 = _mm_mul_ss(ewma_x4, weight_prev_x4);
+ ewma += EXTRACT_FLOAT(ewma_x4, 0);
+
+ // Fold the maximums together to get the overall maximum.
+ max_x4 = _mm_max_ps(max_x4,
+ _mm_shuffle_ps(max_x4, max_x4, _MM_SHUFFLE(3, 3, 1, 1)));
+ max_x4 = _mm_max_ss(max_x4, _mm_shuffle_ps(max_x4, max_x4, 2));
+
+ std::pair<float, float> result(ewma, EXTRACT_FLOAT(max_x4, 0));
+
+ // Handle remaining values at the end of |src|.
+ for (; i < len; ++i) {
+ result.first *= weight_prev;
+ const float sample = src[i];
+ const float sample_squared = sample * sample;
+ result.first += sample_squared * smoothing_factor;
+ result.second = std::max(result.second, sample_squared);
+ }
+
+ return result;
+}
+
} // namespace vector_math
} // namespace media
diff --git a/media/base/sinc_resampler_perftest.cc b/media/base/sinc_resampler_perftest.cc
index c7e75170e6..21c6ec325c 100644
--- a/media/base/sinc_resampler_perftest.cc
+++ b/media/base/sinc_resampler_perftest.cc
@@ -40,13 +40,13 @@ static void RunConvolveBenchmark(
resampler->get_kernel_for_testing(),
kKernelInterpolationFactor);
}
- double total_time_seconds =
- (base::TimeTicks::HighResNow() - start).InSecondsF();
+ double total_time_milliseconds =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
perf_test::PrintResult("sinc_resampler_convolve",
"",
trace_name,
- kBenchmarkIterations / total_time_seconds,
- "runs/s",
+ kBenchmarkIterations / total_time_milliseconds,
+ "runs/ms",
true);
}
diff --git a/media/base/stream_parser.h b/media/base/stream_parser.h
index 33a336def8..101ce4eee0 100644
--- a/media/base/stream_parser.h
+++ b/media/base/stream_parser.h
@@ -6,6 +6,7 @@
#define MEDIA_BASE_STREAM_PARSER_H_
#include <deque>
+#include <map>
#include <string>
#include "base/callback_forward.h"
@@ -14,18 +15,19 @@
#include "base/time/time.h"
#include "media/base/media_export.h"
#include "media/base/media_log.h"
-#include "media/base/text_track.h"
namespace media {
class AudioDecoderConfig;
class StreamParserBuffer;
+class TextTrackConfig;
class VideoDecoderConfig;
// Abstract interface for parsing media byte streams.
class MEDIA_EXPORT StreamParser {
public:
typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
+ typedef std::map<int, TextTrackConfig> TextTrackConfigMap;
StreamParser();
virtual ~StreamParser();
@@ -43,11 +45,14 @@ class MEDIA_EXPORT StreamParser {
// then it means that there isn't an audio stream.
// Second parameter - The new video configuration. If the config is not valid
// then it means that there isn't an audio stream.
+ // Third parameter - The new text tracks configuration. If the map is empty,
+ // then no text tracks were parsed from the stream.
// Return value - True if the new configurations are accepted.
// False if the new configurations are not supported
// and indicates that a parsing error should be signalled.
typedef base::Callback<bool(const AudioDecoderConfig&,
- const VideoDecoderConfig&)> NewConfigCB;
+ const VideoDecoderConfig&,
+ const TextTrackConfigMap&)> NewConfigCB;
// New stream buffers have been parsed.
// First parameter - A queue of newly parsed audio buffers.
@@ -59,12 +64,13 @@ class MEDIA_EXPORT StreamParser {
const BufferQueue&)> NewBuffersCB;
// New stream buffers of inband text have been parsed.
- // First parameter - The text track to which these cues will be added.
+ // First parameter - The id of the text track to which these cues will
+ // be added.
// Second parameter - A queue of newly parsed buffers.
// Return value - True indicates that the buffers are accepted.
// False if something was wrong with the buffers and a parsing
// error should be signalled.
- typedef base::Callback<bool(TextTrack*, const BufferQueue&)> NewTextBuffersCB;
+ typedef base::Callback<bool(int, const BufferQueue&)> NewTextBuffersCB;
// Signals the beginning of a new media segment.
typedef base::Callback<void()> NewMediaSegmentCB;
@@ -85,7 +91,6 @@ class MEDIA_EXPORT StreamParser {
const NewBuffersCB& new_buffers_cb,
const NewTextBuffersCB& text_cb,
const NeedKeyCB& need_key_cb,
- const AddTextTrackCB& add_text_track_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) = 0;
diff --git a/media/base/text_cue.cc b/media/base/text_cue.cc
new file mode 100644
index 0000000000..3d8a892664
--- /dev/null
+++ b/media/base/text_cue.cc
@@ -0,0 +1,23 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/text_cue.h"
+
+namespace media {
+
+TextCue::TextCue(const base::TimeDelta& timestamp,
+ const base::TimeDelta& duration,
+ const std::string& id,
+ const std::string& settings,
+ const std::string& text)
+ : timestamp_(timestamp),
+ duration_(duration),
+ id_(id),
+ settings_(settings),
+ text_(text) {
+}
+
+TextCue::~TextCue() {}
+
+} // namespace media
diff --git a/media/base/text_cue.h b/media/base/text_cue.h
new file mode 100644
index 0000000000..2afae8d5a4
--- /dev/null
+++ b/media/base/text_cue.h
@@ -0,0 +1,48 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_TEXT_CUE_H_
+#define MEDIA_BASE_TEXT_CUE_H_
+
+#include <string>
+
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// A text buffer to carry the components of a text track cue.
+class MEDIA_EXPORT TextCue
+ : public base::RefCountedThreadSafe<TextCue> {
+ public:
+ TextCue(const base::TimeDelta& timestamp,
+ const base::TimeDelta& duration,
+ const std::string& id,
+ const std::string& settings,
+ const std::string& text);
+
+ // Access to constructor parameters.
+ base::TimeDelta timestamp() const { return timestamp_; }
+ base::TimeDelta duration() const { return duration_; }
+ const std::string& id() const { return id_; }
+ const std::string& settings() const { return settings_; }
+ const std::string& text() const { return text_; }
+
+ private:
+ friend class base::RefCountedThreadSafe<TextCue>;
+ ~TextCue();
+
+ base::TimeDelta timestamp_;
+ base::TimeDelta duration_;
+ std::string id_;
+ std::string settings_;
+ std::string text_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TextCue);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_TEXT_CUE_H_
diff --git a/media/base/text_renderer.cc b/media/base/text_renderer.cc
new file mode 100644
index 0000000000..91f9a33618
--- /dev/null
+++ b/media/base/text_renderer.cc
@@ -0,0 +1,369 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/text_renderer.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/stl_util.h"
+#include "media/base/bind_to_loop.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/demuxer.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/text_cue.h"
+
+namespace media {
+
// Constructs the renderer in the kUninitialized state; Initialize() must be
// called (on |message_loop|) before any other method.
TextRenderer::TextRenderer(
    const scoped_refptr<base::MessageLoopProxy>& message_loop,
    const AddTextTrackCB& add_text_track_cb)
    : message_loop_(message_loop),
      weak_factory_(this),
      add_text_track_cb_(add_text_track_cb),
      state_(kUninitialized),
      pending_read_count_(0) {
}
+
// Destruction is only legal before Initialize() or after Stop() has
// completed; any in-flight demuxer reads must already have drained.
TextRenderer::~TextRenderer() {
  DCHECK(state_ == kUninitialized ||
         state_ == kStopped) << "state_ " << state_;
  DCHECK_EQ(pending_read_count_, 0);
  // The map owns its TextTrackState values (raw pointers).
  STLDeleteValues(&text_track_state_map_);
}
+
// One-shot initialization: records |ended_cb| (fired when every track has
// reached end-of-stream during playback) and moves to kPaused.
void TextRenderer::Initialize(const base::Closure& ended_cb) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  DCHECK(!ended_cb.is_null());
  DCHECK_EQ(kUninitialized, state_) << "state_ " << state_;
  DCHECK(text_track_state_map_.empty());
  DCHECK_EQ(pending_read_count_, 0);
  DCHECK(pending_eos_set_.empty());
  DCHECK(ended_cb_.is_null());

  // Weak pointers are minted here so all later Bind()s run on this thread's
  // factory-created handle.
  weak_this_ = weak_factory_.GetWeakPtr();
  ended_cb_ = ended_cb;
  state_ = kPaused;
}
+
+void TextRenderer::Play(const base::Closure& callback) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, kPaused) << "state_ " << state_;
+
+ for (TextTrackStateMap::iterator itr = text_track_state_map_.begin();
+ itr != text_track_state_map_.end(); ++itr) {
+ TextTrackState* state = itr->second;
+ if (state->read_state == TextTrackState::kReadPending) {
+ DCHECK_GT(pending_read_count_, 0);
+ continue;
+ }
+
+ Read(state, itr->first);
+ }
+
+ state_ = kPlaying;
+ callback.Run();
+}
+
+void TextRenderer::Pause(const base::Closure& callback) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(state_ == kPlaying || state_ == kEnded) << "state_ " << state_;
+ DCHECK_GE(pending_read_count_, 0);
+ pause_cb_ = callback;
+
+ if (pending_read_count_ == 0) {
+ state_ = kPaused;
+ base::ResetAndReturn(&pause_cb_).Run();
+ return;
+ }
+
+ state_ = kPausePending;
+}
+
+void TextRenderer::Flush(const base::Closure& callback) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK_EQ(pending_read_count_, 0);
+ DCHECK(state_ == kPaused) << "state_ " << state_;
+
+ for (TextTrackStateMap::iterator itr = text_track_state_map_.begin();
+ itr != text_track_state_map_.end(); ++itr) {
+ pending_eos_set_.insert(itr->first);
+ }
+ DCHECK_EQ(pending_eos_set_.size(), text_track_state_map_.size());
+ callback.Run();
+}
+
+void TextRenderer::Stop(const base::Closure& cb) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(!cb.is_null());
+ DCHECK(state_ == kPlaying ||
+ state_ == kPausePending ||
+ state_ == kPaused ||
+ state_ == kEnded) << "state_ " << state_;
+ DCHECK_GE(pending_read_count_, 0);
+
+ stop_cb_ = cb;
+
+ if (pending_read_count_ == 0) {
+ state_ = kStopped;
+ base::ResetAndReturn(&stop_cb_).Run();
+ return;
+ }
+
+ state_ = kStopPending;
+}
+
// Registers a new demuxer text stream. The TextTrack object itself is
// created asynchronously by the client via |add_text_track_cb_|; bookkeeping
// for the stream is deferred to OnAddTextTrackDone().
void TextRenderer::AddTextStream(DemuxerStream* text_stream,
                                 const TextTrackConfig& config) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  DCHECK(state_ != kUninitialized) << "state_ " << state_;
  DCHECK_NE(state_, kStopPending);
  DCHECK_NE(state_, kStopped);
  // A stream may only be added once.
  DCHECK(text_track_state_map_.find(text_stream) ==
         text_track_state_map_.end());
  DCHECK(pending_eos_set_.find(text_stream) ==
         pending_eos_set_.end());

  // BindToLoop ensures the completion callback is trampolined back onto this
  // thread; the weak pointer guards against the renderer being destroyed
  // before the callback runs.
  media::AddTextTrackDoneCB done_cb =
      media::BindToLoop(message_loop_,
                        base::Bind(&TextRenderer::OnAddTextTrackDone,
                                   weak_this_,
                                   text_stream));

  add_text_track_cb_.Run(config, done_cb);
}
+
// Unregisters |text_stream| and destroys its state (including the owned
// TextTrack). The stream must not have a read in flight.
void TextRenderer::RemoveTextStream(DemuxerStream* text_stream) {
  DCHECK(message_loop_->BelongsToCurrentThread());

  TextTrackStateMap::iterator itr = text_track_state_map_.find(text_stream);
  DCHECK(itr != text_track_state_map_.end());

  TextTrackState* state = itr->second;
  // Removal with a pending read would leave a dangling callback target.
  DCHECK_EQ(state->read_state, TextTrackState::kReadIdle);
  delete state;
  text_track_state_map_.erase(itr);

  // The stream can no longer contribute an end-of-stream notification.
  pending_eos_set_.erase(text_stream);
}
+
+bool TextRenderer::HasTracks() const {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ return !text_track_state_map_.empty();
+}
+
// Completion callback for a demuxer read posted by Read(). Handles three
// outcomes: the read was aborted (pause/stop in progress), the stream hit
// end-of-stream, or a WebVTT cue buffer arrived and is decoded into a
// TextCue for CueReady().
void TextRenderer::BufferReady(
    DemuxerStream* stream,
    DemuxerStream::Status status,
    const scoped_refptr<DecoderBuffer>& input) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  // Text streams never report a config change.
  DCHECK_NE(status, DemuxerStream::kConfigChanged);

  if (status == DemuxerStream::kAborted) {
    DCHECK(!input);
    DCHECK_GT(pending_read_count_, 0);
    DCHECK(pending_eos_set_.find(stream) != pending_eos_set_.end());

    TextTrackStateMap::iterator itr = text_track_state_map_.find(stream);
    DCHECK(itr != text_track_state_map_.end());

    TextTrackState* state = itr->second;
    DCHECK_EQ(state->read_state, TextTrackState::kReadPending);

    // The aborted read is no longer outstanding.
    --pending_read_count_;
    state->read_state = TextTrackState::kReadIdle;

    switch (state_) {
      case kPlaying:
        return;

      case kPausePending:
        // Fire the deferred pause callback once the last in-flight read
        // has drained.
        if (pending_read_count_ == 0) {
          state_ = kPaused;
          base::ResetAndReturn(&pause_cb_).Run();
        }

        return;

      case kStopPending:
        // Likewise for a deferred stop.
        if (pending_read_count_ == 0) {
          state_ = kStopped;
          base::ResetAndReturn(&stop_cb_).Run();
        }

        return;

      case kPaused:
      case kStopped:
      case kUninitialized:
      case kEnded:
        // No reads can be pending in these states.
        NOTREACHED();
        return;
    }

    NOTREACHED();
    return;
  }

  if (input->end_of_stream()) {
    // A NULL cue signals end-of-stream to CueReady().
    CueReady(stream, NULL);
    return;
  }

  DCHECK_EQ(status, DemuxerStream::kOk);
  DCHECK_GE(input->side_data_size(), 2);

  // The side data contains both the cue id and cue settings,
  // each terminated with a NUL.
  const char* id_ptr = reinterpret_cast<const char*>(input->side_data());
  size_t id_len = strlen(id_ptr);
  std::string id(id_ptr, id_len);

  const char* settings_ptr = id_ptr + id_len + 1;
  size_t settings_len = strlen(settings_ptr);
  std::string settings(settings_ptr, settings_len);

  // The cue payload is stored in the data-part of the input buffer.
  std::string text(input->data(), input->data() + input->data_size());

  scoped_refptr<TextCue> text_cue(
      new TextCue(input->timestamp(),
                  input->duration(),
                  id,
                  settings,
                  text));

  CueReady(stream, text_cue);
}
+
// Dispatches a decoded cue (or end-of-stream, when |text_cue| is NULL) for
// |text_stream|. On EOS it updates the pending-EOS set and may complete a
// pending pause/stop or signal playback ended; on a real cue it forwards the
// cue to the TextTrack and, if still playing, posts the next read.
void TextRenderer::CueReady(
    DemuxerStream* text_stream,
    const scoped_refptr<TextCue>& text_cue) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  DCHECK(state_ != kUninitialized &&
         state_ != kStopped) << "state_ " << state_;
  DCHECK_GT(pending_read_count_, 0);
  DCHECK(pending_eos_set_.find(text_stream) != pending_eos_set_.end());

  TextTrackStateMap::iterator itr = text_track_state_map_.find(text_stream);
  DCHECK(itr != text_track_state_map_.end());

  TextTrackState* state = itr->second;
  DCHECK_EQ(state->read_state, TextTrackState::kReadPending);
  DCHECK(state->text_track);

  // This read has completed.
  --pending_read_count_;
  state->read_state = TextTrackState::kReadIdle;

  // EOS handling is done inside the switch; a non-NULL cue falls through
  // (via break) to the addWebVTTCue() call below.
  switch (state_) {
    case kPlaying: {
      if (text_cue)
        break;

      // End-of-stream while playing: when the last stream reports EOS,
      // playback of text has ended.
      const size_t count = pending_eos_set_.erase(text_stream);
      DCHECK_EQ(count, 1U);

      if (pending_eos_set_.empty()) {
        DCHECK_EQ(pending_read_count_, 0);
        state_ = kEnded;
        ended_cb_.Run();
        return;
      }

      DCHECK_GT(pending_read_count_, 0);
      return;
    }
    case kPausePending: {
      if (text_cue)
        break;

      // End-of-stream while a pause is pending: complete the pause once no
      // reads remain outstanding.
      const size_t count = pending_eos_set_.erase(text_stream);
      DCHECK_EQ(count, 1U);

      if (pending_read_count_ > 0) {
        DCHECK(!pending_eos_set_.empty());
        return;
      }

      state_ = kPaused;
      base::ResetAndReturn(&pause_cb_).Run();

      return;
    }
    case kStopPending:
      // While stopping, cues are dropped; complete the stop when the last
      // read drains.
      if (pending_read_count_ == 0) {
        state_ = kStopped;
        base::ResetAndReturn(&stop_cb_).Run();
      }

      return;

    case kPaused:
    case kStopped:
    case kUninitialized:
    case kEnded:
      // No reads can be pending in these states.
      NOTREACHED();
      return;
  }

  // Reached only with a non-NULL cue in kPlaying or kPausePending.
  base::TimeDelta start = text_cue->timestamp();
  base::TimeDelta end = start + text_cue->duration();

  state->text_track->addWebVTTCue(start, end,
                                  text_cue->id(),
                                  text_cue->text(),
                                  text_cue->settings());

  if (state_ == kPlaying) {
    Read(state, text_stream);
    return;
  }

  if (pending_read_count_ == 0) {
    DCHECK_EQ(state_, kPausePending) << "state_ " << state_;
    state_ = kPaused;
    base::ResetAndReturn(&pause_cb_).Run();
  }
}
+
+void TextRenderer::OnAddTextTrackDone(DemuxerStream* text_stream,
+ scoped_ptr<TextTrack> text_track) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(state_ != kUninitialized &&
+ state_ != kStopped &&
+ state_ != kStopPending) << "state_ " << state_;
+ DCHECK(text_stream);
+ DCHECK(text_track);
+
+ scoped_ptr<TextTrackState> state(new TextTrackState(text_track.Pass()));
+ text_track_state_map_[text_stream] = state.release();
+ pending_eos_set_.insert(text_stream);
+
+ if (state_ == kPlaying)
+ Read(text_track_state_map_[text_stream], text_stream);
+}
+
// Posts a single asynchronous read on |text_stream| and records it as
// pending, both per-track (|state|) and globally (|pending_read_count_|).
// The weak pointer keeps BufferReady() from firing into a destroyed object.
void TextRenderer::Read(
    TextTrackState* state,
    DemuxerStream* text_stream) {
  DCHECK_NE(state->read_state, TextTrackState::kReadPending);

  state->read_state = TextTrackState::kReadPending;
  ++pending_read_count_;

  text_stream->Read(base::Bind(&TextRenderer::BufferReady,
                               weak_this_,
                               text_stream));
}
+
// Per-stream bookkeeping: takes ownership of the TextTrack and starts with
// no read outstanding.
TextRenderer::TextTrackState::TextTrackState(scoped_ptr<TextTrack> tt)
    : read_state(kReadIdle),
      text_track(tt.Pass()) {
}

TextRenderer::TextTrackState::~TextTrackState() {
}
+
+} // namespace media
diff --git a/media/base/text_renderer.h b/media/base/text_renderer.h
new file mode 100644
index 0000000000..532a1fa037
--- /dev/null
+++ b/media/base/text_renderer.h
@@ -0,0 +1,145 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_TEXT_RENDERER_H_
+#define MEDIA_BASE_TEXT_RENDERER_H_
+
+#include <map>
+#include <set>
+
+#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/media_export.h"
+#include "media/base/pipeline_status.h"
+#include "media/base/text_track.h"
+
+namespace base {
+class MessageLoopProxy;
+}
+
+namespace media {
+
+class TextCue;
+class TextTrackConfig;
+
// Receives decoder buffers from the upstream demuxer, decodes them to text
// cues, and then passes them onto the TextTrack object associated with each
// demuxer text stream. All methods must be called on |message_loop|.
class MEDIA_EXPORT TextRenderer {
 public:
  // |message_loop| is the thread on which TextRenderer will execute.
  //
  // |add_text_track_cb| is called when the demuxer requests (via its host)
  // that a new text track be created.
  TextRenderer(const scoped_refptr<base::MessageLoopProxy>& message_loop,
               const AddTextTrackCB& add_text_track_cb);
  ~TextRenderer();

  // |ended_cb| is executed when all of the text tracks have reached
  // end of stream, following a play request.
  void Initialize(const base::Closure& ended_cb);

  // Start text track cue decoding and rendering, executing |callback| when
  // playback is underway.
  void Play(const base::Closure& callback);

  // Temporarily suspend decoding and rendering, executing |callback| when
  // playback has been suspended.
  void Pause(const base::Closure& callback);

  // Discard any text data, executing |callback| when completed.
  void Flush(const base::Closure& callback);

  // Stop all operations in preparation for being deleted, executing |callback|
  // when complete.
  void Stop(const base::Closure& callback);

  // Add new |text_stream|, having the indicated |config|, to the text stream
  // collection managed by this text renderer.
  void AddTextStream(DemuxerStream* text_stream,
                     const TextTrackConfig& config);

  // Remove |text_stream| from the text stream collection.
  void RemoveTextStream(DemuxerStream* text_stream);

  // Returns true if there are extant text tracks.
  bool HasTracks() const;

 private:
  // Per-stream bookkeeping: owns the client-created TextTrack and tracks
  // whether a demuxer read is in flight for the stream.
  struct TextTrackState {
    // To determine read progress.
    enum ReadState {
      kReadIdle,
      kReadPending
    };

    explicit TextTrackState(scoped_ptr<TextTrack> text_track);
    ~TextTrackState();

    ReadState read_state;
    scoped_ptr<TextTrack> text_track;
  };

  // Callback delivered by the demuxer |text_stream| when
  // a read from the stream completes.
  void BufferReady(DemuxerStream* text_stream,
                   DemuxerStream::Status status,
                   const scoped_refptr<DecoderBuffer>& input);

  // Dispatches the decoded cue delivered on the demuxer's |text_stream|.
  // A NULL |text_cue| signals end-of-stream for that stream.
  void CueReady(DemuxerStream* text_stream,
                const scoped_refptr<TextCue>& text_cue);

  // Dispatched when the AddTextTrackCB completes, after having created
  // the TextTrack object associated with |text_stream|.
  void OnAddTextTrackDone(DemuxerStream* text_stream,
                          scoped_ptr<TextTrack> text_track);

  // Utility function to post a read request on |text_stream|.
  void Read(TextTrackState* state, DemuxerStream* text_stream);

  scoped_refptr<base::MessageLoopProxy> message_loop_;
  base::WeakPtrFactory<TextRenderer> weak_factory_;
  // Minted in Initialize(); bound into demuxer-read and add-track callbacks
  // so they are dropped safely if this object is destroyed first.
  base::WeakPtr<TextRenderer> weak_this_;
  const AddTextTrackCB add_text_track_cb_;

  // Callbacks provided during Initialize().
  base::Closure ended_cb_;

  // Callback provided to Pause().
  base::Closure pause_cb_;

  // Callback provided to Stop().
  base::Closure stop_cb_;

  // Simple state tracking variable. The *Pending states mean the operation
  // completes asynchronously once all in-flight reads have drained.
  enum State {
    kUninitialized,
    kPausePending,
    kPaused,
    kPlaying,
    kEnded,
    kStopPending,
    kStopped
  };
  State state_;

  // Values are owned; deleted in RemoveTextStream() or the destructor.
  typedef std::map<DemuxerStream*, TextTrackState*> TextTrackStateMap;
  TextTrackStateMap text_track_state_map_;

  // Indicates how many read requests are in flight.
  int pending_read_count_;

  // Indicates which text streams have not delivered end-of-stream yet.
  typedef std::set<DemuxerStream*> PendingEosSet;
  PendingEosSet pending_eos_set_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(TextRenderer);
};
+
+} // namespace media
+
+#endif // MEDIA_BASE_TEXT_RENDERER_H_
diff --git a/media/base/text_renderer_unittest.cc b/media/base/text_renderer_unittest.cc
new file mode 100644
index 0000000000..77e8c47182
--- /dev/null
+++ b/media/base/text_renderer_unittest.cc
@@ -0,0 +1,1382 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/memory/scoped_vector.h"
+#include "base/message_loop/message_loop.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/fake_text_track_stream.h"
+#include "media/base/text_renderer.h"
+#include "media/base/text_track_config.h"
+#include "media/base/video_decoder_config.h"
+#include "media/filters/webvtt_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::Eq;
+using ::testing::Exactly;
+using ::testing::Invoke;
+using ::testing::_;
+
+namespace media {
+
// Local implementation of the TextTrack interface. Runs |destroy_cb| on
// destruction so the test fixture can observe when the renderer deletes it.
class FakeTextTrack : public TextTrack {
 public:
  FakeTextTrack(const base::Closure& destroy_cb,
                const TextTrackConfig& config)
      : destroy_cb_(destroy_cb),
        config_(config) {
  }
  virtual ~FakeTextTrack() {
    destroy_cb_.Run();
  }

  MOCK_METHOD5(addWebVTTCue, void(const base::TimeDelta& start,
                                  const base::TimeDelta& end,
                                  const std::string& id,
                                  const std::string& content,
                                  const std::string& settings));

  const base::Closure destroy_cb_;
  const TextTrackConfig config_;

 private:
  DISALLOW_COPY_AND_ASSIGN(FakeTextTrack);
};
+
// Test fixture driving TextRenderer through fake demuxer text streams
// (FakeTextTrackStream) and fake client tracks (FakeTextTrack).
class TextRendererTest : public testing::Test {
 public:
  TextRendererTest() {}

  // Builds and initializes the renderer on this test's message loop.
  void CreateTextRenderer() {
    DCHECK(!text_renderer_);

    text_renderer_.reset(
        new TextRenderer(message_loop_.message_loop_proxy(),
                         base::Bind(&TextRendererTest::OnAddTextTrack,
                                    base::Unretained(this))));
    text_renderer_->Initialize(base::Bind(&TextRendererTest::OnEnd,
                                          base::Unretained(this)));
  }

  // Stops the renderer (expecting OnStop) and tears everything down.
  void DestroyTextRenderer() {
    EXPECT_CALL(*this, OnStop());
    text_renderer_->Stop(base::Bind(&TextRendererTest::OnStop,
                                    base::Unretained(this)));
    message_loop_.RunUntilIdle();

    text_renderer_.reset();
    text_track_streams_.clear();
  }

  // Registers a new fake stream; |expect_read| is true when the renderer is
  // (or will be) playing and should immediately issue a read.
  void AddTextTrack(TextKind kind,
                    const std::string& name,
                    const std::string& language,
                    bool expect_read) {
    const size_t idx = text_track_streams_.size();
    text_track_streams_.push_back(new FakeTextTrackStream);

    if (expect_read)
      ExpectRead(idx);

    const TextTrackConfig config(kind, name, language, std::string());
    text_renderer_->AddTextStream(text_track_streams_.back(), config);
    message_loop_.RunUntilIdle();

    EXPECT_EQ(text_tracks_.size(), text_track_streams_.size());
    FakeTextTrack* const text_track = text_tracks_.back();
    EXPECT_TRUE(text_track);
    EXPECT_TRUE(text_track->config_.Matches(config));
  }

  // AddTextTrackCB handed to the renderer.
  void OnAddTextTrack(const TextTrackConfig& config,
                      const AddTextTrackDoneCB& done_cb) {
    base::Closure destroy_cb =
        base::Bind(&TextRendererTest::OnDestroyTextTrack,
                   base::Unretained(this),
                   text_tracks_.size());
    // Text track objects are owned by the text renderer, but we cache them
    // here so we can inspect them. They get removed from our cache when the
    // text renderer deallocates them.
    text_tracks_.push_back(new FakeTextTrack(destroy_cb, config));
    scoped_ptr<TextTrack> text_track(text_tracks_.back());
    done_cb.Run(text_track.Pass());
  }

  // Removes the stream and verifies the renderer destroyed its track.
  void RemoveTextTrack(unsigned idx) {
    FakeTextTrackStream* const stream = text_track_streams_[idx];
    text_renderer_->RemoveTextStream(stream);
    EXPECT_FALSE(text_tracks_[idx]);
  }

  // Completes the outstanding read on every stream with the same cue.
  void SatisfyPendingReads(const base::TimeDelta& start,
                           const base::TimeDelta& duration,
                           const std::string& id,
                           const std::string& content,
                           const std::string& settings) {
    for (TextTrackStreams::iterator itr = text_track_streams_.begin();
         itr != text_track_streams_.end(); ++itr) {
      (*itr)->SatisfyPendingRead(start, duration, id, content, settings);
    }
  }

  void AbortPendingRead(unsigned idx) {
    FakeTextTrackStream* const stream = text_track_streams_[idx];
    stream->AbortPendingRead();
    message_loop_.RunUntilIdle();
  }

  void AbortPendingReads() {
    for (size_t idx = 0; idx < text_track_streams_.size(); ++idx) {
      AbortPendingRead(idx);
    }
  }

  void SendEosNotification(unsigned idx) {
    FakeTextTrackStream* const stream = text_track_streams_[idx];
    stream->SendEosNotification();
    message_loop_.RunUntilIdle();
  }

  void SendEosNotifications() {
    for (size_t idx = 0; idx < text_track_streams_.size(); ++idx) {
      SendEosNotification(idx);
    }
  }

  // Delivers one canned cue on stream |idx|; |expect_cue| is false when the
  // renderer should drop the cue (e.g. while stopping).
  void SendCue(unsigned idx, bool expect_cue) {
    FakeTextTrackStream* const text_stream = text_track_streams_[idx];

    const base::TimeDelta start;
    const base::TimeDelta duration = base::TimeDelta::FromSeconds(42);
    const std::string id = "id";
    const std::string content = "subtitle";
    const std::string settings;

    if (expect_cue) {
      FakeTextTrack* const text_track = text_tracks_[idx];
      EXPECT_CALL(*text_track, addWebVTTCue(start,
                                            start + duration,
                                            id,
                                            content,
                                            settings));
    }

    text_stream->SatisfyPendingRead(start, duration, id, content, settings);
    message_loop_.RunUntilIdle();
  }

  void SendCues(bool expect_cue) {
    for (size_t idx = 0; idx < text_track_streams_.size(); ++idx) {
      SendCue(idx, expect_cue);
    }
  }

  // Destroy callback bound into each FakeTextTrack; clears the cache slot.
  void OnDestroyTextTrack(unsigned idx) {
    text_tracks_[idx] = NULL;
  }

  void Play() {
    EXPECT_CALL(*this, OnPlay());
    text_renderer_->Play(base::Bind(&TextRendererTest::OnPlay,
                                    base::Unretained(this)));
    message_loop_.RunUntilIdle();
  }

  // Note: callers set the EXPECT_CALL(OnPause()) themselves, since the pause
  // may complete asynchronously.
  void Pause() {
    text_renderer_->Pause(base::Bind(&TextRendererTest::OnPause,
                                     base::Unretained(this)));
    message_loop_.RunUntilIdle();
  }

  void Flush() {
    EXPECT_CALL(*this, OnFlush());
    text_renderer_->Flush(base::Bind(&TextRendererTest::OnFlush,
                                     base::Unretained(this)));
  }

  // Note: callers set the EXPECT_CALL(OnStop()) themselves.
  void Stop() {
    text_renderer_->Stop(base::Bind(&TextRendererTest::OnStop,
                                    base::Unretained(this)));
    message_loop_.RunUntilIdle();
  }

  void ExpectRead(size_t idx) {
    FakeTextTrackStream* const stream = text_track_streams_[idx];
    EXPECT_CALL(*stream, OnRead());
  }

  MOCK_METHOD0(OnEnd, void());
  MOCK_METHOD0(OnStop, void());
  MOCK_METHOD0(OnPlay, void());
  MOCK_METHOD0(OnPause, void());
  MOCK_METHOD0(OnFlush, void());

  scoped_ptr<TextRenderer> text_renderer_;
  base::MessageLoop message_loop_;

  typedef ScopedVector<FakeTextTrackStream> TextTrackStreams;
  TextTrackStreams text_track_streams_;

  // Raw observation pointers; ownership lives with the renderer. Entries are
  // NULLed by OnDestroyTextTrack() when the renderer deletes a track.
  typedef std::vector<FakeTextTrack*> TextTracks;
  TextTracks text_tracks_;

 private:
  DISALLOW_COPY_AND_ASSIGN(TextRendererTest);
};
+
// Basic lifecycle: construction, init/stop, track registration, bare play.

// A renderer that was never initialized may be destroyed directly.
TEST_F(TextRendererTest, CreateTextRendererNoInit) {
  text_renderer_.reset(
      new TextRenderer(message_loop_.message_loop_proxy(),
                       base::Bind(&TextRendererTest::OnAddTextTrack,
                                  base::Unretained(this))));
  text_renderer_.reset();
}

TEST_F(TextRendererTest, TestStop) {
  CreateTextRenderer();
  DestroyTextRenderer();
}

// Adding a track while paused must not trigger a read (expect_read=false).
TEST_F(TextRendererTest, AddTextTrackOnly_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", false);
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, AddTextTrackOnly_TwoTracks) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "track 1", "", false);
  AddTextTrack(kTextSubtitles, "track 2", "", false);
  DestroyTextRenderer();
}

// Play with no tracks registered is legal and completes immediately.
TEST_F(TextRendererTest, PlayOnly) {
  CreateTextRenderer();
  Play();
  DestroyTextRenderer();
}
+
// Track registration around Play(), and cue delivery while playing.

TEST_F(TextRendererTest, AddTrackBeforePlay_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", true);
  Play();
  AbortPendingReads();
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, AddTrackBeforePlay_TwoTracks) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  Play();
  AbortPendingReads();
  DestroyTextRenderer();
}

// A track added after Play() should start reading immediately.
TEST_F(TextRendererTest, AddTrackAfterPlay_OneTrackAfter) {
  CreateTextRenderer();
  Play();
  AddTextTrack(kTextSubtitles, "", "", true);
  AbortPendingReads();
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, AddTrackAfterPlay_TwoTracksAfter) {
  CreateTextRenderer();
  Play();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  AbortPendingReads();
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, AddTrackAfterPlay_OneTrackBeforeOneTrackAfter) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  Play();
  AddTextTrack(kTextSubtitles, "2", "", true);
  AbortPendingReads();
  DestroyTextRenderer();
}

// Each delivered cue should reach the TextTrack and trigger a follow-up read.
TEST_F(TextRendererTest, PlayAddCue_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", true);
  Play();
  ExpectRead(0);
  SendCues(true);
  AbortPendingReads();
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, PlayAddCue_TwoTracks) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  Play();
  ExpectRead(0);
  ExpectRead(1);
  SendCues(true);
  AbortPendingReads();
  DestroyTextRenderer();
}
+
// End-of-stream handling and stop-while-reads-pending.

// OnEnd fires once every track delivers EOS.
TEST_F(TextRendererTest, PlayEosOnly_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", true);
  Play();
  EXPECT_CALL(*this, OnEnd());
  SendEosNotifications();
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, PlayEosOnly_TwoTracks) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  Play();
  EXPECT_CALL(*this, OnEnd());
  SendEosNotifications();
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, PlayCueEos_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", true);
  Play();
  ExpectRead(0);
  SendCues(true);
  EXPECT_CALL(*this, OnEnd());
  SendEosNotifications();
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, PlayCueEos_TwoTracks) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  Play();
  ExpectRead(0);
  ExpectRead(1);
  SendCues(true);
  EXPECT_CALL(*this, OnEnd());
  SendEosNotifications();
  DestroyTextRenderer();
}

// Stop() issued with reads in flight completes only after the reads drain;
// teardown is manual because DestroyTextRenderer() would Stop() again.
TEST_F(TextRendererTest, StopPending_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", true);
  Play();
  Stop();
  EXPECT_CALL(*this, OnStop());
  SendEosNotifications();
  text_renderer_.reset();
  text_track_streams_.clear();
}

TEST_F(TextRendererTest, StopPending_TwoTracks) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  Play();
  Stop();
  EXPECT_CALL(*this, OnStop());
  SendEosNotifications();
  text_renderer_.reset();
  text_track_streams_.clear();
}
+
// Pause (immediate and deferred), and Flush/replay after end-of-stream.

TEST_F(TextRendererTest, PlayPause_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", true);
  Play();
  AbortPendingReads();
  EXPECT_CALL(*this, OnPause());
  Pause();
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, PlayPause_TwoTracks) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  Play();
  AbortPendingReads();
  EXPECT_CALL(*this, OnPause());
  Pause();
  DestroyTextRenderer();
}

// Pause requested with reads pending completes when EOS arrives.
TEST_F(TextRendererTest, PlayEosPausePending_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", true);
  Play();
  Pause();
  EXPECT_CALL(*this, OnPause());
  SendEosNotifications();
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, PlayEosPausePending_TwoTracks) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  Play();
  Pause();
  EXPECT_CALL(*this, OnPause());
  SendEosNotifications();
  DestroyTextRenderer();
}

// Pause requested with reads pending completes when the cue arrives; the
// cue is still delivered to the track.
TEST_F(TextRendererTest, PlayCuePausePending_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", true);
  Play();
  Pause();
  EXPECT_CALL(*this, OnPause());
  SendCues(true);
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, PlayCuePausePending_TwoTracks) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  Play();
  Pause();
  EXPECT_CALL(*this, OnPause());
  SendCues(true);
  DestroyTextRenderer();
}

// Pausing from the kEnded state completes immediately.
TEST_F(TextRendererTest, PlayEosPause_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", true);
  Play();
  EXPECT_CALL(*this, OnEnd());
  SendEosNotifications();
  EXPECT_CALL(*this, OnPause());
  Pause();
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, PlayEosPause_TwoTracks) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  Play();
  EXPECT_CALL(*this, OnEnd());
  SendEosNotifications();
  EXPECT_CALL(*this, OnPause());
  Pause();
  DestroyTextRenderer();
}

// OnEnd fires only after the second (last) stream's EOS.
TEST_F(TextRendererTest, PlayEosPause_SplitEos) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  Play();
  SendEosNotification(0);
  EXPECT_CALL(*this, OnEnd());
  SendEosNotification(1);
  EXPECT_CALL(*this, OnPause());
  Pause();
  DestroyTextRenderer();
}

// After Flush, a new Play re-issues reads and streams can end again.
TEST_F(TextRendererTest, PlayEosFlush_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", true);
  Play();
  EXPECT_CALL(*this, OnEnd());
  SendEosNotifications();
  EXPECT_CALL(*this, OnPause());
  Pause();
  Flush();
  ExpectRead(0);
  Play();
  EXPECT_CALL(*this, OnEnd());
  SendEosNotifications();
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, PlayEosFlush_TwoTracks) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  Play();
  EXPECT_CALL(*this, OnEnd());
  SendEosNotifications();
  EXPECT_CALL(*this, OnPause());
  Pause();
  Flush();
  ExpectRead(0);
  ExpectRead(1);
  Play();
  EXPECT_CALL(*this, OnEnd());
  SendEosNotifications();
  DestroyTextRenderer();
}
+
// RemoveTextStream in every state: idle, playing, after abort, after cues,
// and after end-of-stream. HasTracks() flips false when the last track goes.

TEST_F(TextRendererTest, AddTextTrackOnlyRemove_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", false);
  EXPECT_TRUE(text_renderer_->HasTracks());
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, AddTextTrackOnlyRemove_TwoTracks) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "track 1", "", false);
  AddTextTrack(kTextSubtitles, "track 2", "", false);
  RemoveTextTrack(0);
  EXPECT_TRUE(text_renderer_->HasTracks());
  RemoveTextTrack(1);
  EXPECT_FALSE(text_renderer_->HasTracks());
  DestroyTextRenderer();
}

// Reads must be aborted before a playing track can be removed.
TEST_F(TextRendererTest, AddTrackBeforePlayRemove_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", true);
  Play();
  AbortPendingReads();
  RemoveTextTrack(0);
  EXPECT_FALSE(text_renderer_->HasTracks());
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, AddTrackBeforePlayRemove_TwoTracks) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  Play();
  AbortPendingReads();
  RemoveTextTrack(0);
  EXPECT_TRUE(text_renderer_->HasTracks());
  RemoveTextTrack(1);
  EXPECT_FALSE(text_renderer_->HasTracks());
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, AddTrackBeforePlayRemove_SeparateCancel) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  Play();
  AbortPendingRead(0);
  RemoveTextTrack(0);
  EXPECT_TRUE(text_renderer_->HasTracks());
  AbortPendingRead(1);
  RemoveTextTrack(1);
  EXPECT_FALSE(text_renderer_->HasTracks());
  DestroyTextRenderer();
}

// A track removed before Play() never gets a read.
TEST_F(TextRendererTest, AddTrackBeforePlayRemove_RemoveOneThenPlay) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", false);
  AddTextTrack(kTextSubtitles, "2", "", true);
  RemoveTextTrack(0);
  EXPECT_TRUE(text_renderer_->HasTracks());
  Play();
  AbortPendingRead(1);
  RemoveTextTrack(1);
  EXPECT_FALSE(text_renderer_->HasTracks());
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, AddTrackBeforePlayRemove_RemoveTwoThenPlay) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", false);
  AddTextTrack(kTextSubtitles, "2", "", false);
  RemoveTextTrack(0);
  EXPECT_TRUE(text_renderer_->HasTracks());
  RemoveTextTrack(1);
  EXPECT_FALSE(text_renderer_->HasTracks());
  Play();
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, AddTrackAfterPlayRemove_OneTrack) {
  CreateTextRenderer();
  Play();
  AddTextTrack(kTextSubtitles, "", "", true);
  AbortPendingReads();
  RemoveTextTrack(0);
  EXPECT_FALSE(text_renderer_->HasTracks());
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, AddTrackAfterPlayRemove_TwoTracks) {
  CreateTextRenderer();
  Play();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  AbortPendingReads();
  RemoveTextTrack(0);
  EXPECT_TRUE(text_renderer_->HasTracks());
  RemoveTextTrack(1);
  EXPECT_FALSE(text_renderer_->HasTracks());
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, AddTrackAfterPlayRemove_SplitCancel) {
  CreateTextRenderer();
  Play();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  AbortPendingRead(0);
  RemoveTextTrack(0);
  EXPECT_TRUE(text_renderer_->HasTracks());
  AbortPendingRead(1);
  RemoveTextTrack(1);
  EXPECT_FALSE(text_renderer_->HasTracks());
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, AddTrackAfterPlayRemove_SplitAdd) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  Play();
  AddTextTrack(kTextSubtitles, "2", "", true);
  AbortPendingRead(0);
  RemoveTextTrack(0);
  EXPECT_TRUE(text_renderer_->HasTracks());
  AbortPendingRead(1);
  RemoveTextTrack(1);
  EXPECT_FALSE(text_renderer_->HasTracks());
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, PlayAddCueRemove_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", true);
  Play();
  ExpectRead(0);
  SendCues(true);
  AbortPendingReads();
  RemoveTextTrack(0);
  EXPECT_FALSE(text_renderer_->HasTracks());
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, PlayAddCueRemove_TwoTracks) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  Play();
  ExpectRead(0);
  ExpectRead(1);
  SendCues(true);
  AbortPendingRead(0);
  AbortPendingRead(1);
  RemoveTextTrack(0);
  EXPECT_TRUE(text_renderer_->HasTracks());
  RemoveTextTrack(1);
  EXPECT_FALSE(text_renderer_->HasTracks());
  DestroyTextRenderer();
}

// Tracks that already hit EOS can be removed without aborting anything.
TEST_F(TextRendererTest, PlayEosOnlyRemove_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", true);
  Play();
  EXPECT_CALL(*this, OnEnd());
  SendEosNotifications();
  RemoveTextTrack(0);
  EXPECT_FALSE(text_renderer_->HasTracks());
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, PlayEosOnlyRemove_TwoTracks) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "1", "", true);
  AddTextTrack(kTextSubtitles, "2", "", true);
  Play();
  EXPECT_CALL(*this, OnEnd());
  SendEosNotifications();
  RemoveTextTrack(0);
  EXPECT_TRUE(text_renderer_->HasTracks());
  RemoveTextTrack(1);
  EXPECT_FALSE(text_renderer_->HasTracks());
  DestroyTextRenderer();
}

TEST_F(TextRendererTest, PlayCueEosRemove_OneTrack) {
  CreateTextRenderer();
  AddTextTrack(kTextSubtitles, "", "", true);
  Play();
  ExpectRead(0);
  SendCues(true);
  EXPECT_CALL(*this, OnEnd());
  SendEosNotifications();
  RemoveTextTrack(0);
  EXPECT_FALSE(text_renderer_->HasTracks());
  DestroyTextRenderer();
}
+
+TEST_F(TextRendererTest, PlayCueEosRemove_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ ExpectRead(0);
+ ExpectRead(1);
+ SendCues(true);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, TestStopPendingRemove_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ Stop();
+ EXPECT_CALL(*this, OnStop());
+ SendEosNotifications();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+TEST_F(TextRendererTest, TestStopPendingRemove_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Stop();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnStop());
+ SendEosNotification(1);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+TEST_F(TextRendererTest, TestStopPendingRemove_RemoveThenSendEos) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Stop();
+ SendEosNotification(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnStop());
+ SendEosNotification(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+TEST_F(TextRendererTest, PlayPauseRemove_PauseThenRemove) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ AbortPendingReads();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayPauseRemove_RemoveThenPause) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ AbortPendingReads();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayPause_PauseThenRemoveTwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingReads();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayPauseRemove_RemoveThenPauseTwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingReads();
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayPauseRemove_SplitCancel) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingRead(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ AbortPendingRead(1);
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+
+TEST_F(TextRendererTest, PlayPauseRemove_PauseLast) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingRead(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ AbortPendingRead(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPausePendingRemove_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ Pause();
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotifications();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPausePendingRemove_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotification(1);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPausePendingRemove_SplitEos) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendEosNotification(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotification(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCuePausePendingRemove_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ Pause();
+ EXPECT_CALL(*this, OnPause());
+ SendCues(true);
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCuePausePendingRemove_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendCue(0, true);
+ EXPECT_CALL(*this, OnPause());
+ SendCue(1, true);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCuePausePendingRemove_SplitSendCue) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendCue(0, true);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ SendCue(1, true);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPauseRemove_PauseThenRemove) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPauseRemove_RemoveThenPause) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_PauseThenRemoveTwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_RemovePauseRemove) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_EosThenPause) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_PauseLast) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_EosPauseRemove) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_EosRemovePause) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_EosRemoveEosPause) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_EosRemoveEosRemovePause) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosFlushRemove_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ Flush();
+ ExpectRead(0);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosFlushRemove_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ Flush();
+ ExpectRead(0);
+ ExpectRead(1);
+ Play();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosFlushRemove_EosRemove) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ Flush();
+ ExpectRead(0);
+ ExpectRead(1);
+ Play();
+ SendEosNotification(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayShort_SendCueThenEos) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendCue(0, true);
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotification(1);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayShort_EosThenSendCue) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnPause());
+ SendCue(1, true);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayShortRemove_SendEosRemove) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendCue(0, true);
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotification(1);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayShortRemove_SendRemoveEos) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendCue(0, true);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotification(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCuePausePendingCancel_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ Pause();
+ EXPECT_CALL(*this, OnPause());
+ AbortPendingRead(0);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCuePausePendingCancel_SendThenCancel) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendCue(0, true);
+ EXPECT_CALL(*this, OnPause());
+ AbortPendingRead(1);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCuePausePendingCancel_CancelThenSend) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ AbortPendingRead(0);
+ EXPECT_CALL(*this, OnPause());
+ SendCue(1, true);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCueStopPendingCancel_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ Pause();
+ Stop();
+ EXPECT_CALL(*this, OnStop());
+ AbortPendingRead(0);
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+TEST_F(TextRendererTest, PlayCueStopPendingCancel_SendThenCancel) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ Stop();
+ SendCue(0, false);
+ EXPECT_CALL(*this, OnStop());
+ AbortPendingRead(1);
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+TEST_F(TextRendererTest, PlayCueStopPendingCancel_CancelThenSend) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ Stop();
+ AbortPendingRead(0);
+ EXPECT_CALL(*this, OnStop());
+ SendCue(1, false);
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+TEST_F(TextRendererTest, AddRemoveAdd) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ AbortPendingRead(0);
+ RemoveTextTrack(0);
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddRemoveEos) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingRead(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddRemovePause) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingRead(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ Pause();
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotification(1);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddRemovePauseStop) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingRead(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ Pause();
+ Stop();
+ EXPECT_CALL(*this, OnStop());
+ SendEosNotification(1);
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+} // namespace media
diff --git a/media/base/text_track.h b/media/base/text_track.h
index 01a2ed727f..0e04a0eb46 100644
--- a/media/base/text_track.h
+++ b/media/base/text_track.h
@@ -13,14 +13,7 @@
namespace media {
-// Specifies the varieties of text tracks.
-enum TextKind {
- kTextSubtitles,
- kTextCaptions,
- kTextDescriptions,
- kTextMetadata,
- kTextNone
-};
+class TextTrackConfig;
class TextTrack {
public:
@@ -32,10 +25,12 @@ class TextTrack {
const std::string& settings) = 0;
};
-typedef base::Callback<scoped_ptr<TextTrack>
- (TextKind kind,
- const std::string& label,
- const std::string& language)> AddTextTrackCB;
+typedef base::Callback<void
+ (scoped_ptr<TextTrack>)> AddTextTrackDoneCB;
+
+typedef base::Callback<void
+ (const TextTrackConfig& config,
+ const AddTextTrackDoneCB& done_cb)> AddTextTrackCB;
} // namespace media
diff --git a/media/base/text_track_config.cc b/media/base/text_track_config.cc
new file mode 100644
index 0000000000..0d4b11f6dd
--- /dev/null
+++ b/media/base/text_track_config.cc
@@ -0,0 +1,30 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/text_track_config.h"
+
+namespace media {
+
+TextTrackConfig::TextTrackConfig()
+ : kind_(kTextNone) {
+}
+
+TextTrackConfig::TextTrackConfig(TextKind kind,
+ const std::string& label,
+ const std::string& language,
+ const std::string& id)
+ : kind_(kind),
+ label_(label),
+ language_(language),
+ id_(id) {
+}
+
+bool TextTrackConfig::Matches(const TextTrackConfig& config) const {
+ return config.kind() == kind_ &&
+ config.label() == label_ &&
+ config.language() == language_ &&
+ config.id() == id_;
+}
+
+} // namespace media
diff --git a/media/base/text_track_config.h b/media/base/text_track_config.h
new file mode 100644
index 0000000000..58efba4b03
--- /dev/null
+++ b/media/base/text_track_config.h
@@ -0,0 +1,48 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_TEXT_TRACK_CONFIG_H_
+#define MEDIA_BASE_TEXT_TRACK_CONFIG_H_
+
+#include <string>
+
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Specifies the varieties of text tracks.
+enum TextKind {
+ kTextSubtitles,
+ kTextCaptions,
+ kTextDescriptions,
+ kTextMetadata,
+ kTextNone
+};
+
+class MEDIA_EXPORT TextTrackConfig {
+ public:
+ TextTrackConfig();
+ TextTrackConfig(TextKind kind,
+ const std::string& label,
+ const std::string& language,
+ const std::string& id);
+
+ // Returns true if all fields in |config| match this config.
+ bool Matches(const TextTrackConfig& config) const;
+
+ TextKind kind() const { return kind_; }
+ const std::string& label() const { return label_; }
+ const std::string& language() const { return language_; }
+ const std::string& id() const { return id_; }
+
+ private:
+ TextKind kind_;
+ std::string label_;
+ std::string language_;
+ std::string id_;
+};
+
+} // namespace media
+
+#endif  // MEDIA_BASE_TEXT_TRACK_CONFIG_H_
diff --git a/media/base/vector_math.cc b/media/base/vector_math.cc
index de946ca8cb..32584f5cf6 100644
--- a/media/base/vector_math.cc
+++ b/media/base/vector_math.cc
@@ -5,6 +5,8 @@
#include "media/base/vector_math.h"
#include "media/base/vector_math_testing.h"
+#include <algorithm>
+
#include "base/cpu.h"
#include "base/logging.h"
#include "build/build_config.h"
@@ -23,33 +25,42 @@ namespace vector_math {
#if defined(__SSE__)
#define FMAC_FUNC FMAC_SSE
#define FMUL_FUNC FMUL_SSE
+#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_SSE
void Initialize() {}
#else
// X86 CPU detection required. Functions will be set by Initialize().
// TODO(dalecurtis): Once Chrome moves to an SSE baseline this can be removed.
#define FMAC_FUNC g_fmac_proc_
#define FMUL_FUNC g_fmul_proc_
+#define EWMAAndMaxPower_FUNC g_ewma_power_proc_
typedef void (*MathProc)(const float src[], float scale, int len, float dest[]);
static MathProc g_fmac_proc_ = NULL;
static MathProc g_fmul_proc_ = NULL;
+typedef std::pair<float, float> (*EWMAAndMaxPowerProc)(
+ float initial_value, const float src[], int len, float smoothing_factor);
+static EWMAAndMaxPowerProc g_ewma_power_proc_ = NULL;
void Initialize() {
CHECK(!g_fmac_proc_);
CHECK(!g_fmul_proc_);
+ CHECK(!g_ewma_power_proc_);
const bool kUseSSE = base::CPU().has_sse();
g_fmac_proc_ = kUseSSE ? FMAC_SSE : FMAC_C;
g_fmul_proc_ = kUseSSE ? FMUL_SSE : FMUL_C;
+ g_ewma_power_proc_ = kUseSSE ? EWMAAndMaxPower_SSE : EWMAAndMaxPower_C;
}
#endif
#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
#define FMAC_FUNC FMAC_NEON
#define FMUL_FUNC FMUL_NEON
+#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_NEON
void Initialize() {}
#else
// Unknown architecture.
#define FMAC_FUNC FMAC_C
#define FMUL_FUNC FMUL_C
+#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_C
void Initialize() {}
#endif
@@ -77,6 +88,27 @@ void FMUL_C(const float src[], float scale, int len, float dest[]) {
dest[i] = src[i] * scale;
}
+std::pair<float, float> EWMAAndMaxPower(
+ float initial_value, const float src[], int len, float smoothing_factor) {
+ // Ensure |src| is 16-byte aligned.
+ DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(src) & (kRequiredAlignment - 1));
+ return EWMAAndMaxPower_FUNC(initial_value, src, len, smoothing_factor);
+}
+
+std::pair<float, float> EWMAAndMaxPower_C(
+ float initial_value, const float src[], int len, float smoothing_factor) {
+ std::pair<float, float> result(initial_value, 0.0f);
+ const float weight_prev = 1.0f - smoothing_factor;
+ for (int i = 0; i < len; ++i) {
+ result.first *= weight_prev;
+ const float sample = src[i];
+ const float sample_squared = sample * sample;
+ result.first += sample_squared * smoothing_factor;
+ result.second = std::max(result.second, sample_squared);
+ }
+ return result;
+}
+
#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
void FMAC_NEON(const float src[], float scale, int len, float dest[]) {
const int rem = len % 4;
@@ -103,6 +135,71 @@ void FMUL_NEON(const float src[], float scale, int len, float dest[]) {
for (int i = last_index; i < len; ++i)
dest[i] = src[i] * scale;
}
+
+std::pair<float, float> EWMAAndMaxPower_NEON(
+ float initial_value, const float src[], int len, float smoothing_factor) {
+ // When the recurrence is unrolled, we see that we can split it into 4
+ // separate lanes of evaluation:
+ //
+ // y[n] = a(S[n]^2) + (1-a)(y[n-1])
+ // = a(S[n]^2) + (1-a)^1(aS[n-1]^2) + (1-a)^2(aS[n-2]^2) + ...
+ // = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
+ //
+ // where z[n] = a(S[n]^2) + (1-a)^4(z[n-4]) + (1-a)^8(z[n-8]) + ...
+ //
+ // Thus, the strategy here is to compute z[n], z[n-1], z[n-2], and z[n-3] in
+ // each of the 4 lanes, and then combine them to give y[n].
+
+ const int rem = len % 4;
+ const int last_index = len - rem;
+
+ const float32x4_t smoothing_factor_x4 = vdupq_n_f32(smoothing_factor);
+ const float weight_prev = 1.0f - smoothing_factor;
+ const float32x4_t weight_prev_x4 = vdupq_n_f32(weight_prev);
+ const float32x4_t weight_prev_squared_x4 =
+ vmulq_f32(weight_prev_x4, weight_prev_x4);
+ const float32x4_t weight_prev_4th_x4 =
+ vmulq_f32(weight_prev_squared_x4, weight_prev_squared_x4);
+
+ // Compute z[n], z[n-1], z[n-2], and z[n-3] in parallel in lanes 3, 2, 1 and
+ // 0, respectively.
+ float32x4_t max_x4 = vdupq_n_f32(0.0f);
+ float32x4_t ewma_x4 = vsetq_lane_f32(initial_value, vdupq_n_f32(0.0f), 3);
+ int i;
+ for (i = 0; i < last_index; i += 4) {
+ ewma_x4 = vmulq_f32(ewma_x4, weight_prev_4th_x4);
+ const float32x4_t sample_x4 = vld1q_f32(src + i);
+ const float32x4_t sample_squared_x4 = vmulq_f32(sample_x4, sample_x4);
+ max_x4 = vmaxq_f32(max_x4, sample_squared_x4);
+ ewma_x4 = vmlaq_f32(ewma_x4, sample_squared_x4, smoothing_factor_x4);
+ }
+
+ // y[n] = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
+ float ewma = vgetq_lane_f32(ewma_x4, 3);
+ ewma_x4 = vmulq_f32(ewma_x4, weight_prev_x4);
+ ewma += vgetq_lane_f32(ewma_x4, 2);
+ ewma_x4 = vmulq_f32(ewma_x4, weight_prev_x4);
+ ewma += vgetq_lane_f32(ewma_x4, 1);
+ ewma_x4 = vmulq_f32(ewma_x4, weight_prev_x4);
+ ewma += vgetq_lane_f32(ewma_x4, 0);
+
+ // Fold the maximums together to get the overall maximum.
+ float32x2_t max_x2 = vpmax_f32(vget_low_f32(max_x4), vget_high_f32(max_x4));
+ max_x2 = vpmax_f32(max_x2, max_x2);
+
+ std::pair<float, float> result(ewma, vget_lane_f32(max_x2, 0));
+
+ // Handle remaining values at the end of |src|.
+ for (; i < len; ++i) {
+ result.first *= weight_prev;
+ const float sample = src[i];
+ const float sample_squared = sample * sample;
+ result.first += sample_squared * smoothing_factor;
+ result.second = std::max(result.second, sample_squared);
+ }
+
+ return result;
+}
#endif
} // namespace vector_math
diff --git a/media/base/vector_math.h b/media/base/vector_math.h
index 4764f0b7e3..a4dea37289 100644
--- a/media/base/vector_math.h
+++ b/media/base/vector_math.h
@@ -5,6 +5,8 @@
#ifndef MEDIA_BASE_VECTOR_MATH_H_
#define MEDIA_BASE_VECTOR_MATH_H_
+#include <utility>
+
#include "media/base/media_export.h"
namespace media {
@@ -26,6 +28,16 @@ MEDIA_EXPORT void FMAC(const float src[], float scale, int len, float dest[]);
// |dest| must be aligned by kRequiredAlignment.
MEDIA_EXPORT void FMUL(const float src[], float scale, int len, float dest[]);
+// Computes the exponentially-weighted moving average power of a signal by
+// iterating the recurrence:
+//
+// y[-1] = initial_value
+// y[n] = smoothing_factor * src[n]^2 + (1-smoothing_factor) * y[n-1]
+//
+// Returns the final average power and the maximum squared element value.
+MEDIA_EXPORT std::pair<float, float> EWMAAndMaxPower(
+ float initial_value, const float src[], int len, float smoothing_factor);
+
} // namespace vector_math
} // namespace media
diff --git a/media/base/vector_math_perftest.cc b/media/base/vector_math_perftest.cc
index 78699c3d70..9742f2e953 100644
--- a/media/base/vector_math_perftest.cc
+++ b/media/base/vector_math_perftest.cc
@@ -17,6 +17,7 @@ using std::fill;
namespace media {
static const int kBenchmarkIterations = 200000;
+static const int kEWMABenchmarkIterations = 50000;
static const float kScale = 0.5;
static const int kVectorSize = 8192;
@@ -43,12 +44,32 @@ class VectorMathPerfTest : public testing::Test {
kVectorSize - (aligned ? 0 : 1),
output_vector_.get());
}
- double total_time_seconds = (TimeTicks::HighResNow() - start).InSecondsF();
+ double total_time_milliseconds =
+ (TimeTicks::HighResNow() - start).InMillisecondsF();
perf_test::PrintResult(test_name,
"",
trace_name,
- kBenchmarkIterations / total_time_seconds,
- "runs/s",
+ kBenchmarkIterations / total_time_milliseconds,
+ "runs/ms",
+ true);
+ }
+
+ void RunBenchmark(
+ std::pair<float, float> (*fn)(float, const float[], int, float),
+ int len,
+ const std::string& test_name,
+ const std::string& trace_name) {
+ TimeTicks start = TimeTicks::HighResNow();
+ for (int i = 0; i < kEWMABenchmarkIterations; ++i) {
+ fn(0.5f, input_vector_.get(), len, 0.1f);
+ }
+ double total_time_milliseconds =
+ (TimeTicks::HighResNow() - start).InMillisecondsF();
+ perf_test::PrintResult(test_name,
+ "",
+ trace_name,
+ kEWMABenchmarkIterations / total_time_milliseconds,
+ "runs/ms",
true);
}
@@ -110,15 +131,51 @@ TEST_F(VectorMathPerfTest, FMUL) {
ASSERT_NE((kVectorSize - 1) % (vector_math::kRequiredAlignment /
sizeof(float)), 0U);
RunBenchmark(
- vector_math::FMUL_FUNC, false, "vector_math_fmac", "optimized_unaligned");
+ vector_math::FMUL_FUNC, false, "vector_math_fmul", "optimized_unaligned");
// Benchmark FMUL_FUNC() with aligned size.
ASSERT_EQ(kVectorSize % (vector_math::kRequiredAlignment / sizeof(float)),
0U);
RunBenchmark(
- vector_math::FMUL_FUNC, true, "vector_math_fmac", "optimized_aligned");
+ vector_math::FMUL_FUNC, true, "vector_math_fmul", "optimized_aligned");
#endif
}
#undef FMUL_FUNC
+#if defined(ARCH_CPU_X86_FAMILY)
+#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_SSE
+#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_NEON
+#endif
+
+// Benchmark for each optimized vector_math::EWMAAndMaxPower() method.
+TEST_F(VectorMathPerfTest, EWMAAndMaxPower) {
+ // Benchmark EWMAAndMaxPower_C().
+ RunBenchmark(vector_math::EWMAAndMaxPower_C,
+ kVectorSize,
+ "vector_math_ewma_and_max_power",
+ "unoptimized");
+#if defined(EWMAAndMaxPower_FUNC)
+#if defined(ARCH_CPU_X86_FAMILY)
+ ASSERT_TRUE(base::CPU().has_sse());
+#endif
+ // Benchmark EWMAAndMaxPower_FUNC() with unaligned size.
+ ASSERT_NE((kVectorSize - 1) % (vector_math::kRequiredAlignment /
+ sizeof(float)), 0U);
+ RunBenchmark(vector_math::EWMAAndMaxPower_FUNC,
+ kVectorSize - 1,
+ "vector_math_ewma_and_max_power",
+ "optimized_unaligned");
+ // Benchmark EWMAAndMaxPower_FUNC() with aligned size.
+ ASSERT_EQ(kVectorSize % (vector_math::kRequiredAlignment / sizeof(float)),
+ 0U);
+ RunBenchmark(vector_math::EWMAAndMaxPower_FUNC,
+ kVectorSize,
+ "vector_math_ewma_and_max_power",
+ "optimized_aligned");
+#endif
+}
+
+#undef EWMAAndMaxPower_FUNC
+
} // namespace media
diff --git a/media/base/vector_math_testing.h b/media/base/vector_math_testing.h
index 02d14f807c..b0b304409d 100644
--- a/media/base/vector_math_testing.h
+++ b/media/base/vector_math_testing.h
@@ -5,6 +5,8 @@
#ifndef MEDIA_BASE_VECTOR_MATH_TESTING_H_
#define MEDIA_BASE_VECTOR_MATH_TESTING_H_
+#include <utility>
+
#include "build/build_config.h"
#include "media/base/media_export.h"
@@ -14,12 +16,16 @@ namespace vector_math {
// Optimized versions exposed for testing. See vector_math.h for details.
MEDIA_EXPORT void FMAC_C(const float src[], float scale, int len, float dest[]);
MEDIA_EXPORT void FMUL_C(const float src[], float scale, int len, float dest[]);
+MEDIA_EXPORT std::pair<float, float> EWMAAndMaxPower_C(
+ float initial_value, const float src[], int len, float smoothing_factor);
#if defined(ARCH_CPU_X86_FAMILY)
MEDIA_EXPORT void FMAC_SSE(const float src[], float scale, int len,
float dest[]);
MEDIA_EXPORT void FMUL_SSE(const float src[], float scale, int len,
float dest[]);
+MEDIA_EXPORT std::pair<float, float> EWMAAndMaxPower_SSE(
+ float initial_value, const float src[], int len, float smoothing_factor);
#endif
#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
@@ -27,6 +33,8 @@ MEDIA_EXPORT void FMAC_NEON(const float src[], float scale, int len,
float dest[]);
MEDIA_EXPORT void FMUL_NEON(const float src[], float scale, int len,
float dest[]);
+MEDIA_EXPORT std::pair<float, float> EWMAAndMaxPower_NEON(
+ float initial_value, const float src[], int len, float smoothing_factor);
#endif
} // namespace vector_math
diff --git a/media/base/vector_math_unittest.cc b/media/base/vector_math_unittest.cc
index 32e5ea468c..f8278ce1b5 100644
--- a/media/base/vector_math_unittest.cc
+++ b/media/base/vector_math_unittest.cc
@@ -138,4 +138,248 @@ TEST_F(VectorMathTest, FMUL) {
#endif
}
+namespace {
+
+class EWMATestScenario {
+ public:
+ EWMATestScenario(float initial_value, const float src[], int len,
+ float smoothing_factor)
+ : initial_value_(initial_value),
+ data_(static_cast<float*>(
+ len == 0 ? NULL :
+ base::AlignedAlloc(len * sizeof(float),
+ vector_math::kRequiredAlignment))),
+ data_len_(len),
+ smoothing_factor_(smoothing_factor),
+ expected_final_avg_(initial_value),
+ expected_max_(0.0f) {
+ if (data_len_ > 0)
+ memcpy(data_.get(), src, len * sizeof(float));
+ }
+
+ // Copy constructor and assignment operator for ::testing::Values(...).
+ EWMATestScenario(const EWMATestScenario& other) { *this = other; }
+ EWMATestScenario& operator=(const EWMATestScenario& other) {
+ this->initial_value_ = other.initial_value_;
+ this->smoothing_factor_ = other.smoothing_factor_;
+ if (other.data_len_ == 0) {
+ this->data_.reset();
+ } else {
+ this->data_.reset(static_cast<float*>(
+ base::AlignedAlloc(other.data_len_ * sizeof(float),
+ vector_math::kRequiredAlignment)));
+ memcpy(this->data_.get(), other.data_.get(),
+ other.data_len_ * sizeof(float));
+ }
+ this->data_len_ = other.data_len_;
+ this->expected_final_avg_ = other.expected_final_avg_;
+ this->expected_max_ = other.expected_max_;
+ return *this;
+ }
+
+ EWMATestScenario ScaledBy(float scale) const {
+ EWMATestScenario result(*this);
+ float* p = result.data_.get();
+ float* const p_end = p + result.data_len_;
+ for (; p < p_end; ++p)
+ *p *= scale;
+ return result;
+ }
+
+ EWMATestScenario WithImpulse(float value, int offset) const {
+ EWMATestScenario result(*this);
+ result.data_.get()[offset] = value;
+ return result;
+ }
+
+ EWMATestScenario HasExpectedResult(float final_avg_value,
+ float max_value) const {
+ EWMATestScenario result(*this);
+ result.expected_final_avg_ = final_avg_value;
+ result.expected_max_ = max_value;
+ return result;
+ }
+
+ void RunTest() const {
+ {
+ SCOPED_TRACE("EWMAAndMaxPower");
+ const std::pair<float, float>& result = vector_math::EWMAAndMaxPower(
+ initial_value_, data_.get(), data_len_, smoothing_factor_);
+ EXPECT_NEAR(expected_final_avg_, result.first, 0.0000001f);
+ EXPECT_NEAR(expected_max_, result.second, 0.0000001f);
+ }
+
+ {
+ SCOPED_TRACE("EWMAAndMaxPower_C");
+ const std::pair<float, float>& result = vector_math::EWMAAndMaxPower_C(
+ initial_value_, data_.get(), data_len_, smoothing_factor_);
+ EXPECT_NEAR(expected_final_avg_, result.first, 0.0000001f);
+ EXPECT_NEAR(expected_max_, result.second, 0.0000001f);
+ }
+
+#if defined(ARCH_CPU_X86_FAMILY)
+ {
+ ASSERT_TRUE(base::CPU().has_sse());
+ SCOPED_TRACE("EWMAAndMaxPower_SSE");
+ const std::pair<float, float>& result = vector_math::EWMAAndMaxPower_SSE(
+ initial_value_, data_.get(), data_len_, smoothing_factor_);
+ EXPECT_NEAR(expected_final_avg_, result.first, 0.0000001f);
+ EXPECT_NEAR(expected_max_, result.second, 0.0000001f);
+ }
+#endif
+
+#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+ {
+ SCOPED_TRACE("EWMAAndMaxPower_NEON");
+ const std::pair<float, float>& result = vector_math::EWMAAndMaxPower_NEON(
+ initial_value_, data_.get(), data_len_, smoothing_factor_);
+ EXPECT_NEAR(expected_final_avg_, result.first, 0.0000001f);
+ EXPECT_NEAR(expected_max_, result.second, 0.0000001f);
+ }
+#endif
+ }
+
+ private:
+ float initial_value_;
+ scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data_;
+ int data_len_;
+ float smoothing_factor_;
+ float expected_final_avg_;
+ float expected_max_;
+};
+
+} // namespace
+
+typedef testing::TestWithParam<EWMATestScenario> VectorMathEWMAAndMaxPowerTest;
+
+TEST_P(VectorMathEWMAAndMaxPowerTest, Correctness) {
+ GetParam().RunTest();
+}
+
+static const float kZeros[] = { // 32 zeros
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static const float kOnes[] = { // 32 ones
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+};
+
+static const float kCheckerboard[] = { // 32 alternating 0, 1
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
+};
+
+static const float kInverseCheckerboard[] = { // 32 alternating 1, 0
+ 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,
+ 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0
+};
+
+INSTANTIATE_TEST_CASE_P(
+ Scenarios, VectorMathEWMAAndMaxPowerTest,
+ ::testing::Values(
+ // Zero-length input: Result should equal initial value.
+ EWMATestScenario(0.0f, NULL, 0, 0.0f).HasExpectedResult(0.0f, 0.0f),
+ EWMATestScenario(1.0f, NULL, 0, 0.0f).HasExpectedResult(1.0f, 0.0f),
+
+ // Smoothing factor of zero: Samples have no effect on result.
+ EWMATestScenario(0.0f, kOnes, 32, 0.0f).HasExpectedResult(0.0f, 1.0f),
+ EWMATestScenario(1.0f, kZeros, 32, 0.0f).HasExpectedResult(1.0f, 0.0f),
+
+ // Smothing factor of one: Result = last sample squared.
+ EWMATestScenario(0.0f, kCheckerboard, 32, 1.0f)
+ .ScaledBy(2.0f)
+ .HasExpectedResult(4.0f, 4.0f),
+ EWMATestScenario(1.0f, kInverseCheckerboard, 32, 1.0f)
+ .ScaledBy(2.0f)
+ .HasExpectedResult(0.0f, 4.0f),
+
+ // Smoothing factor of 1/4, muted signal.
+ EWMATestScenario(1.0f, kZeros, 1, 0.25f)
+ .HasExpectedResult(powf(0.75, 1.0f), 0.0f),
+ EWMATestScenario(1.0f, kZeros, 2, 0.25f)
+ .HasExpectedResult(powf(0.75, 2.0f), 0.0f),
+ EWMATestScenario(1.0f, kZeros, 3, 0.25f)
+ .HasExpectedResult(powf(0.75, 3.0f), 0.0f),
+ EWMATestScenario(1.0f, kZeros, 12, 0.25f)
+ .HasExpectedResult(powf(0.75, 12.0f), 0.0f),
+ EWMATestScenario(1.0f, kZeros, 13, 0.25f)
+ .HasExpectedResult(powf(0.75, 13.0f), 0.0f),
+ EWMATestScenario(1.0f, kZeros, 14, 0.25f)
+ .HasExpectedResult(powf(0.75, 14.0f), 0.0f),
+ EWMATestScenario(1.0f, kZeros, 15, 0.25f)
+ .HasExpectedResult(powf(0.75, 15.0f), 0.0f),
+
+ // Smoothing factor of 1/4, constant full-amplitude signal.
+ EWMATestScenario(0.0f, kOnes, 1, 0.25f).HasExpectedResult(0.25f, 1.0f),
+ EWMATestScenario(0.0f, kOnes, 2, 0.25f)
+ .HasExpectedResult(0.4375f, 1.0f),
+ EWMATestScenario(0.0f, kOnes, 3, 0.25f)
+ .HasExpectedResult(0.578125f, 1.0f),
+ EWMATestScenario(0.0f, kOnes, 12, 0.25f)
+ .HasExpectedResult(0.96832365f, 1.0f),
+ EWMATestScenario(0.0f, kOnes, 13, 0.25f)
+ .HasExpectedResult(0.97624274f, 1.0f),
+ EWMATestScenario(0.0f, kOnes, 14, 0.25f)
+ .HasExpectedResult(0.98218205f, 1.0f),
+ EWMATestScenario(0.0f, kOnes, 15, 0.25f)
+ .HasExpectedResult(0.98663654f, 1.0f),
+
+ // Smoothing factor of 1/4, checkerboard signal.
+ EWMATestScenario(0.0f, kCheckerboard, 1, 0.25f)
+ .HasExpectedResult(0.0f, 0.0f),
+ EWMATestScenario(0.0f, kCheckerboard, 2, 0.25f)
+ .HasExpectedResult(0.25f, 1.0f),
+ EWMATestScenario(0.0f, kCheckerboard, 3, 0.25f)
+ .HasExpectedResult(0.1875f, 1.0f),
+ EWMATestScenario(0.0f, kCheckerboard, 12, 0.25f)
+ .HasExpectedResult(0.55332780f, 1.0f),
+ EWMATestScenario(0.0f, kCheckerboard, 13, 0.25f)
+ .HasExpectedResult(0.41499585f, 1.0f),
+ EWMATestScenario(0.0f, kCheckerboard, 14, 0.25f)
+ .HasExpectedResult(0.56124689f, 1.0f),
+ EWMATestScenario(0.0f, kCheckerboard, 15, 0.25f)
+ .HasExpectedResult(0.42093517f, 1.0f),
+
+ // Smoothing factor of 1/4, inverse checkerboard signal.
+ EWMATestScenario(0.0f, kInverseCheckerboard, 1, 0.25f)
+ .HasExpectedResult(0.25f, 1.0f),
+ EWMATestScenario(0.0f, kInverseCheckerboard, 2, 0.25f)
+ .HasExpectedResult(0.1875f, 1.0f),
+ EWMATestScenario(0.0f, kInverseCheckerboard, 3, 0.25f)
+ .HasExpectedResult(0.390625f, 1.0f),
+ EWMATestScenario(0.0f, kInverseCheckerboard, 12, 0.25f)
+ .HasExpectedResult(0.41499585f, 1.0f),
+ EWMATestScenario(0.0f, kInverseCheckerboard, 13, 0.25f)
+ .HasExpectedResult(0.56124689f, 1.0f),
+ EWMATestScenario(0.0f, kInverseCheckerboard, 14, 0.25f)
+ .HasExpectedResult(0.42093517f, 1.0f),
+ EWMATestScenario(0.0f, kInverseCheckerboard, 15, 0.25f)
+ .HasExpectedResult(0.56570137f, 1.0f),
+
+ // Smoothing factor of 1/4, impluse signal.
+ EWMATestScenario(0.0f, kZeros, 3, 0.25f)
+ .WithImpulse(2.0f, 0)
+ .HasExpectedResult(0.562500f, 4.0f),
+ EWMATestScenario(0.0f, kZeros, 3, 0.25f)
+ .WithImpulse(2.0f, 1)
+ .HasExpectedResult(0.75f, 4.0f),
+ EWMATestScenario(0.0f, kZeros, 3, 0.25f)
+ .WithImpulse(2.0f, 2)
+ .HasExpectedResult(1.0f, 4.0f),
+ EWMATestScenario(0.0f, kZeros, 32, 0.25f)
+ .WithImpulse(2.0f, 0)
+ .HasExpectedResult(0.00013394f, 4.0f),
+ EWMATestScenario(0.0f, kZeros, 32, 0.25f)
+ .WithImpulse(2.0f, 1)
+ .HasExpectedResult(0.00017858f, 4.0f),
+ EWMATestScenario(0.0f, kZeros, 32, 0.25f)
+ .WithImpulse(2.0f, 2)
+ .HasExpectedResult(0.00023811f, 4.0f),
+ EWMATestScenario(0.0f, kZeros, 32, 0.25f)
+ .WithImpulse(2.0f, 3)
+ .HasExpectedResult(0.00031748f, 4.0f)
+ ));
+
} // namespace media
diff --git a/media/base/video_decoder_config.cc b/media/base/video_decoder_config.cc
index 015ae09066..82d607526b 100644
--- a/media/base/video_decoder_config.cc
+++ b/media/base/video_decoder_config.cc
@@ -76,6 +76,8 @@ void VideoDecoderConfig::Initialize(VideoCodec codec,
UmaHistogramAspectRatio("Media.VideoCodedAspectRatio", coded_size);
UMA_HISTOGRAM_COUNTS_10000("Media.VideoVisibleWidth", visible_rect.width());
UmaHistogramAspectRatio("Media.VideoVisibleAspectRatio", visible_rect);
+ UMA_HISTOGRAM_ENUMERATION(
+ "Media.VideoPixelFormat", format, VideoFrame::HISTOGRAM_MAX);
}
codec_ = codec;
diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc
index 9c07251e59..3541e6efa4 100644
--- a/media/base/video_frame.cc
+++ b/media/base/video_frame.cc
@@ -26,11 +26,8 @@ scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
base::TimeDelta timestamp) {
DCHECK(IsValidConfig(format, coded_size, visible_rect, natural_size));
scoped_refptr<VideoFrame> frame(new VideoFrame(
- format, coded_size, visible_rect, natural_size, timestamp));
+ format, coded_size, visible_rect, natural_size, timestamp, false));
switch (format) {
- case VideoFrame::RGB32:
- frame->AllocateRGB(4u);
- break;
case VideoFrame::YV12:
case VideoFrame::YV12A:
case VideoFrame::YV16:
@@ -48,14 +45,10 @@ std::string VideoFrame::FormatToString(VideoFrame::Format format) {
switch (format) {
case VideoFrame::UNKNOWN:
return "UNKNOWN";
- case VideoFrame::RGB32:
- return "RGB32";
case VideoFrame::YV12:
return "YV12";
case VideoFrame::YV16:
return "YV16";
- case VideoFrame::EMPTY:
- return "EMPTY";
case VideoFrame::I420:
return "I420";
case VideoFrame::NATIVE_TEXTURE:
@@ -66,6 +59,8 @@ std::string VideoFrame::FormatToString(VideoFrame::Format format) {
#endif
case VideoFrame::YV12A:
return "YV12A";
+ case VideoFrame::HISTOGRAM_MAX:
+ return "HISTOGRAM_MAX";
}
NOTREACHED() << "Invalid videoframe format provided: " << format;
return "";
@@ -101,8 +96,12 @@ scoped_refptr<VideoFrame> VideoFrame::WrapNativeTexture(
base::TimeDelta timestamp,
const ReadPixelsCB& read_pixels_cb,
const base::Closure& no_longer_needed_cb) {
- scoped_refptr<VideoFrame> frame(new VideoFrame(
- NATIVE_TEXTURE, coded_size, visible_rect, natural_size, timestamp));
+ scoped_refptr<VideoFrame> frame(new VideoFrame(NATIVE_TEXTURE,
+ coded_size,
+ visible_rect,
+ natural_size,
+ timestamp,
+ false));
frame->texture_mailbox_holder_ = mailbox_holder;
frame->texture_target_ = texture_target;
frame->read_pixels_cb_ = read_pixels_cb;
@@ -118,7 +117,7 @@ void VideoFrame::ReadPixelsFromNativeTexture(const SkBitmap& pixels) {
}
// static
-scoped_refptr<VideoFrame> VideoFrame::WrapExternalSharedMemory(
+scoped_refptr<VideoFrame> VideoFrame::WrapExternalPackedMemory(
Format format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -134,7 +133,7 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalSharedMemory(
switch (format) {
case I420: {
scoped_refptr<VideoFrame> frame(new VideoFrame(
- format, coded_size, visible_rect, natural_size, timestamp));
+ format, coded_size, visible_rect, natural_size, timestamp, false));
frame->shared_memory_handle_ = handle;
frame->strides_[kYPlane] = coded_size.width();
frame->strides_[kUPlane] = coded_size.width() / 2;
@@ -167,7 +166,7 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
const base::Closure& no_longer_needed_cb) {
DCHECK(format == YV12 || format == YV16 || format == I420) << format;
scoped_refptr<VideoFrame> frame(new VideoFrame(
- format, coded_size, visible_rect, natural_size, timestamp));
+ format, coded_size, visible_rect, natural_size, timestamp, false));
frame->strides_[kYPlane] = y_stride;
frame->strides_[kUPlane] = u_stride;
frame->strides_[kVPlane] = v_stride;
@@ -179,10 +178,13 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
}
// static
-scoped_refptr<VideoFrame> VideoFrame::CreateEmptyFrame() {
- return new VideoFrame(
- VideoFrame::EMPTY, gfx::Size(), gfx::Rect(), gfx::Size(),
- base::TimeDelta());
+scoped_refptr<VideoFrame> VideoFrame::CreateEOSFrame() {
+ return new VideoFrame(VideoFrame::UNKNOWN,
+ gfx::Size(),
+ gfx::Rect(),
+ gfx::Size(),
+ kNoTimestamp(),
+ true);
}
// static
@@ -218,7 +220,7 @@ scoped_refptr<VideoFrame> VideoFrame::CreateHoleFrame(
const gfx::Size& size) {
DCHECK(IsValidConfig(VideoFrame::HOLE, size, gfx::Rect(size), size));
scoped_refptr<VideoFrame> frame(new VideoFrame(
- VideoFrame::HOLE, size, gfx::Rect(size), size, base::TimeDelta()));
+ VideoFrame::HOLE, size, gfx::Rect(size), size, base::TimeDelta(), false));
return frame;
}
#endif
@@ -231,16 +233,14 @@ size_t VideoFrame::NumPlanes(Format format) {
case VideoFrame::HOLE:
#endif
return 0;
- case VideoFrame::RGB32:
- return 1;
case VideoFrame::YV12:
case VideoFrame::YV16:
case VideoFrame::I420:
return 3;
case VideoFrame::YV12A:
return 4;
- case VideoFrame::EMPTY:
case VideoFrame::UNKNOWN:
+ case VideoFrame::HISTOGRAM_MAX:
break;
}
NOTREACHED() << "Unsupported video frame format: " << format;
@@ -255,58 +255,73 @@ static inline size_t RoundUp(size_t value, size_t alignment) {
// static
size_t VideoFrame::AllocationSize(Format format, const gfx::Size& coded_size) {
+ size_t total = 0;
+ for (size_t i = 0; i < NumPlanes(format); ++i)
+ total += PlaneAllocationSize(format, i, coded_size);
+ return total;
+}
+
+// static
+size_t VideoFrame::PlaneAllocationSize(Format format,
+ size_t plane,
+ const gfx::Size& coded_size) {
+ const size_t area =
+ RoundUp(coded_size.width(), 2) * RoundUp(coded_size.height(), 2);
switch (format) {
- case VideoFrame::RGB32:
- return coded_size.GetArea() * 4;
case VideoFrame::YV12:
case VideoFrame::I420: {
- const size_t rounded_size =
- RoundUp(coded_size.width(), 2) * RoundUp(coded_size.height(), 2);
- return rounded_size * 3 / 2;
+ switch (plane) {
+ case VideoFrame::kYPlane:
+ return area;
+ case VideoFrame::kUPlane:
+ case VideoFrame::kVPlane:
+ return area / 4;
+ default:
+ break;
+ }
}
case VideoFrame::YV12A: {
- const size_t rounded_size =
- RoundUp(coded_size.width(), 2) * RoundUp(coded_size.height(), 2);
- return rounded_size * 5 / 2;
+ switch (plane) {
+ case VideoFrame::kYPlane:
+ case VideoFrame::kAPlane:
+ return area;
+ case VideoFrame::kUPlane:
+ case VideoFrame::kVPlane:
+ return area / 4;
+ default:
+ break;
+ }
}
case VideoFrame::YV16: {
- const size_t rounded_size =
- RoundUp(coded_size.width(), 2) * RoundUp(coded_size.height(), 2);
- return rounded_size * 2;
+ switch (plane) {
+ case VideoFrame::kYPlane:
+ return area;
+ case VideoFrame::kUPlane:
+ case VideoFrame::kVPlane:
+ return area / 2;
+ default:
+ break;
+ }
}
case VideoFrame::UNKNOWN:
- case VideoFrame::EMPTY:
case VideoFrame::NATIVE_TEXTURE:
+ case VideoFrame::HISTOGRAM_MAX:
#if defined(GOOGLE_TV)
case VideoFrame::HOLE:
#endif
break;
}
- NOTREACHED() << "Unsupported video frame format: " << format;
+ NOTREACHED() << "Unsupported video frame format/plane: "
+ << format << "/" << plane;
return 0;
}
-// Release data allocated by AllocateRGB() or AllocateYUV().
+// Release data allocated by AllocateYUV().
static void ReleaseData(uint8* data) {
DCHECK(data);
base::AlignedFree(data);
}
-void VideoFrame::AllocateRGB(size_t bytes_per_pixel) {
- // Round up to align at least at a 16-byte boundary for each row.
- // This is sufficient for MMX and SSE2 reads (movq/movdqa).
- size_t bytes_per_row = RoundUp(coded_size_.width(),
- kFrameSizeAlignment) * bytes_per_pixel;
- size_t aligned_height = RoundUp(coded_size_.height(), kFrameSizeAlignment);
- strides_[VideoFrame::kRGBPlane] = bytes_per_row;
- data_[VideoFrame::kRGBPlane] = reinterpret_cast<uint8*>(
- base::AlignedAlloc(bytes_per_row * aligned_height + kFrameSizePadding,
- kFrameAddressAlignment));
- no_longer_needed_cb_ = base::Bind(&ReleaseData, data_[VideoFrame::kRGBPlane]);
- DCHECK(!(reinterpret_cast<intptr_t>(data_[VideoFrame::kRGBPlane]) & 7));
- COMPILE_ASSERT(0 == VideoFrame::kRGBPlane, RGB_data_must_be_index_0);
-}
-
void VideoFrame::AllocateYUV() {
DCHECK(format_ == VideoFrame::YV12 || format_ == VideoFrame::YV16 ||
format_ == VideoFrame::YV12A || format_ == VideoFrame::I420);
@@ -364,14 +379,16 @@ VideoFrame::VideoFrame(VideoFrame::Format format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
- base::TimeDelta timestamp)
+ base::TimeDelta timestamp,
+ bool end_of_stream)
: format_(format),
coded_size_(coded_size),
visible_rect_(visible_rect),
natural_size_(natural_size),
texture_target_(0),
shared_memory_handle_(base::SharedMemory::NULLHandle()),
- timestamp_(timestamp) {
+ timestamp_(timestamp),
+ end_of_stream_(end_of_stream) {
memset(&strides_, 0, sizeof(strides_));
memset(&data_, 0, sizeof(data_));
}
@@ -394,10 +411,6 @@ int VideoFrame::row_bytes(size_t plane) const {
DCHECK(IsValidPlane(plane));
int width = coded_size_.width();
switch (format_) {
- // 32bpp.
- case RGB32:
- return width * 4;
-
// Planar, 8bpp.
case YV12A:
if (plane == kAPlane)
@@ -423,7 +436,6 @@ int VideoFrame::rows(size_t plane) const {
DCHECK(IsValidPlane(plane));
int height = coded_size_.height();
switch (format_) {
- case RGB32:
case YV16:
return height;
@@ -466,10 +478,6 @@ base::SharedMemoryHandle VideoFrame::shared_memory_handle() const {
return shared_memory_handle_;
}
-bool VideoFrame::IsEndOfStream() const {
- return format_ == VideoFrame::EMPTY;
-}
-
void VideoFrame::HashFrameForTesting(base::MD5Context* context) {
for (int plane = 0; plane < kMaxPlanes; ++plane) {
if (!IsValidPlane(plane))
diff --git a/media/base/video_frame.h b/media/base/video_frame.h
index fa8aa96bc0..ed554a1b39 100644
--- a/media/base/video_frame.h
+++ b/media/base/video_frame.h
@@ -28,8 +28,6 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
enum {
kMaxPlanes = 4,
- kRGBPlane = 0,
-
kYPlane = 0,
kUPlane = 1,
kVPlane = 2,
@@ -39,18 +37,18 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// Surface formats roughly based on FOURCC labels, see:
// http://www.fourcc.org/rgb.php
// http://www.fourcc.org/yuv.php
+ // Logged to UMA, so never reuse values.
enum Format {
UNKNOWN = 0, // Unknown format value.
- RGB32 = 4, // 32bpp RGB packed with extra byte 8:8:8
- YV12 = 6, // 12bpp YVU planar 1x1 Y, 2x2 VU samples
- YV16 = 7, // 16bpp YVU planar 1x1 Y, 2x1 VU samples
- EMPTY = 9, // An empty frame.
- I420 = 11, // 12bpp YVU planar 1x1 Y, 2x2 UV samples.
- NATIVE_TEXTURE = 12, // Native texture. Pixel-format agnostic.
+ YV12 = 1, // 12bpp YVU planar 1x1 Y, 2x2 VU samples
+ YV16 = 2, // 16bpp YVU planar 1x1 Y, 2x1 VU samples
+ I420 = 3, // 12bpp YVU planar 1x1 Y, 2x2 UV samples.
+ YV12A = 4, // 20bpp YUVA planar 1x1 Y, 2x2 VU, 1x1 A samples.
#if defined(GOOGLE_TV)
- HOLE = 13, // Hole frame.
+ HOLE = 5, // Hole frame.
#endif
- YV12A = 14, // 20bpp YUVA planar 1x1 Y, 2x2 VU, 1x1 A samples.
+ NATIVE_TEXTURE = 6, // Native texture. Pixel-format agnostic.
+ HISTOGRAM_MAX, // Must always be greatest.
};
// Returns the name of a Format as a string.
@@ -137,12 +135,13 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// least as large as 4*visible_rect().width()*visible_rect().height().
void ReadPixelsFromNativeTexture(const SkBitmap& pixels);
- // Wraps image data in a buffer backed by a base::SharedMemoryHandle with a
- // VideoFrame. The image data resides in |data| and is assumed to be packed
- // tightly in a buffer of logical dimensions |coded_size| with the appropriate
- // bit depth and plane count as given by |format|. When the frame is
- // destroyed |no_longer_needed_cb.Run()| will be called.
- static scoped_refptr<VideoFrame> WrapExternalSharedMemory(
+ // Wraps packed image data residing in a memory buffer with a VideoFrame.
+ // The image data resides in |data| and is assumed to be packed tightly in a
+ // buffer of logical dimensions |coded_size| with the appropriate bit depth
+ // and plane count as given by |format|. The shared memory handle of the
+ // backing allocation, if present, can be passed in with |handle|. When the
+ // frame is destroyed, |no_longer_needed_cb.Run()| will be called.
+ static scoped_refptr<VideoFrame> WrapExternalPackedMemory(
Format format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -172,9 +171,8 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
base::TimeDelta timestamp,
const base::Closure& no_longer_needed_cb);
- // Creates a frame with format equals to VideoFrame::EMPTY, width, height,
- // and timestamp are all 0.
- static scoped_refptr<VideoFrame> CreateEmptyFrame();
+ // Creates a frame which indicates end-of-stream.
+ static scoped_refptr<VideoFrame> CreateEOSFrame();
// Allocates YV12 frame based on |size|, and sets its data to the YUV(y,u,v).
static scoped_refptr<VideoFrame> CreateColorFrame(
@@ -197,6 +195,12 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// given coded size and format.
static size_t AllocationSize(Format format, const gfx::Size& coded_size);
+ // Returns the required allocation size for a (tightly packed) plane of the
+ // given coded size and format.
+ static size_t PlaneAllocationSize(Format format,
+ size_t plane,
+ const gfx::Size& coded_size);
+
Format format() const { return format_; }
const gfx::Size& coded_size() const { return coded_size_; }
@@ -228,7 +232,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
base::SharedMemoryHandle shared_memory_handle() const;
// Returns true if this VideoFrame represents the end of the stream.
- bool IsEndOfStream() const;
+ bool end_of_stream() const { return end_of_stream_; }
base::TimeDelta GetTimestamp() const {
return timestamp_;
@@ -248,11 +252,10 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
- base::TimeDelta timestamp);
+ base::TimeDelta timestamp,
+ bool end_of_stream);
virtual ~VideoFrame();
- // Used internally by CreateFrame().
- void AllocateRGB(size_t bytes_per_pixel);
void AllocateYUV();
// Used to DCHECK() plane parameters.
@@ -291,6 +294,8 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
base::TimeDelta timestamp_;
+ const bool end_of_stream_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoFrame);
};
diff --git a/media/base/video_frame_unittest.cc b/media/base/video_frame_unittest.cc
index b88d20c363..20210e56a3 100644
--- a/media/base/video_frame_unittest.cc
+++ b/media/base/video_frame_unittest.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/format_macros.h"
+#include "base/memory/aligned_memory.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/stringprintf.h"
#include "media/base/buffers.h"
@@ -46,40 +47,41 @@ void ExpectFrameColor(media::VideoFrame* yv12_frame, uint32 expect_rgb_color) {
ASSERT_EQ(VideoFrame::YV12, yv12_frame->format());
ASSERT_EQ(yv12_frame->stride(VideoFrame::kUPlane),
yv12_frame->stride(VideoFrame::kVPlane));
-
- scoped_refptr<media::VideoFrame> rgb_frame;
- rgb_frame = media::VideoFrame::CreateFrame(VideoFrame::RGB32,
- yv12_frame->coded_size(),
- yv12_frame->visible_rect(),
- yv12_frame->natural_size(),
- yv12_frame->GetTimestamp());
-
- ASSERT_EQ(yv12_frame->coded_size().width(),
- rgb_frame->coded_size().width());
- ASSERT_EQ(yv12_frame->coded_size().height(),
- rgb_frame->coded_size().height());
+ ASSERT_EQ(
+ yv12_frame->coded_size().width() & (VideoFrame::kFrameSizeAlignment - 1),
+ 0);
+ ASSERT_EQ(
+ yv12_frame->coded_size().height() & (VideoFrame::kFrameSizeAlignment - 1),
+ 0);
+
+ size_t bytes_per_row = yv12_frame->coded_size().width() * 4u;
+ uint8* rgb_data = reinterpret_cast<uint8*>(
+ base::AlignedAlloc(bytes_per_row * yv12_frame->coded_size().height() +
+ VideoFrame::kFrameSizePadding,
+ VideoFrame::kFrameAddressAlignment));
media::ConvertYUVToRGB32(yv12_frame->data(VideoFrame::kYPlane),
yv12_frame->data(VideoFrame::kUPlane),
yv12_frame->data(VideoFrame::kVPlane),
- rgb_frame->data(VideoFrame::kRGBPlane),
- rgb_frame->coded_size().width(),
- rgb_frame->coded_size().height(),
+ rgb_data,
+ yv12_frame->coded_size().width(),
+ yv12_frame->coded_size().height(),
yv12_frame->stride(VideoFrame::kYPlane),
yv12_frame->stride(VideoFrame::kUPlane),
- rgb_frame->stride(VideoFrame::kRGBPlane),
+ bytes_per_row,
media::YV12);
- for (int row = 0; row < rgb_frame->coded_size().height(); ++row) {
+ for (int row = 0; row < yv12_frame->coded_size().height(); ++row) {
uint32* rgb_row_data = reinterpret_cast<uint32*>(
- rgb_frame->data(VideoFrame::kRGBPlane) +
- (rgb_frame->stride(VideoFrame::kRGBPlane) * row));
- for (int col = 0; col < rgb_frame->coded_size().width(); ++col) {
+ rgb_data + (bytes_per_row * row));
+ for (int col = 0; col < yv12_frame->coded_size().width(); ++col) {
SCOPED_TRACE(
base::StringPrintf("Checking (%d, %d)", row, col));
EXPECT_EQ(expect_rgb_color, rgb_row_data[col]);
}
}
+
+ base::AlignedFree(rgb_data);
}
// Fill each plane to its reported extents and verify accessors report non
@@ -157,8 +159,8 @@ TEST(VideoFrame, CreateFrame) {
EXPECT_EQ(MD5DigestToBase16(digest), "911991d51438ad2e1a40ed5f6fc7c796");
// Test an empty frame.
- frame = VideoFrame::CreateEmptyFrame();
- EXPECT_TRUE(frame->IsEndOfStream());
+ frame = VideoFrame::CreateEOSFrame();
+ EXPECT_TRUE(frame->end_of_stream());
}
TEST(VideoFrame, CreateBlackFrame) {
@@ -173,7 +175,7 @@ TEST(VideoFrame, CreateBlackFrame) {
// Test basic properties.
EXPECT_EQ(0, frame->GetTimestamp().InMicroseconds());
- EXPECT_FALSE(frame->IsEndOfStream());
+ EXPECT_FALSE(frame->end_of_stream());
// Test |frame| properties.
EXPECT_EQ(VideoFrame::YV12, frame->format());
@@ -204,8 +206,6 @@ TEST(VideoFrame, CheckFrameExtents) {
// and the expected hash of all planes if filled with kFillByte (defined in
// ExpectFrameExtents).
ExpectFrameExtents(
- VideoFrame::RGB32, 1, 4, "de6d3d567e282f6a38d478f04fc81fb0");
- ExpectFrameExtents(
VideoFrame::YV12, 3, 1, "71113bdfd4c0de6cf62f48fb74f7a0b1");
ExpectFrameExtents(
VideoFrame::YV16, 3, 1, "9bb99ac3ff350644ebff4d28dc01b461");