author    Torne (Richard Coles) <torne@google.com>    2014-03-18 10:20:56 +0000
committer Torne (Richard Coles) <torne@google.com>    2014-03-18 10:20:56 +0000
commit    a1401311d1ab56c4ed0a474bd38c108f75cb0cd9 (patch)
tree      3437151d9ae1ce20a1e53a0d98c19ca01c786394 /media/base
parent    af5066f1e36c6579e74752647e6c584438f80f94 (diff)
download  chromium_org-a1401311d1ab56c4ed0a474bd38c108f75cb0cd9.tar.gz
Merge from Chromium at DEPS revision 257591
This commit was generated by merge_to_master.py.

Change-Id: I0010df2ec3fbb5d4947cd026de2feb150ce7a6b5
Diffstat (limited to 'media/base')
-rw-r--r--  media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java | 230
-rw-r--r--  media/base/android/java/src/org/chromium/media/MediaDrmBridge.java | 2
-rw-r--r--  media/base/android/java/src/org/chromium/media/UsbMidiDeviceAndroid.java | 15
-rw-r--r--  media/base/android/java/src/org/chromium/media/VideoCapture.java | 59
-rw-r--r--  media/base/android/media_codec_bridge.cc | 24
-rw-r--r--  media/base/android/media_codec_bridge.h | 6
-rw-r--r--  media/base/android/media_drm_bridge.cc | 97
-rw-r--r--  media/base/android/media_drm_bridge.h | 33
-rw-r--r--  media/base/android/media_drm_bridge_unittest.cc | 84
-rw-r--r--  media/base/android/media_player_android.cc | 8
-rw-r--r--  media/base/android/media_player_android.h | 17
-rw-r--r--  media/base/android/media_player_bridge.cc | 24
-rw-r--r--  media/base/android/media_player_bridge.h | 8
-rw-r--r--  media/base/android/media_player_manager.h | 24
-rw-r--r--  media/base/android/media_source_player.cc | 66
-rw-r--r--  media/base/android/media_source_player.h | 11
-rw-r--r--  media/base/android/media_source_player_unittest.cc | 203
-rw-r--r--  media/base/android/video_decoder_job.cc | 27
-rw-r--r--  media/base/android/video_decoder_job.h | 12
-rw-r--r--  media/base/audio_buffer_queue_unittest.cc | 78
-rw-r--r--  media/base/audio_buffer_unittest.cc | 40
-rw-r--r--  media/base/audio_bus.cc | 7
-rw-r--r--  media/base/audio_bus.h | 4
-rw-r--r--  media/base/audio_decoder.cc | 6
-rw-r--r--  media/base/audio_decoder.h | 36
-rw-r--r--  media/base/audio_decoder_config.cc | 12
-rw-r--r--  media/base/audio_decoder_config.h | 7
-rw-r--r--  media/base/audio_splicer.cc | 414
-rw-r--r--  media/base/audio_splicer.h | 77
-rw-r--r--  media/base/audio_splicer_unittest.cc | 458
-rw-r--r--  media/base/audio_timestamp_helper.h | 3
-rw-r--r--  media/base/bind_to_current_loop.h | 8
-rw-r--r--  media/base/bind_to_current_loop.h.pump | 8
-rw-r--r--  media/base/bind_to_current_loop_unittest.cc | 27
-rw-r--r--  media/base/buffers.h | 2
-rw-r--r--  media/base/channel_layout.cc | 4
-rw-r--r--  media/base/channel_layout.h | 6
-rw-r--r--  media/base/channel_mixer.cc | 4
-rw-r--r--  media/base/channel_mixer_unittest.cc | 2
-rw-r--r--  media/base/decoder_buffer_queue.cc | 14
-rw-r--r--  media/base/decoder_buffer_queue.h | 6
-rw-r--r--  media/base/decoder_buffer_queue_unittest.cc | 31
-rw-r--r--  media/base/media_file_checker.cc | 3
-rw-r--r--  media/base/media_keys.h | 2
-rw-r--r--  media/base/media_log.cc | 17
-rw-r--r--  media/base/media_log.h | 1
-rw-r--r--  media/base/media_switches.cc | 15
-rw-r--r--  media/base/media_switches.h | 7
-rw-r--r--  media/base/mock_filters.h | 9
-rw-r--r--  media/base/pipeline_status.h | 3
-rw-r--r--  media/base/sample_format.cc | 4
-rw-r--r--  media/base/sample_format.h | 7
-rw-r--r--  media/base/test_data_util.cc | 16
-rw-r--r--  media/base/test_helpers.cc | 114
-rw-r--r--  media/base/test_helpers.h | 44
-rw-r--r--  media/base/vector_math.cc | 7
-rw-r--r--  media/base/vector_math.h | 2
-rw-r--r--  media/base/vector_math_unittest.cc | 18
-rw-r--r--  media/base/video_decoder.cc | 6
-rw-r--r--  media/base/video_decoder.h | 7
-rw-r--r--  media/base/video_decoder_config.cc | 2
-rw-r--r--  media/base/video_frame.cc | 11
-rw-r--r--  media/base/video_frame.h | 7
-rw-r--r--  media/base/video_frame_pool.cc | 7
-rw-r--r--  media/base/video_frame_pool_unittest.cc | 18
-rw-r--r--  media/base/video_frame_unittest.cc | 36
66 files changed, 1782 insertions, 785 deletions
diff --git a/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java b/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java
index 5fbadca16f..e675883d5c 100644
--- a/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java
+++ b/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java
@@ -40,6 +40,36 @@ class AudioManagerAndroid {
// NOTE: always check in as false.
private static final boolean DEBUG = false;
+ /**
+ * NonThreadSafe is a helper class used to help verify that methods of a
+ * class are called from the same thread.
+ * Inspired by class in package com.google.android.apps.chrome.utilities.
+ * Is only utilized when DEBUG is set to true.
+ */
+ private static class NonThreadSafe {
+ private final Long mThreadId;
+
+ public NonThreadSafe() {
+ if (DEBUG) {
+ mThreadId = Thread.currentThread().getId();
+ } else {
+ // Avoids "Unread field" issue reported by findbugs.
+ mThreadId = 0L;
+ }
+ }
+
+ /**
+ * Checks if the method is called on the valid thread.
+ * Assigns the current thread if no thread was assigned.
+ */
+ public boolean calledOnValidThread() {
+ if (DEBUG) {
+ return mThreadId.equals(Thread.currentThread().getId());
+ }
+ return true;
+ }
+ }
+
private static boolean runningOnJellyBeanOrHigher() {
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN;
}
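
[Editor's note: the debug-only thread checker above is wired in the same way the patch uses it in AudioManagerAndroid further down — construct it on the owning thread, then assert from each entry point. A minimal sketch; the host class here is hypothetical and assumes access to the NonThreadSafe class above:]

    import android.util.Log;

    class ThreadAffineHelper {  // hypothetical host class, for illustration
        private static final String TAG = "ThreadAffineHelper";
        private static final boolean DEBUG = false;  // always check in as false

        // Captures the constructing thread's id (only when DEBUG is true).
        private final NonThreadSafe mNonThreadSafe = new NonThreadSafe();

        public void init() {
            checkIfCalledOnValidThread();
            // ... work that must stay on the creating thread ...
        }

        private void checkIfCalledOnValidThread() {
            if (DEBUG && !mNonThreadSafe.calledOnValidThread()) {
                Log.wtf(TAG, "Method is not called on valid thread!");
            }
        }
    }
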
@@ -69,6 +99,22 @@ class AudioManagerAndroid {
private String name() { return mName; }
}
+ // List of device models which have been vetted for good quality platform
+ // echo cancellation.
+ // NOTE: only add new devices to this list if manual tests have been
+ // performed where the AEC performance is evaluated using e.g. a WebRTC
+ // audio client such as https://apprtc.appspot.com/?r=<ROOM NAME>.
+ private static final String[] SUPPORTED_AEC_MODELS = new String[] {
+ "GT-I9300", // Galaxy S3
+ "GT-I9500", // Galaxy S4
+ "GT-N7105", // Galaxy Note 2
+ "Nexus 4", // Nexus 4
+ "Nexus 5", // Nexus 5
+ "Nexus 7", // Nexus 7
+ "SM-N9005", // Galaxy Note 3
+ "SM-T310", // Galaxy Tab 3 8.0 (WiFi)
+ };
+
// Supported audio device types.
private static final int DEVICE_DEFAULT = -2;
private static final int DEVICE_INVALID = -1;
@@ -135,6 +181,11 @@ class AudioManagerAndroid {
// call to setDevice().
private int mRequestedAudioDevice = DEVICE_INVALID;
+ // This class should be created, initialized and closed on the audio thread
+ // in the audio manager. We use |mNonThreadSafe| to ensure that this is
+ // the case. Only active when |DEBUG| is set to true.
+ private final NonThreadSafe mNonThreadSafe = new NonThreadSafe();
+
// Lock to protect |mAudioDevices| and |mRequestedAudioDevice| which can
// be accessed from the main thread and the audio manager thread.
private final Object mLock = new Object();
@@ -143,7 +194,7 @@ class AudioManagerAndroid {
private boolean[] mAudioDevices = new boolean[DEVICE_COUNT];
private final ContentResolver mContentResolver;
- private SettingsObserver mSettingsObserver = null;
+ private ContentObserver mSettingsObserver = null;
private HandlerThread mSettingsObserverThread = null;
private int mCurrentVolume;
@@ -180,18 +231,15 @@ class AudioManagerAndroid {
*/
@CalledByNative
private void init() {
+ checkIfCalledOnValidThread();
if (DEBUG) logd("init");
+ if (DEBUG) logDeviceInfo();
if (mIsInitialized)
return;
- for (int i = 0; i < DEVICE_COUNT; ++i) {
- mAudioDevices[i] = false;
- }
-
// Initialize audio device list with things we know are always available.
- if (hasEarpiece()) {
- mAudioDevices[DEVICE_EARPIECE] = true;
- }
+ mAudioDevices[DEVICE_EARPIECE] = hasEarpiece();
+ mAudioDevices[DEVICE_WIRED_HEADSET] = hasWiredHeadset();
mAudioDevices[DEVICE_SPEAKERPHONE] = true;
// Register receivers for broadcast intents related to Bluetooth device
@@ -202,22 +250,8 @@ class AudioManagerAndroid {
// removing a wired headset (Intent.ACTION_HEADSET_PLUG).
registerForWiredHeadsetIntentBroadcast();
- // Start observer for volume changes.
- // TODO(henrika): try-catch parts below are added as a test to see if
- // it avoids the crash in init() reported in http://crbug.com/336600.
- // Should be removed if possible when we understand the reason better.
- try {
- mSettingsObserverThread = new HandlerThread("SettingsObserver");
- mSettingsObserverThread.start();
- mSettingsObserver = new SettingsObserver(
- new Handler(mSettingsObserverThread.getLooper()));
- } catch (Exception e) {
- // It is fine to rely on code below here to detect failure by
- // observing mSettingsObserver==null.
- Log.wtf(TAG, "SettingsObserver exception: ", e);
- }
-
mIsInitialized = true;
+
if (DEBUG) reportUpdate();
}
@@ -227,24 +261,12 @@ class AudioManagerAndroid {
*/
@CalledByNative
private void close() {
+ checkIfCalledOnValidThread();
if (DEBUG) logd("close");
if (!mIsInitialized)
return;
- if (mSettingsObserverThread != null) {
- mSettingsObserverThread.quit();
- try {
- mSettingsObserverThread.join();
- } catch (Exception e) {
- Log.wtf(TAG, "HandlerThread.join() exception: ", e);
- }
- mSettingsObserverThread = null;
- }
- if (mContentResolver != null) {
- mContentResolver.unregisterContentObserver(mSettingsObserver);
- mSettingsObserver = null;
- }
-
+ stopObservingVolumeChanges();
unregisterForWiredHeadsetIntentBroadcast();
unregisterBluetoothIntentsIfNeeded();
@@ -286,12 +308,22 @@ class AudioManagerAndroid {
Log.wtf(TAG, "setMode exception: ", e);
logDeviceInfo();
}
+
+ // Start observing volume changes to detect when the
+ // voice/communication stream volume is at its lowest level.
+ // It is only possible to pull down the volume slider to about 20%
+ // of the absolute minimum (slider at far left) in communication
+ // mode but we want to be able to mute it completely.
+ startObservingVolumeChanges();
+
} else {
if (mSavedAudioMode == AudioManager.MODE_INVALID) {
Log.wtf(TAG, "Audio mode has not yet been set!");
return;
}
+ stopObservingVolumeChanges();
+
// Restore previously stored audio states.
setMicrophoneMute(mSavedIsMicrophoneMute);
setSpeakerphoneOn(mSavedIsSpeakerphoneOn);
@@ -354,6 +386,7 @@ class AudioManagerAndroid {
*/
@CalledByNative
private AudioDeviceName[] getAudioInputDeviceNames() {
+ if (DEBUG) logd("getAudioInputDeviceNames");
if (!mIsInitialized)
return null;
boolean devices[] = null;
@@ -442,24 +475,20 @@ class AudioManagerAndroid {
}
@CalledByNative
- public static boolean shouldUseAcousticEchoCanceler() {
+ private static boolean shouldUseAcousticEchoCanceler() {
// AcousticEchoCanceler was added in API level 16 (Jelly Bean).
if (!runningOnJellyBeanOrHigher()) {
return false;
}
- // Next is a list of device models which have been vetted for good
- // quality platform echo cancellation.
- if (!Build.MODEL.equals("SM-T310R") && // Galaxy Tab 3 7.0
- !Build.MODEL.equals("GT-I9300") && // Galaxy S3
- !Build.MODEL.equals("GT-I9500") && // Galaxy S4
- !Build.MODEL.equals("GT-N7105") && // Galaxy Note 2
- !Build.MODEL.equals("SM-N9005") && // Galaxy Note 3
- !Build.MODEL.equals("Nexus 4") &&
- !Build.MODEL.equals("Nexus 5") &&
- !Build.MODEL.equals("Nexus 7")) {
+ // Verify that this device is among the supported/tested models.
+ List<String> supportedModels = Arrays.asList(SUPPORTED_AEC_MODELS);
+ if (!supportedModels.contains(Build.MODEL)) {
return false;
}
+ if (DEBUG && AcousticEchoCanceler.isAvailable()) {
+ logd("Approved for use of hardware acoustic echo canceler.");
+ }
// As a final check, verify that the device supports acoustic echo
// cancellation.
@@ -467,6 +496,16 @@ class AudioManagerAndroid {
}
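
[Editor's note: condensed, the rewritten check is a table lookup plus a platform capability query. A sketch of the whole method, assuming — consistent with the comment above — that the final check is AcousticEchoCanceler.isAvailable(); the wrapper class and abbreviated model list are illustrative:]

    import android.media.audiofx.AcousticEchoCanceler;
    import android.os.Build;
    import java.util.Arrays;
    import java.util.List;

    class AecCheck {  // hypothetical wrapper, for illustration
        private static final String[] SUPPORTED_AEC_MODELS = {
            "GT-I9300",  // Galaxy S3
            "Nexus 5",   // (abbreviated; see the full list above)
        };

        static boolean shouldUseAcousticEchoCanceler() {
            // AcousticEchoCanceler was added in API level 16 (Jelly Bean).
            if (Build.VERSION.SDK_INT < Build.VERSION_CODES.JELLY_BEAN) {
                return false;
            }
            // Only models vetted for good platform AEC quality pass.
            List<String> supportedModels = Arrays.asList(SUPPORTED_AEC_MODELS);
            if (!supportedModels.contains(Build.MODEL)) {
                return false;
            }
            // Final check: the device must actually expose the effect.
            return AcousticEchoCanceler.isAvailable();
        }
    }
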
/**
+ * Helper method for debugging purposes. Logs message if method is not
+ * called on same thread as this object was created on.
+ */
+ private void checkIfCalledOnValidThread() {
+ if (DEBUG && !mNonThreadSafe.calledOnValidThread()) {
+ Log.wtf(TAG, "Method is not called on valid thread!");
+ }
+ }
+
+ /**
* Register for BT intents if we have the BLUETOOTH permission.
* Also extends the list of available devices with a BT device if one exists.
*/
@@ -483,9 +522,7 @@ class AudioManagerAndroid {
if (!mHasBluetoothPermission) {
return;
}
- if (hasBluetoothHeadset()) {
- mAudioDevices[DEVICE_BLUETOOTH_HEADSET] = true;
- }
+ mAudioDevices[DEVICE_BLUETOOTH_HEADSET] = hasBluetoothHeadset();
// Register receivers for broadcast intents related to changes in
// Bluetooth headset availability and usage of the SCO channel.
@@ -525,12 +562,24 @@ class AudioManagerAndroid {
return mAudioManager.isMicrophoneMute();
}
- /** Gets the current earpice state. */
+ /** Gets the current earpiece state. */
private boolean hasEarpiece() {
return mContext.getPackageManager().hasSystemFeature(
PackageManager.FEATURE_TELEPHONY);
}
+ /**
+ * Checks whether a wired headset is connected or not.
+ * This is not a valid indication that audio playback is actually over
+ * the wired headset as audio routing depends on other conditions. We
+ * only use it as an early indicator (during initialization) of an attached
+ * wired headset.
+ */
+ @Deprecated
+ private boolean hasWiredHeadset() {
+ return mAudioManager.isWiredHeadsetOn();
+ }
+
/** Checks if the process has BLUETOOTH permission or not. */
private boolean hasBluetoothPermission() {
boolean hasBluetooth = mContext.checkPermission(
@@ -954,10 +1003,18 @@ class AudioManagerAndroid {
}
}
+ /** Information about the current build, taken from system properties. */
private void logDeviceInfo() {
- Log.i(TAG, "Manufacturer:" + Build.MANUFACTURER +
- " Board: " + Build.BOARD + " Device: " + Build.DEVICE +
- " Model: " + Build.MODEL + " PRODUCT: " + Build.PRODUCT);
+ logd("Android SDK: " + Build.VERSION.SDK_INT + ", " +
+ "Release: " + Build.VERSION.RELEASE + ", " +
+ "Brand: " + Build.BRAND + ", " +
+ "CPU_ABI: " + Build.CPU_ABI + ", " +
+ "Device: " + Build.DEVICE + ", " +
+ "Id: " + Build.ID + ", " +
+ "Hardware: " + Build.HARDWARE + ", " +
+ "Manufacturer: " + Build.MANUFACTURER + ", " +
+ "Model: " + Build.MODEL + ", " +
+ "Product: " + Build.PRODUCT);
}
/** Trivial helper method for debug logging */
@@ -970,20 +1027,59 @@ class AudioManagerAndroid {
Log.e(TAG, msg);
}
- private class SettingsObserver extends ContentObserver {
- SettingsObserver(Handler handler) {
- super(handler);
- mContentResolver.registerContentObserver(Settings.System.CONTENT_URI, true, this);
- }
+ /** Start thread which observes volume changes on the voice stream. */
+ private void startObservingVolumeChanges() {
+ if (DEBUG) logd("startObservingVolumeChanges");
+ if (mSettingsObserverThread != null)
+ return;
+ mSettingsObserverThread = new HandlerThread("SettingsObserver");
+ mSettingsObserverThread.start();
+
+ mSettingsObserver = new ContentObserver(
+ new Handler(mSettingsObserverThread.getLooper())) {
+
+ @Override
+ public void onChange(boolean selfChange) {
+ if (DEBUG) logd("SettingsObserver.onChange: " + selfChange);
+ super.onChange(selfChange);
+
+ // Ensure that the observer is activated during communication mode.
+ if (mAudioManager.getMode() != AudioManager.MODE_IN_COMMUNICATION) {
+ Log.wtf(TAG, "Only enable SettingsObserver in COMM mode!");
+ return;
+ }
+
+ // Get stream volume for the voice stream and deliver callback if
+ // the volume index is zero. It is not possible to move the volume
+ // slider all the way down in communication mode but the callback
+ // implementation can ensure that the volume is completely muted.
+ int volume = mAudioManager.getStreamVolume(
+ AudioManager.STREAM_VOICE_CALL);
+ if (DEBUG) logd("nativeSetMute: " + (volume == 0));
+ nativeSetMute(mNativeAudioManagerAndroid, (volume == 0));
+ }
+ };
- @Override
- public void onChange(boolean selfChange) {
- if (DEBUG) logd("SettingsObserver.onChange: " + selfChange);
- super.onChange(selfChange);
- int volume = mAudioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
- if (DEBUG) logd("nativeSetMute: " + (volume == 0));
- nativeSetMute(mNativeAudioManagerAndroid, (volume == 0));
+ mContentResolver.registerContentObserver(
+ Settings.System.CONTENT_URI, true, mSettingsObserver);
+ }
+
+ /** Quit observer thread and stop listening for volume changes. */
+ private void stopObservingVolumeChanges() {
+ if (DEBUG) logd("stopObservingVolumeChanges");
+ if (mSettingsObserverThread == null)
+ return;
+
+ mContentResolver.unregisterContentObserver(mSettingsObserver);
+ mSettingsObserver = null;
+
+ mSettingsObserverThread.quit();
+ try {
+ mSettingsObserverThread.join();
+ } catch (InterruptedException e) {
+ Log.wtf(TAG, "Thread.join() exception: ", e);
}
+ mSettingsObserverThread = null;
}
private native void nativeSetMute(long nativeAudioManagerAndroid, boolean muted);
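
[Editor's note: the start/stop pair above follows a standard Android pattern — a ContentObserver whose callbacks run on a dedicated HandlerThread, torn down with quit() and join(). Stripped of the AudioManager specifics, the lifecycle looks like the sketch below; class and method names are illustrative:]

    import android.content.ContentResolver;
    import android.database.ContentObserver;
    import android.os.Handler;
    import android.os.HandlerThread;
    import android.provider.Settings;

    class SettingsWatcher {  // hypothetical, for illustration
        private HandlerThread mThread;
        private ContentObserver mObserver;

        void start(ContentResolver resolver) {
            if (mThread != null) return;  // already observing
            mThread = new HandlerThread("SettingsObserver");
            mThread.start();
            // Callbacks are dispatched on |mThread|'s looper, not the caller's.
            mObserver = new ContentObserver(new Handler(mThread.getLooper())) {
                @Override
                public void onChange(boolean selfChange) {
                    super.onChange(selfChange);
                    // React to the settings change here.
                }
            };
            resolver.registerContentObserver(
                    Settings.System.CONTENT_URI, true, mObserver);
        }

        void stop(ContentResolver resolver) {
            if (mThread == null) return;
            resolver.unregisterContentObserver(mObserver);
            mObserver = null;
            mThread.quit();
            try {
                mThread.join();  // wait for the looper thread to exit
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            mThread = null;
        }
    }
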
diff --git a/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java b/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java
index 06a02b3a7c..2099ddec76 100644
--- a/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java
+++ b/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java
@@ -254,7 +254,7 @@ class MediaDrmBridge {
* Check whether the crypto scheme is supported for the given container.
* If |containerMimeType| is an empty string, we just return whether
* the crypto scheme is supported.
- * TODO(qinmin): Implement the checking for container.
+ * TODO(xhwang): Implement container check. See: http://crbug.com/350481
*
* @return true if the container and the crypto scheme is supported, or
* false otherwise.
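
[Editor's note: on the Java side this query ultimately rests on android.media.MediaDrm. A sketch of the check for the Widevine UUID used elsewhere in this change; the helper class is hypothetical, and the two-argument overload requires API 19:]

    import android.media.MediaDrm;
    import java.util.UUID;

    class DrmSupport {  // hypothetical, for illustration
        // Widevine UUID, matching kWidevineUuid in media_drm_bridge.cc below.
        private static final UUID WIDEVINE_UUID =
                UUID.fromString("edef8ba9-79d6-4ace-a3c8-27dcd51d21ed");

        static boolean isWidevineSupported(String containerMimeType) {
            if (containerMimeType.isEmpty()) {
                // Check the crypto scheme alone.
                return MediaDrm.isCryptoSchemeSupported(WIDEVINE_UUID);
            }
            // The API 19+ overload also checks the container type.
            return MediaDrm.isCryptoSchemeSupported(
                    WIDEVINE_UUID, containerMimeType);
        }
    }
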
diff --git a/media/base/android/java/src/org/chromium/media/UsbMidiDeviceAndroid.java b/media/base/android/java/src/org/chromium/media/UsbMidiDeviceAndroid.java
index 84c35ea2df..08d572e441 100644
--- a/media/base/android/java/src/org/chromium/media/UsbMidiDeviceAndroid.java
+++ b/media/base/android/java/src/org/chromium/media/UsbMidiDeviceAndroid.java
@@ -11,6 +11,7 @@ import android.hardware.usb.UsbEndpoint;
import android.hardware.usb.UsbInterface;
import android.hardware.usb.UsbManager;
import android.hardware.usb.UsbRequest;
+import android.util.SparseArray;
import org.chromium.base.CalledByNative;
import org.chromium.base.JNINamespace;
@@ -33,7 +34,7 @@ class UsbMidiDeviceAndroid {
/**
* A map from endpoint number to UsbEndpoint.
*/
- private final Map<Integer, UsbEndpoint> mEndpointMap;
+ private final SparseArray<UsbEndpoint> mEndpointMap;
/**
* A map from UsbEndpoint to UsbRequest associated to it.
@@ -52,7 +53,7 @@ class UsbMidiDeviceAndroid {
*/
UsbMidiDeviceAndroid(UsbManager manager, UsbDevice device) {
mConnection = manager.openDevice(device);
- mEndpointMap = new HashMap<Integer, UsbEndpoint>();
+ mEndpointMap = new SparseArray<UsbEndpoint>();
mRequestMap = new HashMap<UsbEndpoint, UsbRequest>();
for (int i = 0; i < device.getInterfaceCount(); ++i) {
@@ -81,14 +82,12 @@ class UsbMidiDeviceAndroid {
if (mConnection == null) {
return;
}
- if (!mEndpointMap.containsKey(endpointNumber)) {
+ UsbEndpoint endpoint = mEndpointMap.get(endpointNumber);
+ if (endpoint == null) {
return;
}
- UsbEndpoint endpoint = mEndpointMap.get(endpointNumber);
- UsbRequest request;
- if (mRequestMap.containsKey(endpoint)) {
- request = mRequestMap.get(endpoint);
- } else {
+ UsbRequest request = mRequestMap.get(endpoint);
+ if (request == null) {
request = new UsbRequest();
request.initialize(mConnection, endpoint);
mRequestMap.put(endpoint, request);
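
[Editor's note: the switch from Map<Integer, UsbEndpoint> to SparseArray avoids autoboxing every int endpoint number into an Integer, and since get() returns null for a missing key, the containsKey()/get() double lookup collapses into one call. A small sketch of the idiom, with hypothetical names:]

    import android.util.SparseArray;

    class EndpointLookupDemo {  // hypothetical, for illustration
        private final SparseArray<String> mMap = new SparseArray<String>();

        String getOrCreate(int key) {
            // get() returns null for a missing key, so one lookup replaces
            // the containsKey() + get() pair needed with a Map<Integer, V>.
            String value = mMap.get(key);
            if (value == null) {
                value = "value-" + key;
                mMap.put(key, value);
            }
            return value;
        }
    }
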
diff --git a/media/base/android/java/src/org/chromium/media/VideoCapture.java b/media/base/android/java/src/org/chromium/media/VideoCapture.java
index 244697abf8..8182c302c8 100644
--- a/media/base/android/java/src/org/chromium/media/VideoCapture.java
+++ b/media/base/android/java/src/org/chromium/media/VideoCapture.java
@@ -154,7 +154,10 @@ public class VideoCapture implements PreviewCallback {
Log.e(TAG, "Camera.open: " + ex);
return null;
}
- Camera.Parameters parameters = camera.getParameters();
+ Camera.Parameters parameters = getCameraParameters(camera);
+ if (parameters == null) {
+ return null;
+ }
ArrayList<CaptureFormat> formatList = new ArrayList<CaptureFormat>();
// getSupportedPreview{Formats,FpsRange,PreviewSizes}() returns Lists
@@ -222,15 +225,24 @@ public class VideoCapture implements PreviewCallback {
return false;
}
- Camera.CameraInfo cameraInfo = new Camera.CameraInfo();
- Camera.getCameraInfo(mId, cameraInfo);
+ Camera.CameraInfo cameraInfo = getCameraInfo(mId);
+ if (cameraInfo == null) {
+ mCamera.release();
+ mCamera = null;
+ return false;
+ }
+
mCameraOrientation = cameraInfo.orientation;
mCameraFacing = cameraInfo.facing;
mDeviceOrientation = getDeviceOrientation();
Log.d(TAG, "allocate: orientation dev=" + mDeviceOrientation +
", cam=" + mCameraOrientation + ", facing=" + mCameraFacing);
- Camera.Parameters parameters = mCamera.getParameters();
+ Camera.Parameters parameters = getCameraParameters(mCamera);
+ if (parameters == null) {
+ mCamera = null;
+ return false;
+ }
// getSupportedPreviewFpsRange() returns a List with at least one
// element, but when camera is in bad state, it can return null pointer.
@@ -386,7 +398,13 @@ public class VideoCapture implements PreviewCallback {
mPreviewBufferLock.unlock();
}
mCamera.setPreviewCallbackWithBuffer(this);
- mCamera.startPreview();
+ try {
+ mCamera.startPreview();
+ } catch (RuntimeException ex) {
+ Log.e(TAG, "startCapture: Camera.startPreview: " + ex);
+ return -1;
+ }
+
return 0;
}
@@ -471,8 +489,7 @@ public class VideoCapture implements PreviewCallback {
private ChromiumCameraInfo(int index) {
mId = index;
- mCameraInfo = new Camera.CameraInfo();
- Camera.getCameraInfo(index, mCameraInfo);
+ mCameraInfo = getCameraInfo(mId);
}
@CalledByNative("ChromiumCameraInfo")
@@ -492,6 +509,9 @@ public class VideoCapture implements PreviewCallback {
@CalledByNative("ChromiumCameraInfo")
private String getDeviceName() {
+ if (mCameraInfo == null) {
+ return "";
+ }
return "camera " + mId + ", facing " +
(mCameraInfo.facing ==
Camera.CameraInfo.CAMERA_FACING_FRONT ? "front" : "back");
@@ -499,7 +519,7 @@ public class VideoCapture implements PreviewCallback {
@CalledByNative("ChromiumCameraInfo")
private int getOrientation() {
- return mCameraInfo.orientation;
+ return (mCameraInfo == null ? 0 : mCameraInfo.orientation);
}
}
@@ -532,4 +552,27 @@ public class VideoCapture implements PreviewCallback {
}
return orientation;
}
+
+ private static Camera.Parameters getCameraParameters(Camera camera) {
+ Camera.Parameters parameters;
+ try {
+ parameters = camera.getParameters();
+ } catch (RuntimeException ex) {
+ Log.e(TAG, "getCameraParameters: Camera.getParameters: " + ex);
+ camera.release();
+ return null;
+ }
+ return parameters;
+ }
+
+ private static Camera.CameraInfo getCameraInfo(int id) {
+ Camera.CameraInfo cameraInfo = new Camera.CameraInfo();
+ try {
+ Camera.getCameraInfo(id, cameraInfo);
+ } catch (RuntimeException ex) {
+ Log.e(TAG, "getCameraInfo: Camera.getCameraInfo: " + ex);
+ return null;
+ }
+ return cameraInfo;
+ }
}
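
[Editor's note: both new helpers convert the RuntimeExceptions that android.hardware.Camera can throw when the camera service is in a bad state into null returns, so call sites degrade gracefully instead of crashing. A hypothetical caller following the same pattern, assuming the getCameraInfo() helper above:]

    import android.hardware.Camera;
    import android.util.Log;

    // Hypothetical caller: a null from the helper aborts the operation.
    static boolean logCameraOrientation(int id) {
        Camera.CameraInfo info = getCameraInfo(id);
        if (info == null) {
            return false;  // Camera.getCameraInfo() threw; already logged.
        }
        Log.d("CameraDemo", "orientation=" + info.orientation);
        return true;
    }
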
diff --git a/media/base/android/media_codec_bridge.cc b/media/base/android/media_codec_bridge.cc
index 1b35bdeb8d..0178844156 100644
--- a/media/base/android/media_codec_bridge.cc
+++ b/media/base/android/media_codec_bridge.cc
@@ -116,10 +116,10 @@ bool MediaCodecBridge::SupportsSetParameters() {
// static
std::vector<MediaCodecBridge::CodecsInfo> MediaCodecBridge::GetCodecsInfo() {
std::vector<CodecsInfo> codecs_info;
- JNIEnv* env = AttachCurrentThread();
if (!IsAvailable())
return codecs_info;
+ JNIEnv* env = AttachCurrentThread();
std::string mime_type;
ScopedJavaLocalRef<jobjectArray> j_codec_info_array =
Java_MediaCodecBridge_getCodecsInfo(env);
@@ -144,6 +144,9 @@ std::vector<MediaCodecBridge::CodecsInfo> MediaCodecBridge::GetCodecsInfo() {
// static
bool MediaCodecBridge::CanDecode(const std::string& codec, bool is_secure) {
+ if (!IsAvailable())
+ return false;
+
JNIEnv* env = AttachCurrentThread();
std::string mime = CodecTypeToAndroidMimeType(codec);
if (mime.empty())
@@ -161,6 +164,9 @@ bool MediaCodecBridge::CanDecode(const std::string& codec, bool is_secure) {
// static
bool MediaCodecBridge::IsKnownUnaccelerated(const std::string& mime_type,
MediaCodecDirection direction) {
+ if (!IsAvailable())
+ return true;
+
std::string codec_type = AndroidMimeTypeToCodecType(mime_type);
std::vector<media::MediaCodecBridge::CodecsInfo> codecs_info =
MediaCodecBridge::GetCodecsInfo();
@@ -605,7 +611,11 @@ void AudioCodecBridge::SetVolume(double volume) {
Java_MediaCodecBridge_setVolume(env, media_codec(), volume);
}
+// static
AudioCodecBridge* AudioCodecBridge::Create(const AudioCodec& codec) {
+ if (!MediaCodecBridge::IsAvailable())
+ return NULL;
+
const std::string mime = AudioCodecToAndroidMimeType(codec);
return mime.empty() ? NULL : new AudioCodecBridge(mime);
}
@@ -623,12 +633,15 @@ bool VideoCodecBridge::IsKnownUnaccelerated(const VideoCodec& codec,
VideoCodecToAndroidMimeType(codec), direction);
}
+// static
VideoCodecBridge* VideoCodecBridge::CreateDecoder(const VideoCodec& codec,
bool is_secure,
const gfx::Size& size,
jobject surface,
jobject media_crypto) {
- JNIEnv* env = AttachCurrentThread();
+ if (!MediaCodecBridge::IsAvailable())
+ return NULL;
+
const std::string mime = VideoCodecToAndroidMimeType(codec);
if (mime.empty())
return NULL;
@@ -638,6 +651,7 @@ VideoCodecBridge* VideoCodecBridge::CreateDecoder(const VideoCodec& codec,
if (!bridge->media_codec())
return NULL;
+ JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, mime);
ScopedJavaLocalRef<jobject> j_format(
Java_MediaCodecBridge_createVideoDecoderFormat(
@@ -655,13 +669,16 @@ VideoCodecBridge* VideoCodecBridge::CreateDecoder(const VideoCodec& codec,
return bridge->StartInternal() ? bridge.release() : NULL;
}
+// static
VideoCodecBridge* VideoCodecBridge::CreateEncoder(const VideoCodec& codec,
const gfx::Size& size,
int bit_rate,
int frame_rate,
int i_frame_interval,
int color_format) {
- JNIEnv* env = AttachCurrentThread();
+ if (!MediaCodecBridge::IsAvailable())
+ return NULL;
+
const std::string mime = VideoCodecToAndroidMimeType(codec);
if (mime.empty())
return NULL;
@@ -671,6 +688,7 @@ VideoCodecBridge* VideoCodecBridge::CreateEncoder(const VideoCodec& codec,
if (!bridge->media_codec())
return NULL;
+ JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, mime);
ScopedJavaLocalRef<jobject> j_format(
Java_MediaCodecBridge_createVideoEncoderFormat(env,
diff --git a/media/base/android/media_codec_bridge.h b/media/base/android/media_codec_bridge.h
index e71f67f918..f90edcf6ff 100644
--- a/media/base/android/media_codec_bridge.h
+++ b/media/base/android/media_codec_bridge.h
@@ -51,6 +51,8 @@ enum MediaCodecDirection {
class MEDIA_EXPORT MediaCodecBridge {
public:
// Returns true if MediaCodec is available on the device.
+ // All other static methods check IsAvailable() internally. There's no need
+ // to check IsAvailable() explicitly before calling them.
static bool IsAvailable();
// Returns true if MediaCodec.setParameters() is available on the device.
@@ -61,8 +63,8 @@ class MEDIA_EXPORT MediaCodecBridge {
static bool CanDecode(const std::string& codec, bool is_secure);
// Represents supported codecs on android.
- // TODO(qinmin): Curretly the codecs string only contains one codec, do we
- // need more specific codecs separated by comma. (e.g. "vp8" -> "vp8, vp8.0")
+ // TODO(qinmin): Currently the codecs string only contains one codec. Do we
+ // need to support codecs separated by commas (e.g. "vp8" -> "vp8, vp8.0")?
struct CodecsInfo {
std::string codecs; // E.g. "vp8" or "avc1".
std::string name; // E.g. "OMX.google.vp8.decoder".
diff --git a/media/base/android/media_drm_bridge.cc b/media/base/android/media_drm_bridge.cc
index e658c2d08e..d1119bc657 100644
--- a/media/base/android/media_drm_bridge.cc
+++ b/media/base/android/media_drm_bridge.cc
@@ -15,6 +15,8 @@
#include "jni/MediaDrmBridge_jni.h"
#include "media/base/android/media_player_manager.h"
+#include "widevine_cdm_version.h" // In SHARED_INTERMEDIATE_DIR.
+
using base::android::AttachCurrentThread;
using base::android::ConvertUTF8ToJavaString;
using base::android::ConvertJavaStringToUTF8;
@@ -50,13 +52,26 @@ static uint64 ReadUint64(const uint8_t* data) {
// uint32 DataSize
// uint8[DataSize] Data
// }
-static const int kBoxHeaderSize = 8; // Box's header contains Size and Type.
-static const int kBoxLargeSizeSize = 8;
-static const int kPsshVersionFlagSize = 4;
-static const int kPsshSystemIdSize = 16;
-static const int kPsshDataSizeSize = 4;
-static const uint32 kTencType = 0x74656e63;
-static const uint32 kPsshType = 0x70737368;
+const int kBoxHeaderSize = 8; // Box's header contains Size and Type.
+const int kBoxLargeSizeSize = 8;
+const int kPsshVersionFlagSize = 4;
+const int kPsshSystemIdSize = 16;
+const int kPsshDataSizeSize = 4;
+const uint32 kTencType = 0x74656e63;
+const uint32 kPsshType = 0x70737368;
+const uint8 kWidevineUuid[16] = {
+ 0xED, 0xEF, 0x8B, 0xA9, 0x79, 0xD6, 0x4A, 0xCE,
+ 0xA3, 0xC8, 0x27, 0xDC, 0xD5, 0x1D, 0x21, 0xED };
+
+static std::vector<uint8> GetUUID(const std::string& key_system) {
+ // For security reasons, we only do exact string comparisons here - we don't
+ // try to parse the |key_system| in any way.
+ if (key_system == kWidevineKeySystem) {
+ return std::vector<uint8>(kWidevineUuid,
+ kWidevineUuid + arraysize(kWidevineUuid));
+ }
+ return std::vector<uint8>();
+}
// Tries to find a PSSH box whose "SystemId" is |uuid| in |data|, parses the
// "Data" of the box and put it in |pssh_data|. Returns true if such a box is
@@ -172,25 +187,37 @@ bool MediaDrmBridge::IsAvailable() {
// static
bool MediaDrmBridge::IsSecureDecoderRequired(SecurityLevel security_level) {
+ DCHECK(IsAvailable());
return SECURITY_LEVEL_1 == security_level;
}
-bool MediaDrmBridge::IsSecurityLevelSupported(
- const std::vector<uint8>& scheme_uuid,
- SecurityLevel security_level) {
- // Pass 0 as |media_keys_id| and NULL as |manager| as they are not used in
+// static
+bool MediaDrmBridge::IsSecurityLevelSupported(const std::string& key_system,
+ SecurityLevel security_level) {
+ if (!IsAvailable())
+ return false;
+
+ // Pass 0 as |cdm_id| and NULL as |manager| as they are not used in
// creation time of MediaDrmBridge.
scoped_ptr<MediaDrmBridge> media_drm_bridge =
- MediaDrmBridge::Create(0, scheme_uuid, GURL(), NULL);
+ MediaDrmBridge::Create(0, key_system, GURL(), NULL);
if (!media_drm_bridge)
return false;
return media_drm_bridge->SetSecurityLevel(security_level);
}
-bool MediaDrmBridge::IsCryptoSchemeSupported(
- const std::vector<uint8>& scheme_uuid,
+// static
+bool MediaDrmBridge::IsKeySystemSupportedWithType(
+ const std::string& key_system,
const std::string& container_mime_type) {
+ if (!IsAvailable())
+ return false;
+
+ std::vector<uint8> scheme_uuid = GetUUID(key_system);
+ if (scheme_uuid.empty())
+ return false;
+
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jbyteArray> j_scheme_uuid =
base::android::ToJavaByteArray(env, &scheme_uuid[0], scheme_uuid.size());
@@ -204,11 +231,11 @@ bool MediaDrmBridge::RegisterMediaDrmBridge(JNIEnv* env) {
return RegisterNativesImpl(env);
}
-MediaDrmBridge::MediaDrmBridge(int media_keys_id,
+MediaDrmBridge::MediaDrmBridge(int cdm_id,
const std::vector<uint8>& scheme_uuid,
const GURL& frame_url,
MediaPlayerManager* manager)
- : media_keys_id_(media_keys_id),
+ : cdm_id_(cdm_id),
scheme_uuid_(scheme_uuid),
frame_url_(frame_url),
manager_(manager) {
@@ -228,20 +255,22 @@ MediaDrmBridge::~MediaDrmBridge() {
}
// static
-scoped_ptr<MediaDrmBridge> MediaDrmBridge::Create(
- int media_keys_id,
- const std::vector<uint8>& scheme_uuid,
- const GURL& frame_url,
- MediaPlayerManager* manager) {
+scoped_ptr<MediaDrmBridge> MediaDrmBridge::Create(int cdm_id,
+ const std::string& key_system,
+ const GURL& frame_url,
+ MediaPlayerManager* manager) {
scoped_ptr<MediaDrmBridge> media_drm_bridge;
+ if (!IsAvailable())
+ return media_drm_bridge.Pass();
- if (IsAvailable() && !scheme_uuid.empty()) {
- // TODO(qinmin): check whether the uuid is valid.
- media_drm_bridge.reset(new MediaDrmBridge(
- media_keys_id, scheme_uuid, frame_url, manager));
- if (media_drm_bridge->j_media_drm_.is_null())
- media_drm_bridge.reset();
- }
+ std::vector<uint8> scheme_uuid = GetUUID(key_system);
+ if (scheme_uuid.empty())
+ return media_drm_bridge.Pass();
+
+ media_drm_bridge.reset(
+ new MediaDrmBridge(cdm_id, scheme_uuid, frame_url, manager));
+ if (media_drm_bridge->j_media_drm_.is_null())
+ media_drm_bridge.reset();
return media_drm_bridge.Pass();
}
@@ -328,7 +357,7 @@ void MediaDrmBridge::OnSessionCreated(JNIEnv* env,
jstring j_web_session_id) {
uint32 session_id = j_session_id;
std::string web_session_id = ConvertJavaStringToUTF8(env, j_web_session_id);
- manager_->OnSessionCreated(media_keys_id_, session_id, web_session_id);
+ manager_->OnSessionCreated(cdm_id_, session_id, web_session_id);
}
void MediaDrmBridge::OnSessionMessage(JNIEnv* env,
@@ -347,30 +376,28 @@ void MediaDrmBridge::OnSessionMessage(JNIEnv* env,
destination_gurl = GURL::EmptyGURL(); // Replace invalid destination_url.
}
- manager_->OnSessionMessage(
- media_keys_id_, session_id, message, destination_gurl);
+ manager_->OnSessionMessage(cdm_id_, session_id, message, destination_gurl);
}
void MediaDrmBridge::OnSessionReady(JNIEnv* env,
jobject j_media_drm,
jint j_session_id) {
uint32 session_id = j_session_id;
- manager_->OnSessionReady(media_keys_id_, session_id);
+ manager_->OnSessionReady(cdm_id_, session_id);
}
void MediaDrmBridge::OnSessionClosed(JNIEnv* env,
jobject j_media_drm,
jint j_session_id) {
uint32 session_id = j_session_id;
- manager_->OnSessionClosed(media_keys_id_, session_id);
+ manager_->OnSessionClosed(cdm_id_, session_id);
}
void MediaDrmBridge::OnSessionError(JNIEnv* env,
jobject j_media_drm,
jint j_session_id) {
uint32 session_id = j_session_id;
- manager_->OnSessionError(
- media_keys_id_, session_id, MediaKeys::kUnknownError, 0);
+ manager_->OnSessionError(cdm_id_, session_id, MediaKeys::kUnknownError, 0);
}
ScopedJavaLocalRef<jobject> MediaDrmBridge::GetMediaCrypto() {
diff --git a/media/base/android/media_drm_bridge.h b/media/base/android/media_drm_bridge.h
index e334f7f09d..4e83eb48ca 100644
--- a/media/base/android/media_drm_bridge.h
+++ b/media/base/android/media_drm_bridge.h
@@ -6,8 +6,6 @@
#define MEDIA_BASE_ANDROID_MEDIA_DRM_BRIDGE_H_
#include <jni.h>
-#include <map>
-#include <queue>
#include <string>
#include <vector>
@@ -39,25 +37,30 @@ class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
virtual ~MediaDrmBridge();
// Checks whether MediaDRM is available.
+ // All other static methods check IsAvailable() internally. There's no need
+ // to check IsAvailable() explicitly before calling them.
static bool IsAvailable();
- static bool IsSecurityLevelSupported(const std::vector<uint8>& scheme_uuid,
+ static bool IsSecurityLevelSupported(const std::string& key_system,
SecurityLevel security_level);
- static bool IsCryptoSchemeSupported(const std::vector<uint8>& scheme_uuid,
- const std::string& container_mime_type);
+ // TODO(xhwang): The |container_mime_type| is not the same as contentType in
+ // the EME spec. Revisit this once the spec issue with initData type is
+ // resolved.
+ static bool IsKeySystemSupportedWithType(
+ const std::string& key_system,
+ const std::string& container_mime_type);
static bool IsSecureDecoderRequired(SecurityLevel security_level);
static bool RegisterMediaDrmBridge(JNIEnv* env);
- // Returns a MediaDrmBridge instance if |scheme_uuid| is supported, or a NULL
+ // Returns a MediaDrmBridge instance if |key_system| is supported, or a NULL
// pointer otherwise.
- static scoped_ptr<MediaDrmBridge> Create(
- int media_keys_id,
- const std::vector<uint8>& scheme_uuid,
- const GURL& frame_url,
- MediaPlayerManager* manager);
+ static scoped_ptr<MediaDrmBridge> Create(int cdm_id,
+ const std::string& key_system,
+ const GURL& frame_url,
+ MediaPlayerManager* manager);
// Returns true if |security_level| is successfully set, or false otherwise.
// Call this function right after Create() and before any other calls.
@@ -115,12 +118,12 @@ class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
// video playback.
bool IsProtectedSurfaceRequired();
- int media_keys_id() const { return media_keys_id_; }
+ int cdm_id() const { return cdm_id_; }
GURL frame_url() const { return frame_url_; }
private:
- MediaDrmBridge(int media_keys_id,
+ MediaDrmBridge(int cdm_id,
const std::vector<uint8>& scheme_uuid,
const GURL& frame_url,
MediaPlayerManager* manager);
@@ -128,8 +131,8 @@ class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
// Get the security level of the media.
SecurityLevel GetSecurityLevel();
- // ID of the MediaKeys object.
- int media_keys_id_;
+ // ID of the CDM object.
+ int cdm_id_;
// UUID of the key system.
std::vector<uint8> scheme_uuid_;
diff --git a/media/base/android/media_drm_bridge_unittest.cc b/media/base/android/media_drm_bridge_unittest.cc
new file mode 100644
index 0000000000..fdf5350ed3
--- /dev/null
+++ b/media/base/android/media_drm_bridge_unittest.cc
@@ -0,0 +1,84 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "media/base/android/media_drm_bridge.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "widevine_cdm_version.h" // In SHARED_INTERMEDIATE_DIR.
+
+namespace media {
+
+#define EXPECT_TRUE_IF_AVAILABLE(a) \
+ do { \
+ if (!MediaDrmBridge::IsAvailable()) { \
+ VLOG(0) << "MediaDrm not supported on device."; \
+ EXPECT_FALSE(a); \
+ } else { \
+ EXPECT_TRUE(a); \
+ } \
+ } while (0)
+
+const char kAudioMp4[] = "audio/mp4";
+const char kVideoMp4[] = "video/mp4";
+const char kAudioWebM[] = "audio/webm";
+const char kVideoWebM[] = "video/webm";
+const char kInvalidKeySystem[] = "invalid.keysystem";
+const MediaDrmBridge::SecurityLevel kLNone =
+ MediaDrmBridge::SECURITY_LEVEL_NONE;
+const MediaDrmBridge::SecurityLevel kL1 = MediaDrmBridge::SECURITY_LEVEL_1;
+const MediaDrmBridge::SecurityLevel kL3 = MediaDrmBridge::SECURITY_LEVEL_3;
+
+static bool IsKeySystemSupportedWithType(
+ const std::string& key_system,
+ const std::string& container_mime_type) {
+ return MediaDrmBridge::IsKeySystemSupportedWithType(key_system,
+ container_mime_type);
+}
+
+static bool IsSecurityLevelSupported(
+ const std::string& key_system,
+ MediaDrmBridge::SecurityLevel security_level) {
+ return MediaDrmBridge::IsSecurityLevelSupported(key_system, security_level);
+}
+
+TEST(MediaDrmBridgeTest, IsSecurityLevelSupported_Widevine) {
+ EXPECT_FALSE(IsSecurityLevelSupported(kWidevineKeySystem, kLNone));
+ // We test "L3" fully. But for "L1" we don't check the result as it depends on
+ // whether the test device supports "L1".
+ EXPECT_TRUE_IF_AVAILABLE(IsSecurityLevelSupported(kWidevineKeySystem, kL3));
+ IsSecurityLevelSupported(kWidevineKeySystem, kL1);
+}
+
+// An invalid key system is NOT supported, regardless of whether MediaDrm is
+// available.
+TEST(MediaDrmBridgeTest, IsSecurityLevelSupported_InvalidKeySystem) {
+ EXPECT_FALSE(IsSecurityLevelSupported(kInvalidKeySystem, kLNone));
+ EXPECT_FALSE(IsSecurityLevelSupported(kInvalidKeySystem, kL1));
+ EXPECT_FALSE(IsSecurityLevelSupported(kInvalidKeySystem, kL3));
+}
+
+TEST(MediaDrmBridgeTest, IsTypeSupported_Widevine) {
+ EXPECT_TRUE_IF_AVAILABLE(
+ IsKeySystemSupportedWithType(kWidevineKeySystem, kAudioMp4));
+ EXPECT_TRUE_IF_AVAILABLE(
+ IsKeySystemSupportedWithType(kWidevineKeySystem, kVideoMp4));
+
+ // TODO(xhwang): MediaDrmBridge.IsKeySystemSupportedWithType() doesn't check
+ // the container type. Fix IsKeySystemSupportedWithType() and update this test
+ // as necessary. See: http://crbug.com/350481
+ EXPECT_TRUE_IF_AVAILABLE(
+ IsKeySystemSupportedWithType(kWidevineKeySystem, kAudioWebM));
+ EXPECT_TRUE_IF_AVAILABLE(
+ IsKeySystemSupportedWithType(kWidevineKeySystem, kVideoWebM));
+}
+
+// An invalid key system is NOT supported, regardless of whether MediaDrm is
+// available.
+TEST(MediaDrmBridgeTest, IsTypeSupported_InvalidKeySystem) {
+ EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, ""));
+ EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, kVideoMp4));
+ EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, kVideoWebM));
+}
+
+} // namespace media
diff --git a/media/base/android/media_player_android.cc b/media/base/android/media_player_android.cc
index c2e00947af..2a98b7e27f 100644
--- a/media/base/android/media_player_android.cc
+++ b/media/base/android/media_player_android.cc
@@ -12,8 +12,12 @@ namespace media {
MediaPlayerAndroid::MediaPlayerAndroid(
int player_id,
- MediaPlayerManager* manager)
- : player_id_(player_id),
+ MediaPlayerManager* manager,
+ const RequestMediaResourcesCB& request_media_resources_cb,
+ const ReleaseMediaResourcesCB& release_media_resources_cb)
+ : request_media_resources_cb_(request_media_resources_cb),
+ release_media_resources_cb_(release_media_resources_cb),
+ player_id_(player_id),
manager_(manager) {
}
diff --git a/media/base/android/media_player_android.h b/media/base/android/media_player_android.h
index 27a6432d9e..c7e17447eb 100644
--- a/media/base/android/media_player_android.h
+++ b/media/base/android/media_player_android.h
@@ -34,6 +34,12 @@ class MEDIA_EXPORT MediaPlayerAndroid {
MEDIA_ERROR_INVALID_CODE,
};
+ // Callback when the player needs decoding resources.
+ typedef base::Callback<void(int player_id)> RequestMediaResourcesCB;
+
+ // Callback when the player releases decoding resources.
+ typedef base::Callback<void(int player_id)> ReleaseMediaResourcesCB;
+
// Passing an external java surface object to the player.
virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) = 0;
@@ -75,14 +81,23 @@ class MEDIA_EXPORT MediaPlayerAndroid {
// may want to start/resume playback if it is waiting for a key.
virtual void OnKeyAdded();
+ // Check whether the player still uses the current surface.
+ virtual bool IsSurfaceInUse() const = 0;
+
int player_id() { return player_id_; }
protected:
MediaPlayerAndroid(int player_id,
- MediaPlayerManager* manager);
+ MediaPlayerManager* manager,
+ const RequestMediaResourcesCB& request_media_resources_cb,
+ const ReleaseMediaResourcesCB& release_media_resources_cb);
MediaPlayerManager* manager() { return manager_; }
+ RequestMediaResourcesCB request_media_resources_cb_;
+
+ ReleaseMediaResourcesCB release_media_resources_cb_;
+
private:
// Player ID assigned to this player.
int player_id_;
diff --git a/media/base/android/media_player_bridge.cc b/media/base/android/media_player_bridge.cc
index 22569cb113..125c6bf999 100644
--- a/media/base/android/media_player_bridge.cc
+++ b/media/base/android/media_player_bridge.cc
@@ -31,8 +31,11 @@ MediaPlayerBridge::MediaPlayerBridge(
const GURL& first_party_for_cookies,
const std::string& user_agent,
bool hide_url_log,
- MediaPlayerManager* manager)
- : MediaPlayerAndroid(player_id, manager),
+ MediaPlayerManager* manager,
+ const RequestMediaResourcesCB& request_media_resources_cb,
+ const ReleaseMediaResourcesCB& release_media_resources_cb)
+ : MediaPlayerAndroid(player_id, manager, request_media_resources_cb,
+ release_media_resources_cb),
prepared_(false),
pending_play_(false),
url_(url),
@@ -46,7 +49,8 @@ MediaPlayerBridge::MediaPlayerBridge(
can_seek_backward_(true),
weak_this_(this),
listener_(base::MessageLoopProxy::current(),
- weak_this_.GetWeakPtr()) {
+ weak_this_.GetWeakPtr()),
+ is_surface_in_use_(false) {
}
MediaPlayerBridge::~MediaPlayerBridge() {
@@ -122,7 +126,7 @@ void MediaPlayerBridge::SetVideoSurface(gfx::ScopedJavaSurface surface) {
JNIEnv* env = base::android::AttachCurrentThread();
CHECK(env);
-
+ is_surface_in_use_ = true;
Java_MediaPlayerBridge_setSurface(
env, j_media_player_bridge_.obj(), surface.j_surface().obj());
}
@@ -173,7 +177,7 @@ void MediaPlayerBridge::SetDataSource(const std::string& url) {
return;
}
- manager()->RequestMediaResources(player_id());
+ request_media_resources_cb_.Run(player_id());
if (!Java_MediaPlayerBridge_prepareAsync(env, j_media_player_bridge_.obj()))
OnMediaError(MEDIA_ERROR_FORMAT);
}
@@ -185,7 +189,7 @@ void MediaPlayerBridge::OnDidSetDataUriDataSource(JNIEnv* env, jobject obj,
return;
}
- manager()->RequestMediaResources(player_id());
+ request_media_resources_cb_.Run(player_id());
if (!Java_MediaPlayerBridge_prepareAsync(env, j_media_player_bridge_.obj()))
OnMediaError(MEDIA_ERROR_FORMAT);
}
@@ -300,12 +304,12 @@ void MediaPlayerBridge::Release() {
pending_seek_ = GetCurrentTime();
prepared_ = false;
pending_play_ = false;
+ is_surface_in_use_ = false;
SetVideoSurface(gfx::ScopedJavaSurface());
-
JNIEnv* env = base::android::AttachCurrentThread();
Java_MediaPlayerBridge_release(env, j_media_player_bridge_.obj());
j_media_player_bridge_.Reset();
- manager()->ReleaseMediaResources(player_id());
+ release_media_resources_cb_.Run(player_id());
listener_.ReleaseMediaPlayerListenerResources();
}
@@ -462,4 +466,8 @@ GURL MediaPlayerBridge::GetFirstPartyForCookies() {
return first_party_for_cookies_;
}
+bool MediaPlayerBridge::IsSurfaceInUse() const {
+ return is_surface_in_use_;
+}
+
} // namespace media
diff --git a/media/base/android/media_player_bridge.h b/media/base/android/media_player_bridge.h
index fe03ad4a17..81e4511f89 100644
--- a/media/base/android/media_player_bridge.h
+++ b/media/base/android/media_player_bridge.h
@@ -47,7 +47,9 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
const GURL& first_party_for_cookies,
const std::string& user_agent,
bool hide_url_log,
- MediaPlayerManager* manager);
+ MediaPlayerManager* manager,
+ const RequestMediaResourcesCB& request_media_resources_cb,
+ const ReleaseMediaResourcesCB& release_media_resources_cb);
virtual ~MediaPlayerBridge();
// Initialize this object and extract the metadata from the media.
@@ -71,6 +73,7 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
virtual bool IsPlayerReady() OVERRIDE;
virtual GURL GetUrl() OVERRIDE;
virtual GURL GetFirstPartyForCookies() OVERRIDE;
+ virtual bool IsSurfaceInUse() const OVERRIDE;
// MediaPlayerListener callbacks.
void OnVideoSizeChanged(int width, int height);
@@ -171,6 +174,9 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
// Listener object that listens to all the media player events.
MediaPlayerListener listener_;
+ // Whether player is currently using a surface.
+ bool is_surface_in_use_;
+
friend class MediaPlayerListener;
DISALLOW_COPY_AND_ASSIGN(MediaPlayerBridge);
};
diff --git a/media/base/android/media_player_manager.h b/media/base/android/media_player_manager.h
index ed5e61a8b0..f4fe548fc7 100644
--- a/media/base/android/media_player_manager.h
+++ b/media/base/android/media_player_manager.h
@@ -25,16 +25,6 @@ class MEDIA_EXPORT MediaPlayerManager {
public:
virtual ~MediaPlayerManager() {}
- // Called by a MediaPlayerAndroid object when it is going to decode
- // media streams. This helps the manager object maintain an array
- // of active MediaPlayerAndroid objects and release the resources
- // when needed.
- virtual void RequestMediaResources(int player_id) = 0;
-
- // Called when a MediaPlayerAndroid object releases all its decoding
- // resources.
- virtual void ReleaseMediaResources(int player_id) = 0;
-
// Return a pointer to the MediaResourceGetter object.
virtual MediaResourceGetter* GetMediaResourceGetter() = 0;
@@ -82,7 +72,7 @@ class MEDIA_EXPORT MediaPlayerManager {
virtual void DestroyAllMediaPlayers() = 0;
// Get the MediaDrmBridge object for the given media key Id.
- virtual media::MediaDrmBridge* GetDrmBridge(int media_keys_id) = 0;
+ virtual media::MediaDrmBridge* GetDrmBridge(int cdm_id) = 0;
// Called by the player to get a hardware protected surface.
virtual void OnProtectedSurfaceRequested(int player_id) = 0;
@@ -93,27 +83,27 @@ class MEDIA_EXPORT MediaPlayerManager {
// http://crbug.com/315312
// Called when MediaDrmBridge determines a SessionId.
- virtual void OnSessionCreated(int media_keys_id,
+ virtual void OnSessionCreated(int cdm_id,
uint32 session_id,
const std::string& web_session_id) = 0;
// Called when MediaDrmBridge wants to send a Message event.
- virtual void OnSessionMessage(int media_keys_id,
+ virtual void OnSessionMessage(int cdm_id,
uint32 session_id,
const std::vector<uint8>& message,
const GURL& destination_url) = 0;
// Called when MediaDrmBridge wants to send a Ready event.
- virtual void OnSessionReady(int media_keys_id, uint32 session_id) = 0;
+ virtual void OnSessionReady(int cdm_id, uint32 session_id) = 0;
// Called when MediaDrmBridge wants to send a Closed event.
- virtual void OnSessionClosed(int media_keys_id, uint32 session_id) = 0;
+ virtual void OnSessionClosed(int cdm_id, uint32 session_id) = 0;
// Called when MediaDrmBridge wants to send an Error event.
- virtual void OnSessionError(int media_keys_id,
+ virtual void OnSessionError(int cdm_id,
uint32 session_id,
media::MediaKeys::KeyError error_code,
- int system_code) = 0;
+ uint32 system_code) = 0;
};
} // namespace media
diff --git a/media/base/android/media_source_player.cc b/media/base/android/media_source_player.cc
index 598ff6b036..584d50b238 100644
--- a/media/base/android/media_source_player.cc
+++ b/media/base/android/media_source_player.cc
@@ -31,40 +31,14 @@ const int kBytesPerAudioOutputSample = 2;
namespace media {
-// static
-bool MediaSourcePlayer::IsTypeSupported(
- const std::vector<uint8>& scheme_uuid,
- MediaDrmBridge::SecurityLevel security_level,
- const std::string& container,
- const std::vector<std::string>& codecs) {
- if (!MediaDrmBridge::IsCryptoSchemeSupported(scheme_uuid, container)) {
- DVLOG(1) << "UUID and container '" << container << "' not supported.";
- return false;
- }
-
- if (!MediaDrmBridge::IsSecurityLevelSupported(scheme_uuid, security_level)) {
- DVLOG(1) << "UUID and security level '" << security_level
- << "' not supported.";
- return false;
- }
-
- bool is_secure = MediaDrmBridge::IsSecureDecoderRequired(security_level);
- for (size_t i = 0; i < codecs.size(); ++i) {
- if (!MediaCodecBridge::CanDecode(codecs[i], is_secure)) {
- DVLOG(1) << "Codec '" << codecs[i] << "' "
- << (is_secure ? "in secure mode " : "") << "not supported.";
- return false;
- }
- }
-
- return true;
-}
-
MediaSourcePlayer::MediaSourcePlayer(
int player_id,
MediaPlayerManager* manager,
+ const RequestMediaResourcesCB& request_media_resources_cb,
+ const ReleaseMediaResourcesCB& release_media_resources_cb,
scoped_ptr<DemuxerAndroid> demuxer)
- : MediaPlayerAndroid(player_id, manager),
+ : MediaPlayerAndroid(player_id, manager, request_media_resources_cb,
+ release_media_resources_cb),
demuxer_(demuxer.Pass()),
pending_event_(NO_EVENT_PENDING),
width_(0),
@@ -106,6 +80,7 @@ void MediaSourcePlayer::SetVideoSurface(gfx::ScopedJavaSurface surface) {
}
surface_ = surface.Pass();
+ is_surface_in_use_ = true;
// If there is a pending surface change event, just wait for it to be
// processed.
@@ -246,7 +221,7 @@ void MediaSourcePlayer::Release() {
// Clear all the pending events except seeks and config changes.
pending_event_ &= (SEEK_EVENT_PENDING | CONFIG_CHANGE_EVENT_PENDING);
-
+ is_surface_in_use_ = false;
audio_decoder_job_.reset();
ResetVideoDecoderJob();
@@ -259,7 +234,6 @@ void MediaSourcePlayer::Release() {
decoder_starvation_callback_.Cancel();
surface_ = gfx::ScopedJavaSurface();
- manager()->ReleaseMediaResources(player_id());
if (process_pending_events) {
DVLOG(1) << __FUNCTION__ << " : Resuming seek or config change processing";
ProcessPendingEvents();
@@ -281,6 +255,10 @@ void MediaSourcePlayer::OnKeyAdded() {
StartInternal();
}
+bool MediaSourcePlayer::IsSurfaceInUse() const {
+ return is_surface_in_use_;
+}
+
bool MediaSourcePlayer::CanPause() {
return Seekable();
}
@@ -525,9 +503,8 @@ void MediaSourcePlayer::ProcessPendingEvents() {
ConfigureVideoDecoderJob();
// Return early if we can't successfully configure a new video decoder job
- // yet, except continue processing other pending events if |surface_| is
- // empty.
- if (HasVideo() && !video_decoder_job_ && !surface_.IsEmpty())
+ // yet.
+ if (HasVideo() && !video_decoder_job_)
return;
}
@@ -852,14 +829,17 @@ void MediaSourcePlayer::ConfigureVideoDecoderJob() {
// Create the new VideoDecoderJob.
bool is_secure = IsProtectedSurfaceRequired();
video_decoder_job_.reset(
- VideoDecoderJob::Create(video_codec_,
- is_secure,
- gfx::Size(width_, height_),
- surface_.j_surface().obj(),
- media_crypto.obj(),
- base::Bind(&DemuxerAndroid::RequestDemuxerData,
- base::Unretained(demuxer_.get()),
- DemuxerStream::VIDEO)));
+ VideoDecoderJob::Create(
+ video_codec_,
+ is_secure,
+ gfx::Size(width_, height_),
+ surface_.j_surface().obj(),
+ media_crypto.obj(),
+ base::Bind(&DemuxerAndroid::RequestDemuxerData,
+ base::Unretained(demuxer_.get()),
+ DemuxerStream::VIDEO),
+ base::Bind(request_media_resources_cb_, player_id()),
+ base::Bind(release_media_resources_cb_, player_id())));
if (!video_decoder_job_)
return;
diff --git a/media/base/android/media_source_player.h b/media/base/android/media_source_player.h
index f6d99a6567..b3af1be238 100644
--- a/media/base/android/media_source_player.h
+++ b/media/base/android/media_source_player.h
@@ -41,14 +41,11 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// the lifetime of this object.
MediaSourcePlayer(int player_id,
MediaPlayerManager* manager,
+ const RequestMediaResourcesCB& request_media_resources_cb,
+ const ReleaseMediaResourcesCB& release_media_resources_cb,
scoped_ptr<DemuxerAndroid> demuxer);
virtual ~MediaSourcePlayer();
- static bool IsTypeSupported(const std::vector<uint8>& scheme_uuid,
- MediaDrmBridge::SecurityLevel security_level,
- const std::string& container,
- const std::vector<std::string>& codecs);
-
// MediaPlayerAndroid implementation.
virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) OVERRIDE;
virtual void Start() OVERRIDE;
@@ -67,6 +64,7 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
virtual bool IsPlayerReady() OVERRIDE;
virtual void SetDrmBridge(MediaDrmBridge* drm_bridge) OVERRIDE;
virtual void OnKeyAdded() OVERRIDE;
+ virtual bool IsSurfaceInUse() const OVERRIDE;
// DemuxerAndroidClient implementation.
virtual void OnDemuxerConfigsAvailable(const DemuxerConfigs& params) OVERRIDE;
@@ -281,6 +279,9 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// Test-only callback for hooking the completion of the next decode cycle.
base::Closure decode_callback_for_testing_;
+ // Whether |surface_| is currently used by the player.
+ bool is_surface_in_use_;
+
friend class MediaSourcePlayerTest;
DISALLOW_COPY_AND_ASSIGN(MediaSourcePlayer);
};
diff --git a/media/base/android/media_source_player_unittest.cc b/media/base/android/media_source_player_unittest.cc
index c07210be43..9cb04b1829 100644
--- a/media/base/android/media_source_player_unittest.cc
+++ b/media/base/android/media_source_player_unittest.cc
@@ -5,6 +5,7 @@
#include <string>
#include "base/basictypes.h"
+#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/stringprintf.h"
#include "media/base/android/media_codec_bridge.h"
@@ -30,27 +31,20 @@ namespace media {
const int kDefaultDurationInMs = 10000;
-const char kAudioMp4[] = "audio/mp4";
-const char kVideoMp4[] = "video/mp4";
-const char kAudioWebM[] = "audio/webm";
-const char kVideoWebM[] = "video/webm";
-const MediaDrmBridge::SecurityLevel kL1 = MediaDrmBridge::SECURITY_LEVEL_1;
-const MediaDrmBridge::SecurityLevel kL3 = MediaDrmBridge::SECURITY_LEVEL_3;
-
// TODO(wolenetz/qinmin): Simplify tests with more effective mock usage, and
// fix flaky pointer-based MDJ inequality testing. See http://crbug.com/327839.
-// Mock of MediaPlayerManager for testing purpose
+// Mock of MediaPlayerManager for testing purpose.
class MockMediaPlayerManager : public MediaPlayerManager {
public:
explicit MockMediaPlayerManager(base::MessageLoop* message_loop)
: message_loop_(message_loop),
- playback_completed_(false) {}
+ playback_completed_(false),
+ num_resources_requested_(0),
+ num_resources_released_(0) {}
virtual ~MockMediaPlayerManager() {}
// MediaPlayerManager implementation.
- virtual void RequestMediaResources(int player_id) OVERRIDE {}
- virtual void ReleaseMediaResources(int player_id) OVERRIDE {}
virtual MediaResourceGetter* GetMediaResourceGetter() OVERRIDE {
return NULL;
}
@@ -74,31 +68,49 @@ class MockMediaPlayerManager : public MediaPlayerManager {
virtual MediaPlayerAndroid* GetFullscreenPlayer() OVERRIDE { return NULL; }
virtual MediaPlayerAndroid* GetPlayer(int player_id) OVERRIDE { return NULL; }
virtual void DestroyAllMediaPlayers() OVERRIDE {}
- virtual MediaDrmBridge* GetDrmBridge(int media_keys_id) OVERRIDE {
- return NULL;
- }
+ virtual MediaDrmBridge* GetDrmBridge(int cdm_id) OVERRIDE { return NULL; }
virtual void OnProtectedSurfaceRequested(int player_id) OVERRIDE {}
- virtual void OnSessionCreated(int media_keys_id,
+ virtual void OnSessionCreated(int cdm_id,
uint32 session_id,
const std::string& web_session_id) OVERRIDE {}
- virtual void OnSessionMessage(int media_keys_id,
+ virtual void OnSessionMessage(int cdm_id,
uint32 session_id,
const std::vector<uint8>& message,
const GURL& destination_url) OVERRIDE {}
- virtual void OnSessionReady(int media_keys_id, uint32 session_id) OVERRIDE {}
- virtual void OnSessionClosed(int media_keys_id, uint32 session_id) OVERRIDE {}
- virtual void OnSessionError(int media_keys_id,
+ virtual void OnSessionReady(int cdm_id, uint32 session_id) OVERRIDE {}
+ virtual void OnSessionClosed(int cdm_id, uint32 session_id) OVERRIDE {}
+ virtual void OnSessionError(int cdm_id,
uint32 session_id,
media::MediaKeys::KeyError error_code,
- int system_code) OVERRIDE {}
+ uint32 system_code) OVERRIDE {}
bool playback_completed() const {
return playback_completed_;
}
+ int num_resources_requested() const {
+ return num_resources_requested_;
+ }
+
+ int num_resources_released() const {
+ return num_resources_released_;
+ }
+
+ void OnMediaResourcesRequested(int player_id) {
+ num_resources_requested_++;
+ }
+
+ void OnMediaResourcesReleased(int player_id) {
+ num_resources_released_++;
+ }
+
private:
base::MessageLoop* message_loop_;
bool playback_completed_;
+ // The number of resource requests this object has seen.
+ int num_resources_requested_;
+ // The number of resource releases this object has seen.
+ int num_resources_released_;
DISALLOW_COPY_AND_ASSIGN(MockMediaPlayerManager);
};
@@ -157,7 +169,12 @@ class MediaSourcePlayerTest : public testing::Test {
MediaSourcePlayerTest()
: manager_(&message_loop_),
demuxer_(new MockDemuxerAndroid(&message_loop_)),
- player_(0, &manager_, scoped_ptr<DemuxerAndroid>(demuxer_)),
+ player_(0, &manager_,
+ base::Bind(&MockMediaPlayerManager::OnMediaResourcesRequested,
+ base::Unretained(&manager_)),
+ base::Bind(&MockMediaPlayerManager::OnMediaResourcesReleased,
+ base::Unretained(&manager_)),
+ scoped_ptr<DemuxerAndroid>(demuxer_)),
decoder_callback_hook_executed_(false),
surface_texture_a_is_next_(true) {}
virtual ~MediaSourcePlayerTest() {}
@@ -587,10 +604,10 @@ class MediaSourcePlayerTest : public testing::Test {
void CreateNextTextureAndSetVideoSurface() {
gfx::SurfaceTexture* surface_texture;
if (surface_texture_a_is_next_) {
- surface_texture_a_ = new gfx::SurfaceTexture(next_texture_id_++);
+ surface_texture_a_ = gfx::SurfaceTexture::Create(next_texture_id_++);
surface_texture = surface_texture_a_.get();
} else {
- surface_texture_b_ = new gfx::SurfaceTexture(next_texture_id_++);
+ surface_texture_b_ = gfx::SurfaceTexture::Create(next_texture_id_++);
surface_texture = surface_texture_b_.get();
}
@@ -709,14 +726,6 @@ class MediaSourcePlayerTest : public testing::Test {
return player_.start_time_ticks_;
}
- bool IsTypeSupported(const std::vector<uint8>& scheme_uuid,
- MediaDrmBridge::SecurityLevel security_level,
- const std::string& container,
- const std::vector<std::string>& codecs) {
- return MediaSourcePlayer::IsTypeSupported(
- scheme_uuid, security_level, container, codecs);
- }
-
base::MessageLoop message_loop_;
MockMediaPlayerManager manager_;
MockDemuxerAndroid* demuxer_; // Owned by |player_|.
@@ -794,7 +803,7 @@ TEST_F(MediaSourcePlayerTest, StartVideoCodecWithInvalidSurface) {
// Test video decoder job will not be created when surface is invalid.
scoped_refptr<gfx::SurfaceTexture> surface_texture(
- new gfx::SurfaceTexture(0));
+ gfx::SurfaceTexture::Create(0));
gfx::ScopedJavaSurface surface(surface_texture.get());
StartVideoDecoderJob(false);
@@ -886,6 +895,63 @@ TEST_F(MediaSourcePlayerTest, ChangeMultipleSurfaceWhileDecoding) {
EXPECT_EQ(1, demuxer_->num_seek_requests());
}
+TEST_F(MediaSourcePlayerTest, SetEmptySurfaceAndStarveWhileDecoding) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test player pauses if an empty surface is passed.
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob(true);
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+
+ // Send the first input chunk.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+
+ // While the decoder is decoding, pass an empty surface.
+ gfx::ScopedJavaSurface empty_surface;
+ player_.SetVideoSurface(empty_surface.Pass());
+
+ // Let the player starve. However, it should not issue any new data
+ // requests in this case.
+ TriggerPlayerStarvation();
+ // Wait for the decoder job to finish decoding and be reset.
+ while (GetMediaDecoderJob(false))
+ message_loop_.RunUntilIdle();
+
+ // No further seek or data requests should have been received since the
+ // surface is empty.
+ EXPECT_EQ(0, demuxer_->num_browser_seek_requests());
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+
+ // Playback resumes once a non-empty surface is passed.
+ CreateNextTextureAndSetVideoSurface();
+ EXPECT_EQ(1, demuxer_->num_browser_seek_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, ReleaseVideoDecoderResourcesWhileDecoding) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that if the video decoder is released while decoding, its resources
+ // are not released immediately.
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob(true);
+ EXPECT_EQ(1, manager_.num_resources_requested());
+ ReleasePlayer();
+ // The resources will be immediately released since the decoder is idle.
+ EXPECT_EQ(1, manager_.num_resources_released());
+
+ // Recreate the video decoder.
+ CreateNextTextureAndSetVideoSurface();
+ player_.Start();
+ EXPECT_EQ(2, manager_.num_resources_requested());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ ReleasePlayer();
+ // The resources are still held by the video decoder until it finishes
+ // decoding.
+ EXPECT_EQ(1, manager_.num_resources_released());
+ // Wait for the decoder job to finish decoding and be reset.
+ while (manager_.num_resources_released() != 2)
+ message_loop_.RunUntilIdle();
+}
+
TEST_F(MediaSourcePlayerTest, AudioOnlyStartAfterSeekFinish) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
@@ -1996,77 +2062,4 @@ TEST_F(MediaSourcePlayerTest, SurfaceChangeClearedEvenIfMediaCryptoAbsent) {
EXPECT_FALSE(GetMediaDecoderJob(false));
}
-// TODO(xhwang): Enable this test when the test devices are updated.
-TEST_F(MediaSourcePlayerTest, DISABLED_IsTypeSupported_Widevine) {
- if (!MediaCodecBridge::IsAvailable() || !MediaDrmBridge::IsAvailable()) {
- VLOG(0) << "Could not run test - not supported on device.";
- return;
- }
-
- uint8 kWidevineUUID[] = { 0xED, 0xEF, 0x8B, 0xA9, 0x79, 0xD6, 0x4A, 0xCE,
- 0xA3, 0xC8, 0x27, 0xDC, 0xD5, 0x1D, 0x21, 0xED };
-
- std::vector<uint8> widevine_uuid(kWidevineUUID,
- kWidevineUUID + arraysize(kWidevineUUID));
-
- // We test "L3" fully. But for "L1" we don't check the result as it depend on
- // whether the test device supports "L1" decoding.
-
- std::vector<std::string> codec_avc(1, "avc1");
- std::vector<std::string> codec_aac(1, "mp4a");
- std::vector<std::string> codec_avc_aac(1, "avc1");
- codec_avc_aac.push_back("mp4a");
-
- EXPECT_TRUE(IsTypeSupported(widevine_uuid, kL3, kVideoMp4, codec_avc));
- IsTypeSupported(widevine_uuid, kL1, kVideoMp4, codec_avc);
-
- // TODO(xhwang): L1/L3 doesn't apply to audio, so the result is messy.
- // Clean this up after we have a solution to specifying decoding mode.
- EXPECT_TRUE(IsTypeSupported(widevine_uuid, kL3, kAudioMp4, codec_aac));
- IsTypeSupported(widevine_uuid, kL1, kAudioMp4, codec_aac);
-
- EXPECT_TRUE(IsTypeSupported(widevine_uuid, kL3, kVideoMp4, codec_avc_aac));
- IsTypeSupported(widevine_uuid, kL1, kVideoMp4, codec_avc_aac);
-
- std::vector<std::string> codec_vp8(1, "vp8");
- std::vector<std::string> codec_vorbis(1, "vorbis");
- std::vector<std::string> codec_vp8_vorbis(1, "vp8");
- codec_vp8_vorbis.push_back("vorbis");
-
- // TODO(xhwang): WebM is actually not supported but currently
- // MediaDrmBridge.isCryptoSchemeSupported() doesn't check the container type.
- // Fix isCryptoSchemeSupported() and update this test as necessary.
- EXPECT_TRUE(IsTypeSupported(widevine_uuid, kL3, kVideoWebM, codec_vp8));
- IsTypeSupported(widevine_uuid, kL1, kVideoWebM, codec_vp8);
-
- // TODO(xhwang): L1/L3 doesn't apply to audio, so the result is messy.
- // Clean this up after we have a solution to specifying decoding mode.
- EXPECT_TRUE(IsTypeSupported(widevine_uuid, kL3, kAudioWebM, codec_vorbis));
- IsTypeSupported(widevine_uuid, kL1, kAudioWebM, codec_vorbis);
-
- EXPECT_TRUE(
- IsTypeSupported(widevine_uuid, kL3, kVideoWebM, codec_vp8_vorbis));
- IsTypeSupported(widevine_uuid, kL1, kVideoWebM, codec_vp8_vorbis);
-}
-
-TEST_F(MediaSourcePlayerTest, IsTypeSupported_InvalidUUID) {
- if (!MediaCodecBridge::IsAvailable() || !MediaDrmBridge::IsAvailable()) {
- VLOG(0) << "Could not run test - not supported on device.";
- return;
- }
-
- uint8 kInvalidUUID[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
- 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF };
-
- std::vector<uint8> invalid_uuid(kInvalidUUID,
- kInvalidUUID + arraysize(kInvalidUUID));
-
- std::vector<std::string> codec_avc(1, "avc1");
- EXPECT_FALSE(IsTypeSupported(invalid_uuid, kL3, kVideoMp4, codec_avc));
- EXPECT_FALSE(IsTypeSupported(invalid_uuid, kL1, kVideoMp4, codec_avc));
-}
-
-// TODO(xhwang): Are these IsTypeSupported tests device specific?
-// TODO(xhwang): Add more IsTypeSupported tests.
-
} // namespace media
diff --git a/media/base/android/video_decoder_job.cc b/media/base/android/video_decoder_job.cc
index 75124e7d0d..12ab4414e3 100644
--- a/media/base/android/video_decoder_job.cc
+++ b/media/base/android/video_decoder_job.cc
@@ -24,16 +24,20 @@ class VideoDecoderThread : public base::Thread {
base::LazyInstance<VideoDecoderThread>::Leaky
g_video_decoder_thread = LAZY_INSTANCE_INITIALIZER;
-VideoDecoderJob* VideoDecoderJob::Create(const VideoCodec video_codec,
- bool is_secure,
- const gfx::Size& size,
- jobject surface,
- jobject media_crypto,
- const base::Closure& request_data_cb) {
+VideoDecoderJob* VideoDecoderJob::Create(
+ const VideoCodec video_codec,
+ bool is_secure,
+ const gfx::Size& size,
+ jobject surface,
+ jobject media_crypto,
+ const base::Closure& request_data_cb,
+ const base::Closure& request_resources_cb,
+ const base::Closure& release_resources_cb) {
scoped_ptr<VideoCodecBridge> codec(VideoCodecBridge::CreateDecoder(
video_codec, is_secure, size, surface, media_crypto));
if (codec)
- return new VideoDecoderJob(codec.Pass(), request_data_cb);
+ return new VideoDecoderJob(codec.Pass(), request_data_cb,
+ request_resources_cb, release_resources_cb);
LOG(ERROR) << "Failed to create VideoDecoderJob.";
return NULL;
@@ -41,13 +45,18 @@ VideoDecoderJob* VideoDecoderJob::Create(const VideoCodec video_codec,
VideoDecoderJob::VideoDecoderJob(
scoped_ptr<VideoCodecBridge> video_codec_bridge,
- const base::Closure& request_data_cb)
+ const base::Closure& request_data_cb,
+ const base::Closure& request_resources_cb,
+ const base::Closure& release_resources_cb)
: MediaDecoderJob(g_video_decoder_thread.Pointer()->message_loop_proxy(),
video_codec_bridge.get(), request_data_cb),
- video_codec_bridge_(video_codec_bridge.Pass()) {
+ video_codec_bridge_(video_codec_bridge.Pass()),
+ release_resources_cb_(release_resources_cb) {
+ request_resources_cb.Run();
}
VideoDecoderJob::~VideoDecoderJob() {
+ release_resources_cb_.Run();
}
void VideoDecoderJob::ReleaseOutputBuffer(
diff --git a/media/base/android/video_decoder_job.h b/media/base/android/video_decoder_job.h
index 41c15edc39..5c98850891 100644
--- a/media/base/android/video_decoder_job.h
+++ b/media/base/android/video_decoder_job.h
@@ -26,16 +26,22 @@ class VideoDecoderJob : public MediaDecoderJob {
// |media_crypto| - Handle to a Java object responsible for decrypting the
// video data.
// |request_data_cb| - Callback used to request more data for the decoder.
+ // |request_resources_cb| - Callback used to request resources.
+ // |release_resources_cb| - Callback used to release resources.
static VideoDecoderJob* Create(const VideoCodec video_codec,
bool is_secure,
const gfx::Size& size,
jobject surface,
jobject media_crypto,
- const base::Closure& request_data_cb);
+ const base::Closure& request_data_cb,
+ const base::Closure& request_resources_cb,
+ const base::Closure& release_resources_cb);
private:
VideoDecoderJob(scoped_ptr<VideoCodecBridge> video_codec_bridge,
- const base::Closure& request_data_cb);
+ const base::Closure& request_data_cb,
+ const base::Closure& request_resources_cb,
+ const base::Closure& release_resources_cb);
// MediaDecoderJob implementation.
virtual void ReleaseOutputBuffer(
@@ -47,6 +53,8 @@ class VideoDecoderJob : public MediaDecoderJob {
virtual bool ComputeTimeToRender() const OVERRIDE;
scoped_ptr<VideoCodecBridge> video_codec_bridge_;
+
+ base::Closure release_resources_cb_;
};
} // namespace media
diff --git a/media/base/audio_buffer_queue_unittest.cc b/media/base/audio_buffer_queue_unittest.cc
index b95bdca145..b765009c3a 100644
--- a/media/base/audio_buffer_queue_unittest.cc
+++ b/media/base/audio_buffer_queue_unittest.cc
@@ -34,12 +34,12 @@ TEST(AudioBufferQueueTest, AppendAndClear) {
const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
EXPECT_EQ(0, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(frames, buffer.frames());
buffer.Clear();
EXPECT_EQ(0, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 20, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(frames, buffer.frames());
}
@@ -51,19 +51,19 @@ TEST(AudioBufferQueueTest, MultipleAppend) {
AudioBufferQueue buffer;
// Append 40 frames in 5 buffers.
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(8, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(16, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(24, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(32, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(40, buffer.frames());
}
@@ -77,7 +77,7 @@ TEST(AudioBufferQueueTest, IteratorCheck) {
// Append 40 frames in 5 buffers. Intersperse ReadFrames() to make the
// iterator is pointing to the correct position.
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 10.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(8, buffer.frames());
@@ -85,10 +85,10 @@ TEST(AudioBufferQueueTest, IteratorCheck) {
EXPECT_EQ(4, buffer.frames());
VerifyResult(bus->channel(0), 4, 10.0f, 1.0f);
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 20.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(12, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 30.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(20, buffer.frames());
@@ -97,10 +97,10 @@ TEST(AudioBufferQueueTest, IteratorCheck) {
EXPECT_EQ(0, buffer.frames());
VerifyResult(bus->channel(0), 4, 34.0f, 1.0f);
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 40.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(8, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 50.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(16, buffer.frames());
@@ -121,7 +121,7 @@ TEST(AudioBufferQueueTest, Seek) {
AudioBufferQueue buffer;
// Add 6 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 1.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(6, buffer.frames());
@@ -143,11 +143,11 @@ TEST(AudioBufferQueueTest, ReadF32) {
AudioBufferQueue buffer;
// Add 76 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 1.0f, 1.0f, 6, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 13.0f, 1.0f, 10, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 33.0f, 1.0f, 60, kNoTime, kNoTime));
EXPECT_EQ(76, buffer.frames());
@@ -182,7 +182,7 @@ TEST(AudioBufferQueueTest, ReadU8) {
AudioBufferQueue buffer;
// Add 4 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 128, 1, frames, kNoTime, kNoTime));
// Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
@@ -204,9 +204,9 @@ TEST(AudioBufferQueueTest, ReadS16) {
AudioBufferQueue buffer;
// Add 24 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatS16, channels, 1, 1, 4, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatS16, channels, 9, 1, 20, kNoTime, kNoTime));
EXPECT_EQ(24, buffer.frames());
@@ -226,9 +226,9 @@ TEST(AudioBufferQueueTest, ReadS32) {
AudioBufferQueue buffer;
// Add 24 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<int32>(
+ buffer.Append(MakeAudioBuffer<int32>(
kSampleFormatS32, channels, 1, 1, 4, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<int32>(
+ buffer.Append(MakeAudioBuffer<int32>(
kSampleFormatS32, channels, 9, 1, 20, kNoTime, kNoTime));
EXPECT_EQ(24, buffer.frames());
@@ -254,9 +254,9 @@ TEST(AudioBufferQueueTest, ReadF32Planar) {
AudioBufferQueue buffer;
// Add 14 frames of data.
- buffer.Append(MakePlanarAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatPlanarF32, channels, 1.0f, 1.0f, 4, kNoTime, kNoTime));
- buffer.Append(MakePlanarAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatPlanarF32, channels, 50.0f, 1.0f, 10, kNoTime, kNoTime));
EXPECT_EQ(14, buffer.frames());
@@ -277,9 +277,9 @@ TEST(AudioBufferQueueTest, ReadS16Planar) {
AudioBufferQueue buffer;
// Add 24 frames of data.
- buffer.Append(MakePlanarAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatPlanarS16, channels, 1, 1, 4, kNoTime, kNoTime));
- buffer.Append(MakePlanarAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatPlanarS16, channels, 100, 5, 20, kNoTime, kNoTime));
EXPECT_EQ(24, buffer.frames());
@@ -301,17 +301,17 @@ TEST(AudioBufferQueueTest, ReadManyChannels) {
AudioBufferQueue buffer;
// Add 76 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 0.0f, 1.0f, 6, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 6.0f * channels, 1.0f, 10, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<float>(kSampleFormatF32,
- channels,
- 16.0f * channels,
- 1.0f,
- 60,
- kNoTime,
- kNoTime));
+ buffer.Append(MakeAudioBuffer<float>(kSampleFormatF32,
+ channels,
+ 16.0f * channels,
+ 1.0f,
+ 60,
+ kNoTime,
+ kNoTime));
EXPECT_EQ(76, buffer.frames());
// Read 3 frames from the buffer. F32 is interleaved, so ch[0] should be
@@ -330,7 +330,7 @@ TEST(AudioBufferQueueTest, Peek) {
AudioBufferQueue buffer;
// Add 60 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 0.0f, 1.0f, 60, kNoTime, kNoTime));
EXPECT_EQ(60, buffer.frames());
@@ -381,7 +381,7 @@ TEST(AudioBufferQueueTest, Time) {
// Add two buffers (second one added later):
// first: start=0s, duration=10s
// second: start=30s, duration=10s
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatS16, channels, 1, 1, 10, start_time1, duration));
EXPECT_EQ(10, buffer.frames());
@@ -399,7 +399,7 @@ TEST(AudioBufferQueueTest, Time) {
buffer.current_time());
// Add second buffer for more data.
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatS16, channels, 1, 1, 10, start_time2, duration));
EXPECT_EQ(16, buffer.frames());
@@ -430,9 +430,9 @@ TEST(AudioBufferQueueTest, NoTime) {
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
// Add two buffers with no timestamps. Time should always be unknown.
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatS16, channels, 1, 1, 10, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatS16, channels, 1, 1, 10, kNoTime, kNoTime));
EXPECT_EQ(20, buffer.frames());
diff --git a/media/base/audio_buffer_unittest.cc b/media/base/audio_buffer_unittest.cc
index 473778a6b5..15f6416bcd 100644
--- a/media/base/audio_buffer_unittest.cc
+++ b/media/base/audio_buffer_unittest.cc
@@ -28,7 +28,7 @@ TEST(AudioBufferTest, CopyFrom) {
const int frames = 8;
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<uint8>(
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 1, 1, frames, start_time, duration);
EXPECT_EQ(frames, buffer->frame_count());
EXPECT_EQ(buffer->timestamp(), start_time);
@@ -63,7 +63,7 @@ TEST(AudioBufferTest, ReadU8) {
const int frames = 4;
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<uint8>(
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 128, 1, frames, start_time, duration);
// Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
@@ -83,7 +83,7 @@ TEST(AudioBufferTest, ReadS16) {
const int frames = 10;
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<int16>(
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<int16>(
kSampleFormatS16, channels, 1, 1, frames, start_time, duration);
// Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
@@ -108,7 +108,7 @@ TEST(AudioBufferTest, ReadS32) {
const int frames = 6;
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<int32>(
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<int32>(
kSampleFormatS32, channels, 1, 1, frames, start_time, duration);
// Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
@@ -131,7 +131,7 @@ TEST(AudioBufferTest, ReadF32) {
const int frames = 20;
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<float>(
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<float>(
kSampleFormatF32, channels, 1.0f, 1.0f, frames, start_time, duration);
// Read first 10 frames from the buffer. F32 is interleaved, so ch[0] should
@@ -153,7 +153,7 @@ TEST(AudioBufferTest, ReadS16Planar) {
const int frames = 20;
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakePlanarAudioBuffer<int16>(
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<int16>(
kSampleFormatPlanarS16, channels, 1, 1, frames, start_time, duration);
// Read 6 frames from the buffer. Data is planar, so ch[0] should be 1, 2, 3,
@@ -187,13 +187,13 @@ TEST(AudioBufferTest, ReadF32Planar) {
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer =
- MakePlanarAudioBuffer<float>(kSampleFormatPlanarF32,
- channels,
- 1.0f,
- 1.0f,
- frames,
- start_time,
- duration);
+ MakeAudioBuffer<float>(kSampleFormatPlanarF32,
+ channels,
+ 1.0f,
+ 1.0f,
+ frames,
+ start_time,
+ duration);
// Read all 100 frames from the buffer. F32 is planar, so ch[0] should be 1,
// 2, 3, 4, ..., ch[1] should be 101, 102, 103, ..., and so on for all 4
@@ -241,13 +241,13 @@ TEST(AudioBufferTest, Trim) {
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer =
- MakePlanarAudioBuffer<float>(kSampleFormatPlanarF32,
- channels,
- 1.0f,
- 1.0f,
- frames,
- start_time,
- duration);
+ MakeAudioBuffer<float>(kSampleFormatPlanarF32,
+ channels,
+ 1.0f,
+ 1.0f,
+ frames,
+ start_time,
+ duration);
EXPECT_EQ(frames, buffer->frame_count());
EXPECT_EQ(start_time, buffer->timestamp());
EXPECT_EQ(frames, buffer->duration().InSeconds());
diff --git a/media/base/audio_bus.cc b/media/base/audio_bus.cc
index c61f2a2fe7..e34c748939 100644
--- a/media/base/audio_bus.cc
+++ b/media/base/audio_bus.cc
@@ -333,4 +333,11 @@ void AudioBus::Scale(float volume) {
}
}
+void AudioBus::SwapChannels(int a, int b) {
+ DCHECK(a < channels() && a >= 0);
+ DCHECK(b < channels() && b >= 0);
+ DCHECK_NE(a, b);
+ std::swap(channel_data_[a], channel_data_[b]);
+}
+
} // namespace media
diff --git a/media/base/audio_bus.h b/media/base/audio_bus.h
index 294cf70a7c..56ef9777d8 100644
--- a/media/base/audio_bus.h
+++ b/media/base/audio_bus.h
@@ -104,6 +104,10 @@ class MEDIA_EXPORT AudioBus {
// is provided, no adjustment is done.
void Scale(float volume);
+ // Swaps channels identified by |a| and |b|. The caller needs to make sure
+ // the channels are valid.
+ void SwapChannels(int a, int b);
+
private:
friend struct base::DefaultDeleter<AudioBus>;
~AudioBus();
diff --git a/media/base/audio_decoder.cc b/media/base/audio_decoder.cc
index 939066078c..523fc01f61 100644
--- a/media/base/audio_decoder.cc
+++ b/media/base/audio_decoder.cc
@@ -4,10 +4,16 @@
#include "media/base/audio_decoder.h"
+#include "media/base/audio_buffer.h"
+
namespace media {
AudioDecoder::AudioDecoder() {}
AudioDecoder::~AudioDecoder() {}
+scoped_refptr<AudioBuffer> AudioDecoder::GetDecodeOutput() {
+ return NULL;
+}
+
} // namespace media
diff --git a/media/base/audio_decoder.h b/media/base/audio_decoder.h
index 5aefb84aa6..901126d45b 100644
--- a/media/base/audio_decoder.h
+++ b/media/base/audio_decoder.h
@@ -7,9 +7,11 @@
#include "base/callback.h"
#include "base/memory/ref_counted.h"
+#include "media/base/audio_decoder_config.h"
#include "media/base/channel_layout.h"
-#include "media/base/pipeline_status.h"
+#include "media/base/decoder_buffer.h"
#include "media/base/media_export.h"
+#include "media/base/pipeline_status.h"
namespace media {
@@ -18,11 +20,15 @@ class DemuxerStream;
class MEDIA_EXPORT AudioDecoder {
public:
- // Status codes for read operations.
+ // Status codes for decode operations.
+ // TODO(rileya): Now that both AudioDecoder and VideoDecoder Status enums
+ // match, break them into a decoder_status.h.
enum Status {
- kOk,
- kAborted,
- kDecodeError,
+ kOk, // We're all good.
+ kAborted, // We aborted as a result of Stop() or Reset().
+ kNotEnoughData, // Not enough data to produce an audio buffer.
+ kDecodeError, // A decoding error occurred.
+ kDecryptError // A decryption error occurred.
};
AudioDecoder();
@@ -31,23 +37,27 @@ class MEDIA_EXPORT AudioDecoder {
- // Initializes an AudioDecoder with the given DemuxerStream, executing the
- // callback upon completion.
- // statistics_cb is used to update global pipeline statistics.
+ // Initializes an AudioDecoder with the given config, executing the
+ // callback upon completion.
- virtual void Initialize(DemuxerStream* stream,
- const PipelineStatusCB& status_cb,
- const StatisticsCB& statistics_cb) = 0;
+ virtual void Initialize(const AudioDecoderConfig& config,
+ const PipelineStatusCB& status_cb) = 0;
// Requests samples to be decoded and returned via the provided callback.
- // Only one read may be in flight at any given time.
+ // Only one decode may be in flight at any given time.
//
// Implementations guarantee that the callback will not be called from within
// this method.
//
// Non-NULL sample buffer pointers will contain decoded audio data or may
// indicate the end of the stream. A NULL buffer pointer indicates an aborted
- // Read(). This can happen if the DemuxerStream gets flushed and doesn't have
- // any more data to return.
+ // Decode().
typedef base::Callback<void(Status, const scoped_refptr<AudioBuffer>&)>
- ReadCB;
- virtual void Read(const ReadCB& read_cb) = 0;
+ DecodeCB;
+ virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
+ const DecodeCB& decode_cb) = 0;
+
+ // Some AudioDecoders will queue up multiple AudioBuffers from a single
+ // DecoderBuffer. If any such buffers are queued, this returns the next one;
+ // otherwise it returns a NULL AudioBuffer.
+ virtual scoped_refptr<AudioBuffer> GetDecodeOutput();
// Resets decoder state, dropping any queued encoded data.
virtual void Reset(const base::Closure& closure) = 0;
diff --git a/media/base/audio_decoder_config.cc b/media/base/audio_decoder_config.cc
index dfaf94a268..d83a41e6ab 100644
--- a/media/base/audio_decoder_config.cc
+++ b/media/base/audio_decoder_config.cc
@@ -48,15 +48,15 @@ void AudioDecoderConfig::Initialize(AudioCodec codec,
CHECK((extra_data_size != 0) == (extra_data != NULL));
if (record_stats) {
- UMA_HISTOGRAM_ENUMERATION("Media.AudioCodec", codec, kAudioCodecMax);
+ UMA_HISTOGRAM_ENUMERATION("Media.AudioCodec", codec, kAudioCodecMax + 1);
UMA_HISTOGRAM_ENUMERATION("Media.AudioSampleFormat", sample_format,
- kSampleFormatMax);
+ kSampleFormatMax + 1);
UMA_HISTOGRAM_ENUMERATION("Media.AudioChannelLayout", channel_layout,
- CHANNEL_LAYOUT_MAX);
- AudioSampleRate asr = media::AsAudioSampleRate(samples_per_second);
- if (asr != kUnexpectedAudioSampleRate) {
+ CHANNEL_LAYOUT_MAX + 1);
+ AudioSampleRate asr;
+ if (ToAudioSampleRate(samples_per_second, &asr)) {
UMA_HISTOGRAM_ENUMERATION("Media.AudioSamplesPerSecond", asr,
- kUnexpectedAudioSampleRate);
+ kAudioSampleRateMax + 1);
} else {
UMA_HISTOGRAM_COUNTS(
"Media.AudioSamplesPerSecondUnexpected", samples_per_second);
diff --git a/media/base/audio_decoder_config.h b/media/base/audio_decoder_config.h
index 77d4fc28ad..f73441b6b0 100644
--- a/media/base/audio_decoder_config.h
+++ b/media/base/audio_decoder_config.h
@@ -18,7 +18,8 @@ namespace media {
enum AudioCodec {
// These values are histogrammed over time; do not change their ordinal
// values. When deleting a codec replace it with a dummy value; when adding a
- // codec, do so at the bottom before kAudioCodecMax.
+ // codec, do so at the bottom before kAudioCodecMax, and update the value of
+ // kAudioCodecMax to equal the new codec.
kUnknownAudioCodec = 0,
kCodecAAC = 1,
kCodecMP3 = 2,
@@ -39,8 +40,8 @@ enum AudioCodec {
// The only acceptable time to add a new codec is if there is production code
// that uses said codec in the same CL.
- // Must always be last!
- kAudioCodecMax
+ // Must always be equal to the largest entry ever logged.
+ kAudioCodecMax = kCodecPCM_ALAW,
};
// TODO(dalecurtis): FFmpeg API uses |bytes_per_channel| instead of
diff --git a/media/base/audio_splicer.cc b/media/base/audio_splicer.cc
index 14b4199e0e..408e69eb1f 100644
--- a/media/base/audio_splicer.cc
+++ b/media/base/audio_splicer.cc
@@ -5,12 +5,15 @@
#include "media/base/audio_splicer.h"
#include <cstdlib>
+#include <deque>
#include "base/logging.h"
#include "media/base/audio_buffer.h"
+#include "media/base/audio_bus.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/buffers.h"
+#include "media/base/vector_math.h"
namespace media {
@@ -20,22 +23,117 @@ namespace media {
// roughly represents the duration of 2 compressed AAC or MP3 frames.
static const int kMaxTimeDeltaInMilliseconds = 50;
-AudioSplicer::AudioSplicer(int samples_per_second)
- : output_timestamp_helper_(samples_per_second),
- min_gap_size_(2),
- received_end_of_stream_(false) {
+// Minimum gap size needed before the splicer will take action to
+// fill a gap. This avoids periodically inserting and then dropping samples
+// when the buffer timestamps are slightly off because of timestamp rounding
+// in the source content. Unit is frames.
+static const int kMinGapSize = 2;
+
+// The number of milliseconds to crossfade before trimming when buffers overlap.
+static const int kCrossfadeDurationInMilliseconds = 5;
+
+// AudioBuffer::TrimStart() is not as accurate as the timestamp helper, so
+// manually adjust the duration and timestamp after trimming.
+static void AccurateTrimStart(int frames_to_trim,
+ const scoped_refptr<AudioBuffer>& buffer,
+ const AudioTimestampHelper& timestamp_helper) {
+ buffer->TrimStart(frames_to_trim);
+ buffer->set_timestamp(timestamp_helper.GetTimestamp());
+ buffer->set_duration(
+ timestamp_helper.GetFrameDuration(buffer->frame_count()));
}
-AudioSplicer::~AudioSplicer() {
+// AudioBuffer::TrimEnd() is not as accurate as the timestamp helper, so
+// manually adjust the duration after trimming.
+static void AccurateTrimEnd(int frames_to_trim,
+ const scoped_refptr<AudioBuffer>& buffer,
+ const AudioTimestampHelper& timestamp_helper) {
+ DCHECK(buffer->timestamp() == timestamp_helper.GetTimestamp());
+ buffer->TrimEnd(frames_to_trim);
+ buffer->set_duration(
+ timestamp_helper.GetFrameDuration(buffer->frame_count()));
}
-void AudioSplicer::Reset() {
- output_timestamp_helper_.SetBaseTimestamp(kNoTimestamp());
+// Returns an AudioBus whose frame buffer is backed by the provided AudioBuffer.
+static scoped_ptr<AudioBus> CreateAudioBufferWrapper(
+ const scoped_refptr<AudioBuffer>& buffer) {
+ scoped_ptr<AudioBus> wrapper =
+ AudioBus::CreateWrapper(buffer->channel_count());
+ wrapper->set_frames(buffer->frame_count());
+ for (int ch = 0; ch < buffer->channel_count(); ++ch) {
+ wrapper->SetChannelData(
+ ch, reinterpret_cast<float*>(buffer->channel_data()[ch]));
+ }
+ return wrapper.Pass();
+}
+
+class AudioStreamSanitizer {
+ public:
+ explicit AudioStreamSanitizer(int samples_per_second);
+ ~AudioStreamSanitizer();
+
+ // Resets the sanitizer state by clearing the output buffer queue and
+ // resetting the timestamp helper.
+ void Reset();
+
+ // Similar to Reset(), but initializes the timestamp helper with the given
+ // parameters.
+ void ResetTimestampState(int64 frame_count, base::TimeDelta base_timestamp);
+
+ // Adds a new buffer full of samples or an end of stream buffer to the
+ // sanitizer. Returns true if the buffer was accepted; false is returned if
+ // an error occurred.
+ bool AddInput(const scoped_refptr<AudioBuffer>& input);
+
+ // Returns true if the sanitizer has a buffer to return.
+ bool HasNextBuffer() const;
+
+ // Removes the next buffer from the output buffer queue and returns it; should
+ // only be called if HasNextBuffer() returns true.
+ scoped_refptr<AudioBuffer> GetNextBuffer();
+
+ // Returns the total frame count of all buffers available for output.
+ int GetFrameCount() const;
+
+ // Returns the duration of all buffers added to the output queue thus far.
+ base::TimeDelta GetDuration() const;
+
+ const AudioTimestampHelper& timestamp_helper() {
+ return output_timestamp_helper_;
+ }
+
+ private:
+ void AddOutputBuffer(const scoped_refptr<AudioBuffer>& buffer);
+
+ AudioTimestampHelper output_timestamp_helper_;
+ bool received_end_of_stream_;
+
+ typedef std::deque<scoped_refptr<AudioBuffer> > BufferQueue;
+ BufferQueue output_buffers_;
+
+ DISALLOW_ASSIGN(AudioStreamSanitizer);
+};
+
+AudioStreamSanitizer::AudioStreamSanitizer(int samples_per_second)
+ : output_timestamp_helper_(samples_per_second),
+ received_end_of_stream_(false) {}
+
+AudioStreamSanitizer::~AudioStreamSanitizer() {}
+
+void AudioStreamSanitizer::Reset() {
+ ResetTimestampState(0, kNoTimestamp());
+}
+
+void AudioStreamSanitizer::ResetTimestampState(int64 frame_count,
+ base::TimeDelta base_timestamp) {
output_buffers_.clear();
received_end_of_stream_ = false;
+ output_timestamp_helper_.SetBaseTimestamp(base_timestamp);
+ if (frame_count > 0)
+ output_timestamp_helper_.AddFrames(frame_count);
}
-bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
+bool AudioStreamSanitizer::AddInput(const scoped_refptr<AudioBuffer>& input) {
DCHECK(!received_end_of_stream_ || input->end_of_stream());
if (input->end_of_stream()) {
@@ -56,9 +154,10 @@ bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
return false;
}
- base::TimeDelta timestamp = input->timestamp();
- base::TimeDelta expected_timestamp = output_timestamp_helper_.GetTimestamp();
- base::TimeDelta delta = timestamp - expected_timestamp;
+ const base::TimeDelta timestamp = input->timestamp();
+ const base::TimeDelta expected_timestamp =
+ output_timestamp_helper_.GetTimestamp();
+ const base::TimeDelta delta = timestamp - expected_timestamp;
if (std::abs(delta.InMilliseconds()) > kMaxTimeDeltaInMilliseconds) {
DVLOG(1) << "Timestamp delta too large: " << delta.InMicroseconds() << "us";
@@ -69,7 +168,7 @@ bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
if (delta != base::TimeDelta())
frames_to_fill = output_timestamp_helper_.GetFramesToTarget(timestamp);
- if (frames_to_fill == 0 || std::abs(frames_to_fill) < min_gap_size_) {
+ if (frames_to_fill == 0 || std::abs(frames_to_fill) < kMinGapSize) {
AddOutputBuffer(input);
return true;
}
@@ -92,39 +191,314 @@ bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
return true;
}
- int frames_to_skip = -frames_to_fill;
-
+ // Overlapping buffers marked as splice frames are handled by AudioSplicer,
+ // but decoder and demuxer quirks may sometimes produce overlapping samples
+ // which need to be sanitized.
+ //
+ // A crossfade can't be done here because only the current buffer is available
+ // at this point, not previous buffers.
DVLOG(1) << "Overlap detected @ " << expected_timestamp.InMicroseconds()
- << " us: " << -delta.InMicroseconds() << " us";
+ << " us: " << -delta.InMicroseconds() << " us";
+ const int frames_to_skip = -frames_to_fill;
if (input->frame_count() <= frames_to_skip) {
DVLOG(1) << "Dropping whole buffer";
return true;
}
// Copy the trailing samples that do not overlap samples already output
- // into a new buffer. Add this new buffer to the output queue.
+ // into a new buffer. Add this new buffer to the output queue.
//
// TODO(acolwell): Implement a cross-fade here so the transition is less
// jarring.
- input->TrimStart(frames_to_skip);
+ AccurateTrimStart(frames_to_skip, input, output_timestamp_helper_);
AddOutputBuffer(input);
return true;
}
-bool AudioSplicer::HasNextBuffer() const {
+bool AudioStreamSanitizer::HasNextBuffer() const {
return !output_buffers_.empty();
}
-scoped_refptr<AudioBuffer> AudioSplicer::GetNextBuffer() {
+scoped_refptr<AudioBuffer> AudioStreamSanitizer::GetNextBuffer() {
scoped_refptr<AudioBuffer> ret = output_buffers_.front();
output_buffers_.pop_front();
return ret;
}
-void AudioSplicer::AddOutputBuffer(const scoped_refptr<AudioBuffer>& buffer) {
+void AudioStreamSanitizer::AddOutputBuffer(
+ const scoped_refptr<AudioBuffer>& buffer) {
output_timestamp_helper_.AddFrames(buffer->frame_count());
output_buffers_.push_back(buffer);
}
+int AudioStreamSanitizer::GetFrameCount() const {
+ int frame_count = 0;
+ for (BufferQueue::const_iterator it = output_buffers_.begin();
+ it != output_buffers_.end(); ++it) {
+ frame_count += (*it)->frame_count();
+ }
+ return frame_count;
+}
+
+base::TimeDelta AudioStreamSanitizer::GetDuration() const {
+ DCHECK(output_timestamp_helper_.base_timestamp() != kNoTimestamp());
+ return output_timestamp_helper_.GetTimestamp() -
+ output_timestamp_helper_.base_timestamp();
+}
+
+AudioSplicer::AudioSplicer(int samples_per_second)
+ : max_crossfade_duration_(
+ base::TimeDelta::FromMilliseconds(kCrossfadeDurationInMilliseconds)),
+ splice_timestamp_(kNoTimestamp()),
+ output_sanitizer_(new AudioStreamSanitizer(samples_per_second)),
+ pre_splice_sanitizer_(new AudioStreamSanitizer(samples_per_second)),
+ post_splice_sanitizer_(new AudioStreamSanitizer(samples_per_second)) {}
+
+AudioSplicer::~AudioSplicer() {}
+
+void AudioSplicer::Reset() {
+ output_sanitizer_->Reset();
+ pre_splice_sanitizer_->Reset();
+ post_splice_sanitizer_->Reset();
+ splice_timestamp_ = kNoTimestamp();
+}
+
+bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
+ // If we're not processing a splice, add the input to the output queue.
+ if (splice_timestamp_ == kNoTimestamp()) {
+ DCHECK(!pre_splice_sanitizer_->HasNextBuffer());
+ DCHECK(!post_splice_sanitizer_->HasNextBuffer());
+ return output_sanitizer_->AddInput(input);
+ }
+
+ // If we're still receiving buffers before the splice point, figure out
+ // which sanitizer (if any) to put them in.
+ if (!post_splice_sanitizer_->HasNextBuffer()) {
+ DCHECK(!input->end_of_stream());
+
+ // If the provided buffer is entirely before the splice point, it can also
+ // be added to the output queue.
+ if (input->timestamp() + input->duration() < splice_timestamp_) {
+ DCHECK(!pre_splice_sanitizer_->HasNextBuffer());
+ return output_sanitizer_->AddInput(input);
+ }
+
+ // If we've encountered the first pre-splice buffer, reset the pre-splice
+ // sanitizer based on |output_sanitizer_|. This is done so that gaps and
+ // overlaps between buffers across the sanitizers are accounted for prior
+ // to calculating the crossfade.
+ if (!pre_splice_sanitizer_->HasNextBuffer()) {
+ pre_splice_sanitizer_->ResetTimestampState(
+ output_sanitizer_->timestamp_helper().frame_count(),
+ output_sanitizer_->timestamp_helper().base_timestamp());
+ }
+
+ // If we're processing a splice and the input buffer does not overlap any of
+ // the existing buffers, append it to the |pre_splice_sanitizer_|.
+ //
+ // The first overlapping buffer is expected to have a timestamp of exactly
+ // |splice_timestamp_|. It's not sufficient to check this though, since in
+ // the case of a perfect overlap, the first pre-splice buffer may have the
+ // same timestamp.
+ //
+ // It's also not sufficient to check if the input timestamp is after the
+ // current expected timestamp from |pre_splice_sanitizer_| since the decoder
+ // may have fuzzed the timestamps slightly.
+ if (!pre_splice_sanitizer_->HasNextBuffer() ||
+ input->timestamp() != splice_timestamp_) {
+ return pre_splice_sanitizer_->AddInput(input);
+ }
+
+ // We've received the first overlapping buffer.
+ } else {
+ // TODO(dalecurtis): The pre-splice assignment process still leaves the
+ // unlikely case that the decoder fuzzes a later pre-splice buffer's
+ // timestamp such that it matches |splice_timestamp_|.
+ //
+ // Watch for these crashes in the field to see if we need a more complicated
+ // assignment process.
+ CHECK(input->timestamp() != splice_timestamp_);
+ }
+
+ // At this point we have all the fade out preroll buffers from the decoder.
+ // We now need to wait until we have enough data to perform the crossfade (or
+ // we receive an end of stream).
+ if (!post_splice_sanitizer_->AddInput(input))
+ return false;
+
+ if (!input->end_of_stream() &&
+ post_splice_sanitizer_->GetDuration() < max_crossfade_duration_) {
+ return true;
+ }
+
+ // Crossfade the pre-splice and post-splice sections and transfer all
+ // relevant buffers into |output_sanitizer_|.
+ CrossfadePostSplice(ExtractCrossfadeFromPreSplice().Pass());
+
+ // Clear the splice timestamp so new splices can be accepted.
+ splice_timestamp_ = kNoTimestamp();
+ return true;
+}
+
+bool AudioSplicer::HasNextBuffer() const {
+ return output_sanitizer_->HasNextBuffer();
+}
+
+scoped_refptr<AudioBuffer> AudioSplicer::GetNextBuffer() {
+ return output_sanitizer_->GetNextBuffer();
+}
+
+void AudioSplicer::SetSpliceTimestamp(base::TimeDelta splice_timestamp) {
+ DCHECK(splice_timestamp != kNoTimestamp());
+ if (splice_timestamp_ == splice_timestamp)
+ return;
+
+ // TODO(dalecurtis): We may need the concept of a future_splice_timestamp_ to
+ // handle cases where another splice comes in before we've received 5ms of
+ // data from the last one. Leave this as a CHECK for now to figure out if
+ // this case is possible.
+ CHECK(splice_timestamp_ == kNoTimestamp());
+ splice_timestamp_ = splice_timestamp;
+}
+
+scoped_ptr<AudioBus> AudioSplicer::ExtractCrossfadeFromPreSplice() {
+ const AudioTimestampHelper& output_ts_helper =
+ output_sanitizer_->timestamp_helper();
+
+ // Ensure |output_sanitizer_| has a valid base timestamp so we can use it for
+ // timestamp calculations.
+ if (output_ts_helper.base_timestamp() == kNoTimestamp()) {
+ output_sanitizer_->ResetTimestampState(
+ 0, pre_splice_sanitizer_->timestamp_helper().base_timestamp());
+ }
+
+ int frames_before_splice =
+ output_ts_helper.GetFramesToTarget(splice_timestamp_);
+
+ // Determine the crossfade frame count based on the available frames in
+ // each sanitizer, capped to the maximum crossfade duration.
+ const int max_crossfade_frame_count =
+ output_ts_helper.GetFramesToTarget(splice_timestamp_ +
+ max_crossfade_duration_) -
+ frames_before_splice;
+ const int frames_to_crossfade = std::min(
+ max_crossfade_frame_count,
+ std::min(pre_splice_sanitizer_->GetFrameCount() - frames_before_splice,
+ post_splice_sanitizer_->GetFrameCount()));
+
+ int frames_read = 0;
+ scoped_ptr<AudioBus> output_bus;
+ while (pre_splice_sanitizer_->HasNextBuffer() &&
+ frames_read < frames_to_crossfade) {
+ scoped_refptr<AudioBuffer> preroll = pre_splice_sanitizer_->GetNextBuffer();
+
+ // We don't know the channel count until we see the first buffer, so defer
+ // allocating the output AudioBus until then.
+ if (!output_bus) {
+ output_bus =
+ AudioBus::Create(preroll->channel_count(), frames_to_crossfade);
+ }
+
+ // There may be enough of a gap introduced during decoding such that an
+ // entire buffer exists before the splice point.
+ if (frames_before_splice >= preroll->frame_count()) {
+ // Adjust the number of frames remaining before the splice. NOTE: This is
+ // safe since |pre_splice_sanitizer_| is a continuation of the timeline in
+ // |output_sanitizer_|. As such we're guaranteed there are no gaps or
+ // overlaps in the timeline between the two sanitizers.
+ frames_before_splice -= preroll->frame_count();
+ CHECK(output_sanitizer_->AddInput(preroll));
+ continue;
+ }
+
+ const int frames_to_read =
+ std::min(preroll->frame_count() - frames_before_splice,
+ output_bus->frames() - frames_read);
+ preroll->ReadFrames(
+ frames_to_read, frames_before_splice, frames_read, output_bus.get());
+ frames_read += frames_to_read;
+
+ // If only part of the buffer was consumed, trim it appropriately and stick
+ // it into the output queue.
+ if (frames_before_splice) {
+ AccurateTrimEnd(preroll->frame_count() - frames_before_splice,
+ preroll,
+ output_ts_helper);
+ CHECK(output_sanitizer_->AddInput(preroll));
+ frames_before_splice = 0;
+ }
+ }
+
+ // All necessary buffers have been processed, it's safe to reset.
+ pre_splice_sanitizer_->Reset();
+ DCHECK_EQ(output_bus->frames(), frames_read);
+ DCHECK_EQ(output_ts_helper.GetFramesToTarget(splice_timestamp_), 0);
+ return output_bus.Pass();
+}
+
+void AudioSplicer::CrossfadePostSplice(scoped_ptr<AudioBus> pre_splice_bus) {
+ // Allocate output buffer for crossfade.
+ scoped_refptr<AudioBuffer> crossfade_buffer =
+ AudioBuffer::CreateBuffer(kSampleFormatPlanarF32,
+ pre_splice_bus->channels(),
+ pre_splice_bus->frames());
+
+ // Use the calculated timestamp and duration to ensure there are no extra
+ // gaps or overlaps to process when adding the buffer to |output_sanitizer_|.
+ const AudioTimestampHelper& output_ts_helper =
+ output_sanitizer_->timestamp_helper();
+ crossfade_buffer->set_timestamp(output_ts_helper.GetTimestamp());
+ crossfade_buffer->set_duration(
+ output_ts_helper.GetFrameDuration(pre_splice_bus->frames()));
+
+ // AudioBuffer::ReadFrames() only allows output into an AudioBus, so wrap
+ // our AudioBuffer in one to avoid extra data copies.
+ scoped_ptr<AudioBus> output_bus = CreateAudioBufferWrapper(crossfade_buffer);
+
+ // Extract crossfade section from the |post_splice_sanitizer_|.
+ int frames_read = 0, frames_to_trim = 0;
+ scoped_refptr<AudioBuffer> remainder;
+ while (post_splice_sanitizer_->HasNextBuffer() &&
+ frames_read < output_bus->frames()) {
+ scoped_refptr<AudioBuffer> postroll =
+ post_splice_sanitizer_->GetNextBuffer();
+ const int frames_to_read =
+ std::min(postroll->frame_count(), output_bus->frames() - frames_read);
+ postroll->ReadFrames(frames_to_read, 0, frames_read, output_bus.get());
+ frames_read += frames_to_read;
+
+ // If only part of the buffer was consumed, save it for after we've added
+ // the crossfade buffer.
+ if (frames_to_read < postroll->frame_count()) {
+ DCHECK(!remainder);
+ remainder.swap(postroll);
+ frames_to_trim = frames_to_read;
+ }
+ }
+
+ DCHECK_EQ(output_bus->frames(), frames_read);
+
+ // Crossfade the audio into |crossfade_buffer|.
+ for (int ch = 0; ch < output_bus->channels(); ++ch) {
+ vector_math::Crossfade(pre_splice_bus->channel(ch),
+ pre_splice_bus->frames(),
+ output_bus->channel(ch));
+ }
+
+ CHECK(output_sanitizer_->AddInput(crossfade_buffer));
+ DCHECK_EQ(crossfade_buffer->frame_count(), output_bus->frames());
+
+ if (remainder) {
+ // Trim off consumed frames.
+ AccurateTrimStart(frames_to_trim, remainder, output_ts_helper);
+ CHECK(output_sanitizer_->AddInput(remainder));
+ }
+
+ // Transfer all remaining buffers out and reset once empty.
+ while (post_splice_sanitizer_->HasNextBuffer())
+ CHECK(output_sanitizer_->AddInput(post_splice_sanitizer_->GetNextBuffer()));
+ post_splice_sanitizer_->Reset();
+}
+
} // namespace media
diff --git a/media/base/audio_splicer.h b/media/base/audio_splicer.h
index 50445b2d54..c20ecc09a7 100644
--- a/media/base/audio_splicer.h
+++ b/media/base/audio_splicer.h
@@ -5,52 +5,85 @@
#ifndef MEDIA_BASE_AUDIO_SPLICER_H_
#define MEDIA_BASE_AUDIO_SPLICER_H_
-#include <deque>
-
#include "base/memory/ref_counted.h"
-#include "media/base/audio_timestamp_helper.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
#include "media/base/media_export.h"
namespace media {
class AudioBuffer;
-class AudioDecoderConfig;
+class AudioBus;
+class AudioStreamSanitizer;
// Helper class that handles filling gaps and resolving overlaps.
class MEDIA_EXPORT AudioSplicer {
public:
- AudioSplicer(int samples_per_second);
+ explicit AudioSplicer(int samples_per_second);
~AudioSplicer();
- // Resets the splicer state by clearing the output buffers queue,
- // and resetting the timestamp helper.
+ // Resets the splicer state by clearing the output buffer queue and
+ // resetting the timestamp helper.
void Reset();
// Adds a new buffer full of samples or end of stream buffer to the splicer.
- // Returns true if the buffer was accepted. False is returned if an error
+ // Returns true if the buffer was accepted. False is returned if an error
// occurred.
bool AddInput(const scoped_refptr<AudioBuffer>& input);
// Returns true if the splicer has a buffer to return.
bool HasNextBuffer() const;
- // Removes the next buffer from the output buffer queue and returns it.
- // This should only be called if HasNextBuffer() returns true.
+ // Removes the next buffer from the output buffer queue and returns it; this
+ // should only be called if HasNextBuffer() returns true.
scoped_refptr<AudioBuffer> GetNextBuffer();
- private:
- void AddOutputBuffer(const scoped_refptr<AudioBuffer>& buffer);
-
- AudioTimestampHelper output_timestamp_helper_;
+ // Indicates that overlapping buffers are coming up and should be crossfaded.
+ // Once set, all buffers encountered after |splice_timestamp| will be queued
+ // internally until at least 5ms of overlapping buffers are received (or end
+ // of stream, whichever comes first).
+ void SetSpliceTimestamp(base::TimeDelta splice_timestamp);
- // Minimum gap size needed before the splicer will take action to
- // fill a gap. This avoids periodically inserting and then dropping samples
- // when the buffer timestamps are slightly off because of timestamp rounding
- // in the source content. Unit is frames.
- int min_gap_size_;
-
- std::deque<scoped_refptr<AudioBuffer> > output_buffers_;
- bool received_end_of_stream_;
+ private:
+ friend class AudioSplicerTest;
+
+ // Extracts frames to be crossfaded from |pre_splice_sanitizer_|. Transfers
+ // all frames before |splice_timestamp_| into |output_sanitizer_| and drops
+ // frames outside of the crossfade duration.
+ //
+ // The size of the returned AudioBus is the crossfade duration in frames.
+ // Crossfade duration is calculated based on the number of frames available
+ // after |splice_timestamp_| in each sanitizer and capped by
+ // |max_crossfade_duration_|.
+ //
+ // |pre_splice_sanitizer_| will be empty after this operation.
+ scoped_ptr<AudioBus> ExtractCrossfadeFromPreSplice();
+
+ // Crossfades |pre_splice_bus->frames()| frames from |post_splice_sanitizer_|
+ // with those from |pre_splice_bus|. Adds the crossfaded buffer to
+ // |output_sanitizer_| along with all buffers in |post_splice_sanitizer_|.
+ //
+ // |post_splice_sanitizer_| will be empty after this operation.
+ void CrossfadePostSplice(scoped_ptr<AudioBus> pre_splice_bus);
+
+ const base::TimeDelta max_crossfade_duration_;
+ base::TimeDelta splice_timestamp_;
+
+ // The various sanitizers for each stage of the crossfade process. Buffers in
+ // |output_sanitizer_| are immediately available for consumption by external
+ // callers.
+ //
+ // Overlapped buffers go into the |pre_splice_sanitizer_| while overlapping
+ // buffers go into the |post_splice_sanitizer_|. Once enough buffers for
+ // crossfading are received, the pre and post sanitizers are drained into
+ // |output_sanitizer_| by ExtractCrossfadeFromPreSplice() and
+ // CrossfadePostSplice() above.
+ //
+ // |pre_splice_sanitizer_| does not accumulate buffers until the first
+ // splice frame is encountered, at which point its timestamp state is reset
+ // from |output_sanitizer_|. It is emptied again once the splice is finished.
+ scoped_ptr<AudioStreamSanitizer> output_sanitizer_;
+ scoped_ptr<AudioStreamSanitizer> pre_splice_sanitizer_;
+ scoped_ptr<AudioStreamSanitizer> post_splice_sanitizer_;
DISALLOW_IMPLICIT_CONSTRUCTORS(AudioSplicer);
};
diff --git a/media/base/audio_splicer_unittest.cc b/media/base/audio_splicer_unittest.cc
index 43902687fa..0acd37e0b8 100644
--- a/media/base/audio_splicer_unittest.cc
+++ b/media/base/audio_splicer_unittest.cc
@@ -13,7 +13,10 @@
namespace media {
+// Do not change this format. AddInput() and GetValue() only work with float.
static const SampleFormat kSampleFormat = kSampleFormatF32;
+COMPILE_ASSERT(kSampleFormat == kSampleFormatF32, invalid_splice_format);
+
static const int kChannels = 1;
static const int kDefaultSampleRate = 44100;
static const int kDefaultBufferSize = 100;
@@ -31,7 +34,7 @@ class AudioSplicerTest : public ::testing::Test {
}
scoped_refptr<AudioBuffer> GetNextInputBuffer(float value, int frame_size) {
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<float>(
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<float>(
kSampleFormat,
kChannels,
value,
@@ -43,17 +46,99 @@ class AudioSplicerTest : public ::testing::Test {
return buffer;
}
- bool VerifyData(scoped_refptr<AudioBuffer> buffer, float value) {
+ float GetValue(const scoped_refptr<AudioBuffer>& buffer) {
+ return reinterpret_cast<const float*>(buffer->channel_data()[0])[0];
+ }
+
+ bool VerifyData(const scoped_refptr<AudioBuffer>& buffer, float value) {
int frames = buffer->frame_count();
scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, frames);
buffer->ReadFrames(frames, 0, 0, bus.get());
- for (int i = 0; i < frames; ++i) {
- if (bus->channel(0)[i] != value)
- return false;
+ for (int ch = 0; ch < buffer->channel_count(); ++ch) {
+ for (int i = 0; i < frames; ++i) {
+ if (bus->channel(ch)[i] != value)
+ return false;
+ }
}
return true;
}
+ void VerifyNextBuffer(const scoped_refptr<AudioBuffer>& input) {
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+ scoped_refptr<AudioBuffer> output = splicer_.GetNextBuffer();
+ EXPECT_EQ(input->timestamp(), output->timestamp());
+ EXPECT_EQ(input->duration(), output->duration());
+ EXPECT_EQ(input->frame_count(), output->frame_count());
+ EXPECT_TRUE(VerifyData(output, GetValue(input)));
+ }
+
+ void VerifyPreSpliceOutput(
+ const scoped_refptr<AudioBuffer>& overlapped_buffer,
+ const scoped_refptr<AudioBuffer>& overlapping_buffer,
+ int expected_pre_splice_size,
+ base::TimeDelta expected_pre_splice_duration) {
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+ scoped_refptr<AudioBuffer> pre_splice_output = splicer_.GetNextBuffer();
+ EXPECT_EQ(overlapped_buffer->timestamp(), pre_splice_output->timestamp());
+ EXPECT_EQ(expected_pre_splice_size, pre_splice_output->frame_count());
+ EXPECT_EQ(expected_pre_splice_duration, pre_splice_output->duration());
+ EXPECT_TRUE(VerifyData(pre_splice_output, GetValue(overlapped_buffer)));
+ }
+
+ void VerifyCrossfadeOutput(
+ const scoped_refptr<AudioBuffer>& overlapped_buffer,
+ const scoped_refptr<AudioBuffer>& overlapping_buffer,
+ int expected_crossfade_size,
+ base::TimeDelta expected_crossfade_duration) {
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+
+ scoped_refptr<AudioBuffer> crossfade_output = splicer_.GetNextBuffer();
+ EXPECT_EQ(expected_crossfade_size, crossfade_output->frame_count());
+ EXPECT_EQ(expected_crossfade_duration, crossfade_output->duration());
+
+ // The splice timestamp may be adjusted by a microsecond.
+ EXPECT_NEAR(overlapping_buffer->timestamp().InMicroseconds(),
+ crossfade_output->timestamp().InMicroseconds(),
+ 1);
+
+ // Verify the actual crossfade.
+ const int frames = crossfade_output->frame_count();
+ const float overlapped_value = GetValue(overlapped_buffer);
+ const float overlapping_value = GetValue(overlapping_buffer);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, frames);
+ crossfade_output->ReadFrames(frames, 0, 0, bus.get());
+ for (int ch = 0; ch < crossfade_output->channel_count(); ++ch) {
+ float cf_ratio = 0;
+ const float cf_increment = 1.0f / frames;
+ for (int i = 0; i < frames; ++i, cf_ratio += cf_increment) {
+ const float actual = bus->channel(ch)[i];
+ const float expected =
+ (1.0f - cf_ratio) * overlapped_value + cf_ratio * overlapping_value;
+ ASSERT_FLOAT_EQ(expected, actual) << "i=" << i;
+ }
+ }
+ }
+
+ bool AddInput(const scoped_refptr<AudioBuffer>& input) {
+ // Since the splicer doesn't make copies, it works directly on the input
+ // buffers. We must make a copy before adding to ensure the original
+ // buffer is not modified in unexpected ways.
+ scoped_refptr<AudioBuffer> buffer_copy =
+ input->end_of_stream()
+ ? AudioBuffer::CreateEOSBuffer()
+ : AudioBuffer::CopyFrom(kSampleFormat,
+ input->channel_count(),
+ input->frame_count(),
+ &input->channel_data()[0],
+ input->timestamp(),
+ input->duration());
+ return splicer_.AddInput(buffer_copy);
+ }
+
+ base::TimeDelta max_crossfade_duration() {
+ return splicer_.max_crossfade_duration_;
+ }
+
protected:
AudioSplicer splicer_;
AudioTimestampHelper input_timestamp_helper_;
@@ -66,40 +151,24 @@ TEST_F(AudioSplicerTest, PassThru) {
// Test single buffer pass-thru behavior.
scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.HasNextBuffer());
-
- scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
+ EXPECT_TRUE(AddInput(input_1));
+ VerifyNextBuffer(input_1);
EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
- EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
- EXPECT_TRUE(VerifyData(output_1, 0.1f));
// Test that multiple buffers can be queued in the splicer.
scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.3f);
- EXPECT_TRUE(splicer_.AddInput(input_2));
- EXPECT_TRUE(splicer_.AddInput(input_3));
- EXPECT_TRUE(splicer_.HasNextBuffer());
-
- scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
- EXPECT_TRUE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_2->timestamp(), output_2->timestamp());
- EXPECT_EQ(input_2->duration(), output_2->duration());
- EXPECT_EQ(input_2->frame_count(), output_2->frame_count());
-
- scoped_refptr<AudioBuffer> output_3 = splicer_.GetNextBuffer();
+ EXPECT_TRUE(AddInput(input_2));
+ EXPECT_TRUE(AddInput(input_3));
+ VerifyNextBuffer(input_2);
+ VerifyNextBuffer(input_3);
EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_3->timestamp(), output_3->timestamp());
- EXPECT_EQ(input_3->duration(), output_3->duration());
- EXPECT_EQ(input_3->frame_count(), output_3->frame_count());
}
TEST_F(AudioSplicerTest, Reset) {
scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.HasNextBuffer());
+ EXPECT_TRUE(AddInput(input_1));
+ ASSERT_TRUE(splicer_.HasNextBuffer());
splicer_.Reset();
EXPECT_FALSE(splicer_.HasNextBuffer());
@@ -112,14 +181,10 @@ TEST_F(AudioSplicerTest, Reset) {
// Verify that a new input buffer passes through as expected.
scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
- EXPECT_TRUE(splicer_.AddInput(input_2));
- EXPECT_TRUE(splicer_.HasNextBuffer());
-
- scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
+ EXPECT_TRUE(AddInput(input_2));
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+ VerifyNextBuffer(input_2);
EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_2->timestamp(), output_2->timestamp());
- EXPECT_EQ(input_2->duration(), output_2->duration());
- EXPECT_EQ(input_2->frame_count(), output_2->frame_count());
}
TEST_F(AudioSplicerTest, EndOfStream) {
@@ -128,30 +193,22 @@ TEST_F(AudioSplicerTest, EndOfStream) {
scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.2f);
EXPECT_TRUE(input_2->end_of_stream());
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.AddInput(input_2));
- EXPECT_TRUE(splicer_.HasNextBuffer());
+ EXPECT_TRUE(AddInput(input_1));
+ EXPECT_TRUE(AddInput(input_2));
+
+ VerifyNextBuffer(input_1);
- scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
- EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
-
EXPECT_TRUE(output_2->end_of_stream());
// Verify that buffers can be added again after Reset().
splicer_.Reset();
- EXPECT_TRUE(splicer_.AddInput(input_3));
- scoped_refptr<AudioBuffer> output_3 = splicer_.GetNextBuffer();
+ EXPECT_TRUE(AddInput(input_3));
+ VerifyNextBuffer(input_3);
EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_3->timestamp(), output_3->timestamp());
- EXPECT_EQ(input_3->duration(), output_3->duration());
- EXPECT_EQ(input_3->frame_count(), output_3->frame_count());
}
-
// Test the gap insertion code.
// +--------------+ +--------------+
// |11111111111111| |22222222222222|
@@ -170,23 +227,14 @@ TEST_F(AudioSplicerTest, GapInsertion) {
input_timestamp_helper_.AddFrames(kGapSize);
scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.AddInput(input_2));
-
- // Verify that a gap buffer is generated.
- EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
- scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
- scoped_refptr<AudioBuffer> output_3 = splicer_.GetNextBuffer();
- EXPECT_FALSE(splicer_.HasNextBuffer());
+ EXPECT_TRUE(AddInput(input_1));
+ EXPECT_TRUE(AddInput(input_2));
// Verify that the first input buffer passed through unmodified.
- EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
- EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
- EXPECT_TRUE(VerifyData(output_1, 0.1f));
+ VerifyNextBuffer(input_1);
// Verify the contents of the gap buffer.
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
base::TimeDelta gap_timestamp =
input_1->timestamp() + input_1->duration();
base::TimeDelta gap_duration = input_2->timestamp() - gap_timestamp;
@@ -197,13 +245,10 @@ TEST_F(AudioSplicerTest, GapInsertion) {
EXPECT_TRUE(VerifyData(output_2, 0.0f));
// Verify that the second input buffer passed through unmodified.
- EXPECT_EQ(input_2->timestamp(), output_3->timestamp());
- EXPECT_EQ(input_2->duration(), output_3->duration());
- EXPECT_EQ(input_2->frame_count(), output_3->frame_count());
- EXPECT_TRUE(VerifyData(output_3, 0.2f));
+ VerifyNextBuffer(input_2);
+ EXPECT_FALSE(splicer_.HasNextBuffer());
}
-
// Test that an error is signalled when the gap between input buffers is
// too large.
TEST_F(AudioSplicerTest, GapTooLarge) {
@@ -215,17 +260,10 @@ TEST_F(AudioSplicerTest, GapTooLarge) {
input_timestamp_helper_.AddFrames(kGapSize);
scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_FALSE(splicer_.AddInput(input_2));
+ EXPECT_TRUE(AddInput(input_1));
+ EXPECT_FALSE(AddInput(input_2));
- EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
-
- // Verify that the first input buffer passed through unmodified.
- EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
- EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
- EXPECT_TRUE(VerifyData(output_1, 0.1f));
+ VerifyNextBuffer(input_1);
// Verify that the second buffer is not available.
EXPECT_FALSE(splicer_.HasNextBuffer());
@@ -237,17 +275,11 @@ TEST_F(AudioSplicerTest, GapTooLarge) {
// Verify that valid buffers are still accepted.
scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.3f);
- EXPECT_TRUE(splicer_.AddInput(input_3));
- EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
+ EXPECT_TRUE(AddInput(input_3));
+ VerifyNextBuffer(input_3);
EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_3->timestamp(), output_2->timestamp());
- EXPECT_EQ(input_3->duration(), output_2->duration());
- EXPECT_EQ(input_3->frame_count(), output_2->frame_count());
- EXPECT_TRUE(VerifyData(output_2, 0.3f));
}
-
// Verifies that an error is signalled if AddInput() is called
// with a timestamp that is earlier than the first buffer added.
TEST_F(AudioSplicerTest, BufferAddedBeforeBase) {
@@ -261,11 +293,10 @@ TEST_F(AudioSplicerTest, BufferAddedBeforeBase) {
scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.1f);
EXPECT_GT(input_1->timestamp(), input_2->timestamp());
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_FALSE(splicer_.AddInput(input_2));
+ EXPECT_TRUE(AddInput(input_1));
+ EXPECT_FALSE(AddInput(input_2));
}
-
// Test when one buffer partially overlaps another.
// +--------------+
// |11111111111111|
@@ -288,33 +319,27 @@ TEST_F(AudioSplicerTest, PartialOverlap) {
scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.AddInput(input_2));
+ EXPECT_TRUE(AddInput(input_1));
+ EXPECT_TRUE(AddInput(input_2));
+
+ // Verify that the first input buffer passed through unmodified.
+ VerifyNextBuffer(input_1);
+ ASSERT_TRUE(splicer_.HasNextBuffer());
- EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
EXPECT_FALSE(splicer_.HasNextBuffer());
- // Verify that the first input buffer passed through unmodified.
- EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
- EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
- EXPECT_TRUE(VerifyData(output_1, 0.1f));
-
// Verify that the second input buffer was truncated to only contain
- // the samples that are after the end of |input_1|. Note that data is not
- // copied, so |input_2|'s values are modified.
+ // the samples that are after the end of |input_1|.
base::TimeDelta expected_timestamp =
input_1->timestamp() + input_1->duration();
base::TimeDelta expected_duration =
(input_2->timestamp() + input_2->duration()) - expected_timestamp;
EXPECT_EQ(expected_timestamp, output_2->timestamp());
EXPECT_EQ(expected_duration, output_2->duration());
- EXPECT_TRUE(VerifyData(output_2, 0.2f));
+ EXPECT_TRUE(VerifyData(output_2, GetValue(input_2)));
}
-
// Test that an input buffer that is completely overlapped by a buffer
// that was already added is dropped.
// +--------------+
@@ -348,27 +373,218 @@ TEST_F(AudioSplicerTest, DropBuffer) {
input_timestamp_helper_.AddFrames(input_1->frame_count());
scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.3f);
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.AddInput(input_2));
- EXPECT_TRUE(splicer_.AddInput(input_3));
+ EXPECT_TRUE(AddInput(input_1));
+ EXPECT_TRUE(AddInput(input_2));
+ EXPECT_TRUE(AddInput(input_3));
- EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
- scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
+ VerifyNextBuffer(input_1);
+ VerifyNextBuffer(input_3);
EXPECT_FALSE(splicer_.HasNextBuffer());
+}
- // Verify that the first input buffer passed through unmodified.
- EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
- EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
- EXPECT_TRUE(VerifyData(output_1, 0.1f));
-
- // Verify that the second output buffer only contains
- // the samples that are in |input_3|.
- EXPECT_EQ(input_3->timestamp(), output_2->timestamp());
- EXPECT_EQ(input_3->duration(), output_2->duration());
- EXPECT_EQ(input_3->frame_count(), output_2->frame_count());
- EXPECT_TRUE(VerifyData(output_2, 0.3f));
+// Test crossfade when one buffer partially overlaps another.
+// +--------------+
+// |11111111111111|
+// +--------------+
+// +--------------+
+// |22222222222222|
+// +--------------+
+// Results in:
+// +----------+----+----------+
+// |1111111111|xxxx|2222222222|
+// +----------+----+----------+
+// Where "xxxx" represents the crossfaded portion of the signal.
+TEST_F(AudioSplicerTest, PartialOverlapCrossfade) {
+ const int kCrossfadeSize =
+ input_timestamp_helper_.GetFramesToTarget(max_crossfade_duration());
+ const int kBufferSize = kCrossfadeSize * 2;
+
+ scoped_refptr<AudioBuffer> extra_pre_splice_buffer =
+ GetNextInputBuffer(0.2f, kBufferSize);
+ scoped_refptr<AudioBuffer> overlapped_buffer =
+ GetNextInputBuffer(1.0f, kBufferSize);
+
+ // Reset timestamp helper so that the next buffer will have a timestamp that
+ // starts in the middle of |overlapped_buffer|.
+ input_timestamp_helper_.SetBaseTimestamp(overlapped_buffer->timestamp());
+ input_timestamp_helper_.AddFrames(overlapped_buffer->frame_count() -
+ kCrossfadeSize);
+ splicer_.SetSpliceTimestamp(input_timestamp_helper_.GetTimestamp());
+ scoped_refptr<AudioBuffer> overlapping_buffer =
+ GetNextInputBuffer(0.0f, kBufferSize);
+
+ // |extra_pre_splice_buffer| is entirely before the splice and should be ready
+ // for output.
+ EXPECT_TRUE(AddInput(extra_pre_splice_buffer));
+ VerifyNextBuffer(extra_pre_splice_buffer);
+
+ // The splicer should be internally queuing input since |overlapped_buffer| is
+ // part of the splice.
+ EXPECT_TRUE(AddInput(overlapped_buffer));
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+
+ // |overlapping_buffer| should complete the splice, so ensure output is now
+ // available.
+ EXPECT_TRUE(AddInput(overlapping_buffer));
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+
+ // Add one more buffer to make sure it's passed through untouched.
+ scoped_refptr<AudioBuffer> extra_post_splice_buffer =
+ GetNextInputBuffer(0.5f, kBufferSize);
+ EXPECT_TRUE(AddInput(extra_post_splice_buffer));
+
+ VerifyPreSpliceOutput(overlapped_buffer,
+ overlapping_buffer,
+ 221,
+ base::TimeDelta::FromMicroseconds(5012));
+
+ // Due to rounding, the crossfade size may vary by up to a frame.
+ const int kExpectedCrossfadeSize = 220;
+ EXPECT_NEAR(kExpectedCrossfadeSize, kCrossfadeSize, 1);
+
+ VerifyCrossfadeOutput(overlapped_buffer,
+ overlapping_buffer,
+ kExpectedCrossfadeSize,
+ base::TimeDelta::FromMicroseconds(4988));
+
+ // Retrieve the remaining portion after crossfade.
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+ scoped_refptr<AudioBuffer> post_splice_output = splicer_.GetNextBuffer();
+ EXPECT_EQ(base::TimeDelta::FromMicroseconds(20022),
+ post_splice_output->timestamp());
+ EXPECT_EQ(overlapping_buffer->frame_count() - kExpectedCrossfadeSize,
+ post_splice_output->frame_count());
+ EXPECT_EQ(base::TimeDelta::FromMicroseconds(5034),
+ post_splice_output->duration());
+
+ EXPECT_TRUE(VerifyData(post_splice_output, GetValue(overlapping_buffer)));
+
+ VerifyNextBuffer(extra_post_splice_buffer);
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+}
+
+// Test crossfade when one buffer partially overlaps another, but an end of
+// stream buffer is received before the crossfade duration is reached.
+// +--------------+
+// |11111111111111|
+// +--------------+
+// +---------++---+
+// |222222222||EOS|
+// +---------++---+
+// Results in:
+// +----------+----+----++---+
+// |1111111111|xxxx|2222||EOS|
+// +----------+----+----++---+
+// Where "x" represents the crossfaded portion of the signal.
+TEST_F(AudioSplicerTest, PartialOverlapCrossfadeEndOfStream) {
+ const int kCrossfadeSize =
+ input_timestamp_helper_.GetFramesToTarget(max_crossfade_duration());
+
+ scoped_refptr<AudioBuffer> overlapped_buffer =
+ GetNextInputBuffer(1.0f, kCrossfadeSize * 2);
+
+ // Reset timestamp helper so that the next buffer will have a timestamp that
+ // starts 3/4 of the way into |overlapped_buffer|.
+ input_timestamp_helper_.SetBaseTimestamp(overlapped_buffer->timestamp());
+ input_timestamp_helper_.AddFrames(3 * overlapped_buffer->frame_count() / 4);
+ splicer_.SetSpliceTimestamp(input_timestamp_helper_.GetTimestamp());
+ scoped_refptr<AudioBuffer> overlapping_buffer =
+ GetNextInputBuffer(0.0f, kCrossfadeSize / 3);
+
+ // The splicer should be internally queuing input since |overlapped_buffer| is
+ // part of the splice.
+ EXPECT_TRUE(AddInput(overlapped_buffer));
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+
+ // |overlapping_buffer| should not have enough data to complete the splice, so
+ // ensure output is not available.
+ EXPECT_TRUE(AddInput(overlapping_buffer));
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+
+ // Now add an EOS buffer which should complete the splice.
+ EXPECT_TRUE(AddInput(AudioBuffer::CreateEOSBuffer()));
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+
+ VerifyPreSpliceOutput(overlapped_buffer,
+ overlapping_buffer,
+ 331,
+ base::TimeDelta::FromMicroseconds(7505));
+ VerifyCrossfadeOutput(overlapped_buffer,
+ overlapping_buffer,
+ overlapping_buffer->frame_count(),
+ overlapping_buffer->duration());
+
+ // Ensure the last buffer is an EOS buffer.
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+ scoped_refptr<AudioBuffer> post_splice_output = splicer_.GetNextBuffer();
+ EXPECT_TRUE(post_splice_output->end_of_stream());
+
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+}
+
+// Test crossfade when one buffer partially overlaps another, but the amount of
+// overlapped data is less than the crossfade duration.
+// +------------+
+// |111111111111|
+// +------------+
+// +--------------+
+// |22222222222222|
+// +--------------+
+// Results in:
+// +----------+-+------------+
+// |1111111111|x|222222222222|
+// +----------+-+------------+
+// Where "x" represents the crossfaded portion of the signal.
+TEST_F(AudioSplicerTest, PartialOverlapCrossfadeShortPreSplice) {
+ const int kCrossfadeSize =
+ input_timestamp_helper_.GetFramesToTarget(max_crossfade_duration());
+
+ scoped_refptr<AudioBuffer> overlapped_buffer =
+ GetNextInputBuffer(1.0f, kCrossfadeSize / 2);
+
+ // Reset timestamp helper so that the next buffer will have a timestamp that
+ // starts in the middle of |overlapped_buffer|.
+ input_timestamp_helper_.SetBaseTimestamp(overlapped_buffer->timestamp());
+ input_timestamp_helper_.AddFrames(overlapped_buffer->frame_count() / 2);
+ splicer_.SetSpliceTimestamp(input_timestamp_helper_.GetTimestamp());
+ scoped_refptr<AudioBuffer> overlapping_buffer =
+ GetNextInputBuffer(0.0f, kCrossfadeSize * 2);
+
+ // The splicer should be internally queuing input since |overlapped_buffer| is
+ // part of the splice.
+ EXPECT_TRUE(AddInput(overlapped_buffer));
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+
+ // |overlapping_buffer| should complete the splice, so ensure output is now
+ // available.
+ EXPECT_TRUE(AddInput(overlapping_buffer));
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+
+ const int kExpectedPreSpliceSize = 55;
+ const base::TimeDelta kExpectedPreSpliceDuration =
+ base::TimeDelta::FromMicroseconds(1247);
+ VerifyPreSpliceOutput(overlapped_buffer,
+ overlapping_buffer,
+ kExpectedPreSpliceSize,
+ kExpectedPreSpliceDuration);
+ VerifyCrossfadeOutput(overlapped_buffer,
+ overlapping_buffer,
+ kExpectedPreSpliceSize,
+ kExpectedPreSpliceDuration);
+
+ // Retrieve the remaining portion after crossfade.
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+ scoped_refptr<AudioBuffer> post_splice_output = splicer_.GetNextBuffer();
+ EXPECT_EQ(overlapping_buffer->timestamp() + kExpectedPreSpliceDuration,
+ post_splice_output->timestamp());
+ EXPECT_EQ(overlapping_buffer->frame_count() - kExpectedPreSpliceSize,
+ post_splice_output->frame_count());
+ EXPECT_EQ(overlapping_buffer->duration() - kExpectedPreSpliceDuration,
+ post_splice_output->duration());
+
+ EXPECT_TRUE(VerifyData(post_splice_output, GetValue(overlapping_buffer)));
+
+ EXPECT_FALSE(splicer_.HasNextBuffer());
}
} // namespace media
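For orientation, the constants in the crossfade tests above are consistent
with AudioSplicer's roughly 5 ms default crossfade window at the 44.1 kHz test
rate (an inference from the expectations, not something stated in this hunk):
0.005 s * 44100 Hz = 220.5 frames, so kCrossfadeSize lands on 220 or 221
depending on rounding, and the splice splits into a 221-frame (5012 us)
pre-splice piece plus a 220-frame (4988 us) crossfade, which together consume
the full 441-frame, 10 ms overlapped region.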
diff --git a/media/base/audio_timestamp_helper.h b/media/base/audio_timestamp_helper.h
index 8b5d50e66f..1da8b4a7cd 100644
--- a/media/base/audio_timestamp_helper.h
+++ b/media/base/audio_timestamp_helper.h
@@ -27,12 +27,13 @@ namespace media {
// accumulated frames to reach a target timestamp.
class MEDIA_EXPORT AudioTimestampHelper {
public:
- AudioTimestampHelper(int samples_per_second);
+ explicit AudioTimestampHelper(int samples_per_second);
// Sets the base timestamp to |base_timestamp| and the sets count to 0.
void SetBaseTimestamp(base::TimeDelta base_timestamp);
base::TimeDelta base_timestamp() const;
+ int64 frame_count() const { return frame_count_; }
// Adds |frame_count| to the frame counter.
// Note: SetBaseTimestamp() must be called with a value other than
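A brief sketch of the helper with the new frame_count() accessor (the sample
rate and frame counts are illustrative):

    media::AudioTimestampHelper helper(44100);     // samples per second
    helper.SetBaseTimestamp(base::TimeDelta());    // stream starts at zero
    helper.AddFrames(441);                         // one 10 ms buffer
    base::TimeDelta next = helper.GetTimestamp();  // 10 ms
    int64 frames = helper.frame_count();           // 441, via the new accessor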
diff --git a/media/base/bind_to_current_loop.h b/media/base/bind_to_current_loop.h
index 927850c8aa..6461b1c9af 100644
--- a/media/base/bind_to_current_loop.h
+++ b/media/base/bind_to_current_loop.h
@@ -36,13 +36,9 @@ namespace internal {
template <typename T>
T& TrampolineForward(T& t) { return t; }
-template <typename T>
-base::internal::PassedWrapper<scoped_ptr<T> > TrampolineForward(
- scoped_ptr<T>& p) { return base::Passed(&p); }
-
template <typename T, typename R>
-base::internal::PassedWrapper<scoped_ptr_malloc<T, R> > TrampolineForward(
- scoped_ptr_malloc<T, R>& p) { return base::Passed(&p); }
+base::internal::PassedWrapper<scoped_ptr<T, R> > TrampolineForward(
+ scoped_ptr<T, R>& p) { return base::Passed(&p); }
template <typename T>
base::internal::PassedWrapper<ScopedVector<T> > TrampolineForward(
diff --git a/media/base/bind_to_current_loop.h.pump b/media/base/bind_to_current_loop.h.pump
index 4ce33de009..4db40f1c1f 100644
--- a/media/base/bind_to_current_loop.h.pump
+++ b/media/base/bind_to_current_loop.h.pump
@@ -41,13 +41,9 @@ namespace internal {
template <typename T>
T& TrampolineForward(T& t) { return t; }
-template <typename T>
-base::internal::PassedWrapper<scoped_ptr<T> > TrampolineForward(
- scoped_ptr<T>& p) { return base::Passed(&p); }
-
template <typename T, typename R>
-base::internal::PassedWrapper<scoped_ptr_malloc<T, R> > TrampolineForward(
- scoped_ptr_malloc<T, R>& p) { return base::Passed(&p); }
+base::internal::PassedWrapper<scoped_ptr<T, R> > TrampolineForward(
+ scoped_ptr<T, R>& p) { return base::Passed(&p); }
template <typename T>
base::internal::PassedWrapper<ScopedVector<T> > TrampolineForward(
diff --git a/media/base/bind_to_current_loop_unittest.cc b/media/base/bind_to_current_loop_unittest.cc
index df98525c7d..2303085667 100644
--- a/media/base/bind_to_current_loop_unittest.cc
+++ b/media/base/bind_to_current_loop_unittest.cc
@@ -18,7 +18,9 @@ void BoundBoolSetFromScopedPtr(bool* var, scoped_ptr<bool> val) {
*var = *val;
}
-void BoundBoolSetFromScopedPtrMalloc(bool* var, scoped_ptr_malloc<bool> val) {
+void BoundBoolSetFromScopedPtrFreeDeleter(
+ bool* var,
+ scoped_ptr<bool, base::FreeDeleter> val) {
  *var = *val;
}
@@ -110,28 +112,29 @@ TEST_F(BindToCurrentLoopTest, PassedScopedArrayBool) {
EXPECT_TRUE(bool_val);
}
-TEST_F(BindToCurrentLoopTest, BoundScopedPtrMallocBool) {
+TEST_F(BindToCurrentLoopTest, BoundScopedPtrFreeDeleterBool) {
bool bool_val = false;
- scoped_ptr_malloc<bool> scoped_ptr_malloc_bool(
+ scoped_ptr<bool, base::FreeDeleter> scoped_ptr_free_deleter_bool(
static_cast<bool*>(malloc(sizeof(bool))));
- *scoped_ptr_malloc_bool = true;
+ *scoped_ptr_free_deleter_bool = true;
base::Closure cb = BindToCurrentLoop(base::Bind(
- &BoundBoolSetFromScopedPtrMalloc, &bool_val,
- base::Passed(&scoped_ptr_malloc_bool)));
+ &BoundBoolSetFromScopedPtrFreeDeleter, &bool_val,
+ base::Passed(&scoped_ptr_free_deleter_bool)));
cb.Run();
EXPECT_FALSE(bool_val);
loop_.RunUntilIdle();
EXPECT_TRUE(bool_val);
}
-TEST_F(BindToCurrentLoopTest, PassedScopedPtrMallocBool) {
+TEST_F(BindToCurrentLoopTest, PassedScopedPtrFreeDeleterBool) {
bool bool_val = false;
- scoped_ptr_malloc<bool> scoped_ptr_malloc_bool(
+ scoped_ptr<bool, base::FreeDeleter> scoped_ptr_free_deleter_bool(
static_cast<bool*>(malloc(sizeof(bool))));
- *scoped_ptr_malloc_bool = true;
- base::Callback<void(scoped_ptr_malloc<bool>)> cb = BindToCurrentLoop(
- base::Bind(&BoundBoolSetFromScopedPtrMalloc, &bool_val));
- cb.Run(scoped_ptr_malloc_bool.Pass());
+ *scoped_ptr_free_deleter_bool = true;
+ base::Callback<void(scoped_ptr<bool, base::FreeDeleter>)> cb =
+ BindToCurrentLoop(base::Bind(&BoundBoolSetFromScopedPtrFreeDeleter,
+ &bool_val));
+ cb.Run(scoped_ptr_free_deleter_bool.Pass());
EXPECT_FALSE(bool_val);
loop_.RunUntilIdle();
EXPECT_TRUE(bool_val);
diff --git a/media/base/buffers.h b/media/base/buffers.h
index 6a6c7303d1..5c5c47b68e 100644
--- a/media/base/buffers.h
+++ b/media/base/buffers.h
@@ -37,7 +37,7 @@ MEDIA_EXPORT extern inline base::TimeDelta kNoTimestamp() {
// Represents an infinite stream duration.
MEDIA_EXPORT extern inline base::TimeDelta kInfiniteDuration() {
- return base::TimeDelta::FromMicroseconds(kint64max);
+ return base::TimeDelta::Max();
}
} // namespace media
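Since kInfiniteDuration() now equals base::TimeDelta::Max(), an infinite or
unknown duration can be detected with is_max() instead of comparing
microsecond counts against kint64max; the MediaLog changes later in this diff
rely on exactly that. A minimal illustration:

    base::TimeDelta duration = media::kInfiniteDuration();
    DCHECK(duration.is_max());  // What MediaLog::CreateTimeEvent() checks.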
diff --git a/media/base/channel_layout.cc b/media/base/channel_layout.cc
index 958430ac4e..4cb3a56b9f 100644
--- a/media/base/channel_layout.cc
+++ b/media/base/channel_layout.cc
@@ -48,7 +48,7 @@ static const int kLayoutToChannels[] = {
// surround sound channel in FFmpeg's 5.1 layout is in the 5th position (because
// the order is L, R, C, LFE, LS, RS), so
// kChannelOrderings[CHANNEL_LAYOUT_5POINT1][SIDE_LEFT] = 4;
-static const int kChannelOrderings[CHANNEL_LAYOUT_MAX][CHANNELS_MAX] = {
+static const int kChannelOrderings[CHANNEL_LAYOUT_MAX + 1][CHANNELS_MAX + 1] = {
// FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
// CHANNEL_LAYOUT_NONE
@@ -246,8 +246,6 @@ const char* ChannelLayoutToString(ChannelLayout layout) {
return "OCTAGONAL";
case CHANNEL_LAYOUT_DISCRETE:
return "DISCRETE";
- case CHANNEL_LAYOUT_MAX:
- break;
}
NOTREACHED() << "Invalid channel layout provided: " << layout;
return "";
diff --git a/media/base/channel_layout.h b/media/base/channel_layout.h
index 9354eee850..1286b84702 100644
--- a/media/base/channel_layout.h
+++ b/media/base/channel_layout.h
@@ -99,8 +99,8 @@ enum ChannelLayout {
// Channels are not explicitly mapped to speakers.
CHANNEL_LAYOUT_DISCRETE = 29,
- // Total number of layouts.
- CHANNEL_LAYOUT_MAX // Must always be last!
+ // Max value, must always equal the largest entry ever logged.
+ CHANNEL_LAYOUT_MAX = CHANNEL_LAYOUT_DISCRETE
};
enum Channels {
@@ -115,7 +115,7 @@ enum Channels {
BACK_CENTER,
SIDE_LEFT,
SIDE_RIGHT,
- CHANNELS_MAX
+ CHANNELS_MAX = SIDE_RIGHT, // Must always equal the largest value ever logged.
};
// Returns the expected channel position in an interleaved stream. Values of -1
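Since CHANNEL_LAYOUT_MAX now names the last valid entry rather than a count,
exhaustive loops need an inclusive bound, as the channel_mixer changes below
show. A minimal sketch:

    for (media::ChannelLayout layout = media::CHANNEL_LAYOUT_MONO;
         layout <= media::CHANNEL_LAYOUT_MAX;  // Inclusive: MAX is real.
         layout = static_cast<media::ChannelLayout>(layout + 1)) {
      // Switch statements no longer need a CHANNEL_LAYOUT_MAX case, since
      // it aliases CHANNEL_LAYOUT_DISCRETE.
    }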
diff --git a/media/base/channel_mixer.cc b/media/base/channel_mixer.cc
index 3de63fe8bf..63c6c38328 100644
--- a/media/base/channel_mixer.cc
+++ b/media/base/channel_mixer.cc
@@ -23,7 +23,7 @@ static const float kEqualPowerScale = static_cast<float>(M_SQRT1_2);
static void ValidateLayout(ChannelLayout layout) {
CHECK_NE(layout, CHANNEL_LAYOUT_NONE);
- CHECK_NE(layout, CHANNEL_LAYOUT_MAX);
+ CHECK_LE(layout, CHANNEL_LAYOUT_MAX);
CHECK_NE(layout, CHANNEL_LAYOUT_UNSUPPORTED);
CHECK_NE(layout, CHANNEL_LAYOUT_DISCRETE);
@@ -170,7 +170,7 @@ bool MatrixBuilder::CreateTransformationMatrix(
}
// Route matching channels and figure out which ones aren't accounted for.
- for (Channels ch = LEFT; ch < CHANNELS_MAX;
+ for (Channels ch = LEFT; ch < CHANNELS_MAX + 1;
ch = static_cast<Channels>(ch + 1)) {
int input_ch_index = ChannelOrder(input_layout_, ch);
if (input_ch_index < 0)
diff --git a/media/base/channel_mixer_unittest.cc b/media/base/channel_mixer_unittest.cc
index e048f8d9fc..bf3f5fb1f7 100644
--- a/media/base/channel_mixer_unittest.cc
+++ b/media/base/channel_mixer_unittest.cc
@@ -21,7 +21,7 @@ enum { kFrames = 16 };
// Test all possible layout conversions can be constructed and mixed.
TEST(ChannelMixerTest, ConstructAllPossibleLayouts) {
for (ChannelLayout input_layout = CHANNEL_LAYOUT_MONO;
- input_layout < CHANNEL_LAYOUT_MAX;
+ input_layout <= CHANNEL_LAYOUT_MAX;
input_layout = static_cast<ChannelLayout>(input_layout + 1)) {
for (ChannelLayout output_layout = CHANNEL_LAYOUT_MONO;
output_layout < CHANNEL_LAYOUT_STEREO_DOWNMIX;
diff --git a/media/base/decoder_buffer_queue.cc b/media/base/decoder_buffer_queue.cc
index d0486cbf93..26ba9f4e69 100644
--- a/media/base/decoder_buffer_queue.cc
+++ b/media/base/decoder_buffer_queue.cc
@@ -5,13 +5,15 @@
#include "media/base/decoder_buffer_queue.h"
#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
#include "media/base/buffers.h"
#include "media/base/decoder_buffer.h"
namespace media {
DecoderBufferQueue::DecoderBufferQueue()
- : earliest_valid_timestamp_(kNoTimestamp()) {
+ : earliest_valid_timestamp_(kNoTimestamp()),
+ data_size_(0) {
}
DecoderBufferQueue::~DecoderBufferQueue() {}
@@ -21,6 +23,10 @@ void DecoderBufferQueue::Push(const scoped_refptr<DecoderBuffer>& buffer) {
queue_.push_back(buffer);
+ // TODO(damienv): Remove the cast here and in every place in this file
+ // when DecoderBuffer::data_size is modified to return a size_t.
+ data_size_ += base::checked_cast<size_t, int>(buffer->data_size());
+
// TODO(scherkus): FFmpeg returns some packets with no timestamp after
// seeking. Fix and turn this into CHECK(). See http://crbug.com/162192
if (buffer->timestamp() == kNoTimestamp()) {
@@ -49,6 +55,11 @@ scoped_refptr<DecoderBuffer> DecoderBufferQueue::Pop() {
scoped_refptr<DecoderBuffer> buffer = queue_.front();
queue_.pop_front();
+ size_t buffer_data_size =
+ base::checked_cast<size_t, int>(buffer->data_size());
+ DCHECK_LE(buffer_data_size, data_size_);
+ data_size_ -= buffer_data_size;
+
if (!in_order_queue_.empty() &&
in_order_queue_.front().get() == buffer.get()) {
in_order_queue_.pop_front();
@@ -59,6 +70,7 @@ scoped_refptr<DecoderBuffer> DecoderBufferQueue::Pop() {
void DecoderBufferQueue::Clear() {
queue_.clear();
+ data_size_ = 0;
in_order_queue_.clear();
earliest_valid_timestamp_ = kNoTimestamp();
}
diff --git a/media/base/decoder_buffer_queue.h b/media/base/decoder_buffer_queue.h
index 938db63123..9c2c2dc7b1 100644
--- a/media/base/decoder_buffer_queue.h
+++ b/media/base/decoder_buffer_queue.h
@@ -51,6 +51,9 @@ class MEDIA_EXPORT DecoderBufferQueue {
// Returns zero if the queue is empty.
base::TimeDelta Duration();
+ // Returns the total size of buffers inside the queue.
+ size_t data_size() const { return data_size_; }
+
private:
typedef std::deque<scoped_refptr<DecoderBuffer> > Queue;
Queue queue_;
@@ -62,6 +65,9 @@ class MEDIA_EXPORT DecoderBufferQueue {
base::TimeDelta earliest_valid_timestamp_;
+ // Total size in bytes of buffers in the queue.
+ size_t data_size_;
+
DISALLOW_COPY_AND_ASSIGN(DecoderBufferQueue);
};
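The new data_size() accessor enables byte-based buffering limits alongside the
existing Duration(). A hypothetical sketch (kMaxQueueBytes is illustrative,
not part of this change):

    static const size_t kMaxQueueBytes = 4 * 1024 * 1024;

    bool CanAcceptMoreData(const media::DecoderBufferQueue& queue) {
      return queue.data_size() < kMaxQueueBytes;
    }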
diff --git a/media/base/decoder_buffer_queue_unittest.cc b/media/base/decoder_buffer_queue_unittest.cc
index 32e62db06b..5eb06d2152 100644
--- a/media/base/decoder_buffer_queue_unittest.cc
+++ b/media/base/decoder_buffer_queue_unittest.cc
@@ -25,6 +25,13 @@ static scoped_refptr<DecoderBuffer> CreateBuffer(int timestamp) {
return buffer;
}
+static scoped_refptr<DecoderBuffer> CreateBuffer(int timestamp, int size) {
+ scoped_refptr<DecoderBuffer> buffer = new DecoderBuffer(size);
+ buffer->set_timestamp(ToTimeDelta(timestamp));
+ buffer->set_duration(ToTimeDelta(0));
+ return buffer;
+}
+
TEST(DecoderBufferQueueTest, IsEmpty) {
DecoderBufferQueue queue;
EXPECT_TRUE(queue.IsEmpty());
@@ -135,4 +142,28 @@ TEST(DecoderBufferQueueTest, Duration_NoTimestamp) {
EXPECT_EQ(0, queue.Duration().InSeconds());
}
+TEST(DecoderBufferQueueTest, DataSize) {
+ DecoderBufferQueue queue;
+ EXPECT_EQ(queue.data_size(), 0u);
+
+ queue.Push(CreateBuffer(0, 1200u));
+ EXPECT_EQ(queue.data_size(), 1200u);
+
+ queue.Push(CreateBuffer(1, 1000u));
+ EXPECT_EQ(queue.data_size(), 2200u);
+
+ queue.Pop();
+ EXPECT_EQ(queue.data_size(), 1000u);
+
+ queue.Push(CreateBuffer(2, 999u));
+ queue.Push(CreateBuffer(3, 999u));
+ EXPECT_EQ(queue.data_size(), 2998u);
+
+ queue.Clear();
+ EXPECT_EQ(queue.data_size(), 0u);
+
+ queue.Push(CreateBuffer(4, 1400u));
+ EXPECT_EQ(queue.data_size(), 1400u);
+}
+
} // namespace media
diff --git a/media/base/media_file_checker.cc b/media/base/media_file_checker.cc
index 00b7883d04..418839d6f1 100644
--- a/media/base/media_file_checker.cc
+++ b/media/base/media_file_checker.cc
@@ -56,8 +56,7 @@ bool MediaFileChecker::Start(base::TimeDelta check_time) {
return false;
AVPacket packet;
- scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFreeFrame> frame(
- av_frame_alloc());
+ scoped_ptr<AVFrame, media::ScopedPtrAVFreeFrame> frame(av_frame_alloc());
int result = 0;
const base::TimeTicks deadline = base::TimeTicks::Now() +
diff --git a/media/base/media_keys.h b/media/base/media_keys.h
index 84c1401482..a6ef64ceee 100644
--- a/media/base/media_keys.h
+++ b/media/base/media_keys.h
@@ -93,7 +93,7 @@ typedef base::Callback<void(uint32 session_id)> SessionClosedCB;
typedef base::Callback<void(uint32 session_id,
media::MediaKeys::KeyError error_code,
- int system_code)> SessionErrorCB;
+ uint32 system_code)> SessionErrorCB;
} // namespace media
diff --git a/media/base/media_log.cc b/media/base/media_log.cc
index e791b441f4..c689d7a46c 100644
--- a/media/base/media_log.cc
+++ b/media/base/media_log.cc
@@ -97,8 +97,6 @@ const char* MediaLog::PipelineStatusToString(PipelineStatus status) {
return "demuxer: no supported streams";
case DECODER_ERROR_NOT_SUPPORTED:
return "decoder: not supported";
- case PIPELINE_STATUS_MAX:
- NOTREACHED();
}
NOTREACHED();
return NULL;
@@ -143,7 +141,10 @@ scoped_ptr<MediaLogEvent> MediaLog::CreateStringEvent(
scoped_ptr<MediaLogEvent> MediaLog::CreateTimeEvent(
MediaLogEvent::Type type, const char* property, base::TimeDelta value) {
scoped_ptr<MediaLogEvent> event(CreateEvent(type));
- event->params.SetDouble(property, value.InSecondsF());
+ if (value.is_max())
+ event->params.SetString(property, "unknown");
+ else
+ event->params.SetDouble(property, value.InSecondsF());
return event.Pass();
}
@@ -230,4 +231,14 @@ void MediaLog::SetBooleanProperty(
AddEvent(event.Pass());
}
+void MediaLog::SetTimeProperty(
+ const char* key, base::TimeDelta value) {
+ scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::PROPERTY_CHANGE));
+ if (value.is_max())
+ event->params.SetString(key, "unknown");
+ else
+ event->params.SetDouble(key, value.InSecondsF());
+ AddEvent(event.Pass());
+}
+
} //namespace media
diff --git a/media/base/media_log.h b/media/base/media_log.h
index 1d25c0973a..f342ee84fc 100644
--- a/media/base/media_log.h
+++ b/media/base/media_log.h
@@ -73,6 +73,7 @@ class MEDIA_EXPORT MediaLog : public base::RefCountedThreadSafe<MediaLog> {
void SetIntegerProperty(const char* key, int value);
void SetDoubleProperty(const char* key, double value);
void SetBooleanProperty(const char* key, bool value);
+ void SetTimeProperty(const char* key, base::TimeDelta value);
protected:
friend class base::RefCountedThreadSafe<MediaLog>;
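A sketch of the new time setter ("duration" is an illustrative key). Per
SetTimeProperty() above, a TimeDelta::Max() value is logged as the string
"unknown" rather than a meaningless double:

    media_log->SetTimeProperty("duration", base::TimeDelta::FromSeconds(42));
    media_log->SetTimeProperty("duration", media::kInfiniteDuration());
    // The second call records "unknown".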
diff --git a/media/base/media_switches.cc b/media/base/media_switches.cc
index 7377fb9610..4be852da55 100644
--- a/media/base/media_switches.cc
+++ b/media/base/media_switches.cc
@@ -9,12 +9,6 @@ namespace switches {
// Allow users to specify a custom buffer size for debugging purpose.
const char kAudioBufferSize[] = "audio-buffer-size";
-// Disables Opus playback in media elements.
-const char kDisableOpusPlayback[] = "disable-opus-playback";
-
-// Disables VP8 Alpha playback in media elements.
-const char kDisableVp8AlphaPlayback[] = "disable-vp8-alpha-playback";
-
// Set number of threads to use for video decoding.
const char kVideoThreads[] = "video-threads";
@@ -42,18 +36,13 @@ const char kAlsaOutputDevice[] = "alsa-output-device";
#endif
#if defined(OS_MACOSX)
-// Unlike other platforms, OSX requires CoreAudio calls to happen on the main
-// thread of the process. Provide a way to disable this until support is well
-// tested. See http://crbug.com/158170.
-// TODO(dalecurtis): Remove this once we're sure nothing has exploded.
-const char kDisableMainThreadAudio[] = "disable-main-thread-audio";
// AVFoundation is available in versions 10.7 and onwards, and is to be used
// http://crbug.com/288562 for both audio and video device monitoring and for
// video capture. Being a dynamically loaded NSBundle and library, it hits the
// Chrome startup time (http://crbug.com/311325 and http://crbug.com/311437);
// for experimentation purposes, in particular library load time issue, the
-// usage of this library can be hidden behind this flag.
-const char kDisableAVFoundation[] = "disable-avfoundation";
+// usage of this library can be enabled with this flag.
+const char kEnableAVFoundation[] = "enable-avfoundation";
#endif
#if defined(OS_WIN)
diff --git a/media/base/media_switches.h b/media/base/media_switches.h
index c1f5588eca..dd865ffc41 100644
--- a/media/base/media_switches.h
+++ b/media/base/media_switches.h
@@ -14,10 +14,6 @@ namespace switches {
MEDIA_EXPORT extern const char kAudioBufferSize[];
-MEDIA_EXPORT extern const char kDisableOpusPlayback[];
-
-MEDIA_EXPORT extern const char kDisableVp8AlphaPlayback[];
-
MEDIA_EXPORT extern const char kVideoThreads[];
MEDIA_EXPORT extern const char kEnableADTSStreamParser[];
@@ -34,8 +30,7 @@ MEDIA_EXPORT extern const char kAlsaOutputDevice[];
#endif
#if defined(OS_MACOSX)
-MEDIA_EXPORT extern const char kDisableMainThreadAudio[];
-MEDIA_EXPORT extern const char kDisableAVFoundation[];
+MEDIA_EXPORT extern const char kEnableAVFoundation[];
#endif
#if defined(OS_WIN)
diff --git a/media/base/mock_filters.h b/media/base/mock_filters.h
index 96c560484e..8b2a1c2d0a 100644
--- a/media/base/mock_filters.h
+++ b/media/base/mock_filters.h
@@ -91,10 +91,11 @@ class MockAudioDecoder : public AudioDecoder {
virtual ~MockAudioDecoder();
// AudioDecoder implementation.
- MOCK_METHOD3(Initialize, void(DemuxerStream*,
- const PipelineStatusCB&,
- const StatisticsCB&));
- MOCK_METHOD1(Read, void(const ReadCB&));
+ MOCK_METHOD2(Initialize, void(const AudioDecoderConfig& config,
+ const PipelineStatusCB&));
+ MOCK_METHOD2(Decode,
+ void(const scoped_refptr<DecoderBuffer>& buffer,
+ const DecodeCB&));
MOCK_METHOD0(bits_per_channel, int(void));
MOCK_METHOD0(channel_layout, ChannelLayout(void));
MOCK_METHOD0(samples_per_second, int(void));
diff --git a/media/base/pipeline_status.h b/media/base/pipeline_status.h
index a9f8585f57..15e5c9dd76 100644
--- a/media/base/pipeline_status.h
+++ b/media/base/pipeline_status.h
@@ -32,7 +32,8 @@ enum PipelineStatus {
DEMUXER_ERROR_NO_SUPPORTED_STREAMS = 14,
// Decoder related errors.
DECODER_ERROR_NOT_SUPPORTED = 15,
- PIPELINE_STATUS_MAX, // Must be greater than all other values logged.
+ // Must be equal to the largest value ever logged.
+ PIPELINE_STATUS_MAX = DECODER_ERROR_NOT_SUPPORTED,
};
typedef base::Callback<void(PipelineStatus)> PipelineStatusCB;
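With PIPELINE_STATUS_MAX aliasing the last real value, UMA call sites are
expected to pass an exclusive bound of MAX + 1, mirroring the
Media.VideoPixelFormat change to FORMAT_MAX + 1 later in this diff. A sketch:

    UMA_HISTOGRAM_ENUMERATION("Media.PipelineStatus", status,
                              media::PIPELINE_STATUS_MAX + 1);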
diff --git a/media/base/sample_format.cc b/media/base/sample_format.cc
index a4791cd686..cf8f20f563 100644
--- a/media/base/sample_format.cc
+++ b/media/base/sample_format.cc
@@ -21,8 +21,6 @@ int SampleFormatToBytesPerChannel(SampleFormat sample_format) {
case kSampleFormatF32:
case kSampleFormatPlanarF32:
return 4;
- case kSampleFormatMax:
- break;
}
NOTREACHED() << "Invalid sample format provided: " << sample_format;
@@ -45,8 +43,6 @@ const char* SampleFormatToString(SampleFormat sample_format) {
return "Signed 16-bit planar";
case kSampleFormatPlanarF32:
return "Float 32-bit planar";
- case kSampleFormatMax:
- break;
}
NOTREACHED() << "Invalid sample format provided: " << sample_format;
return "";
diff --git a/media/base/sample_format.h b/media/base/sample_format.h
index 3d2799fa12..7c3df70215 100644
--- a/media/base/sample_format.h
+++ b/media/base/sample_format.h
@@ -12,7 +12,8 @@ namespace media {
enum SampleFormat {
// These values are histogrammed over time; do not change their ordinal
// values. When deleting a sample format replace it with a dummy value; when
- // adding a sample format, do so at the bottom before kSampleFormatMax.
+ // adding a sample format, do so at the bottom before kSampleFormatMax, and
+ // update the value of kSampleFormatMax.
kUnknownSampleFormat = 0,
kSampleFormatU8, // Unsigned 8-bit w/ bias of 128.
kSampleFormatS16, // Signed 16-bit.
@@ -21,8 +22,8 @@ enum SampleFormat {
kSampleFormatPlanarS16, // Signed 16-bit planar.
kSampleFormatPlanarF32, // Float 32-bit planar.
- // Must always be last!
- kSampleFormatMax
+ // Must always be equal to largest value ever logged.
+ kSampleFormatMax = kSampleFormatPlanarF32,
};
// Returns the number of bytes used per channel for the specified
diff --git a/media/base/test_data_util.cc b/media/base/test_data_util.cc
index 386617e006..a83fa840b4 100644
--- a/media/base/test_data_util.cc
+++ b/media/base/test_data_util.cc
@@ -6,6 +6,7 @@
#include "base/file_util.h"
#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
#include "base/path_service.h"
#include "media/base/decoder_buffer.h"
@@ -15,25 +16,20 @@ base::FilePath GetTestDataFilePath(const std::string& name) {
base::FilePath file_path;
CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &file_path));
- file_path = file_path.Append(FILE_PATH_LITERAL("media"))
- .Append(FILE_PATH_LITERAL("test")).Append(FILE_PATH_LITERAL("data"))
+ return file_path.AppendASCII("media")
+ .AppendASCII("test")
+ .AppendASCII("data")
.AppendASCII(name);
- return file_path;
}
scoped_refptr<DecoderBuffer> ReadTestDataFile(const std::string& name) {
- base::FilePath file_path;
- CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &file_path));
-
- file_path = file_path.Append(FILE_PATH_LITERAL("media"))
- .Append(FILE_PATH_LITERAL("test")).Append(FILE_PATH_LITERAL("data"))
- .AppendASCII(name);
+ base::FilePath file_path = GetTestDataFilePath(name);
int64 tmp = 0;
CHECK(base::GetFileSize(file_path, &tmp))
<< "Failed to get file size for '" << name << "'";
- int file_size = static_cast<int>(tmp);
+ int file_size = base::checked_cast<int>(tmp);
scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(file_size));
CHECK_EQ(file_size,
diff --git a/media/base/test_helpers.cc b/media/base/test_helpers.cc
index 672f8c2d03..57ac40d5ee 100644
--- a/media/base/test_helpers.cc
+++ b/media/base/test_helpers.cc
@@ -149,100 +149,62 @@ gfx::Size TestVideoConfig::LargeCodedSize() {
}
template <class T>
-scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
- SampleFormat format,
- int channels,
- T start,
- T increment,
- int frames,
- base::TimeDelta start_time,
- base::TimeDelta duration) {
- DCHECK(format == kSampleFormatU8 || format == kSampleFormatS16 ||
- format == kSampleFormatS32 || format == kSampleFormatF32);
+scoped_refptr<AudioBuffer> MakeAudioBuffer(SampleFormat format,
+ int channels,
+ T start,
+ T increment,
+ int frames,
+ base::TimeDelta timestamp,
+ base::TimeDelta duration) {
+ scoped_refptr<AudioBuffer> output =
+ AudioBuffer::CreateBuffer(format, channels, frames);
+ output->set_timestamp(timestamp);
+ output->set_duration(duration);
// Create a block of memory with values:
// start
// start + increment
// start + 2 * increment, ...
- // Since this is interleaved data, channel 0 data will be:
+ // For interleaved data, raw data will be:
// start
// start + channels * increment
// start + 2 * channels * increment, ...
- int buffer_size = frames * channels * sizeof(T);
- scoped_ptr<uint8[]> memory(new uint8[buffer_size]);
- uint8* data[] = { memory.get() };
- T* buffer = reinterpret_cast<T*>(memory.get());
- for (int i = 0; i < frames * channels; ++i) {
- buffer[i] = start;
- start += increment;
- }
- return AudioBuffer::CopyFrom(
- format, channels, frames, data, start_time, duration);
-}
-
-template <class T>
-scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
- SampleFormat format,
- int channels,
- T start,
- T increment,
- int frames,
- base::TimeDelta start_time,
- base::TimeDelta duration) {
- DCHECK(format == kSampleFormatPlanarF32 || format == kSampleFormatPlanarS16);
-
- // Create multiple blocks of data, one for each channel.
- // Values in channel 0 will be:
+ //
+ // For planar data, values in channel 0 will be:
// start
// start + increment
// start + 2 * increment, ...
- // Values in channel 1 will be:
+ // Meanwhile, values in channel 1 will be:
// start + frames * increment
// start + (frames + 1) * increment
// start + (frames + 2) * increment, ...
- int buffer_size = frames * sizeof(T);
- scoped_ptr<uint8*[]> data(new uint8*[channels]);
- scoped_ptr<uint8[]> memory(new uint8[channels * buffer_size]);
- for (int i = 0; i < channels; ++i) {
- data.get()[i] = memory.get() + i * buffer_size;
- T* buffer = reinterpret_cast<T*>(data.get()[i]);
- for (int j = 0; j < frames; ++j) {
- buffer[j] = start;
+ const size_t output_size =
+ output->channel_data().size() == 1 ? frames * channels : frames;
+ for (size_t ch = 0; ch < output->channel_data().size(); ++ch) {
+ T* buffer = reinterpret_cast<T*>(output->channel_data()[ch]);
+ for (size_t i = 0; i < output_size; ++i) {
+ buffer[i] = start;
start += increment;
}
}
- return AudioBuffer::CopyFrom(
- format, channels, frames, data.get(), start_time, duration);
-}
-
-// Instantiate all the types of MakeInterleavedAudioBuffer() and
-// MakePlanarAudioBuffer() needed.
-
-#define DEFINE_INTERLEAVED_INSTANCE(type) \
- template scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer<type>( \
- SampleFormat format, \
- int channels, \
- type start, \
- type increment, \
- int frames, \
- base::TimeDelta start_time, \
+ return output;
+}
+
+// Instantiate all the types of MakeAudioBuffer() needed.
+#define DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(type) \
+ template scoped_refptr<AudioBuffer> MakeAudioBuffer<type>( \
+ SampleFormat format, \
+ int channels, \
+ type start, \
+ type increment, \
+ int frames, \
+ base::TimeDelta start_time, \
base::TimeDelta duration)
-DEFINE_INTERLEAVED_INSTANCE(uint8);
-DEFINE_INTERLEAVED_INSTANCE(int16);
-DEFINE_INTERLEAVED_INSTANCE(int32);
-DEFINE_INTERLEAVED_INSTANCE(float);
-
-#define DEFINE_PLANAR_INSTANCE(type) \
- template scoped_refptr<AudioBuffer> MakePlanarAudioBuffer<type>( \
- SampleFormat format, \
- int channels, \
- type start, \
- type increment, \
- int frames, \
- base::TimeDelta start_time, \
- base::TimeDelta duration);
-DEFINE_PLANAR_INSTANCE(int16);
-DEFINE_PLANAR_INSTANCE(float);
+DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(uint8);
+DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(int16);
+DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(int32);
+DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(float);
static const char kFakeVideoBufferHeader[] = "FakeVideoBufferForTest";
diff --git a/media/base/test_helpers.h b/media/base/test_helpers.h
index 872d08d6f8..ee18f535aa 100644
--- a/media/base/test_helpers.h
+++ b/media/base/test_helpers.h
@@ -85,9 +85,11 @@ class TestVideoConfig {
};
// Create an AudioBuffer containing |frames| frames of data, where each sample
-// is of type T. Each frame will have the data from |channels| channels
-// interleaved. |start| and |increment| are used to specify the values for the
-// samples. Since this is interleaved data, channel 0 data will be:
+// is of type T.
+//
+// For interleaved formats, each frame will have the data from |channels|
+// channels interleaved. |start| and |increment| are used to specify the values
+// for the samples. Since this is interleaved data, channel 0 data will be:
// |start|
// |start| + |channels| * |increment|
// |start| + 2 * |channels| * |increment|, and so on.
@@ -95,23 +97,10 @@ class TestVideoConfig {
// requires data to be of type T, but it is verified that |format| is an
// interleaved format.
//
-// |start_time| will be used as the start time for the samples. |duration| is
-// the duration.
-template <class T>
-scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
- SampleFormat format,
- int channels,
- T start,
- T increment,
- int frames,
- base::TimeDelta start_time,
- base::TimeDelta duration);
-
-// Create an AudioBuffer containing |frames| frames of data, where each sample
-// is of type T. Since this is planar data, there will be a block for each of
-// |channel| channels. |start| and |increment| are used to specify the values
-// for the samples, which are created in channel order. Since this is planar
-// data, channel 0 data will be:
+// For planar formats, there will be a block for each of |channels| channels.
+// |start| and |increment| are used to specify the values for the samples, which
+// are created in channel order. Since this is planar data, channel 0 data will
+// be:
// |start|
// |start| + |increment|
// |start| + 2 * |increment|, and so on.
@@ -122,14 +111,13 @@ scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
// |start_time| will be used as the start time for the samples. |duration| is
// the duration.
template <class T>
-scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
- SampleFormat format,
- int channels,
- T start,
- T increment,
- int frames,
- base::TimeDelta start_time,
- base::TimeDelta duration);
+scoped_refptr<AudioBuffer> MakeAudioBuffer(SampleFormat format,
+ int channels,
+ T start,
+ T increment,
+ int frames,
+ base::TimeDelta timestamp,
+ base::TimeDelta duration);
// Create a fake video DecoderBuffer for testing purpose. The buffer contains
// part of video decoder config info embedded so that the testing code can do
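A usage sketch of the unified helper (all values illustrative); the format
argument alone now selects the interleaved or planar layout:

    scoped_refptr<media::AudioBuffer> interleaved =
        media::MakeAudioBuffer<float>(media::kSampleFormatF32,
                                      2,     // channels
                                      0.0f,  // start
                                      0.1f,  // increment
                                      100,   // frames
                                      base::TimeDelta(),
                                      base::TimeDelta::FromMilliseconds(10));
    scoped_refptr<media::AudioBuffer> planar =
        media::MakeAudioBuffer<int16>(media::kSampleFormatPlanarS16, 2, 0, 1,
                                      100, base::TimeDelta(),
                                      base::TimeDelta::FromMilliseconds(10));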
diff --git a/media/base/vector_math.cc b/media/base/vector_math.cc
index 32584f5cf6..6152204ff3 100644
--- a/media/base/vector_math.cc
+++ b/media/base/vector_math.cc
@@ -88,6 +88,13 @@ void FMUL_C(const float src[], float scale, int len, float dest[]) {
dest[i] = src[i] * scale;
}
+void Crossfade(const float src[], int len, float dest[]) {
+ float cf_ratio = 0;
+ const float cf_increment = 1.0f / len;
+ for (int i = 0; i < len; ++i, cf_ratio += cf_increment)
+ dest[i] = (1.0f - cf_ratio) * src[i] + cf_ratio * dest[i];
+}
+
std::pair<float, float> EWMAAndMaxPower(
float initial_value, const float src[], int len, float smoothing_factor) {
// Ensure |src| is 16-byte aligned.
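A worked example of the new primitive (values chosen for easy arithmetic).
Note the in-place convention: |src| fades out while the existing contents of
|dest| fade in:

    float from[] = { 1.0f, 1.0f, 1.0f, 1.0f };  // Pre-splice samples.
    float to[] = { 0.0f, 0.0f, 0.0f, 0.0f };    // Post-splice samples (dest).
    media::vector_math::Crossfade(from, 4, to);
    // to == { 1.0f, 0.75f, 0.5f, 0.25f }: each output is
    // (1 - i/len) * from[i] + (i/len) * to[i].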
diff --git a/media/base/vector_math.h b/media/base/vector_math.h
index a4dea37289..0a2cb06f6b 100644
--- a/media/base/vector_math.h
+++ b/media/base/vector_math.h
@@ -38,6 +38,8 @@ MEDIA_EXPORT void FMUL(const float src[], float scale, int len, float dest[]);
MEDIA_EXPORT std::pair<float, float> EWMAAndMaxPower(
float initial_value, const float src[], int len, float smoothing_factor);
+MEDIA_EXPORT void Crossfade(const float src[], int len, float dest[]);
+
} // namespace vector_math
} // namespace media
diff --git a/media/base/vector_math_unittest.cc b/media/base/vector_math_unittest.cc
index bed609098b..95433ca475 100644
--- a/media/base/vector_math_unittest.cc
+++ b/media/base/vector_math_unittest.cc
@@ -44,12 +44,12 @@ class VectorMathTest : public testing::Test {
void VerifyOutput(float value) {
for (int i = 0; i < kVectorSize; ++i)
- ASSERT_FLOAT_EQ(output_vector_.get()[i], value);
+ ASSERT_FLOAT_EQ(output_vector_[i], value);
}
protected:
- scoped_ptr<float, base::AlignedFreeDeleter> input_vector_;
- scoped_ptr<float, base::AlignedFreeDeleter> output_vector_;
+ scoped_ptr<float[], base::AlignedFreeDeleter> input_vector_;
+ scoped_ptr<float[], base::AlignedFreeDeleter> output_vector_;
DISALLOW_COPY_AND_ASSIGN(VectorMathTest);
};
@@ -138,7 +138,15 @@ TEST_F(VectorMathTest, FMUL) {
#endif
}
-namespace {
+TEST_F(VectorMathTest, Crossfade) {
+ FillTestVectors(0, 1);
+ vector_math::Crossfade(
+ input_vector_.get(), kVectorSize, output_vector_.get());
+ for (int i = 0; i < kVectorSize; ++i) {
+ ASSERT_FLOAT_EQ(i / static_cast<float>(kVectorSize), output_vector_[i])
+ << "i=" << i;
+ }
+}
class EWMATestScenario {
public:
@@ -248,8 +256,6 @@ class EWMATestScenario {
float expected_max_;
};
-} // namespace
-
typedef testing::TestWithParam<EWMATestScenario> VectorMathEWMAAndMaxPowerTest;
TEST_P(VectorMathEWMAAndMaxPowerTest, Correctness) {
diff --git a/media/base/video_decoder.cc b/media/base/video_decoder.cc
index 81397b7b67..eedb70a364 100644
--- a/media/base/video_decoder.cc
+++ b/media/base/video_decoder.cc
@@ -4,12 +4,18 @@
#include "media/base/video_decoder.h"
+#include "media/base/video_frame.h"
+
namespace media {
VideoDecoder::VideoDecoder() {}
VideoDecoder::~VideoDecoder() {}
+scoped_refptr<VideoFrame> VideoDecoder::GetDecodeOutput() {
+ return NULL;
+}
+
bool VideoDecoder::HasAlpha() const {
return false;
}
diff --git a/media/base/video_decoder.h b/media/base/video_decoder.h
index 66abc8c85c..c54974c5fb 100644
--- a/media/base/video_decoder.h
+++ b/media/base/video_decoder.h
@@ -20,6 +20,8 @@ class VideoFrame;
class MEDIA_EXPORT VideoDecoder {
public:
// Status codes for decode operations on VideoDecoder.
+ // TODO(rileya): Now that both AudioDecoder and VideoDecoder Status enums
+ // match, break them into a decoder_status.h.
enum Status {
kOk, // Everything went as planned.
kAborted, // Decode was aborted as a result of Reset() being called.
@@ -59,6 +61,11 @@ class MEDIA_EXPORT VideoDecoder {
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) = 0;
+ // Some VideoDecoders may queue up multiple VideoFrames from a single
+ // DecoderBuffer. If any such frames are queued, this returns the next one;
+ // otherwise it returns a NULL VideoFrame.
+ virtual scoped_refptr<VideoFrame> GetDecodeOutput();
+
// Resets decoder state, fulfilling all pending DecodeCB and dropping extra
// queued decoded data. After this call, the decoder is back to an initialized
// clean state.
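A sketch of the intended drain pattern (Deliver() is a hypothetical downstream
sink); the base-class default added in video_decoder.cc above simply returns
NULL, so the loop exits immediately for decoders that never queue frames:

    void DrainDecodeOutput(media::VideoDecoder* decoder) {
      while (true) {
        scoped_refptr<media::VideoFrame> frame = decoder->GetDecodeOutput();
        if (!frame.get())
          break;
        Deliver(frame);
      }
    }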
diff --git a/media/base/video_decoder_config.cc b/media/base/video_decoder_config.cc
index 82d607526b..d2b6e41010 100644
--- a/media/base/video_decoder_config.cc
+++ b/media/base/video_decoder_config.cc
@@ -77,7 +77,7 @@ void VideoDecoderConfig::Initialize(VideoCodec codec,
UMA_HISTOGRAM_COUNTS_10000("Media.VideoVisibleWidth", visible_rect.width());
UmaHistogramAspectRatio("Media.VideoVisibleAspectRatio", visible_rect);
UMA_HISTOGRAM_ENUMERATION(
- "Media.VideoPixelFormat", format, VideoFrame::HISTOGRAM_MAX);
+ "Media.VideoPixelFormat", format, VideoFrame::FORMAT_MAX + 1);
}
codec_ = codec;
diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc
index 7e654319c5..64e898987d 100644
--- a/media/base/video_frame.cc
+++ b/media/base/video_frame.cc
@@ -63,8 +63,6 @@ std::string VideoFrame::FormatToString(VideoFrame::Format format) {
return "YV12A";
case VideoFrame::YV12J:
return "YV12J";
- case VideoFrame::HISTOGRAM_MAX:
- return "HISTOGRAM_MAX";
}
NOTREACHED() << "Invalid videoframe format provided: " << format;
return "";
@@ -182,10 +180,13 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
// static
scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
const scoped_refptr<VideoFrame>& frame,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
const base::Closure& no_longer_needed_cb) {
+ DCHECK(frame->visible_rect().Contains(visible_rect));
scoped_refptr<VideoFrame> wrapped_frame(new VideoFrame(
- frame->format(), frame->coded_size(), frame->visible_rect(),
- frame->natural_size(), frame->GetTimestamp(), frame->end_of_stream()));
+ frame->format(), frame->coded_size(), visible_rect, natural_size,
+ frame->GetTimestamp(), frame->end_of_stream()));
for (size_t i = 0; i < NumPlanes(frame->format()); ++i) {
wrapped_frame->strides_[i] = frame->stride(i);
@@ -260,7 +261,6 @@ size_t VideoFrame::NumPlanes(Format format) {
case VideoFrame::YV12A:
return 4;
case VideoFrame::UNKNOWN:
- case VideoFrame::HISTOGRAM_MAX:
break;
}
NOTREACHED() << "Unsupported video frame format: " << format;
@@ -326,7 +326,6 @@ size_t VideoFrame::PlaneAllocationSize(Format format,
}
case VideoFrame::UNKNOWN:
case VideoFrame::NATIVE_TEXTURE:
- case VideoFrame::HISTOGRAM_MAX:
#if defined(VIDEO_HOLE)
case VideoFrame::HOLE:
#endif // defined(VIDEO_HOLE)
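Why the HISTOGRAM_MAX cases can simply disappear from these switches:
FORMAT_MAX aliases YV12J, so a dedicated case would be a duplicate case value
and fail to compile; the aliased entry's case already covers it. A hypothetical
enum showing the same pattern:

    // Illustrative only; Fmt and FmtToString() are hypothetical.
    enum Fmt { FMT_A = 0, FMT_B = 1, FMT_MAX = FMT_B };

    const char* FmtToString(Fmt f) {
      switch (f) {
        case FMT_A:
          return "A";
        case FMT_B:  // Also matches FMT_MAX; a separate case would not compile.
          return "B";
      }
      return "";
    }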
diff --git a/media/base/video_frame.h b/media/base/video_frame.h
index 7ac868e094..0c69627aa2 100644
--- a/media/base/video_frame.h
+++ b/media/base/video_frame.h
@@ -52,7 +52,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
#endif // defined(VIDEO_HOLE)
NATIVE_TEXTURE = 6, // Native texture. Pixel-format agnostic.
YV12J = 7, // JPEG color range version of YV12
- HISTOGRAM_MAX, // Must always be greatest.
+    FORMAT_MAX = YV12J,  // Must always be equal to the largest entry logged.
};
// Returns the name of a Format as a string.
@@ -150,9 +150,12 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
const base::Closure& no_longer_needed_cb);
// Wraps |frame| and calls |no_longer_needed_cb| when the wrapper VideoFrame
- // gets destroyed.
+  // gets destroyed. |visible_rect| must be a sub-rect within
+ // frame->visible_rect().
static scoped_refptr<VideoFrame> WrapVideoFrame(
const scoped_refptr<VideoFrame>& frame,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
const base::Closure& no_longer_needed_cb);
// Creates a frame which indicates end-of-stream.
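A sketch of the cropping use the extended signature enables (the helper names
and the half-size crop are illustrative, not from this change):

    // Called when the wrapper frame is destroyed; illustrative no-op.
    void OnWrapperDestroyed() {}

    // Returns a zero-copy view of |full_frame| cropped to the top-left
    // quadrant of its visible rect. The wrapper shares the original's
    // plane data; the closure fires when the wrapper goes away.
    scoped_refptr<VideoFrame> CropToTopLeftQuadrant(
        const scoped_refptr<VideoFrame>& full_frame) {
      gfx::Rect vr = full_frame->visible_rect();
      gfx::Rect crop(vr.x(), vr.y(), vr.width() / 2, vr.height() / 2);
      return VideoFrame::WrapVideoFrame(full_frame, crop, crop.size(),
                                        base::Bind(&OnWrapperDestroyed));
    }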
diff --git a/media/base/video_frame_pool.cc b/media/base/video_frame_pool.cc
index 4c5a5e31e2..a0f8682287 100644
--- a/media/base/video_frame_pool.cc
+++ b/media/base/video_frame_pool.cc
@@ -75,18 +75,19 @@ scoped_refptr<VideoFrame> VideoFramePool::PoolImpl::CreateFrame(
pool_frame->visible_rect() == visible_rect &&
pool_frame->natural_size() == natural_size) {
frame = pool_frame;
- frame->SetTimestamp(kNoTimestamp());
+ frame->SetTimestamp(timestamp);
break;
}
}
if (!frame) {
frame = VideoFrame::CreateFrame(
- format, coded_size, visible_rect, natural_size, kNoTimestamp());
+ format, coded_size, visible_rect, natural_size, timestamp);
}
return VideoFrame::WrapVideoFrame(
- frame, base::Bind(&VideoFramePool::PoolImpl::FrameReleased, this, frame));
+ frame, frame->visible_rect(), frame->natural_size(),
+ base::Bind(&VideoFramePool::PoolImpl::FrameReleased, this, frame));
}
void VideoFramePool::PoolImpl::Shutdown() {
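With this change, frames handed out by the pool, whether freshly allocated or
recycled, carry the caller's timestamp rather than kNoTimestamp(), so callers
no longer need a follow-up SetTimestamp(). A usage sketch (sizes and timestamp
are illustrative):

    VideoFramePool pool;
    gfx::Size size(320, 240);
    scoped_refptr<VideoFrame> frame = pool.CreateFrame(
        VideoFrame::YV12, size, gfx::Rect(size), size,
        base::TimeDelta::FromMilliseconds(20));
    // Holds for recycled frames too; previously they came back stamped
    // with kNoTimestamp().
    DCHECK(frame->GetTimestamp() == base::TimeDelta::FromMilliseconds(20));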
diff --git a/media/base/video_frame_pool_unittest.cc b/media/base/video_frame_pool_unittest.cc
index 707279f8b9..7850e04c4b 100644
--- a/media/base/video_frame_pool_unittest.cc
+++ b/media/base/video_frame_pool_unittest.cc
@@ -16,9 +16,19 @@ class VideoFramePoolTest : public ::testing::Test {
gfx::Size coded_size(320,240);
gfx::Rect visible_rect(coded_size);
gfx::Size natural_size(coded_size);
- return pool_->CreateFrame(
- format, coded_size, visible_rect, natural_size,
- base::TimeDelta::FromMilliseconds(timestamp_ms));
+
+ scoped_refptr<VideoFrame> frame =
+ pool_->CreateFrame(
+ format, coded_size, visible_rect, natural_size,
+ base::TimeDelta::FromMilliseconds(timestamp_ms));
+ EXPECT_EQ(format, frame->format());
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(timestamp_ms),
+ frame->GetTimestamp());
+ EXPECT_EQ(coded_size, frame->coded_size());
+ EXPECT_EQ(visible_rect, frame->visible_rect());
+ EXPECT_EQ(natural_size, frame->natural_size());
+
+ return frame;
}
void CheckPoolSize(size_t size) const {
@@ -37,7 +47,7 @@ TEST_F(VideoFramePoolTest, SimpleFrameReuse) {
frame = NULL;
// Verify that the next frame from the pool uses the same memory.
- scoped_refptr<VideoFrame> new_frame = CreateFrame(VideoFrame::YV12, 10);
+ scoped_refptr<VideoFrame> new_frame = CreateFrame(VideoFrame::YV12, 20);
EXPECT_EQ(old_y_data, new_frame->data(VideoFrame::kYPlane));
}
diff --git a/media/base/video_frame_unittest.cc b/media/base/video_frame_unittest.cc
index 05465d9b09..967d37653c 100644
--- a/media/base/video_frame_unittest.cc
+++ b/media/base/video_frame_unittest.cc
@@ -200,6 +200,42 @@ TEST(VideoFrame, CreateBlackFrame) {
}
}
+static void FrameNoLongerNeededCallback(
+ const scoped_refptr<media::VideoFrame>& frame,
+ bool* triggered) {
+ *triggered = true;
+}
+
+TEST(VideoFrame, WrapVideoFrame) {
+ const int kWidth = 4;
+ const int kHeight = 4;
+ scoped_refptr<media::VideoFrame> frame;
+ bool no_longer_needed_triggered = false;
+ {
+ scoped_refptr<media::VideoFrame> wrapped_frame =
+ VideoFrame::CreateBlackFrame(gfx::Size(kWidth, kHeight));
+ ASSERT_TRUE(wrapped_frame.get());
+
+ gfx::Rect visible_rect(1, 1, 1, 1);
+ gfx::Size natural_size = visible_rect.size();
+ frame = media::VideoFrame::WrapVideoFrame(
+ wrapped_frame, visible_rect, natural_size,
+ base::Bind(&FrameNoLongerNeededCallback, wrapped_frame,
+ &no_longer_needed_triggered));
+ EXPECT_EQ(wrapped_frame->coded_size(), frame->coded_size());
+ EXPECT_EQ(wrapped_frame->data(media::VideoFrame::kYPlane),
+ frame->data(media::VideoFrame::kYPlane));
+ EXPECT_NE(wrapped_frame->visible_rect(), frame->visible_rect());
+ EXPECT_EQ(visible_rect, frame->visible_rect());
+ EXPECT_NE(wrapped_frame->natural_size(), frame->natural_size());
+ EXPECT_EQ(natural_size, frame->natural_size());
+ }
+
+ EXPECT_FALSE(no_longer_needed_triggered);
+ frame = NULL;
+ EXPECT_TRUE(no_longer_needed_triggered);
+}
+
// Ensure each frame is properly sized and allocated. Will trigger OOB reads
// and writes as well as incorrect frame hashes otherwise.
TEST(VideoFrame, CheckFrameExtents) {