Diffstat (limited to 'webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine')
-rw-r--r--  webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java  28
-rw-r--r--  webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java  48
-rw-r--r--  webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java    4
-rw-r--r--  webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java    53
-rw-r--r--  webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java    37
5 files changed, 87 insertions(+), 83 deletions(-)
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
index 9b90f4ab54..c3ab043868 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
@@ -10,6 +10,7 @@
package org.webrtc.voiceengine;
+import android.annotation.TargetApi;
import android.media.audiofx.AcousticEchoCanceler;
import android.media.audiofx.AudioEffect;
import android.media.audiofx.AudioEffect.Descriptor;
@@ -119,6 +120,7 @@ class WebRtcAudioEffects {
// Returns true if the platform AEC should be excluded based on its UUID.
// AudioEffect.queryEffects() can throw IllegalStateException.
+ @TargetApi(18)
private static boolean isAcousticEchoCancelerExcludedByUUID() {
for (Descriptor d : AudioEffect.queryEffects()) {
if (d.type.equals(AudioEffect.EFFECT_TYPE_AEC) &&
@@ -131,6 +133,7 @@ class WebRtcAudioEffects {
// Returns true if the platform AGC should be excluded based on its UUID.
// AudioEffect.queryEffects() can throw IllegalStateException.
+ @TargetApi(18)
private static boolean isAutomaticGainControlExcludedByUUID() {
for (Descriptor d : AudioEffect.queryEffects()) {
if (d.type.equals(AudioEffect.EFFECT_TYPE_AGC) &&
@@ -143,6 +146,7 @@ class WebRtcAudioEffects {
// Returns true if the platform NS should be excluded based on its UUID.
// AudioEffect.queryEffects() can throw IllegalStateException.
+ @TargetApi(18)
private static boolean isNoiseSuppressorExcludedByUUID() {
for (Descriptor d : AudioEffect.queryEffects()) {
if (d.type.equals(AudioEffect.EFFECT_TYPE_NS) &&
@@ -208,15 +212,6 @@ class WebRtcAudioEffects {
private WebRtcAudioEffects() {
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
- for (Descriptor d : AudioEffect.queryEffects()) {
- if (effectTypeIsVoIP(d.type) || DEBUG) {
- // Only log information for VoIP effects (AEC, AEC and NS).
- Logging.d(TAG, "name: " + d.name + ", "
- + "mode: " + d.connectMode + ", "
- + "implementor: " + d.implementor + ", "
- + "UUID: " + d.uuid);
- }
- }
}
// Call this method to enable or disable the platform AEC. It modifies
@@ -282,6 +277,17 @@ class WebRtcAudioEffects {
assertTrue(agc == null);
assertTrue(ns == null);
+ // Add logging of supported effects, restricted to the "VoIP effects",
+ // i.e., AEC, AGC and NS, unless DEBUG is set.
+ for (Descriptor d : AudioEffect.queryEffects()) {
+ if (effectTypeIsVoIP(d.type) || DEBUG) {
+ Logging.d(TAG, "name: " + d.name + ", "
+ + "mode: " + d.connectMode + ", "
+ + "implementor: " + d.implementor + ", "
+ + "UUID: " + d.uuid);
+ }
+ }
+
if (isAcousticEchoCancelerSupported()) {
// Create an AcousticEchoCanceler and attach it to the AudioRecord on
// the specified audio session.
@@ -366,7 +372,11 @@ class WebRtcAudioEffects {
// AudioEffect.Descriptor array that are actually not available on the device.
// As an example: Samsung Galaxy S6 includes an AGC in the descriptor but
// AutomaticGainControl.isAvailable() returns false.
+ @TargetApi(18)
private boolean effectTypeIsVoIP(UUID type) {
+ if (!WebRtcAudioUtils.runningOnJellyBeanMR2OrHigher())
+ return false;
+
return (AudioEffect.EFFECT_TYPE_AEC.equals(type)
&& isAcousticEchoCancelerSupported())
|| (AudioEffect.EFFECT_TYPE_AGC.equals(type)
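The effects hunks pair each @TargetApi(18) annotation with a runtime SDK check (effectTypeIsVoIP() bails out below Jelly Bean MR2), since AudioEffect.queryEffects() and the EFFECT_TYPE_* constants only became public API at level 18. A minimal standalone sketch of that guard pattern; EffectProbe, deviceListsPlatformAec and queryForAec are illustrative names, not part of this patch:

import android.annotation.TargetApi;
import android.media.audiofx.AudioEffect;
import android.os.Build;

class EffectProbe {
  // Safe to call on any API level: the runtime check keeps the
  // API-18-only code path from being reached on older devices.
  static boolean deviceListsPlatformAec() {
    if (Build.VERSION.SDK_INT < Build.VERSION_CODES.JELLY_BEAN_MR2) {
      return false;  // Pre-4.3: treat the platform AEC as unavailable.
    }
    return queryForAec();
  }

  // queryEffects() and EFFECT_TYPE_AEC exist only from API level 18, so
  // they are isolated in a @TargetApi(18) method. Note that
  // queryEffects() can throw IllegalStateException.
  @TargetApi(18)
  private static boolean queryForAec() {
    for (AudioEffect.Descriptor d : AudioEffect.queryEffects()) {
      if (AudioEffect.EFFECT_TYPE_AEC.equals(d.type)) {
        return true;
      }
    }
    return false;
  }
}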
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
index cf2f03a2f1..1213f333d9 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
@@ -10,6 +10,7 @@
package org.webrtc.voiceengine;
+import android.annotation.TargetApi;
import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioFormat;
@@ -33,11 +34,24 @@ import java.lang.Math;
// recommended to always use AudioManager.MODE_IN_COMMUNICATION.
// This class also adds support for output volume control of the
// STREAM_VOICE_CALL-type stream.
-class WebRtcAudioManager {
+public class WebRtcAudioManager {
private static final boolean DEBUG = false;
private static final String TAG = "WebRtcAudioManager";
+ private static boolean blacklistDeviceForOpenSLESUsage = false;
+ private static boolean blacklistDeviceForOpenSLESUsageIsOverridden = false;
+
+ // Call this method to override the default list of blacklisted devices
+ // specified in WebRtcAudioUtils.BLACKLISTED_OPEN_SL_ES_MODELS.
+ // Allows an app to take control over which devices to exclude from using
+ // the OpenSL ES audio output path.
+ public static synchronized void setBlacklistDeviceForOpenSLESUsage(
+ boolean enable) {
+ blacklistDeviceForOpenSLESUsageIsOverridden = true;
+ blacklistDeviceForOpenSLESUsage = enable;
+ }
+
// Default audio data format is PCM 16 bit per sample.
// Guaranteed to be supported by all devices.
private static final int BITS_PER_SAMPLE = 16;
@@ -71,7 +85,6 @@ class WebRtcAudioManager {
private int channels;
private int outputBufferSize;
private int inputBufferSize;
- private int outputStreamType;
WebRtcAudioManager(Context context, long nativeAudioManager) {
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
@@ -85,7 +98,7 @@ class WebRtcAudioManager {
storeAudioParameters();
nativeCacheAudioParameters(
sampleRate, channels, hardwareAEC, hardwareAGC, hardwareNS,
- lowLatencyOutput, outputBufferSize, inputBufferSize, outputStreamType,
+ lowLatencyOutput, outputBufferSize, inputBufferSize,
nativeAudioManager);
}
@@ -110,8 +123,9 @@ class WebRtcAudioManager {
return (audioManager.getMode() == AudioManager.MODE_IN_COMMUNICATION);
}
- private boolean isDeviceBlacklistedForOpenSLESUsage() {
- boolean blacklisted =
+ private boolean isDeviceBlacklistedForOpenSLESUsage() {
+ boolean blacklisted = blacklistDeviceForOpenSLESUsageIsOverridden ?
+ blacklistDeviceForOpenSLESUsage :
WebRtcAudioUtils.deviceIsBlacklistedForOpenSLESUsage();
if (blacklisted) {
Logging.e(TAG, Build.MODEL + " is blacklisted for OpenSL ES usage!");
@@ -133,8 +147,6 @@ class WebRtcAudioManager {
getMinOutputFrameSize(sampleRate, channels);
// TODO(henrika): add support for low-latency input.
inputBufferSize = getMinInputFrameSize(sampleRate, channels);
- outputStreamType = WebRtcAudioUtils.getOutputStreamTypeFromAudioMode(
- audioManager.getMode());
}
// Gets the current earpiece state.
@@ -178,20 +190,26 @@ class WebRtcAudioManager {
// No overrides available. Deliver best possible estimate based on default
// Android AudioManager APIs.
final int sampleRateHz;
- if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
- sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz();
+ if (WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
+ sampleRateHz = getSampleRateOnJellyBeanMR1OrHigher();
} else {
- String sampleRateString = audioManager.getProperty(
- AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
- sampleRateHz = (sampleRateString == null)
- ? WebRtcAudioUtils.getDefaultSampleRateHz()
- : Integer.parseInt(sampleRateString);
+ sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz();
}
Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
return sampleRateHz;
}
+ @TargetApi(17)
+ private int getSampleRateOnJellyBeanMR1OrHigher() {
+ String sampleRateString = audioManager.getProperty(
+ AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
+ return (sampleRateString == null)
+ ? WebRtcAudioUtils.getDefaultSampleRateHz()
+ : Integer.parseInt(sampleRateString);
+ }
+
// Returns the native output buffer size for low-latency output streams.
+ @TargetApi(17)
private int getLowLatencyOutputFramesPerBuffer() {
assertTrue(isLowLatencyOutputSupported());
if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
@@ -270,5 +288,5 @@ class WebRtcAudioManager {
private native void nativeCacheAudioParameters(
int sampleRate, int channels, boolean hardwareAEC, boolean hardwareAGC,
boolean hardwareNS, boolean lowLatencyOutput, int outputBufferSize,
- int inputBufferSize, int outputStreamType, long nativeAudioManager);
+ int inputBufferSize, long nativeAudioManager);
}
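setBlacklistDeviceForOpenSLESUsage() is the one new public entry point here. A hedged usage sketch, assuming it runs before WebRTC constructs its audio device module (AudioPathConfig is a hypothetical app-side helper):

import org.webrtc.voiceengine.WebRtcAudioManager;

final class AudioPathConfig {
  // Decide the OpenSL ES policy once at startup. true means "treat this
  // device as blacklisted", i.e. use the AudioTrack output path even if
  // the model is not in WebRtcAudioUtils.BLACKLISTED_OPEN_SL_ES_MODELS;
  // false forces OpenSL ES back on for a blacklisted model.
  static void preferAudioTrackOutput() {
    WebRtcAudioManager.setBlacklistDeviceForOpenSLESUsage(true);
  }
}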
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
index 7b31e08eed..ff77635843 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
@@ -192,10 +192,6 @@ class WebRtcAudioRecord {
Math.max(BUFFER_SIZE_FACTOR * minBufferSize, byteBuffer.capacity());
Logging.d(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
try {
- // TODO(henrika): the only supported audio source for input is currently
- // AudioSource.VOICE_COMMUNICATION. Is there any reason why we should
- // support other types, e.g. DEFAULT or MIC? Only reason I can think of
- // is if the device does not support VOICE_COMMUNICATION.
audioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION,
sampleRate,
AudioFormat.CHANNEL_IN_MONO,
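The removed TODO sat directly above the AudioRecord construction, which keeps AudioSource.VOICE_COMMUNICATION as the only supported input source. For reference, a minimal sketch of that construction pattern; the safety factor of 2 stands in for the BUFFER_SIZE_FACTOR used a few lines above this hunk:

import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder.AudioSource;

final class RecordSketch {
  // May return an AudioRecord in STATE_UNINITIALIZED; callers should
  // verify getState() before use.
  static AudioRecord create(int sampleRate) {
    // Stay above the platform minimum to reduce the risk of overruns.
    final int minBufferSize = AudioRecord.getMinBufferSize(
        sampleRate, AudioFormat.CHANNEL_IN_MONO,
        AudioFormat.ENCODING_PCM_16BIT);
    return new AudioRecord(AudioSource.VOICE_COMMUNICATION,
                           sampleRate,
                           AudioFormat.CHANNEL_IN_MONO,
                           AudioFormat.ENCODING_PCM_16BIT,
                           2 * minBufferSize);
  }
}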
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
index ec0e109169..11eb51383d 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
@@ -13,6 +13,7 @@ package org.webrtc.voiceengine;
import java.lang.Thread;
import java.nio.ByteBuffer;
+import android.annotation.TargetApi;
import android.content.Context;
import android.media.AudioFormat;
import android.media.AudioManager;
@@ -39,7 +40,6 @@ class WebRtcAudioTrack {
private final Context context;
private final long nativeAudioTrack;
private final AudioManager audioManager;
- private final int streamType;
private ByteBuffer byteBuffer;
@@ -91,13 +91,9 @@ class WebRtcAudioTrack {
assertTrue(sizeInBytes <= byteBuffer.remaining());
int bytesWritten = 0;
if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
- bytesWritten = audioTrack.write(byteBuffer,
- sizeInBytes,
- AudioTrack.WRITE_BLOCKING);
+ bytesWritten = writeOnLollipop(audioTrack, byteBuffer, sizeInBytes);
} else {
- bytesWritten = audioTrack.write(byteBuffer.array(),
- byteBuffer.arrayOffset(),
- sizeInBytes);
+ bytesWritten = writePreLollipop(audioTrack, byteBuffer, sizeInBytes);
}
if (bytesWritten != sizeInBytes) {
Logging.e(TAG, "AudioTrack.write failed: " + bytesWritten);
@@ -124,6 +120,15 @@ class WebRtcAudioTrack {
audioTrack.flush();
}
+ @TargetApi(21)
+ private int writeOnLollipop(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) {
+ return audioTrack.write(byteBuffer, sizeInBytes, AudioTrack.WRITE_BLOCKING);
+ }
+
+ private int writePreLollipop(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) {
+ return audioTrack.write(byteBuffer.array(), byteBuffer.arrayOffset(), sizeInBytes);
+ }
+
public void joinThread() {
keepAlive = false;
while (isAlive()) {
@@ -142,9 +147,6 @@ class WebRtcAudioTrack {
this.nativeAudioTrack = nativeAudioTrack;
audioManager = (AudioManager) context.getSystemService(
Context.AUDIO_SERVICE);
- this.streamType =
- WebRtcAudioUtils.getOutputStreamTypeFromAudioMode(
- audioManager.getMode());
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
}
@@ -181,7 +183,7 @@ class WebRtcAudioTrack {
// Create an AudioTrack object and initialize its associated audio buffer.
// The size of this buffer determines how long an AudioTrack can play
// before running out of data.
- audioTrack = new AudioTrack(streamType,
+ audioTrack = new AudioTrack(AudioManager.STREAM_VOICE_CALL,
sampleRate,
AudioFormat.CHANNEL_OUT_MONO,
AudioFormat.ENCODING_PCM_16BIT,
@@ -193,7 +195,7 @@ class WebRtcAudioTrack {
}
assertTrue(audioTrack.getState() == AudioTrack.STATE_INITIALIZED);
assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED);
- assertTrue(audioTrack.getStreamType() == streamType);
+ assertTrue(audioTrack.getStreamType() == AudioManager.STREAM_VOICE_CALL);
}
private boolean startPlayout() {
@@ -217,32 +219,37 @@ class WebRtcAudioTrack {
return true;
}
- /** Get max possible volume index given type of audio stream. */
+ /** Get max possible volume index for a phone call audio stream. */
private int getStreamMaxVolume() {
Logging.d(TAG, "getStreamMaxVolume");
assertTrue(audioManager != null);
- return audioManager.getStreamMaxVolume(streamType);
+ return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
}
- /** Set current volume level given type of audio stream. */
+ /** Set current volume level for a phone call audio stream. */
private boolean setStreamVolume(int volume) {
Logging.d(TAG, "setStreamVolume(" + volume + ")");
assertTrue(audioManager != null);
- if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
- if (audioManager.isVolumeFixed()) {
- Logging.e(TAG, "The device implements a fixed volume policy.");
- return false;
- }
+ if (isVolumeFixed()) {
+ Logging.e(TAG, "The device implements a fixed volume policy.");
+ return false;
}
- audioManager.setStreamVolume(streamType, volume, 0);
+ audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0);
return true;
}
- /** Get current volume level given type of audio stream. */
+ @TargetApi(21)
+ private boolean isVolumeFixed() {
+ if (!WebRtcAudioUtils.runningOnLollipopOrHigher())
+ return false;
+ return audioManager.isVolumeFixed();
+ }
+
+ /** Get current volume level for a phone call audio stream. */
private int getStreamVolume() {
Logging.d(TAG, "getStreamVolume");
assertTrue(audioManager != null);
- return audioManager.getStreamVolume(streamType);
+ return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
}
/** Helper method which throws an exception when an assertion has failed. */
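This file applies the same API-splitting pattern twice: AudioTrack.write() is split into writeOnLollipop() and writePreLollipop(), and the API-21 AudioManager.isVolumeFixed() call gets its own guarded helper. A standalone sketch of the write dispatch, with TrackWriter as an illustrative name (the patch keeps these as private instance methods):

import android.annotation.TargetApi;
import android.media.AudioTrack;
import android.os.Build;
import java.nio.ByteBuffer;

final class TrackWriter {
  static int write(AudioTrack track, ByteBuffer buffer, int sizeInBytes) {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
      return writeOnLollipop(track, buffer, sizeInBytes);
    }
    // Pre-21 fallback: the byte[] overload. Requires a buffer with an
    // accessible backing array (hasArray() == true); otherwise array()
    // and arrayOffset() throw.
    return track.write(buffer.array(), buffer.arrayOffset(), sizeInBytes);
  }

  // The blocking ByteBuffer overload only exists from API level 21.
  @TargetApi(21)
  private static int writeOnLollipop(AudioTrack track, ByteBuffer buffer,
                                     int sizeInBytes) {
    return track.write(buffer, sizeInBytes, AudioTrack.WRITE_BLOCKING);
  }
}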
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
index f08e11dad8..45f564a4dd 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
@@ -144,6 +144,11 @@ public final class WebRtcAudioUtils {
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1;
}
+ public static boolean runningOnJellyBeanMR2OrHigher() {
+ // July 24, 2013: Android 4.3. API Level 18.
+ return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2;
+ }
+
public static boolean runningOnLollipopOrHigher() {
// API Level 21.
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP;
@@ -193,37 +198,5 @@ public final class WebRtcAudioUtils {
permission,
Process.myPid(),
Process.myUid()) == PackageManager.PERMISSION_GRANTED;
- }
-
- // Convert the provided audio |mode| into most suitable audio output stream
- // type. The stream type is used for creating audio streams and for volume
- // changes. It is essential that the mode and type are in-line to ensure
- // correct behavior. If for example a STREAM_MUSIC type of stream is created
- // in a MODE_IN_COMMUNICATION mode, audio will be played out and the volume
- // icon will look OK but the actual volume will not be changed when the user
- // changes the volume slider.
- // TODO(henrika): there is currently no mapping to STREAM_ALARM, STREAM_DTMF,
- // or STREAM_NOTIFICATION types since I am unable to see a reason for using
- // them. There are only four different modes.
- public static int getOutputStreamTypeFromAudioMode(int mode) {
- Logging.d(TAG, "getOutputStreamTypeFromAudioMode(mode=" + mode + ")");
- switch (mode) {
- case AudioManager.MODE_NORMAL:
- // The audio stream for music playback.
- Logging.d(TAG, "AudioManager.STREAM_MUSIC");
- return AudioManager.STREAM_MUSIC;
- case AudioManager.MODE_RINGTONE:
- // Audio stream for the phone ring.
- Logging.d(TAG, "AudioManager.STREAM_RING");
- return AudioManager.STREAM_RING;
- case AudioManager.MODE_IN_CALL:
- case AudioManager.MODE_IN_COMMUNICATION:
- // Audio stream for phone calls.
- Logging.d(TAG, "AudioManager.STREAM_VOICE_CALL");
- return AudioManager.STREAM_VOICE_CALL;
- default:
- Logging.d(TAG, "AudioManager.USE_DEFAULT_STREAM_TYPE");
- return AudioManager.USE_DEFAULT_STREAM_TYPE;
- }
}
}
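With getOutputStreamTypeFromAudioMode() removed, every call site pins AudioManager.STREAM_VOICE_CALL. That keeps the stream type aligned with MODE_IN_COMMUNICATION, where the hardware volume keys typically control the voice-call stream, so playout volume and the volume UI stay in step. A small sketch of adjusting that stream from application code (CallVolume is a hypothetical helper, not part of WebRTC):

import android.content.Context;
import android.media.AudioManager;

final class CallVolume {
  // Clamp and apply a voice-call volume index; flags == 0 means no UI.
  static void set(Context context, int volumeIndex) {
    AudioManager am =
        (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
    int max = am.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
    am.setStreamVolume(AudioManager.STREAM_VOICE_CALL,
                       Math.min(Math.max(volumeIndex, 0), max), 0);
  }
}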