diff options
author | Patrik Höglund <phoglund@webrtc.org> | 2015-11-12 17:36:48 +0100 |
---|---|---|
committer | Patrik Höglund <phoglund@webrtc.org> | 2015-11-12 16:37:01 +0000 |
commit | 68876f990ea1ea365d2d8155df261b38ec9fbeff (patch) | |
tree | 974d8007a9d43e1eaa4e3587ca74441b84bd9d83 /webrtc/modules/audio_device/android | |
parent | 56a34df92807d95a2660765be10abef7c779666f (diff) | |
download | webrtc-68876f990ea1ea365d2d8155df261b38ec9fbeff.tar.gz |
Introduces Android API level linting, fixes all current API lint errors.
This CL attempts to annotate accesses to >16 API levels using as
small scopes as possible. The @TargetApi annotations mean "yes, I know
I'm accessing a higher API and I take responsibility for gating the
call on Android API level". The Encoder/Decoder classes are annotated
on the whole class, but they're only accessed through JNI; otherwise
we should annotate at the method level, preferably on private methods.
This patch also fixes some compiler-level deprecation warnings (i.e.
-Xlint:deprecation), but probably not all of them.
BUG=webrtc:5063
R=henrika@webrtc.org, kjellander@webrtc.org, magjed@webrtc.org
Review URL: https://codereview.webrtc.org/1412673008 .
Cr-Commit-Position: refs/heads/master@{#10624}
Diffstat (limited to 'webrtc/modules/audio_device/android')
4 files changed, 49 insertions, 18 deletions
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java index 7cd769a63c..c3ab043868 100644 --- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java +++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java @@ -10,6 +10,7 @@ package org.webrtc.voiceengine; +import android.annotation.TargetApi; import android.media.audiofx.AcousticEchoCanceler; import android.media.audiofx.AudioEffect; import android.media.audiofx.AudioEffect.Descriptor; @@ -119,6 +120,7 @@ class WebRtcAudioEffects { // Returns true if the platform AEC should be excluded based on its UUID. // AudioEffect.queryEffects() can throw IllegalStateException. + @TargetApi(18) private static boolean isAcousticEchoCancelerExcludedByUUID() { for (Descriptor d : AudioEffect.queryEffects()) { if (d.type.equals(AudioEffect.EFFECT_TYPE_AEC) && @@ -131,6 +133,7 @@ class WebRtcAudioEffects { // Returns true if the platform AGC should be excluded based on its UUID. // AudioEffect.queryEffects() can throw IllegalStateException. + @TargetApi(18) private static boolean isAutomaticGainControlExcludedByUUID() { for (Descriptor d : AudioEffect.queryEffects()) { if (d.type.equals(AudioEffect.EFFECT_TYPE_AGC) && @@ -143,6 +146,7 @@ class WebRtcAudioEffects { // Returns true if the platform NS should be excluded based on its UUID. // AudioEffect.queryEffects() can throw IllegalStateException. + @TargetApi(18) private static boolean isNoiseSuppressorExcludedByUUID() { for (Descriptor d : AudioEffect.queryEffects()) { if (d.type.equals(AudioEffect.EFFECT_TYPE_NS) && @@ -368,7 +372,11 @@ class WebRtcAudioEffects { // AudioEffect.Descriptor array that are actually not available on the device. 
// As an example: Samsung Galaxy S6 includes an AGC in the descriptor but // AutomaticGainControl.isAvailable() returns false. + @TargetApi(18) private boolean effectTypeIsVoIP(UUID type) { + if (!WebRtcAudioUtils.runningOnJellyBeanMR2OrHigher()) + return false; + return (AudioEffect.EFFECT_TYPE_AEC.equals(type) && isAcousticEchoCancelerSupported()) || (AudioEffect.EFFECT_TYPE_AGC.equals(type) diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java index 7359486a3f..f40317b25d 100644 --- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java +++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java @@ -10,6 +10,7 @@ package org.webrtc.voiceengine; +import android.annotation.TargetApi; import android.content.Context; import android.content.pm.PackageManager; import android.media.AudioFormat; @@ -189,20 +190,26 @@ public class WebRtcAudioManager { // No overrides available. Deliver best possible estimate based on default // Android AudioManager APIs. final int sampleRateHz; - if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) { - sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz(); + if (WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) { + sampleRateHz = getSampleRateOnJellyBeanMR10OrHigher(); } else { - String sampleRateString = audioManager.getProperty( - AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE); - sampleRateHz = (sampleRateString == null) - ? 
WebRtcAudioUtils.getDefaultSampleRateHz() - : Integer.parseInt(sampleRateString); + sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz(); } Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz"); return sampleRateHz; } + @TargetApi(17) + private int getSampleRateOnJellyBeanMR10OrHigher() { + String sampleRateString = audioManager.getProperty( + AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE); + return (sampleRateString == null) + ? WebRtcAudioUtils.getDefaultSampleRateHz() + : Integer.parseInt(sampleRateString); + } + // Returns the native output buffer size for low-latency output streams. + @TargetApi(17) private int getLowLatencyOutputFramesPerBuffer() { assertTrue(isLowLatencyOutputSupported()); if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) { diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java index 0602e44c23..11eb51383d 100644 --- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java +++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java @@ -13,6 +13,7 @@ package org.webrtc.voiceengine; import java.lang.Thread; import java.nio.ByteBuffer; +import android.annotation.TargetApi; import android.content.Context; import android.media.AudioFormat; import android.media.AudioManager; @@ -90,13 +91,9 @@ class WebRtcAudioTrack { assertTrue(sizeInBytes <= byteBuffer.remaining()); int bytesWritten = 0; if (WebRtcAudioUtils.runningOnLollipopOrHigher()) { - bytesWritten = audioTrack.write(byteBuffer, - sizeInBytes, - AudioTrack.WRITE_BLOCKING); + bytesWritten = writeOnLollipop(audioTrack, byteBuffer, sizeInBytes); } else { - bytesWritten = audioTrack.write(byteBuffer.array(), - byteBuffer.arrayOffset(), - sizeInBytes); + bytesWritten = writePreLollipop(audioTrack, byteBuffer, sizeInBytes); } if (bytesWritten != sizeInBytes) { Logging.e(TAG, 
"AudioTrack.write failed: " + bytesWritten); @@ -123,6 +120,15 @@ class WebRtcAudioTrack { audioTrack.flush(); } + @TargetApi(21) + private int writeOnLollipop(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) { + return audioTrack.write(byteBuffer, sizeInBytes, AudioTrack.WRITE_BLOCKING); + } + + private int writePreLollipop(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) { + return audioTrack.write(byteBuffer.array(), byteBuffer.arrayOffset(), sizeInBytes); + } + public void joinThread() { keepAlive = false; while (isAlive()) { @@ -224,16 +230,21 @@ class WebRtcAudioTrack { private boolean setStreamVolume(int volume) { Logging.d(TAG, "setStreamVolume(" + volume + ")"); assertTrue(audioManager != null); - if (WebRtcAudioUtils.runningOnLollipopOrHigher()) { - if (audioManager.isVolumeFixed()) { - Logging.e(TAG, "The device implements a fixed volume policy."); - return false; - } + if (isVolumeFixed()) { + Logging.e(TAG, "The device implements a fixed volume policy."); + return false; } audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0); return true; } + @TargetApi(21) + private boolean isVolumeFixed() { + if (!WebRtcAudioUtils.runningOnLollipopOrHigher()) + return false; + return audioManager.isVolumeFixed(); + } + /** Get current volume level for a phone call audio stream. 
*/ private int getStreamVolume() { Logging.d(TAG, "getStreamVolume"); diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java index 9d7a600190..45f564a4dd 100644 --- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java +++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java @@ -144,6 +144,11 @@ public final class WebRtcAudioUtils { return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1; } + public static boolean runningOnJellyBeanMR2OrHigher() { + // July 24, 2013: Android 4.3. API Level 18. + return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2; + } + public static boolean runningOnLollipopOrHigher() { // API Level 21. return Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP; |