Diffstat (limited to 'webrtc/modules/audio_device/android')
18 files changed, 178 insertions, 180 deletions
diff --git a/webrtc/modules/audio_device/android/audio_device_unittest.cc b/webrtc/modules/audio_device/android/audio_device_unittest.cc
index 7b2d6354c4..768047df51 100644
--- a/webrtc/modules/audio_device/android/audio_device_unittest.cc
+++ b/webrtc/modules/audio_device/android/audio_device_unittest.cc
@@ -383,7 +383,7 @@ class MockAudioTransport : public AudioTransport {
                int32_t(const void* audioSamples,
                        const size_t nSamples,
                        const size_t nBytesPerSample,
-                       const uint8_t nChannels,
+                       const size_t nChannels,
                        const uint32_t samplesPerSec,
                        const uint32_t totalDelayMS,
                        const int32_t clockDrift,
@@ -393,7 +393,7 @@ class MockAudioTransport : public AudioTransport {
   MOCK_METHOD8(NeedMorePlayData,
                int32_t(const size_t nSamples,
                        const size_t nBytesPerSample,
-                       const uint8_t nChannels,
+                       const size_t nChannels,
                        const uint32_t samplesPerSec,
                        void* audioSamples,
                        size_t& nSamplesOut,
@@ -423,7 +423,7 @@ class MockAudioTransport : public AudioTransport {
   int32_t RealRecordedDataIsAvailable(const void* audioSamples,
                                       const size_t nSamples,
                                       const size_t nBytesPerSample,
-                                      const uint8_t nChannels,
+                                      const size_t nChannels,
                                       const uint32_t samplesPerSec,
                                       const uint32_t totalDelayMS,
                                       const int32_t clockDrift,
@@ -445,7 +445,7 @@ class MockAudioTransport : public AudioTransport {
   int32_t RealNeedMorePlayData(const size_t nSamples,
                                const size_t nBytesPerSample,
-                               const uint8_t nChannels,
+                               const size_t nChannels,
                                const uint32_t samplesPerSec,
                                void* audioSamples,
                                size_t& nSamplesOut,
@@ -521,10 +521,10 @@ class AudioDeviceTest : public ::testing::Test {
   int record_sample_rate() const {
     return record_parameters_.sample_rate();
   }
-  int playout_channels() const {
+  size_t playout_channels() const {
     return playout_parameters_.channels();
   }
-  int record_channels() const {
+  size_t record_channels() const {
     return record_parameters_.channels();
   }
   size_t playout_frames_per_10ms_buffer() const {
@@ -931,7 +931,7 @@ TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
 // not contain any explicit verification that the audio quality is perfect.
 TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
   // TODO(henrika): extend test when mono output is supported.
-  EXPECT_EQ(1, playout_channels());
+  EXPECT_EQ(1u, playout_channels());
   NiceMock<MockAudioTransport> mock(kPlayout);
   const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
   std::string file_name = GetFileName(playout_sample_rate());
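The type change above ripples through every mock on purpose: gmock requires a mocked method to repeat the overridden virtual's signature exactly, so each mirror of AudioTransport's nChannels parameter flips from uint8_t to size_t in lockstep, and the EXPECT_EQ literal gains a u suffix so both operands of the comparison are unsigned. A minimal, self-contained sketch of the pattern (hypothetical names, not part of the WebRTC tree):

    #include <cstddef>
    #include "gtest/gtest.h"

    // Hypothetical stand-in for AudioDeviceTest::playout_channels().
    size_t playout_channels() { return 1; }

    TEST(ChannelCountTest, IsMono) {
      // 1u keeps the comparison unsigned vs. unsigned; a plain 1 against a
      // size_t can trigger -Wsign-compare on some toolchains.
      EXPECT_EQ(1u, playout_channels());
    }
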
diff --git a/webrtc/modules/audio_device/android/audio_manager.cc b/webrtc/modules/audio_device/android/audio_manager.cc
index 169a1929ce..1d08a6adc0 100644
--- a/webrtc/modules/audio_device/android/audio_manager.cc
+++ b/webrtc/modules/audio_device/android/audio_manager.cc
@@ -10,13 +10,15 @@
 #include "webrtc/modules/audio_device/android/audio_manager.h"
 
+#include <utility>
+
 #include <android/log.h>
 
 #include "webrtc/base/arraysize.h"
 #include "webrtc/base/checks.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/modules/audio_device/android/audio_common.h"
-#include "webrtc/modules/utility/interface/helpers_android.h"
+#include "webrtc/modules/utility/include/helpers_android.h"
 
 #define TAG "AudioManager"
 #define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
@@ -29,15 +31,16 @@ namespace webrtc {
 
 // AudioManager::JavaAudioManager implementation
 AudioManager::JavaAudioManager::JavaAudioManager(
-    NativeRegistration* native_reg, rtc::scoped_ptr<GlobalRef> audio_manager)
-    : audio_manager_(audio_manager.Pass()),
+    NativeRegistration* native_reg,
+    rtc::scoped_ptr<GlobalRef> audio_manager)
+    : audio_manager_(std::move(audio_manager)),
       init_(native_reg->GetMethodId("init", "()Z")),
       dispose_(native_reg->GetMethodId("dispose", "()V")),
       is_communication_mode_enabled_(
           native_reg->GetMethodId("isCommunicationModeEnabled", "()Z")),
       is_device_blacklisted_for_open_sles_usage_(
-          native_reg->GetMethodId(
-              "isDeviceBlacklistedForOpenSLESUsage", "()Z")) {
+          native_reg->GetMethodId("isDeviceBlacklistedForOpenSLESUsage",
+                                  "()Z")) {
   ALOGD("JavaAudioManager::ctor%s", GetThreadInfo().c_str());
 }
 
@@ -71,13 +74,12 @@ AudioManager::AudioManager()
       hardware_agc_(false),
       hardware_ns_(false),
       low_latency_playout_(false),
-      delay_estimate_in_milliseconds_(0),
-      output_stream_type_(0) {
+      delay_estimate_in_milliseconds_(0) {
   ALOGD("ctor%s", GetThreadInfo().c_str());
   RTC_CHECK(j_environment_);
   JNINativeMethod native_methods[] = {
       {"nativeCacheAudioParameters",
-       "(IIZZZZIIIJ)V",
+       "(IIZZZZIIJ)V",
        reinterpret_cast<void*>(&webrtc::AudioManager::CacheAudioParameters)}};
   j_native_registration_ = j_environment_->RegisterNatives(
       "org/webrtc/voiceengine/WebRtcAudioManager",
@@ -180,14 +182,12 @@ void JNICALL AudioManager::CacheAudioParameters(JNIEnv* env,
                                                 jboolean low_latency_output,
                                                 jint output_buffer_size,
                                                 jint input_buffer_size,
-                                                jint output_stream_type,
                                                 jlong native_audio_manager) {
   webrtc::AudioManager* this_object =
       reinterpret_cast<webrtc::AudioManager*>(native_audio_manager);
   this_object->OnCacheAudioParameters(
       env, sample_rate, channels, hardware_aec, hardware_agc, hardware_ns,
-      low_latency_output, output_buffer_size, input_buffer_size,
-      output_stream_type);
+      low_latency_output, output_buffer_size, input_buffer_size);
 }
 
 void AudioManager::OnCacheAudioParameters(JNIEnv* env,
@@ -198,8 +198,7 @@ void AudioManager::OnCacheAudioParameters(JNIEnv* env,
                                           jboolean hardware_ns,
                                           jboolean low_latency_output,
                                           jint output_buffer_size,
-                                          jint input_buffer_size,
-                                          jint output_stream_type) {
+                                          jint input_buffer_size) {
   ALOGD("OnCacheAudioParameters%s", GetThreadInfo().c_str());
   ALOGD("hardware_aec: %d", hardware_aec);
   ALOGD("hardware_agc: %d", hardware_agc);
@@ -209,17 +208,15 @@ void AudioManager::OnCacheAudioParameters(JNIEnv* env,
   ALOGD("channels: %d", channels);
   ALOGD("output_buffer_size: %d", output_buffer_size);
   ALOGD("input_buffer_size: %d", input_buffer_size);
-  ALOGD("output_stream_type: %d", output_stream_type);
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   hardware_aec_ = hardware_aec;
   hardware_agc_ = hardware_agc;
   hardware_ns_ = hardware_ns;
   low_latency_playout_ = low_latency_output;
-  output_stream_type_ = output_stream_type;
   // TODO(henrika): add support for stereo output.
-  playout_parameters_.reset(sample_rate, channels,
+  playout_parameters_.reset(sample_rate, static_cast<size_t>(channels),
                             static_cast<size_t>(output_buffer_size));
-  record_parameters_.reset(sample_rate, channels,
+  record_parameters_.reset(sample_rate, static_cast<size_t>(channels),
                            static_cast<size_t>(input_buffer_size));
 }
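The JNI signature edit ("(IIZZZZIIIJ)V" to "(IIZZZZIIJ)V") encodes the dropped parameter directly: each character names one argument type (I = jint, Z = jboolean, J = jlong) and the trailing V the void return, so removing the output_stream_type jint deletes exactly one I. A reduced sketch of the registration pattern, with a hypothetical method name and a narrower argument list:

    #include <jni.h>

    // Hypothetical static native method: two jints plus a jlong, void return.
    static void JNICALL CacheParams(JNIEnv* env, jclass clazz,
                                    jint sample_rate, jint channels,
                                    jlong native_ptr) {}

    // "(IIJ)V" must list the Java-visible parameters in order; a mismatch
    // makes RegisterNatives fail or crashes at call time.
    static const JNINativeMethod kMethods[] = {
        {"nativeCacheParams", "(IIJ)V", reinterpret_cast<void*>(&CacheParams)}};

    jint Register(JNIEnv* env, jclass clazz) {
      return env->RegisterNatives(clazz, kMethods, 1);
    }
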
diff --git a/webrtc/modules/audio_device/android/audio_manager.h b/webrtc/modules/audio_device/android/audio_manager.h
index 5f23147b8a..26caf61afe 100644
--- a/webrtc/modules/audio_device/android/audio_manager.h
+++ b/webrtc/modules/audio_device/android/audio_manager.h
@@ -19,8 +19,8 @@
 #include "webrtc/modules/audio_device/audio_device_config.h"
 #include "webrtc/modules/audio_device/include/audio_device_defines.h"
 #include "webrtc/modules/audio_device/audio_device_generic.h"
-#include "webrtc/modules/utility/interface/helpers_android.h"
-#include "webrtc/modules/utility/interface/jvm_android.h"
+#include "webrtc/modules/utility/include/helpers_android.h"
+#include "webrtc/modules/utility/include/jvm_android.h"
 
 namespace webrtc {
@@ -93,8 +93,6 @@ class AudioManager {
   // webrtc::kHighLatencyModeDelayEstimateInMilliseconds.
   int GetDelayEstimateInMilliseconds() const;
 
-  int OutputStreamType() const { return output_stream_type_; }
-
  private:
   // Called from Java side so we can cache the native audio parameters.
   // This method will be called by the WebRtcAudioManager constructor, i.e.
@@ -109,7 +107,6 @@ class AudioManager {
                                           jboolean low_latency_output,
                                           jint output_buffer_size,
                                           jint input_buffer_size,
-                                          jint output_stream_type,
                                           jlong native_audio_manager);
   void OnCacheAudioParameters(JNIEnv* env,
                               jint sample_rate,
@@ -119,8 +116,7 @@ class AudioManager {
                               jboolean hardware_ns,
                               jboolean low_latency_output,
                               jint output_buffer_size,
-                              jint input_buffer_size,
-                              jint output_stream_type);
+                              jint input_buffer_size);
 
   // Stores thread ID in the constructor.
   // We can then use ThreadChecker::CalledOnValidThread() to ensure that
@@ -159,13 +155,6 @@ class AudioManager {
   // device supports low-latency output or not.
   int delay_estimate_in_milliseconds_;
 
-  // Contains the output stream type provided to this class at construction by
-  // the AudioManager in Java land. Possible values are:
-  //  - AudioManager.STREAM_VOICE_CALL = 0
-  //  - AudioManager.STREAM_RING = 2
-  //  - AudioManager.STREAM_MUSIC = 3
-  int output_stream_type_;
-
   // Contains native parameters (e.g. sample rate, channel configuration).
   // Set at construction in OnCacheAudioParameters() which is called from
   // Java on the same thread as this object is created on.
diff --git a/webrtc/modules/audio_device/android/audio_manager_unittest.cc b/webrtc/modules/audio_device/android/audio_manager_unittest.cc
index a5bc840dff..ddae73067a 100644
--- a/webrtc/modules/audio_device/android/audio_manager_unittest.cc
+++ b/webrtc/modules/audio_device/android/audio_manager_unittest.cc
@@ -82,14 +82,14 @@ TEST_F(AudioManagerTest, ShowAudioParameterInfo) {
   PRINT("%saudio layer: %s\n", kTag,
         low_latency_out ? "Low latency OpenSL" : "Java/JNI based AudioTrack");
   PRINT("%ssample rate: %d Hz\n", kTag, playout_parameters_.sample_rate());
-  PRINT("%schannels: %d\n", kTag, playout_parameters_.channels());
+  PRINT("%schannels: %" PRIuS "\n", kTag, playout_parameters_.channels());
   PRINT("%sframes per buffer: %" PRIuS " <=> %.2f ms\n", kTag,
         playout_parameters_.frames_per_buffer(),
         playout_parameters_.GetBufferSizeInMilliseconds());
   PRINT("RECORD: \n");
   PRINT("%saudio layer: %s\n", kTag, "Java/JNI based AudioRecord");
   PRINT("%ssample rate: %d Hz\n", kTag, record_parameters_.sample_rate());
-  PRINT("%schannels: %d\n", kTag, record_parameters_.channels());
+  PRINT("%schannels: %" PRIuS "\n", kTag, record_parameters_.channels());
   PRINT("%sframes per buffer: %" PRIuS " <=> %.2f ms\n", kTag,
         record_parameters_.frames_per_buffer(),
         record_parameters_.GetBufferSizeInMilliseconds());
@@ -119,7 +119,7 @@ TEST_F(AudioManagerTest, AudioParametersWithDefaultConstruction) {
   AudioParameters params;
   EXPECT_FALSE(params.is_valid());
   EXPECT_EQ(0, params.sample_rate());
-  EXPECT_EQ(0, params.channels());
+  EXPECT_EQ(0U, params.channels());
   EXPECT_EQ(0U, params.frames_per_buffer());
   EXPECT_EQ(0U, params.frames_per_10ms_buffer());
   EXPECT_EQ(0U, params.GetBytesPerFrame());
@@ -131,7 +131,7 @@ TEST_F(AudioManagerTest, AudioParametersWithDefaultConstruction) {
 // Basic test of the AudioParameters class using non default construction.
 TEST_F(AudioManagerTest, AudioParametersWithNonDefaultConstruction) {
   const int kSampleRate = 48000;
-  const int kChannels = 1;
+  const size_t kChannels = 1;
   const size_t kFramesPerBuffer = 480;
   const size_t kFramesPer10msBuffer = 480;
   const size_t kBytesPerFrame = 2;
"Low latency OpenSL" : "Java/JNI based AudioTrack"); PRINT("%ssample rate: %d Hz\n", kTag, playout_parameters_.sample_rate()); - PRINT("%schannels: %d\n", kTag, playout_parameters_.channels()); + PRINT("%schannels: %" PRIuS "\n", kTag, playout_parameters_.channels()); PRINT("%sframes per buffer: %" PRIuS " <=> %.2f ms\n", kTag, playout_parameters_.frames_per_buffer(), playout_parameters_.GetBufferSizeInMilliseconds()); PRINT("RECORD: \n"); PRINT("%saudio layer: %s\n", kTag, "Java/JNI based AudioRecord"); PRINT("%ssample rate: %d Hz\n", kTag, record_parameters_.sample_rate()); - PRINT("%schannels: %d\n", kTag, record_parameters_.channels()); + PRINT("%schannels: %" PRIuS "\n", kTag, record_parameters_.channels()); PRINT("%sframes per buffer: %" PRIuS " <=> %.2f ms\n", kTag, record_parameters_.frames_per_buffer(), record_parameters_.GetBufferSizeInMilliseconds()); @@ -119,7 +119,7 @@ TEST_F(AudioManagerTest, AudioParametersWithDefaultConstruction) { AudioParameters params; EXPECT_FALSE(params.is_valid()); EXPECT_EQ(0, params.sample_rate()); - EXPECT_EQ(0, params.channels()); + EXPECT_EQ(0U, params.channels()); EXPECT_EQ(0U, params.frames_per_buffer()); EXPECT_EQ(0U, params.frames_per_10ms_buffer()); EXPECT_EQ(0U, params.GetBytesPerFrame()); @@ -131,7 +131,7 @@ TEST_F(AudioManagerTest, AudioParametersWithDefaultConstruction) { // Basic test of the AudioParameters class using non default construction. TEST_F(AudioManagerTest, AudioParametersWithNonDefaultConstruction) { const int kSampleRate = 48000; - const int kChannels = 1; + const size_t kChannels = 1; const size_t kFramesPerBuffer = 480; const size_t kFramesPer10msBuffer = 480; const size_t kBytesPerFrame = 2; diff --git a/webrtc/modules/audio_device/android/audio_record_jni.cc b/webrtc/modules/audio_device/android/audio_record_jni.cc index ba3212afc3..5dda7249ac 100644 --- a/webrtc/modules/audio_device/android/audio_record_jni.cc +++ b/webrtc/modules/audio_device/android/audio_record_jni.cc @@ -10,6 +10,8 @@ #include "webrtc/modules/audio_device/android/audio_record_jni.h" +#include <utility> + #include <android/log.h> #include "webrtc/base/arraysize.h" @@ -28,23 +30,20 @@ namespace webrtc { // AudioRecordJni::JavaAudioRecord implementation. 
diff --git a/webrtc/modules/audio_device/android/audio_track_jni.cc b/webrtc/modules/audio_device/android/audio_track_jni.cc
index 29b21ae998..057e016405 100644
--- a/webrtc/modules/audio_device/android/audio_track_jni.cc
+++ b/webrtc/modules/audio_device/android/audio_track_jni.cc
@@ -11,6 +11,8 @@
 #include "webrtc/modules/audio_device/android/audio_manager.h"
 #include "webrtc/modules/audio_device/android/audio_track_jni.h"
 
+#include <utility>
+
 #include <android/log.h>
 
 #include "webrtc/base/arraysize.h"
@@ -28,16 +30,16 @@ namespace webrtc {
 
 // AudioTrackJni::JavaAudioTrack implementation.
 AudioTrackJni::JavaAudioTrack::JavaAudioTrack(
-    NativeRegistration* native_reg, rtc::scoped_ptr<GlobalRef> audio_track)
-    : audio_track_(audio_track.Pass()),
+    NativeRegistration* native_reg,
+    rtc::scoped_ptr<GlobalRef> audio_track)
+    : audio_track_(std::move(audio_track)),
       init_playout_(native_reg->GetMethodId("initPlayout", "(II)V")),
       start_playout_(native_reg->GetMethodId("startPlayout", "()Z")),
       stop_playout_(native_reg->GetMethodId("stopPlayout", "()Z")),
       set_stream_volume_(native_reg->GetMethodId("setStreamVolume", "(I)Z")),
-      get_stream_max_volume_(native_reg->GetMethodId(
-          "getStreamMaxVolume", "()I")),
-      get_stream_volume_(native_reg->GetMethodId("getStreamVolume", "()I")) {
-}
+      get_stream_max_volume_(
+          native_reg->GetMethodId("getStreamMaxVolume", "()I")),
+      get_stream_volume_(native_reg->GetMethodId("getStreamVolume", "()I")) {}
 
 AudioTrackJni::JavaAudioTrack::~JavaAudioTrack() {}
 
@@ -200,8 +202,8 @@ void AudioTrackJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   const int sample_rate_hz = audio_parameters_.sample_rate();
   ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz);
   audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
-  const int channels = audio_parameters_.channels();
-  ALOGD("SetPlayoutChannels(%d)", channels);
+  const size_t channels = audio_parameters_.channels();
+  ALOGD("SetPlayoutChannels(%" PRIuS ")", channels);
   audio_device_buffer_->SetPlayoutChannels(channels);
 }
diff --git a/webrtc/modules/audio_device/android/audio_track_jni.h b/webrtc/modules/audio_device/android/audio_track_jni.h
index 43bfcad657..067dc6c651 100644
--- a/webrtc/modules/audio_device/android/audio_track_jni.h
+++ b/webrtc/modules/audio_device/android/audio_track_jni.h
@@ -18,8 +18,8 @@
 #include "webrtc/modules/audio_device/android/audio_manager.h"
 #include "webrtc/modules/audio_device/include/audio_device_defines.h"
 #include "webrtc/modules/audio_device/audio_device_generic.h"
-#include "webrtc/modules/utility/interface/helpers_android.h"
-#include "webrtc/modules/utility/interface/jvm_android.h"
+#include "webrtc/modules/utility/include/helpers_android.h"
+#include "webrtc/modules/utility/include/jvm_android.h"
 
 namespace webrtc {
diff --git a/webrtc/modules/audio_device/android/build_info.cc b/webrtc/modules/audio_device/android/build_info.cc
index cb5dc293d7..6289697073 100644
--- a/webrtc/modules/audio_device/android/build_info.cc
+++ b/webrtc/modules/audio_device/android/build_info.cc
@@ -10,7 +10,7 @@
 
 #include "webrtc/modules/audio_device/android/build_info.h"
 
-#include "webrtc/modules/utility/interface/helpers_android.h"
+#include "webrtc/modules/utility/include/helpers_android.h"
 
 namespace webrtc {
diff --git a/webrtc/modules/audio_device/android/build_info.h b/webrtc/modules/audio_device/android/build_info.h
index d9b2871841..1490fa0772 100644
--- a/webrtc/modules/audio_device/android/build_info.h
+++ b/webrtc/modules/audio_device/android/build_info.h
@@ -14,7 +14,7 @@
 #include <jni.h>
 #include <string>
 
-#include "webrtc/modules/utility/interface/jvm_android.h"
+#include "webrtc/modules/utility/include/jvm_android.h"
 
 namespace webrtc {
diff --git a/webrtc/modules/audio_device/android/ensure_initialized.cc b/webrtc/modules/audio_device/android/ensure_initialized.cc
index e8197b7ca0..b63aec1f27 100644
--- a/webrtc/modules/audio_device/android/ensure_initialized.cc
+++ b/webrtc/modules/audio_device/android/ensure_initialized.cc
@@ -14,11 +14,12 @@
 // Note: this dependency is dangerous since it reaches into Chromium's base.
 // There's a risk of e.g. macro clashes. This file may only be used in tests.
+#include "base/android/context_utils.h"
 #include "base/android/jni_android.h"
 
 #include "webrtc/base/checks.h"
 #include "webrtc/modules/audio_device/android/audio_record_jni.h"
 #include "webrtc/modules/audio_device/android/audio_track_jni.h"
-#include "webrtc/modules/utility/interface/jvm_android.h"
+#include "webrtc/modules/utility/include/jvm_android.h"
 
 namespace webrtc {
 namespace audiodevicemodule {
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
index 9b90f4ab54..c3ab043868 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
@@ -10,6 +10,7 @@
 
 package org.webrtc.voiceengine;
 
+import android.annotation.TargetApi;
 import android.media.audiofx.AcousticEchoCanceler;
 import android.media.audiofx.AudioEffect;
 import android.media.audiofx.AudioEffect.Descriptor;
@@ -119,6 +120,7 @@ class WebRtcAudioEffects {
 
   // Returns true if the platform AEC should be excluded based on its UUID.
   // AudioEffect.queryEffects() can throw IllegalStateException.
+  @TargetApi(18)
   private static boolean isAcousticEchoCancelerExcludedByUUID() {
     for (Descriptor d : AudioEffect.queryEffects()) {
       if (d.type.equals(AudioEffect.EFFECT_TYPE_AEC) &&
@@ -131,6 +133,7 @@ class WebRtcAudioEffects {
 
   // Returns true if the platform AGC should be excluded based on its UUID.
   // AudioEffect.queryEffects() can throw IllegalStateException.
+  @TargetApi(18)
   private static boolean isAutomaticGainControlExcludedByUUID() {
     for (Descriptor d : AudioEffect.queryEffects()) {
       if (d.type.equals(AudioEffect.EFFECT_TYPE_AGC) &&
@@ -143,6 +146,7 @@ class WebRtcAudioEffects {
 
   // Returns true if the platform NS should be excluded based on its UUID.
   // AudioEffect.queryEffects() can throw IllegalStateException.
+  @TargetApi(18)
   private static boolean isNoiseSuppressorExcludedByUUID() {
     for (Descriptor d : AudioEffect.queryEffects()) {
       if (d.type.equals(AudioEffect.EFFECT_TYPE_NS) &&
@@ -208,15 +212,6 @@ class WebRtcAudioEffects {
 
   private WebRtcAudioEffects() {
     Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
-    for (Descriptor d : AudioEffect.queryEffects()) {
-      if (effectTypeIsVoIP(d.type) || DEBUG) {
-        // Only log information for VoIP effects (AEC, AEC and NS).
-        Logging.d(TAG, "name: " + d.name + ", "
-            + "mode: " + d.connectMode + ", "
-            + "implementor: " + d.implementor + ", "
-            + "UUID: " + d.uuid);
-      }
-    }
   }
 
   // Call this method to enable or disable the platform AEC. It modifies
@@ -282,6 +277,17 @@ class WebRtcAudioEffects {
     assertTrue(agc == null);
     assertTrue(ns == null);
 
+    // Add logging of supported effects but filter out "VoIP effects", i.e.,
+    // AEC, AGC and NS.
+    for (Descriptor d : AudioEffect.queryEffects()) {
+      if (effectTypeIsVoIP(d.type) || DEBUG) {
+        Logging.d(TAG, "name: " + d.name + ", "
+            + "mode: " + d.connectMode + ", "
+            + "implementor: " + d.implementor + ", "
+            + "UUID: " + d.uuid);
+      }
+    }
+
     if (isAcousticEchoCancelerSupported()) {
       // Create an AcousticEchoCanceler and attach it to the AudioRecord on
       // the specified audio session.
@@ -366,7 +372,11 @@ class WebRtcAudioEffects {
   // AudioEffect.Descriptor array that are actually not available on the device.
   // As an example: Samsung Galaxy S6 includes an AGC in the descriptor but
   // AutomaticGainControl.isAvailable() returns false.
+  @TargetApi(18)
   private boolean effectTypeIsVoIP(UUID type) {
+    if (!WebRtcAudioUtils.runningOnJellyBeanMR2OrHigher())
+      return false;
+
     return (AudioEffect.EFFECT_TYPE_AEC.equals(type)
         && isAcousticEchoCancelerSupported())
         || (AudioEffect.EFFECT_TYPE_AGC.equals(type)
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
index cf2f03a2f1..1213f333d9 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
@@ -10,6 +10,7 @@
 
 package org.webrtc.voiceengine;
 
+import android.annotation.TargetApi;
 import android.content.Context;
 import android.content.pm.PackageManager;
 import android.media.AudioFormat;
@@ -33,11 +34,24 @@ import java.lang.Math;
 // recommended to always use AudioManager.MODE_IN_COMMUNICATION.
 // This class also adds support for output volume control of the
 // STREAM_VOICE_CALL-type stream.
-class WebRtcAudioManager {
+public class WebRtcAudioManager {
   private static final boolean DEBUG = false;
 
   private static final String TAG = "WebRtcAudioManager";
 
+  private static boolean blacklistDeviceForOpenSLESUsage = false;
+  private static boolean blacklistDeviceForOpenSLESUsageIsOverridden = false;
+
+  // Call this method to override the default list of blacklisted devices
+  // specified in WebRtcAudioUtils.BLACKLISTED_OPEN_SL_ES_MODELS.
+  // Allows an app to take control over which devices to exclude from using
+  // the OpenSL ES audio output path.
+  public static synchronized void setBlacklistDeviceForOpenSLESUsage(
+      boolean enable) {
+    blacklistDeviceForOpenSLESUsageIsOverridden = true;
+    blacklistDeviceForOpenSLESUsage = enable;
+  }
+
   // Default audio data format is PCM 16 bit per sample.
   // Guaranteed to be supported by all devices.
   private static final int BITS_PER_SAMPLE = 16;
@@ -71,7 +85,6 @@ class WebRtcAudioManager {
   private int channels;
   private int outputBufferSize;
   private int inputBufferSize;
-  private int outputStreamType;
 
   WebRtcAudioManager(Context context, long nativeAudioManager) {
     Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
@@ -85,7 +98,7 @@ class WebRtcAudioManager {
     storeAudioParameters();
     nativeCacheAudioParameters(
         sampleRate, channels, hardwareAEC, hardwareAGC, hardwareNS,
-        lowLatencyOutput, outputBufferSize, inputBufferSize, outputStreamType,
+        lowLatencyOutput, outputBufferSize, inputBufferSize,
         nativeAudioManager);
   }
 
@@ -110,8 +123,9 @@ class WebRtcAudioManager {
     return (audioManager.getMode() == AudioManager.MODE_IN_COMMUNICATION);
   }
 
-  private boolean isDeviceBlacklistedForOpenSLESUsage() {
-    boolean blacklisted =
+  private boolean isDeviceBlacklistedForOpenSLESUsage() {
+    boolean blacklisted = blacklistDeviceForOpenSLESUsageIsOverridden ?
+        blacklistDeviceForOpenSLESUsage :
         WebRtcAudioUtils.deviceIsBlacklistedForOpenSLESUsage();
     if (blacklisted) {
       Logging.e(TAG, Build.MODEL + " is blacklisted for OpenSL ES usage!");
@@ -133,8 +147,6 @@ class WebRtcAudioManager {
         getMinOutputFrameSize(sampleRate, channels);
     // TODO(henrika): add support for low-latency input.
     inputBufferSize = getMinInputFrameSize(sampleRate, channels);
-    outputStreamType = WebRtcAudioUtils.getOutputStreamTypeFromAudioMode(
-        audioManager.getMode());
   }
 
   // Gets the current earpiece state.
@@ -178,20 +190,26 @@ class WebRtcAudioManager {
     // No overrides available. Deliver best possible estimate based on default
     // Android AudioManager APIs.
     final int sampleRateHz;
-    if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
-      sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz();
+    if (WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
+      sampleRateHz = getSampleRateOnJellyBeanMR10OrHigher();
     } else {
-      String sampleRateString = audioManager.getProperty(
-          AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
-      sampleRateHz = (sampleRateString == null)
-          ? WebRtcAudioUtils.getDefaultSampleRateHz()
-          : Integer.parseInt(sampleRateString);
+      sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz();
     }
     Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
     return sampleRateHz;
   }
 
+  @TargetApi(17)
+  private int getSampleRateOnJellyBeanMR10OrHigher() {
+    String sampleRateString = audioManager.getProperty(
+        AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
+    return (sampleRateString == null)
+        ? WebRtcAudioUtils.getDefaultSampleRateHz()
+        : Integer.parseInt(sampleRateString);
+  }
+
   // Returns the native output buffer size for low-latency output streams.
+  @TargetApi(17)
   private int getLowLatencyOutputFramesPerBuffer() {
     assertTrue(isLowLatencyOutputSupported());
     if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
@@ -270,5 +288,5 @@ class WebRtcAudioManager {
   private native void nativeCacheAudioParameters(
       int sampleRate, int channels, boolean hardwareAEC, boolean hardwareAGC,
       boolean hardwareNS, boolean lowLatencyOutput, int outputBufferSize,
-      int inputBufferSize, int outputStreamType, long nativeAudioManager);
+      int inputBufferSize, long nativeAudioManager);
 }
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
index 7b31e08eed..ff77635843 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
@@ -192,10 +192,6 @@ class WebRtcAudioRecord {
         Math.max(BUFFER_SIZE_FACTOR * minBufferSize, byteBuffer.capacity());
     Logging.d(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
     try {
-      // TODO(henrika): the only supported audio source for input is currently
-      // AudioSource.VOICE_COMMUNICATION. Is there any reason why we should
-      // support other types, e.g. DEFAULT or MIC? Only reason I can think of
-      // is if the device does not support VOICE_COMMUNICATION.
       audioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION,
                                     sampleRate,
                                     AudioFormat.CHANNEL_IN_MONO,
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
index ec0e109169..11eb51383d 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
@@ -13,6 +13,7 @@ package org.webrtc.voiceengine;
 import java.lang.Thread;
 import java.nio.ByteBuffer;
 
+import android.annotation.TargetApi;
 import android.content.Context;
 import android.media.AudioFormat;
 import android.media.AudioManager;
@@ -39,7 +40,6 @@ class WebRtcAudioTrack {
   private final Context context;
   private final long nativeAudioTrack;
   private final AudioManager audioManager;
-  private final int streamType;
 
   private ByteBuffer byteBuffer;
 
@@ -91,13 +91,9 @@ class WebRtcAudioTrack {
       assertTrue(sizeInBytes <= byteBuffer.remaining());
       int bytesWritten = 0;
       if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
-        bytesWritten = audioTrack.write(byteBuffer,
-                                        sizeInBytes,
-                                        AudioTrack.WRITE_BLOCKING);
+        bytesWritten = writeOnLollipop(audioTrack, byteBuffer, sizeInBytes);
       } else {
-        bytesWritten = audioTrack.write(byteBuffer.array(),
-                                        byteBuffer.arrayOffset(),
-                                        sizeInBytes);
+        bytesWritten = writePreLollipop(audioTrack, byteBuffer, sizeInBytes);
       }
       if (bytesWritten != sizeInBytes) {
         Logging.e(TAG, "AudioTrack.write failed: " + bytesWritten);
@@ -124,6 +120,15 @@ class WebRtcAudioTrack {
       audioTrack.flush();
     }
 
+    @TargetApi(21)
+    private int writeOnLollipop(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) {
+      return audioTrack.write(byteBuffer, sizeInBytes, AudioTrack.WRITE_BLOCKING);
+    }
+
+    private int writePreLollipop(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) {
+      return audioTrack.write(byteBuffer.array(), byteBuffer.arrayOffset(), sizeInBytes);
+    }
+
     public void joinThread() {
       keepAlive = false;
       while (isAlive()) {
@@ -142,9 +147,6 @@ class WebRtcAudioTrack {
     this.nativeAudioTrack = nativeAudioTrack;
     audioManager = (AudioManager) context.getSystemService(
         Context.AUDIO_SERVICE);
-    this.streamType =
-        WebRtcAudioUtils.getOutputStreamTypeFromAudioMode(
-            audioManager.getMode());
     if (DEBUG) {
       WebRtcAudioUtils.logDeviceInfo(TAG);
     }
@@ -181,7 +183,7 @@ class WebRtcAudioTrack {
     // Create an AudioTrack object and initialize its associated audio buffer.
     // The size of this buffer determines how long an AudioTrack can play
     // before running out of data.
-    audioTrack = new AudioTrack(streamType,
+    audioTrack = new AudioTrack(AudioManager.STREAM_VOICE_CALL,
                                 sampleRate,
                                 AudioFormat.CHANNEL_OUT_MONO,
                                 AudioFormat.ENCODING_PCM_16BIT,
@@ -193,7 +195,7 @@ class WebRtcAudioTrack {
     }
     assertTrue(audioTrack.getState() == AudioTrack.STATE_INITIALIZED);
     assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED);
-    assertTrue(audioTrack.getStreamType() == streamType);
+    assertTrue(audioTrack.getStreamType() == AudioManager.STREAM_VOICE_CALL);
   }
 
   private boolean startPlayout() {
@@ -217,32 +219,37 @@ class WebRtcAudioTrack {
     return true;
   }
 
-  /** Get max possible volume index given type of audio stream. */
+  /** Get max possible volume index for a phone call audio stream. */
   private int getStreamMaxVolume() {
     Logging.d(TAG, "getStreamMaxVolume");
     assertTrue(audioManager != null);
-    return audioManager.getStreamMaxVolume(streamType);
+    return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
   }
 
-  /** Set current volume level given type of audio stream. */
+  /** Set current volume level for a phone call audio stream. */
   private boolean setStreamVolume(int volume) {
     Logging.d(TAG, "setStreamVolume(" + volume + ")");
     assertTrue(audioManager != null);
-    if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
-      if (audioManager.isVolumeFixed()) {
-        Logging.e(TAG, "The device implements a fixed volume policy.");
-        return false;
-      }
+    if (isVolumeFixed()) {
+      Logging.e(TAG, "The device implements a fixed volume policy.");
+      return false;
     }
-    audioManager.setStreamVolume(streamType, volume, 0);
+    audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0);
     return true;
   }
 
-  /** Get current volume level given type of audio stream. */
+  @TargetApi(21)
+  private boolean isVolumeFixed() {
+    if (!WebRtcAudioUtils.runningOnLollipopOrHigher())
+      return false;
+    return audioManager.isVolumeFixed();
+  }
+
+  /** Get current volume level for a phone call audio stream. */
   private int getStreamVolume() {
     Logging.d(TAG, "getStreamVolume");
     assertTrue(audioManager != null);
-    return audioManager.getStreamVolume(streamType);
+    return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
   }
 
   /** Helper method which throws an exception when an assertion has failed. */
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
index f08e11dad8..45f564a4dd 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
@@ -144,6 +144,11 @@ public final class WebRtcAudioUtils {
     return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1;
   }
 
+  public static boolean runningOnJellyBeanMR2OrHigher() {
+    // July 24, 2013: Android 4.3. API Level 18.
+    return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2;
+  }
+
   public static boolean runningOnLollipopOrHigher() {
     // API Level 21.
     return Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP;
@@ -193,37 +198,5 @@ public final class WebRtcAudioUtils {
         permission, Process.myPid(), Process.myUid()) ==
             PackageManager.PERMISSION_GRANTED;
-  }
-
-  // Convert the provided audio |mode| into most suitable audio output stream
-  // type. The stream type is used for creating audio streams and for volume
-  // changes. It is essential that the mode and type are in-line to ensure
-  // correct behavior. If for example a STREAM_MUSIC type of stream is created
-  // in a MODE_IN_COMMUNICATION mode, audio will be played out and the volume
-  // icon will look OK but the actual volume will not be changed when the user
-  // changes the volume slider.
-  // TODO(henrika): there is currently no mapping to STREAM_ALARM, STREAM_DTMF,
-  // or STREAM_NOTIFICATION types since I am unable to see a reason for using
-  // them. There are only four different modes.
-  public static int getOutputStreamTypeFromAudioMode(int mode) {
-    Logging.d(TAG, "getOutputStreamTypeFromAudioMode(mode=" + mode + ")");
-    switch (mode) {
-      case AudioManager.MODE_NORMAL:
-        // The audio stream for music playback.
-        Logging.d(TAG, "AudioManager.STREAM_MUSIC");
-        return AudioManager.STREAM_MUSIC;
-      case AudioManager.MODE_RINGTONE:
-        // Audio stream for the phone ring.
-        Logging.d(TAG, "AudioManager.STREAM_RING");
-        return AudioManager.STREAM_RING;
-      case AudioManager.MODE_IN_CALL:
-      case AudioManager.MODE_IN_COMMUNICATION:
-        // Audio stream for phone calls.
-        Logging.d(TAG, "AudioManager.STREAM_VOICE_CALL");
-        return AudioManager.STREAM_VOICE_CALL;
-      default:
-        Logging.d(TAG, "AudioManager.USE_DEFAULT_STREAM_TYPE");
-        return AudioManager.USE_DEFAULT_STREAM_TYPE;
     }
-  }
 }
- Logging.d(TAG, "AudioManager.STREAM_MUSIC"); - return AudioManager.STREAM_MUSIC; - case AudioManager.MODE_RINGTONE: - // Audio stream for the phone ring. - Logging.d(TAG, "AudioManager.STREAM_RING"); - return AudioManager.STREAM_RING; - case AudioManager.MODE_IN_CALL: - case AudioManager.MODE_IN_COMMUNICATION: - // Audio stream for phone calls. - Logging.d(TAG, "AudioManager.STREAM_VOICE_CALL"); - return AudioManager.STREAM_VOICE_CALL; - default: - Logging.d(TAG, "AudioManager.USE_DEFAULT_STREAM_TYPE"); - return AudioManager.USE_DEFAULT_STREAM_TYPE; } - } } diff --git a/webrtc/modules/audio_device/android/opensles_player.cc b/webrtc/modules/audio_device/android/opensles_player.cc index 40967c5fb9..d2bff4905e 100644 --- a/webrtc/modules/audio_device/android/opensles_player.cc +++ b/webrtc/modules/audio_device/android/opensles_player.cc @@ -15,6 +15,7 @@ #include "webrtc/base/arraysize.h" #include "webrtc/base/checks.h" #include "webrtc/base/format_macros.h" +#include "webrtc/base/timeutils.h" #include "webrtc/modules/audio_device/android/audio_manager.h" #include "webrtc/modules/audio_device/fine_audio_buffer.h" @@ -38,7 +39,6 @@ namespace webrtc { OpenSLESPlayer::OpenSLESPlayer(AudioManager* audio_manager) : audio_parameters_(audio_manager->GetPlayoutAudioParameters()), - stream_type_(audio_manager->OutputStreamType()), audio_device_buffer_(NULL), initialized_(false), playing_(false), @@ -47,11 +47,9 @@ OpenSLESPlayer::OpenSLESPlayer(AudioManager* audio_manager) engine_(nullptr), player_(nullptr), simple_buffer_queue_(nullptr), - volume_(nullptr) { + volume_(nullptr), + last_play_time_(0) { ALOGD("ctor%s", GetThreadInfo().c_str()); - RTC_DCHECK(stream_type_ == SL_ANDROID_STREAM_VOICE || - stream_type_ == SL_ANDROID_STREAM_RING || - stream_type_ == SL_ANDROID_STREAM_MEDIA) << stream_type_; // Use native audio output parameters provided by the audio manager and // define the PCM format structure. pcm_format_ = CreatePCMConfiguration(audio_parameters_.channels(), @@ -99,6 +97,7 @@ int OpenSLESPlayer::InitPlayout() { CreateMix(); initialized_ = true; buffer_index_ = 0; + last_play_time_ = rtc::Time(); return 0; } @@ -180,15 +179,15 @@ void OpenSLESPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { const int sample_rate_hz = audio_parameters_.sample_rate(); ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz); audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz); - const int channels = audio_parameters_.channels(); - ALOGD("SetPlayoutChannels(%d)", channels); + const size_t channels = audio_parameters_.channels(); + ALOGD("SetPlayoutChannels(%" PRIuS ")", channels); audio_device_buffer_->SetPlayoutChannels(channels); RTC_CHECK(audio_device_buffer_); AllocateDataBuffers(); } SLDataFormat_PCM OpenSLESPlayer::CreatePCMConfiguration( - int channels, + size_t channels, int sample_rate, size_t bits_per_sample) { ALOGD("CreatePCMConfiguration"); @@ -237,7 +236,16 @@ void OpenSLESPlayer::AllocateDataBuffers() { RTC_DCHECK(thread_checker_.CalledOnValidThread()); RTC_DCHECK(!simple_buffer_queue_); RTC_CHECK(audio_device_buffer_); - bytes_per_buffer_ = audio_parameters_.GetBytesPerBuffer(); + // Don't use the lowest possible size as native buffer size. Instead, + // use 10ms to better match the frame size that WebRTC uses. It will result + // in a reduced risk for audio glitches and also in a more "clean" sequence + // of callbacks from the OpenSL ES thread in to WebRTC when asking for audio + // to render. 
+ ALOGD("lowest possible buffer size: %" PRIuS, + audio_parameters_.GetBytesPerBuffer()); + bytes_per_buffer_ = audio_parameters_.GetBytesPerFrame() * + audio_parameters_.frames_per_10ms_buffer(); + RTC_DCHECK_GE(bytes_per_buffer_, audio_parameters_.GetBytesPerBuffer()); ALOGD("native buffer size: %" PRIuS, bytes_per_buffer_); // Create a modified audio buffer class which allows us to ask for any number // of samples (and not only multiple of 10ms) to match the native OpenSL ES @@ -351,7 +359,7 @@ bool OpenSLESPlayer::CreateAudioPlayer() { false); // Set audio player configuration to SL_ANDROID_STREAM_VOICE which // corresponds to android.media.AudioManager.STREAM_VOICE_CALL. - SLint32 stream_type = stream_type_; + SLint32 stream_type = SL_ANDROID_STREAM_VOICE; RETURN_ON_ERROR( (*player_config) ->SetConfiguration(player_config, SL_ANDROID_KEY_STREAM_TYPE, @@ -422,6 +430,15 @@ void OpenSLESPlayer::FillBufferQueue() { } void OpenSLESPlayer::EnqueuePlayoutData() { + // Check delta time between two successive callbacks and provide a warning + // if it becomes very large. + // TODO(henrika): using 100ms as upper limit but this value is rather random. + const uint32_t current_time = rtc::Time(); + const uint32_t diff = current_time - last_play_time_; + if (diff > 100) { + ALOGW("Bad OpenSL ES playout timing, dT=%u [ms]", diff); + } + last_play_time_ = current_time; // Read audio data from the WebRTC source using the FineAudioBuffer object // to adjust for differences in buffer size between WebRTC (10ms) and native // OpenSL ES. diff --git a/webrtc/modules/audio_device/android/opensles_player.h b/webrtc/modules/audio_device/android/opensles_player.h index 96b1d49ac5..fa9e931218 100644 --- a/webrtc/modules/audio_device/android/opensles_player.h +++ b/webrtc/modules/audio_device/android/opensles_player.h @@ -22,7 +22,7 @@ #include "webrtc/modules/audio_device/android/opensles_common.h" #include "webrtc/modules/audio_device/include/audio_device_defines.h" #include "webrtc/modules/audio_device/audio_device_generic.h" -#include "webrtc/modules/utility/interface/helpers_android.h" +#include "webrtc/modules/utility/include/helpers_android.h" namespace webrtc { @@ -52,7 +52,7 @@ class OpenSLESPlayer { // buffer count of 2 or more, and a buffer size and sample rate that are // compatible with the device's native output configuration provided via the // audio manager at construction. - static const int kNumOfOpenSLESBuffers = 2; + static const int kNumOfOpenSLESBuffers = 4; // There is no need for this class to use JNI. static int32_t SetAndroidAudioDeviceObjects(void* javaVM, void* context) { @@ -94,7 +94,7 @@ class OpenSLESPlayer { void EnqueuePlayoutData(); // Configures the SL_DATAFORMAT_PCM structure. - SLDataFormat_PCM CreatePCMConfiguration(int channels, + SLDataFormat_PCM CreatePCMConfiguration(size_t channels, int sample_rate, size_t bits_per_sample); @@ -130,20 +130,6 @@ class OpenSLESPlayer { // AudioManager. const AudioParameters audio_parameters_; - // Contains the stream type provided to this class at construction by the - // AudioManager. Possible input values are: - // - AudioManager.STREAM_VOICE_CALL = 0 - // - AudioManager.STREAM_RING = 2 - // - AudioManager.STREAM_MUSIC = 3 - // These value are mapped to the corresponding audio playback stream type - // values in the "OpenSL ES domain": - // - SL_ANDROID_STREAM_VOICE <=> STREAM_VOICE_CALL (0) - // - SL_ANDROID_STREAM_RING <=> STREAM_RING (2) - // - SL_ANDROID_STREAM_MEDIA <=> STREAM_MUSIC (3) - // when creating the audio player. 
See SLES/OpenSLES_AndroidConfiguration.h - // for details. - const int stream_type_; - // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the // AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create(). AudioDeviceBuffer* audio_device_buffer_; @@ -209,6 +195,9 @@ class OpenSLESPlayer { // This interface exposes controls for manipulating the object’s audio volume // properties. This interface is supported on the Audio Player object. SLVolumeItf volume_; + + // Last time the OpenSL ES layer asked for audio data to play out. + uint32_t last_play_time_; }; } // namespace webrtc |
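Together with the .cc change, the header updates double the buffer queue depth (kNumOfOpenSLESBuffers goes from 2 to 4) and add the last_play_time_ member that backs the timing warning in EnqueuePlayoutData(). A rough, illustrative bound on what the deeper queue buys:

    #include <cstdio>

    int main() {
      const int buffers = 4;         // kNumOfOpenSLESBuffers after this change
      const int ms_per_buffer = 10;  // one WebRTC frame per OpenSL ES buffer
      // Up to 40 ms of audio can sit in the native queue, absorbing scheduling
      // jitter at the cost of added playout latency.
      std::printf("max queued audio: %d ms\n", buffers * ms_per_buffer);
    }
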